Diffstat
-rw-r--r--  CREDITS | 4
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 5
-rw-r--r--  Documentation/admin-guide/sysctl/net.rst | 20
-rw-r--r--  Documentation/bpf/bpf_devel_QA.rst | 23
-rw-r--r--  Documentation/bpf/btf.rst | 25
-rw-r--r--  Documentation/bpf/index.rst | 1
-rw-r--r--  Documentation/bpf/prog_sk_lookup.rst | 98
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,systemport.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/net/can/fsl-flexcan.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml | 79
-rw-r--r--  Documentation/devicetree/bindings/net/can/rcar_can.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/net/can/rcar_canfd.txt | 5
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/b53.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/mt7530.txt | 13
-rw-r--r--  Documentation/devicetree/bindings/net/ethernet-controller.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml | 130
-rw-r--r--  Documentation/devicetree/bindings/net/marvell,prestera.txt | 34
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml | 73
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,etheravb.yaml | 262
-rw-r--r--  Documentation/devicetree/bindings/net/renesas,ravb.txt | 135
-rw-r--r--  Documentation/devicetree/bindings/net/smsc-lan87xx.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83822.yaml | 80
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/ptp/ptp-qoriq.txt | 2
-rw-r--r--  Documentation/driver-api/80211/cfg80211.rst | 392
-rw-r--r--  Documentation/driver-api/80211/mac80211-advanced.rst | 151
-rw-r--r--  Documentation/driver-api/80211/mac80211.rst | 148
-rw-r--r--  Documentation/networking/af_xdp.rst | 68
-rw-r--r--  Documentation/networking/caif/index.rst | 1
-rw-r--r--  Documentation/networking/caif/spi_porting.rst | 229
-rw-r--r--  Documentation/networking/device_drivers/ethernet/amazon/ena.rst | 25
-rw-r--r--  Documentation/networking/devlink/devlink-flash.rst | 28
-rw-r--r--  Documentation/networking/devlink/devlink-params.rst | 6
-rw-r--r--  Documentation/networking/devlink/devlink-reload.rst | 81
-rw-r--r--  Documentation/networking/devlink/devlink-trap.rst | 70
-rw-r--r--  Documentation/networking/devlink/ice.rst | 36
-rw-r--r--  Documentation/networking/devlink/index.rst | 1
-rw-r--r--  Documentation/networking/ethtool-netlink.rst | 11
-rw-r--r--  Documentation/networking/index.rst | 1
-rw-r--r--  Documentation/networking/kapi.rst | 9
-rw-r--r--  Documentation/networking/l2tp.rst | 939
-rw-r--r--  Documentation/networking/scaling.rst | 6
-rw-r--r--  Documentation/networking/statistics.rst | 179
-rw-r--r--  Documentation/networking/vxlan.rst | 28
-rw-r--r--  MAINTAINERS | 50
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi | 4
-rw-r--r--  arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts | 1
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts | 50
-rw-r--r--  arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts | 63
-rw-r--r--  arch/mips/boot/dts/mscc/ocelot.dtsi | 4
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1040rdb.dts | 107
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1040si-post.dtsi | 78
-rw-r--r--  arch/s390/include/asm/ccwdev.h | 9
-rw-r--r--  arch/s390/include/asm/chsc.h | 7
-rw-r--r--  arch/s390/include/asm/css_chars.h | 4
-rw-r--r--  arch/s390/net/bpf_jit_comp.c | 61
-rw-r--r--  arch/x86/include/asm/nospec-branch.h | 16
-rw-r--r--  arch/x86/net/bpf_jit_comp.c | 310
-rw-r--r--  drivers/atm/atmtcp.c | 2
-rw-r--r--  drivers/bcma/driver_pci_host.c | 4
-rw-r--r--  drivers/block/nbd.c | 6
-rw-r--r--  drivers/bluetooth/btintel.c | 291
-rw-r--r--  drivers/bluetooth/btintel.h | 91
-rw-r--r--  drivers/bluetooth/btmrvl_sdio.c | 54
-rw-r--r--  drivers/bluetooth/btmtksdio.c | 4
-rw-r--r--  drivers/bluetooth/btusb.c | 129
-rw-r--r--  drivers/bluetooth/hci_h5.c | 2
-rw-r--r--  drivers/bluetooth/hci_intel.c | 54
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 1
-rw-r--r--  drivers/bluetooth/hci_qca.c | 8
-rw-r--r--  drivers/bluetooth/hci_serdev.c | 36
-rw-r--r--  drivers/connector/connector.c | 7
-rw-r--r--  drivers/crypto/chelsio/Kconfig | 32
-rw-r--r--  drivers/crypto/chelsio/Makefile | 5
-rw-r--r--  drivers/crypto/chelsio/chcr_algo.h | 33
-rw-r--r--  drivers/crypto/chelsio/chcr_core.c | 62
-rw-r--r--  drivers/crypto/chelsio/chcr_core.h | 98
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_main.c | 34
-rw-r--r--  drivers/net/Kconfig | 4
-rw-r--r--  drivers/net/Makefile | 2
-rw-r--r--  drivers/net/appletalk/cops.c | 2
-rw-r--r--  drivers/net/appletalk/ltpc.c | 2
-rw-r--r--  drivers/net/bareudp.c | 11
-rw-r--r--  drivers/net/caif/Kconfig | 19
-rw-r--r--  drivers/net/caif/Makefile | 4
-rw-r--r--  drivers/net/caif/caif_hsi.c | 19
-rw-r--r--  drivers/net/caif/caif_spi.c | 874
-rw-r--r--  drivers/net/caif/caif_spi_slave.c | 254
-rw-r--r--  drivers/net/caif/caif_virtio.c | 2
-rw-r--r--  drivers/net/can/Kconfig | 4
-rw-r--r--  drivers/net/can/at91_can.c | 8
-rw-r--r--  drivers/net/can/c_can/c_can.c | 9
-rw-r--r--  drivers/net/can/c_can/c_can.h | 4
-rw-r--r--  drivers/net/can/cc770/cc770.c | 2
-rw-r--r--  drivers/net/can/cc770/cc770.h | 2
-rw-r--r--  drivers/net/can/dev.c | 58
-rw-r--r--  drivers/net/can/flexcan.c | 610
-rw-r--r--  drivers/net/can/grcan.c | 4
-rw-r--r--  drivers/net/can/m_can/Kconfig | 2
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 2
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 2
-rw-r--r--  drivers/net/can/mscan/mscan.c | 29
-rw-r--r--  drivers/net/can/pch_can.c | 67
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 2
-rw-r--r--  drivers/net/can/rx-offload.c | 11
-rw-r--r--  drivers/net/can/sja1000/peak_pci.c | 2
-rw-r--r--  drivers/net/can/sja1000/peak_pcmcia.c | 2
-rw-r--r--  drivers/net/can/softing/Kconfig | 6
-rw-r--r--  drivers/net/can/softing/softing_fw.c | 8
-rw-r--r--  drivers/net/can/softing/softing_main.c | 11
-rw-r--r--  drivers/net/can/softing/softing_platform.h | 2
-rw-r--r--  drivers/net/can/spi/Kconfig | 4
-rw-r--r--  drivers/net/can/spi/Makefile | 1
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 345
-rw-r--r--  drivers/net/can/spi/mcp251xfd/Kconfig | 17
-rw-r--r--  drivers/net/can/spi/mcp251xfd/Makefile | 8
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 2927
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c | 89
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 556
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 835
-rw-r--r--  drivers/net/can/ti_hecc.c | 29
-rw-r--r--  drivers/net/can/usb/Kconfig | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 4
-rw-r--r--  drivers/net/can/usb/mcba_usb.c | 4
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb.c | 166
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_fd.c | 4
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_pro.c | 4
-rw-r--r--  drivers/net/can/usb/ucan.c | 4
-rw-r--r--  drivers/net/can/usb/usb_8dev.c | 4
-rw-r--r--  drivers/net/can/xilinx_can.c | 16
-rw-r--r--  drivers/net/dsa/Kconfig | 6
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 99
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 5
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 136
-rw-r--r--  drivers/net/dsa/bcm_sf2.h | 4
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 59
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 26
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 6
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 32
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 1
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 19
-rw-r--r--  drivers/net/dsa/mt7530.c | 1197
-rw-r--r--  drivers/net/dsa/mt7530.h | 259
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Makefile | 1
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 308
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 18
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.c | 633
-rw-r--r--  drivers/net/dsa/mv88e6xxx/devlink.h | 21
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.c | 59
-rw-r--r--  drivers/net/dsa/ocelot/Kconfig | 23
-rw-r--r--  drivers/net/dsa/ocelot/Makefile | 6
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 124
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 32
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 639
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 284
-rw-r--r--  drivers/net/dsa/qca8k.c | 6
-rw-r--r--  drivers/net/dsa/realtek-smi-core.c | 3
-rw-r--r--  drivers/net/dsa/realtek-smi-core.h | 9
-rw-r--r--  drivers/net/dsa/rtl8366.c | 291
-rw-r--r--  drivers/net/dsa/rtl8366rb.c | 115
-rw-r--r--  drivers/net/dsa/sja1105/Makefile | 1
-rw-r--r--  drivers/net/dsa/sja1105/sja1105.h | 20
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_devlink.c | 262
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 10
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 326
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_spi.c | 5
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c | 61
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 17
-rw-r--r--  drivers/net/ethernet/8390/lib8390.c | 32
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 6
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 77
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 6
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 9
-rw-r--r--  drivers/net/ethernet/alteon/acenic.h | 3
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 128
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 247
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 42
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_common_defs.h | 31
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 84
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 37
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h | 31
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 203
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 178
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 40
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h | 31
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 31
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 11
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 19
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-i2c.c | 11
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 11
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 53
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 50
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c | 37
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c | 13
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 160
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 55
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 66
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 50
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 19
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 40
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 8
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 22
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 98
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 735
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 162
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 173
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 336
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 397
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 18
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_cee.c | 20
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 13
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 7
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 21
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 80
-rw-r--r--  drivers/net/ethernet/cadence/macb_pci.c | 3
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 92
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 363
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c | 158
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c | 12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 13
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 11
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 1
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 8
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 14
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/chelsio/Makefile | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 76
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/adapter.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/ael1002.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c | 91
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/t3_hw.c | 9
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 54
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 17
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 204
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 175
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h | 15
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 58
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 32
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 92
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/Kconfig | 52
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/Makefile | 4
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile | 8
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c (renamed from drivers/crypto/chelsio/chcr_ipsec.c) | 225
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h | 58
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile | 5
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h (renamed from drivers/crypto/chelsio/chcr_common.h) | 24
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c (renamed from drivers/crypto/chelsio/chcr_ktls.c) | 471
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h (renamed from drivers/crypto/chelsio/chcr_ktls.h) | 43
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile (renamed from drivers/crypto/chelsio/chtls/Makefile) | 0
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h (renamed from drivers/crypto/chelsio/chtls/chtls.h) | 88
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c (renamed from drivers/crypto/chelsio/chtls/chtls_cm.c) | 0
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h (renamed from drivers/crypto/chelsio/chtls/chtls_cm.h) | 0
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c (renamed from drivers/crypto/chelsio/chtls/chtls_hw.c) | 0
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c (renamed from drivers/crypto/chelsio/chtls/chtls_io.c) | 0
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c (renamed from drivers/crypto/chelsio/chtls/chtls_main.c) | 2
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.h | 4
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_api.c | 8
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 115
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 66
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 40
-rw-r--r--  drivers/net/ethernet/dec/tulip/de2104x.c | 62
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 44
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c | 56
-rw-r--r--  drivers/net/ethernet/dec/tulip/media.c | 5
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 65
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 44
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 80
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 81
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 21
-rw-r--r--  drivers/net/ethernet/dnet.c | 13
-rw-r--r--  drivers/net/ethernet/ethoc.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/Makefile | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c | 63
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c | 309
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 746
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 125
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 98
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c | 88
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h | 21
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.c | 79
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpni.h | 35
-rw-r--r--  drivers/net/ethernet/freescale/enetc/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 53
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.h | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 335
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.h | 8
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_qos.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_vf.c | 7
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 38
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 10
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 14
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 23
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 11
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 106
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 315
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 62
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 365
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 301
-rw-r--r--  drivers/net/ethernet/google/gve/gve_register.h | 1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 37
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 148
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 90
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 77
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 352
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 35
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 45
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 67
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 38
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 37
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 26
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 180
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 103
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 62
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 34
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 174
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_debugfs.c | 318
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_debugfs.h | 114
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_dev.h | 20
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_devlink.c | 8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c | 7
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c | 27
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_if.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.c | 6
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_io.h | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h | 6
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 92
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 2
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 55
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 7
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 19
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 415
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 4
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 159
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 40
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 5
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 23
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 56
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 349
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h | 8
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 50
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx_common.h | 19
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 105
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_adminq.h | 4
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 20
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_trace.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 11
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 27
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.c | 116
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devlink.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 233
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 5
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.c | 66
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flow.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 51
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 127
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 138
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 7
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_i210.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 80
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 472
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_base.c | 5
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 16
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_hw.h | 11
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 52
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 62
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_hw.c | 135
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 49
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 63
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 16
-rw-r--r--  drivers/net/ethernet/jme.c | 40
-rw-r--r--  drivers/net/ethernet/korina.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/marvell/Makefile | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 47
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2.h | 203
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 878
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c | 457
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/Makefile | 5
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 4
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.c | 11
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 47
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h | 541
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.c | 275
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/ptp.h | 25
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 36
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 22
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 41
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 239
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 103
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 98
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 35
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 180
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 212
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 112
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/prestera/Kconfig | 25
-rw-r--r--  drivers/net/ethernet/marvell/prestera/Makefile | 7
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera.h | 206
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.c | 112
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_devlink.h | 23
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_dsa.c | 104
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_dsa.h | 35
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_ethtool.c | 780
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_ethtool.h | 11
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.c | 1253
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.h | 182
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 667
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_pci.c | 769
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 820
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_rxtx.h | 19
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.c | 1277
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_switchdev.h | 13
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 116
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c | 58
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ecpf.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/health.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 527
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 83
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | 60
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c) | 112
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 182
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 110
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 106
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 881
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 97
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 663
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c | 944
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h | 68
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 124
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 505
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 463
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 911
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h | 93
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c | 183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 642
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 368
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | 173
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 239
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 594
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 377
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 163
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 204
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 32
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 120
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 6
-rw-r--r--  drivers/net/ethernet/micrel/ks8842.c | 17
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 76
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 11
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 114
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.h | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 565
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_io.c | 17
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 61
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ptp.c | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_s2.h | 64
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.c | 856
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.h | 99
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 195
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 5
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 63
-rw-r--r--  drivers/net/ethernet/natsemi/ns83820.c | 77
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 24
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.h | 2
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 91
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 14
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.h | 7
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c | 12
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-traffic.c | 72
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/offload.c | 18
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 17
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 73
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 85
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 7
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 7
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 5
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c | 14
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/pensando/ionic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h | 7
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 47
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_debugfs.c | 31
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 87
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 73
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.c | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_devlink.h | 3
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 198
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_fw.c | 206
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 34
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 1076
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 115
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 101
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_stats.c | 48
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 182
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 5
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_devlink.c | 259
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_devlink.h | 20
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 27
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 130
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_rdma.c | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 38
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 10
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 1
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_uart.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 56
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 55
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 12
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 83
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 17
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 152
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.c | 41
-rw-r--r--  drivers/net/ethernet/sfc/ef100_netdev.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef100_nic.c | 23
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.c | 44
-rw-r--r--  drivers/net/ethernet/sfc/ef100_tx.h | 1
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 21
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 15
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 124
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c | 47
-rw-r--r--  drivers/net/ethernet/sfc/falcon/farch.c | 29
-rw-r--r--  drivers/net/ethernet/sfc/falcon/rx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/selftest.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 33
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_functions.c | 24
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_functions.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 593
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port_common.c | 605
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port_common.h | 15
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 131
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/nic_common.h | 47
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/selftest.c | 18
-rw-r--r--  drivers/net/ethernet/sfc/selftest.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/siena.c | 1
-rw-r--r--  drivers/net/ethernet/sfc/tx.c | 136
-rw-r--r--  drivers/net/ethernet/sfc/tx.h | 26
-rw-r--r--  drivers/net/ethernet/sfc/tx_common.c | 19
-rw-r--r--  drivers/net/ethernet/silan/sc92031.c | 40
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 8
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 71
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 13
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 6
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 51
-rw-r--r--  drivers/net/ethernet/socionext/sni_ave.c | 32
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 17
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 196
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 55
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 297
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 2
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 4
-rw-r--r--  drivers/net/ethernet/sun/sunbmac.c | 18
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 5
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-common.c | 2
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 70
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 16
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 1
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 43
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 10
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 421
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 7
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 2
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 2
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 42
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 2
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 10
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 18
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 67
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 48
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 2
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 40
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 26
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 3
-rw-r--r--  drivers/net/fddi/skfp/h/smc.h | 2
-rw-r--r--  drivers/net/geneve.c | 11
-rw-r--r--  drivers/net/gtp.c | 74
-rw-r--r--  drivers/net/hippi/rrunner.c | 117
-rw-r--r--  drivers/net/ieee802154/mac802154_hwsim.c | 6
-rw-r--r--  drivers/net/ipa/gsi.c | 32
-rw-r--r--  drivers/net/ipa/gsi.h | 1
-rw-r--r--  drivers/net/ipa/gsi_reg.h | 59
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 1
-rw-r--r--  drivers/net/ipa/ipa.h | 17
-rw-r--r--  drivers/net/ipa/ipa_clock.c | 28
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 53
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c | 14
-rw-r--r--  drivers/net/ipa/ipa_main.c | 72
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 2
-rw-r--r--  drivers/net/ipa/ipa_uc.c | 2
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 8
-rw-r--r--  drivers/net/macsec.c | 30
-rw-r--r--  drivers/net/mdio/Kconfig | 251
-rw-r--r--  drivers/net/mdio/Makefile | 29
-rw-r--r--  drivers/net/mdio/mdio-aspeed.c (renamed from drivers/net/phy/mdio-aspeed.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-bcm-iproc.c (renamed from drivers/net/phy/mdio-bcm-iproc.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-bcm-unimac.c (renamed from drivers/net/phy/mdio-bcm-unimac.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-bitbang.c (renamed from drivers/net/phy/mdio-bitbang.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-cavium.c (renamed from drivers/net/phy/mdio-cavium.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-cavium.h (renamed from drivers/net/phy/mdio-cavium.h) | 0
-rw-r--r--  drivers/net/mdio/mdio-gpio.c (renamed from drivers/net/phy/mdio-gpio.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-hisi-femac.c (renamed from drivers/net/phy/mdio-hisi-femac.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-i2c.c (renamed from drivers/net/phy/mdio-i2c.c) | 3
-rw-r--r--  drivers/net/mdio/mdio-ipq4019.c (renamed from drivers/net/phy/mdio-ipq4019.c) | 109
-rw-r--r--  drivers/net/mdio/mdio-ipq8064.c (renamed from drivers/net/phy/mdio-ipq8064.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-moxart.c (renamed from drivers/net/phy/mdio-moxart.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mscc-miim.c (renamed from drivers/net/phy/mdio-mscc-miim.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mux-bcm-iproc.c (renamed from drivers/net/phy/mdio-mux-bcm-iproc.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mux-gpio.c (renamed from drivers/net/phy/mdio-mux-gpio.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mux-meson-g12a.c (renamed from drivers/net/phy/mdio-mux-meson-g12a.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mux-mmioreg.c (renamed from drivers/net/phy/mdio-mux-mmioreg.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mux-multiplexer.c (renamed from drivers/net/phy/mdio-mux-multiplexer.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mux.c (renamed from drivers/net/phy/mdio-mux.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-mvusb.c (renamed from drivers/net/phy/mdio-mvusb.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-octeon.c (renamed from drivers/net/phy/mdio-octeon.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-sun4i.c (renamed from drivers/net/phy/mdio-sun4i.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-thunder.c (renamed from drivers/net/phy/mdio-thunder.c) | 0
-rw-r--r--  drivers/net/mdio/mdio-xgene.c (renamed from drivers/net/phy/mdio-xgene.c) | 2
-rw-r--r--  drivers/net/mdio/of_mdio.c (renamed from drivers/of/of_mdio.c) | 38
-rw-r--r--  drivers/net/netdevsim/Makefile | 2
-rw-r--r--  drivers/net/netdevsim/dev.c | 35
-rw-r--r--  drivers/net/netdevsim/ethtool.c | 64
-rw-r--r--  drivers/net/netdevsim/netdev.c | 1
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 20
-rw-r--r--  drivers/net/netdevsim/udp_tunnels.c | 34
-rw-r--r--  drivers/net/pcs/Kconfig | 22
-rw-r--r--  drivers/net/pcs/Makefile | 5
-rw-r--r--  drivers/net/pcs/pcs-lynx.c | 318
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c (renamed from drivers/net/phy/mdio-xpcs.c) | 2
-rw-r--r--  drivers/net/phy/Kconfig | 405
-rw-r--r--  drivers/net/phy/Makefile | 37
-rw-r--r--  drivers/net/phy/at803x.c | 4
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 32
-rw-r--r--  drivers/net/phy/dp83640.c | 70
-rw-r--r--  drivers/net/phy/dp83822.c | 232
-rw-r--r--  drivers/net/phy/dp83867.c | 45
-rw-r--r--  drivers/net/phy/dp83869.c | 365
-rw-r--r--  drivers/net/phy/marvell.c | 14
-rw-r--r--  drivers/net/phy/mdio_bus.c | 15
-rw-r--r--  drivers/net/phy/micrel.c | 14
-rw-r--r--  drivers/net/phy/mscc/mscc_macsec.c | 2
-rw-r--r--  drivers/net/phy/phy-core.c | 36
-rw-r--r--  drivers/net/phy/phy.c | 69
-rw-r--r--  drivers/net/phy/phylink.c | 48
-rw-r--r--  drivers/net/phy/realtek.c | 47
-rw-r--r--  drivers/net/phy/sfp.c | 2
-rw-r--r--  drivers/net/phy/smsc.c | 126
-rw-r--r--  drivers/net/phy/spi_ks8995.c | 4
-rw-r--r--  drivers/net/team/team.c | 6
-rw-r--r--  drivers/net/tun.c | 18
-rw-r--r--  drivers/net/usb/Kconfig | 2
-rw-r--r--  drivers/net/usb/cx82310_eth.c | 78
-rw-r--r--  drivers/net/usb/kaweth.c | 261
-rw-r--r--  drivers/net/usb/net1080.c | 1
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 24
-rw-r--r--  drivers/net/usb/smsc75xx.c | 13
-rw-r--r--  drivers/net/usb/smsc95xx.c | 488
-rw-r--r--  drivers/net/usb/usbnet.c | 30
-rw-r--r--  drivers/net/veth.c | 18
-rw-r--r--  drivers/net/virtio_net.c | 55
-rw-r--r--  drivers/net/vxlan.c | 22
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.c | 1
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 172
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.c | 18
-rw-r--r--  drivers/net/wan/lmc/lmc_debug.h | 1
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 105
-rw-r--r--  drivers/net/wan/lmc/lmc_media.c | 4
-rw-r--r--  drivers/net/wan/lmc/lmc_proto.c | 16
-rw-r--r--  drivers/net/wan/sbni.c | 101
-rw-r--r--  drivers/net/wan/slic_ds26522.c | 2
-rw-r--r--  drivers/net/wan/x25_asy.c | 5
-rw-r--r--  drivers/net/wan/x25_asy.h | 1
-rw-r--r--  drivers/net/wimax/i2400m/control.c | 2
-rw-r--r--  drivers/net/wireguard/netlink.c | 14
-rw-r--r--  drivers/net/wireless/admtek/adm8211.c | 83
-rw-r--r--  drivers/net/wireless/ath/ath10k/bmi.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 81
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.h | 15
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.c | 55
-rw-r--r--  drivers/net/wireless/ath/ath10k/core.h | 22
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.c | 349
-rw-r--r--  drivers/net/wireless/ath/ath10k/coredump.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 26
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_tx.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath10k/hw.h | 3
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 929
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/qmi.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/sdio.c | 331
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.h | 1
-rw-r--r--  drivers/net/wireless/ath/ath10k/targaddrs.h | 11
-rw-r--r--  drivers/net/wireless/ath/ath10k/txrx.c | 11
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-ops.h | 19
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi-tlv.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.c | 73
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 76
-rw-r--r--  drivers/net/wireless/ath/ath10k/wow.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/Kconfig | 18
-rw-r--r--  drivers/net/wireless/ath/ath11k/Makefile | 12
-rw-r--r--  drivers/net/wireless/ath/ath11k/ahb.c | 455
-rw-r--r--  drivers/net/wireless/ath/ath11k/ahb.h | 8
-rw-r--r--  drivers/net/wireless/ath/ath11k/ce.c | 224
-rw-r--r--  drivers/net/wireless/ath/ath11k/ce.h | 15
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.c | 291
-rw-r--r--  drivers/net/wireless/ath/ath11k/core.h | 79
-rw-r--r--  drivers/net/wireless/ath/ath11k/dbring.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/debug.c | 1104
-rw-r--r--  drivers/net/wireless/ath/ath11k/debug.h | 247
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs.c | 1097
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs.h | 217
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c (renamed from drivers/net/wireless/ath/ath11k/debug_htt_stats.c) | 56
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h (renamed from drivers/net/wireless/ath/ath11k/debug_htt_stats.h) | 27
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_sta.c | 29
-rw-r--r--  drivers/net/wireless/ath/ath11k/debugfs_sta.h | 44
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.c | 316
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp.h | 40
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.c | 375
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_rx.h | 6
-rw-r--r--  drivers/net/wireless/ath/ath11k/dp_tx.c | 200
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.c | 306
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal.h | 198
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_rx.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/hal_tx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/hif.h | 30
-rw-r--r--  drivers/net/wireless/ath/ath11k/htc.c | 19
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.c | 894
-rw-r--r--  drivers/net/wireless/ath/ath11k/hw.h | 152
-rw-r--r--  drivers/net/wireless/ath/ath11k/mac.c | 412
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.c | 467
-rw-r--r--  drivers/net/wireless/ath/ath11k/mhi.h | 39
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.c | 1062
-rw-r--r--  drivers/net/wireless/ath/ath11k/pci.h | 72
-rw-r--r--  drivers/net/wireless/ath/ath11k/peer.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.c | 357
-rw-r--r--  drivers/net/wireless/ath/ath11k/qmi.h | 29
-rw-r--r--  drivers/net/wireless/ath/ath11k/reg.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath11k/spectral.c | 36
-rw-r--r--  drivers/net/wireless/ath/ath11k/thermal.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath11k/wmi.c | 154
-rw-r--r--  drivers/net/wireless/ath/ath5k/ath5k.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/base.c | 26
-rw-r--r--  drivers/net/wireless/ath/ath5k/debug.c | 25
-rw-r--r--  drivers/net/wireless/ath/ath5k/eeprom.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath5k/pcu.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath5k/phy.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath5k/reset.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/rfbuffer.h | 2
-rw-r--r--  drivers/net/wireless/ath/ath5k/rfkill.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath6kl/init.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath6kl/main.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath6kl/wmi.c | 15
-rw-r--r--  drivers/net/wireless/ath/ath9k/Kconfig | 12
-rw-r--r--  drivers/net/wireless/ath/ath9k/ani.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_initvals.h | 68
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar5008_phy.c | 35
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9001_initvals.h | 37
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_initvals.h | 14
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_mac.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9002_phy.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9003_mac.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | 21
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/beacon.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/channel.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/eeprom_def.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hif_usb.c | 21
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc.h | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_beacon.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_init.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 10
-rw-r--r--  drivers/net/wireless/ath/ath9k/htc_hst.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath9k/hw.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/init.c | 6
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c | 18
-rw-r--r--  drivers/net/wireless/ath/ath9k/pci.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath9k/wmi.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/wmi.h | 4
-rw-r--r--  drivers/net/wireless/ath/carl9170/carl9170.h | 5
-rw-r--r--  drivers/net/wireless/ath/carl9170/main.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/rx.c | 2
-rw-r--r--  drivers/net/wireless/ath/carl9170/tx.c | 12
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 7
-rw-r--r--  drivers/net/wireless/ath/dfs_pattern_detector.c | 15
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/dxe.c | 57
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/hal.h | 222
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/main.c | 288
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/pmc.c | 7
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.c | 757
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/smd.h | 12
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/txrx.c | 279
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 18
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/debugfs.c | 8
-rw-r--r--  drivers/net/wireless/ath/wil6210/interrupt.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/pmc.c | 12
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx.c | 30
-rw-r--r--  drivers/net/wireless/ath/wil6210/txrx_edma.c | 10
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil_platform.c | 3
-rw-r--r--  drivers/net/wireless/ath/wil6210/wmi.c | 36
-rw-r--r--  drivers/net/wireless/atmel/at76c50x-usb.c | 11
-rw-r--r--  drivers/net/wireless/atmel/atmel.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/b43/dma.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/main.c | 14
-rw-r--r--  drivers/net/wireless/broadcom/b43/phy_common.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/phy_ht.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/b43/phy_n.c | 21
-rw-r--r--  drivers/net/wireless/broadcom/b43/pio.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43/tables_nphy.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/dma.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/main.c | 15
-rw-r--r--  drivers/net/wireless/broadcom/b43legacy/pio.c | 7
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 10
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c | 12
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 62
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h | 14
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 39
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 1
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h | 4
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 31
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h | 7
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 30
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 9
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 31
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h | 6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c15
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c35
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c17
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c47
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c99
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c47
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c112
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c268
-rw-r--r--drivers/net/wireless/cisco/airo.c913
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c52
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h6
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw.h3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c34
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-rs.c8
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c46
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-calib.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c67
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-rs.c10
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.c25
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.c76
-rw-r--r--drivers/net/wireless/intel/iwlegacy/common.h4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/debug.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/calib.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c92
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h231
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h133
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h471
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c56
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c274
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.h18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c98
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c76
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/binding.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c294
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c363
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c203
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c459
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h59
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c123
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c32
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c197
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c87
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c107
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c84
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c51
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c82
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c99
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c53
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c27
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h161
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c137
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c1089
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c530
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c1529
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h230
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap.h6
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c33
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ioctl.c3
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c11
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c14
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c12
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_38xx.c2
-rw-r--r--drivers/net/wireless/intersil/prism54/isl_ioctl.c5
-rw-r--r--drivers/net/wireless/intersil/prism54/islpci_dev.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c108
-rw-r--r--drivers/net/wireless/marvell/libertas/defs.h3
-rw-r--r--drivers/net/wireless/marvell/libertas/firmware.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c6
-rw-r--r--drivers/net/wireless/marvell/libertas/rx.c11
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/cmd.c22
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/deb_defs.h3
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c37
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/fw.h8
-rw-r--r--drivers/net/wireless/marvell/mwifiex/ie.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c323
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.h149
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c429
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.h427
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_txrx.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/util.c6
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/wmm.h18
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c162
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h61
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c26
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c55
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c42
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c200
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c25
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/regs.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c282
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/testmode.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/init.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h145
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h159
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c34
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dma.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c70
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb.h3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c146
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c257
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c39
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c140
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h33
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h17
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c160
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c330
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c86
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.c28
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h76
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/debugfs.c34
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/mac.c4
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c4
-rw-r--r--drivers/net/wireless/microchip/wilc1000/mon.c3
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c24
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c7
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2400pci.c16
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500pci.c16
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c42
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c25
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00dev.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt61pci.c23
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c70
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c193
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c712
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c354
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c720
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c668
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c756
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c82
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c269
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.c24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/debug.h14
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c423
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c125
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/regd.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c192
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c90
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c215
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c20
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c405
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c41
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c224
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c88
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c271
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c184
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c121
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c38
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c154
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c134
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c58
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c312
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c116
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c214
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c423
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c30
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c102
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c210
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c18
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c366
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c42
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c159
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c220
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c162
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c64
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c150
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c647
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c232
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c365
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c124
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c66
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c213
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c310
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c44
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c827
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c134
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c467
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c553
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c72
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c86
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h18
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c13
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c81
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c205
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h32
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c38
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h4
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c11
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c7
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c_table.c32
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/tx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/util.h2
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c33
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_ps.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c2
-rw-r--r--drivers/net/wireless/st/cw1200/wsm.c6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h2
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c1
-rw-r--r--drivers/net/wireless/wl3501_cs.c26
-rw-r--r--drivers/net/wireless/zydas/zd1201.c6
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c15
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c10
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/s3fwrn5/Kconfig1
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c4
-rw-r--r--drivers/nfc/s3fwrn5/firmware.h2
-rw-r--r--drivers/nfc/s3fwrn5/i2c.c24
-rw-r--r--drivers/nfc/st-nci/se.c3
-rw-r--r--drivers/nfc/st21nfca/se.c3
-rw-r--r--drivers/of/Kconfig7
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c1
-rw-r--r--drivers/ptp/ptp_ines.c91
-rw-r--r--drivers/ptp/ptp_qoriq.c20
-rw-r--r--drivers/s390/cio/chsc.c22
-rw-r--r--drivers/s390/cio/chsc.h8
-rw-r--r--drivers/s390/cio/css.c11
-rw-r--r--drivers/s390/cio/css.h4
-rw-r--r--drivers/s390/cio/device_ops.c93
-rw-r--r--drivers/s390/net/Kconfig2
-rw-r--r--drivers/s390/net/ctcm_fsms.h1
-rw-r--r--drivers/s390/net/ctcm_mpc.h1
-rw-r--r--drivers/s390/net/ism.h7
-rw-r--r--drivers/s390/net/ism_drv.c47
-rw-r--r--drivers/s390/net/qeth_core.h102
-rw-r--r--drivers/s390/net/qeth_core_main.c359
-rw-r--r--drivers/s390/net/qeth_core_mpc.h14
-rw-r--r--drivers/s390/net/qeth_core_sys.c71
-rw-r--r--drivers/s390/net/qeth_ethtool.c16
-rw-r--r--drivers/s390/net/qeth_l2.h9
-rw-r--r--drivers/s390/net/qeth_l2_main.c888
-rw-r--r--drivers/s390/net/qeth_l2_sys.c17
-rw-r--r--drivers/s390/net/qeth_l3.h4
-rw-r--r--drivers/s390/net/qeth_l3_main.c176
-rw-r--r--drivers/s390/net/qeth_l3_sys.c72
-rw-r--r--drivers/ssb/pci.c7
-rw-r--r--drivers/target/target_core_user.c6
-rw-r--r--drivers/thermal/thermal_netlink.c8
-rw-r--r--fs/dlm/netlink.c6
-rw-r--r--fs/io_uring.c6
-rw-r--r--include/linux/bpf-cgroup.h25
-rw-r--r--include/linux/bpf.h149
-rw-r--r--include/linux/bpf_local_storage.h163
-rw-r--r--include/linux/bpf_lsm.h29
-rw-r--r--include/linux/bpf_types.h3
-rw-r--r--include/linux/bpf_verifier.h28
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/btf.h68
-rw-r--r--include/linux/btf_ids.h59
-rw-r--r--include/linux/can/core.h9
-rw-r--r--include/linux/can/dev.h27
-rw-r--r--include/linux/can/rx-offload.h3
-rw-r--r--include/linux/cookie.h51
-rw-r--r--include/linux/dsa/8021q.h51
-rw-r--r--include/linux/ethtool.h30
-rw-r--r--include/linux/filter.h12
-rw-r--r--include/linux/fsl/ptp_qoriq.h3
-rw-r--r--include/linux/ieee80211.h230
-rw-r--r--include/linux/if_bridge.h8
-rw-r--r--include/linux/if_tun.h19
-rw-r--r--include/linux/inet_diag.h2
-rw-r--r--include/linux/ipv6.h22
-rw-r--r--include/linux/mdio.h9
-rw-r--r--include/linux/mdio/mdio-i2c.h (renamed from drivers/net/phy/mdio-i2c.h)0
-rw-r--r--include/linux/mdio/mdio-xgene.h (renamed from drivers/net/phy/mdio-xgene.h)0
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mlx5/device.h4
-rw-r--r--include/linux/mlx5/driver.h3
-rw-r--r--include/linux/mlx5/eswitch.h15
-rw-r--r--include/linux/mlx5/fs.h1
-rw-r--r--include/linux/mlx5/qp.h6
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdevice.h105
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/linux/netlink.h30
-rw-r--r--include/linux/of.h5
-rw-r--r--include/linux/of_mdio.h6
-rw-r--r--include/linux/pcs-lynx.h21
-rw-r--r--include/linux/pcs/pcs-xpcs.h (renamed from include/linux/mdio-xpcs.h)8
-rw-r--r--include/linux/phy.h426
-rw-r--r--include/linux/phylink.h3
-rw-r--r--include/linux/platform_data/macb.h20
-rw-r--r--include/linux/prefetch.h8
-rw-r--r--include/linux/ptp_classify.h78
-rw-r--r--include/linux/qed/qed_if.h82
-rw-r--r--include/linux/rcupdate_trace.h13
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/skmsg.h19
-rw-r--r--include/linux/sock_diag.h14
-rw-r--r--include/linux/stmmac.h3
-rw-r--r--include/linux/tcp.h21
-rw-r--r--include/net/bluetooth/hci_core.h6
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/bluetooth/mgmt.h18
-rw-r--r--include/net/bpf_sk_storage.h12
-rw-r--r--include/net/caif/caif_spi.h155
-rw-r--r--include/net/cfg80211.h112
-rw-r--r--include/net/devlink.h228
-rw-r--r--include/net/drop_monitor.h36
-rw-r--r--include/net/dsa.h86
-rw-r--r--include/net/dst.h2
-rw-r--r--include/net/genetlink.h75
-rw-r--r--include/net/inet_connection_sock.h10
-rw-r--r--include/net/inet_sock.h7
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip_vs.h3
-rw-r--r--include/net/ipv6_stubs.h3
-rw-r--r--include/net/mac80211.h149
-rw-r--r--include/net/mptcp.h6
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netfilter/nf_log.h1
-rw-r--r--include/net/netfilter/nf_tables.h23
-rw-r--r--include/net/netfilter/nf_tables_core.h11
-rw-r--r--include/net/netfilter/nf_tables_ipv4.h33
-rw-r--r--include/net/netfilter/nf_tables_ipv6.h46
-rw-r--r--include/net/netlink.h105
-rw-r--r--include/net/netns/can.h1
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/nexthop.h2
-rw-r--r--include/net/nexthop.h4
-rw-r--r--include/net/pkt_sched.h5
-rw-r--r--include/net/request_sock.h9
-rw-r--r--include/net/sch_generic.h11
-rw-r--r--include/net/smc.h4
-rw-r--r--include/net/sock.h10
-rw-r--r--include/net/switchdev.h1
-rw-r--r--include/net/tc_act/tc_tunnel_key.h5
-rw-r--r--include/net/tc_act/tc_vlan.h2
-rw-r--r--include/net/tcp.h40
-rw-r--r--include/net/tls.h4
-rw-r--r--include/net/udp_tunnel.h24
-rw-r--r--include/net/xdp_sock.h30
-rw-r--r--include/net/xdp_sock_drv.h122
-rw-r--r--include/net/xfrm.h33
-rw-r--r--include/net/xsk_buff_pool.h53
-rw-r--r--include/soc/mscc/ocelot.h76
-rw-r--r--include/soc/mscc/ocelot_ptp.h3
-rw-r--r--include/soc/mscc/ocelot_vcap.h202
-rw-r--r--include/trace/events/devlink.h37
-rw-r--r--include/trace/events/rxrpc.h35
-rw-r--r--include/uapi/linux/bpf.h655
-rw-r--r--include/uapi/linux/can/isotp.h165
-rw-r--r--include/uapi/linux/can/raw.h3
-rw-r--r--include/uapi/linux/devlink.h69
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/ethtool_netlink.h18
-rw-r--r--include/uapi/linux/genetlink.h11
-rw-r--r--include/uapi/linux/gtp.h2
-rw-r--r--include/uapi/linux/if_bridge.h38
-rw-r--r--include/uapi/linux/if_link.h235
-rw-r--r--include/uapi/linux/if_pppol2tp.h2
-rw-r--r--include/uapi/linux/inet_diag.h18
-rw-r--r--include/uapi/linux/l2tp.h7
-rw-r--r--include/uapi/linux/mroute.h5
-rw-r--r--include/uapi/linux/netfilter.h3
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h10
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h3
-rw-r--r--include/uapi/linux/netlink.h4
-rw-r--r--include/uapi/linux/nl80211.h196
-rw-r--r--include/uapi/linux/tc_act/tc_mpls.h1
-rw-r--r--include/uapi/linux/tc_act/tc_vlan.h4
-rw-r--r--include/uapi/linux/tipc.h2
-rw-r--r--include/uapi/linux/tipc_netlink.h2
-rw-r--r--init/Kconfig3
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/bpf/Makefile3
-rw-r--r--kernel/bpf/arraymap.c102
-rw-r--r--kernel/bpf/bpf_inode_storage.c272
-rw-r--r--kernel/bpf/bpf_iter.c62
-rw-r--r--kernel/bpf/bpf_local_storage.c600
-rw-r--r--kernel/bpf/bpf_lsm.c21
-rw-r--r--kernel/bpf/bpf_struct_ops.c6
-rw-r--r--kernel/bpf/btf.c1221
-rw-r--r--kernel/bpf/core.c29
-rw-r--r--kernel/bpf/cpumap.c17
-rw-r--r--kernel/bpf/devmap.c17
-rw-r--r--kernel/bpf/hashtab.c22
-rw-r--r--kernel/bpf/helpers.c58
-rw-r--r--kernel/bpf/inode.c116
-rw-r--r--kernel/bpf/lpm_trie.c1
-rw-r--r--kernel/bpf/map_in_map.c24
-rw-r--r--kernel/bpf/map_in_map.h2
-rw-r--r--kernel/bpf/map_iter.c15
-rw-r--r--kernel/bpf/percpu_freelist.c101
-rw-r--r--kernel/bpf/percpu_freelist.h1
-rw-r--r--kernel/bpf/preload/.gitignore4
-rw-r--r--kernel/bpf/preload/Kconfig26
-rw-r--r--kernel/bpf/preload/Makefile25
-rw-r--r--kernel/bpf/preload/bpf_preload.h16
-rw-r--r--kernel/bpf/preload/bpf_preload_kern.c91
-rw-r--r--kernel/bpf/preload/bpf_preload_umd_blob.S7
-rw-r--r--kernel/bpf/preload/iterators/.gitignore2
-rw-r--r--kernel/bpf/preload/iterators/Makefile57
-rw-r--r--kernel/bpf/preload/iterators/README4
-rw-r--r--kernel/bpf/preload/iterators/bpf_preload_common.h13
-rw-r--r--kernel/bpf/preload/iterators/iterators.bpf.c114
-rw-r--r--kernel/bpf/preload/iterators/iterators.c94
-rw-r--r--kernel/bpf/preload/iterators/iterators.skel.h412
-rw-r--r--kernel/bpf/queue_stack_maps.c2
-rw-r--r--kernel/bpf/reuseport_array.c3
-rw-r--r--kernel/bpf/ringbuf.c1
-rw-r--r--kernel/bpf/stackmap.c6
-rw-r--r--kernel/bpf/syscall.c331
-rw-r--r--kernel/bpf/task_iter.c15
-rw-r--r--kernel/bpf/trampoline.c63
-rw-r--r--kernel/bpf/verifier.c1380
-rw-r--r--kernel/rcu/tasks.h53
-rw-r--r--kernel/taskstats.c40
-rw-r--r--kernel/trace/bpf_trace.c172
-rw-r--r--lib/nlattr.c122
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/page_alloc.c2
-rw-r--r--net/8021q/vlan.c6
-rw-r--r--net/8021q/vlan.h19
-rw-r--r--net/Kconfig1
-rw-r--r--net/atm/lec.c2
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c1
-rw-r--r--net/batman-adv/bat_v_elp.c1
-rw-r--r--net/batman-adv/bat_v_ogm.c1
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c2
-rw-r--r--net/batman-adv/fragmentation.c2
-rw-r--r--net/batman-adv/hard-interface.c19
-rw-r--r--net/batman-adv/hard-interface.h1
-rw-r--r--net/batman-adv/main.c1
-rw-r--r--net/batman-adv/main.h2
-rw-r--r--net/batman-adv/multicast.c16
-rw-r--r--net/batman-adv/netlink.c6
-rw-r--r--net/batman-adv/network-coding.c4
-rw-r--r--net/batman-adv/send.c2
-rw-r--r--net/batman-adv/soft-interface.c4
-rw-r--r--net/batman-adv/types.h4
-rw-r--r--net/bluetooth/Kconfig1
-rw-r--r--net/bluetooth/a2mp.c22
-rw-r--r--net/bluetooth/hci_conn.c2
-rw-r--r--net/bluetooth/hci_core.c43
-rw-r--r--net/bluetooth/hci_event.c89
-rw-r--r--net/bluetooth/hci_request.c85
-rw-r--r--net/bluetooth/l2cap_core.c7
-rw-r--r--net/bluetooth/l2cap_sock.c21
-rw-r--r--net/bluetooth/mgmt.c57
-rw-r--r--net/bluetooth/sco.c6
-rw-r--r--net/bpf/test_run.c88
-rw-r--r--net/bpfilter/Kconfig1
-rw-r--r--net/bridge/br.c5
-rw-r--r--net/bridge/br_device.c21
-rw-r--r--net/bridge/br_forward.c17
-rw-r--r--net/bridge/br_ioctl.c2
-rw-r--r--net/bridge/br_mdb.c573
-rw-r--r--net/bridge/br_multicast.c1825
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h117
-rw-r--r--net/bridge/br_vlan.c6
-rw-r--r--net/bridge/netfilter/ebt_stp.c1
-rw-r--r--net/caif/cfsrvl.c1
-rw-r--r--net/can/Kconfig14
-rw-r--r--net/can/Makefile3
-rw-r--r--net/can/af_can.c8
-rw-r--r--net/can/bcm.c6
-rw-r--r--net/can/gw.c6
-rw-r--r--net/can/isotp.c1424
-rw-r--r--net/can/j1939/transport.c2
-rw-r--r--net/can/proc.c14
-rw-r--r--net/can/raw.c34
-rw-r--r--net/core/bpf_sk_storage.c836
-rw-r--r--net/core/datagram.c33
-rw-r--r--net/core/dev.c179
-rw-r--r--net/core/devlink.c896
-rw-r--r--net/core/drop_monitor.c139
-rw-r--r--net/core/filter.c962
-rw-r--r--net/core/flow_dissector.c10
-rw-r--r--net/core/net-procfs.c15
-rw-r--r--net/core/net-sysfs.c4
-rw-r--r--net/core/net_namespace.c12
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/pktgen.c10
-rw-r--r--net/core/ptp_classifier.c30
-rw-r--r--net/core/skbuff.c79
-rw-r--r--net/core/skmsg.c195
-rw-r--r--net/core/sock.c32
-rw-r--r--net/core/sock_diag.c9
-rw-r--r--net/core/sock_map.c441
-rw-r--r--net/core/sysctl_net_core.c17
-rw-r--r--net/dccp/ackvec.c2
-rw-r--r--net/dccp/ipv4.c8
-rw-r--r--net/dccp/timer.c3
-rw-r--r--net/dsa/dsa.c51
-rw-r--r--net/dsa/dsa2.c134
-rw-r--r--net/dsa/dsa_priv.h62
-rw-r--r--net/dsa/master.c20
-rw-r--r--net/dsa/port.c104
-rw-r--r--net/dsa/slave.c212
-rw-r--r--net/dsa/switch.c50
-rw-r--r--net/dsa/tag_8021q.c158
-rw-r--r--net/dsa/tag_brcm.c35
-rw-r--r--net/dsa/tag_dsa.c9
-rw-r--r--net/dsa/tag_edsa.c9
-rw-r--r--net/dsa/tag_ksz.c1
-rw-r--r--net/dsa/tag_mtk.c10
-rw-r--r--net/dsa/tag_ocelot.c60
-rw-r--r--net/dsa/tag_qca.c10
-rw-r--r--net/dsa/tag_rtl4_a.c11
-rw-r--r--net/dsa/tag_sja1105.c33
-rw-r--r--net/dsa/tag_trailer.c1
-rw-r--r--net/ethtool/bitset.c26
-rw-r--r--net/ethtool/cabletest.c41
-rw-r--r--net/ethtool/channels.c37
-rw-r--r--net/ethtool/coalesce.c45
-rw-r--r--net/ethtool/common.c2
-rw-r--r--net/ethtool/debug.c24
-rw-r--r--net/ethtool/eee.c32
-rw-r--r--net/ethtool/features.c30
-rw-r--r--net/ethtool/ioctl.c67
-rw-r--r--net/ethtool/linkinfo.c30
-rw-r--r--net/ethtool/linkmodes.c34
-rw-r--r--net/ethtool/linkstate.c14
-rw-r--r--net/ethtool/netlink.c124
-rw-r--r--net/ethtool/netlink.h35
-rw-r--r--net/ethtool/pause.c86
-rw-r--r--net/ethtool/privflags.c24
-rw-r--r--net/ethtool/rings.c35
-rw-r--r--net/ethtool/strset.c26
-rw-r--r--net/ethtool/tsinfo.c13
-rw-r--r--net/ethtool/tunnels.c42
-rw-r--r--net/ethtool/wol.c24
-rw-r--r--net/hsr/hsr_debugfs.c21
-rw-r--r--net/hsr/hsr_netlink.c6
-rw-r--r--net/ieee802154/netlink.c6
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/bpf_tcp_ca.c34
-rw-r--r--net/ipv4/cipso_ipv4.c2
-rw-r--r--net/ipv4/fou.c10
-rw-r--r--net/ipv4/icmp.c29
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c17
-rw-r--r--net/ipv4/inet_hashtables.c6
-rw-r--r--net/ipv4/ip_gre.c15
-rw-r--r--net/ipv4/ip_options.c35
-rw-r--r--net/ipv4/ip_output.c11
-rw-r--r--net/ipv4/ip_sockglue.c5
-rw-r--r--net/ipv4/ip_tunnel.c8
-rw-r--r--net/ipv4/ip_tunnel_core.c23
-rw-r--r--net/ipv4/ip_vti.c9
-rw-r--r--net/ipv4/ipmr.c14
-rw-r--r--net/ipv4/netfilter/nf_log_arp.c19
-rw-r--r--net/ipv4/netfilter/nf_log_ipv4.c6
-rw-r--r--net/ipv4/nexthop.c66
-rw-r--r--net/ipv4/ping.c29
-rw-r--r--net/ipv4/raw.c5
-rw-r--r--net/ipv4/route.c23
-rw-r--r--net/ipv4/syncookies.c6
-rw-r--r--net/ipv4/sysctl_net_ipv4.c9
-rw-r--r--net/ipv4/tcp.c51
-rw-r--r--net/ipv4/tcp_bpf.c13
-rw-r--r--net/ipv4/tcp_cong.c27
-rw-r--r--net/ipv4/tcp_fastopen.c2
-rw-r--r--net/ipv4/tcp_input.c226
-rw-r--r--net/ipv4/tcp_ipv4.c18
-rw-r--r--net/ipv4/tcp_metrics.c6
-rw-r--r--net/ipv4/tcp_output.c212
-rw-r--r--net/ipv4/tcp_recovery.c16
-rw-r--r--net/ipv4/tcp_scalable.c2
-rw-r--r--net/ipv4/tcp_timer.c1
-rw-r--r--net/ipv4/tcp_vegas.c8
-rw-r--r--net/ipv4/udp.c2
-rw-r--r--net/ipv4/udp_bpf.c9
-rw-r--r--net/ipv4/udp_tunnel_nic.c96
-rw-r--r--net/ipv6/addrconf_core.c8
-rw-r--r--net/ipv6/af_inet6.c2
-rw-r--r--net/ipv6/calipso.c2
-rw-r--r--net/ipv6/icmp.c7
-rw-r--r--net/ipv6/inet6_hashtables.c6
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_gre.c33
-rw-r--r--net/ipv6/ip6_output.c4
-rw-r--r--net/ipv6/ip6_vti.c8
-rw-r--r--net/ipv6/netfilter/ip6t_NPT.c39
-rw-r--r--net/ipv6/netfilter/nf_log_ipv6.c8
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/tcp_ipv6.c27
-rw-r--r--net/iucv/af_iucv.c2
-rw-r--r--net/iucv/iucv.c8
-rw-r--r--net/l2tp/Makefile2
-rw-r--r--net/l2tp/l2tp_core.c329
-rw-r--r--net/l2tp/l2tp_core.h33
-rw-r--r--net/l2tp/l2tp_debugfs.c4
-rw-r--r--net/l2tp/l2tp_eth.c13
-rw-r--r--net/l2tp/l2tp_ip.c17
-rw-r--r--net/l2tp/l2tp_ip6.c17
-rw-r--r--net/l2tp/l2tp_netlink.c30
-rw-r--r--net/l2tp/l2tp_ppp.c70
-rw-r--r--net/l2tp/trace.h211
-rw-r--r--net/mac80211/Makefile1
-rw-r--r--net/mac80211/agg-rx.c2
-rw-r--r--net/mac80211/cfg.c118
-rw-r--r--net/mac80211/chan.c9
-rw-r--r--net/mac80211/debugfs.c1
-rw-r--r--net/mac80211/driver-ops.h29
-rw-r--r--net/mac80211/ibss.c7
-rw-r--r--net/mac80211/ieee80211_i.h47
-rw-r--r--net/mac80211/iface.c1025
-rw-r--r--net/mac80211/key.c15
-rw-r--r--net/mac80211/main.c2
-rw-r--r--net/mac80211/mesh.c6
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mesh_plink.c1
-rw-r--r--net/mac80211/mesh_ps.c6
-rw-r--r--net/mac80211/mlme.c233
-rw-r--r--net/mac80211/offchannel.c40
-rw-r--r--net/mac80211/rate.c40
-rw-r--r--net/mac80211/rx.c98
-rw-r--r--net/mac80211/s1g.c16
-rw-r--r--net/mac80211/scan.c43
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/sta_info.h3
-rw-r--r--net/mac80211/status.c229
-rw-r--r--net/mac80211/trace.h33
-rw-r--r--net/mac80211/tx.c249
-rw-r--r--net/mac80211/util.c193
-rw-r--r--net/mac80211/vht.c4
-rw-r--r--net/mptcp/mib.c9
-rw-r--r--net/mptcp/mib.h9
-rw-r--r--net/mptcp/options.c120
-rw-r--r--net/mptcp/pm.c94
-rw-r--r--net/mptcp/pm_netlink.c325
-rw-r--r--net/mptcp/protocol.c570
-rw-r--r--net/mptcp/protocol.h71
-rw-r--r--net/mptcp/subflow.c119
-rw-r--r--net/ncsi/ncsi-netlink.c6
-rw-r--r--net/netfilter/Kconfig1
-rw-r--r--net/netfilter/core.c129
-rw-r--r--net/netfilter/ipset/ip_set_core.c17
-rw-r--r--net/netfilter/ipvs/Kconfig1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c18
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c19
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c13
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c25
-rw-r--r--net/netfilter/nf_conntrack_netlink.c5
-rw-r--r--net/netfilter/nf_conntrack_standalone.c6
-rw-r--r--net/netfilter/nf_flow_table_core.c12
-rw-r--r--net/netfilter/nf_flow_table_ip.c45
-rw-r--r--net/netfilter/nf_log_common.c12
-rw-r--r--net/netfilter/nf_tables_api.c121
-rw-r--r--net/netfilter/nf_tables_core.c15
-rw-r--r--net/netfilter/nf_tables_offload.c2
-rw-r--r--net/netfilter/nfnetlink.c19
-rw-r--r--net/netfilter/nft_bitwise.c141
-rw-r--r--net/netfilter/nft_chain_filter.c35
-rw-r--r--net/netfilter/nft_cmp.c13
-rw-r--r--net/netfilter/nft_payload.c28
-rw-r--r--net/netfilter/nft_socket.c27
-rw-r--r--net/netfilter/xt_HMARK.c2
-rw-r--r--net/netlabel/netlabel_calipso.c10
-rw-r--r--net/netlabel/netlabel_cipso_v4.c6
-rw-r--r--net/netlabel/netlabel_domainhash.c5
-rw-r--r--net/netlabel/netlabel_mgmt.c6
-rw-r--r--net/netlabel/netlabel_unlabeled.c6
-rw-r--r--net/netlink/af_netlink.c68
-rw-r--r--net/netlink/genetlink.c377
-rw-r--r--net/netlink/policy.c288
-rw-r--r--net/nfc/digital_dep.c3
-rw-r--r--net/openvswitch/actions.c40
-rw-r--r--net/openvswitch/conntrack.c10
-rw-r--r--net/openvswitch/datapath.c70
-rw-r--r--net/openvswitch/flow_table.c70
-rw-r--r--net/openvswitch/flow_table.h1
-rw-r--r--net/openvswitch/meter.c6
-rw-r--r--net/openvswitch/vport-internal_dev.c28
-rw-r--r--net/openvswitch/vport.c7
-rw-r--r--net/packet/af_packet.c41
-rw-r--r--net/psample/psample.c6
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/ib_cm.c2
-rw-r--r--net/rds/ib_recv.c6
-rw-r--r--net/rds/rdma.c2
-rw-r--r--net/rxrpc/af_rxrpc.c7
-rw-r--r--net/rxrpc/ar-internal.h71
-rw-r--r--net/rxrpc/call_object.c43
-rw-r--r--net/rxrpc/conn_client.c1092
-rw-r--r--net/rxrpc/conn_event.c20
-rw-r--r--net/rxrpc/conn_object.c12
-rw-r--r--net/rxrpc/conn_service.c7
-rw-r--r--net/rxrpc/local_object.c4
-rw-r--r--net/rxrpc/net_ns.c5
-rw-r--r--net/rxrpc/output.c6
-rw-r--r--net/rxrpc/proc.c2
-rw-r--r--net/rxrpc/rtt.c1
-rw-r--r--net/rxrpc/rxkad.c8
-rw-r--r--net/rxrpc/sysctl.c10
-rw-r--r--net/sched/act_api.c5
-rw-r--r--net/sched/act_ct.c8
-rw-r--r--net/sched/act_ctinfo.c5
-rw-r--r--net/sched/act_gate.c4
-rw-r--r--net/sched/act_mpls.c18
-rw-r--r--net/sched/act_vlan.c40
-rw-r--r--net/sched/cls_u32.c8
-rw-r--r--net/sched/sch_generic.c23
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/auth.c4
-rw-r--r--net/sctp/bind_addr.c2
-rw-r--r--net/sctp/chunk.c2
-rw-r--r--net/sctp/protocol.c8
-rw-r--r--net/sctp/sm_make_chunk.c6
-rw-r--r--net/sctp/ulpqueue.c2
-rw-r--r--net/smc/af_smc.c881
-rw-r--r--net/smc/smc.h19
-rw-r--r--net/smc/smc_cdc.c4
-rw-r--r--net/smc/smc_clc.c500
-rw-r--r--net/smc/smc_clc.h250
-rw-r--r--net/smc/smc_close.c4
-rw-r--r--net/smc/smc_core.c82
-rw-r--r--net/smc/smc_core.h24
-rw-r--r--net/smc/smc_diag.c30
-rw-r--r--net/smc/smc_ism.c32
-rw-r--r--net/smc/smc_ism.h8
-rw-r--r--net/smc/smc_llc.c21
-rw-r--r--net/smc/smc_netns.h1
-rw-r--r--net/smc/smc_pnet.c174
-rw-r--r--net/smc/smc_pnet.h15
-rw-r--r--net/smc/smc_tx.c10
-rw-r--r--net/socket.c8
-rw-r--r--net/sunrpc/sysctl.c6
-rw-r--r--net/tipc/core.c6
-rw-r--r--net/tipc/core.h8
-rw-r--r--net/tipc/crypto.c981
-rw-r--r--net/tipc/crypto.h43
-rw-r--r--net/tipc/link.c10
-rw-r--r--net/tipc/msg.c5
-rw-r--r--net/tipc/msg.h8
-rw-r--r--net/tipc/name_distr.c10
-rw-r--r--net/tipc/net.c20
-rw-r--r--net/tipc/net.h1
-rw-r--r--net/tipc/netlink.c2
-rw-r--r--net/tipc/netlink_compat.c6
-rw-r--r--net/tipc/node.c96
-rw-r--r--net/tipc/node.h2
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/tipc/sysctl.c9
-rw-r--r--net/tipc/topsrv.c1
-rw-r--r--net/tipc/udp_media.c1
-rw-r--r--net/tls/tls_device.c11
-rw-r--r--net/tls/tls_main.c27
-rw-r--r--net/unix/af_unix.c3
-rw-r--r--net/wimax/stack.c6
-rw-r--r--net/wireless/chan.c135
-rw-r--r--net/wireless/core.c8
-rw-r--r--net/wireless/core.h9
-rw-r--r--net/wireless/lib80211.c2
-rw-r--r--net/wireless/mlme.c14
-rw-r--r--net/wireless/nl80211.c517
-rw-r--r--net/wireless/radiotap.c1
-rw-r--r--net/wireless/reg.c329
-rw-r--r--net/wireless/scan.c585
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/wireless/util.c32
-rw-r--r--net/wireless/wext-compat.c2
-rw-r--r--net/xdp/xdp_umem.c225
-rw-r--r--net/xdp/xdp_umem.h6
-rw-r--r--net/xdp/xsk.c219
-rw-r--r--net/xdp/xsk.h11
-rw-r--r--net/xdp/xsk_buff_pool.c377
-rw-r--r--net/xdp/xsk_diag.c20
-rw-r--r--net/xdp/xsk_queue.h18
-rw-r--r--net/xdp/xskmap.c15
-rw-r--r--net/xfrm/Kconfig11
-rw-r--r--net/xfrm/Makefile1
-rw-r--r--net/xfrm/xfrm_compat.c625
-rw-r--r--net/xfrm/xfrm_interface.c31
-rw-r--r--net/xfrm/xfrm_state.c77
-rw-r--r--net/xfrm/xfrm_user.c110
-rw-r--r--samples/bpf/.gitignore1
-rw-r--r--samples/bpf/Makefile36
-rw-r--r--samples/bpf/cpustat_kern.c36
-rw-r--r--samples/bpf/cpustat_user.c47
-rw-r--r--samples/bpf/hbm.c3
-rw-r--r--samples/bpf/lathist_kern.c24
-rw-r--r--samples/bpf/lathist_user.c42
-rw-r--r--samples/bpf/offwaketime_kern.c52
-rw-r--r--samples/bpf/offwaketime_user.c66
-rw-r--r--samples/bpf/sockex3_kern.c20
-rw-r--r--samples/bpf/sockex3_user.c6
-rw-r--r--samples/bpf/spintest_kern.c36
-rw-r--r--samples/bpf/spintest_user.c68
-rw-r--r--samples/bpf/syscall_tp_kern.c24
-rw-r--r--samples/bpf/syscall_tp_user.c54
-rw-r--r--samples/bpf/task_fd_query_kern.c2
-rw-r--r--samples/bpf/task_fd_query_user.c2
-rw-r--r--samples/bpf/test_current_task_under_cgroup_kern.c27
-rw-r--r--samples/bpf/test_current_task_under_cgroup_user.c52
-rw-r--r--samples/bpf/test_map_in_map_kern.c7
-rw-r--r--samples/bpf/test_probe_write_user_kern.c12
-rw-r--r--samples/bpf/test_probe_write_user_user.c49
-rw-r--r--samples/bpf/trace_output_kern.c15
-rw-r--r--samples/bpf/trace_output_user.c55
-rw-r--r--samples/bpf/tracex3_kern.c2
-rw-r--r--samples/bpf/tracex5_user.c6
-rw-r--r--samples/bpf/xdp_monitor_kern.c60
-rw-r--r--samples/bpf/xdp_monitor_user.c159
-rw-r--r--samples/bpf/xdp_redirect_cpu_user.c155
-rw-r--r--samples/bpf/xdp_sample_pkts_kern.c14
-rw-r--r--samples/bpf/xdp_sample_pkts_user.c1
-rw-r--r--samples/bpf/xdpsock_user.c406
-rw-r--r--samples/bpf/xsk_fwd.c1085
-rwxr-xr-xscripts/bpf_helpers_doc.py4
-rwxr-xr-xscripts/link-vmlinux.sh6
-rw-r--r--security/bpf/hooks.c6
-rw-r--r--tools/bpf/bpftool/Documentation/Makefile15
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-btf.rst37
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-cgroup.rst33
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-feature.rst33
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-gen.rst37
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-iter.rst27
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-link.rst36
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-map.rst48
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-net.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-perf.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-prog.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst35
-rw-r--r--tools/bpf/bpftool/Documentation/bpftool.rst34
-rw-r--r--tools/bpf/bpftool/Documentation/common_options.rst22
-rw-r--r--tools/bpf/bpftool/Makefile6
-rw-r--r--tools/bpf/bpftool/bash-completion/bpftool25
-rw-r--r--tools/bpf/bpftool/gen.c2
-rw-r--r--tools/bpf/bpftool/json_writer.c6
-rw-r--r--tools/bpf/bpftool/json_writer.h3
-rw-r--r--tools/bpf/bpftool/link.c44
-rw-r--r--tools/bpf/bpftool/main.c33
-rw-r--r--tools/bpf/bpftool/map.c152
-rw-r--r--tools/bpf/bpftool/net.c299
-rw-r--r--tools/bpf/bpftool/prog.c203
-rw-r--r--tools/bpf/resolve_btfids/Makefile2
-rw-r--r--tools/bpf/resolve_btfids/main.c29
-rw-r--r--tools/build/Makefile2
-rw-r--r--tools/build/Makefile.feature1
-rw-r--r--tools/build/feature/Makefile4
-rw-r--r--tools/build/feature/test-all.c4
-rw-r--r--tools/build/feature/test-libelf-mmap.c9
-rw-r--r--tools/include/linux/btf_ids.h59
-rw-r--r--tools/include/uapi/linux/bpf.h655
-rw-r--r--tools/lib/bpf/Makefile28
-rw-r--r--tools/lib/bpf/bpf.c70
-rw-r--r--tools/lib/bpf/bpf.h39
-rw-r--r--tools/lib/bpf/bpf_core_read.h120
-rw-r--r--tools/lib/bpf/bpf_helpers.h49
-rw-r--r--tools/lib/bpf/bpf_prog_linfo.c3
-rw-r--r--tools/lib/bpf/bpf_tracing.h4
-rw-r--r--tools/lib/bpf/btf.c1631
-rw-r--r--tools/lib/bpf/btf.h103
-rw-r--r--tools/lib/bpf/btf_dump.c87
-rw-r--r--tools/lib/bpf/hashmap.c3
-rw-r--r--tools/lib/bpf/hashmap.h12
-rw-r--r--tools/lib/bpf/libbpf.c3407
-rw-r--r--tools/lib/bpf/libbpf.h12
-rw-r--r--tools/lib/bpf/libbpf.map38
-rw-r--r--tools/lib/bpf/libbpf_common.h2
-rw-r--r--tools/lib/bpf/libbpf_internal.h147
-rw-r--r--tools/lib/bpf/libbpf_probes.c8
-rw-r--r--tools/lib/bpf/netlink.c128
-rw-r--r--tools/lib/bpf/nlattr.c9
-rw-r--r--tools/lib/bpf/ringbuf.c8
-rw-r--r--tools/lib/bpf/xsk.c383
-rw-r--r--tools/lib/bpf/xsk.h9
-rw-r--r--tools/perf/Makefile.config4
-rw-r--r--tools/perf/util/bpf-loader.c12
-rw-r--r--tools/perf/util/symbol.h2
-rw-r--r--tools/testing/selftests/bpf/.gitignore2
-rw-r--r--tools/testing/selftests/bpf/Makefile14
-rw-r--r--tools/testing/selftests/bpf/README.rst59
-rw-r--r--tools/testing/selftests/bpf/bench.c5
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_rename.c17
-rw-r--r--tools/testing/selftests/bpf/benchs/bench_trigger.c17
-rw-r--r--tools/testing/selftests/bpf/bpf_tcp_helpers.h13
-rw-r--r--tools/testing/selftests/bpf/flow_dissector_load.h8
-rw-r--r--tools/testing/selftests/bpf/network_helpers.c37
-rw-r--r--tools/testing/selftests/bpf/network_helpers.h2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/align.c16
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_iter.c115
-rw-r--r--tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf.c (renamed from tools/testing/selftests/bpf/test_btf.c)410
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_dump.c105
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_endian.c101
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c74
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c234
-rw-r--r--tools/testing/selftests/bpf/prog_tests/btf_write.c244
-rw-r--r--tools/testing/selftests/bpf/prog_tests/cls_redirect.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_autosize.c225
-rw-r--r--tools/testing/selftests/bpf/prog_tests/core_reloc.c350
-rw-r--r--tools/testing/selftests/bpf/prog_tests/d_path.c157
-rw-r--r--tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c293
-rw-r--r--tools/testing/selftests/bpf/prog_tests/global_data_init.c3
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms.c42
-rw-r--r--tools/testing/selftests/bpf/prog_tests/ksyms_btf.c88
-rw-r--r--tools/testing/selftests/bpf/prog_tests/l4lb_all.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/metadata.c141
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c66
-rw-r--r--tools/testing/selftests/bpf/prog_tests/perf_buffer.c65
-rw-r--r--tools/testing/selftests/bpf/prog_tests/pinning.c49
-rw-r--r--tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c96
-rw-r--r--tools/testing/selftests/bpf/prog_tests/reference_tracking.c2
-rw-r--r--tools/testing/selftests/bpf/prog_tests/resolve_btfids.c45
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sk_assign.c7
-rw-r--r--tools/testing/selftests/bpf/prog_tests/snprintf_btf.c62
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sock_fields.c382
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c189
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockopt_sk.c4
-rw-r--r--tools/testing/selftests/bpf/prog_tests/subprogs.c31
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tailcalls.c332
-rw-r--r--tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c610
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_bpffs.c94
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_local_storage.c60
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_lsm.c9
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_overhead.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/test_profiler.c72
-rw-r--r--tools/testing/selftests/bpf/prog_tests/trace_ext.c111
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_noinline.c51
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_cubic.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_dctcp.c2
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_flow.c12
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter.h32
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c59
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c50
-rw-r--r--tools/testing/selftests/bpf/progs/bpf_iter_task_file.c10
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___diff.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___err_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___val3_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_size___err_ambiguous.c4
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___all_missing.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff_sz.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___fn_wrong_args.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___incompat.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_id.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf__core_reloc_type_id___missing_targets.c3
-rw-r--r--tools/testing/selftests/bpf/progs/btf_ptr.h27
-rw-r--r--tools/testing/selftests/bpf/progs/connect4_prog.c19
-rw-r--r--tools/testing/selftests/bpf/progs/core_reloc_types.h352
-rw-r--r--tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c27
-rw-r--r--tools/testing/selftests/bpf/progs/fmod_ret_freplace.c14
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_attach_probe.c40
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_cls_redirect.c34
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c19
-rw-r--r--tools/testing/selftests/bpf/progs/freplace_get_constant.c15
-rw-r--r--tools/testing/selftests/bpf/progs/local_storage.c140
-rw-r--r--tools/testing/selftests/bpf/progs/lsm.c64
-rw-r--r--tools/testing/selftests/bpf/progs/map_ptr_kern.c16
-rw-r--r--tools/testing/selftests/bpf/progs/metadata_unused.c15
-rw-r--r--tools/testing/selftests/bpf/progs/metadata_used.c15
-rw-r--r--tools/testing/selftests/bpf/progs/netif_receive_skb.c249
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.h177
-rw-r--r--tools/testing/selftests/bpf/progs/profiler.inc.h969
-rw-r--r--tools/testing/selftests/bpf/progs/profiler1.c6
-rw-r--r--tools/testing/selftests/bpf/progs/profiler2.c6
-rw-r--r--tools/testing/selftests/bpf/progs/profiler3.c6
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf.h11
-rw-r--r--tools/testing/selftests/bpf/progs/pyperf_subprogs.c5
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta.h30
-rw-r--r--tools/testing/selftests/bpf/progs/strobemeta_subprogs.c10
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall1.c28
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall2.c14
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall3.c4
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c38
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c41
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c61
-rw-r--r--tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_map_in_map.c74
-rw-r--r--tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c174
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect.c105
-rw-r--r--tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_autosize.c172
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_enumval.c72
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c2
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c110
-rw-r--r--tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c115
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path.c65
-rw-r--r--tools/testing/selftests/bpf/progs/test_ksyms_btf.c55
-rw-r--r--tools/testing/selftests/bpf/progs/test_l4lb_noinline.c41
-rw-r--r--tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c325
-rw-r--r--tools/testing/selftests/bpf/progs/test_overhead.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c38
-rw-r--r--tools/testing/selftests/bpf/progs/test_pkt_access.c20
-rw-r--r--tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c24
-rw-r--r--tools/testing/selftests/bpf/progs/test_sk_lookup.c216
-rw-r--r--tools/testing/selftests/bpf/progs/test_sock_fields.c (renamed from tools/testing/selftests/bpf/progs/test_sock_fields_kern.c)176
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c23
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_kern.h34
-rw-r--r--tools/testing/selftests/bpf/progs/test_sockmap_update.c48
-rw-r--r--tools/testing/selftests/bpf/progs/test_subprogs.c103
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop1.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_loop2.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_sysctl_prog.c4
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_neigh.c148
-rw-r--r--tools/testing/selftests/bpf/progs/test_tc_peer.c45
-rw-r--r--tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c626
-rw-r--r--tools/testing/selftests/bpf/progs/test_trace_ext.c18
-rw-r--r--tools/testing/selftests/bpf/progs/test_trace_ext_tracing.c25
-rw-r--r--tools/testing/selftests/bpf/progs/test_vmlinux.c12
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_noinline.c36
-rw-r--r--tools/testing/selftests/bpf/progs/trigger_bench.c7
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_build.sh21
-rwxr-xr-xtools/testing/selftests/bpf/test_bpftool_metadata.sh82
-rw-r--r--tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c1
-rw-r--r--tools/testing/selftests/bpf/test_progs.h63
-rw-r--r--tools/testing/selftests/bpf/test_sock_fields.c482
-rw-r--r--tools/testing/selftests/bpf/test_socket_cookie.c2
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c81
-rwxr-xr-xtools/testing/selftests/bpf/test_tc_redirect.sh204
-rw-r--r--tools/testing/selftests/bpf/test_tcp_hdr_options.h152
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c19
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.c27
-rw-r--r--tools/testing/selftests/bpf/trace_helpers.h4
-rw-r--r--tools/testing/selftests/bpf/verifier/and.c16
-rw-r--r--tools/testing/selftests/bpf/verifier/basic.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/bounds.c146
-rw-r--r--tools/testing/selftests/bpf/verifier/calls.c6
-rw-r--r--tools/testing/selftests/bpf/verifier/d_path.c37
-rw-r--r--tools/testing/selftests/bpf/verifier/direct_packet_access.c2
-rw-r--r--tools/testing/selftests/bpf/verifier/ld_imm64.c8
-rw-r--r--tools/testing/selftests/bpf/verifier/map_ptr.c32
-rw-r--r--tools/testing/selftests/bpf/verifier/ref_tracking.c47
-rw-r--r--tools/testing/selftests/bpf/verifier/regalloc.c269
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh33
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh9
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh379
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh14
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh5
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh403
-rwxr-xr-xtools/testing/selftests/drivers/net/mlxsw/sch_ets.sh6
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh1
-rw-r--r--tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh12
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/devlink.sh21
-rwxr-xr-xtools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh108
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh167
-rwxr-xr-xtools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh316
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/Makefile3
-rw-r--r--tools/testing/selftests/net/config6
-rwxr-xr-xtools/testing/selftests/net/drop_monitor_tests.sh215
-rwxr-xr-xtools/testing/selftests/net/fib_nexthops.sh44
-rw-r--r--tools/testing/selftests/net/forwarding/devlink_lib.sh70
-rw-r--r--tools/testing/selftests/net/forwarding/lib.sh43
-rw-r--r--tools/testing/selftests/net/forwarding/mirror_lib.sh2
-rw-r--r--tools/testing/selftests/net/ipsec.c2195
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile3
-rw-r--r--tools/testing/selftests/net/mptcp/mptcp_connect.c22
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_connect.sh21
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh193
-rwxr-xr-xtools/testing/selftests/net/mptcp/simult_flows.sh293
-rw-r--r--tools/testing/selftests/net/nettest.c2
-rwxr-xr-xtools/testing/selftests/net/psock_snd.sh16
-rw-r--r--tools/testing/selftests/net/tcp_mmap.c42
-rwxr-xr-xtools/testing/selftests/net/vrf_route_leaking.sh626
-rw-r--r--tools/testing/selftests/netfilter/nf-queue.c61
-rwxr-xr-xtools/testing/selftests/netfilter/nft_meta.sh32
-rwxr-xr-xtools/testing/selftests/netfilter/nft_queue.sh70
2301 files changed, 129947 insertions, 50868 deletions
diff --git a/CREDITS b/CREDITS
index c741455498a4..cb02b9923a52 100644
--- a/CREDITS
+++ b/CREDITS
@@ -191,6 +191,10 @@ N: Krishna Balasubramanian
E: balasub@cis.ohio-state.edu
D: Wrote SYS V IPC (part of standard kernel since 0.99.10)
+N: Robert Baldyga
+E: r.baldyga@hackerion.com
+D: Samsung S3FWRN5 NCI NFC Controller
+
N: Chris Ball
E: chris@printf.net
D: Former maintainer of the MMC/SD/SDIO subsystem.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index f7ac0e663976..0f1fb7e86237 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1349,6 +1349,11 @@
Format: <interval>,<probability>,<space>,<times>
See also Documentation/fault-injection/.
+ fb_tunnels= [NET]
+ Format: { initns | none }
+ See Documentation/admin-guide/sysctl/net.rst for
+ fb_tunnels_only_for_init_ns
+
floppy= [HW]
See Documentation/admin-guide/blockdev/floppy.rst.
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 42cd04bca548..57fd6ce68fe0 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -300,7 +300,6 @@ Note:
0: 0 1 2 3 4 5 6 7
RSS hash key:
84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8:43:e3:c9:0c:fd:17:55:c2:3a:4d:69:ed:f1:42:89
-
netdev_tstamp_prequeue
----------------------
@@ -321,11 +320,20 @@ fb_tunnels_only_for_init_net
----------------------------
Controls if fallback tunnels (like tunl0, gre0, gretap0, erspan0,
-sit0, ip6tnl0, ip6gre0) are automatically created when a new
-network namespace is created, if corresponding tunnel is present
-in initial network namespace.
-If set to 1, these devices are not automatically created, and
-user space is responsible for creating them if needed.
+sit0, ip6tnl0, ip6gre0) are automatically created. There are 3 possibilities:
+(a) value = 0; respective fallback tunnels are created in every net
+namespace when the module is loaded (backward-compatible behavior).
+(b) value = 1; [kcmd value: initns] respective fallback tunnels are
+created only in the init net namespace; no other net namespace will
+have them.
+(c) value = 2; [kcmd value: none] fallback tunnels are not created in
+any net namespace when a module is loaded. Setting the value to "2" is
+pointless after boot if these modules are built-in, so there is a
+kernel command-line option that can change this default. Please refer
+to Documentation/admin-guide/kernel-parameters.txt for additional details.
+
+Not creating fallback tunnels gives userspace control to create only
+what is needed and avoids creating redundant devices.
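+
+For example, to restrict fallback tunnel creation to the initial net
+namespace at runtime::
+
+    sysctl -w net.core.fb_tunnels_only_for_init_net=1
+
+or, equivalently at boot time, pass ``fb_tunnels=initns`` on the kernel
+command line.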
Default : 0 (for compatibility reasons)
diff --git a/Documentation/bpf/bpf_devel_QA.rst b/Documentation/bpf/bpf_devel_QA.rst
index a26aa1b9b259..5b613d2a5f1a 100644
--- a/Documentation/bpf/bpf_devel_QA.rst
+++ b/Documentation/bpf/bpf_devel_QA.rst
@@ -60,13 +60,13 @@ Q: Where can I find patches currently under discussion for BPF subsystem?
A: All patches that are Cc'ed to netdev are queued for review under netdev
patchwork project:
- http://patchwork.ozlabs.org/project/netdev/list/
+ https://patchwork.kernel.org/project/netdevbpf/list/
Those patches which target BPF, are assigned to a 'bpf' delegate for
further processing from BPF maintainers. The current queue with
patches under review can be found at:
- https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147
+ https://patchwork.kernel.org/project/netdevbpf/list/?delegate=121173
Once the patches have been reviewed by the BPF community as a whole
and approved by the BPF maintainers, their status in patchwork will be
@@ -149,7 +149,7 @@ In case the patch or patch series has to be reworked and sent out
again in a second or later revision, it is also required to add a
version number (``v2``, ``v3``, ...) into the subject prefix::
- git format-patch --subject-prefix='PATCH net-next v2' start..finish
+ git format-patch --subject-prefix='PATCH bpf-next v2' start..finish
When changes have been requested to the patch series, always send the
whole patch series again with the feedback incorporated (never send
@@ -479,17 +479,18 @@ LLVM's static compiler lists the supported targets through
$ llc --version
LLVM (http://llvm.org/):
- LLVM version 6.0.0svn
+ LLVM version 10.0.0
Optimized build.
Default target: x86_64-unknown-linux-gnu
Host CPU: skylake
Registered Targets:
- bpf - BPF (host endian)
- bpfeb - BPF (big endian)
- bpfel - BPF (little endian)
- x86 - 32-bit X86: Pentium-Pro and above
- x86-64 - 64-bit X86: EM64T and AMD64
+ aarch64 - AArch64 (little endian)
+ bpf - BPF (host endian)
+ bpfeb - BPF (big endian)
+ bpfel - BPF (little endian)
+ x86 - 32-bit X86: Pentium-Pro and above
+ x86-64 - 64-bit X86: EM64T and AMD64
For developers in order to utilize the latest features added to LLVM's
BPF back end, it is advisable to run the latest LLVM releases. Support
@@ -517,6 +518,10 @@ from the git repositories::
The built binaries can then be found in the build/bin/ directory, where
you can point the PATH variable to.
+Set ``-DLLVM_TARGETS_TO_BUILD`` equal to the target you wish to build; you
+will find a full list of targets within the llvm-project/llvm/lib/Target
+directory.
+
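+For example, to build only the BPF and X86 back ends (the trailing source
+path here assumes a build directory created inside the llvm-project
+checkout)::
+
+    $ cmake -G Ninja -DLLVM_TARGETS_TO_BUILD="BPF;X86" ../llvm
+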
Q: Reporting LLVM BPF issues
----------------------------
Q: Should I notify BPF kernel maintainers about issues in LLVM's BPF code
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index b5361b8621c9..44dc789de2b4 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -724,6 +724,31 @@ want to define unused entry in BTF_ID_LIST, like::
BTF_ID_UNUSED
BTF_ID(struct, task_struct)
+The ``BTF_SET_START/END`` macro pair defines a sorted list of BTF ID values
+and their count, with the following syntax::
+
+ BTF_SET_START(set)
+ BTF_ID(type1, name1)
+ BTF_ID(type2, name2)
+ BTF_SET_END(set)
+
+resulting in the following layout in the .BTF_ids section::
+
+ __BTF_ID__set__set:
+ .zero 4
+ __BTF_ID__type1__name1__3:
+ .zero 4
+ __BTF_ID__type2__name2__4:
+ .zero 4
+
+The ``struct btf_id_set set;`` variable is defined to access the list.
+
+The ``typeX`` name can be one of the following::
+
+ struct, union, typedef, func
+
+and is used as a filter when resolving the BTF ID value.
+
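+For instance, a minimal in-kernel sketch (the set and function names below
+are illustrative, and the ``btf_id_set_contains()`` helper is assumed to be
+available) that tests membership in such a set could look like::
+
+    #include <linux/btf_ids.h>
+
+    BTF_SET_START(example_set)
+    BTF_ID(struct, task_struct)
+    BTF_ID(func, vfs_read)
+    BTF_SET_END(example_set)
+
+    /* Binary-search the sorted set for a resolved BTF ID. */
+    static bool id_in_example_set(u32 btf_id)
+    {
+            return btf_id_set_contains(&example_set, btf_id);
+    }
+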
All the BTF ID lists and sets are compiled in the .BTF_ids section and
resolved during the linking phase of kernel build by ``resolve_btfids`` tool.
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 7df2465fd108..4f2874b729c3 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -52,6 +52,7 @@ Program types
prog_cgroup_sysctl
prog_flow_dissector
bpf_lsm
+ prog_sk_lookup
Map types
diff --git a/Documentation/bpf/prog_sk_lookup.rst b/Documentation/bpf/prog_sk_lookup.rst
new file mode 100644
index 000000000000..85a305c19bcd
--- /dev/null
+++ b/Documentation/bpf/prog_sk_lookup.rst
@@ -0,0 +1,98 @@
+.. SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+=====================
+BPF sk_lookup program
+=====================
+
+The BPF sk_lookup program type (``BPF_PROG_TYPE_SK_LOOKUP``) introduces
+programmability into the socket lookup performed by the transport layer when
+a packet is to be delivered locally.
+
+When invoked, a BPF sk_lookup program can select a socket that will receive
+the incoming packet by calling the ``bpf_sk_assign()`` BPF helper function.
+
+Hooks for a common attach point (``BPF_SK_LOOKUP``) exist for both TCP and UDP.
+
+Motivation
+==========
+
+The BPF sk_lookup program type was introduced to address setup scenarios where
+binding sockets to an address with ``bind()`` socket call is impractical, such
+as:
+
+1. receiving connections on a range of IP addresses, e.g. 192.0.2.0/24, when
+ binding to a wildcard address ``INADDR_ANY`` is not possible due to a port
+ conflict,
+2. receiving connections on all or a wide range of ports, i.e. an L7 proxy use
+ case.
+
+Such setups would require creating and ``bind()``'ing one socket for each
+IP address/port pair in the range, leading to resource consumption and
+potential latency spikes during socket lookup.
+
+Attachment
+==========
+
+A BPF sk_lookup program can be attached to a network namespace with the
+``bpf(BPF_LINK_CREATE, ...)`` syscall, using the ``BPF_SK_LOOKUP`` attach type
+and a netns FD as the attachment ``target_fd``.
+
+Multiple programs can be attached to one network namespace. Programs will be
+invoked in the same order as they were attached.
+
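+For illustration, a minimal libbpf-based sketch (assuming ``prog`` points to
+an already loaded sk_lookup program) could attach to the current network
+namespace like this::
+
+    #include <fcntl.h>
+    #include <unistd.h>
+    #include <bpf/libbpf.h>
+
+    static int attach_to_current_netns(struct bpf_program *prog)
+    {
+            struct bpf_link *link;
+            int netns_fd, err = 0;
+
+            netns_fd = open("/proc/self/ns/net", O_RDONLY);
+            if (netns_fd < 0)
+                    return -errno;
+
+            link = bpf_program__attach_netns(prog, netns_fd);
+            if (libbpf_get_error(link))
+                    err = -1;
+
+            close(netns_fd);
+            return err;
+    }
+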
+Hooks
+=====
+
+The attached BPF sk_lookup programs run whenever the transport layer needs to
+find a listening (TCP) or an unconnected (UDP) socket for an incoming packet.
+
+Incoming traffic to established (TCP) and connected (UDP) sockets is delivered
+as usual without triggering the BPF sk_lookup hook.
+
+The attached BPF programs must return either the ``SK_PASS`` or ``SK_DROP``
+verdict code. As for other BPF program types that are network filters,
+``SK_PASS`` signifies that the socket lookup should continue on to regular
+hashtable-based lookup, while ``SK_DROP`` causes the transport layer to drop the
+packet.
+
+A BPF sk_lookup program can also select a socket to receive the packet by
+calling the ``bpf_sk_assign()`` BPF helper. Typically, the program looks up a
+socket in a map holding sockets, such as ``SOCKMAP`` or ``SOCKHASH``, and
+passes a ``struct bpf_sock *`` to the ``bpf_sk_assign()`` helper to record the
+selection. Selecting a socket only takes effect if the program has terminated
+with the ``SK_PASS`` code.
+
+When multiple programs are attached, the end result is determined from the
+return codes of all the programs, according to the following rules:
+
+1. If any program returned ``SK_PASS`` and selected a valid socket, the socket
+ is used as the result of the socket lookup.
+2. If more than one program returned ``SK_PASS`` and selected a socket, the last
+ selection takes effect.
+3. If any program returned ``SK_DROP``, and no program returned ``SK_PASS`` and
+ selected a socket, socket lookup fails.
+4. If all programs returned ``SK_PASS`` and none of them selected a socket,
+ socket lookup continues on.
+
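+For illustration, a minimal sketch of a program that steers TCP connections
+destined to local port 1234 into a single socket stored in a ``SOCKMAP`` (the
+map and function names below are illustrative) could look like::
+
+    #include <linux/bpf.h>
+    #include <linux/in.h>
+    #include <bpf/bpf_helpers.h>
+
+    struct {
+            __uint(type, BPF_MAP_TYPE_SOCKMAP);
+            __uint(max_entries, 1);
+            __type(key, __u32);
+            __type(value, __u64);
+    } dispatch_socket SEC(".maps");
+
+    SEC("sk_lookup")
+    int dispatch(struct bpf_sk_lookup *ctx)
+    {
+            struct bpf_sock *sk;
+            __u32 key = 0;
+            long err;
+
+            if (ctx->protocol != IPPROTO_TCP || ctx->local_port != 1234)
+                    return SK_PASS;
+
+            sk = bpf_map_lookup_elem(&dispatch_socket, &key);
+            if (!sk)
+                    return SK_DROP;
+
+            err = bpf_sk_assign(ctx, sk, 0);
+            bpf_sk_release(sk);
+            /* The selection only takes effect together with SK_PASS. */
+            return err ? SK_DROP : SK_PASS;
+    }
+
+    char _license[] SEC("license") = "GPL";
+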
+API
+===
+
+In its context, an instance of ``struct bpf_sk_lookup``, a BPF sk_lookup
+program receives information about the packet that triggered the socket
+lookup, namely:
+
+* IP version (``AF_INET`` or ``AF_INET6``),
+* L4 protocol identifier (``IPPROTO_TCP`` or ``IPPROTO_UDP``),
+* source and destination IP addresses,
+* source and destination L4 ports,
+* the socket that has been selected with ``bpf_sk_assign()``.
+
+Refer to the ``struct bpf_sk_lookup`` declaration in the ``linux/bpf.h`` user
+API header, and the `bpf-helpers(7)
+<https://man7.org/linux/man-pages/man7/bpf-helpers.7.html>`_ man-page section
+on ``bpf_sk_assign()`` for details.
+
+Example
+=======
+
+See ``tools/testing/selftests/bpf/prog_tests/sk_lookup.c`` for the reference
+implementation.
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 88b57b0ca1f4..97ca62b0e14d 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -50,6 +50,13 @@ Optional properties:
- reset-names: If the "reset" property is specified, this property should have
the value "switch" to denote the switch reset line.
+- clocks: when provided, the first phandle is to the switch's main clock and
+ is valid for both BCM7445 and BCM7278. The second phandle is only applicable
+ to BCM7445 and is used to divide the switch core clock.
+
+- clock-names: when provided, the first clock must be named "sw_switch", and
+ the second "sw_switch_mdiv".
+
Port subnodes:
Optional properties:
diff --git a/Documentation/devicetree/bindings/net/brcm,systemport.txt b/Documentation/devicetree/bindings/net/brcm,systemport.txt
index 83f29e0e11ba..75736739bfdd 100644
--- a/Documentation/devicetree/bindings/net/brcm,systemport.txt
+++ b/Documentation/devicetree/bindings/net/brcm,systemport.txt
@@ -20,6 +20,11 @@ Optional properties:
- systemport,num-tier1-arb: number of tier 1 arbiters, an integer
- systemport,num-txq: number of HW transmit queues, an integer
- systemport,num-rxq: number of HW receive queues, an integer
+- clocks: When provided, must be two phandles to the functional clock nodes of
+ the SYSTEMPORT block. The first phandle is the main SYSTEMPORT clock used
+ during normal operation, while the second phandle is the Wake-on-LAN clock.
+- clock-names: When provided, the names of the functional clocks; the first
+ name should be "sw_sysport" and the second "sw_sysportwol".
Example:
ethernet@f04a0000 {
diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
index 94c0f8bf4deb..e10b6eb955e1 100644
--- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
+++ b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt
@@ -4,6 +4,12 @@ Required properties:
- compatible : Should be "fsl,<processor>-flexcan"
+ where <processor> is imx8qm, imx6q, imx28, imx53, imx35, imx25, p1010,
+ vf610, ls1021ar2, lx2160ar1, ls1028ar1.
+
+ The ls1028ar1 must be followed by lx2160ar1, e.g.
+ - "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan"
+
An implementation should also claim any of the following compatibles
that it is fully backwards compatible with:
@@ -25,12 +31,10 @@ Optional properties:
endian.
- fsl,stop-mode: register bits of stop mode control, the format is
- <&gpr req_gpr req_bit ack_gpr ack_bit>.
+ <&gpr req_gpr req_bit>.
gpr is the phandle to general purpose register node.
req_gpr is the gpr register offset of CAN stop request.
req_bit is the bit offset of CAN stop request.
- ack_gpr is the gpr register offset of CAN stop acknowledge.
- ack_bit is the bit offset of CAN stop acknowledge.
- fsl,clk-source: Select the clock source to the CAN Protocol Engine (PE).
It's SoC Implementation dependent. Refer to RM for detailed
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
index 5a0111d4de58..381f8fb3e865 100644
--- a/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251x.txt
@@ -12,6 +12,9 @@ Required properties:
Optional properties:
- vdd-supply: Regulator that powers the CAN controller.
- xceiver-supply: Regulator that powers the CAN transceiver.
+ - gpio-controller: Indicates this device is a GPIO controller.
+ - #gpio-cells: Should be two. The first cell is the pin number and
+ the second cell is used to specify the gpio polarity.
Example:
can0: can@1 {
@@ -19,7 +22,9 @@ Example:
reg = <1>;
clocks = <&clk24m>;
interrupt-parent = <&gpio4>;
- interrupts = <13 0x2>;
+ interrupts = <13 IRQ_TYPE_LEVEL_LOW>;
vdd-supply = <&reg5v0>;
xceiver-supply = <&reg5v0>;
+ gpio-controller;
+ #gpio-cells = <2>;
};
diff --git a/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
new file mode 100644
index 000000000000..2a884c1fe0e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/can/microchip,mcp251xfd.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title:
+ Microchip MCP2517FD and MCP2518FD stand-alone CAN controller device tree
+ bindings
+
+maintainers:
+ - Marc Kleine-Budde <mkl@pengutronix.de>
+
+properties:
+ compatible:
+ oneOf:
+ - const: microchip,mcp2517fd
+ description: for MCP2517FD
+ - const: microchip,mcp2518fd
+ description: for MCP2518FD
+ - const: microchip,mcp251xfd
+ description: to autodetect chip variant
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator that powers the CAN controller.
+
+ xceiver-supply:
+ description: Regulator that powers the CAN transceiver.
+
+ microchip,rx-int-gpios:
+ description:
+ GPIO phandle of the GPIO connected to the INT1 pin of the MCP251XFD, which
+ signals a pending RX interrupt.
+ maxItems: 1
+
+ spi-max-frequency:
+ description:
+ Must be at most half of the "clocks" frequency.
+ maximum: 20000000
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ can@0 {
+ compatible = "microchip,mcp251xfd";
+ reg = <0>;
+ clocks = <&can0_osc>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&can0_pins>;
+ spi-max-frequency = <20000000>;
+ interrupts-extended = <&gpio 13 IRQ_TYPE_LEVEL_LOW>;
+ microchip,rx-int-gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&reg5v0>;
+ xceiver-supply = <&reg5v0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/can/rcar_can.txt b/Documentation/devicetree/bindings/net/can/rcar_can.txt
index 85c6551b602a..6a5956347816 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_can.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_can.txt
@@ -2,13 +2,15 @@ Renesas R-Car CAN controller Device Tree Bindings
-------------------------------------------------
Required properties:
-- compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
+- compatible: "renesas,can-r8a7742" if CAN controller is a part of R8A7742 SoC.
+ "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
"renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
"renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
"renesas,can-r8a77470" if CAN controller is a part of R8A77470 SoC.
"renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
"renesas,can-r8a774b1" if CAN controller is a part of R8A774B1 SoC.
"renesas,can-r8a774c0" if CAN controller is a part of R8A774C0 SoC.
+ "renesas,can-r8a774e1" if CAN controller is a part of R8A774E1 SoC.
"renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
"renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
"renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -37,8 +39,8 @@ Required properties:
- pinctrl-0: pin control group to be used for this controller.
- pinctrl-names: must be "default".
-Required properties for R8A774A1, R8A774B1, R8A774C0, R8A7795, R8A7796,
-R8A77965, R8A77990, and R8A77995:
+Required properties for R8A774A1, R8A774B1, R8A774C0, R8A774E1, R8A7795,
+R8A7796, R8A77965, R8A77990, and R8A77995:
For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
be used by both CAN and CAN FD controller at the same time. It needs to be
scaled to maximum frequency if any of these controllers use it. This is done
diff --git a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
index 13a4e34c0c73..22cf2a889b2c 100644
--- a/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
+++ b/Documentation/devicetree/bindings/net/can/rcar_canfd.txt
@@ -7,6 +7,7 @@ Required properties:
- "renesas,r8a774a1-canfd" for R8A774A1 (RZ/G2M) compatible controller.
- "renesas,r8a774b1-canfd" for R8A774B1 (RZ/G2N) compatible controller.
- "renesas,r8a774c0-canfd" for R8A774C0 (RZ/G2E) compatible controller.
+ - "renesas,r8a774e1-canfd" for R8A774E1 (RZ/G2H) compatible controller.
- "renesas,r8a7795-canfd" for R8A7795 (R-Car H3) compatible controller.
- "renesas,r8a7796-canfd" for R8A7796 (R-Car M3-W) compatible controller.
- "renesas,r8a77965-canfd" for R8A77965 (R-Car M3-N) compatible controller.
@@ -32,8 +33,8 @@ The name of the child nodes are "channel0" and "channel1" respectively. Each
child node supports the "status" property only, which is used to
enable/disable the respective channel.
-Required properties for R8A774A1, R8A774B1, R8A774C0, R8A7795, R8A7796,
-R8A77965, R8A77990, and R8A77995:
+Required properties for R8A774A1, R8A774B1, R8A774C0, R8A774E1, R8A7795,
+R8A7796, R8A77965, R8A77990, and R8A77995:
In the denoted SoCs, canfd clock is a div6 clock and can be used by both CAN
and CAN FD controller at the same time. It needs to be scaled to maximum
frequency if any of these controllers use it. This is done using the below
diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
index cfd1afdc6e94..f1487a751b1a 100644
--- a/Documentation/devicetree/bindings/net/dsa/b53.txt
+++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
@@ -95,7 +95,7 @@ Ethernet switch connected via MDIO to the host, CPU port wired to eth0:
fixed-link {
speed = <1000>;
- duplex-full;
+ full-duplex;
};
};
@@ -104,8 +104,9 @@ Ethernet switch connected via MDIO to the host, CPU port wired to eth0:
#address-cells = <1>;
#size-cells = <0>;
- switch0: ethernet-switch@30 {
+ switch0: ethernet-switch@1e {
compatible = "brcm,bcm53125";
+ reg = <30>;
#address-cells = <1>;
#size-cells = <0>;
@@ -128,7 +129,7 @@ Ethernet switch connected via MDIO to the host, CPU port wired to eth0:
label = "cable-modem";
fixed-link {
speed = <1000>;
- duplex-full;
+ full-duplex;
};
phy-mode = "rgmii-txid";
};
@@ -138,7 +139,7 @@ Ethernet switch connected via MDIO to the host, CPU port wired to eth0:
label = "cpu";
fixed-link {
speed = <1000>;
- duplex-full;
+ full-duplex;
};
phy-mode = "rgmii-txid";
ethernet = <&eth0>;
diff --git a/Documentation/devicetree/bindings/net/dsa/mt7530.txt b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
index c5ed5d25f642..560369efad6c 100644
--- a/Documentation/devicetree/bindings/net/dsa/mt7530.txt
+++ b/Documentation/devicetree/bindings/net/dsa/mt7530.txt
@@ -5,6 +5,7 @@ Required properties:
- compatible: may be compatible = "mediatek,mt7530"
or compatible = "mediatek,mt7621"
+ or compatible = "mediatek,mt7531"
- #address-cells: Must be 1.
- #size-cells: Must be 0.
- mediatek,mcm: Boolean; if defined, indicates that either MT7530 is the part
@@ -32,10 +33,14 @@ Required properties for the child nodes within ports container:
- reg: Port address described must be 6 for CPU port and from 0 to 5 for
user ports.
-- phy-mode: String, must be either "trgmii" or "rgmii" for port labeled
- "cpu".
-
-Port 5 of the switch is muxed between:
+- phy-mode: String, the following values are acceptable for port labeled
+ "cpu":
+ If compatible mediatek,mt7530 or mediatek,mt7621 is set,
+ must be either "trgmii" or "rgmii"
+ If compatible mediatek,mt7531 is set,
+ must be either "sgmii", "1000base-x" or "2500base-x"
+
+Port 5 of mt7530 and mt7621 switch is muxed between:
1. GMAC5: GMAC5 can interface with another external MAC or PHY.
2. PHY of port 0 or port 4: PHY interfaces with an external MAC like 2nd GMAC
of the SOC. Used in many setups where port 0/4 becomes the WAN port.
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index 3fd85ce37e9c..fdf709817218 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -120,6 +120,13 @@ properties:
and is useful for determining certain configuration settings
such as flow control thresholds.
+ rx-internal-delay-ps:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ RGMII Receive Clock Delay defined in pico seconds.
+ This is used for controllers that have configurable RX internal delays.
+ If this property is present then the MAC applies the RX delay.
+
sfp:
$ref: /schemas/types.yaml#definitions/phandle
description:
@@ -131,6 +138,13 @@ properties:
The size of the controller\'s transmit fifo in bytes. This
is used for components that can have configurable fifo sizes.
+ tx-internal-delay-ps:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ RGMII Transmit Clock Delay defined in pico seconds.
+ This is used for controllers that have configurable TX internal delays.
+ If this property is present then the MAC applies the TX delay.
+
managed:
description:
Specifies the PHY management type. If auto is set and fixed-link
diff --git a/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
new file mode 100644
index 000000000000..fa3ebba4e635
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/intel,dwmac-plat.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel DWMAC glue layer Device Tree Bindings
+
+maintainers:
+ - Vineetha G. Jaya Kumaran <vineetha.g.jaya.kumaran@intel.com>
+
+select:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - intel,keembay-dwmac
+ required:
+ - compatible
+
+allOf:
+ - $ref: "snps,dwmac.yaml#"
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - intel,keembay-dwmac
+ - const: snps,dwmac-4.10a
+
+ clocks:
+ items:
+ - description: GMAC main clock
+ - description: PTP reference clock
+ - description: Tx clock
+
+ clock-names:
+ items:
+ - const: stmmaceth
+ - const: ptp_ref
+ - const: tx_clk
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+
+examples:
+# FIXME: Remove defines and include the correct header file
+# once it is available in mainline.
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #define MOVISOC_KMB_PSS_GBE
+ #define MOVISOC_KMB_PSS_AUX_GBE_PTP
+ #define MOVISOC_KMB_PSS_AUX_GBE_TX
+
+ stmmac_axi_setup: stmmac-axi-config {
+ snps,lpi_en;
+ snps,wr_osr_lmt = <0x0>;
+ snps,rd_osr_lmt = <0x2>;
+ snps,blen = <0 0 0 0 16 8 4>;
+ };
+
+ mtl_rx_setup: rx-queues-config {
+ snps,rx-queues-to-use = <2>;
+ snps,rx-sched-sp;
+ queue0 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x0>;
+ snps,priority = <0x0>;
+ };
+
+ queue1 {
+ snps,dcb-algorithm;
+ snps,map-to-dma-channel = <0x1>;
+ snps,priority = <0x1>;
+ };
+ };
+
+ mtl_tx_setup: tx-queues-config {
+ snps,tx-queues-to-use = <2>;
+ snps,tx-sched-wrr;
+ queue0 {
+ snps,weight = <0x10>;
+ snps,dcb-algorithm;
+ snps,priority = <0x0>;
+ };
+
+ queue1 {
+ snps,weight = <0x10>;
+ snps,dcb-algorithm;
+ snps,priority = <0x1>;
+ };
+ };
+
+ gmac0: ethernet@3a000000 {
+ compatible = "intel,keembay-dwmac", "snps,dwmac-4.10a";
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq";
+ reg = <0x3a000000 0x8000>;
+ snps,perfect-filter-entries = <128>;
+ phy-handle = <&eth_phy0>;
+ phy-mode = "rgmii";
+ rx-fifo-depth = <4096>;
+ tx-fifo-depth = <4096>;
+ clock-names = "stmmaceth", "ptp_ref", "tx_clk";
+ clocks = <&scmi_clk MOVISOC_KMB_PSS_GBE>,
+ <&scmi_clk MOVISOC_KMB_PSS_AUX_GBE_PTP>,
+ <&scmi_clk MOVISOC_KMB_PSS_AUX_GBE_TX>;
+ snps,pbl = <0x4>;
+ snps,axi-config = <&stmmac_axi_setup>;
+ snps,mtl-rx-config = <&mtl_rx_setup>;
+ snps,mtl-tx-config = <&mtl_tx_setup>;
+ snps,tso;
+ status = "okay";
+
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "snps,dwmac-mdio";
+
+ ethernet-phy@0 {
+ reg = <0>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/marvell,prestera.txt b/Documentation/devicetree/bindings/net/marvell,prestera.txt
index 83370ebf5b89..e28938ddfdf5 100644
--- a/Documentation/devicetree/bindings/net/marvell,prestera.txt
+++ b/Documentation/devicetree/bindings/net/marvell,prestera.txt
@@ -45,3 +45,37 @@ dfx-server {
ranges = <0 MBUS_ID(0x08, 0x00) 0 0x100000>;
reg = <MBUS_ID(0x08, 0x00) 0 0x100000>;
};
+
+Marvell Prestera SwitchDev bindings
+-----------------------------------
+Optional properties:
+- compatible: must be "marvell,prestera"
+- base-mac-provider: handle to the node which provides the base MAC address;
+ this might be a static base MAC address or an nvmem cell provider.
+
+Example:
+
+eeprom_mac_addr: eeprom-mac-addr {
+ compatible = "eeprom,mac-addr-cell";
+ status = "okay";
+
+ nvmem = <&eeprom_at24>;
+};
+
+prestera {
+ compatible = "marvell,prestera";
+ status = "okay";
+
+ base-mac-provider = <&eeprom_mac_addr>;
+};
+
+The current implementation of the Prestera SwitchDev PCI interface driver
+requires that BAR2 be assigned the base address 0xf6000000 from the PCI I/O
+range:
+
+&cp0_pcie0 {
+ ranges = <0x81000000 0x0 0xfb000000 0x0 0xfb000000 0x0 0xf0000
+ 0x82000000 0x0 0xf6000000 0x0 0xf6000000 0x0 0x2000000
+ 0x82000000 0x0 0xf9000000 0x0 0xf9000000 0x0 0x100000>;
+ phys = <&cp0_comphy0 0>;
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt b/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
deleted file mode 100644
index f02f6fb7f81c..000000000000
--- a/Documentation/devicetree/bindings/net/nfc/s3fwrn5.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-* Samsung S3FWRN5 NCI NFC Controller
-
-Required properties:
-- compatible: Should be "samsung,s3fwrn5-i2c".
-- reg: address on the bus
-- interrupts: GPIO interrupt to which the chip is connected
-- s3fwrn5,en-gpios: Output GPIO pin used for enabling/disabling the chip
-- s3fwrn5,fw-gpios: Output GPIO pin used to enter firmware mode and
- sleep/wakeup control
-
-Example:
-
-&hsi2c_4 {
- s3fwrn5@27 {
- compatible = "samsung,s3fwrn5-i2c";
-
- reg = <0x27>;
-
- interrupt-parent = <&gpa1>;
- interrupts = <3 0 0>;
-
- s3fwrn5,en-gpios = <&gpf1 4 0>;
- s3fwrn5,fw-gpios = <&gpj0 2 0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
new file mode 100644
index 000000000000..cb0b8a560282
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/nfc/samsung,s3fwrn5.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S3FWRN5 NCI NFC Controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzk@kernel.org>
+ - Krzysztof Opasiak <k.opasiak@samsung.com>
+
+properties:
+ compatible:
+ const: samsung,s3fwrn5-i2c
+
+ en-gpios:
+ maxItems: 1
+ description:
+ Output GPIO pin used for enabling/disabling the chip
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ wake-gpios:
+ maxItems: 1
+ description:
+ Output GPIO pin used to enter firmware mode and sleep/wakeup control
+
+ s3fwrn5,en-gpios:
+ maxItems: 1
+ deprecated: true
+ description:
+ Use en-gpios
+
+ s3fwrn5,fw-gpios:
+ maxItems: 1
+ deprecated: true
+ description:
+ Use wake-gpios
+
+additionalProperties: false
+
+required:
+ - compatible
+ - en-gpios
+ - interrupts
+ - reg
+ - wake-gpios
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ s3fwrn5@27 {
+ compatible = "samsung,s3fwrn5-i2c";
+ reg = <0x27>;
+
+ interrupt-parent = <&gpa1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+
+ en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>;
+ wake-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
new file mode 100644
index 000000000000..244befb6402a
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -0,0 +1,262 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/renesas,etheravb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas Ethernet AVB
+
+maintainers:
+ - Sergei Shtylyov <sergei.shtylyov@gmail.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - renesas,etheravb-r8a7742 # RZ/G1H
+ - renesas,etheravb-r8a7743 # RZ/G1M
+ - renesas,etheravb-r8a7744 # RZ/G1N
+ - renesas,etheravb-r8a7745 # RZ/G1E
+ - renesas,etheravb-r8a77470 # RZ/G1C
+ - renesas,etheravb-r8a7790 # R-Car H2
+ - renesas,etheravb-r8a7791 # R-Car M2-W
+ - renesas,etheravb-r8a7792 # R-Car V2H
+ - renesas,etheravb-r8a7793 # R-Car M2-N
+ - renesas,etheravb-r8a7794 # R-Car E2
+ - const: renesas,etheravb-rcar-gen2 # R-Car Gen2 and RZ/G1
+
+ - items:
+ - enum:
+ - renesas,etheravb-r8a774a1 # RZ/G2M
+ - renesas,etheravb-r8a774b1 # RZ/G2N
+ - renesas,etheravb-r8a774c0 # RZ/G2E
+ - renesas,etheravb-r8a774e1 # RZ/G2H
+ - renesas,etheravb-r8a7795 # R-Car H3
+ - renesas,etheravb-r8a7796 # R-Car M3-W
+ - renesas,etheravb-r8a77961 # R-Car M3-W+
+ - renesas,etheravb-r8a77965 # R-Car M3-N
+ - renesas,etheravb-r8a77970 # R-Car V3M
+ - renesas,etheravb-r8a77980 # R-Car V3H
+ - renesas,etheravb-r8a77990 # R-Car E3
+ - renesas,etheravb-r8a77995 # R-Car D3
+ - const: renesas,etheravb-rcar-gen3 # R-Car Gen3 and RZ/G2
+
+ reg: true
+
+ interrupts: true
+
+ interrupt-names: true
+
+ clocks:
+ maxItems: 1
+
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ phy-mode: true
+
+ phy-handle: true
+
+ '#address-cells':
+ description: Number of address cells for the MDIO bus.
+ const: 1
+
+ '#size-cells':
+ description: Number of size cells on the MDIO bus.
+ const: 0
+
+ renesas,no-ether-link:
+ type: boolean
+ description:
+ Specify when a board does not provide a proper AVB_LINK signal.
+
+ renesas,ether-link-active-low:
+ type: boolean
+ description:
+ Specify when the AVB_LINK signal is active-low instead of normal
+ active-high.
+
+ rx-internal-delay-ps:
+ enum: [0, 1800]
+
+ tx-internal-delay-ps:
+ enum: [0, 2000]
+
+patternProperties:
+ "^ethernet-phy@[0-9a-f]$":
+ type: object
+ $ref: ethernet-phy.yaml#
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - power-domains
+ - resets
+ - phy-mode
+ - phy-handle
+ - '#address-cells'
+ - '#size-cells'
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,etheravb-rcar-gen2
+ - renesas,etheravb-r8a7795
+ - renesas,etheravb-r8a7796
+ - renesas,etheravb-r8a77961
+ - renesas,etheravb-r8a77965
+ then:
+ properties:
+ reg:
+ items:
+ - description: MAC register block
+ - description: Stream buffer
+ else:
+ properties:
+ reg:
+ items:
+ - description: MAC register block
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,etheravb-rcar-gen2
+ then:
+ properties:
+ interrupts:
+ maxItems: 1
+ interrupt-names:
+ items:
+ - const: mux
+ rx-internal-delay-ps: false
+ else:
+ properties:
+ interrupts:
+ minItems: 25
+ maxItems: 25
+ interrupt-names:
+ items:
+ pattern: '^ch[0-9]+$'
+ required:
+ - interrupt-names
+ - rx-internal-delay-ps
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,etheravb-r8a774a1
+ - renesas,etheravb-r8a774b1
+ - renesas,etheravb-r8a7795
+ - renesas,etheravb-r8a7796
+ - renesas,etheravb-r8a77961
+ - renesas,etheravb-r8a77965
+ - renesas,etheravb-r8a77970
+ - renesas,etheravb-r8a77980
+ then:
+ required:
+ - tx-internal-delay-ps
+ else:
+ properties:
+ tx-internal-delay-ps: false
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,etheravb-r8a77995
+ then:
+ properties:
+ rx-internal-delay-ps:
+ const: 1800
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: renesas,etheravb-r8a77980
+ then:
+ properties:
+ tx-internal-delay-ps:
+ const: 2000
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+ #include <dt-bindings/gpio/gpio.h>
+ aliases {
+ ethernet0 = &avb;
+ };
+
+ avb: ethernet@e6800000 {
+ compatible = "renesas,etheravb-r8a7795",
+ "renesas,etheravb-rcar-gen3";
+ reg = <0xe6800000 0x800>, <0xe6a00000 0x10000>;
+ interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "ch0", "ch1", "ch2", "ch3", "ch4", "ch5", "ch6",
+ "ch7", "ch8", "ch9", "ch10", "ch11", "ch12",
+ "ch13", "ch14", "ch15", "ch16", "ch17", "ch18",
+ "ch19", "ch20", "ch21", "ch22", "ch23", "ch24";
+ clocks = <&cpg CPG_MOD 812>;
+ iommus = <&ipmmu_ds0 16>;
+ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ resets = <&cpg 812>;
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ rx-internal-delay-ps = <0>;
+ tx-internal-delay-ps = <2000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
+ reset-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
deleted file mode 100644
index 9119f1caf391..000000000000
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ /dev/null
@@ -1,135 +0,0 @@
-* Renesas Electronics Ethernet AVB
-
-This file provides information on what the device node for the Ethernet AVB
-interface contains.
-
-Required properties:
-- compatible: Must contain one or more of the following:
- - "renesas,etheravb-r8a7742" for the R8A7742 SoC.
- - "renesas,etheravb-r8a7743" for the R8A7743 SoC.
- - "renesas,etheravb-r8a7744" for the R8A7744 SoC.
- - "renesas,etheravb-r8a7745" for the R8A7745 SoC.
- - "renesas,etheravb-r8a77470" for the R8A77470 SoC.
- - "renesas,etheravb-r8a7790" for the R8A7790 SoC.
- - "renesas,etheravb-r8a7791" for the R8A7791 SoC.
- - "renesas,etheravb-r8a7792" for the R8A7792 SoC.
- - "renesas,etheravb-r8a7793" for the R8A7793 SoC.
- - "renesas,etheravb-r8a7794" for the R8A7794 SoC.
- - "renesas,etheravb-rcar-gen2" as a fallback for the above
- R-Car Gen2 and RZ/G1 devices.
-
- - "renesas,etheravb-r8a774a1" for the R8A774A1 SoC.
- - "renesas,etheravb-r8a774b1" for the R8A774B1 SoC.
- - "renesas,etheravb-r8a774c0" for the R8A774C0 SoC.
- - "renesas,etheravb-r8a774e1" for the R8A774E1 SoC.
- - "renesas,etheravb-r8a7795" for the R8A7795 SoC.
- - "renesas,etheravb-r8a7796" for the R8A77960 SoC.
- - "renesas,etheravb-r8a77961" for the R8A77961 SoC.
- - "renesas,etheravb-r8a77965" for the R8A77965 SoC.
- - "renesas,etheravb-r8a77970" for the R8A77970 SoC.
- - "renesas,etheravb-r8a77980" for the R8A77980 SoC.
- - "renesas,etheravb-r8a77990" for the R8A77990 SoC.
- - "renesas,etheravb-r8a77995" for the R8A77995 SoC.
- - "renesas,etheravb-rcar-gen3" as a fallback for the above
- R-Car Gen3 and RZ/G2 devices.
-
- When compatible with the generic version, nodes must list the
- SoC-specific version corresponding to the platform first followed by
- the generic version.
-
-- reg: Offset and length of (1) the register block and (2) the stream buffer.
- The region for the register block is mandatory.
- The region for the stream buffer is optional, as it is only present on
- R-Car Gen2 and RZ/G1 SoCs, and on R-Car H3 (R8A7795), M3-W (R8A77960),
- M3-W+ (R8A77961), and M3-N (R8A77965).
-- interrupts: A list of interrupt-specifiers, one for each entry in
- interrupt-names.
- If interrupt-names is not present, an interrupt specifier
- for a single muxed interrupt.
-- phy-mode: see ethernet.txt file in the same directory.
-- phy-handle: see ethernet.txt file in the same directory.
-- #address-cells: number of address cells for the MDIO bus, must be equal to 1.
-- #size-cells: number of size cells on the MDIO bus, must be equal to 0.
-- clocks: clock phandle and specifier pair.
-- pinctrl-0: phandle, referring to a default pin configuration node.
-
-Optional properties:
-- interrupt-names: A list of interrupt names.
- For the R-Car Gen 3 SoCs this property is mandatory;
- it should include one entry per channel, named "ch%u",
- where %u is the channel number ranging from 0 to 24.
- For other SoCs this property is optional; if present
- it should contain "mux" for a single muxed interrupt.
-- pinctrl-names: pin configuration state name ("default").
-- renesas,no-ether-link: boolean, specify when a board does not provide a proper
- AVB_LINK signal.
-- renesas,ether-link-active-low: boolean, specify when the AVB_LINK signal is
- active-low instead of normal active-high.
-
-Example:
-
- ethernet@e6800000 {
- compatible = "renesas,etheravb-r8a7795", "renesas,etheravb-rcar-gen3";
- reg = <0 0xe6800000 0 0x800>, <0 0xe6a00000 0 0x10000>;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 46 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 51 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 57 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 58 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>,
- <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
- interrupt-names = "ch0", "ch1", "ch2", "ch3",
- "ch4", "ch5", "ch6", "ch7",
- "ch8", "ch9", "ch10", "ch11",
- "ch12", "ch13", "ch14", "ch15",
- "ch16", "ch17", "ch18", "ch19",
- "ch20", "ch21", "ch22", "ch23",
- "ch24";
- clocks = <&cpg CPG_MOD 812>;
- power-domains = <&cpg>;
- phy-mode = "rgmii-id";
- phy-handle = <&phy0>;
-
- pinctrl-0 = <&ether_pins>;
- pinctrl-names = "default";
- renesas,no-ether-link;
- #address-cells = <1>;
- #size-cells = <0>;
-
- phy0: ethernet-phy@0 {
- rxc-skew-ps = <900>;
- rxdv-skew-ps = <0>;
- rxd0-skew-ps = <0>;
- rxd1-skew-ps = <0>;
- rxd2-skew-ps = <0>;
- rxd3-skew-ps = <0>;
- txc-skew-ps = <900>;
- txen-skew-ps = <0>;
- txd0-skew-ps = <0>;
- txd1-skew-ps = <0>;
- txd2-skew-ps = <0>;
- txd3-skew-ps = <0>;
- reg = <0>;
- interrupt-parent = <&gpio2>;
- interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
index 8b7c719b0bb9..a8d0dc9a8c0e 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan87xx.txt
@@ -5,6 +5,10 @@ through an Ethernet OF device node.
Optional properties:
+- clocks:
+ The clock used as the PHY reference clock, connected to the PHY
+ pin XTAL1/CLKIN.
+
- smsc,disable-energy-detect:
If set, do not enable energy detect mode for the SMSC phy.
default: enable energy detect mode
diff --git a/Documentation/devicetree/bindings/net/ti,dp83822.yaml b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
new file mode 100644
index 000000000000..55913534cbc2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ti,dp83822.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0+ OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/net/ti,dp83822.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: TI DP83822 ethernet PHY
+
+maintainers:
+ - Dan Murphy <dmurphy@ti.com>
+
+description: |
+ The DP83822 is a low-power, single-port, 10/100 Mbps Ethernet PHY. It
+ provides all of the physical layer functions needed to transmit and receive
+ data over standard, twisted-pair cables or to connect to an external,
+ fiber-optic transceiver. Additionally, the DP83822 provides flexibility to
+ connect to a MAC through a standard MII, RMII, or RGMII interface.
+
+ Specifications about the Ethernet PHY can be found at:
+ http://www.ti.com/lit/ds/symlink/dp83822i.pdf
+
+allOf:
+ - $ref: "ethernet-phy.yaml#"
+
+properties:
+ reg:
+ maxItems: 1
+
+ ti,link-loss-low:
+ type: boolean
+ description: |
+ DP83822 PHY in Fiber mode only.
+ Sets the DP83822 to detect a link drop condition when the signal goes
+ high. If not set, a link drop will be detected when the signal goes low.
+ This property is only applicable if the fiber mode support is strapped
+ to on.
+
+ ti,fiber-mode:
+ type: boolean
+ description: |
+ DP83822 PHY only.
+ If present, the DP83822 PHY is configured to operate in fiber mode.
+ Fiber mode support can also be strapped. If the strap pin is not set
+ correctly or not set at all then this boolean can be used to enable it.
+ If the fiber mode is not strapped then signal detection for the PHY
+ is disabled.
+ In fiber mode, auto-negotiation is disabled and the PHY can only work in
+ 100base-fx (full and half duplex) modes.
+
+ rx-internal-delay-ps:
+ description: |
+ DP83822 PHY only.
+ Setting this property to a non-zero number sets the RX internal delay
+ for the PHY. The internal delay for the PHY is fixed to 3.5ns relative
+ to receive data.
+
+ tx-internal-delay-ps:
+ description: |
+ DP83822 PHY only.
+ Setting this property to a non-zero number sets the TX internal delay
+ for the PHY. The internal delay for the PHY is fixed to 3.5ns relative
+ to transmit data.
+
+required:
+ - reg
+
+examples:
+ - |
+ mdio0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ rx-internal-delay-ps = <1>;
+ tx-internal-delay-ps = <1>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
index 65ee68efd574..b61c2d5a0ff7 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
@@ -65,7 +65,8 @@ Optional properties:
the length can vary between hw versions.
- <supply-name>-supply: handle to the regulator device tree node
optional "supply-name" are "vdd-0.8-cx-mx",
- "vdd-1.8-xo", "vdd-1.3-rfa" and "vdd-3.3-ch0".
+ "vdd-1.8-xo", "vdd-1.3-rfa", "vdd-3.3-ch0",
+ and "vdd-3.3-ch1".
- memory-region:
Usage: optional
Value type: <phandle>
@@ -204,6 +205,7 @@ wifi@18000000 {
vdd-1.8-xo-supply = <&vreg_l7a_1p8>;
vdd-1.3-rfa-supply = <&vreg_l17a_1p3>;
vdd-3.3-ch0-supply = <&vreg_l25a_3p3>;
+ vdd-3.3-ch1-supply = <&vreg_l26a_3p3>;
memory-region = <&wifi_msa_mem>;
iommus = <&apps_smmu 0x0040 0x1>;
qcom,msa-fixed-perm;
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index a1717db36dba..4b365c9d9378 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -17,7 +17,9 @@ description: |
properties:
compatible:
- const: qcom,ipq8074-wifi
+ enum:
+ - qcom,ipq8074-wifi
+ - qcom,ipq6018-wifi
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
index d48f9eb3636e..743eda754e65 100644
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
@@ -18,6 +18,8 @@ Clock Properties:
- fsl,tmr-add Frequency compensation value.
- fsl,tmr-fiper1 Fixed interval period pulse generator.
- fsl,tmr-fiper2 Fixed interval period pulse generator.
+ - fsl,tmr-fiper3 Fixed interval period pulse generator.
+ Supported only on DPAA2 and ENETC hardware.
- fsl,max-adj Maximum frequency adjustment in parts per billion.
- fsl,extts-fifo The presence of this property indicates hardware
support for the external trigger stamp FIFO.
diff --git a/Documentation/driver-api/80211/cfg80211.rst b/Documentation/driver-api/80211/cfg80211.rst
index eeab91b59457..836f609c3f75 100644
--- a/Documentation/driver-api/80211/cfg80211.rst
+++ b/Documentation/driver-api/80211/cfg80211.rst
@@ -12,79 +12,32 @@ Device registration
:doc: Device registration
.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_channel_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_channel
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_rate_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_rate
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_sta_ht_cap
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_supported_band
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_signal_type
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_params_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wireless_dev
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_new
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_read_of_freq_limits
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_register
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_unregister
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_free
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_name
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_dev
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_priv
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: priv_to_wiphy
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: set_wiphy_dev
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wdev_priv
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_iface_limit
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_iface_combination
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_check_combinations
+ :functions:
+ ieee80211_channel_flags
+ ieee80211_channel
+ ieee80211_rate_flags
+ ieee80211_rate
+ ieee80211_sta_ht_cap
+ ieee80211_supported_band
+ cfg80211_signal_type
+ wiphy_params_flags
+ wiphy_flags
+ wiphy
+ wireless_dev
+ wiphy_new
+ wiphy_read_of_freq_limits
+ wiphy_register
+ wiphy_unregister
+ wiphy_free
+ wiphy_name
+ wiphy_dev
+ wiphy_priv
+ priv_to_wiphy
+ set_wiphy_dev
+ wdev_priv
+ ieee80211_iface_limit
+ ieee80211_iface_combination
+ cfg80211_check_combinations
Actions and configuration
=========================
@@ -93,139 +46,52 @@ Actions and configuration
:doc: Actions and configuration
.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_ops
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: vif_params
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: key_params
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: survey_info_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: survey_info
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_beacon_data
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_ap_settings
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: station_parameters
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: rate_info_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: rate_info
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: station_info
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: monitor_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: mpath_info_flags
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: mpath_info
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: bss_parameters
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_txq_params
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_crypto_settings
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_auth_request
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_assoc_request
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_deauth_request
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_disassoc_request
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_ibss_params
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_connect_params
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_pmksa
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_rx_mlme_mgmt
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_auth_timeout
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_rx_assoc_resp
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_assoc_timeout
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_tx_mlme_mgmt
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_ibss_joined
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_connect_resp_params
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_connect_done
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_connect_result
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_connect_bss
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_connect_timeout
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_roamed
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_disconnected
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_ready_on_channel
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_remain_on_channel_expired
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_new_sta
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_rx_mgmt
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_mgmt_tx_status
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_cqm_rssi_notify
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_cqm_pktloss_notify
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_michael_mic_failure
+ :functions:
+ cfg80211_ops
+ vif_params
+ key_params
+ survey_info_flags
+ survey_info
+ cfg80211_beacon_data
+ cfg80211_ap_settings
+ station_parameters
+ rate_info_flags
+ rate_info
+ station_info
+ monitor_flags
+ mpath_info_flags
+ mpath_info
+ bss_parameters
+ ieee80211_txq_params
+ cfg80211_crypto_settings
+ cfg80211_auth_request
+ cfg80211_assoc_request
+ cfg80211_deauth_request
+ cfg80211_disassoc_request
+ cfg80211_ibss_params
+ cfg80211_connect_params
+ cfg80211_pmksa
+ cfg80211_rx_mlme_mgmt
+ cfg80211_auth_timeout
+ cfg80211_rx_assoc_resp
+ cfg80211_assoc_timeout
+ cfg80211_tx_mlme_mgmt
+ cfg80211_ibss_joined
+ cfg80211_connect_resp_params
+ cfg80211_connect_done
+ cfg80211_connect_result
+ cfg80211_connect_bss
+ cfg80211_connect_timeout
+ cfg80211_roamed
+ cfg80211_disconnected
+ cfg80211_ready_on_channel
+ cfg80211_remain_on_channel_expired
+ cfg80211_new_sta
+ cfg80211_rx_mgmt
+ cfg80211_mgmt_tx_status
+ cfg80211_cqm_rssi_notify
+ cfg80211_cqm_pktloss_notify
+ cfg80211_michael_mic_failure
Scanning and BSS list handling
==============================
@@ -234,34 +100,17 @@ Scanning and BSS list handling
:doc: Scanning and BSS list handling
.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_ssid
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_scan_request
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_scan_done
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_bss
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_inform_bss
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_inform_bss_frame_data
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_inform_bss_data
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_unlink_bss
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_find_ie
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_bss_get_ie
+ :functions:
+ cfg80211_ssid
+ cfg80211_scan_request
+ cfg80211_scan_done
+ cfg80211_bss
+ cfg80211_inform_bss
+ cfg80211_inform_bss_frame_data
+ cfg80211_inform_bss_data
+ cfg80211_unlink_bss
+ cfg80211_find_ie
+ ieee80211_bss_get_ie
Utility functions
=================
@@ -270,25 +119,14 @@ Utility functions
:doc: Utility functions
.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_channel_to_frequency
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_frequency_to_channel
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_get_channel
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_get_response_rate
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_hdrlen
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_get_hdrlen_from_skb
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_radiotap_iterator
+ :functions:
+ ieee80211_channel_to_frequency
+ ieee80211_frequency_to_channel
+ ieee80211_get_channel
+ ieee80211_get_response_rate
+ ieee80211_hdrlen
+ ieee80211_get_hdrlen_from_skb
+ ieee80211_radiotap_iterator
Data path helpers
=================
@@ -297,13 +135,10 @@ Data path helpers
:doc: Data path helpers
.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_data_to_8023
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: ieee80211_amsdu_to_8023s
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_classify8021d
+ :functions:
+ ieee80211_data_to_8023
+ ieee80211_amsdu_to_8023s
+ cfg80211_classify8021d
Regulatory enforcement infrastructure
=====================================
@@ -312,13 +147,10 @@ Regulatory enforcement infrastructure
:doc: Regulatory enforcement infrastructure
.. kernel-doc:: include/net/cfg80211.h
- :functions: regulatory_hint
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_apply_custom_regulatory
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: freq_reg_info
+ :functions:
+ regulatory_hint
+ wiphy_apply_custom_regulatory
+ freq_reg_info
RFkill integration
==================
@@ -327,13 +159,10 @@ RFkill integration
:doc: RFkill integration
.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_rfkill_set_hw_state
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_rfkill_start_polling
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: wiphy_rfkill_stop_polling
+ :functions:
+ wiphy_rfkill_set_hw_state
+ wiphy_rfkill_start_polling
+ wiphy_rfkill_stop_polling
Test mode
=========
@@ -342,13 +171,8 @@ Test mode
:doc: Test mode
.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_testmode_alloc_reply_skb
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_testmode_reply
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_testmode_alloc_event_skb
-
-.. kernel-doc:: include/net/cfg80211.h
- :functions: cfg80211_testmode_event
+ :functions:
+ cfg80211_testmode_alloc_reply_skb
+ cfg80211_testmode_reply
+ cfg80211_testmode_alloc_event_skb
+ cfg80211_testmode_event
diff --git a/Documentation/driver-api/80211/mac80211-advanced.rst b/Documentation/driver-api/80211/mac80211-advanced.rst
index 24cb64b3b715..f8df7b3af8f5 100644
--- a/Documentation/driver-api/80211/mac80211-advanced.rst
+++ b/Documentation/driver-api/80211/mac80211-advanced.rst
@@ -15,25 +15,14 @@ appropriate trigger, which will then be triggered appropriately by
mac80211.
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_tx_led_name
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_rx_led_name
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_assoc_led_name
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_radio_led_name
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tpt_blink
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tpt_led_trigger_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_create_tpt_led_trigger
+ :functions:
+ ieee80211_get_tx_led_name
+ ieee80211_get_rx_led_name
+ ieee80211_get_assoc_led_name
+ ieee80211_get_radio_led_name
+ ieee80211_tpt_blink
+ ieee80211_tpt_led_trigger_flags
+ ieee80211_create_tpt_led_trigger
Hardware crypto acceleration
============================
@@ -42,22 +31,13 @@ Hardware crypto acceleration
:doc: Hardware crypto acceleration
.. kernel-doc:: include/net/mac80211.h
- :functions: set_key_cmd
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_key_conf
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_key_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_tkip_p1k
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_tkip_p1k_iv
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_tkip_p2k
+ :functions:
+ set_key_cmd
+ ieee80211_key_conf
+ ieee80211_key_flags
+ ieee80211_get_tkip_p1k
+ ieee80211_get_tkip_p1k_iv
+ ieee80211_get_tkip_p2k
Powersave support
=================
@@ -99,28 +79,15 @@ support for powersaving clients
:doc: AP support for powersaving clients
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_get_buffered_bc
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_beacon_get
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_sta_eosp
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_frame_release_type
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_sta_ps_transition
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_sta_ps_transition_ni
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_sta_set_buffered
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_sta_block_awake
+ :functions:
+ ieee80211_get_buffered_bc
+ ieee80211_beacon_get
+ ieee80211_sta_eosp
+ ieee80211_frame_release_type
+ ieee80211_sta_ps_transition
+ ieee80211_sta_ps_transition_ni
+ ieee80211_sta_set_buffered
+ ieee80211_sta_block_awake
Supporting multiple virtual interfaces
======================================
@@ -134,10 +101,9 @@ addresses here, note which configurations are supported by mac80211, add
notes about supporting hw crypto with it.
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_iterate_active_interfaces
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_iterate_active_interfaces_atomic
+ :functions:
+ ieee80211_iterate_active_interfaces
+ ieee80211_iterate_active_interfaces_atomic
Station handling
================
@@ -145,16 +111,11 @@ Station handling
TODO
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_sta
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: sta_notify_cmd
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_find_sta
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_find_sta_by_ifaddr
+ :functions:
+ ieee80211_sta
+ sta_notify_cmd
+ ieee80211_find_sta
+ ieee80211_find_sta_by_ifaddr
Hardware scan offload
=====================
@@ -193,10 +154,9 @@ Spatial Multiplexing Powersave (SMPS)
:doc: Spatial multiplexing power save
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_request_smps
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_smps_mode
+ :functions:
+ ieee80211_request_smps
+ ieee80211_smps_mode
TBD
@@ -209,22 +169,13 @@ Rate Control API
TBD
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_start_tx_ba_session
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_start_tx_ba_cb_irqsafe
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_stop_tx_ba_session
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_stop_tx_ba_cb_irqsafe
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_rate_control_changed
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tx_rate_control
+ :functions:
+ ieee80211_start_tx_ba_session
+ ieee80211_start_tx_ba_cb_irqsafe
+ ieee80211_stop_tx_ba_session
+ ieee80211_stop_tx_ba_cb_irqsafe
+ ieee80211_rate_control_changed
+ ieee80211_tx_rate_control
TBD
@@ -261,10 +212,9 @@ Programming information
-----------------------
.. kernel-doc:: net/mac80211/sta_info.h
- :functions: sta_info
-
-.. kernel-doc:: net/mac80211/sta_info.h
- :functions: ieee80211_sta_info_flags
+ :functions:
+ sta_info
+ ieee80211_sta_info_flags
STA information lifetime rules
------------------------------
@@ -276,13 +226,10 @@ Aggregation Functions
=====================
.. kernel-doc:: net/mac80211/sta_info.h
- :functions: sta_ampdu_mlme
-
-.. kernel-doc:: net/mac80211/sta_info.h
- :functions: tid_ampdu_tx
-
-.. kernel-doc:: net/mac80211/sta_info.h
- :functions: tid_ampdu_rx
+ :functions:
+ sta_ampdu_mlme
+ tid_ampdu_tx
+ tid_ampdu_rx
Synchronisation Functions
=========================
diff --git a/Documentation/driver-api/80211/mac80211.rst b/Documentation/driver-api/80211/mac80211.rst
index eab40bcf3987..67d2e58b45e4 100644
--- a/Documentation/driver-api/80211/mac80211.rst
+++ b/Documentation/driver-api/80211/mac80211.rst
@@ -30,31 +30,16 @@ Finally, a discussion of hardware capabilities should be done with
references to other parts of the book.
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_hw
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_hw_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: SET_IEEE80211_DEV
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: SET_IEEE80211_PERM_ADDR
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_ops
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_alloc_hw
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_register_hw
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_unregister_hw
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_free_hw
+ :functions:
+ ieee80211_hw
+ ieee80211_hw_flags
+ SET_IEEE80211_DEV
+ SET_IEEE80211_PERM_ADDR
+ ieee80211_ops
+ ieee80211_alloc_hw
+ ieee80211_register_hw
+ ieee80211_unregister_hw
+ ieee80211_free_hw
PHY configuration
=================
@@ -65,10 +50,9 @@ This chapter should describe PHY handling including start/stop callbacks
and the various structures used.
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_conf
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_conf_flags
+ :functions:
+ ieee80211_conf
+ ieee80211_conf_flags
Virtual interfaces
==================
@@ -123,79 +107,32 @@ functions/definitions
---------------------
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_rx_status
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: mac80211_rx_encoding_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: mac80211_rx_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: mac80211_tx_info_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: mac80211_tx_control_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: mac80211_rate_control_flags
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tx_rate
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tx_info
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tx_info_clear_status
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_rx
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_rx_ni
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_rx_irqsafe
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tx_status
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tx_status_ni
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_tx_status_irqsafe
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_rts_get
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_rts_duration
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_ctstoself_get
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_ctstoself_duration
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_generic_frame_duration
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_wake_queue
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_stop_queue
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_wake_queues
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_stop_queues
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_queue_stopped
+ :functions:
+ ieee80211_rx_status
+ mac80211_rx_encoding_flags
+ mac80211_rx_flags
+ mac80211_tx_info_flags
+ mac80211_tx_control_flags
+ mac80211_rate_control_flags
+ ieee80211_tx_rate
+ ieee80211_tx_info
+ ieee80211_tx_info_clear_status
+ ieee80211_rx
+ ieee80211_rx_ni
+ ieee80211_rx_irqsafe
+ ieee80211_tx_status
+ ieee80211_tx_status_ni
+ ieee80211_tx_status_irqsafe
+ ieee80211_rts_get
+ ieee80211_rts_duration
+ ieee80211_ctstoself_get
+ ieee80211_ctstoself_duration
+ ieee80211_generic_frame_duration
+ ieee80211_wake_queue
+ ieee80211_stop_queue
+ ieee80211_wake_queues
+ ieee80211_stop_queues
+ ieee80211_queue_stopped
Frame filtering
===============
@@ -213,7 +150,6 @@ The mac80211 workqueue
:doc: mac80211 workqueue
.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_queue_work
-
-.. kernel-doc:: include/net/mac80211.h
- :functions: ieee80211_queue_delayed_work
+ :functions:
+ ieee80211_queue_work
+ ieee80211_queue_delayed_work
diff --git a/Documentation/networking/af_xdp.rst b/Documentation/networking/af_xdp.rst
index 5bc55a4e3bce..2ccc5644cc98 100644
--- a/Documentation/networking/af_xdp.rst
+++ b/Documentation/networking/af_xdp.rst
@@ -258,14 +258,21 @@ socket into zero-copy mode or fail.
XDP_SHARED_UMEM bind flag
-------------------------
-This flag enables you to bind multiple sockets to the same UMEM, but
-only if they share the same queue id. In this mode, each socket has
-their own RX and TX rings, but the UMEM (tied to the fist socket
-created) only has a single FILL ring and a single COMPLETION
-ring. To use this mode, create the first socket and bind it in the normal
-way. Create a second socket and create an RX and a TX ring, or at
-least one of them, but no FILL or COMPLETION rings as the ones from
-the first socket will be used. In the bind call, set he
+This flag enables you to bind multiple sockets to the same UMEM. It
+works on the same queue id, between queue ids and between
+netdevs/devices. In this mode, each socket has its own RX and TX
+rings as usual, but you are going to have one or more FILL and
+COMPLETION ring pairs. You have to create one of these pairs per
+unique netdev and queue id tuple that you bind to.
+
+Let us start with the case where we would like to share a UMEM between
+sockets bound to the same netdev and queue id. The UMEM (tied to the
+first socket created) will only have a single FILL ring and a single
+COMPLETION ring as there is only one unique netdev,queue_id tuple that
+we have bound to. To use this mode, create the first socket and bind
+it in the normal way. Create a second socket and create an RX and a TX
+ring, or at least one of them, but no FILL or COMPLETION rings as the
+ones from the first socket will be used. In the bind call, set the
XDP_SHARED_UMEM option and provide the initial socket's fd in the
sxdp_shared_umem_fd field. You can attach an arbitrary number of extra
sockets this way.
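+
+As a minimal sketch, assuming hypothetical variables ``fd`` for the new
+socket, ``umem_fd`` for the first socket's fd, and the same
+``ifindex``/``queue_id`` that the first socket was bound to, the second
+bind call could look like::
+
+    #include <linux/if_xdp.h>
+    #include <sys/socket.h>
+
+    struct sockaddr_xdp sxdp = {};
+
+    sxdp.sxdp_family = AF_XDP;
+    sxdp.sxdp_ifindex = ifindex;
+    sxdp.sxdp_queue_id = queue_id;
+    sxdp.sxdp_flags = XDP_SHARED_UMEM;
+    sxdp.sxdp_shared_umem_fd = umem_fd; /* fd of the first socket */
+
+    if (bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp)))
+        /* handle error */;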
@@ -305,11 +312,41 @@ concurrently. There are no synchronization primitives in the
libbpf code that protects multiple users at this point in time.
Libbpf uses this mode if you create more than one socket tied to the
-same umem. However, note that you need to supply the
+same UMEM. However, note that you need to supply the
XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD libbpf_flag with the
xsk_socket__create calls and load your own XDP program as there is no
built in one in libbpf that will route the traffic for you.
+The second case is when you share a UMEM between sockets that are
+bound to different queue ids and/or netdevs. In this case you have to
+create one FILL ring and one COMPLETION ring for each unique
+netdev,queue_id pair. Let us say you want to create two sockets bound
+to two different queue ids on the same netdev. Create the first socket
+and bind it in the normal way. Create a second socket and create an RX
+and a TX ring, or at least one of them, and then one FILL and
+COMPLETION ring for this socket. Then in the bind call, set the
+XDP_SHARED_UMEM option and provide the initial socket's fd in the
+sxdp_shared_umem_fd field as you registered the UMEM on that
+socket. These two sockets will now share one and the same UMEM.
+
+There is no need to supply an XDP program like the one in the previous
+case where sockets were bound to the same queue id and
+device. Instead, use the NIC's packet steering capabilities to steer
+the packets to the right queue. In the previous example, there is only
+one queue shared among sockets, so the NIC cannot do this steering. It
+can only steer between queues.
+
+In libbpf, you need to use the xsk_socket__create_shared() API as it
+takes a reference to a FILL ring and a COMPLETION ring that will be
+created for you and bound to the shared UMEM. You can use this
+function for all the sockets you create, or you can use it for the
+second and following ones and use xsk_socket__create() for the first
+one. Both methods yield the same result.
+
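+As a rough sketch, assuming ``umem``, ``ifname`` and ``queue_id`` have
+been prepared earlier (error handling elided)::
+
+    #include <bpf/xsk.h>
+
+    struct xsk_socket *xsk;
+    struct xsk_ring_cons rx, comp;
+    struct xsk_ring_prod tx, fill;
+
+    /* One FILL/COMPLETION pair per unique netdev,queue_id tuple. */
+    int err = xsk_socket__create_shared(&xsk, ifname, queue_id, umem,
+                                        &rx, &tx, &fill, &comp, NULL);
+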
+Note that a UMEM can be shared between sockets on the same queue id
+and device, as well as between queues on the same device and between
+devices at the same time.
+
XDP_USE_NEED_WAKEUP bind flag
-----------------------------
@@ -364,7 +401,7 @@ resources by only setting up one of them. Both the FILL ring and the
COMPLETION ring are mandatory as you need to have a UMEM tied to your
socket. But if the XDP_SHARED_UMEM flag is used, any socket after the
first one does not have a UMEM and should in that case not have any
-FILL or COMPLETION rings created as the ones from the shared umem will
+FILL or COMPLETION rings created as the ones from the shared UMEM will
be used. Note, that the rings are single-producer single-consumer, so
do not try to access them from multiple processes at the same
time. See the XDP_SHARED_UMEM section.
@@ -567,6 +604,17 @@ A: The short answer is no, that is not supported at the moment. The
switch, or other distribution mechanism, in your NIC to direct
traffic to the correct queue id and socket.
+Q: My packets are sometimes corrupted. What is wrong?
+
+A: Care has to be taken not to feed the same buffer in the UMEM into
+ more than one ring at the same time. If you for example feed the
+ same buffer into the FILL ring and the TX ring at the same time, the
+ NIC might receive data into the buffer at the same time it is
+ sending it. This will cause some packets to become corrupted. Same
+ thing goes for feeding the same buffer into the FILL rings
+ belonging to different queue ids or netdevs bound with the
+ XDP_SHARED_UMEM flag.
+
Credits
=======
diff --git a/Documentation/networking/caif/index.rst b/Documentation/networking/caif/index.rst
index 86e5b7832ec3..ec29b6f4bdb4 100644
--- a/Documentation/networking/caif/index.rst
+++ b/Documentation/networking/caif/index.rst
@@ -10,4 +10,3 @@ Contents:
linux_caif
caif
- spi_porting
diff --git a/Documentation/networking/caif/spi_porting.rst b/Documentation/networking/caif/spi_porting.rst
deleted file mode 100644
index d49f874b20ac..000000000000
--- a/Documentation/networking/caif/spi_porting.rst
+++ /dev/null
@@ -1,229 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-================
-CAIF SPI porting
-================
-
-CAIF SPI basics
-===============
-
-Running CAIF over SPI needs some extra setup, owing to the nature of SPI.
-Two extra GPIOs have been added in order to negotiate the transfers
-between the master and the slave. The minimum requirement for running
-CAIF over SPI is a SPI slave chip and two GPIOs (more details below).
-Please note that running as a slave implies that you need to keep up
-with the master clock. An overrun or underrun event is fatal.
-
-CAIF SPI framework
-==================
-
-To make porting as easy as possible, the CAIF SPI has been divided in
-two parts. The first part (called the interface part) deals with all
-generic functionality such as length framing, SPI frame negotiation
-and SPI frame delivery and transmission. The other part is the CAIF
-SPI slave device part, which is the module that you have to write if
-you want to run SPI CAIF on a new hardware. This part takes care of
-the physical hardware, both with regard to SPI and to GPIOs.
-
-- Implementing a CAIF SPI device:
-
- - Functionality provided by the CAIF SPI slave device:
-
- In order to implement a SPI device you will, as a minimum,
- need to implement the following
- functions:
-
- ::
-
- int (*init_xfer) (struct cfspi_xfer * xfer, struct cfspi_dev *dev):
-
- This function is called by the CAIF SPI interface to give
- you a chance to set up your hardware to be ready to receive
- a stream of data from the master. The xfer structure contains
- both physical and logical addresses, as well as the total length
- of the transfer in both directions.The dev parameter can be used
- to map to different CAIF SPI slave devices.
-
- ::
-
- void (*sig_xfer) (bool xfer, struct cfspi_dev *dev):
-
- This function is called by the CAIF SPI interface when the output
- (SPI_INT) GPIO needs to change state. The boolean value of the xfer
- variable indicates whether the GPIO should be asserted (HIGH) or
- deasserted (LOW). The dev parameter can be used to map to different CAIF
- SPI slave devices.
-
- - Functionality provided by the CAIF SPI interface:
-
- ::
-
- void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
-
- This function is called by the CAIF SPI slave device in order to
- signal a change of state of the input GPIO (SS) to the interface.
- Only active edges are mandatory to be reported.
- This function can be called from IRQ context (recommended in order
- not to introduce latency). The ifc parameter should be the pointer
- returned from the platform probe function in the SPI device structure.
-
- ::
-
- void (*xfer_done_cb) (struct cfspi_ifc *ifc);
-
- This function is called by the CAIF SPI slave device in order to
- report that a transfer is completed. This function should only be
- called once both the transmission and the reception are completed.
- This function can be called from IRQ context (recommended in order
- not to introduce latency). The ifc parameter should be the pointer
- returned from the platform probe function in the SPI device structure.
-
- - Connecting the bits and pieces:
-
- - Filling in the SPI slave device structure:
-
- Connect the necessary callback functions.
-
- Indicate clock speed (used to calculate toggle delays).
-
- Chose a suitable name (helps debugging if you use several CAIF
- SPI slave devices).
-
- Assign your private data (can be used to map to your
- structure).
-
- - Filling in the SPI slave platform device structure:
-
- Add name of driver to connect to ("cfspi_sspi").
-
- Assign the SPI slave device structure as platform data.
-
-Padding
-=======
-
-In order to optimize throughput, a number of SPI padding options are provided.
-Padding can be enabled independently for uplink and downlink transfers.
-Padding can be enabled for the head, the tail and for the total frame size.
-The padding needs to be correctly configured on both sides of the link.
-The padding can be changed via module parameters in cfspi_sspi.c or via
-the sysfs directory of the cfspi_sspi driver (before device registration).
-
-- CAIF SPI device template::
-
- /*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
- * License terms: GNU General Public License (GPL), version 2.
- *
- */
-
- #include <linux/init.h>
- #include <linux/module.h>
- #include <linux/device.h>
- #include <linux/wait.h>
- #include <linux/interrupt.h>
- #include <linux/dma-mapping.h>
- #include <net/caif/caif_spi.h>
-
- MODULE_LICENSE("GPL");
-
- struct sspi_struct {
- struct cfspi_dev sdev;
- struct cfspi_xfer *xfer;
- };
-
- static struct sspi_struct slave;
- static struct platform_device slave_device;
-
- static irqreturn_t sspi_irq(int irq, void *arg)
- {
- /* You only need to trigger on an edge to the active state of the
- * SS signal. Once a edge is detected, the ss_cb() function should be
- * called with the parameter assert set to true. It is OK
- * (and even advised) to call the ss_cb() function in IRQ context in
- * order not to add any delay. */
-
- return IRQ_HANDLED;
- }
-
- static void sspi_complete(void *context)
- {
- /* Normally the DMA or the SPI framework will call you back
- * in something similar to this. The only thing you need to
- * do is to call the xfer_done_cb() function, providing the pointer
- * to the CAIF SPI interface. It is OK to call this function
- * from IRQ context. */
- }
-
- static int sspi_init_xfer(struct cfspi_xfer *xfer, struct cfspi_dev *dev)
- {
- /* Store transfer info. For a normal implementation you should
- * set up your DMA here and make sure that you are ready to
- * receive the data from the master SPI. */
-
- struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
-
- sspi->xfer = xfer;
-
- return 0;
- }
-
- void sspi_sig_xfer(bool xfer, struct cfspi_dev *dev)
- {
- /* If xfer is true then you should assert the SPI_INT to indicate to
- * the master that you are ready to receive the data from the master
- * SPI. If xfer is false then you should de-assert SPI_INT to indicate
- * that the transfer is done.
- */
-
- struct sspi_struct *sspi = (struct sspi_struct *)dev->priv;
- }
-
- static void sspi_release(struct device *dev)
- {
- /*
- * Here you should release your SPI device resources.
- */
- }
-
- static int __init sspi_init(void)
- {
- /* Here you should initialize your SPI device by providing the
- * necessary functions, clock speed, name and private data. Once
- * done, you can register your device with the
- * platform_device_register() function. This function will return
- * with the CAIF SPI interface initialized. This is probably also
- * the place where you should set up your GPIOs, interrupts and SPI
- * resources. */
-
- int res = 0;
-
- /* Initialize slave device. */
- slave.sdev.init_xfer = sspi_init_xfer;
- slave.sdev.sig_xfer = sspi_sig_xfer;
- slave.sdev.clk_mhz = 13;
- slave.sdev.priv = &slave;
- slave.sdev.name = "spi_sspi";
- slave_device.dev.release = sspi_release;
-
- /* Initialize platform device. */
- slave_device.name = "cfspi_sspi";
- slave_device.dev.platform_data = &slave.sdev;
-
- /* Register platform device. */
- res = platform_device_register(&slave_device);
- if (res) {
- printk(KERN_WARNING "sspi_init: failed to register dev.\n");
- return -ENODEV;
- }
-
- return res;
- }
-
- static void __exit sspi_exit(void)
- {
- platform_device_del(&slave_device);
- }
-
- module_init(sspi_init);
- module_exit(sspi_exit);
diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
index 11af6388ea87..3561a8a29fd2 100644
--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
@@ -39,16 +39,6 @@ debug logs.
Some of the ENA devices support a working mode called Low-latency
Queue (LLQ), which saves several more microseconds.
-Supported PCI vendor ID/device IDs
-==================================
-
-========= =======================
-1d0f:0ec2 ENA PF
-1d0f:1ec2 ENA PF with LLQ support
-1d0f:ec20 ENA VF
-1d0f:ec21 ENA VF with LLQ support
-========= =======================
-
ENA Source Code Directory Structure
===================================
@@ -212,20 +202,11 @@ In adaptive interrupt moderation mode the interrupt delay value is
updated by the driver dynamically and adjusted every NAPI cycle
according to the traffic nature.
-By default ENA driver applies adaptive coalescing on Rx traffic and
-conventional coalescing on Tx traffic.
-
Adaptive coalescing can be switched on/off through ethtool(8)
adaptive_rx on|off parameter.
-The driver chooses interrupt delay value according to the number of
-bytes and packets received between interrupt unmasking and interrupt
-posting. The driver uses interrupt delay table that subdivides the
-range of received bytes/packets into 5 levels and assigns interrupt
-delay value to each level.
-
-The user can enable/disable adaptive moderation, modify the interrupt
-delay table and restore its default values through sysfs.
+More information about Adaptive Interrupt Moderation (DIM) can be found in
+Documentation/networking/net_dim.rst
RX copybreak
============
@@ -274,7 +255,7 @@ RSS
inputs for hash functions.
- The driver configures RSS settings using the AQ SetFeature command
(ENA_ADMIN_RSS_HASH_FUNCTION, ENA_ADMIN_RSS_HASH_INPUT and
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG properties).
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG properties).
- If the NETIF_F_RXHASH flag is set, the 32-bit result of the hash
function delivered in the Rx CQ descriptor is set in the received
SKB.
diff --git a/Documentation/networking/devlink/devlink-flash.rst b/Documentation/networking/devlink/devlink-flash.rst
index 40a87c0222cb..603e732f00cc 100644
--- a/Documentation/networking/devlink/devlink-flash.rst
+++ b/Documentation/networking/devlink/devlink-flash.rst
@@ -16,6 +16,34 @@ Note that the file name is a path relative to the firmware loading path
(usually ``/lib/firmware/``). Drivers may send status updates to inform
user space about the progress of the update operation.
+Overwrite Mask
+==============
+
+The ``devlink-flash`` command allows optionally specifying a mask indicating
+how the device should handle subsections of flash components when updating.
+This mask indicates the set of sections which are allowed to be overwritten.
+
+.. list-table:: List of overwrite mask bits
+ :widths: 5 95
+
+ * - Name
+ - Description
+ * - ``DEVLINK_FLASH_OVERWRITE_SETTINGS``
+ - Indicates that the device should overwrite settings in the components
+ being updated with the settings found in the provided image.
+ * - ``DEVLINK_FLASH_OVERWRITE_IDENTIFIERS``
+ - Indicates that the device should overwrite identifiers in the
+ components being updated with the identifiers found in the provided
+ image. This includes MAC addresses, serial IDs, and similar device
+ identifiers.
+
+Multiple overwrite bits may be combined and requested together. If no bits
+are provided, it is expected that the device only update firmware binaries
+in the components being updated. Settings and identifiers are expected to be
+preserved across the update. A device may not support every combination and
+the driver for such a device must reject any combination which cannot be
+faithfully implemented.
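+
+For instance, a request that should overwrite both settings and
+identifiers would combine the two mask bits, which are defined in
+``include/uapi/linux/devlink.h``::
+
+    #include <linux/devlink.h>
+
+    __u32 overwrite_mask = DEVLINK_FLASH_OVERWRITE_SETTINGS |
+                           DEVLINK_FLASH_OVERWRITE_IDENTIFIERS;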
+
Firmware Loading
================
diff --git a/Documentation/networking/devlink/devlink-params.rst b/Documentation/networking/devlink/devlink-params.rst
index d075fd090b3d..54c9f107c4b0 100644
--- a/Documentation/networking/devlink/devlink-params.rst
+++ b/Documentation/networking/devlink/devlink-params.rst
@@ -108,3 +108,9 @@ own name.
* - ``region_snapshot_enable``
- Boolean
- Enable capture of ``devlink-region`` snapshots.
+ * - ``enable_remote_dev_reset``
+ - Boolean
+ - Enable device reset by remote host. When cleared, the device driver
+ will NACK any attempt by another host to reset the device. This
+ parameter is useful for setups where a device is shared by different
+ hosts, such as a multi-host setup.
diff --git a/Documentation/networking/devlink/devlink-reload.rst b/Documentation/networking/devlink/devlink-reload.rst
new file mode 100644
index 000000000000..505d22da027d
--- /dev/null
+++ b/Documentation/networking/devlink/devlink-reload.rst
@@ -0,0 +1,81 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============
+Devlink Reload
+==============
+
+``devlink-reload`` provides a mechanism to reinitialize driver entities,
+applying new ``devlink-params`` and ``devlink-resources`` values. It also
+provides a mechanism to activate firmware.
+
+Reload Actions
+==============
+
+The user may select a reload action.
+By default, the ``driver_reinit`` action is selected.
+
+.. list-table:: Possible reload actions
+ :widths: 5 90
+
+ * - Name
+ - Description
+ * - ``driver_reinit``
+ - Devlink driver entities re-initialization, including applying
+ new values to devlink entities which are used during driver
+ load, such as ``devlink-params`` in configuration mode
+ ``driverinit`` or ``devlink-resources``
+ * - ``fw_activate``
+ - Firmware activation. Activates new firmware if such an image is
+ stored and pending activation. If no limit is specified, this action
+ may involve a firmware reset. If no new image is pending, this action
+ will reload the current firmware image.
+
+Note that even though the user asks for a specific action, the driver
+implementation might require performing another action alongside it.
+For example, some drivers do not support driver reinitialization
+without firmware activation. Therefore, the devlink reload command
+returns the list of actions which were actually performed.
+
+Reload Limits
+=============
+
+By default, reload actions are not limited and the driver implementation
+may include a reset or downtime as needed to perform the actions.
+
+However, some drivers support action limits, which limit the action
+implementation to specific constraints.
+
+.. list-table:: Possible reload limits
+ :widths: 5 90
+
+ * - Name
+ - Description
+ * - ``no_reset``
+ - No reset allowed, no down time allowed, no link flap and no
+ configuration is lost.
+
+Change Namespace
+================
+
+The netns option allows the user to move devlink instances into network
+namespaces during a devlink reload operation.
+By default, all devlink instances are created in init_net and stay there.
+
+Example usage
+-------------
+
+.. code:: shell
+
+ $ devlink dev reload help
+ $ devlink dev reload DEV [ netns { PID | NAME | ID } ] [ action { driver_reinit | fw_activate } ] [ limit no_reset ]
+
+ # Run reload command for devlink driver entities re-initialization:
+ $ devlink dev reload pci/0000:82:00.0 action driver_reinit
+ reload_actions_performed:
+ driver_reinit
+
+ # Run reload command to activate firmware:
+ # Note that mlx5 driver reloads the driver while activating firmware
+ $ devlink dev reload pci/0000:82:00.0 action fw_activate
+ reload_actions_performed:
+ driver_reinit fw_activate
diff --git a/Documentation/networking/devlink/devlink-trap.rst b/Documentation/networking/devlink/devlink-trap.rst
index 7a798352b45d..ef719ceac299 100644
--- a/Documentation/networking/devlink/devlink-trap.rst
+++ b/Documentation/networking/devlink/devlink-trap.rst
@@ -409,6 +409,73 @@ be added to the following table:
- ``drop``
- Traps packets dropped due to the RED (Random Early Detection) algorithm
(i.e., early drops)
+ * - ``vxlan_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the VXLAN header parsing,
+ which might be because of packet truncation or because the I flag is
+ not set.
+ * - ``llc_snap_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the LLC+SNAP header parsing
+ * - ``vlan_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the VLAN header parsing. Could
+ include unexpected packet truncation.
+ * - ``pppoe_ppp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the PPPoE+PPP header parsing.
+ This could include finding a session ID of 0xFFFF (which is reserved and
+ not for use), a PPPoE length which is larger than the received frame,
+ or any common error on this type of header
+ * - ``mpls_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the MPLS header parsing which
+ could include unexpected header truncation
+ * - ``arp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the ARP header parsing
+ * - ``ip_1_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the first IP header parsing.
+ This packet trap could include packets which do not pass an IP checksum
+ check or a header length check (a minimum of 20 bytes), or which suffer
+ from packet truncation such that the total length field exceeds the
+ received packet length, etc
+ * - ``ip_n_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the parsing of the last IP
+ header (the inner one in case of an IP over IP tunnel). The same common
+ error checking is performed here as for the ip_1_parsing trap
+ * - ``gre_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the GRE header parsing
+ * - ``udp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the UDP header parsing.
+ This packet trap could include checksum errors, an improper UDP
+ length detected (smaller than 8 bytes) or detection of header
+ truncation.
+ * - ``tcp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the TCP header parsing.
+ This could include TCP checksum errors, an improper combination of
+ SYN, FIN and/or RESET flags, etc.
+ * - ``ipsec_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the IPSEC header parsing
+ * - ``sctp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the SCTP header parsing.
+ This would mean that port number 0 was used or that the header is
+ truncated.
+ * - ``dccp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the DCCP header parsing
+ * - ``gtp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the GTP header parsing
+ * - ``esp_parsing``
+ - ``drop``
+ - Traps packets dropped due to an error in the ESP header parsing
Driver-specific Packet Traps
============================
@@ -509,6 +576,9 @@ narrow. The description of these groups must be added to the following table:
* - ``acl_trap``
- Contains packet traps for packets that were trapped (logged) by the
device during ACL processing
+ * - ``parser_error_drops``
+ - Contains packet traps for packets that were marked by the device during
+ parsing as erroneous
Packet Trap Policers
====================
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index 237848d56f9b..b165181d5d4d 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -69,6 +69,11 @@ The ``ice`` driver reports the following versions
- The version of the DDP package that is active in the device. Note
that both the name (as reported by ``fw.app.name``) and version are
required to uniquely identify the package.
+ * - ``fw.app.bundle_id``
+ - 0xc0000001
+ - Unique identifier for the DDP package loaded in the device. Also
+ referred to as the DDP Track ID. Can be used to uniquely identify
+ the specific DDP package.
* - ``fw.netlist``
- running
- 1.1.2000-6.7.0
@@ -81,6 +86,37 @@ The ``ice`` driver reports the following versions
- 0xee16ced7
- The first 4 bytes of the hash of the netlist module contents.
+Flash Update
+============
+
+The ``ice`` driver implements support for flash update using the
+``devlink-flash`` interface. It supports updating the device flash using a
+combined flash image that contains the ``fw.mgmt``, ``fw.undi``, and
+``fw.netlist`` components.
+
+.. list-table:: List of supported overwrite modes
+ :widths: 5 95
+
+ * - Bits
+ - Behavior
+ * - ``DEVLINK_FLASH_OVERWRITE_SETTINGS``
+ - Do not preserve settings stored in the flash components being
+ updated. This includes overwriting the port configuration that
+ determines the number of physical functions the device will
+ initialize with.
+ * - ``DEVLINK_FLASH_OVERWRITE_SETTINGS`` and ``DEVLINK_FLASH_OVERWRITE_IDENTIFIERS``
+ - Do not preserve either settings or identifiers. Overwrite everything
+ in the flash with the contents from the provided image, without
+ performing any preservation. This includes overwriting device
+ identifying fields such as the MAC address, VPD area, and device
+ serial number. It is expected that this combination be used with an
+ image customized for the specific device.
+
+The ice hardware does not support overwriting only identifiers while
+preserving settings, and thus ``DEVLINK_FLASH_OVERWRITE_IDENTIFIERS`` on its
+own will be rejected. If no overwrite mask is provided, the firmware will be
+instructed to preserve all settings and identifying fields when updating.
+
Regions
=======
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index 7684ae5c4a4a..d82874760ae2 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -20,6 +20,7 @@ general.
devlink-params
devlink-region
devlink-resource
+ devlink-reload
devlink-trap
Driver-specific documentation
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index b5a79881551f..30b98245979f 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -68,6 +68,7 @@ the flags may not apply to requests. Recognized flags are:
================================= ===================================
``ETHTOOL_FLAG_COMPACT_BITSETS`` use compact format bitsets in reply
``ETHTOOL_FLAG_OMIT_REPLY`` omit optional reply (_SET and _ACT)
+ ``ETHTOOL_FLAG_STATS`` include optional device statistics
================================= ===================================
New request flags should follow the general idea that if the flag is not set,
@@ -991,8 +992,18 @@ Kernel response contents:
``ETHTOOL_A_PAUSE_AUTONEG`` bool pause autonegotiation
``ETHTOOL_A_PAUSE_RX`` bool receive pause frames
``ETHTOOL_A_PAUSE_TX`` bool transmit pause frames
+ ``ETHTOOL_A_PAUSE_STATS`` nested pause statistics
===================================== ====== ==========================
+``ETHTOOL_A_PAUSE_STATS`` is reported if ``ETHTOOL_FLAG_STATS`` was set
+in ``ETHTOOL_A_HEADER_FLAGS``.
+It will be empty if the driver did not report any statistics. Drivers fill in
+the statistics in the following structure:
+
+.. kernel-doc:: include/linux/ethtool.h
+ :identifiers: ethtool_pause_stats
+
+Each member has a corresponding attribute defined.
PAUSE_SET
============
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 611e4b130c1e..63ef386afd0a 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -93,6 +93,7 @@ Contents:
sctp
secid
seg6-sysctl
+ statistics
strparser
switchdev
sysfs-tagging
diff --git a/Documentation/networking/kapi.rst b/Documentation/networking/kapi.rst
index f03ae64be8bc..d198fa5eaacd 100644
--- a/Documentation/networking/kapi.rst
+++ b/Documentation/networking/kapi.rst
@@ -134,6 +134,15 @@ PHY Support
.. kernel-doc:: drivers/net/phy/phy.c
:internal:
+.. kernel-doc:: drivers/net/phy/phy-core.c
+ :export:
+
+.. kernel-doc:: drivers/net/phy/phy-c45.c
+ :export:
+
+.. kernel-doc:: include/linux/phy.h
+ :internal:
+
.. kernel-doc:: drivers/net/phy/phy_device.c
:export:
diff --git a/Documentation/networking/l2tp.rst b/Documentation/networking/l2tp.rst
index a48238a2ec09..498b382d25a0 100644
--- a/Documentation/networking/l2tp.rst
+++ b/Documentation/networking/l2tp.rst
@@ -4,124 +4,364 @@
L2TP
====
-This document describes how to use the kernel's L2TP drivers to
-provide L2TP functionality. L2TP is a protocol that tunnels one or
-more sessions over an IP tunnel. It is commonly used for VPNs
-(L2TP/IPSec) and by ISPs to tunnel subscriber PPP sessions over an IP
-network infrastructure. With L2TPv3, it is also useful as a Layer-2
-tunneling infrastructure.
-
-Features
+Layer 2 Tunneling Protocol (L2TP) allows L2 frames to be tunneled over
+an IP network.
+
+This document covers the kernel's L2TP subsystem. It documents kernel
+APIs for application developers who want to use the L2TP subsystem and
+it provides some technical details about the internal implementation
+which may be useful to kernel developers and maintainers.
+
+Overview
========
-L2TPv2 (PPP over L2TP (UDP tunnels)).
-L2TPv3 ethernet pseudowires.
-L2TPv3 PPP pseudowires.
-L2TPv3 IP encapsulation.
-Netlink sockets for L2TPv3 configuration management.
-
-History
-=======
-
-The original pppol2tp driver was introduced in 2.6.23 and provided
-L2TPv2 functionality (rfc2661). L2TPv2 is used to tunnel one or more PPP
-sessions over a UDP tunnel.
-
-L2TPv3 (rfc3931) changes the protocol to allow different frame types
-to be passed over an L2TP tunnel by moving the PPP-specific parts of
-the protocol out of the core L2TP packet headers. Each frame type is
-known as a pseudowire type. Ethernet, PPP, HDLC, Frame Relay and ATM
-pseudowires for L2TP are defined in separate RFC standards. Another
-change for L2TPv3 is that it can be carried directly over IP with no
-UDP header (UDP is optional). It is also possible to create static
-unmanaged L2TPv3 tunnels manually without a control protocol
-(userspace daemon) to manage them.
-
-To support L2TPv3, the original pppol2tp driver was split up to
-separate the L2TP and PPP functionality. Existing L2TPv2 userspace
-apps should be unaffected as the original pppol2tp sockets API is
-retained. L2TPv3, however, uses netlink to manage L2TPv3 tunnels and
-sessions.
-
-Design
-======
-
-The L2TP protocol separates control and data frames. The L2TP kernel
-drivers handle only L2TP data frames; control frames are always
-handled by userspace. L2TP control frames carry messages between L2TP
-clients/servers and are used to setup / teardown tunnels and
-sessions. An L2TP client or server is implemented in userspace.
-
-Each L2TP tunnel is implemented using a UDP or L2TPIP socket; L2TPIP
-provides L2TPv3 IP encapsulation (no UDP) and is implemented using a
-new l2tpip socket family. The tunnel socket is typically created by
-userspace, though for unmanaged L2TPv3 tunnels, the socket can also be
-created by the kernel. Each L2TP session (pseudowire) gets a network
-interface instance. In the case of PPP, these interfaces are created
-indirectly by pppd using a pppol2tp socket. In the case of ethernet,
-the netdevice is created upon a netlink request to create an L2TPv3
-ethernet pseudowire.
-
-For PPP, the PPPoL2TP driver, net/l2tp/l2tp_ppp.c, provides a
-mechanism by which PPP frames carried through an L2TP session are
-passed through the kernel's PPP subsystem. The standard PPP daemon,
-pppd, handles all PPP interaction with the peer. PPP network
-interfaces are created for each local PPP endpoint. The kernel's PPP
-subsystem arranges for PPP control frames to be delivered to pppd,
-while data frames are forwarded as usual.
-
-For ethernet, the L2TPETH driver, net/l2tp/l2tp_eth.c, implements a
-netdevice driver, managing virtual ethernet devices, one per
-pseudowire. These interfaces can be managed using standard Linux tools
-such as "ip" and "ifconfig". If only IP frames are passed over the
-tunnel, the interface can be given an IP addresses of itself and its
-peer. If non-IP frames are to be passed over the tunnel, the interface
-can be added to a bridge using brctl. All L2TP datapath protocol
-functions are handled by the L2TP core driver.
-
-Each tunnel and session within a tunnel is assigned a unique tunnel_id
-and session_id. These ids are carried in the L2TP header of every
-control and data packet. (Actually, in L2TPv3, the tunnel_id isn't
-present in data frames - it is inferred from the IP connection on
-which the packet was received.) The L2TP driver uses the ids to lookup
-internal tunnel and/or session contexts to determine how to handle the
-packet. Zero tunnel / session ids are treated specially - zero ids are
-never assigned to tunnels or sessions in the network. In the driver,
-the tunnel context keeps a reference to the tunnel UDP or L2TPIP
-socket. The session context holds data that lets the driver interface
-to the kernel's network frame type subsystems, i.e. PPP, ethernet.
-
-Userspace Programming
-=====================
-
-For L2TPv2, there are a number of requirements on the userspace L2TP
-daemon in order to use the pppol2tp driver.
-
-1. Use a UDP socket per tunnel.
-
-2. Create a single PPPoL2TP socket per tunnel bound to a special null
- session id. This is used only for communicating with the driver but
- must remain open while the tunnel is active. Opening this tunnel
- management socket causes the driver to mark the tunnel socket as an
- L2TP UDP encapsulation socket and flags it for use by the
- referenced tunnel id. This hooks up the UDP receive path via
- udp_encap_rcv() in net/ipv4/udp.c. PPP data frames are never passed
- in this special PPPoX socket.
-
-3. Create a PPPoL2TP socket per L2TP session. This is typically done
- by starting pppd with the pppol2tp plugin and appropriate
- arguments. A PPPoL2TP tunnel management socket (Step 2) must be
- created before the first PPPoL2TP session socket is created.
+The kernel's L2TP subsystem implements the datapath for L2TPv2 and
+L2TPv3. L2TPv2 is carried over UDP. L2TPv3 is carried over UDP or
+directly over IP (protocol 115).
+
+The L2TP RFCs define two basic kinds of L2TP packets: control packets
+(the "control plane"), and data packets (the "data plane"). The kernel
+deals only with data packets. The more complex control packets are
+handled by user space.
+
+An L2TP tunnel carries one or more L2TP sessions. Each tunnel is
+associated with a socket. Each session is associated with a virtual
+netdevice, e.g. ``pppN``, ``l2tpethN``, through which data frames pass
+to/from L2TP. Fields in the L2TP header identify the tunnel or session
+and whether it is a control or data packet. When tunnels and sessions
+are set up using the Linux kernel API, we're just setting up the L2TP
+data path. All aspects of the control protocol are to be handled by
+user space.
+
+This split in responsibilities leads to a natural sequence of
+operations when establishing tunnels and sessions. The procedure looks
+like this:
+
+ 1) Create a tunnel socket. Exchange L2TP control protocol messages
+ with the peer over that socket in order to establish a tunnel.
+
+ 2) Create a tunnel context in the kernel, using information
+ obtained from the peer using the control protocol messages.
+
+ 3) Exchange L2TP control protocol messages with the peer over the
+ tunnel socket in order to establish a session.
+
+ 4) Create a session context in the kernel using information
+ obtained from the peer using the control protocol messages.
+
+L2TP APIs
+=========
+
+This section documents each userspace API of the L2TP subsystem.
+
+Tunnel Sockets
+--------------
+
+L2TPv2 always uses UDP. L2TPv3 may use UDP or IP encapsulation.
+
+To create a tunnel socket for use by L2TP, the standard POSIX
+socket API is used.
+
+For example, for a tunnel using IPv4 addresses and UDP encapsulation::
+
+ int sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+
+Or for a tunnel using IPv6 addresses and IP encapsulation::
+
+ int sockfd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);
+
+UDP socket programming doesn't need to be covered here.
+
+IPPROTO_L2TP is an IP protocol type implemented by the kernel's L2TP
+subsystem. The L2TPIP socket address is defined in struct
+sockaddr_l2tpip and struct sockaddr_l2tpip6 at
+`include/uapi/linux/l2tp.h`_. The address includes the L2TP tunnel
+(connection) id. To use L2TP IP encapsulation, an L2TPv3 application
+should bind the L2TPIP socket using the locally assigned
+tunnel id. When the peer's tunnel id and IP address are known, a
+connect must be done.
+
+If the L2TP application needs to handle L2TPv3 tunnel setup requests
+from peers using L2TPIP, it must open a dedicated L2TPIP
+socket to listen for those requests and bind the socket using tunnel
+id 0 since tunnel setup requests are addressed to tunnel id 0.
+
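+For example, a minimal sketch of binding and connecting an L2TPIP
+socket, assuming illustrative addresses and hypothetical ``local_tid``
+/ ``peer_tid`` tunnel id variables (error handling omitted)::
+
+  struct sockaddr_l2tpip addr;
+  int sockfd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+
+  memset(&addr, 0, sizeof(addr));
+  addr.l2tp_family = AF_INET;
+  addr.l2tp_addr.s_addr = inet_addr("192.0.2.1"); /* local address */
+  addr.l2tp_conn_id = local_tid;                  /* local tunnel id */
+  bind(sockfd, (struct sockaddr *)&addr, sizeof(addr));
+
+  addr.l2tp_addr.s_addr = inet_addr("192.0.2.2"); /* peer address */
+  addr.l2tp_conn_id = peer_tid;                   /* peer tunnel id */
+  connect(sockfd, (struct sockaddr *)&addr, sizeof(addr));
+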
+An L2TP tunnel and all of its sessions are automatically closed when
+its tunnel socket is closed.
+
+Netlink API
+-----------
+
+L2TP applications use netlink to manage L2TP tunnel and session
+instances in the kernel. The L2TP netlink API is defined in
+`include/uapi/linux/l2tp.h`_.
+
+L2TP uses `Generic Netlink`_ (GENL). Several commands are defined:
+Create, Delete, Modify and Get for tunnel and session
+instances, e.g. ``L2TP_CMD_TUNNEL_CREATE``. The API header lists the
+netlink attribute types that can be used with each command.
+
+Tunnel and session instances are identified by a locally unique
+32-bit id. L2TP tunnel ids are given by ``L2TP_ATTR_CONN_ID`` and
+``L2TP_ATTR_PEER_CONN_ID`` attributes and L2TP session ids are given
+by ``L2TP_ATTR_SESSION_ID`` and ``L2TP_ATTR_PEER_SESSION_ID``
+attributes. If netlink is used to manage L2TPv2 tunnel and session
+instances, the L2TPv2 16-bit tunnel/session id is cast to a 32-bit
+value in these attributes.
+
+In the ``L2TP_CMD_TUNNEL_CREATE`` command, ``L2TP_ATTR_FD`` tells the
+kernel the tunnel socket fd being used. If not specified, the kernel
+creates a kernel socket for the tunnel, using IP parameters set in
+``L2TP_ATTR_IP[6]_SADDR``, ``L2TP_ATTR_IP[6]_DADDR``,
+``L2TP_ATTR_UDP_SPORT``, ``L2TP_ATTR_UDP_DPORT`` attributes. Kernel
+sockets are used to implement unmanaged L2TPv3 tunnels (iproute2's "ip
+l2tp" commands). If ``L2TP_ATTR_FD`` is given, it must be a socket fd
+that is already bound and connected. There is more information about
+unmanaged tunnels later in this document.
+
+``L2TP_CMD_TUNNEL_CREATE`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID Y Sets the tunnel (connection) id.
+PEER_CONN_ID Y Sets the peer tunnel (connection) id.
+PROTO_VERSION Y Protocol version. 2 or 3.
+ENCAP_TYPE Y Encapsulation type: UDP or IP.
+FD N Tunnel socket file descriptor.
+UDP_CSUM N Enable IPv4 UDP checksums. Used only if FD is
+ not set.
+UDP_ZERO_CSUM6_TX N Zero IPv6 UDP checksum on transmit. Used only
+ if FD is not set.
+UDP_ZERO_CSUM6_RX N Zero IPv6 UDP checksum on receive. Used only if
+ FD is not set.
+IP_SADDR N IPv4 source address. Used only if FD is not
+ set.
+IP_DADDR N IPv4 destination address. Used only if FD is
+ not set.
+UDP_SPORT N UDP source port. Used only if FD is not set.
+UDP_DPORT N UDP destination port. Used only if FD is not
+ set.
+IP6_SADDR N IPv6 source address. Used only if FD is not
+ set.
+IP6_DADDR N IPv6 destination address. Used only if FD is
+ not set.
+DEBUG N Debug flags.
+================== ======== ===
+
+``L2TP_CMD_TUNNEL_DELETE`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID Y Identifies the tunnel id to be destroyed.
+================== ======== ===
+
+``L2TP_CMD_TUNNEL_MODIFY`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID Y Identifies the tunnel id to be modified.
+DEBUG N Debug flags.
+================== ======== ===
+
+``L2TP_CMD_TUNNEL_GET`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID N Identifies the tunnel id to be queried.
+ Ignored in DUMP requests.
+================== ======== ===
+
+``L2TP_CMD_SESSION_CREATE`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID Y The parent tunnel id.
+SESSION_ID Y Sets the session id.
+PEER_SESSION_ID    Y         Sets the peer session id.
+PW_TYPE Y Sets the pseudowire type.
+DEBUG N Debug flags.
+RECV_SEQ N Enable rx data sequence numbers.
+SEND_SEQ N Enable tx data sequence numbers.
+LNS_MODE N Enable LNS mode (auto-enable data sequence
+ numbers).
+RECV_TIMEOUT N Timeout to wait when reordering received
+ packets.
+L2SPEC_TYPE N Sets layer2-specific-sublayer type (L2TPv3
+ only).
+COOKIE N Sets optional cookie (L2TPv3 only).
+PEER_COOKIE N Sets optional peer cookie (L2TPv3 only).
+IFNAME N Sets interface name (L2TPv3 only).
+================== ======== ===
+
+For Ethernet session types, this will create an l2tpeth virtual
+interface which can then be configured as required. For PPP session
+types, a PPPoL2TP socket must also be opened and connected, mapping it
+onto the new session. This is covered in "PPPoL2TP Session Socket
+API" below.
+
+``L2TP_CMD_SESSION_DELETE`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID Y Identifies the parent tunnel id of the session
+ to be destroyed.
+SESSION_ID Y Identifies the session id to be destroyed.
+IFNAME N Identifies the session by interface name. If
+ set, this overrides any CONN_ID and SESSION_ID
+ attributes. Currently supported for L2TPv3
+ Ethernet sessions only.
+================== ======== ===
+
+``L2TP_CMD_SESSION_MODIFY`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID Y Identifies the parent tunnel id of the session
+ to be modified.
+SESSION_ID Y Identifies the session id to be modified.
+IFNAME N Identifies the session by interface name. If
+ set, this overrides any CONN_ID and SESSION_ID
+ attributes. Currently supported for L2TPv3
+ Ethernet sessions only.
+DEBUG N Debug flags.
+RECV_SEQ N Enable rx data sequence numbers.
+SEND_SEQ N Enable tx data sequence numbers.
+LNS_MODE N Enable LNS mode (auto-enable data sequence
+ numbers).
+RECV_TIMEOUT N Timeout to wait when reordering received
+ packets.
+================== ======== ===
+
+``L2TP_CMD_SESSION_GET`` attributes:-
+
+================== ======== ===
+Attribute Required Use
+================== ======== ===
+CONN_ID N Identifies the tunnel id to be queried.
+ Ignored for DUMP requests.
+SESSION_ID N Identifies the session id to be queried.
+ Ignored for DUMP requests.
+IFNAME N Identifies the session by interface name.
+ If set, this overrides any CONN_ID and
+ SESSION_ID attributes. Ignored for DUMP
+ requests. Currently supported for L2TPv3
+ Ethernet sessions only.
+================== ======== ===
+
+Application developers should refer to `include/uapi/linux/l2tp.h`_ for
+netlink command and attribute definitions.
+
+Sample userspace code using libmnl_ (note that the socket setup in
+the first snippet uses libnl's GENL helpers):
+
+ - Open L2TP netlink socket::
+
+ struct nl_sock *nl_sock;
+      int genl_id;
+
+ nl_sock = nl_socket_alloc();
+ genl_connect(nl_sock);
+ genl_id = genl_ctrl_resolve(nl_sock, L2TP_GENL_NAME);
+
+ - Create a tunnel::
+
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *gnlh;
+
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = genl_id; /* assigned to genl socket */
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+
+ gnlh = mnl_nlmsg_put_extra_header(nlh, sizeof(*gnlh));
+ gnlh->cmd = L2TP_CMD_TUNNEL_CREATE;
+ gnlh->version = L2TP_GENL_VERSION;
+ gnlh->reserved = 0;
+
+ mnl_attr_put_u32(nlh, L2TP_ATTR_FD, tunl_sock_fd);
+ mnl_attr_put_u32(nlh, L2TP_ATTR_CONN_ID, tid);
+ mnl_attr_put_u32(nlh, L2TP_ATTR_PEER_CONN_ID, peer_tid);
+ mnl_attr_put_u8(nlh, L2TP_ATTR_PROTO_VERSION, protocol_version);
+ mnl_attr_put_u16(nlh, L2TP_ATTR_ENCAP_TYPE, encap);
+
+ - Create a session::
+
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *gnlh;
+
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = genl_id; /* assigned to genl socket */
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+
+ gnlh = mnl_nlmsg_put_extra_header(nlh, sizeof(*gnlh));
+ gnlh->cmd = L2TP_CMD_SESSION_CREATE;
+ gnlh->version = L2TP_GENL_VERSION;
+ gnlh->reserved = 0;
+
+ mnl_attr_put_u32(nlh, L2TP_ATTR_CONN_ID, tid);
+ mnl_attr_put_u32(nlh, L2TP_ATTR_PEER_CONN_ID, peer_tid);
+ mnl_attr_put_u32(nlh, L2TP_ATTR_SESSION_ID, sid);
+ mnl_attr_put_u32(nlh, L2TP_ATTR_PEER_SESSION_ID, peer_sid);
+ mnl_attr_put_u16(nlh, L2TP_ATTR_PW_TYPE, pwtype);
+ /* there are other session options which can be set using netlink
+ * attributes during session creation -- see l2tp.h
+ */
+
+ - Delete a session::
+
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *gnlh;
+
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = genl_id; /* assigned to genl socket */
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+
+ gnlh = mnl_nlmsg_put_extra_header(nlh, sizeof(*gnlh));
+ gnlh->cmd = L2TP_CMD_SESSION_DELETE;
+ gnlh->version = L2TP_GENL_VERSION;
+ gnlh->reserved = 0;
+
+ mnl_attr_put_u32(nlh, L2TP_ATTR_CONN_ID, tid);
+ mnl_attr_put_u32(nlh, L2TP_ATTR_SESSION_ID, sid);
+
+ - Delete a tunnel and all of its sessions (if any)::
+
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *gnlh;
+
+ nlh = mnl_nlmsg_put_header(buf);
+ nlh->nlmsg_type = genl_id; /* assigned to genl socket */
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ nlh->nlmsg_seq = seq;
+
+ gnlh = mnl_nlmsg_put_extra_header(nlh, sizeof(*gnlh));
+ gnlh->cmd = L2TP_CMD_TUNNEL_DELETE;
+ gnlh->version = L2TP_GENL_VERSION;
+ gnlh->reserved = 0;
+
+ mnl_attr_put_u32(nlh, L2TP_ATTR_CONN_ID, tid);
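+
+  - Dump all tunnel instances (a sketch following the same pattern;
+    GET commands use the ``NLM_F_DUMP`` flag to iterate over all
+    instances)::
+
+      struct nlmsghdr *nlh;
+      struct genlmsghdr *gnlh;
+
+      nlh = mnl_nlmsg_put_header(buf);
+      nlh->nlmsg_type = genl_id; /* assigned to genl socket */
+      nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+      nlh->nlmsg_seq = seq;
+
+      gnlh = mnl_nlmsg_put_extra_header(nlh, sizeof(*gnlh));
+      gnlh->cmd = L2TP_CMD_TUNNEL_GET;
+      gnlh->version = L2TP_GENL_VERSION;
+      gnlh->reserved = 0;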
+
+PPPoL2TP Session Socket API
+---------------------------
+
+For PPP session types, a PPPoL2TP socket must be opened and connected
+to the L2TP session.
When creating PPPoL2TP sockets, the application provides information
-to the driver about the socket in a socket connect() call. Source and
-destination tunnel and session ids are provided, as well as the file
-descriptor of a UDP socket. See struct pppol2tp_addr in
-include/linux/if_pppol2tp.h. Note that zero tunnel / session ids are
-treated specially. When creating the per-tunnel PPPoL2TP management
-socket in Step 2 above, zero source and destination session ids are
-specified, which tells the driver to prepare the supplied UDP file
-descriptor for use as an L2TP tunnel socket.
+to the kernel about the tunnel and session in a socket connect()
+call. Source and destination tunnel and session ids are provided, as
+well as the file descriptor of a UDP or L2TPIP socket. See struct
+pppol2tp_addr in `include/linux/if_pppol2tp.h`_. For historical reasons,
+there are unfortunately slightly different address structures for
+L2TPv2/L2TPv3 IPv4/IPv6 tunnels and userspace must use the appropriate
+structure that matches the tunnel socket type.
Userspace may control behavior of the tunnel or session using
setsockopt and ioctl on the PPPoX socket. The following socket
@@ -130,229 +370,308 @@ options are supported:-
========= ===========================================================
DEBUG bitmask of debug message categories. See below.
SENDSEQ - 0 => don't send packets with sequence numbers
- - 1 => send packets with sequence numbers
+ - 1 => send packets with sequence numbers
RECVSEQ - 0 => receive packet sequence numbers are optional
- - 1 => drop receive packets without sequence numbers
+ - 1 => drop receive packets without sequence numbers
LNSMODE - 0 => act as LAC.
- - 1 => act as LNS.
+ - 1 => act as LNS.
REORDERTO reorder timeout (in millisecs). If 0, don't try to reorder.
========= ===========================================================
-Only the DEBUG option is supported by the special tunnel management
-PPPoX socket.
-
In addition to the standard PPP ioctls, a PPPIOCGL2TPSTATS is provided
to retrieve tunnel and session statistics from the kernel using the
PPPoX socket of the appropriate tunnel or session.
-For L2TPv3, userspace must use the netlink API defined in
-include/linux/l2tp.h to manage tunnel and session contexts. The
-general procedure to create a new L2TP tunnel with one session is:-
-
-1. Open a GENL socket using L2TP_GENL_NAME for configuring the kernel
- using netlink.
-
-2. Create a UDP or L2TPIP socket for the tunnel.
-
-3. Create a new L2TP tunnel using a L2TP_CMD_TUNNEL_CREATE
- request. Set attributes according to desired tunnel parameters,
- referencing the UDP or L2TPIP socket created in the previous step.
-
-4. Create a new L2TP session in the tunnel using a
- L2TP_CMD_SESSION_CREATE request.
-
-The tunnel and all of its sessions are closed when the tunnel socket
-is closed. The netlink API may also be used to delete sessions and
-tunnels. Configuration and status info may be set or read using netlink.
-
-The L2TP driver also supports static (unmanaged) L2TPv3 tunnels. These
-are where there is no L2TP control message exchange with the peer to
-setup the tunnel; the tunnel is configured manually at each end of the
-tunnel. There is no need for an L2TP userspace application in this
-case -- the tunnel socket is created by the kernel and configured
-using parameters sent in the L2TP_CMD_TUNNEL_CREATE netlink
-request. The "ip" utility of iproute2 has commands for managing static
-L2TPv3 tunnels; do "ip l2tp help" for more information.
+Sample userspace code:
+
+ - Create session PPPoX data socket::
+
+ struct sockaddr_pppol2tp sax;
+      int ret;
+
+ /* Note, the tunnel socket must be bound already, else it
+ * will not be ready
+       */
+      memset(&sax, 0, sizeof(sax));
+      sax.sa_family = AF_PPPOX;
+ sax.sa_protocol = PX_PROTO_OL2TP;
+ sax.pppol2tp.fd = tunnel_fd;
+ sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
+ sax.pppol2tp.addr.sin_port = addr->sin_port;
+ sax.pppol2tp.addr.sin_family = AF_INET;
+ sax.pppol2tp.s_tunnel = tunnel_id;
+ sax.pppol2tp.s_session = session_id;
+ sax.pppol2tp.d_tunnel = peer_tunnel_id;
+ sax.pppol2tp.d_session = peer_session_id;
+
+ /* session_fd is the fd of the session's PPPoL2TP socket.
+ * tunnel_fd is the fd of the tunnel UDP / L2TPIP socket.
+ */
+      ret = connect(session_fd, (struct sockaddr *)&sax, sizeof(sax));
+      if (ret < 0) {
+          return -errno;
+      }
+ return 0;
+
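+  - Set session options and read statistics on the connected PPPoL2TP
+    socket (a minimal sketch; the socket option and ioctl constants
+    are from the kernel uapi headers, error handling omitted)::
+
+      struct pppol2tp_ioc_stats stats;
+      int on = 1;
+
+      /* enable tx data sequence numbers */
+      setsockopt(session_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
+                 &on, sizeof(on));
+
+      /* read tunnel/session counters */
+      memset(&stats, 0, sizeof(stats));
+      stats.tunnel_id = tunnel_id;
+      stats.session_id = session_id;
+      ioctl(session_fd, PPPIOCGL2TPSTATS, &stats);
+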
+Old L2TPv2-only API
+-------------------
+
+When L2TP was first added to the Linux kernel in 2.6.23, it
+implemented only L2TPv2 and did not include a netlink API. Instead,
+tunnel and session instances in the kernel were managed directly using
+only PPPoL2TP sockets. The PPPoL2TP socket is used as described in
+section "PPPoL2TP Session Socket API" but tunnel and session instances
+are automatically created on a connect() of the socket instead of
+being created by a separate netlink request:
+
+ - Tunnels are managed using a tunnel management socket which is a
+ dedicated PPPoL2TP socket, connected to (invalid) session
+ id 0. The L2TP tunnel instance is created when the PPPoL2TP
+ tunnel management socket is connected and is destroyed when the
+ socket is closed.
+
+ - Session instances are created in the kernel when a PPPoL2TP
+ socket is connected to a non-zero session id. Session parameters
+ are set using setsockopt. The L2TP session instance is destroyed
+ when the socket is closed.
+
+This API is still supported but its use is discouraged. Instead, new
+L2TPv2 applications should use netlink to first create the tunnel and
+session, then create a PPPoL2TP socket for the session.
+
+Unmanaged L2TPv3 tunnels
+------------------------
+
+The kernel L2TP subsystem also supports static (unmanaged) L2TPv3
+tunnels. Unmanaged tunnels have no userspace tunnel socket, and
+exchange no control messages with the peer to set up the tunnel; the
+tunnel is configured manually at each end of the tunnel. All
+configuration is done using netlink. There is no need for an L2TP
+userspace application in this case -- the tunnel socket is created by
+the kernel and configured using parameters sent in the
+``L2TP_CMD_TUNNEL_CREATE`` netlink request. The ``ip`` utility of
+``iproute2`` has commands for managing static L2TPv3 tunnels; do ``ip
+l2tp help`` for more information.
Debugging
-=========
-
-The driver supports a flexible debug scheme where kernel trace
-messages may be optionally enabled per tunnel and per session. Care is
-needed when debugging a live system since the messages are not
-rate-limited and a busy system could be swamped. Userspace uses
-setsockopt on the PPPoX socket to set a debug mask.
+---------
-The following debug mask bits are available:
+The L2TP subsystem offers a range of debugging interfaces through the
+debugfs filesystem.
-================ ==============================
-L2TP_MSG_DEBUG verbose debug (if compiled in)
-L2TP_MSG_CONTROL userspace - kernel interface
-L2TP_MSG_SEQ sequence numbers handling
-L2TP_MSG_DATA data packets
-================ ==============================
+To access these interfaces, the debugfs filesystem must first be mounted::
-If enabled, files under a l2tp debugfs directory can be used to dump
-kernel state about L2TP tunnels and sessions. To access it, the
-debugfs filesystem must first be mounted::
+ # mount -t debugfs debugfs /debug
- # mount -t debugfs debugfs /debug
+Files under the l2tp directory can then be accessed, providing a summary
+of the tunnel and session contexts currently instantiated in the
+kernel::
-Files under the l2tp directory can then be accessed::
-
- # cat /debug/l2tp/tunnels
+ # cat /debug/l2tp/tunnels
The debugfs files should not be used by applications to obtain L2TP
state information because the file format is subject to change. It is
implemented to provide extra debug information to help diagnose
-problems.) Users should use the netlink API.
+problems. Applications should instead use the netlink API.
-/proc/net/pppol2tp is also provided for backwards compatibility with
-the original pppol2tp driver. It lists information about L2TPv2
-tunnels and sessions only. Its use is discouraged.
+In addition, the L2TP subsystem implements tracepoints using the standard
+kernel event tracing API. The available L2TP events can be reviewed as
+follows::
-Unmanaged L2TPv3 Tunnels
-========================
-
-Some commercial L2TP products support unmanaged L2TPv3 ethernet
-tunnels, where there is no L2TP control protocol; tunnels are
-configured at each side manually. New commands are available in
-iproute2's ip utility to support this.
-
-To create an L2TPv3 ethernet pseudowire between local host 192.168.1.1
-and peer 192.168.1.2, using IP addresses 10.5.1.1 and 10.5.1.2 for the
-tunnel endpoints::
-
- # ip l2tp add tunnel tunnel_id 1 peer_tunnel_id 1 udp_sport 5000 \
- udp_dport 5000 encap udp local 192.168.1.1 remote 192.168.1.2
- # ip l2tp add session tunnel_id 1 session_id 1 peer_session_id 1
- # ip -s -d show dev l2tpeth0
- # ip addr add 10.5.1.2/32 peer 10.5.1.1/32 dev l2tpeth0
- # ip li set dev l2tpeth0 up
-
-Choose IP addresses to be the address of a local IP interface and that
-of the remote system. The IP addresses of the l2tpeth0 interface can be
-anything suitable.
-
-Repeat the above at the peer, with ports, tunnel/session ids and IP
-addresses reversed. The tunnel and session IDs can be any non-zero
-32-bit number, but the values must be reversed at the peer.
-
-======================== ===================
-Host 1 Host2
-======================== ===================
-udp_sport=5000 udp_sport=5001
-udp_dport=5001 udp_dport=5000
-tunnel_id=42 tunnel_id=45
-peer_tunnel_id=45 peer_tunnel_id=42
-session_id=128 session_id=5196755
-peer_session_id=5196755 peer_session_id=128
-======================== ===================
-
-When done at both ends of the tunnel, it should be possible to send
-data over the network. e.g.::
-
- # ping 10.5.1.1
-
-
-Sample Userspace Code
-=====================
-
-1. Create tunnel management PPPoX socket::
-
- kernel_fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
- if (kernel_fd >= 0) {
- struct sockaddr_pppol2tp sax;
- struct sockaddr_in const *peer_addr;
-
- peer_addr = l2tp_tunnel_get_peer_addr(tunnel);
- memset(&sax, 0, sizeof(sax));
- sax.sa_family = AF_PPPOX;
- sax.sa_protocol = PX_PROTO_OL2TP;
- sax.pppol2tp.fd = udp_fd; /* fd of tunnel UDP socket */
- sax.pppol2tp.addr.sin_addr.s_addr = peer_addr->sin_addr.s_addr;
- sax.pppol2tp.addr.sin_port = peer_addr->sin_port;
- sax.pppol2tp.addr.sin_family = AF_INET;
- sax.pppol2tp.s_tunnel = tunnel_id;
- sax.pppol2tp.s_session = 0; /* special case: mgmt socket */
- sax.pppol2tp.d_tunnel = 0;
- sax.pppol2tp.d_session = 0; /* special case: mgmt socket */
-
- if(connect(kernel_fd, (struct sockaddr *)&sax, sizeof(sax) ) < 0 ) {
- perror("connect failed");
- result = -errno;
- goto err;
- }
- }
-
-2. Create session PPPoX data socket::
-
- struct sockaddr_pppol2tp sax;
- int fd;
-
- /* Note, the target socket must be bound already, else it will not be ready */
- sax.sa_family = AF_PPPOX;
- sax.sa_protocol = PX_PROTO_OL2TP;
- sax.pppol2tp.fd = tunnel_fd;
- sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
- sax.pppol2tp.addr.sin_port = addr->sin_port;
- sax.pppol2tp.addr.sin_family = AF_INET;
- sax.pppol2tp.s_tunnel = tunnel_id;
- sax.pppol2tp.s_session = session_id;
- sax.pppol2tp.d_tunnel = peer_tunnel_id;
- sax.pppol2tp.d_session = peer_session_id;
-
- /* session_fd is the fd of the session's PPPoL2TP socket.
- * tunnel_fd is the fd of the tunnel UDP socket.
- */
- fd = connect(session_fd, (struct sockaddr *)&sax, sizeof(sax));
- if (fd < 0 ) {
- return -errno;
- }
- return 0;
+ # find /debug/tracing/events/l2tp
+
+Finally, /proc/net/pppol2tp is also provided for backwards compatibility
+with the original pppol2tp code. It lists information about L2TPv2
+tunnels and sessions only. Its use is discouraged.
Internal Implementation
=======================
-The driver keeps a struct l2tp_tunnel context per L2TP tunnel and a
-struct l2tp_session context for each session. The l2tp_tunnel is
-always associated with a UDP or L2TP/IP socket and keeps a list of
-sessions in the tunnel. The l2tp_session context keeps kernel state
-about the session. It has private data which is used for data specific
-to the session type. With L2TPv2, the session always carried PPP
-traffic. With L2TPv3, the session can also carry ethernet frames
-(ethernet pseudowire) or other data types such as ATM, HDLC or Frame
-Relay.
-
-When a tunnel is first opened, the reference count on the socket is
-increased using sock_hold(). This ensures that the kernel socket
-cannot be removed while L2TP's data structures reference it.
-
-Some L2TP sessions also have a socket (PPP pseudowires) while others
-do not (ethernet pseudowires). We can't use the socket reference count
-as the reference count for session contexts. The L2TP implementation
-therefore has its own internal reference counts on the session
-contexts.
-
-To Do
-=====
-
-Add L2TP tunnel switching support. This would route tunneled traffic
-from one L2TP tunnel into another. Specified in
-http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08
-
-Add L2TPv3 VLAN pseudowire support.
-
-Add L2TPv3 IP pseudowire support.
-
-Add L2TPv3 ATM pseudowire support.
+This section is for kernel developers and maintainers.
+
+Sockets
+-------
+
+UDP sockets are implemented by the networking core. When an L2TP
+tunnel is created using a UDP socket, the socket is set up as an
+encapsulated UDP socket by setting encap_rcv and encap_destroy
+callbacks on the UDP socket. l2tp_udp_encap_recv is called when
+packets are received on the socket. l2tp_udp_encap_destroy is called
+when userspace closes the socket.
+
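+A simplified sketch of the setup performed when a UDP tunnel socket
+is registered (see ``net/l2tp/l2tp_core.c`` for the real code)::
+
+  struct udp_tunnel_sock_cfg udp_cfg = {
+      .sk_user_data = tunnel,
+      .encap_type = UDP_ENCAP_L2TPINUDP,
+      .encap_rcv = l2tp_udp_encap_recv,
+      .encap_destroy = l2tp_udp_encap_destroy,
+  };
+
+  setup_udp_tunnel_sock(net, sock, &udp_cfg);
+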
+L2TPIP sockets are implemented in `net/l2tp/l2tp_ip.c`_ and
+`net/l2tp/l2tp_ip6.c`_.
+
+Tunnels
+-------
+
+The kernel keeps a struct l2tp_tunnel context per L2TP tunnel. The
+l2tp_tunnel is always associated with a UDP or L2TP/IP socket and
+keeps a list of sessions in the tunnel. When a tunnel is first
+registered with L2TP core, the reference count on the socket is
+increased. This ensures that the socket cannot be removed while L2TP's
+data structures reference it.
+
+Tunnels are identified by a unique tunnel id. The id is 16-bit for
+L2TPv2 and 32-bit for L2TPv3. Internally, the id is stored as a 32-bit
+value.
+
+Tunnels are kept in a per-net list, indexed by tunnel id. The tunnel
+id namespace is shared by L2TPv2 and L2TPv3. The tunnel context can be
+derived from the socket's sk_user_data.
+
+Handling tunnel socket close is perhaps the most tricky part of the
+L2TP implementation. If userspace closes a tunnel socket, the L2TP
+tunnel and all of its sessions must be closed and destroyed. Since the
+tunnel context holds a ref on the tunnel socket, the socket's
+sk_destruct won't be called until the tunnel sock_put's its
+socket. For UDP sockets, when userspace closes the tunnel socket, the
+socket's encap_destroy handler is invoked, which L2TP uses to initiate
+its tunnel close actions. For L2TPIP sockets, the socket's close
+handler initiates the same tunnel close actions. All sessions are
+first closed. Each session drops its tunnel ref. When the tunnel ref
+reaches zero, the tunnel puts its socket ref. When the socket is
+eventually destroyed, its sk_destruct finally frees the L2TP tunnel
+context.
+
+Sessions
+--------
+
+The kernel keeps a struct l2tp_session context for each session. Each
+session has private data which is used for data specific to the
+session type. With L2TPv2, the session always carries PPP
+traffic. With L2TPv3, the session can carry Ethernet frames (Ethernet
+pseudowire) or other data types such as PPP, ATM, HDLC or Frame
+Relay. Linux currently implements only Ethernet and PPP session types.
+
+Some L2TP session types also have a socket (PPP pseudowires) while
+others do not (Ethernet pseudowires). The socket reference count
+therefore cannot be used as the reference count for session
+contexts, so the L2TP implementation has its own internal
+reference counts on the session contexts.
+
+Like tunnels, L2TP sessions are identified by a unique
+session id. Just as with tunnel ids, the session id is 16-bit for
+L2TPv2 and 32-bit for L2TPv3. Internally, the id is stored as a 32-bit
+value.
+
+Sessions hold a ref on their parent tunnel to ensure that the tunnel
+stays extant while one or more sessions reference it.
+
+Sessions are kept in a per-tunnel list, indexed by session id. L2TPv3
+sessions are also kept in a per-net list indexed by session id,
+because L2TPv3 session ids are unique across all tunnels and L2TPv3
+data packets do not contain a tunnel id in the header. This list is
+therefore needed to find the session context associated with a
+received data packet when the tunnel context cannot be derived from
+the tunnel socket.
+
+Although the L2TPv3 RFC specifies that L2TPv3 session ids are not
+scoped by the tunnel, the kernel does not police this for L2TPv3 UDP
+tunnels and does not add sessions of L2TPv3 UDP tunnels into the
+per-net session list. In the UDP receive code, we must trust that the
+tunnel can be identified using the tunnel socket's sk_user_data, and
+look up the session in the tunnel's session list instead of the per-net
+session list.
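+
+In outline, the receive-path lookup described above amounts to the
+following (a simplified sketch of the logic, not the exact kernel
+code)::
+
+  struct l2tp_tunnel *tunnel = sk->sk_user_data;
+
+  if (tunnel)
+      /* UDP: lookup scoped by the tunnel's session list */
+      session = l2tp_tunnel_get_session(tunnel, session_id);
+  else
+      /* L2TPv3 IP: lookup in the per-net session list */
+      session = l2tp_session_get(net, session_id);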
+
+PPP
+---
+
+`net/l2tp/l2tp_ppp.c`_ implements the PPPoL2TP socket family. Each PPP
+session has a PPPoL2TP socket.
+
+The PPPoL2TP socket's sk_user_data references the l2tp_session.
+
+Userspace sends and receives PPP packets over L2TP using a PPPoL2TP
+socket. Only PPP control frames pass over this socket: PPP data
+packets are handled entirely by the kernel, passing between the L2TP
+session and its associated ``pppN`` netdev through the PPP channel
+interface of the kernel PPP subsystem.
+
+The L2TP PPP implementation handles the closing of a PPPoL2TP socket
+by closing its corresponding L2TP session. This is complicated because
+it must consider racing with netlink session create/destroy requests
+and pppol2tp_connect trying to reconnect with a session that is in the
+process of being closed. Unlike tunnels, PPP sessions do not hold a
+ref on their associated socket, so code must be careful to sock_hold
+the socket where necessary. For all the details, see commit
+3d609342cc04129ff7568e19316ce3d7451a27e8.
+
+Ethernet
+--------
+
+`net/l2tp/l2tp_eth.c`_ implements L2TPv3 Ethernet pseudowires. It
+manages a netdev for each session.
+
+L2TP Ethernet sessions are created and destroyed by netlink request,
+or are destroyed when the tunnel is destroyed. Unlike PPP sessions,
+Ethernet sessions do not have an associated socket.
Miscellaneous
=============
-The L2TP drivers were developed as part of the OpenL2TP project by
-Katalix Systems Ltd. OpenL2TP is a full-featured L2TP client / server,
-designed from the ground up to have the L2TP datapath in the
-kernel. The project also implemented the pppol2tp plugin for pppd
-which allows pppd to use the kernel driver. Details can be found at
-http://www.openl2tp.org.
+RFCs
+----
+
+The kernel code implements the datapath features specified in the
+following RFCs:
+
+======= =============== ===================================
+RFC2661 L2TPv2 https://tools.ietf.org/html/rfc2661
+RFC3931 L2TPv3 https://tools.ietf.org/html/rfc3931
+RFC4719 L2TPv3 Ethernet https://tools.ietf.org/html/rfc4719
+======= =============== ===================================
+
+Implementations
+---------------
+
+A number of open source applications use the L2TP kernel subsystem:
+
+============ ==============================================
+iproute2 https://github.com/shemminger/iproute2
+go-l2tp https://github.com/katalix/go-l2tp
+tunneldigger https://github.com/wlanslovenija/tunneldigger
+xl2tpd https://github.com/xelerance/xl2tpd
+============ ==============================================
+
+Limitations
+-----------
+
+The current implementation has a number of limitations:
+
+ 1) Multiple UDP sockets with the same 5-tuple address cannot be
+ used. The kernel's tunnel context is identified using private
+     data associated with the socket, so it is important that each
+     socket is uniquely identified by its address.
+
+ 2) Interfacing with openvswitch is not yet implemented. It may be
+ useful to map OVS Ethernet and VLAN ports into L2TPv3 tunnels.
+
+ 3) VLAN pseudowires are implemented using an ``l2tpethN`` interface
+ configured with a VLAN sub-interface. Since L2TPv3 VLAN
+ pseudowires carry one and only one VLAN, it may be better to use
+     a single netdevice rather than an ``l2tpethN`` and ``l2tpethN.M``
+ pair per VLAN session. The netlink attribute
+ ``L2TP_ATTR_VLAN_ID`` was added for this, but it was never
+ implemented.
+
+Testing
+-------
+
+Unmanaged L2TPv3 Ethernet features are tested by the kernel's built-in
+selftests. See `tools/testing/selftests/net/l2tp.sh`_.
+
+Another test suite, l2tp-ktest_, covers all of the L2TP APIs and
+tunnel/session types. This may be integrated into the kernel's
+built-in L2TP selftests in the future.
+
+.. Links
+.. _Generic Netlink: generic_netlink.html
+.. _libmnl: https://www.netfilter.org/projects/libmnl
+.. _include/uapi/linux/l2tp.h: ../../../include/uapi/linux/l2tp.h
+.. _include/linux/if_pppol2tp.h: ../../../include/linux/if_pppol2tp.h
+.. _net/l2tp/l2tp_ip.c: ../../../net/l2tp/l2tp_ip.c
+.. _net/l2tp/l2tp_ip6.c: ../../../net/l2tp/l2tp_ip6.c
+.. _net/l2tp/l2tp_ppp.c: ../../../net/l2tp/l2tp_ppp.c
+.. _net/l2tp/l2tp_eth.c: ../../../net/l2tp/l2tp_eth.c
+.. _tools/testing/selftests/net/l2tp.sh: ../../../tools/testing/selftests/net/l2tp.sh
+.. _l2tp-ktest: https://github.com/katalix/l2tp-ktest
diff --git a/Documentation/networking/scaling.rst b/Documentation/networking/scaling.rst
index 8f0347b9fb3d..3d435caa3ef2 100644
--- a/Documentation/networking/scaling.rst
+++ b/Documentation/networking/scaling.rst
@@ -465,9 +465,9 @@ XPS Configuration
-----------------
XPS is only available if the kconfig symbol CONFIG_XPS is enabled (on by
-default for SMP). The functionality remains disabled until explicitly
-configured. To enable XPS, the bitmap of CPUs/receive-queues that may
-use a transmit queue is configured using the sysfs file entry:
+default for SMP). If compiled in, it is driver-dependent whether, and
+how, XPS is configured at device init. The mapping of CPUs/receive-queues
+to transmit queues can be inspected and configured using sysfs:
For selection based on CPUs map::
diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
new file mode 100644
index 000000000000..8e15bc98830b
--- /dev/null
+++ b/Documentation/networking/statistics.rst
@@ -0,0 +1,179 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
+Interface statistics
+====================
+
+Overview
+========
+
+This document is a guide to Linux network interface statistics.
+
+There are three main sources of interface statistics in Linux:
+
+ - standard interface statistics based on
+ :c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`;
+ - protocol-specific statistics; and
+ - driver-defined statistics available via ethtool.
+
+Standard interface statistics
+-----------------------------
+
+There are multiple interfaces to reach the standard statistics.
+Most commonly used is the `ip` command from `iproute2`::
+
+ $ ip -s -s link show dev ens4u1u1
+ 6: ens4u1u1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000
+ link/ether 48:2a:e3:4c:b1:d1 brd ff:ff:ff:ff:ff:ff
+ RX: bytes packets errors dropped overrun mcast
+ 74327665117 69016965 0 0 0 0
+ RX errors: length crc frame fifo missed
+ 0 0 0 0 0
+ TX: bytes packets errors dropped carrier collsns
+ 21405556176 44608960 0 0 0 0
+ TX errors: aborted fifo window heartbeat transns
+ 0 0 0 0 128
+ altname enp58s0u1u1
+
+Note that `-s` has been specified twice to see all members of
+:c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`.
+If `-s` is specified once the detailed errors won't be shown.
+
+`ip` supports JSON formatting via the `-j` option.
+
+Protocol-specific statistics
+----------------------------
+
+Some of the interfaces used for configuring devices are also able
+to report related statistics. For example, the ethtool interface used
+to configure pause frames can also report the corresponding hardware
+counters::
+
+ $ ethtool --include-statistics -a eth0
+ Pause parameters for eth0:
+ Autonegotiate: on
+ RX: on
+ TX: on
+ Statistics:
+ tx_pause_frames: 1
+ rx_pause_frames: 1
+
+Driver-defined statistics
+-------------------------
+
+Driver-defined ethtool statistics can be dumped using `ethtool -S $ifc`, e.g.::
+
+ $ ethtool -S ens4u1u1
+ NIC statistics:
+ tx_single_collisions: 0
+ tx_multi_collisions: 0
+
+uAPIs
+=====
+
+procfs
+------
+
+The historical `/proc/net/dev` text interface gives access to the list
+of interfaces as well as their statistics.
+
+Note that even though this interface uses
+:c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`
+internally, it combines some of the fields.
+
+sysfs
+-----
+
+Each device directory in sysfs contains a `statistics` directory (e.g.
+`/sys/class/net/lo/statistics/`) with files corresponding to
+members of :c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`.
+
+This simple interface is convenient especially in constrained/embedded
+environments without access to tools. However, it's inefficient when
+reading multiple stats as it internally performs a full dump of
+:c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`
+and reports only the stat corresponding to the accessed file.
+
+Sysfs files are documented in
+`Documentation/ABI/testing/sysfs-class-net-statistics`.
+
+
+netlink
+-------
+
+`rtnetlink` (`NETLINK_ROUTE`) is the preferred method of accessing
+:c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>` stats.
+
+Statistics are reported both in the responses to link information
+requests (`RTM_GETLINK`) and statistic requests (`RTM_GETSTATS`,
+when `IFLA_STATS_LINK_64` bit is set in the `.filter_mask` of the request).
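+
+A minimal sketch of an `RTM_GETSTATS` request, assuming `rtnl_fd` is a
+bound `NETLINK_ROUTE` socket and `ifindex` identifies the device::
+
+  struct {
+      struct nlmsghdr nlh;
+      struct if_stats_msg ifsm;
+  } req = {
+      .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct if_stats_msg)),
+      .nlh.nlmsg_type = RTM_GETSTATS,
+      .nlh.nlmsg_flags = NLM_F_REQUEST,
+      .ifsm.family = AF_UNSPEC,
+      .ifsm.ifindex = ifindex,
+      .ifsm.filter_mask = IFLA_STATS_FILTER_BIT(IFLA_STATS_LINK_64),
+  };
+
+  send(rtnl_fd, &req, req.nlh.nlmsg_len, 0);
+  /* the reply carries struct rtnl_link_stats64 in an
+   * IFLA_STATS_LINK_64 attribute
+   */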
+
+ethtool
+-------
+
+The ethtool IOCTL interface allows drivers to report
+implementation-specific statistics. Historically it has also been used
+to report statistics for which other APIs did not exist, like
+per-device-queue statistics, or standards-based statistics (e.g. RFC 2863).
+
+Statistics and their string identifiers are retrieved separately:
+identifiers via `ETHTOOL_GSTRINGS` with `string_set` set to `ETH_SS_STATS`,
+and values via `ETHTOOL_GSTATS`. User space should use `ETHTOOL_GDRVINFO`
+to retrieve the number of statistics (`.n_stats`).
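+
+A condensed sketch of the sequence (`fd` is any socket, `eth0` is
+illustrative, and error/allocation checks are omitted)::
+
+  struct ethtool_drvinfo drvinfo = { .cmd = ETHTOOL_GDRVINFO };
+  struct ethtool_gstrings *strings;
+  struct ethtool_stats *stats;
+  struct ifreq ifr = {0};
+
+  strcpy(ifr.ifr_name, "eth0");
+  ifr.ifr_data = (void *)&drvinfo;
+  ioctl(fd, SIOCETHTOOL, &ifr);     /* fills drvinfo.n_stats */
+
+  strings = calloc(1, sizeof(*strings) + drvinfo.n_stats * ETH_GSTRING_LEN);
+  strings->cmd = ETHTOOL_GSTRINGS;
+  strings->string_set = ETH_SS_STATS;
+  strings->len = drvinfo.n_stats;
+  ifr.ifr_data = (void *)strings;
+  ioctl(fd, SIOCETHTOOL, &ifr);     /* retrieves stat names */
+
+  stats = calloc(1, sizeof(*stats) + drvinfo.n_stats * sizeof(__u64));
+  stats->cmd = ETHTOOL_GSTATS;
+  stats->n_stats = drvinfo.n_stats;
+  ifr.ifr_data = (void *)stats;
+  ioctl(fd, SIOCETHTOOL, &ifr);     /* retrieves stat values */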
+
+ethtool-netlink
+---------------
+
+Ethtool netlink is a replacement for the older IOCTL interface.
+
+Protocol-related statistics can be requested in get commands by setting
+the `ETHTOOL_FLAG_STATS` flag in `ETHTOOL_A_HEADER_FLAGS`. Currently
+statistics are supported in the following commands:
+
+ - `ETHTOOL_MSG_PAUSE_GET`
+
+debugfs
+-------
+
+Some drivers expose extra statistics via `debugfs`.
+
+struct rtnl_link_stats64
+========================
+
+.. kernel-doc:: include/uapi/linux/if_link.h
+ :identifiers: rtnl_link_stats64
+
+Notes for driver authors
+========================
+
+Drivers should report all statistics which have a matching member in
+:c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>` exclusively
+via `.ndo_get_stats64`. Reporting such standard stats via ethtool
+or debugfs will not be accepted.
+
+Drivers must ensure best possible compliance with
+:c:type:`struct rtnl_link_stats64 <rtnl_link_stats64>`.
+Please note for example that detailed error statistics must be
+added into the general `rx_error` / `tx_error` counters.
+
+The `.ndo_get_stats64` callback cannot sleep because of accesses
+via `/proc/net/dev`. If the driver may sleep when retrieving the
+statistics from the device, it should do so periodically and
+asynchronously, and only return a recent copy from `.ndo_get_stats64`.
+The ethtool interrupt coalescing interface allows setting the frequency
+of refreshing statistics, if needed.
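+
+A sketch of the recommended pattern; all `mydrv_*` names are
+hypothetical::
+
+  /* runs from a workqueue, so it may sleep while talking to the device */
+  static void mydrv_refresh_stats(struct work_struct *work)
+  {
+          struct mydrv_priv *priv =
+                  container_of(work, struct mydrv_priv, stats_work.work);
+
+          mydrv_read_device_counters(priv);  /* updates priv->stats */
+          schedule_delayed_work(&priv->stats_work, MYDRV_STATS_INTERVAL);
+  }
+
+  static void mydrv_get_stats64(struct net_device *dev,
+                                struct rtnl_link_stats64 *stats)
+  {
+          struct mydrv_priv *priv = netdev_priv(dev);
+
+          /* must not sleep: return the most recent cached copy */
+          *stats = priv->stats;
+  }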
+
+Retrieving ethtool statistics is a multi-syscall process; drivers are advised
+to keep the number of statistics constant to avoid race conditions with
+user space trying to read them.
+
+Statistics must persist across routine operations like bringing the interface
+down and up.
+
+Kernel-internal data structures
+-------------------------------
+
+The following structures are internal to the kernel; their members are
+translated to netlink attributes when dumped. Drivers must not overwrite
+the statistics they don't report with 0.
+
+.. kernel-doc:: include/linux/ethtool.h
+ :identifiers: ethtool_pause_stats
diff --git a/Documentation/networking/vxlan.rst b/Documentation/networking/vxlan.rst
index ce239fa01848..2759dc1cc525 100644
--- a/Documentation/networking/vxlan.rst
+++ b/Documentation/networking/vxlan.rst
@@ -58,3 +58,31 @@ forwarding table using the new bridge command.
3. Show forwarding table::
# bridge fdb show dev vxlan0
+
+The following NIC features may indicate support for UDP tunnel-related
+offloads (most commonly VXLAN features, but support for a particular
+encapsulation protocol is NIC-specific):
+
+ - `tx-udp_tnl-segmentation`
+ - `tx-udp_tnl-csum-segmentation`
+ ability to perform TCP segmentation offload of UDP encapsulated frames
+
+ - `rx-udp_tunnel-port-offload`
+ receive side parsing of UDP encapsulated frames which allows NICs to
+ perform protocol-aware offloads, like checksum validation offload of
+ inner frames (only needed by NICs without protocol-agnostic offloads)
+
+For devices supporting `rx-udp_tunnel-port-offload` the list of currently
+offloaded ports can be interrogated with `ethtool`::
+
+ $ ethtool --show-tunnels eth0
+ Tunnel information for eth0:
+ UDP port table 0:
+ Size: 4
+ Types: vxlan
+ No entries
+ UDP port table 1:
+ Size: 4
+ Types: geneve, vxlan-gpe
+ Entries (1):
+ port 1230, vxlan-gpe
diff --git a/MAINTAINERS b/MAINTAINERS
index f310f0a09904..0f59b0412953 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1259,7 +1259,7 @@ S: Supported
F: Documentation/devicetree/bindings/net/apm-xgene-enet.txt
F: Documentation/devicetree/bindings/net/apm-xgene-mdio.txt
F: drivers/net/ethernet/apm/xgene/
-F: drivers/net/phy/mdio-xgene.c
+F: drivers/net/mdio/mdio-xgene.c
APPLIED MICRO (APM) X-GENE SOC PMU
M: Khuong Dinh <khuong@os.amperecomputing.com>
@@ -3238,7 +3238,7 @@ M: Daniel Borkmann <daniel@iogearbox.net>
R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
-R: Andrii Nakryiko <andriin@fb.com>
+R: Andrii Nakryiko <andrii@kernel.org>
R: John Fastabend <john.fastabend@gmail.com>
R: KP Singh <kpsingh@chromium.org>
L: netdev@vger.kernel.org
@@ -3914,6 +3914,7 @@ F: include/net/netns/can.h
F: include/uapi/linux/can.h
F: include/uapi/linux/can/bcm.h
F: include/uapi/linux/can/gw.h
+F: include/uapi/linux/can/isotp.h
F: include/uapi/linux/can/raw.h
F: net/can/
@@ -4713,6 +4714,15 @@ S: Supported
W: http://www.chelsio.com
F: drivers/crypto/chelsio
+CXGB4 INLINE CRYPTO DRIVER
+M: Ayush Sawal <ayush.sawal@chelsio.com>
+M: Vinay Kumar Yadav <vinay.yadav@chelsio.com>
+M: Rohit Maheshwari <rohitm@chelsio.com>
+L: netdev@vger.kernel.org
+S: Supported
+W: http://www.chelsio.com
+F: drivers/net/ethernet/chelsio/inline_crypto/
+
CXGB4 ETHERNET DRIVER (CXGB4)
M: Vishal Kulkarni <vishal@chelsio.com>
L: netdev@vger.kernel.org
@@ -6546,11 +6556,14 @@ F: Documentation/devicetree/bindings/net/ethernet-phy.yaml
F: Documentation/devicetree/bindings/net/mdio*
F: Documentation/devicetree/bindings/net/qca,ar803x.yaml
F: Documentation/networking/phy.rst
+F: drivers/net/mdio/
+F: drivers/net/mdio/of_mdio.c
+F: drivers/net/pcs/
F: drivers/net/phy/
-F: drivers/of/of_mdio.c
F: drivers/of/of_net.c
F: include/dt-bindings/net/qca-ar803x.h
F: include/linux/*mdio*.h
+F: include/linux/mdio/*.h
F: include/linux/of_net.h
F: include/linux/phy.h
F: include/linux/phy_fixed.h
@@ -10347,6 +10360,13 @@ S: Maintained
W: http://linux-test-project.github.io/
T: git git://github.com/linux-test-project/ltp.git
+LYNX PCS MODULE
+M: Ioana Ciornei <ioana.ciornei@nxp.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/pcs/pcs-lynx.c
+F: include/linux/pcs-lynx.h
+
M68K ARCHITECTURE
M: Geert Uytterhoeven <geert@linux-m68k.org>
L: linux-m68k@lists.linux-m68k.org
@@ -10554,7 +10574,7 @@ M: Tobias Waldekranz <tobias@waldekranz.com>
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/marvell,mvusb.yaml
-F: drivers/net/phy/mdio-mvusb.c
+F: drivers/net/mdio/mdio-mvusb.c
MARVELL XENON MMC/SD/SDIO HOST CONTROLLER DRIVER
M: Hu Ziji <huziji@marvell.com>
@@ -10701,6 +10721,15 @@ L: linux-input@vger.kernel.org
S: Maintained
F: drivers/hid/hid-mcp2221.c
+MCP251XFD SPI-CAN NETWORK DRIVER
+M: Marc Kleine-Budde <mkl@pengutronix.de>
+M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+R: Thomas Kopp <thomas.kopp@microchip.com>
+L: linux-can@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/net/can/microchip,mcp251xfd.yaml
+F: drivers/net/can/spi/mcp251xfd/
+
MCP4018 AND MCP4531 MICROCHIP DIGITAL POTENTIOMETER DRIVERS
M: Peter Rosin <peda@axentia.se>
L: linux-iio@vger.kernel.org
@@ -12090,7 +12119,6 @@ M: Neil Horman <nhorman@tuxdriver.com>
L: netdev@vger.kernel.org
S: Maintained
W: https://fedorahosted.org/dropwatch/
-F: include/net/drop_monitor.h
F: include/uapi/linux/net_dropmon.h
F: net/core/drop_monitor.c
@@ -12185,6 +12213,7 @@ F: net/ipv6/ipcomp6.c
F: net/ipv6/xfrm*
F: net/key/
F: net/xfrm/
+F: tools/testing/selftests/net/ipsec.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
@@ -12597,6 +12626,7 @@ F: drivers/net/dsa/ocelot/*
F: drivers/net/ethernet/mscc/
F: include/soc/mscc/ocelot*
F: net/dsa/tag_ocelot.c
+F: tools/testing/selftests/drivers/net/ocelot/*
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
M: Frederic Barrat <fbarrat@linux.ibm.com>
@@ -15372,10 +15402,11 @@ F: drivers/media/platform/s3c-camif/
F: include/media/drv-intf/s3c_camif.h
SAMSUNG S3FWRN5 NFC DRIVER
-M: Robert Baldyga <r.baldyga@samsung.com>
+M: Krzysztof Kozlowski <krzk@kernel.org>
M: Krzysztof Opasiak <k.opasiak@samsung.com>
L: linux-nfc@lists.01.org (moderated for non-subscribers)
-S: Supported
+S: Maintained
+F: Documentation/devicetree/bindings/net/nfc/samsung,s3fwrn5.yaml
F: drivers/nfc/s3fwrn5
SAMSUNG S5C73M3 CAMERA DRIVER
@@ -15762,6 +15793,7 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/phylink.c
F: drivers/net/phy/sfp*
+F: include/linux/mdio/mdio-i2c.h
F: include/linux/phylink.h
F: include/linux/sfp.h
K: phylink\.h|struct\s+phylink|\.phylink|>phylink_|phylink_(autoneg|clear|connect|create|destroy|disconnect|ethtool|helper|mac|mii|of|set|start|stop|test|validate)
@@ -16851,8 +16883,8 @@ SYNOPSYS DESIGNWARE ETHERNET XPCS DRIVER
M: Jose Abreu <Jose.Abreu@synopsys.com>
L: netdev@vger.kernel.org
S: Supported
-F: drivers/net/phy/mdio-xpcs.c
-F: include/linux/mdio-xpcs.h
+F: drivers/net/pcs/pcs-xpcs.c
+F: include/linux/pcs/pcs-xpcs.h
SYNOPSYS DESIGNWARE I2C DRIVER
M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
diff --git a/Makefile b/Makefile
index 342d26432553..ebbd34801476 100644
--- a/Makefile
+++ b/Makefile
@@ -1071,13 +1071,15 @@ ifdef CONFIG_STACK_VALIDATION
endif
endif
+ifdef CONFIG_BPF
ifdef CONFIG_DEBUG_INFO_BTF
ifeq ($(has_libelf),1)
resolve_btfids_target := tools/bpf/resolve_btfids FORCE
else
ERROR_RESOLVE_BTFIDS := 1
endif
-endif
+endif # CONFIG_DEBUG_INFO_BTF
+endif # CONFIG_BPF
PHONY += prepare0
diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
index 250fc01de78d..24aab3ea3f52 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
@@ -795,8 +795,8 @@
reg = <0x27>;
interrupt-parent = <&gpa1>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
- s3fwrn5,en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>;
- s3fwrn5,fw-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>;
+ en-gpios = <&gpf1 4 GPIO_ACTIVE_HIGH>;
+ wake-gpios = <&gpj0 2 GPIO_ACTIVE_HIGH>;
};
};
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
index c2dc1232f93f..1efb61cff454 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-rdb.dts
@@ -199,6 +199,7 @@
&enetc_port0 {
phy-handle = <&sgmii_phy0>;
phy-connection-type = "sgmii";
+ managed = "in-band-status";
status = "okay";
mdio {
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
index d174ad214857..9a11e5c60c26 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-bananapi-bpi-r64.dts
@@ -143,6 +143,56 @@
mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
+
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <0>;
+ reset-gpios = <&pio 54 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "wan";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan0";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan1";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan2";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "lan3";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
index 0b4de627f96e..08ad0ffb24df 100644
--- a/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
+++ b/arch/arm64/boot/dts/mediatek/mt7622-rfb1.dts
@@ -105,20 +105,71 @@
pinctrl-0 = <&eth_pins>;
status = "okay";
- gmac1: mac@1 {
+ gmac0: mac@0 {
compatible = "mediatek,eth-mac";
- reg = <1>;
- phy-handle = <&phy5>;
+ reg = <0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
};
mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- phy5: ethernet-phy@5 {
- reg = <5>;
- phy-mode = "sgmii";
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <0>;
+ reset-gpios = <&pio 54 0>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan0";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan1";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan2";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan3";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@6 {
+ reg = <6>;
+ label = "cpu";
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
};
+
};
};
diff --git a/arch/mips/boot/dts/mscc/ocelot.dtsi b/arch/mips/boot/dts/mscc/ocelot.dtsi
index f94e8a02ed06..535a98284dcb 100644
--- a/arch/mips/boot/dts/mscc/ocelot.dtsi
+++ b/arch/mips/boot/dts/mscc/ocelot.dtsi
@@ -134,11 +134,13 @@
<0x1280000 0x100>,
<0x1800000 0x80000>,
<0x1880000 0x10000>,
+ <0x1040000 0x10000>,
+ <0x1050000 0x10000>,
<0x1060000 0x10000>;
reg-names = "sys", "rew", "qs", "ptp", "port0", "port1",
"port2", "port3", "port4", "port5", "port6",
"port7", "port8", "port9", "port10", "qsys",
- "ana", "s2";
+ "ana", "s0", "s1", "s2";
interrupts = <18 21 22>;
interrupt-names = "ptp_rdy", "xtr", "inj";
diff --git a/arch/powerpc/boot/dts/fsl/t1040rdb.dts b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
index 65ff34c49025..af0c8a6f5613 100644
--- a/arch/powerpc/boot/dts/fsl/t1040rdb.dts
+++ b/arch/powerpc/boot/dts/fsl/t1040rdb.dts
@@ -64,6 +64,40 @@
phy_sgmii_2: ethernet-phy@3 {
reg = <0x03>;
};
+
+ /* VSC8514 QSGMII PHY */
+ phy_qsgmii_0: ethernet-phy@4 {
+ reg = <0x4>;
+ };
+
+ phy_qsgmii_1: ethernet-phy@5 {
+ reg = <0x5>;
+ };
+
+ phy_qsgmii_2: ethernet-phy@6 {
+ reg = <0x6>;
+ };
+
+ phy_qsgmii_3: ethernet-phy@7 {
+ reg = <0x7>;
+ };
+
+ /* VSC8514 QSGMII PHY */
+ phy_qsgmii_4: ethernet-phy@8 {
+ reg = <0x8>;
+ };
+
+ phy_qsgmii_5: ethernet-phy@9 {
+ reg = <0x9>;
+ };
+
+ phy_qsgmii_6: ethernet-phy@a {
+ reg = <0xa>;
+ };
+
+ phy_qsgmii_7: ethernet-phy@b {
+ reg = <0xb>;
+ };
};
};
};
@@ -76,3 +110,76 @@
};
#include "t1040si-post.dtsi"
+
+&seville_switch {
+ status = "okay";
+};
+
+&seville_port0 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_0>;
+ phy-mode = "qsgmii";
+ label = "ETH5";
+ status = "okay";
+};
+
+&seville_port1 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_1>;
+ phy-mode = "qsgmii";
+ label = "ETH4";
+ status = "okay";
+};
+
+&seville_port2 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_2>;
+ phy-mode = "qsgmii";
+ label = "ETH7";
+ status = "okay";
+};
+
+&seville_port3 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_3>;
+ phy-mode = "qsgmii";
+ label = "ETH6";
+ status = "okay";
+};
+
+&seville_port4 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_4>;
+ phy-mode = "qsgmii";
+ label = "ETH9";
+ status = "okay";
+};
+
+&seville_port5 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_5>;
+ phy-mode = "qsgmii";
+ label = "ETH8";
+ status = "okay";
+};
+
+&seville_port6 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_6>;
+ phy-mode = "qsgmii";
+ label = "ETH11";
+ status = "okay";
+};
+
+&seville_port7 {
+ managed = "in-band-status";
+ phy-handle = <&phy_qsgmii_7>;
+ phy-mode = "qsgmii";
+ label = "ETH10";
+ status = "okay";
+};
+
+&seville_port8 {
+ ethernet = <&enet0>;
+ status = "okay";
+};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 315d0557eefc..f58eb820eb5e 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -628,6 +628,84 @@
status = "disabled";
};
};
+
+ seville_switch: ethernet-switch@800000 {
+ compatible = "mscc,vsc9953-switch";
+ reg = <0x800000 0x290000>;
+ interrupts = <26 2 0 0>;
+ interrupt-names = "xtr";
+ little-endian;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ seville_port0: port@0 {
+ reg = <0>;
+ status = "disabled";
+ };
+
+ seville_port1: port@1 {
+ reg = <1>;
+ status = "disabled";
+ };
+
+ seville_port2: port@2 {
+ reg = <2>;
+ status = "disabled";
+ };
+
+ seville_port3: port@3 {
+ reg = <3>;
+ status = "disabled";
+ };
+
+ seville_port4: port@4 {
+ reg = <4>;
+ status = "disabled";
+ };
+
+ seville_port5: port@5 {
+ reg = <5>;
+ status = "disabled";
+ };
+
+ seville_port6: port@6 {
+ reg = <6>;
+ status = "disabled";
+ };
+
+ seville_port7: port@7 {
+ reg = <7>;
+ status = "disabled";
+ };
+
+ seville_port8: port@8 {
+ reg = <8>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ };
+ };
+
+ seville_port9: port@9 {
+ reg = <9>;
+ phy-mode = "internal";
+ status = "disabled";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ };
+ };
+ };
+ };
};
&qe {
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index 3cfe1eb89838..c0be5fe1ddba 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -238,7 +238,10 @@ extern void ccw_device_get_schid(struct ccw_device *, struct subchannel_id *);
struct channel_path_desc_fmt0 *ccw_device_get_chp_desc(struct ccw_device *, int);
u8 *ccw_device_get_util_str(struct ccw_device *cdev, int chp_idx);
int ccw_device_pnso(struct ccw_device *cdev,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc);
+ struct chsc_pnso_area *pnso_area, u8 oc,
+ struct chsc_pnso_resume_token resume_token, int cnc);
+int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid);
+int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid);
+int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid);
+int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid);
#endif /* _S390_CCWDEV_H_ */
diff --git a/arch/s390/include/asm/chsc.h b/arch/s390/include/asm/chsc.h
index 36ce2d25a5fc..ae4d2549cd67 100644
--- a/arch/s390/include/asm/chsc.h
+++ b/arch/s390/include/asm/chsc.h
@@ -12,6 +12,13 @@
#include <uapi/asm/chsc.h>
/**
+ * Operation codes for CHSC PNSO:
+ * PNSO_OC_NET_BRIDGE_INFO - only addresses that are visible to a bridgeport
+ * PNSO_OC_NET_ADDR_INFO - all addresses
+ */
+#define PNSO_OC_NET_BRIDGE_INFO 0
+#define PNSO_OC_NET_ADDR_INFO 3
+/**
* struct chsc_pnso_naid_l2 - network address information descriptor
* @nit: Network interface token
* @addr_lnid: network address and logical network id (VLAN ID)
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 480bb02ccacd..638137d46c85 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -36,7 +36,9 @@ struct css_general_char {
u64 alt_ssi : 1; /* bit 108 */
u64 : 1;
u64 narf : 1; /* bit 110 */
- u64 : 12;
+ u64 : 5;
+ u64 enarf: 1; /* bit 116 */
+ u64 : 6;
u64 util_str : 1;/* bit 123 */
} __packed;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index be4b8532dd3c..0a4182792876 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -50,7 +50,6 @@ struct bpf_jit {
int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int excnt; /* Number of exception table entries */
- int labels[1]; /* Labels for local jumps */
};
#define SEEN_MEM BIT(0) /* use mem[] for temporary storage */
@@ -229,18 +228,18 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
REG_SET_SEEN(b3); \
})
-#define EMIT6_PCREL_LABEL(op1, op2, b1, b2, label, mask) \
+#define EMIT6_PCREL_RIEB(op1, op2, b1, b2, mask, target) \
({ \
- int rel = (jit->labels[label] - jit->prg) >> 1; \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op1) | reg(b1, b2) << 16 | (rel & 0xffff), \
(op2) | (mask) << 12); \
REG_SET_SEEN(b1); \
REG_SET_SEEN(b2); \
})
-#define EMIT6_PCREL_IMM_LABEL(op1, op2, b1, imm, label, mask) \
+#define EMIT6_PCREL_RIEC(op1, op2, b1, imm, mask, target) \
({ \
- int rel = (jit->labels[label] - jit->prg) >> 1; \
+ unsigned int rel = (int)((target) - jit->prg) / 2; \
_EMIT6((op1) | (reg_high(b1) | (mask)) << 16 | \
(rel & 0xffff), (op2) | ((imm) & 0xff) << 8); \
REG_SET_SEEN(b1); \
@@ -1282,7 +1281,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4(0xb9040000, BPF_REG_0, REG_2);
break;
}
- case BPF_JMP | BPF_TAIL_CALL:
+ case BPF_JMP | BPF_TAIL_CALL: {
+ int patch_1_clrj, patch_2_clij, patch_3_brc;
+
/*
* Implicit input:
* B1: pointer to ctx
@@ -1300,16 +1301,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
offsetof(struct bpf_array, map.max_entries));
/* if ((u32)%b3 >= (u32)%w1) goto out; */
- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
- /* clrj %b3,%w1,0xa,label0 */
- EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
- REG_W1, 0, 0xa);
- } else {
- /* clr %b3,%w1 */
- EMIT2(0x1500, BPF_REG_3, REG_W1);
- /* brcl 0xa,label0 */
- EMIT6_PCREL_RILC(0xc0040000, 0xa, jit->labels[0]);
- }
+ /* clrj %b3,%w1,0xa,out */
+ patch_1_clrj = jit->prg;
+ EMIT6_PCREL_RIEB(0xec000000, 0x0077, BPF_REG_3, REG_W1, 0xa,
+ jit->prg);
/*
* if (tail_call_cnt++ > MAX_TAIL_CALL_CNT)
@@ -1324,16 +1319,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
EMIT4_IMM(0xa7080000, REG_W0, 1);
/* laal %w1,%w0,off(%r15) */
EMIT6_DISP_LH(0xeb000000, 0x00fa, REG_W1, REG_W0, REG_15, off);
- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
- /* clij %w1,MAX_TAIL_CALL_CNT,0x2,label0 */
- EMIT6_PCREL_IMM_LABEL(0xec000000, 0x007f, REG_W1,
- MAX_TAIL_CALL_CNT, 0, 0x2);
- } else {
- /* clfi %w1,MAX_TAIL_CALL_CNT */
- EMIT6_IMM(0xc20f0000, REG_W1, MAX_TAIL_CALL_CNT);
- /* brcl 0x2,label0 */
- EMIT6_PCREL_RILC(0xc0040000, 0x2, jit->labels[0]);
- }
+ /* clij %w1,MAX_TAIL_CALL_CNT,0x2,out */
+ patch_2_clij = jit->prg;
+ EMIT6_PCREL_RIEC(0xec000000, 0x007f, REG_W1, MAX_TAIL_CALL_CNT,
+ 2, jit->prg);
/*
* prog = array->ptrs[index];
@@ -1348,13 +1337,9 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* ltg %r1,prog(%b2,%r1) */
EMIT6_DISP_LH(0xe3000000, 0x0002, REG_1, BPF_REG_2,
REG_1, offsetof(struct bpf_array, ptrs));
- if (!is_first_pass(jit) && can_use_rel(jit, jit->labels[0])) {
- /* brc 0x8,label0 */
- EMIT4_PCREL_RIC(0xa7040000, 0x8, jit->labels[0]);
- } else {
- /* brcl 0x8,label0 */
- EMIT6_PCREL_RILC(0xc0040000, 0x8, jit->labels[0]);
- }
+ /* brc 0x8,out */
+ patch_3_brc = jit->prg;
+ EMIT4_PCREL_RIC(0xa7040000, 8, jit->prg);
/*
* Restore registers before calling function
@@ -1371,8 +1356,16 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp,
/* bc 0xf,tail_call_start(%r1) */
_EMIT4(0x47f01000 + jit->tail_call_start);
/* out: */
- jit->labels[0] = jit->prg;
+ if (jit->prg_buf) {
+ *(u16 *)(jit->prg_buf + patch_1_clrj + 2) =
+ (jit->prg - patch_1_clrj) >> 1;
+ *(u16 *)(jit->prg_buf + patch_2_clij + 2) =
+ (jit->prg - patch_2_clij) >> 1;
+ *(u16 *)(jit->prg_buf + patch_3_brc + 2) =
+ (jit->prg - patch_3_brc) >> 1;
+ }
break;
+ }
case BPF_JMP | BPF_EXIT: /* return b0 */
last = (i == fp->len - 1) ? 1 : 0;
if (last)
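
The out-label fixup above works because the RIE-b (clrj), RIE-c (clij)
and RI-c (brc) branch formats all carry a signed 16-bit offset, counted
in halfwords from the start of the instruction, at byte offset 2. A
minimal self-contained sketch of the same patching scheme (illustrative
names, not the kernel's JIT interfaces):

	#include <stdint.h>

	/* Patch a halfword-relative branch target into an already
	 * emitted instruction. insn_off and target are byte offsets
	 * into the program buffer; s390 relative branches count
	 * halfwords from the address of the branch itself.
	 */
	static void patch_rel16(uint8_t *prg_buf, int insn_off, int target)
	{
		uint16_t rel = (uint16_t)((target - insn_off) >> 1);

		/* big-endian immediate, matching the s390 instruction stream */
		prg_buf[insn_off + 2] = rel >> 8;
		prg_buf[insn_off + 3] = rel & 0xff;
	}

Emitting each branch with a dummy target (jit->prg, i.e. a zero offset)
keeps the instruction sizes identical on every pass, so the final
offsets can be patched in place once jit->prg_buf exists.
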
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 86651e86289d..cb9ad6b73973 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -314,19 +314,19 @@ static inline void mds_idle_clear_cpu_buffers(void)
* lfence
* jmp spec_trap
* do_rop:
- * mov %rax,(%rsp) for x86_64
+ * mov %rcx,(%rsp) for x86_64
* mov %edx,(%esp) for x86_32
* retq
*
* Without retpolines configured:
*
- * jmp *%rax for x86_64
+ * jmp *%rcx for x86_64
* jmp *%edx for x86_32
*/
#ifdef CONFIG_RETPOLINE
# ifdef CONFIG_X86_64
-# define RETPOLINE_RAX_BPF_JIT_SIZE 17
-# define RETPOLINE_RAX_BPF_JIT() \
+# define RETPOLINE_RCX_BPF_JIT_SIZE 17
+# define RETPOLINE_RCX_BPF_JIT() \
do { \
EMIT1_off32(0xE8, 7); /* callq do_rop */ \
/* spec_trap: */ \
@@ -334,7 +334,7 @@ do { \
EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
/* do_rop: */ \
- EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
+ EMIT4(0x48, 0x89, 0x0C, 0x24); /* mov %rcx,(%rsp) */ \
EMIT1(0xC3); /* retq */ \
} while (0)
# else /* !CONFIG_X86_64 */
@@ -352,9 +352,9 @@ do { \
# endif
#else /* !CONFIG_RETPOLINE */
# ifdef CONFIG_X86_64
-# define RETPOLINE_RAX_BPF_JIT_SIZE 2
-# define RETPOLINE_RAX_BPF_JIT() \
- EMIT2(0xFF, 0xE0); /* jmp *%rax */
+# define RETPOLINE_RCX_BPF_JIT_SIZE 2
+# define RETPOLINE_RCX_BPF_JIT() \
+ EMIT2(0xFF, 0xE1); /* jmp *%rcx */
# else /* !CONFIG_X86_64 */
# define RETPOLINE_EDX_BPF_JIT() \
EMIT2(0xFF, 0xE2) /* jmp *%edx */
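
The 17-byte RETPOLINE_RCX_BPF_JIT_SIZE follows directly from the
encodings the macro emits; a self-contained, illustrative dump of the
sequence (sketch, not kernel code):

	#include <stdio.h>

	static const unsigned char retpoline_rcx[] = {
		0xE8, 0x07, 0x00, 0x00, 0x00,	/* callq do_rop    */
		/* spec_trap: */
		0xF3, 0x90,			/* pause           */
		0x0F, 0xAE, 0xE8,		/* lfence          */
		0xEB, 0xF9,			/* jmp spec_trap   */
		/* do_rop: */
		0x48, 0x89, 0x0C, 0x24,		/* mov %rcx,(%rsp) */
		0xC3,				/* retq            */
	};

	int main(void)
	{
		/* 5 + 2 + 3 + 2 + 4 + 1 = 17 == RETPOLINE_RCX_BPF_JIT_SIZE */
		printf("%zu bytes\n", sizeof(retpoline_rcx));
		return 0;
	}

The switch from rax to rcx is needed because the reworked x86 BPF JIT
below keeps the tail-call counter in rax across programs, so rax is no
longer free to hold the indirect-jump target.
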
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 42b6709e6dc7..796506dcfc42 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -221,14 +221,48 @@ struct jit_context {
/* Number of bytes emit_patch() needs to generate instructions */
#define X86_PATCH_SIZE 5
+/* Number of bytes that will be skipped on tailcall */
+#define X86_TAIL_CALL_OFFSET 11
-#define PROLOGUE_SIZE 25
+static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (callee_regs_used[0])
+ EMIT1(0x53); /* push rbx */
+ if (callee_regs_used[1])
+ EMIT2(0x41, 0x55); /* push r13 */
+ if (callee_regs_used[2])
+ EMIT2(0x41, 0x56); /* push r14 */
+ if (callee_regs_used[3])
+ EMIT2(0x41, 0x57); /* push r15 */
+ *pprog = prog;
+}
+
+static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
+{
+ u8 *prog = *pprog;
+ int cnt = 0;
+
+ if (callee_regs_used[3])
+ EMIT2(0x41, 0x5F); /* pop r15 */
+ if (callee_regs_used[2])
+ EMIT2(0x41, 0x5E); /* pop r14 */
+ if (callee_regs_used[1])
+ EMIT2(0x41, 0x5D); /* pop r13 */
+ if (callee_regs_used[0])
+ EMIT1(0x5B); /* pop rbx */
+ *pprog = prog;
+}
/*
- * Emit x86-64 prologue code for BPF program and check its size.
- * bpf_tail_call helper will skip it while jumping into another program
+ * Emit x86-64 prologue code for BPF program.
+ * bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
+ * while jumping to another program
*/
-static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
+static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
+ bool tail_call_reachable, bool is_subprog)
{
u8 *prog = *pprog;
int cnt = X86_PATCH_SIZE;
@@ -238,19 +272,19 @@ static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf)
*/
memcpy(prog, ideal_nops[NOP_ATOMIC5], cnt);
prog += cnt;
+ if (!ebpf_from_cbpf) {
+ if (tail_call_reachable && !is_subprog)
+ EMIT2(0x31, 0xC0); /* xor eax, eax */
+ else
+ EMIT2(0x66, 0x90); /* nop2 */
+ }
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
/* sub rsp, rounded_stack_depth */
- EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
- EMIT1(0x53); /* push rbx */
- EMIT2(0x41, 0x55); /* push r13 */
- EMIT2(0x41, 0x56); /* push r14 */
- EMIT2(0x41, 0x57); /* push r15 */
- if (!ebpf_from_cbpf) {
- /* zero init tail_call_cnt */
- EMIT2(0x6a, 0x00);
- BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
- }
+ if (stack_depth)
+ EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
+ if (tail_call_reachable)
+ EMIT1(0x50); /* push rax */
*pprog = prog;
}
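
The X86_TAIL_CALL_OFFSET value of 11 is exactly the size of the prologue
prefix that a tail call skips:

	5 bytes   nop5 (patchable call/jump site)
	2 bytes   xor eax, eax   (or nop2 for subprogs and classic BPF)
	1 byte    push rbp
	3 bytes   mov rbp, rsp
	--------
	11 bytes  == X86_TAIL_CALL_OFFSET

A tail call therefore lands on the target's "sub rsp"/"push rax"
sequence, reusing the caller's frame pointer while rax carries the
tail-call count into the next program.
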
@@ -314,13 +348,14 @@ static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
mutex_lock(&text_mutex);
if (memcmp(ip, old_insn, X86_PATCH_SIZE))
goto out;
+ ret = 1;
if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
if (text_live)
text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
else
memcpy(ip, new_insn, X86_PATCH_SIZE);
+ ret = 0;
}
- ret = 0;
out:
mutex_unlock(&text_mutex);
return ret;
@@ -337,6 +372,22 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
return __bpf_arch_text_poke(ip, t, old_addr, new_addr, true);
}
+static int get_pop_bytes(bool *callee_regs_used)
+{
+ int bytes = 0;
+
+ if (callee_regs_used[3])
+ bytes += 2;
+ if (callee_regs_used[2])
+ bytes += 2;
+ if (callee_regs_used[1])
+ bytes += 2;
+ if (callee_regs_used[0])
+ bytes += 1;
+
+ return bytes;
+}
+
/*
* Generate the following code:
*
@@ -351,12 +402,32 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
* goto *(prog->bpf_func + prologue_size);
* out:
*/
-static void emit_bpf_tail_call_indirect(u8 **pprog)
+static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
+ u32 stack_depth)
{
+ int tcc_off = -4 - round_up(stack_depth, 8);
u8 *prog = *pprog;
- int label1, label2, label3;
+ int pop_bytes = 0;
+ int off1 = 42;
+ int off2 = 31;
+ int off3 = 9;
int cnt = 0;
+ /* count the additional bytes used for popping callee regs from stack
+ * that need to be taken into account for each of the offsets that
+ * are used for bailing out of the tail call
+ */
+ pop_bytes = get_pop_bytes(callee_regs_used);
+ off1 += pop_bytes;
+ off2 += pop_bytes;
+ off3 += pop_bytes;
+
+ if (stack_depth) {
+ off1 += 7;
+ off2 += 7;
+ off3 += 7;
+ }
+
/*
* rdi - pointer to ctx
* rsi - pointer to bpf_array
@@ -370,72 +441,112 @@ static void emit_bpf_tail_call_indirect(u8 **pprog)
EMIT2(0x89, 0xD2); /* mov edx, edx */
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
offsetof(struct bpf_array, map.max_entries));
-#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* Number of bytes to jump */
+#define OFFSET1 (off1 + RETPOLINE_RCX_BPF_JIT_SIZE) /* Number of bytes to jump */
EMIT2(X86_JBE, OFFSET1); /* jbe out */
- label1 = cnt;
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
+ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
-#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
+#define OFFSET2 (off2 + RETPOLINE_RCX_BPF_JIT_SIZE)
EMIT2(X86_JA, OFFSET2); /* ja out */
- label2 = cnt;
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
+ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
/* prog = array->ptrs[index]; */
- EMIT4_off32(0x48, 0x8B, 0x84, 0xD6, /* mov rax, [rsi + rdx * 8 + offsetof(...)] */
+ EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
offsetof(struct bpf_array, ptrs));
/*
* if (prog == NULL)
* goto out;
*/
- EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
-#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
+ EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
+#define OFFSET3 (off3 + RETPOLINE_RCX_BPF_JIT_SIZE)
EMIT2(X86_JE, OFFSET3); /* je out */
- label3 = cnt;
- /* goto *(prog->bpf_func + prologue_size); */
- EMIT4(0x48, 0x8B, 0x40, /* mov rax, qword ptr [rax + 32] */
- offsetof(struct bpf_prog, bpf_func));
- EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE); /* add rax, prologue_size */
+ *pprog = prog;
+ pop_callee_regs(pprog, callee_regs_used);
+ prog = *pprog;
+
+ EMIT1(0x58); /* pop rax */
+ if (stack_depth)
+ EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
+ round_up(stack_depth, 8));
+ /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
+ EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
+ offsetof(struct bpf_prog, bpf_func));
+ EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
+ X86_TAIL_CALL_OFFSET);
/*
- * Wow we're ready to jump into next BPF program
+ * Now we're ready to jump into next BPF program
* rdi == ctx (1st arg)
- * rax == prog->bpf_func + prologue_size
+ * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
*/
- RETPOLINE_RAX_BPF_JIT();
+ RETPOLINE_RCX_BPF_JIT();
/* out: */
- BUILD_BUG_ON(cnt - label1 != OFFSET1);
- BUILD_BUG_ON(cnt - label2 != OFFSET2);
- BUILD_BUG_ON(cnt - label3 != OFFSET3);
*pprog = prog;
}
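
To make the offset bookkeeping above concrete, take a program that uses
all four callee-saved registers and has a non-zero stack depth:

	pop_bytes = 2 (pop r15) + 2 (pop r14) + 2 (pop r13) + 1 (pop rbx) = 7
	off1      = 42 + 7 (pop_bytes) + 7 (add rsp, imm32)               = 56
	OFFSET1   = 56 + RETPOLINE_RCX_BPF_JIT_SIZE (17 with retpolines)  = 73

which still fits the signed 8-bit displacement of the jbe/ja/je short
jumps used for bailing out to the out label.
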
static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
- u8 **pprog, int addr, u8 *image)
+ u8 **pprog, int addr, u8 *image,
+ bool *callee_regs_used, u32 stack_depth)
{
+ int tcc_off = -4 - round_up(stack_depth, 8);
u8 *prog = *pprog;
+ int pop_bytes = 0;
+ int off1 = 20;
+ int poke_off;
int cnt = 0;
+	/* count the additional bytes used for popping callee regs from stack
+	 * that need to be taken into account for the jump offset that is used
+	 * for bailing out of the tail call when the limit is reached
+ */
+ pop_bytes = get_pop_bytes(callee_regs_used);
+ off1 += pop_bytes;
+
+	/*
+	 * total bytes for:
+	 * - nop5 / jmpq $off
+	 * - pop callee regs
+	 * - add rsp, $val if depth > 0
+	 * - pop rax
+	 */
+ poke_off = X86_PATCH_SIZE + pop_bytes + 1;
+ if (stack_depth) {
+ poke_off += 7;
+ off1 += 7;
+ }
+
/*
* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
* goto out;
*/
- EMIT2_off32(0x8B, 0x85, -36 - MAX_BPF_STACK); /* mov eax, dword ptr [rbp - 548] */
+ EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp - tcc_off] */
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
- EMIT2(X86_JA, 14); /* ja out */
+ EMIT2(X86_JA, off1); /* ja out */
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
- EMIT2_off32(0x89, 0x85, -36 - MAX_BPF_STACK); /* mov dword ptr [rbp -548], eax */
+ EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp - tcc_off], eax */
- poke->ip = image + (addr - X86_PATCH_SIZE);
- poke->adj_off = PROLOGUE_SIZE;
+ poke->tailcall_bypass = image + (addr - poke_off - X86_PATCH_SIZE);
+ poke->adj_off = X86_TAIL_CALL_OFFSET;
+ poke->tailcall_target = image + (addr - X86_PATCH_SIZE);
+ poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
+
+ emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
+ poke->tailcall_bypass);
+
+ *pprog = prog;
+ pop_callee_regs(pprog, callee_regs_used);
+ prog = *pprog;
+ EMIT1(0x58); /* pop rax */
+ if (stack_depth)
+ EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
prog += X86_PATCH_SIZE;
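
With the poke_off arithmetic above, the emitted image around the direct
tail call looks like this (illustrative layout, assuming a non-zero
stack depth):

	image + addr - poke_off - 5:  jmp <bypass>        5 bytes (poke->tailcall_bypass)
	                              pop <callee regs>   pop_bytes bytes
	                              pop rax             1 byte
	                              add rsp, depth      7 bytes
	image + addr - 5:             nop5                (poke->tailcall_target)
	image + addr:                 <bypass>            (poke->bypass_addr)

where poke_off = X86_PATCH_SIZE + pop_bytes + 1 + 7. The fixup below
later turns the nop5 into a jump to the target program and removes the
bypass jump, so the unwinding pops only execute when the tail call is
actually taken.
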
@@ -453,7 +564,7 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
for (i = 0; i < prog->aux->size_poke_tab; i++) {
poke = &prog->aux->poke_tab[i];
- WARN_ON_ONCE(READ_ONCE(poke->ip_stable));
+ WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
continue;
@@ -464,18 +575,25 @@ static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
if (target) {
/* Plain memcpy is used when image is not live yet
* and still not locked as read-only. Once poke
- * location is active (poke->ip_stable), any parallel
- * bpf_arch_text_poke() might occur still on the
- * read-write image until we finally locked it as
- * read-only. Both modifications on the given image
- * are under text_mutex to avoid interference.
+ * location is active (poke->tailcall_target_stable),
+ * any parallel bpf_arch_text_poke() might occur
+ * still on the read-write image until we finally
+ * locked it as read-only. Both modifications on
+ * the given image are under text_mutex to avoid
+ * interference.
*/
- ret = __bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP, NULL,
+ ret = __bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP, NULL,
(u8 *)target->bpf_func +
poke->adj_off, false);
BUG_ON(ret < 0);
+ ret = __bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ (u8 *)poke->tailcall_target +
+ X86_PATCH_SIZE, NULL, false);
+ BUG_ON(ret < 0);
}
- WRITE_ONCE(poke->ip_stable, true);
+ WRITE_ONCE(poke->tailcall_target_stable, true);
mutex_unlock(&array->aux->poke_mutex);
}
}
@@ -652,19 +770,49 @@ static bool ex_handler_bpf(const struct exception_table_entry *x,
return true;
}
+static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
+ bool *regs_used, bool *tail_call_seen)
+{
+ int i;
+
+ for (i = 1; i <= insn_cnt; i++, insn++) {
+ if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
+ *tail_call_seen = true;
+ if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
+ regs_used[0] = true;
+ if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
+ regs_used[1] = true;
+ if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
+ regs_used[2] = true;
+ if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
+ regs_used[3] = true;
+ }
+}
+
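
detect_reg_usage() fills the same four slots that push_callee_regs() and
pop_callee_regs() consume; the mapping is fixed by the BPF-to-x86-64
calling convention. A trivial illustration (sketch, not kernel code):

	/* regs_used index -> BPF register -> x86-64 callee-saved register */
	static const char * const callee_map[4] = {
		"R6 -> rbx",	/* regs_used[0] */
		"R7 -> r13",	/* regs_used[1] */
		"R8 -> r14",	/* regs_used[2] */
		"R9 -> r15",	/* regs_used[3] */
	};

A program that only touches R6 now pays for a single push/pop pair
instead of the former unconditional four.
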
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
int oldproglen, struct jit_context *ctx)
{
+ bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
struct bpf_insn *insn = bpf_prog->insnsi;
+ bool callee_regs_used[4] = {};
int insn_cnt = bpf_prog->len;
+ bool tail_call_seen = false;
bool seen_exit = false;
u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
int i, cnt = 0, excnt = 0;
int proglen = 0;
u8 *prog = temp;
+ detect_reg_usage(insn, insn_cnt, callee_regs_used,
+ &tail_call_seen);
+
+ /* tail call's presence in current prog implies it is reachable */
+ tail_call_reachable |= tail_call_seen;
+
emit_prologue(&prog, bpf_prog->aux->stack_depth,
- bpf_prog_was_classic(bpf_prog));
+ bpf_prog_was_classic(bpf_prog), tail_call_reachable,
+ bpf_prog->aux->func_idx != 0);
+ push_callee_regs(&prog, callee_regs_used);
addrs[0] = prog - temp;
for (i = 1; i <= insn_cnt; i++, insn++) {
@@ -1102,16 +1250,27 @@ xadd: if (is_imm8(insn->off))
/* call */
case BPF_JMP | BPF_CALL:
func = (u8 *) __bpf_call_base + imm32;
- if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
- return -EINVAL;
+ if (tail_call_reachable) {
+ EMIT3_off32(0x48, 0x8B, 0x85,
+ -(bpf_prog->aux->stack_depth + 8));
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
+ return -EINVAL;
+ } else {
+ if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
+ return -EINVAL;
+ }
break;
case BPF_JMP | BPF_TAIL_CALL:
if (imm32)
emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
- &prog, addrs[i], image);
+ &prog, addrs[i], image,
+ callee_regs_used,
+ bpf_prog->aux->stack_depth);
else
- emit_bpf_tail_call_indirect(&prog);
+ emit_bpf_tail_call_indirect(&prog,
+ callee_regs_used,
+ bpf_prog->aux->stack_depth);
break;
/* cond jump */
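
The +7 in the tail_call_reachable branch of BPF_JMP | BPF_CALL accounts
for the rax reload emitted just before the call:

	48 8B 85 <imm32>   mov rax, qword ptr [rbp + imm32]   3 + 4 = 7 bytes

The instruction restores the tail-call counter that the prologue's
"push rax" spilled below the stack frame, and emit_call() has to be told
that its own first byte now sits 7 bytes past image + addrs[i - 1].
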
@@ -1294,12 +1453,7 @@ emit_jmp:
seen_exit = true;
/* Update cleanup_addr */
ctx->cleanup_addr = proglen;
- if (!bpf_prog_was_classic(bpf_prog))
- EMIT1(0x5B); /* get rid of tail_call_cnt */
- EMIT2(0x41, 0x5F); /* pop r15 */
- EMIT2(0x41, 0x5E); /* pop r14 */
- EMIT2(0x41, 0x5D); /* pop r13 */
- EMIT1(0x5B); /* pop rbx */
+ pop_callee_regs(&prog, callee_regs_used);
EMIT1(0xC9); /* leave */
EMIT1(0xC3); /* ret */
break;
@@ -1379,10 +1533,15 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
u8 *prog = *pprog;
int cnt = 0;
- if (emit_call(&prog, __bpf_prog_enter, prog))
- return -EINVAL;
- /* remember prog start time returned by __bpf_prog_enter */
- emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+ if (p->aux->sleepable) {
+ if (emit_call(&prog, __bpf_prog_enter_sleepable, prog))
+ return -EINVAL;
+ } else {
+ if (emit_call(&prog, __bpf_prog_enter, prog))
+ return -EINVAL;
+ /* remember prog start time returned by __bpf_prog_enter */
+ emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
+ }
/* arg1: lea rdi, [rbp - stack_size] */
EMIT4(0x48, 0x8D, 0x7D, -stack_size);
@@ -1402,13 +1561,18 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
if (mod_ret)
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
- /* arg1: mov rdi, progs[i] */
- emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
- (u32) (long) p);
- /* arg2: mov rsi, rbx <- start time in nsec */
- emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
- if (emit_call(&prog, __bpf_prog_exit, prog))
- return -EINVAL;
+ if (p->aux->sleepable) {
+ if (emit_call(&prog, __bpf_prog_exit_sleepable, prog))
+ return -EINVAL;
+ } else {
+ /* arg1: mov rdi, progs[i] */
+ emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32,
+ (u32) (long) p);
+ /* arg2: mov rsi, rbx <- start time in nsec */
+ emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
+ if (emit_call(&prog, __bpf_prog_exit, prog))
+ return -EINVAL;
+ }
*pprog = prog;
return 0;
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 7f814da3c2d0..96bea1ab1ecc 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -327,7 +327,7 @@ done:
*/
-static struct atmdev_ops atmtcp_v_dev_ops = {
+static const struct atmdev_ops atmtcp_v_dev_ops = {
.dev_close = atmtcp_v_dev_close,
.open = atmtcp_v_open,
.close = atmtcp_v_close,
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 88a93c266c19..6f8fc5f587fe 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -419,12 +419,12 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;
- pc_host->mem_resource.name = "BCMA PCIcore external memory",
+ pc_host->mem_resource.name = "BCMA PCIcore external memory";
pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;
- pc_host->io_resource.name = "BCMA PCIcore external I/O",
+ pc_host->io_resource.name = "BCMA PCIcore external I/O";
pc_host->io_resource.start = 0x100;
pc_host->io_resource.end = 0x7FF;
pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dca0aab0a9a..3c9485acdd81 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -2184,7 +2184,7 @@ out:
return ret;
}
-static const struct genl_ops nbd_connect_genl_ops[] = {
+static const struct genl_small_ops nbd_connect_genl_ops[] = {
{
.cmd = NBD_CMD_CONNECT,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -2216,8 +2216,8 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.name = NBD_GENL_FAMILY_NAME,
.version = NBD_GENL_VERSION,
.module = THIS_MODULE,
- .ops = nbd_connect_genl_ops,
- .n_ops = ARRAY_SIZE(nbd_connect_genl_ops),
+ .small_ops = nbd_connect_genl_ops,
+ .n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
.maxattr = NBD_ATTR_MAX,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 5fa5be3c5598..88ce5f0ffc4b 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -18,7 +18,11 @@
#define VERSION "0.1"
-#define BDADDR_INTEL (&(bdaddr_t) {{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
+#define BDADDR_INTEL (&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}})
+#define RSA_HEADER_LEN 644
+#define CSS_HEADER_OFFSET 8
+#define ECDSA_OFFSET 644
+#define ECDSA_HEADER_LEN 320
int btintel_check_bdaddr(struct hci_dev *hdev)
{
@@ -360,6 +364,144 @@ int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver)
}
EXPORT_SYMBOL_GPL(btintel_read_version);
+void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
+{
+ const char *variant;
+
+ switch (version->img_type) {
+ case 0x01:
+ variant = "Bootloader";
+ bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id);
+ bt_dev_info(hdev, "Secure boot is %s",
+ version->secure_boot ? "enabled" : "disabled");
+ bt_dev_info(hdev, "OTP lock is %s",
+ version->otp_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "API lock is %s",
+ version->api_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "Debug lock is %s",
+ version->debug_lock ? "enabled" : "disabled");
+ bt_dev_info(hdev, "Minimum firmware build %u week %u %u",
+ version->min_fw_build_nn, version->min_fw_build_cw,
+ 2000 + version->min_fw_build_yy);
+ break;
+ case 0x03:
+ variant = "Firmware";
+ break;
+ default:
+ bt_dev_err(hdev, "Unsupported image type(%02x)", version->img_type);
+ goto done;
+ }
+
+ bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant,
+ 2000 + (version->timestamp >> 8), version->timestamp & 0xff,
+ version->build_type, version->build_num);
+
+done:
+ return;
+}
+EXPORT_SYMBOL_GPL(btintel_version_info_tlv);
+
+int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version)
+{
+ struct sk_buff *skb;
+ const u8 param[1] = { 0xFF };
+
+ if (!version)
+ return -EINVAL;
+
+ skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading Intel version information failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->data[0]) {
+ bt_dev_err(hdev, "Intel Read Version command failed (%02x)",
+ skb->data[0]);
+ kfree_skb(skb);
+ return -EIO;
+ }
+
+ /* Consume Command Complete Status field */
+ skb_pull(skb, 1);
+
+	/* Event parameters contain multiple TLVs. Read each of them
+	 * and only keep the required data. Also, it uses the existing
+	 * legacy version fields like hw_platform, hw_variant, and
+	 * fw_variant to keep the existing setup flow.
+	 */
+ while (skb->len) {
+ struct intel_tlv *tlv;
+
+ tlv = (struct intel_tlv *)skb->data;
+ switch (tlv->type) {
+ case INTEL_TLV_CNVI_TOP:
+ version->cnvi_top = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_CNVR_TOP:
+ version->cnvr_top = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_CNVI_BT:
+ version->cnvi_bt = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_CNVR_BT:
+ version->cnvr_bt = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_DEV_REV_ID:
+ version->dev_rev_id = get_unaligned_le16(tlv->val);
+ break;
+ case INTEL_TLV_IMAGE_TYPE:
+ version->img_type = tlv->val[0];
+ break;
+ case INTEL_TLV_TIME_STAMP:
+ version->timestamp = get_unaligned_le16(tlv->val);
+ break;
+ case INTEL_TLV_BUILD_TYPE:
+ version->build_type = tlv->val[0];
+ break;
+ case INTEL_TLV_BUILD_NUM:
+ version->build_num = get_unaligned_le32(tlv->val);
+ break;
+ case INTEL_TLV_SECURE_BOOT:
+ version->secure_boot = tlv->val[0];
+ break;
+ case INTEL_TLV_OTP_LOCK:
+ version->otp_lock = tlv->val[0];
+ break;
+ case INTEL_TLV_API_LOCK:
+ version->api_lock = tlv->val[0];
+ break;
+ case INTEL_TLV_DEBUG_LOCK:
+ version->debug_lock = tlv->val[0];
+ break;
+ case INTEL_TLV_MIN_FW:
+ version->min_fw_build_nn = tlv->val[0];
+ version->min_fw_build_cw = tlv->val[1];
+ version->min_fw_build_yy = tlv->val[2];
+ break;
+ case INTEL_TLV_LIMITED_CCE:
+ version->limited_cce = tlv->val[0];
+ break;
+ case INTEL_TLV_SBE_TYPE:
+ version->sbe_type = tlv->val[0];
+ break;
+ case INTEL_TLV_OTP_BDADDR:
+ memcpy(&version->otp_bd_addr, tlv->val, tlv->len);
+ break;
+ default:
+ /* Ignore rest of information */
+ break;
+ }
+		/* consume the current tlv and move to the next */
+ skb_pull(skb, tlv->len + sizeof(*tlv));
+ }
+
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_read_version_tlv);
+
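
The version information event is a flat TLV stream: one type byte, one
length byte, then len value bytes per record, consumed above via
skb_pull(). A self-contained sketch of the same walk over a plain buffer
(illustrative code, not the hci/skb API), with the bounds check made
explicit:

	#include <stdint.h>
	#include <stdio.h>

	struct tlv {
		uint8_t type;
		uint8_t len;
		uint8_t val[];
	};

	static void walk_tlvs(const uint8_t *buf, size_t buflen)
	{
		while (buflen >= sizeof(struct tlv)) {
			const struct tlv *t = (const void *)buf;
			size_t rec = sizeof(*t) + t->len;

			if (rec > buflen)	/* truncated record */
				break;
			printf("type 0x%02x len %u\n", t->type, t->len);
			buf += rec;
			buflen -= rec;
		}
	}

The get_unaligned_le16()/get_unaligned_le32() accessors in the kernel
loop are needed because tlv->val carries no alignment guarantee inside
the event buffer.
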
/* ------- REGMAP IBT SUPPORT ------- */
#define IBT_REG_MODE_8BIT 0x00
@@ -626,12 +768,10 @@ int btintel_read_boot_params(struct hci_dev *hdev,
}
EXPORT_SYMBOL_GPL(btintel_read_boot_params);
-int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
- u32 *boot_param)
+static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev,
+ const struct firmware *fw)
{
int err;
- const u8 *fw_ptr;
- u32 frag_len;
/* Start the firmware download transaction with the Init fragment
* represented by the 128 bytes of CSS header.
@@ -660,8 +800,56 @@ int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
goto done;
}
- fw_ptr = fw->data + 644;
+done:
+ return err;
+}
+
+static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev,
+ const struct firmware *fw)
+{
+ int err;
+
+ /* Start the firmware download transaction with the Init fragment
+ * represented by the 128 bytes of CSS header.
+ */
+ err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware header (%d)", err);
+ return err;
+ }
+
+ /* Send the 96 bytes of public key information from the firmware
+ * as the PKey fragment.
+ */
+ err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err);
+ return err;
+ }
+
+ /* Send the 96 bytes of signature information from the firmware
+ * as the Sign fragment
+ */
+ err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224);
+ if (err < 0) {
+ bt_dev_err(hdev, "Failed to send firmware signature (%d)",
+ err);
+ return err;
+ }
+ return 0;
+}
+
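
The ECDSA_HEADER_LEN constant can be read off the offsets used above:

	fw->data + 644         128-byte CSS header  (Init fragment)
	fw->data + 644 + 128    96-byte public key  (PKey fragment)
	fw->data + 644 + 224    96-byte signature   (Sign fragment)

	128 + 96 + 96 = 320 = ECDSA_HEADER_LEN

so for combined RSA+ECDSA images the command-buffer payload starts at
RSA_HEADER_LEN + ECDSA_HEADER_LEN = 644 + 320 = 964, which is exactly
the offset btintel_download_firmware_newgen() below passes to
btintel_download_firmware_payload().
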
+static int btintel_download_firmware_payload(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param, size_t offset)
+{
+ int err;
+ const u8 *fw_ptr;
+ u32 frag_len;
+
+ fw_ptr = fw->data + offset;
frag_len = 0;
+ err = -EINVAL;
while (fw_ptr - fw->data < fw->size) {
struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len);
@@ -707,8 +895,99 @@ int btintel_download_firmware(struct hci_dev *hdev, const struct firmware *fw,
done:
return err;
}
+
+int btintel_download_firmware(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param)
+{
+ int err;
+
+ err = btintel_sfi_rsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ return btintel_download_firmware_payload(hdev, fw, boot_param,
+ RSA_HEADER_LEN);
+}
EXPORT_SYMBOL_GPL(btintel_download_firmware);
+int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ const struct firmware *fw, u32 *boot_param,
+ u8 hw_variant, u8 sbe_type)
+{
+ int err;
+ u32 css_header_ver;
+
+	/* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13 and 0x14 support
+	 * only the RSA secure boot engine. Hence, the corresponding sfi file
+	 * will have an RSA header of 644 bytes followed by the Command Buffer.
+	 *
+	 * iBT hardware variants 0x17 and 0x18 onwards support both the RSA and
+	 * ECDSA secure boot engines. As a result, the corresponding sfi file
+	 * will have an RSA header of 644 bytes and an ECDSA header of 320
+	 * bytes, followed by the Command Buffer.
+	 *
+	 * CSS Header byte positions 0x08 to 0x0B represent the CSS Header
+	 * version: RSA (0x00010000), ECDSA (0x00020000).
+	 */
+ css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET);
+ if (css_header_ver != 0x00010000) {
+ bt_dev_err(hdev, "Invalid CSS Header version");
+ return -EINVAL;
+ }
+
+ if (hw_variant <= 0x14) {
+ if (sbe_type != 0x00) {
+ bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)",
+ hw_variant);
+ return -EINVAL;
+ }
+
+ err = btintel_sfi_rsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ err = btintel_download_firmware_payload(hdev, fw, boot_param, RSA_HEADER_LEN);
+ if (err)
+ return err;
+ } else if (hw_variant >= 0x17) {
+ /* Check if CSS header for ECDSA follows the RSA header */
+ if (fw->data[ECDSA_OFFSET] != 0x06)
+ return -EINVAL;
+
+ /* Check if the CSS Header version is ECDSA(0x00020000) */
+ css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET + CSS_HEADER_OFFSET);
+ if (css_header_ver != 0x00020000) {
+ bt_dev_err(hdev, "Invalid CSS Header version");
+ return -EINVAL;
+ }
+
+ if (sbe_type == 0x00) {
+ err = btintel_sfi_rsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ err = btintel_download_firmware_payload(hdev, fw,
+ boot_param,
+ RSA_HEADER_LEN + ECDSA_HEADER_LEN);
+ if (err)
+ return err;
+ } else if (sbe_type == 0x01) {
+ err = btintel_sfi_ecdsa_header_secure_send(hdev, fw);
+ if (err)
+ return err;
+
+ err = btintel_download_firmware_payload(hdev, fw,
+ boot_param,
+ RSA_HEADER_LEN + ECDSA_HEADER_LEN);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(btintel_download_firmware_newgen);
+
void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
struct intel_reset params;
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index 08e20606fb58..09346ae308eb 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -6,6 +6,72 @@
* Copyright (C) 2015 Intel Corporation
*/
+/* List of tlv type */
+enum {
+ INTEL_TLV_CNVI_TOP = 0x10,
+ INTEL_TLV_CNVR_TOP,
+ INTEL_TLV_CNVI_BT,
+ INTEL_TLV_CNVR_BT,
+ INTEL_TLV_CNVI_OTP,
+ INTEL_TLV_CNVR_OTP,
+ INTEL_TLV_DEV_REV_ID,
+ INTEL_TLV_USB_VENDOR_ID,
+ INTEL_TLV_USB_PRODUCT_ID,
+ INTEL_TLV_PCIE_VENDOR_ID,
+ INTEL_TLV_PCIE_DEVICE_ID,
+ INTEL_TLV_PCIE_SUBSYSTEM_ID,
+ INTEL_TLV_IMAGE_TYPE,
+ INTEL_TLV_TIME_STAMP,
+ INTEL_TLV_BUILD_TYPE,
+ INTEL_TLV_BUILD_NUM,
+ INTEL_TLV_FW_BUILD_PRODUCT,
+ INTEL_TLV_FW_BUILD_HW,
+ INTEL_TLV_FW_STEP,
+ INTEL_TLV_BT_SPEC,
+ INTEL_TLV_MFG_NAME,
+ INTEL_TLV_HCI_REV,
+ INTEL_TLV_LMP_SUBVER,
+ INTEL_TLV_OTP_PATCH_VER,
+ INTEL_TLV_SECURE_BOOT,
+ INTEL_TLV_KEY_FROM_HDR,
+ INTEL_TLV_OTP_LOCK,
+ INTEL_TLV_API_LOCK,
+ INTEL_TLV_DEBUG_LOCK,
+ INTEL_TLV_MIN_FW,
+ INTEL_TLV_LIMITED_CCE,
+ INTEL_TLV_SBE_TYPE,
+ INTEL_TLV_OTP_BDADDR,
+ INTEL_TLV_UNLOCKED_STATE
+};
+
+struct intel_tlv {
+ u8 type;
+ u8 len;
+ u8 val[0];
+} __packed;
+
+struct intel_version_tlv {
+ u32 cnvi_top;
+ u32 cnvr_top;
+ u32 cnvi_bt;
+ u32 cnvr_bt;
+ u16 dev_rev_id;
+ u8 img_type;
+ u16 timestamp;
+ u8 build_type;
+ u32 build_num;
+ u8 secure_boot;
+ u8 otp_lock;
+ u8 api_lock;
+ u8 debug_lock;
+ u8 min_fw_build_nn;
+ u8 min_fw_build_cw;
+ u8 min_fw_build_yy;
+ u8 limited_cce;
+ u8 sbe_type;
+ bdaddr_t otp_bd_addr;
+};
+
struct intel_version {
u8 status;
u8 hw_platform;
@@ -77,12 +143,14 @@ int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
void btintel_hw_error(struct hci_dev *hdev, u8 code);
void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
+void btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version);
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
const void *param);
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
int btintel_set_event_mask(struct hci_dev *hdev, bool debug);
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug);
int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver);
+int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver);
struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
u16 opcode_write);
@@ -91,6 +159,10 @@ int btintel_read_boot_params(struct hci_dev *hdev,
struct intel_boot_params *params);
int btintel_download_firmware(struct hci_dev *dev, const struct firmware *fw,
u32 *boot_param);
+int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param, u8 hw_variant,
+ u8 sbe_type);
void btintel_reset_to_bootloader(struct hci_dev *hdev);
int btintel_read_debug_features(struct hci_dev *hdev,
struct intel_debug_features *features);
@@ -137,6 +209,11 @@ static inline void btintel_version_info(struct hci_dev *hdev,
{
}
+static inline void btintel_version_info_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *version)
+{
+}
+
static inline int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type,
u32 plen, const void *param)
{
@@ -165,6 +242,12 @@ static inline int btintel_read_version(struct hci_dev *hdev,
return -EOPNOTSUPP;
}
+static inline int btintel_read_version_tlv(struct hci_dev *hdev,
+ struct intel_version_tlv *ver)
+{
+ return -EOPNOTSUPP;
+}
+
static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
u16 opcode_read,
u16 opcode_write)
@@ -191,6 +274,14 @@ static inline int btintel_download_firmware(struct hci_dev *dev,
return -EOPNOTSUPP;
}
+static inline int btintel_download_firmware_newgen(struct hci_dev *hdev,
+ const struct firmware *fw,
+ u32 *boot_param,
+ u8 hw_variant, u8 sbe_type)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void btintel_reset_to_bootloader(struct hci_dev *hdev)
{
}
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index d15fd5be0216..33d58b30c5ac 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -215,30 +215,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = {
.fw_dump_end = 0xea,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8977 = {
- .cfg = 0x00,
- .host_int_mask = 0x08,
- .host_intstatus = 0x0c,
- .card_status = 0x5c,
- .sq_read_base_addr_a0 = 0xf8,
- .sq_read_base_addr_a1 = 0xf9,
- .card_revision = 0xc8,
- .card_fw_status0 = 0xe8,
- .card_fw_status1 = 0xe9,
- .card_rx_len = 0xea,
- .card_rx_unit = 0xeb,
- .io_port_0 = 0xe4,
- .io_port_1 = 0xe5,
- .io_port_2 = 0xe6,
- .int_read_to_clear = true,
- .host_int_rsr = 0x04,
- .card_misc_cfg = 0xD8,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
-};
-
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_89xx = {
.cfg = 0x00,
.host_int_mask = 0x08,
.host_intstatus = 0x0c,
@@ -261,29 +238,6 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8987 = {
.fw_dump_end = 0xf8,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = {
- .cfg = 0x00,
- .host_int_mask = 0x08,
- .host_intstatus = 0x0c,
- .card_status = 0x5c,
- .sq_read_base_addr_a0 = 0xf8,
- .sq_read_base_addr_a1 = 0xf9,
- .card_revision = 0xc8,
- .card_fw_status0 = 0xe8,
- .card_fw_status1 = 0xe9,
- .card_rx_len = 0xea,
- .card_rx_unit = 0xeb,
- .io_port_0 = 0xe4,
- .io_port_1 = 0xe5,
- .io_port_2 = 0xe6,
- .int_read_to_clear = true,
- .host_int_rsr = 0x04,
- .card_misc_cfg = 0xD8,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
-};
-
static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
.helper = "mrvl/sd8688_helper.bin",
.firmware = "mrvl/sd8688.bin",
@@ -332,7 +286,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
.helper = NULL,
.firmware = "mrvl/sdsd8977_combo_v2.bin",
- .reg = &btmrvl_reg_8977,
+ .reg = &btmrvl_reg_89xx,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
.supports_fw_dump = true,
@@ -341,7 +295,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
.helper = NULL,
.firmware = "mrvl/sd8987_uapsta.bin",
- .reg = &btmrvl_reg_8987,
+ .reg = &btmrvl_reg_89xx,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
.supports_fw_dump = true,
@@ -350,7 +304,7 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = {
.helper = NULL,
.firmware = "mrvl/sdsd8997_combo_v4.bin",
- .reg = &btmrvl_reg_8997,
+ .reg = &btmrvl_reg_89xx,
.support_pscan_win_report = true,
.sd_blksz_fw_dl = 256,
.supports_fw_dump = true,
diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c
index c7ab7a23bd67..ba45c59bd9f3 100644
--- a/drivers/bluetooth/btmtksdio.c
+++ b/drivers/bluetooth/btmtksdio.c
@@ -496,7 +496,7 @@ static void btmtksdio_interrupt(struct sdio_func *func)
sdio_claim_host(bdev->func);
/* Disable interrupt */
- sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0);
+ sdio_writel(func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
int_status = sdio_readl(func, MTK_REG_CHISR, NULL);
@@ -530,7 +530,7 @@ static void btmtksdio_interrupt(struct sdio_func *func)
}
/* Enable interrupt */
- sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, 0);
+ sdio_writel(func, C_INT_EN_SET, MTK_REG_CHLPCR, NULL);
pm_runtime_mark_last_busy(bdev->dev);
pm_runtime_put_autosuspend(bdev->dev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8d2608ddfd08..1005b6e8ff74 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -59,6 +59,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_MEDIATEK 0x200000
#define BTUSB_WIDEBAND_SPEECH 0x400000
#define BTUSB_VALID_LE_STATES 0x800000
+#define BTUSB_QCA_WCN6855 0x1000000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -254,24 +255,46 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
- { USB_DEVICE(0x0cf3, 0x535b), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME },
- { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x0cf3, 0x535b), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME |
+ BTUSB_WIDEBAND_SPEECH },
+
+ /* QCA WCN6855 chipset */
+ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 |
+ BTUSB_WIDEBAND_SPEECH },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -2338,10 +2361,10 @@ static bool btusb_setup_intel_new_get_fw_name(struct intel_version *ver,
static int btusb_intel_download_firmware(struct hci_dev *hdev,
struct intel_version *ver,
- struct intel_boot_params *params)
+ struct intel_boot_params *params,
+ u32 *boot_param)
{
const struct firmware *fw;
- u32 boot_param;
char fwname[64];
int err;
struct btusb_data *data = hci_get_drvdata(hdev);
@@ -2479,7 +2502,7 @@ static int btusb_intel_download_firmware(struct hci_dev *hdev,
set_bit(BTUSB_DOWNLOADING, &data->flags);
/* Start firmware downloading and get boot parameter */
- err = btintel_download_firmware(hdev, fw, &boot_param);
+ err = btintel_download_firmware(hdev, fw, boot_param);
if (err < 0) {
/* When FW download fails, send Intel Reset to retry
* FW download.
@@ -2561,7 +2584,7 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
return err;
}
- err = btusb_intel_download_firmware(hdev, &ver, &params);
+ err = btusb_intel_download_firmware(hdev, &ver, &params, &boot_param);
if (err)
return err;
@@ -2896,6 +2919,7 @@ static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
buf = kmalloc(size, GFP_KERNEL);
if (!buf) {
kfree(dr);
+ usb_free_urb(urb);
return -ENOMEM;
}
@@ -3390,6 +3414,27 @@ static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev,
return 0;
}
+static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev,
+ const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ u8 buf[6];
+ long ret;
+
+ memcpy(buf, bdaddr, sizeof(bdaddr_t));
+
+ skb = __hci_cmd_sync_ev(hdev, 0xfc14, sizeof(buf), buf,
+ HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "Change address command failed (%ld)", ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
#define QCA_DFU_PACKET_LEN 4096
#define QCA_GET_TARGET_VERSION 0x09
@@ -3409,7 +3454,8 @@ struct qca_version {
} __packed;
struct qca_rampatch_version {
- __le16 rom_version;
+ __le16 rom_version_high;
+ __le16 rom_version_low;
__le16 patch_version;
} __packed;
@@ -3421,12 +3467,14 @@ struct qca_device_info {
};
static const struct qca_device_info qca_devices_table[] = {
- { 0x00000100, 20, 4, 10 }, /* Rome 1.0 */
- { 0x00000101, 20, 4, 10 }, /* Rome 1.1 */
- { 0x00000200, 28, 4, 18 }, /* Rome 2.0 */
- { 0x00000201, 28, 4, 18 }, /* Rome 2.1 */
- { 0x00000300, 28, 4, 18 }, /* Rome 3.0 */
- { 0x00000302, 28, 4, 18 }, /* Rome 3.2 */
+ { 0x00000100, 20, 4, 8 }, /* Rome 1.0 */
+ { 0x00000101, 20, 4, 8 }, /* Rome 1.1 */
+ { 0x00000200, 28, 4, 16 }, /* Rome 2.0 */
+ { 0x00000201, 28, 4, 16 }, /* Rome 2.1 */
+ { 0x00000300, 28, 4, 16 }, /* Rome 3.0 */
+ { 0x00000302, 28, 4, 16 }, /* Rome 3.2 */
+ { 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */
+ { 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */
};
static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request,
@@ -3528,8 +3576,8 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
{
struct qca_rampatch_version *rver;
const struct firmware *fw;
- u32 ver_rom, ver_patch;
- u16 rver_rom, rver_patch;
+ u32 ver_rom, ver_patch, rver_rom;
+ u16 rver_rom_low, rver_rom_high, rver_patch;
char fwname[64];
int err;
@@ -3548,9 +3596,16 @@ static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev,
bt_dev_info(hdev, "using rampatch file: %s", fwname);
rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset);
- rver_rom = le16_to_cpu(rver->rom_version);
+ rver_rom_low = le16_to_cpu(rver->rom_version_low);
rver_patch = le16_to_cpu(rver->patch_version);
+ if (ver_rom & ~0xffffU) {
+ rver_rom_high = le16_to_cpu(rver->rom_version_high);
+		rver_rom = rver_rom_high << 16 | rver_rom_low;
+ } else {
+ rver_rom = rver_rom_low;
+ }
+
bt_dev_info(hdev, "QCA: patch rome 0x%x build 0x%x, "
"firmware rome 0x%x build 0x%x",
rver_rom, rver_patch, ver_rom, ver_patch);
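
With the high half folded in, ROM versions wider than 16 bits now
round-trip through the rampatch header. For WCN6855 1.0, for example:

	rver_rom = 0x0013 << 16 | 0x0100 = 0x00130100

which matches the corresponding qca_devices_table entry, so the
"Don't care about high ROM versions" early return below can be dropped.
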
@@ -3624,9 +3679,6 @@ static int btusb_setup_qca(struct hci_dev *hdev)
return err;
ver_rom = le32_to_cpu(ver.rom_version);
- /* Don't care about high ROM versions */
- if (ver_rom & ~0xffffU)
- return 0;
for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) {
if (ver_rom == qca_devices_table[i].rom_version)
@@ -4062,6 +4114,13 @@ static int btusb_probe(struct usb_interface *intf,
btusb_check_needs_reset_resume(intf);
}
+ if (id->driver_info & BTUSB_QCA_WCN6855) {
+ data->setup_on_usb = btusb_setup_qca;
+ hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
+ hdev->cmd_timeout = btusb_qca_cmd_timeout;
+ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
+ }
+
if (id->driver_info & BTUSB_AMP) {
/* AMP controllers do not support SCO packets */
data->isoc = NULL;
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index e41854e0d79a..981d96cc7695 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -793,8 +793,6 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (!h5)
return -ENOMEM;
- set_bit(HCI_UART_RESET_ON_INIT, &h5->serdev_hu.hdev_flags);
-
h5->hu = &h5->serdev_hu;
h5->serdev_hu.serdev = serdev;
serdev_device_set_drvdata(serdev, h5);
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index f1299da6eed8..b20a40fab83e 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -288,7 +288,7 @@ static irqreturn_t intel_irq(int irq, void *dev_id)
static int intel_set_power(struct hci_uart *hu, bool powered)
{
- struct list_head *p;
+ struct intel_device *idev;
int err = -ENODEV;
if (!hu->tty->dev)
@@ -296,10 +296,7 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *idev = list_entry(p, struct intel_device,
- list);
-
+ list_for_each_entry(idev, &intel_device_list, list) {
/* tty device and pdev device should share the same parent
* which is the UART port.
*/
@@ -362,19 +359,16 @@ static int intel_set_power(struct hci_uart *hu, bool powered)
static void intel_busy_work(struct work_struct *work)
{
- struct list_head *p;
struct intel_data *intel = container_of(work, struct intel_data,
busy_work);
+ struct intel_device *idev;
if (!intel->hu->tty->dev)
return;
/* Link is busy, delay the suspend */
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *idev = list_entry(p, struct intel_device,
- list);
-
+ list_for_each_entry(idev, &intel_device_list, list) {
if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get(&idev->pdev->dev);
pm_runtime_mark_last_busy(&idev->pdev->dev);
@@ -533,7 +527,7 @@ static int intel_setup(struct hci_uart *hu)
struct sk_buff *skb;
struct intel_version ver;
struct intel_boot_params params;
- struct list_head *p;
+ struct intel_device *idev;
const struct firmware *fw;
char fwname[64];
u32 boot_param;
@@ -693,14 +687,11 @@ static int intel_setup(struct hci_uart *hu)
case 0x0b: /* SfP */
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params.dev_revid));
+ ver.hw_variant, le16_to_cpu(params.dev_revid));
break;
case 0x12: /* ThP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(ver.hw_revision),
- le16_to_cpu(ver.fw_revision));
+ ver.hw_variant, ver.hw_revision, ver.fw_revision);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@@ -722,14 +713,11 @@ static int intel_setup(struct hci_uart *hu)
case 0x0b: /* SfP */
case 0x0c: /* WsP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(params.dev_revid));
+ ver.hw_variant, le16_to_cpu(params.dev_revid));
break;
case 0x12: /* ThP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
- le16_to_cpu(ver.hw_variant),
- le16_to_cpu(ver.hw_revision),
- le16_to_cpu(ver.fw_revision));
+ ver.hw_variant, ver.hw_revision, ver.fw_revision);
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)",
@@ -839,13 +827,11 @@ done:
* until further LPM TX notification.
*/
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *dev = list_entry(p, struct intel_device,
- list);
+ list_for_each_entry(idev, &intel_device_list, list) {
if (!hu->tty->dev)
break;
- if (hu->tty->dev->parent == dev->pdev->dev.parent) {
- if (device_may_wakeup(&dev->pdev->dev)) {
+ if (hu->tty->dev->parent == idev->pdev->dev.parent) {
+ if (device_may_wakeup(&idev->pdev->dev)) {
set_bit(STATE_LPM_ENABLED, &intel->flags);
set_bit(STATE_TX_ACTIVE, &intel->flags);
}
@@ -999,7 +985,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct intel_data *intel = hu->priv;
- struct list_head *p;
+ struct intel_device *idev;
BT_DBG("hu %p skb %p", hu, skb);
@@ -1010,10 +996,7 @@ static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb)
* completed before enqueuing any packet.
*/
mutex_lock(&intel_device_list_lock);
- list_for_each(p, &intel_device_list) {
- struct intel_device *idev = list_entry(p, struct intel_device,
- list);
-
+ list_for_each_entry(idev, &intel_device_list, list) {
if (hu->tty->dev->parent == idev->pdev->dev.parent) {
pm_runtime_get_sync(&idev->pdev->dev);
pm_runtime_mark_last_busy(&idev->pdev->dev);
@@ -1076,7 +1059,8 @@ static const struct hci_uart_proto intel_proto = {
#ifdef CONFIG_ACPI
static const struct acpi_device_id intel_acpi_match[] = {
{ "INT33E1", 0 },
- { },
+ { "INT33E3", 0 },
+ { }
};
MODULE_DEVICE_TABLE(acpi, intel_acpi_match);
#endif
@@ -1138,9 +1122,9 @@ static const struct acpi_gpio_params reset_gpios = { 0, 0, false };
static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false };
static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = {
- { "reset-gpios", &reset_gpios, 1 },
- { "host-wake-gpios", &host_wake_gpios, 1 },
- { },
+ { "reset-gpios", &reset_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
+ { "host-wake-gpios", &host_wake_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO },
+ { }
};
static int intel_probe(struct platform_device *pdev)
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 85a30fb9177b..f83d67eafc9f 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -538,6 +538,7 @@ static void hci_uart_tty_close(struct tty_struct *tty)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
percpu_up_write(&hu->proto_lock);
+ cancel_work_sync(&hu->init_ready);
cancel_work_sync(&hu->write_work);
if (hdev) {
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 20e1dedbc58c..244b8feba523 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -693,8 +693,6 @@ static int qca_close(struct hci_uart *hu)
destroy_workqueue(qca->workqueue);
qca->hu = NULL;
- qca_power_shutdown(hu);
-
kfree_skb(qca->rx_skb);
hu->priv = NULL;
@@ -2007,8 +2005,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
if (err) {
BT_ERR("Rome serdev registration failed");
- if (qcadev->susclk)
- clk_disable_unprepare(qcadev->susclk);
+ clk_disable_unprepare(qcadev->susclk);
return err;
}
}
@@ -2032,8 +2029,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
static void qca_serdev_remove(struct serdev_device *serdev)
{
struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+ struct qca_power *power = qcadev->bt_power;
- if (qca_is_wcn399x(qcadev->btsoc_type))
+ if (qca_is_wcn399x(qcadev->btsoc_type) && power->vregs_on)
qca_power_shutdown(&qcadev->serdev_hu);
else if (qcadev->susclk)
clk_disable_unprepare(qcadev->susclk);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 7b233312e723..ef96ad06fa54 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -113,8 +113,22 @@ static int hci_uart_flush(struct hci_dev *hdev)
/* Initialize device */
static int hci_uart_open(struct hci_dev *hdev)
{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+ int err;
+
BT_DBG("%s %p", hdev->name, hdev);
+	/* When the HCI_QUIRK_NON_PERSISTENT_SETUP quirk is set by the
+	 * driver, the BT SoC is completely turned off while BT is off.
+	 * On the next BT on, the UART port has to be opened again.
+	 */
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ err = serdev_device_open(hu->serdev);
+ if (err)
+ return err;
+ set_bit(HCI_UART_PROTO_READY, &hu->flags);
+ }
+
/* Undo clearing this from hci_uart_close() */
hdev->flush = hci_uart_flush;
@@ -124,11 +138,25 @@ static int hci_uart_open(struct hci_dev *hdev)
/* Close device */
static int hci_uart_close(struct hci_dev *hdev)
{
+ struct hci_uart *hu = hci_get_drvdata(hdev);
+
BT_DBG("hdev %p", hdev);
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ return 0;
+
hci_uart_flush(hdev);
hdev->flush = NULL;
+	/* When the HCI_QUIRK_NON_PERSISTENT_SETUP quirk is set by the
+	 * driver, the BT SoC is completely powered off while BT is off;
+	 * holding the port open may drain the battery.
+	 */
+ if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) {
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ serdev_device_close(hu->serdev);
+ }
+
return 0;
}
@@ -354,7 +382,7 @@ void hci_uart_unregister_device(struct hci_uart *hu)
{
struct hci_dev *hdev = hu->hdev;
- clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ cancel_work_sync(&hu->init_ready);
if (test_bit(HCI_UART_REGISTERED, &hu->flags))
hci_unregister_dev(hdev);
hci_free_dev(hdev);
@@ -362,6 +390,10 @@ void hci_uart_unregister_device(struct hci_uart *hu)
cancel_work_sync(&hu->write_work);
hu->proto->close(hu);
- serdev_device_close(hu->serdev);
+
+ if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) {
+ clear_bit(HCI_UART_PROTO_READY, &hu->flags);
+ serdev_device_close(hu->serdev);
+ }
}
EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 2d22d6bf52f2..7d59d18c6f26 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -197,17 +197,12 @@ int cn_add_callback(struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
- int err;
struct cn_dev *dev = &cdev;
if (!cn_already_initialized)
return -EAGAIN;
- err = cn_queue_add_callback(dev->cbdev, name, id, callback);
- if (err)
- return err;
-
- return 0;
+ return cn_queue_add_callback(dev->cbdev, name, id, callback);
}
EXPORT_SYMBOL_GPL(cn_add_callback);
diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig
index 2984fdf51e85..f886401af13e 100644
--- a/drivers/crypto/chelsio/Kconfig
+++ b/drivers/crypto/chelsio/Kconfig
@@ -21,35 +21,3 @@ config CRYPTO_DEV_CHELSIO
To compile this driver as a module, choose M here: the module
will be called chcr.
-
-config CHELSIO_IPSEC_INLINE
- bool "Chelsio IPSec XFRM Tx crypto offload"
- depends on CHELSIO_T4
- depends on CRYPTO_DEV_CHELSIO
- depends on XFRM_OFFLOAD
- depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
- default n
- help
- Enable support for IPSec Tx Inline.
-
-config CRYPTO_DEV_CHELSIO_TLS
- tristate "Chelsio Crypto Inline TLS Driver"
- depends on CHELSIO_T4
- depends on TLS_TOE
- select CRYPTO_DEV_CHELSIO
- help
- Support Chelsio Inline TLS with Chelsio crypto accelerator.
-
- To compile this driver as a module, choose M here: the module
- will be called chtls.
-
-config CHELSIO_TLS_DEVICE
- bool "Chelsio Inline KTLS Offload"
- depends on CHELSIO_T4
- depends on TLS_DEVICE
- select CRYPTO_DEV_CHELSIO
- default y
- help
- This flag enables support for kernel tls offload over Chelsio T6
- crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE flag can be enabled
- only if CONFIG_TLS and CONFIG_TLS_DEVICE flags are enabled.
diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile
index 0e9d035927e9..2e5df484ab01 100644
--- a/drivers/crypto/chelsio/Makefile
+++ b/drivers/crypto/chelsio/Makefile
@@ -3,8 +3,3 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o
chcr-objs := chcr_core.o chcr_algo.o
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-chcr-objs += chcr_ktls.o
-#endif
-chcr-$(CONFIG_CHELSIO_IPSEC_INLINE) += chcr_ipsec.o
-obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h
index d4f6e010dc79..507aafe93f21 100644
--- a/drivers/crypto/chelsio/chcr_algo.h
+++ b/drivers/crypto/chelsio/chcr_algo.h
@@ -86,39 +86,6 @@
KEY_CONTEXT_OPAD_PRESENT_M)
#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U)
-#define TLS_KEYCTX_RXFLIT_CNT_S 24
-#define TLS_KEYCTX_RXFLIT_CNT_V(x) ((x) << TLS_KEYCTX_RXFLIT_CNT_S)
-
-#define TLS_KEYCTX_RXPROT_VER_S 20
-#define TLS_KEYCTX_RXPROT_VER_M 0xf
-#define TLS_KEYCTX_RXPROT_VER_V(x) ((x) << TLS_KEYCTX_RXPROT_VER_S)
-
-#define TLS_KEYCTX_RXCIPH_MODE_S 16
-#define TLS_KEYCTX_RXCIPH_MODE_M 0xf
-#define TLS_KEYCTX_RXCIPH_MODE_V(x) ((x) << TLS_KEYCTX_RXCIPH_MODE_S)
-
-#define TLS_KEYCTX_RXAUTH_MODE_S 12
-#define TLS_KEYCTX_RXAUTH_MODE_M 0xf
-#define TLS_KEYCTX_RXAUTH_MODE_V(x) ((x) << TLS_KEYCTX_RXAUTH_MODE_S)
-
-#define TLS_KEYCTX_RXCIAU_CTRL_S 11
-#define TLS_KEYCTX_RXCIAU_CTRL_V(x) ((x) << TLS_KEYCTX_RXCIAU_CTRL_S)
-
-#define TLS_KEYCTX_RX_SEQCTR_S 9
-#define TLS_KEYCTX_RX_SEQCTR_M 0x3
-#define TLS_KEYCTX_RX_SEQCTR_V(x) ((x) << TLS_KEYCTX_RX_SEQCTR_S)
-
-#define TLS_KEYCTX_RX_VALID_S 8
-#define TLS_KEYCTX_RX_VALID_V(x) ((x) << TLS_KEYCTX_RX_VALID_S)
-
-#define TLS_KEYCTX_RXCK_SIZE_S 3
-#define TLS_KEYCTX_RXCK_SIZE_M 0x7
-#define TLS_KEYCTX_RXCK_SIZE_V(x) ((x) << TLS_KEYCTX_RXCK_SIZE_S)
-
-#define TLS_KEYCTX_RXMK_SIZE_S 0
-#define TLS_KEYCTX_RXMK_SIZE_M 0x7
-#define TLS_KEYCTX_RXMK_SIZE_V(x) ((x) << TLS_KEYCTX_RXMK_SIZE_S)
-
#define CHCR_HASH_MAX_DIGEST_SIZE 64
#define CHCR_MAX_SHA_DIGEST_SIZE 64
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index ed7989cf151e..f91f9d762a45 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -33,23 +33,8 @@ static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
-static const struct tlsdev_ops chcr_ktls_ops = {
- .tls_dev_add = chcr_ktls_dev_add,
- .tls_dev_del = chcr_ktls_dev_del,
-};
-#endif
-
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-static void update_netdev_features(void);
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
-
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
[CPL_FW6_PLD] = cpl_fw6_pld_handler,
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
- [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
-#endif
};
static struct cxgb4_uld_info chcr_uld_info = {
@@ -60,12 +45,6 @@ static struct cxgb4_uld_info chcr_uld_info = {
.add = chcr_uld_add,
.state_change = chcr_uld_state_change,
.rx_handler = chcr_uld_rx_handler,
-#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
- .tx_handler = chcr_uld_tx_handler,
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
- .tlsdev_ops = &chcr_ktls_ops,
-#endif
};
static void detach_work_fn(struct work_struct *work)
@@ -241,23 +220,6 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
return 0;
}
-#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
-int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
-{
- /* In case if skb's decrypted bit is set, it's nic tls packet, else it's
- * ipsec packet.
- */
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
- if (skb->decrypted)
- return chcr_ktls_xmit(skb, dev);
-#endif
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
- return chcr_ipsec_xmit(skb, dev);
-#endif
- return 0;
-}
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
-
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
struct chcr_dev *dev = &u_ctx->dev;
@@ -305,24 +267,6 @@ static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
return ret;
}
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
-static void update_netdev_features(void)
-{
- struct uld_ctx *u_ctx, *tmp;
-
- mutex_lock(&drv_data.drv_mutex);
- list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
- if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
- chcr_add_xfrmops(&u_ctx->lldi);
- }
- list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
- if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
- chcr_add_xfrmops(&u_ctx->lldi);
- }
- mutex_unlock(&drv_data.drv_mutex);
-}
-#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
-
static int __init chcr_crypto_init(void)
{
INIT_LIST_HEAD(&drv_data.act_dev);
@@ -332,12 +276,6 @@ static int __init chcr_crypto_init(void)
drv_data.last_dev = NULL;
cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);
- #ifdef CONFIG_CHELSIO_IPSEC_INLINE
- rtnl_lock();
- update_netdev_features();
- rtnl_unlock();
- #endif /* CONFIG_CHELSIO_IPSEC_INLINE */
-
return 0;
}
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
index 67d77abd6775..b02f981e7c32 100644
--- a/drivers/crypto/chelsio/chcr_core.h
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -72,54 +72,6 @@ struct _key_ctx {
unsigned char key[];
};
-#define KEYCTX_TX_WR_IV_S 55
-#define KEYCTX_TX_WR_IV_M 0x1ffULL
-#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
-#define KEYCTX_TX_WR_IV_G(x) \
- (((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)
-
-#define KEYCTX_TX_WR_AAD_S 47
-#define KEYCTX_TX_WR_AAD_M 0xffULL
-#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
-#define KEYCTX_TX_WR_AAD_G(x) (((x) >> KEYCTX_TX_WR_AAD_S) & \
- KEYCTX_TX_WR_AAD_M)
-
-#define KEYCTX_TX_WR_AADST_S 39
-#define KEYCTX_TX_WR_AADST_M 0xffULL
-#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
-#define KEYCTX_TX_WR_AADST_G(x) \
- (((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)
-
-#define KEYCTX_TX_WR_CIPHER_S 30
-#define KEYCTX_TX_WR_CIPHER_M 0x1ffULL
-#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
-#define KEYCTX_TX_WR_CIPHER_G(x) \
- (((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)
-
-#define KEYCTX_TX_WR_CIPHERST_S 23
-#define KEYCTX_TX_WR_CIPHERST_M 0x7f
-#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
-#define KEYCTX_TX_WR_CIPHERST_G(x) \
- (((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)
-
-#define KEYCTX_TX_WR_AUTH_S 14
-#define KEYCTX_TX_WR_AUTH_M 0x1ff
-#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
-#define KEYCTX_TX_WR_AUTH_G(x) \
- (((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)
-
-#define KEYCTX_TX_WR_AUTHST_S 7
-#define KEYCTX_TX_WR_AUTHST_M 0x7f
-#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
-#define KEYCTX_TX_WR_AUTHST_G(x) \
- (((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)
-
-#define KEYCTX_TX_WR_AUTHIN_S 0
-#define KEYCTX_TX_WR_AUTHIN_M 0x7f
-#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
-#define KEYCTX_TX_WR_AUTHIN_G(x) \
- (((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
-
#define WQ_RETRY 5
struct chcr_driver_data {
struct list_head act_dev;
@@ -157,42 +109,6 @@ struct uld_ctx {
struct chcr_dev dev;
};
-struct sge_opaque_hdr {
- void *dev;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
-};
-
-struct chcr_ipsec_req {
- struct ulp_txpkt ulptx;
- struct ulptx_idata sc_imm;
- struct cpl_tx_sec_pdu sec_cpl;
- struct _key_ctx key_ctx;
-};
-
-struct chcr_ipsec_wr {
- struct fw_ulptx_wr wreq;
- struct chcr_ipsec_req req;
-};
-
-#define ESN_IV_INSERT_OFFSET 12
-struct chcr_ipsec_aadiv {
- __be32 spi;
- u8 seq_no[8];
- u8 iv[8];
-};
-
-struct ipsec_sa_entry {
- int hmac_ctrl;
- u16 esn;
- u16 resv;
- unsigned int enckey_len;
- unsigned int kctx_len;
- unsigned int authsize;
- __be32 key_ctx_hdr;
- char salt[MAX_SALT];
- char key[2 * AES_MAX_KEY_SIZE];
-};
-
/*
* sgl_len - calculates the size of an SGL of the given capacity
* @n: the number of SGL entries
@@ -221,18 +137,4 @@ int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev);
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
int err);
-int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
-void chcr_add_xfrmops(const struct cxgb4_lld_info *lld);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn);
-extern void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction);
-#endif
#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index 014351ebbefa..9f71b9d706bd 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -97,41 +97,9 @@ static void hfi1_ipoib_dev_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *storage)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
- u64 rx_packets = 0ull;
- u64 rx_bytes = 0ull;
- u64 tx_packets = 0ull;
- u64 tx_bytes = 0ull;
- int i;
netdev_stats_to_stats64(storage, &dev->stats);
-
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *stats;
- unsigned int start;
- u64 trx_packets;
- u64 trx_bytes;
- u64 ttx_packets;
- u64 ttx_bytes;
-
- stats = per_cpu_ptr(priv->netstats, i);
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- trx_packets = stats->rx_packets;
- trx_bytes = stats->rx_bytes;
- ttx_packets = stats->tx_packets;
- ttx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-
- rx_packets += trx_packets;
- rx_bytes += trx_bytes;
- tx_packets += ttx_packets;
- tx_bytes += ttx_bytes;
- }
-
- storage->rx_packets += rx_packets;
- storage->rx_bytes += rx_bytes;
- storage->tx_packets += tx_packets;
- storage->tx_bytes += tx_bytes;
+ dev_fetch_sw_netstats(storage, priv->netstats);
}
static const struct net_device_ops hfi1_ipoib_netdev_ops = {
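The removed loop is now provided by a common helper; a sketch of what dev_fetch_sw_netstats() does, reconstructed from the code deleted above (function name hypothetical; the canonical implementation lives in net/core/dev.c):

/* Sketch of dev_fetch_sw_netstats() semantics, matching the loop
 * removed above: sum per-CPU counters under the u64_stats seqcount
 * so 64-bit reads stay consistent on 32-bit hosts.
 */
static void example_fetch_sw_netstats(struct rtnl_link_stats64 *s,
				      const struct pcpu_sw_netstats __percpu *netstats)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		const struct pcpu_sw_netstats *stats;
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		stats = per_cpu_ptr(netstats, cpu);
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			rx_bytes   = stats->rx_bytes;
			tx_packets = stats->tx_packets;
			tx_bytes   = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += rx_packets;
		s->rx_bytes   += rx_bytes;
		s->tx_packets += tx_packets;
		s->tx_bytes   += tx_bytes;
	}
}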
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 1368d1d6a114..c3dbe64e628e 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -473,6 +473,10 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/mdio/Kconfig"
+
+source "drivers/net/pcs/Kconfig"
+
source "drivers/net/plip/Kconfig"
source "drivers/net/ppp/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 94b60800887a..72e18d505d1a 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -21,6 +21,8 @@ obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += mdio/
+obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1c6c27f35ac4..ba8e70a8e312 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -70,6 +70,8 @@ static const char *version =
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <net/Space.h>
+
#include <asm/io.h>
#include <asm/dma.h>
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 75a5a9b87c5a..c6f73aa3700c 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -229,6 +229,8 @@ static int dma;
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <net/Space.h>
+
#include <asm/dma.h>
#include <asm/io.h>
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 841910f1db65..ff0bea1554f9 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -54,7 +54,6 @@ struct bareudp_dev {
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
- struct pcpu_sw_netstats *stats;
struct bareudp_dev *bareudp;
unsigned short family;
unsigned int len;
@@ -160,13 +159,9 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
len = skb->len;
err = gro_cells_receive(&bareudp->gro_cells, skb);
- if (likely(err == NET_RX_SUCCESS)) {
- stats = this_cpu_ptr(bareudp->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
- u64_stats_update_end(&stats->syncp);
- }
+ if (likely(err == NET_RX_SUCCESS))
+ dev_sw_netstats_rx_add(bareudp->dev, len);
+
return 0;
drop:
/* Consume bad packet */
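Likewise, a sketch of what dev_sw_netstats_rx_add() does, equivalent to the open-coded update it replaces here (function name hypothetical):

/* Sketch of dev_sw_netstats_rx_add() semantics: bump this CPU's rx
 * counters inside a u64_stats critical section, as the removed code did.
 */
static void example_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}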
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index c245edddb48b..a77124bc1f4b 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -20,25 +20,6 @@ config CAIF_TTY
identified as N_CAIF. When this ldisc is opened from user space
it will redirect the TTY's traffic into the CAIF stack.
-config CAIF_SPI_SLAVE
- tristate "CAIF SPI transport driver for slave interface"
- depends on CAIF && HAS_DMA
- default n
- help
- The CAIF Link layer SPI Protocol driver for Slave SPI interface.
- This driver implements a platform driver to accommodate for a
- platform specific SPI device. A sample CAIF SPI Platform device is
- provided in <file:Documentation/networking/caif/spi_porting.rst>.
-
-config CAIF_SPI_SYNC
- bool "Next command and length in start of frame"
- depends on CAIF_SPI_SLAVE
- default n
- help
- Putting the next command and length in the start of the frame can
- help to synchronize to the next transfer in case of over or under-runs.
- This option also needs to be enabled on the modem.
-
config CAIF_HSI
tristate "CAIF HSI transport driver"
depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 54ae1165d60a..b1918c8c126c 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -4,10 +4,6 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
-# SPI slave physical interfaces module
-cfspi_slave-objs := caif_spi.o caif_spi_slave.o
-obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
-
# HSI interface
obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 4a33ec4fc089..3d63b15bbaa1 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -458,15 +458,7 @@ static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb_reset_mac_header(skb);
skb->dev = cfhsi->ndev;
- /*
- * We are in a callback handler and
- * unfortunately we don't know what context we're
- * running in.
- */
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
/* Update network statistics. */
cfhsi->ndev->stats.rx_packets++;
@@ -587,14 +579,7 @@ static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
skb_reset_mac_header(skb);
skb->dev = cfhsi->ndev;
- /*
- * We're called in callback from HSI
- * and don't know the context we're running in.
- */
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
/* Update network statistics. */
cfhsi->ndev->stats.rx_packets++;
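A sketch of netif_rx_any_context(), matching the in_interrupt() branch it replaces in both hunks above (function name hypothetical):

/* Sketch: pick the interrupt-safe or process-context receive path
 * depending on where the caller runs, exactly as the removed code did.
 */
static int example_rx_any_context(struct sk_buff *skb)
{
	if (in_interrupt())
		return netif_rx(skb);
	return netif_rx_ni(skb);
}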
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
deleted file mode 100644
index 7d5899626130..000000000000
--- a/drivers/net/caif/caif_spi.c
+++ /dev/null
@@ -1,874 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/workqueue.h>
-#include <linux/completion.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/debugfs.h>
-#include <linux/if_arp.h>
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_spi.h>
-
-#ifndef CONFIG_CAIF_SPI_SYNC
-#define FLAVOR "Flavour: Vanilla.\n"
-#else
-#define FLAVOR "Flavour: Master CMD&LEN at start.\n"
-#endif /* CONFIG_CAIF_SPI_SYNC */
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Daniel Martensson");
-MODULE_DESCRIPTION("CAIF SPI driver");
-
-/* Returns the number of padding bytes for alignment. */
-#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
-
-static bool spi_loop;
-module_param(spi_loop, bool, 0444);
-MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
-
-/* SPI frame alignment. */
-module_param(spi_frm_align, int, 0444);
-MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
-
-/*
- * SPI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-module_param(spi_up_head_align, int, 0444);
-MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
-
-module_param(spi_up_tail_align, int, 0444);
-MODULE_PARM_DESC(spi_up_tail_align, "SPI uplink tail alignment.");
-
-module_param(spi_down_head_align, int, 0444);
-MODULE_PARM_DESC(spi_down_head_align, "SPI downlink head alignment.");
-
-module_param(spi_down_tail_align, int, 0444);
-MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment.");
-
-#ifdef CONFIG_ARM
-#define BYTE_HEX_FMT "%02X"
-#else
-#define BYTE_HEX_FMT "%02hhX"
-#endif
-
-#define SPI_MAX_PAYLOAD_SIZE 4096
-/*
- * Threshold values for the SPI packet queue. Flowcontrol will be asserted
- * when the number of packets exceeds HIGH_WATER_MARK. It will not be
- * deasserted before the number of packets drops below LOW_WATER_MARK.
- */
-#define LOW_WATER_MARK 100
-#define HIGH_WATER_MARK (LOW_WATER_MARK*5)
-
-#ifndef CONFIG_HAS_DMA
-
-/*
- * We sometimes use UML for debugging, but it cannot handle
- * dma_alloc_coherent so we have to wrap it.
- */
-static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
-{
- return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL);
-}
-
-static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
- dma_addr_t handle)
-{
- kfree(cpu_addr);
-}
-
-#else
-
-static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr)
-{
- return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr,
- GFP_KERNEL);
-}
-
-static inline void dma_free(struct cfspi *cfspi, void *cpu_addr,
- dma_addr_t handle)
-{
- dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle);
-}
-#endif /* CONFIG_HAS_DMA */
-
-#ifdef CONFIG_DEBUG_FS
-
-#define DEBUGFS_BUF_SIZE 4096
-
-static struct dentry *dbgfs_root;
-
-static inline void driver_debugfs_create(void)
-{
- dbgfs_root = debugfs_create_dir(cfspi_spi_driver.driver.name, NULL);
-}
-
-static inline void driver_debugfs_remove(void)
-{
- debugfs_remove(dbgfs_root);
-}
-
-static inline void dev_debugfs_rem(struct cfspi *cfspi)
-{
- debugfs_remove(cfspi->dbgfs_frame);
- debugfs_remove(cfspi->dbgfs_state);
- debugfs_remove(cfspi->dbgfs_dir);
-}
-
-static ssize_t dbgfs_state(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buf;
- int len = 0;
- ssize_t size;
- struct cfspi *cfspi = file->private_data;
-
- buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- /* Print out debug information. */
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "CAIF SPI debug information:\n");
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), FLAVOR);
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "STATE: %d\n", cfspi->dbg_state);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Previous CMD: 0x%x\n", cfspi->pcmd);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current CMD: 0x%x\n", cfspi->cmd);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Previous TX len: %d\n", cfspi->tx_ppck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Previous RX len: %d\n", cfspi->rx_ppck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current TX len: %d\n", cfspi->tx_cpck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current RX len: %d\n", cfspi->rx_cpck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Next TX len: %d\n", cfspi->tx_npck_len);
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Next RX len: %d\n", cfspi->rx_npck_len);
-
- if (len > DEBUGFS_BUF_SIZE)
- len = DEBUGFS_BUF_SIZE;
-
- size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return size;
-}
-
-static ssize_t print_frame(char *buf, size_t size, char *frm,
- size_t count, size_t cut)
-{
- int len = 0;
- int i;
- for (i = 0; i < count; i++) {
- len += scnprintf((buf + len), (size - len),
- "[0x" BYTE_HEX_FMT "]",
- frm[i]);
- if ((i == cut) && (count > (cut * 2))) {
- /* Fast forward. */
- i = count - cut;
- len += scnprintf((buf + len), (size - len),
- "--- %zu bytes skipped ---\n",
- count - (cut * 2));
- }
-
- if ((!(i % 10)) && i) {
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "\n");
- }
- }
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len), "\n");
- return len;
-}
-
-static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buf;
- int len = 0;
- ssize_t size;
- struct cfspi *cfspi;
-
- cfspi = file->private_data;
- buf = kzalloc(DEBUGFS_BUF_SIZE, GFP_KERNEL);
- if (!buf)
- return 0;
-
- /* Print out debug information. */
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Current frame:\n");
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Tx data (Len: %d):\n", cfspi->tx_cpck_len);
-
- len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_tx[0],
- (cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
-
- len += scnprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
- "Rx data (Len: %d):\n", cfspi->rx_cpck_len);
-
- len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_rx,
- (cfspi->rx_cpck_len + SPI_CMD_SZ), 100);
-
- size = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return size;
-}
-
-static const struct file_operations dbgfs_state_fops = {
- .open = simple_open,
- .read = dbgfs_state,
- .owner = THIS_MODULE
-};
-
-static const struct file_operations dbgfs_frame_fops = {
- .open = simple_open,
- .read = dbgfs_frame,
- .owner = THIS_MODULE
-};
-
-static inline void dev_debugfs_add(struct cfspi *cfspi)
-{
- cfspi->dbgfs_dir = debugfs_create_dir(cfspi->pdev->name, dbgfs_root);
- cfspi->dbgfs_state = debugfs_create_file("state", 0444,
- cfspi->dbgfs_dir, cfspi,
- &dbgfs_state_fops);
- cfspi->dbgfs_frame = debugfs_create_file("frame", 0444,
- cfspi->dbgfs_dir, cfspi,
- &dbgfs_frame_fops);
-}
-
-inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
-{
- cfspi->dbg_state = state;
-};
-#else
-
-static inline void driver_debugfs_create(void)
-{
-}
-
-static inline void driver_debugfs_remove(void)
-{
-}
-
-static inline void dev_debugfs_add(struct cfspi *cfspi)
-{
-}
-
-static inline void dev_debugfs_rem(struct cfspi *cfspi)
-{
-}
-
-inline void cfspi_dbg_state(struct cfspi *cfspi, int state)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
-static LIST_HEAD(cfspi_list);
-static spinlock_t cfspi_list_lock;
-
-/* SPI uplink head alignment. */
-static ssize_t up_head_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_up_head_align);
-}
-
-static DRIVER_ATTR_RO(up_head_align);
-
-/* SPI uplink tail alignment. */
-static ssize_t up_tail_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_up_tail_align);
-}
-
-static DRIVER_ATTR_RO(up_tail_align);
-
-/* SPI downlink head alignment. */
-static ssize_t down_head_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_down_head_align);
-}
-
-static DRIVER_ATTR_RO(down_head_align);
-
-/* SPI downlink tail alignment. */
-static ssize_t down_tail_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_down_tail_align);
-}
-
-static DRIVER_ATTR_RO(down_tail_align);
-
-/* SPI frame alignment. */
-static ssize_t frame_align_show(struct device_driver *driver, char *buf)
-{
- return sprintf(buf, "%d\n", spi_frm_align);
-}
-
-static DRIVER_ATTR_RO(frame_align);
-
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len)
-{
- u8 *dst = buf;
- caif_assert(buf);
-
- if (cfspi->slave && !cfspi->slave_talked)
- cfspi->slave_talked = true;
-
- do {
- struct sk_buff *skb;
- struct caif_payload_info *info;
- int spad = 0;
- int epad;
-
- skb = skb_dequeue(&cfspi->chead);
- if (!skb)
- break;
-
- /*
- * Calculate length of frame including SPI padding.
- * The payload position is found in the control buffer.
- */
- info = (struct caif_payload_info *)&skb->cb;
-
- /*
- * Compute head offset i.e. number of bytes to add to
- * get the start of the payload aligned.
- */
- if (spi_up_head_align > 1) {
- spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
- *dst = (u8)(spad - 1);
- dst += spad;
- }
-
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, dst, skb->len);
- dst += skb->len;
- cfspi->ndev->stats.tx_packets++;
- cfspi->ndev->stats.tx_bytes += skb->len;
-
- /*
- * Compute tail offset i.e. number of bytes to add to
- * get the complete CAIF frame aligned.
- */
- epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
- dst += epad;
-
- dev_kfree_skb(skb);
-
- } while ((dst - buf) < len);
-
- return dst - buf;
-}
-
-int cfspi_xmitlen(struct cfspi *cfspi)
-{
- struct sk_buff *skb = NULL;
- int frm_len = 0;
- int pkts = 0;
-
- /*
- * Decommit previously committed frames.
- * skb_queue_splice_tail(&cfspi->chead,&cfspi->qhead)
- */
- while (skb_peek(&cfspi->chead)) {
- skb = skb_dequeue_tail(&cfspi->chead);
- skb_queue_head(&cfspi->qhead, skb);
- }
-
- do {
- struct caif_payload_info *info = NULL;
- int spad = 0;
- int epad = 0;
-
- skb = skb_dequeue(&cfspi->qhead);
- if (!skb)
- break;
-
- /*
- * Calculate length of frame including SPI padding.
- * The payload position is found in the control buffer.
- */
- info = (struct caif_payload_info *)&skb->cb;
-
- /*
- * Compute head offset i.e. number of bytes to add to
- * get the start of the payload aligned.
- */
- if (spi_up_head_align > 1)
- spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
-
- /*
- * Compute tail offset i.e. number of bytes to add to
- * get the complete CAIF frame aligned.
- */
- epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
-
- if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
- skb_queue_tail(&cfspi->chead, skb);
- pkts++;
- frm_len += skb->len + spad + epad;
- } else {
- /* Put back packet. */
- skb_queue_head(&cfspi->qhead, skb);
- break;
- }
- } while (pkts <= CAIF_MAX_SPI_PKTS);
-
- /*
- * Send flow on if previously sent flow off
- * and now go below the low water mark
- */
- if (cfspi->flow_off_sent && cfspi->qhead.qlen < cfspi->qd_low_mark &&
- cfspi->cfdev.flowctrl) {
- cfspi->flow_off_sent = 0;
- cfspi->cfdev.flowctrl(cfspi->ndev, 1);
- }
-
- return frm_len;
-}
-
-static void cfspi_ss_cb(bool assert, struct cfspi_ifc *ifc)
-{
- struct cfspi *cfspi = (struct cfspi *)ifc->priv;
-
- /*
- * The slave device is the master on the link. Interrupts before the
- * slave has transmitted are considered spurious.
- */
- if (cfspi->slave && !cfspi->slave_talked) {
- printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
- return;
- }
-
- if (!in_interrupt())
- spin_lock(&cfspi->lock);
- if (assert) {
- set_bit(SPI_SS_ON, &cfspi->state);
- set_bit(SPI_XFER, &cfspi->state);
- } else {
- set_bit(SPI_SS_OFF, &cfspi->state);
- }
- if (!in_interrupt())
- spin_unlock(&cfspi->lock);
-
- /* Wake up the xfer thread. */
- if (assert)
- wake_up_interruptible(&cfspi->wait);
-}
-
-static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
-{
- struct cfspi *cfspi = (struct cfspi *)ifc->priv;
-
- /* Transfer done, complete work queue */
- complete(&cfspi->comp);
-}
-
-static netdev_tx_t cfspi_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct cfspi *cfspi = NULL;
- unsigned long flags;
- if (!dev)
- return -EINVAL;
-
- cfspi = netdev_priv(dev);
-
- skb_queue_tail(&cfspi->qhead, skb);
-
- spin_lock_irqsave(&cfspi->lock, flags);
- if (!test_and_set_bit(SPI_XFER, &cfspi->state)) {
- /* Wake up xfer thread. */
- wake_up_interruptible(&cfspi->wait);
- }
- spin_unlock_irqrestore(&cfspi->lock, flags);
-
- /* Send flow off if number of bytes is above high water mark */
- if (!cfspi->flow_off_sent &&
- cfspi->qhead.qlen > cfspi->qd_high_mark &&
- cfspi->cfdev.flowctrl) {
- cfspi->flow_off_sent = 1;
- cfspi->cfdev.flowctrl(cfspi->ndev, 0);
- }
-
- return NETDEV_TX_OK;
-}
-
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
-{
- u8 *src = buf;
-
- caif_assert(buf != NULL);
-
- do {
- int res;
- struct sk_buff *skb = NULL;
- int spad = 0;
- int epad = 0;
- int pkt_len = 0;
-
- /*
- * Compute head offset i.e. number of bytes added to
- * get the start of the payload aligned.
- */
- if (spi_down_head_align > 1) {
- spad = 1 + *src;
- src += spad;
- }
-
- /* Read length of CAIF frame (little endian). */
- pkt_len = *src;
- pkt_len |= ((*(src+1)) << 8) & 0xFF00;
- pkt_len += 2; /* Add FCS fields. */
-
- /* Get a suitable caif packet and copy in data. */
-
- skb = netdev_alloc_skb(cfspi->ndev, pkt_len + 1);
- caif_assert(skb != NULL);
-
- skb_put_data(skb, src, pkt_len);
- src += pkt_len;
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
-
- /*
- * Push received packet up the stack.
- */
- if (!spi_loop)
- res = netif_rx_ni(skb);
- else
- res = cfspi_xmit(skb, cfspi->ndev);
-
- if (!res) {
- cfspi->ndev->stats.rx_packets++;
- cfspi->ndev->stats.rx_bytes += pkt_len;
- } else
- cfspi->ndev->stats.rx_dropped++;
-
- /*
- * Compute tail offset i.e. number of bytes added to
- * get the complete CAIF frame aligned.
- */
- epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
- src += epad;
- } while ((src - buf) < len);
-
- return src - buf;
-}
-
-static int cfspi_open(struct net_device *dev)
-{
- netif_wake_queue(dev);
- return 0;
-}
-
-static int cfspi_close(struct net_device *dev)
-{
- netif_stop_queue(dev);
- return 0;
-}
-
-static int cfspi_init(struct net_device *dev)
-{
- int res = 0;
- struct cfspi *cfspi = netdev_priv(dev);
-
- /* Set flow info. */
- cfspi->flow_off_sent = 0;
- cfspi->qd_low_mark = LOW_WATER_MARK;
- cfspi->qd_high_mark = HIGH_WATER_MARK;
-
- /* Set slave info. */
- if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
- cfspi->slave = true;
- cfspi->slave_talked = false;
- } else {
- cfspi->slave = false;
- cfspi->slave_talked = false;
- }
-
- /* Allocate DMA buffers. */
- cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]);
- if (!cfspi->xfer.va_tx[0]) {
- res = -ENODEV;
- goto err_dma_alloc_tx_0;
- }
-
- cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx);
-
- if (!cfspi->xfer.va_rx) {
- res = -ENODEV;
- goto err_dma_alloc_rx;
- }
-
- /* Initialize the work queue. */
- INIT_WORK(&cfspi->work, cfspi_xfer);
-
- /* Initialize spin locks. */
- spin_lock_init(&cfspi->lock);
-
- /* Initialize flow control state. */
- cfspi->flow_stop = false;
-
- /* Initialize wait queue. */
- init_waitqueue_head(&cfspi->wait);
-
- /* Create work thread. */
- cfspi->wq = create_singlethread_workqueue(dev->name);
- if (!cfspi->wq) {
- printk(KERN_WARNING "CFSPI: failed to create work queue.\n");
- res = -ENODEV;
- goto err_create_wq;
- }
-
- /* Initialize work queue. */
- init_completion(&cfspi->comp);
-
- /* Create debugfs entries. */
- dev_debugfs_add(cfspi);
-
- /* Set up the ifc. */
- cfspi->ifc.ss_cb = cfspi_ss_cb;
- cfspi->ifc.xfer_done_cb = cfspi_xfer_done_cb;
- cfspi->ifc.priv = cfspi;
-
- /* Add CAIF SPI device to list. */
- spin_lock(&cfspi_list_lock);
- list_add_tail(&cfspi->list, &cfspi_list);
- spin_unlock(&cfspi_list_lock);
-
- /* Schedule the work queue. */
- queue_work(cfspi->wq, &cfspi->work);
-
- return 0;
-
- err_create_wq:
- dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
- dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
- err_dma_alloc_tx_0:
- return res;
-}
-
-static void cfspi_uninit(struct net_device *dev)
-{
- struct cfspi *cfspi = netdev_priv(dev);
-
- /* Remove from list. */
- spin_lock(&cfspi_list_lock);
- list_del(&cfspi->list);
- spin_unlock(&cfspi_list_lock);
-
- cfspi->ndev = NULL;
- /* Free DMA buffers. */
- dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- /* Destroy debugfs directory and files. */
- dev_debugfs_rem(cfspi);
- return;
-}
-
-static const struct net_device_ops cfspi_ops = {
- .ndo_open = cfspi_open,
- .ndo_stop = cfspi_close,
- .ndo_init = cfspi_init,
- .ndo_uninit = cfspi_uninit,
- .ndo_start_xmit = cfspi_xmit
-};
-
-static void cfspi_setup(struct net_device *dev)
-{
- struct cfspi *cfspi = netdev_priv(dev);
- dev->features = 0;
- dev->netdev_ops = &cfspi_ops;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->priv_flags |= IFF_NO_QUEUE;
- dev->mtu = SPI_MAX_PAYLOAD_SIZE;
- dev->needs_free_netdev = true;
- skb_queue_head_init(&cfspi->qhead);
- skb_queue_head_init(&cfspi->chead);
- cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfspi->cfdev.use_frag = false;
- cfspi->cfdev.use_stx = false;
- cfspi->cfdev.use_fcs = false;
- cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
- struct cfspi *cfspi = NULL;
- struct net_device *ndev;
- struct cfspi_dev *dev;
- int res;
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
- if (!dev)
- return -ENODEV;
-
- ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
- NET_NAME_UNKNOWN, cfspi_setup);
- if (!ndev)
- return -ENOMEM;
-
- cfspi = netdev_priv(ndev);
- netif_stop_queue(ndev);
- cfspi->ndev = ndev;
- cfspi->pdev = pdev;
-
- /* Assign the SPI device. */
- cfspi->dev = dev;
- /* Assign the device ifc to this SPI interface. */
- dev->ifc = &cfspi->ifc;
-
- /* Register network device. */
- res = register_netdev(ndev);
- if (res) {
- printk(KERN_ERR "CFSPI: Reg. error: %d.\n", res);
- goto err_net_reg;
- }
- return res;
-
- err_net_reg:
- free_netdev(ndev);
-
- return res;
-}
-
-int cfspi_spi_remove(struct platform_device *pdev)
-{
- /* Everything is done in cfspi_uninit(). */
- return 0;
-}
-
-static void __exit cfspi_exit_module(void)
-{
- struct list_head *list_node;
- struct list_head *n;
- struct cfspi *cfspi = NULL;
-
- list_for_each_safe(list_node, n, &cfspi_list) {
- cfspi = list_entry(list_node, struct cfspi, list);
- unregister_netdev(cfspi->ndev);
- }
-
- /* Destroy sysfs files. */
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_head_align);
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_tail_align);
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_head_align);
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_tail_align);
- driver_remove_file(&cfspi_spi_driver.driver, &driver_attr_frame_align);
- /* Unregister platform driver. */
- platform_driver_unregister(&cfspi_spi_driver);
- /* Destroy debugfs root directory. */
- driver_debugfs_remove();
-}
-
-static int __init cfspi_init_module(void)
-{
- int result;
-
- /* Initialize spin lock. */
- spin_lock_init(&cfspi_list_lock);
-
- /* Register platform driver. */
- result = platform_driver_register(&cfspi_spi_driver);
- if (result) {
- printk(KERN_ERR "Could not register platform SPI driver.\n");
- goto err_dev_register;
- }
-
- /* Create sysfs files. */
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_up_head_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 1.\n");
- goto err_create_up_head_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_up_tail_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 2.\n");
- goto err_create_up_tail_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_down_head_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 3.\n");
- goto err_create_down_head_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_down_tail_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 4.\n");
- goto err_create_down_tail_align;
- }
-
- result =
- driver_create_file(&cfspi_spi_driver.driver,
- &driver_attr_frame_align);
- if (result) {
- printk(KERN_ERR "Sysfs creation failed 5.\n");
- goto err_create_frame_align;
- }
- driver_debugfs_create();
- return result;
-
- err_create_frame_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_tail_align);
- err_create_down_tail_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_down_head_align);
- err_create_down_head_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_tail_align);
- err_create_up_tail_align:
- driver_remove_file(&cfspi_spi_driver.driver,
- &driver_attr_up_head_align);
- err_create_up_head_align:
- platform_driver_unregister(&cfspi_spi_driver);
- err_dev_register:
- return result;
-}
-
-module_init(cfspi_init_module);
-module_exit(cfspi_exit_module);
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
deleted file mode 100644
index bb776d33dd8f..000000000000
--- a/drivers/net/caif/caif_spi_slave.c
+++ /dev/null
@@ -1,254 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson
- */
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/string.h>
-#include <linux/semaphore.h>
-#include <linux/workqueue.h>
-#include <linux/completion.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/debugfs.h>
-#include <net/caif/caif_spi.h>
-
-#ifndef CONFIG_CAIF_SPI_SYNC
-#define SPI_DATA_POS 0
-static inline int forward_to_spi_cmd(struct cfspi *cfspi)
-{
- return cfspi->rx_cpck_len;
-}
-#else
-#define SPI_DATA_POS SPI_CMD_SZ
-static inline int forward_to_spi_cmd(struct cfspi *cfspi)
-{
- return 0;
-}
-#endif
-
-int spi_frm_align = 2;
-
-/*
- * SPI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
-int spi_up_head_align = 1 << 1;
-int spi_up_tail_align = 1 << 0;
-int spi_down_head_align = 1 << 2;
-int spi_down_tail_align = 1 << 1;
-
-#ifdef CONFIG_DEBUG_FS
-static inline void debugfs_store_prev(struct cfspi *cfspi)
-{
- /* Store previous command for debugging reasons.*/
- cfspi->pcmd = cfspi->cmd;
- /* Store previous transfer. */
- cfspi->tx_ppck_len = cfspi->tx_cpck_len;
- cfspi->rx_ppck_len = cfspi->rx_cpck_len;
-}
-#else
-static inline void debugfs_store_prev(struct cfspi *cfspi)
-{
-}
-#endif
-
-void cfspi_xfer(struct work_struct *work)
-{
- struct cfspi *cfspi;
- u8 *ptr = NULL;
- unsigned long flags;
- int ret;
- cfspi = container_of(work, struct cfspi, work);
-
- /* Initialize state. */
- cfspi->cmd = SPI_CMD_EOT;
-
- for (;;) {
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING);
-
- /* Wait for master talk or transmit event. */
- wait_event_interruptible(cfspi->wait,
- test_bit(SPI_XFER, &cfspi->state) ||
- test_bit(SPI_TERMINATE, &cfspi->state));
-
- if (test_bit(SPI_TERMINATE, &cfspi->state))
- return;
-
-#if CFSPI_DBG_PREFILL
- /* Prefill buffers for easier debugging. */
- memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN);
- memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN);
-#endif /* CFSPI_DBG_PREFILL */
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE);
-
- /* Check whether we have a committed frame. */
- if (cfspi->tx_cpck_len) {
- int len;
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT);
-
- /* Copy committed SPI frames after the SPI indication. */
- ptr = (u8 *) cfspi->xfer.va_tx;
- ptr += SPI_IND_SZ;
- len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len);
- WARN_ON(len != cfspi->tx_cpck_len);
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT);
-
- /* Get length of next frame to commit. */
- cfspi->tx_npck_len = cfspi_xmitlen(cfspi);
-
- WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN);
-
- /*
- * Add indication and length at the beginning of the frame,
- * using little endian.
- */
- ptr = (u8 *) cfspi->xfer.va_tx;
- *ptr++ = SPI_CMD_IND;
- *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8;
- *ptr++ = cfspi->tx_npck_len & 0x00FF;
- *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8;
-
- /* Calculate length of DMAs. */
- cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ;
- cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ;
-
- /* Add SPI TX frame alignment padding, if necessary. */
- if (cfspi->tx_cpck_len &&
- (cfspi->xfer.tx_dma_len % spi_frm_align)) {
-
- cfspi->xfer.tx_dma_len += spi_frm_align -
- (cfspi->xfer.tx_dma_len % spi_frm_align);
- }
-
- /* Add SPI RX frame alignment padding, if necessary. */
- if (cfspi->rx_cpck_len &&
- (cfspi->xfer.rx_dma_len % spi_frm_align)) {
-
- cfspi->xfer.rx_dma_len += spi_frm_align -
- (cfspi->xfer.rx_dma_len % spi_frm_align);
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER);
-
- /* Start transfer. */
- ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev);
- WARN_ON(ret);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE);
-
- /*
- * TODO: We might be able to make an assumption if this is the
- * first loop. Make sure that minimum toggle time is respected.
- */
- udelay(MIN_TRANSITION_TIME_USEC);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE);
-
- /* Signal that we are ready to receive data. */
- cfspi->dev->sig_xfer(true, cfspi->dev);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE);
-
- /* Wait for transfer completion. */
- wait_for_completion(&cfspi->comp);
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE);
-
- if (cfspi->cmd == SPI_CMD_EOT) {
- /*
- * Clear the master talk bit. A xfer is always at
- * least two bursts.
- */
- clear_bit(SPI_SS_ON, &cfspi->state);
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE);
-
- /* Make sure that the minimum toggle time is respected. */
- if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len,
- cfspi->dev->clk_mhz) <
- MIN_TRANSITION_TIME_USEC) {
-
- udelay(MIN_TRANSITION_TIME_USEC -
- SPI_XFER_TIME_USEC
- (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz));
- }
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE);
-
- /* De-assert transfer signal. */
- cfspi->dev->sig_xfer(false, cfspi->dev);
-
- /* Check whether we received a CAIF packet. */
- if (cfspi->rx_cpck_len) {
- int len;
-
- cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT);
-
- /* Parse SPI frame. */
- ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS));
-
- len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len);
- WARN_ON(len != cfspi->rx_cpck_len);
- }
-
- /* Check the next SPI command and length. */
- ptr = (u8 *) cfspi->xfer.va_rx;
-
- ptr += forward_to_spi_cmd(cfspi);
-
- cfspi->cmd = *ptr++;
- cfspi->cmd |= ((*ptr++) << 8) & 0xFF00;
- cfspi->rx_npck_len = *ptr++;
- cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00;
-
- WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN);
- WARN_ON(cfspi->cmd > SPI_CMD_EOT);
-
- debugfs_store_prev(cfspi);
-
- /* Check whether the master issued an EOT command. */
- if (cfspi->cmd == SPI_CMD_EOT) {
- /* Reset state. */
- cfspi->tx_cpck_len = 0;
- cfspi->rx_cpck_len = 0;
- } else {
- /* Update state. */
- cfspi->tx_cpck_len = cfspi->tx_npck_len;
- cfspi->rx_cpck_len = cfspi->rx_npck_len;
- }
-
- /*
- * Check whether we need to clear the xfer bit.
- * Spin lock needed for packet insertion.
- * Test and clear of different bits
- * are not supported.
- */
- spin_lock_irqsave(&cfspi->lock, flags);
- if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi)
- && !test_bit(SPI_SS_ON, &cfspi->state))
- clear_bit(SPI_XFER, &cfspi->state);
-
- spin_unlock_irqrestore(&cfspi->lock, flags);
- }
-}
-
-struct platform_driver cfspi_spi_driver = {
- .probe = cfspi_spi_probe,
- .remove = cfspi_spi_remove,
- .driver = {
- .name = "cfspi_sspi",
- .owner = THIS_MODULE,
- },
-};
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 80ea2e913c2b..47a6d62b7511 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -652,7 +652,7 @@ static int cfv_probe(struct virtio_device *vdev)
const char *cfv_netdev_name = "cfvrt";
struct net_device *netdev;
struct cfv_info *cfv;
- int err = -EINVAL;
+ int err;
netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
NET_NAME_UNKNOWN, cfv_netdev_setup);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f07012a76c0c..424970939fd4 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,8 +41,8 @@ config CAN_SLCAN
www.canusb.com / www.can232.com / www.mictronics.de / www.canhack.de
Userspace tools to attach the SLCAN line discipline (slcan_attach,
- slcand) can be found in the can-utils at the SocketCAN SVN, see
- http://developer.berlios.de/projects/socketcan for details.
+ slcand) can be found in the can-utils at the linux-can project, see
+ https://github.com/linux-can/can-utils for details.
The slcan driver supports up to 10 CAN netdevices by default which
can be changed by the 'maxdev=xx' module option. This driver can
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 38e9f80ed1ef..c14de95d2ca7 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -643,7 +643,7 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* The first message goes into mb nr. 1 and issues an interrupt. All
* rx ints are disabled in the interrupt handler and a napi poll is
- * scheduled. We read the mailbox, but do _not_ reenable the mb (to
+ * scheduled. We read the mailbox, but do _not_ re-enable the mb (to
* receive another message).
*
* lower mbxs upper
@@ -661,13 +661,13 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
*
* The variable priv->rx_next points to the next mailbox to read a
* message from. As long we're in the lower mailboxes we just read the
- * mailbox but not reenable it.
+ * mailbox but not re-enable it.
*
- * With completion of the last of the lower mailboxes, we reenable the
+ * With completion of the last of the lower mailboxes, we re-enable the
* whole first group, but continue to look for filled mailboxes in the
* upper mailboxes. Imagine the second group like overflow mailboxes,
 * which take CAN messages if the lower group is full. While in the
- * upper group we reenable the mailbox right after reading it. Giving
+ * upper group we re-enable the mailbox right after reading it. Giving
* the chip more room to store messages.
*
* After finishing we look again in the lower group if we've still
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 8e9f5620c9a2..1ccdbe89585b 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -356,15 +356,6 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
}
}
-static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
- int iface)
-{
- int i;
-
- for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
- c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
-}
-
static int c_can_handle_lost_msg_obj(struct net_device *dev,
int iface, int objno, u32 ctrl)
{
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index d5567a7c1c6d..92213d3d96eb 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -81,7 +81,7 @@ enum reg {
C_CAN_FUNCTION_REG,
};
-static const u16 reg_map_c_can[] = {
+static const u16 __maybe_unused reg_map_c_can[] = {
[C_CAN_CTRL_REG] = 0x00,
[C_CAN_STS_REG] = 0x02,
[C_CAN_ERR_CNT_REG] = 0x04,
@@ -121,7 +121,7 @@ static const u16 reg_map_c_can[] = {
[C_CAN_MSGVAL2_REG] = 0xB2,
};
-static const u16 reg_map_d_can[] = {
+static const u16 __maybe_unused reg_map_d_can[] = {
[C_CAN_CTRL_REG] = 0x00,
[C_CAN_CTRL_EX_REG] = 0x02,
[C_CAN_STS_REG] = 0x04,
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 7cdc232cbfea..07e2b8df5153 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -538,7 +538,7 @@ static int cc770_err(struct net_device *dev, u8 status)
priv->can.can_stats.error_warning++;
}
} else {
- /* Back to error avtive */
+ /* Back to error active */
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index 948541491ab5..0628fd9e1980 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -184,7 +184,7 @@ struct cc770_priv {
u8 control_normal_mode; /* Control register for normal mode */
u8 cpu_interface; /* CPU interface register */
u8 clkout; /* Clock out register */
- u8 bus_config; /* Bus conffiguration register */
+ u8 bus_config; /* Bus configuration register */
struct sk_buff *tx_skb;
};
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 68834a2853c9..b70ded3760f2 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -60,7 +60,6 @@ EXPORT_SYMBOL_GPL(can_len2dlc);
#ifdef CONFIG_CAN_CALC_BITTIMING
#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
-#define CAN_CALC_SYNC_SEG 1
/* Bit-timing calculation derived from:
*
@@ -86,8 +85,8 @@ can_update_sample_point(const struct can_bittiming_const *btc,
int i;
for (i = 0; i <= 1; i++) {
- tseg2 = tseg + CAN_CALC_SYNC_SEG -
- (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) /
+ tseg2 = tseg + CAN_SYNC_SEG -
+ (sample_point_nominal * (tseg + CAN_SYNC_SEG)) /
1000 - i;
tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
tseg1 = tseg - tseg2;
@@ -96,8 +95,8 @@ can_update_sample_point(const struct can_bittiming_const *btc,
tseg2 = tseg - tseg1;
}
- sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) /
- (tseg + CAN_CALC_SYNC_SEG);
+ sample_point = 1000 * (tseg + CAN_SYNC_SEG - tseg2) /
+ (tseg + CAN_SYNC_SEG);
sample_point_error = abs(sample_point_nominal - sample_point);
if (sample_point <= sample_point_nominal &&
@@ -145,7 +144,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* tseg even = round down, odd = round up */
for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
- tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
+ tsegall = CAN_SYNC_SEG + tseg / 2;
/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
@@ -223,7 +222,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
/* real bitrate */
bt->bitrate = priv->clock.freq /
- (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
+ (bt->brp * (CAN_SYNC_SEG + tseg1 + tseg2));
return 0;
}
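As a worked example of the sample-point arithmetic above (values illustrative, not from the patch):

/* Sketch: the sample point from the formula above, in units of
 * one-tenth of a percent. With tseg1 = 13 and tseg2 = 2 this yields
 * 1000 * (1 + 13) / 16 = 875, i.e. an 87.5% sample point.
 */
static unsigned int example_sample_point(unsigned int tseg1, unsigned int tseg2)
{
	unsigned int tseg = tseg1 + tseg2;

	return 1000 * (tseg + 1 /* CAN_SYNC_SEG */ - tseg2) /
		(tseg + 1 /* CAN_SYNC_SEG */);
}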
@@ -371,6 +370,28 @@ static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
}
}
+static const char *can_get_state_str(const enum can_state state)
+{
+ switch (state) {
+ case CAN_STATE_ERROR_ACTIVE:
+ return "Error Active";
+ case CAN_STATE_ERROR_WARNING:
+ return "Error Warning";
+ case CAN_STATE_ERROR_PASSIVE:
+ return "Error Passive";
+ case CAN_STATE_BUS_OFF:
+ return "Bus Off";
+ case CAN_STATE_STOPPED:
+ return "Stopped";
+ case CAN_STATE_SLEEPING:
+ return "Sleeping";
+ default:
+ return "<unknown>";
+ }
+}

+
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state)
{
@@ -382,7 +403,9 @@ void can_change_state(struct net_device *dev, struct can_frame *cf,
return;
}
- netdev_dbg(dev, "New error state: %d\n", new_state);
+ netdev_dbg(dev, "Controller changed from %s State (%d) into %s State (%d).\n",
+ can_get_state_str(priv->state), priv->state,
+ can_get_state_str(new_state), new_state);
can_update_state_error_stats(dev, new_state);
priv->state = new_state;
@@ -434,8 +457,8 @@ static void can_flush_echo_skb(struct net_device *dev)
* of the device driver. The driver must protect access to
* priv->echo_skb, if necessary.
*/
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx)
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx)
{
struct can_priv *priv = netdev_priv(dev);
@@ -446,13 +469,13 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
(skb->protocol != htons(ETH_P_CAN) &&
skb->protocol != htons(ETH_P_CANFD))) {
kfree_skb(skb);
- return;
+ return 0;
}
if (!priv->echo_skb[idx]) {
skb = can_create_echo_skb(skb);
if (!skb)
- return;
+ return -ENOMEM;
/* make settings for echo to reduce code in irq context */
skb->pkt_type = PACKET_BROADCAST;
@@ -463,9 +486,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
priv->echo_skb[idx] = skb;
} else {
/* locking problem with netif_stop_queue() ?? */
- netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
+ netdev_err(dev, "%s: BUG! echo_skb %d is occupied!\n", __func__, idx);
kfree_skb(skb);
+ return -EBUSY;
}
+
+ return 0;
}
EXPORT_SYMBOL_GPL(can_put_echo_skb);
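With the int return, callers can react to a lost echo slot; a hedged sketch of a driver TX path using it (driver function hypothetical). Note that can_put_echo_skb() consumes the skb on failure, so the error is counted as a drop rather than retried:

/* Sketch: a start_xmit path handling the new return codes
 * (-ENOMEM when the echo clone fails, -EBUSY when the slot
 * is already occupied) instead of silently ignoring them.
 */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (can_put_echo_skb(skb, dev, 0)) {
		/* skb was freed by can_put_echo_skb(); count the drop */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* ... hand the frame to the hardware ... */
	return NETDEV_TX_OK;
}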
@@ -612,7 +638,11 @@ void can_bus_off(struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
- netdev_info(dev, "bus-off\n");
+ if (priv->restart_ms)
+ netdev_info(dev, "bus-off, scheduling restart in %d ms\n",
+ priv->restart_ms);
+ else
+ netdev_info(dev, "bus-off\n");
netif_carrier_off(dev);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 94d10ec954a0..4d594e977497 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -9,7 +9,7 @@
//
// Based on code originally by Andrey Volkov <avolkov@varma-el.com>
-#include <linux/netdevice.h>
+#include <linux/bitfield.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
@@ -21,12 +21,14 @@
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/regulator/consumer.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#define DRV_NAME "flexcan"
@@ -52,6 +54,7 @@
#define FLEXCAN_MCR_IRMQ BIT(16)
#define FLEXCAN_MCR_LPRIO_EN BIT(13)
#define FLEXCAN_MCR_AEN BIT(12)
+#define FLEXCAN_MCR_FDEN BIT(11)
/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
#define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f)
#define FLEXCAN_MCR_IDAM_A (0x0 << 8)
@@ -91,6 +94,7 @@
#define FLEXCAN_CTRL2_MRP BIT(18)
#define FLEXCAN_CTRL2_RRS BIT(17)
#define FLEXCAN_CTRL2_EACEN BIT(16)
+#define FLEXCAN_CTRL2_ISOCANFDEN BIT(12)
/* FLEXCAN memory error control register (MECR) bits */
#define FLEXCAN_MECR_ECRWRDIS BIT(31)
@@ -134,8 +138,35 @@
(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
#define FLEXCAN_ESR_ALL_INT \
(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
- FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT | \
- FLEXCAN_ESR_WAK_INT)
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+
+/* FLEXCAN Bit Timing register (CBT) bits */
+#define FLEXCAN_CBT_BTF BIT(31)
+#define FLEXCAN_CBT_EPRESDIV_MASK GENMASK(30, 21)
+#define FLEXCAN_CBT_ERJW_MASK GENMASK(20, 16)
+#define FLEXCAN_CBT_EPROPSEG_MASK GENMASK(15, 10)
+#define FLEXCAN_CBT_EPSEG1_MASK GENMASK(9, 5)
+#define FLEXCAN_CBT_EPSEG2_MASK GENMASK(4, 0)
+
+/* FLEXCAN FD control register (FDCTRL) bits */
+#define FLEXCAN_FDCTRL_FDRATE BIT(31)
+#define FLEXCAN_FDCTRL_MBDSR1 GENMASK(20, 19)
+#define FLEXCAN_FDCTRL_MBDSR0 GENMASK(17, 16)
+#define FLEXCAN_FDCTRL_MBDSR_8 0x0
+#define FLEXCAN_FDCTRL_MBDSR_12 0x1
+#define FLEXCAN_FDCTRL_MBDSR_32 0x2
+#define FLEXCAN_FDCTRL_MBDSR_64 0x3
+#define FLEXCAN_FDCTRL_TDCEN BIT(15)
+#define FLEXCAN_FDCTRL_TDCFAIL BIT(14)
+#define FLEXCAN_FDCTRL_TDCOFF GENMASK(12, 8)
+#define FLEXCAN_FDCTRL_TDCVAL GENMASK(5, 0)
+
+/* FLEXCAN FD Bit Timing register (FDCBT) bits */
+#define FLEXCAN_FDCBT_FPRESDIV_MASK GENMASK(29, 20)
+#define FLEXCAN_FDCBT_FRJW_MASK GENMASK(18, 16)
+#define FLEXCAN_FDCBT_FPROPSEG_MASK GENMASK(14, 10)
+#define FLEXCAN_FDCBT_FPSEG1_MASK GENMASK(7, 5)
+#define FLEXCAN_FDCBT_FPSEG2_MASK GENMASK(2, 0)
/* FLEXCAN interrupt flag register (IFLAG) bits */
/* Errata ERR005829 step7: Reserve first valid MB */
@@ -161,6 +192,9 @@
#define FLEXCAN_MB_CODE_TX_DATA (0xc << 24)
#define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24)
+#define FLEXCAN_MB_CNT_EDL BIT(31)
+#define FLEXCAN_MB_CNT_BRS BIT(30)
+#define FLEXCAN_MB_CNT_ESI BIT(29)
#define FLEXCAN_MB_CNT_SRR BIT(22)
#define FLEXCAN_MB_CNT_IDE BIT(21)
#define FLEXCAN_MB_CNT_RTR BIT(20)
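The new <linux/bitfield.h> include pairs with the GENMASK() masks above; a hedged sketch of composing the CBT register from them with FIELD_PREP() (helper name hypothetical; the hardware encodes each field as value minus one):

/* Sketch: packing arbitration-phase bit-timing fields into CBT
 * against the masks defined above. Values come from the computed
 * struct can_bittiming.
 */
static u32 example_pack_cbt(const struct can_bittiming *bt)
{
	return FLEXCAN_CBT_BTF |
		FIELD_PREP(FLEXCAN_CBT_EPRESDIV_MASK, bt->brp - 1) |
		FIELD_PREP(FLEXCAN_CBT_ERJW_MASK, bt->sjw - 1) |
		FIELD_PREP(FLEXCAN_CBT_EPROPSEG_MASK, bt->prop_seg - 1) |
		FIELD_PREP(FLEXCAN_CBT_EPSEG1_MASK, bt->phase_seg1 - 1) |
		FIELD_PREP(FLEXCAN_CBT_EPSEG2_MASK, bt->phase_seg2 - 1);
}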
@@ -172,26 +206,42 @@
/* FLEXCAN hardware feature flags
*
* Below is some version info we got:
- * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
- * Filter? connected? Passive detection ception in MB
- * MX25 FlexCAN2 03.00.00.00 no no no no no
- * MX28 FlexCAN2 03.00.04.00 yes yes no no no
- * MX35 FlexCAN2 03.00.00.00 no no no no no
- * MX53 FlexCAN2 03.00.00.00 yes no no no no
- * MX6s FlexCAN3 10.00.12.00 yes yes no no yes
- * VF610 FlexCAN3 ? no yes no yes yes?
- * LS1021A FlexCAN2 03.00.04.00 no yes no no yes
+ * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode
+ * Filter? connected? Passive detection ption in MB Supported?
+ * MX25 FlexCAN2 03.00.00.00 no no no no no no
+ * MX28 FlexCAN2 03.00.04.00 yes yes no no no no
+ * MX35 FlexCAN2 03.00.00.00 no no no no no no
+ * MX53 FlexCAN2 03.00.00.00 yes no no no no no
+ * MX6s FlexCAN3 10.00.12.00 yes yes no no yes no
+ * MX8QM FlexCAN3 03.00.23.00 yes yes no no yes yes
+ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes
+ * VF610 FlexCAN3 ? no yes no yes yes? no
+ * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no
+ * LX2160A FlexCAN3 03.00.23.00 no yes no no yes yes
*
* Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
*/
-#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */
-#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */
-#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
-#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
-#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
-#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
-#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */
-#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8) /* Setup stop mode to support wakeup */
+
+/* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1)
+/* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2)
+/* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3)
+/* Disable non-correctable errors interrupt and freeze mode */
+#define FLEXCAN_QUIRK_DISABLE_MECR BIT(4)
+/* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5)
+/* No interrupt for error passive */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6)
+/* default to BE register access */
+#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7)
+/* Setup stop mode to support wakeup */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE BIT(8)
+/* Support CAN-FD mode */
+#define FLEXCAN_QUIRK_SUPPORT_FD BIT(9)
+/* support memory detection and correction */
+#define FLEXCAN_QUIRK_SUPPORT_ECC BIT(10)
/* Structure of the message buffer */
struct flexcan_mb {
@@ -203,12 +253,12 @@ struct flexcan_mb {
/* Structure of the hardware registers */
struct flexcan_regs {
u32 mcr; /* 0x00 */
- u32 ctrl; /* 0x04 */
+ u32 ctrl; /* 0x04 - Not affected by Soft Reset */
u32 timer; /* 0x08 */
- u32 _reserved1; /* 0x0c */
- u32 rxgmask; /* 0x10 */
- u32 rx14mask; /* 0x14 */
- u32 rx15mask; /* 0x18 */
+ u32 tcr; /* 0x0c */
+ u32 rxgmask; /* 0x10 - Not affected by Soft Reset */
+ u32 rx14mask; /* 0x14 - Not affected by Soft Reset */
+ u32 rx15mask; /* 0x18 - Not affected by Soft Reset */
u32 ecr; /* 0x1c */
u32 esr; /* 0x20 */
u32 imask2; /* 0x24 */
@@ -217,20 +267,24 @@ struct flexcan_regs {
u32 iflag1; /* 0x30 */
union { /* 0x34 */
u32 gfwr_mx28; /* MX28, MX53 */
- u32 ctrl2; /* MX6, VF610 */
+ u32 ctrl2; /* MX6, VF610 - Not affected by Soft Reset */
};
u32 esr2; /* 0x38 */
u32 imeur; /* 0x3c */
u32 lrfr; /* 0x40 */
u32 crcr; /* 0x44 */
u32 rxfgmask; /* 0x48 */
- u32 rxfir; /* 0x4c */
- u32 _reserved3[12]; /* 0x50 */
- u8 mb[2][512]; /* 0x80 */
+ u32 rxfir; /* 0x4c - Not affected by Soft Reset */
+ u32 cbt; /* 0x50 - Not affected by Soft Reset */
+ u32 _reserved2; /* 0x54 */
+ u32 dbg1; /* 0x58 */
+ u32 dbg2; /* 0x5c */
+ u32 _reserved3[8]; /* 0x60 */
+ u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
/* FIFO-mode:
* MB
* 0x080...0x08f 0 RX message buffer
- * 0x090...0x0df 1-5 reserverd
+ * 0x090...0x0df 1-5 reserved
* 0x0e0...0x0ff 6-7 8 entry ID table
* (mx25, mx28, mx35, mx53)
* 0x0e0...0x2df 6-7..37 8..128 entry ID table
@@ -238,10 +292,19 @@ struct flexcan_regs {
* (mx6, vf610)
*/
u32 _reserved4[256]; /* 0x480 */
- u32 rximr[64]; /* 0x880 */
+ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
u32 _reserved5[24]; /* 0x980 */
u32 gfwr_mx6; /* 0x9e0 - MX6 */
- u32 _reserved6[63]; /* 0x9e4 */
+ u32 _reserved6[39]; /* 0x9e4 */
+ u32 _rxfir[6]; /* 0xa80 */
+ u32 _reserved8[2]; /* 0xa98 */
+ u32 _rxmgmask; /* 0xaa0 */
+ u32 _rxfgmask; /* 0xaa4 */
+ u32 _rx14mask; /* 0xaa8 */
+ u32 _rx15mask; /* 0xaac */
+ u32 tx_smb[4]; /* 0xab0 */
+ u32 rx_smb0[4]; /* 0xac0 */
+ u32 rx_smb1[4]; /* 0xad0 */
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
u32 erridpr; /* 0xae8 */
@@ -250,8 +313,18 @@ struct flexcan_regs {
u32 rerrdr; /* 0xaf4 */
u32 rerrsynr; /* 0xaf8 */
u32 errsr; /* 0xafc */
+ u32 _reserved7[64]; /* 0xb00 */
+ u32 fdctrl; /* 0xc00 - Not affected by Soft Reset */
+ u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */
+ u32 fdcrc; /* 0xc08 */
+ u32 _reserved9[199]; /* 0xc0c */
+ u32 tx_smb_fd[18]; /* 0xf28 */
+ u32 rx_smb0_fd[18]; /* 0xf70 */
+ u32 rx_smb1_fd[18]; /* 0xfb8 */
};
+static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
+
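The static_assert() above pins the end of the register map: rx_smb1_fd[] starts at 0xfb8 and holds 18 u32 words, so the struct must be exactly 0x4 * 18 + 0xfb8 = 0x1000 bytes. A sketch of the same compile-time layout check on a toy struct (C11; all names hypothetical):

    #include <assert.h>   /* static_assert, C11 */
    #include <stddef.h>   /* offsetof */
    #include <stdint.h>

    struct toy_regs {
            uint32_t mcr;          /* 0x00 */
            uint32_t ctrl;         /* 0x04 */
            uint32_t _reserved[2]; /* 0x08 */
            uint32_t fdctrl;       /* 0x10 */
    };

    /* compilation fails if a missed field or padding shifts an offset */
    static_assert(offsetof(struct toy_regs, fdctrl) == 0x10, "layout drift");
    static_assert(sizeof(struct toy_regs) == 0x14, "size drift");

    int main(void) { return 0; }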
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
};
@@ -260,8 +333,6 @@ struct flexcan_stop_mode {
struct regmap *gpr;
u8 req_gpr;
u8 req_bit;
- u8 ack_gpr;
- u8 ack_bit;
};
struct flexcan_priv {
@@ -313,6 +384,19 @@ static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
FLEXCAN_QUIRK_SETUP_STOP_MODE,
};
+static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_SUPPORT_FD,
+};
+
+static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+ FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE |
+ FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC,
+};
+
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
@@ -325,6 +409,12 @@ static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
FLEXCAN_QUIRK_USE_OFF_TIMESTAMP,
};
+static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
+ .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+ FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+ FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD,
+};
+
static const struct can_bittiming_const flexcan_bittiming_const = {
.name = DRV_NAME,
.tseg1_min = 4,
@@ -337,6 +427,30 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
.brp_inc = 1,
};
+static const struct can_bittiming_const flexcan_fd_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 96,
+ .tseg2_min = 2,
+ .tseg2_max = 32,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const flexcan_fd_data_bittiming_const = {
+ .name = DRV_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 39,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 1024,
+ .brp_inc = 1,
+};
+
/* FlexCAN module is essentially modelled as a little-endian IP in most
* SoCs, i.e the registers as well as the message buffer areas are
* implemented in a little-endian fashion.
@@ -457,7 +571,6 @@ static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
1 << priv->stm.req_bit, 0);
-
reg_mcr = priv->read(&regs->mcr);
reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
priv->write(reg_mcr, &regs->mcr);
@@ -628,10 +741,10 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
- struct can_frame *cf = (struct can_frame *)skb->data;
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
u32 can_id;
u32 data;
- u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
+ u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_len2dlc(cfd->len)) << 16);
int i;
if (can_dropped_invalid_skb(dev, skb))
@@ -639,18 +752,25 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
netif_stop_queue(dev);
- if (cf->can_id & CAN_EFF_FLAG) {
- can_id = cf->can_id & CAN_EFF_MASK;
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ can_id = cfd->can_id & CAN_EFF_MASK;
ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
} else {
- can_id = (cf->can_id & CAN_SFF_MASK) << 18;
+ can_id = (cfd->can_id & CAN_SFF_MASK) << 18;
}
- if (cf->can_id & CAN_RTR_FLAG)
+ if (cfd->can_id & CAN_RTR_FLAG)
ctrl |= FLEXCAN_MB_CNT_RTR;
- for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
- data = be32_to_cpup((__be32 *)&cf->data[i]);
+ if (can_is_canfd_skb(skb)) {
+ ctrl |= FLEXCAN_MB_CNT_EDL;
+
+ if (cfd->flags & CANFD_BRS)
+ ctrl |= FLEXCAN_MB_CNT_BRS;
+ }
+
+ for (i = 0; i < cfd->len; i += sizeof(u32)) {
+ data = be32_to_cpup((__be32 *)&cfd->data[i]);
priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
}
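Both struct can_frame and struct canfd_frame begin with can_id and a length byte, which is why the TX path can view every outgoing skb as a canfd_frame and branch only where the layouts differ. A hedged userspace analogue of the kernel-internal can_is_canfd_skb() test, distinguishing the two by frame size as on a raw CAN socket:

    #include <linux/can.h>
    #include <stdio.h>

    static void dump_frame(const void *buf, size_t nbytes)
    {
            const struct canfd_frame *cfd = buf;

            if (nbytes == CANFD_MTU)
                    printf("FD frame, id=0x%x len=%u brs=%d\n", cfd->can_id,
                           cfd->len, !!(cfd->flags & CANFD_BRS));
            else if (nbytes == CAN_MTU)
                    /* in classic frames the same byte holds can_dlc */
                    printf("classic frame, id=0x%x dlc=%u\n", cfd->can_id,
                           cfd->len);
    }

    int main(void)
    {
            struct canfd_frame fd = { .can_id = 0x123, .len = 16,
                                      .flags = CANFD_BRS };
            struct can_frame cf = { .can_id = 0x123, .can_dlc = 8 };

            dump_frame(&fd, CANFD_MTU);
            dump_frame(&cf, CAN_MTU);
            return 0;
    }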
@@ -822,7 +942,7 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
struct flexcan_regs __iomem *regs = priv->regs;
struct flexcan_mb __iomem *mb;
struct sk_buff *skb;
- struct can_frame *cf;
+ struct canfd_frame *cfd;
u32 reg_ctrl, reg_id, reg_iflag1;
int i;
@@ -859,8 +979,11 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_ctrl = priv->read(&mb->can_ctrl);
}
- skb = alloc_can_skb(offload->dev, &cf);
- if (!skb) {
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL)
+ skb = alloc_canfd_skb(offload->dev, &cfd);
+ else
+ skb = alloc_can_skb(offload->dev, (struct can_frame **)&cfd);
+ if (unlikely(!skb)) {
skb = ERR_PTR(-ENOMEM);
goto mark_as_read;
}
@@ -870,17 +993,28 @@ static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload,
reg_id = priv->read(&mb->can_id);
if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
- cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+ cfd->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
else
- cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+ cfd->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_EDL) {
+ cfd->len = can_dlc2len(get_canfd_dlc((reg_ctrl >> 16) & 0xf));
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_BRS)
+ cfd->flags |= CANFD_BRS;
+ } else {
+ cfd->len = get_can_dlc((reg_ctrl >> 16) & 0xf);
+
+ if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+ }
- if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
- cf->can_id |= CAN_RTR_FLAG;
- cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+ if (reg_ctrl & FLEXCAN_MB_CNT_ESI)
+ cfd->flags |= CANFD_ESI;
- for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
+ for (i = 0; i < cfd->len; i += sizeof(u32)) {
__be32 data = cpu_to_be32(priv->read(&mb->data[i / sizeof(u32)]));
- *(__be32 *)(cf->data + i) = data;
+ *(__be32 *)(cfd->data + i) = data;
}
mark_as_read:
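On the RX side the on-wire DLC nibble must be converted to a byte count: DLC 0..8 is the length itself, while the FD-only codes 9..15 map non-linearly to 12..64 bytes per ISO 11898-1. A sketch of the lookup behind can_dlc2len():

    #include <stdint.h>
    #include <stdio.h>

    /* CAN FD DLC (0..15) -> payload length in bytes, ISO 11898-1 */
    static const uint8_t dlc2len[] = {
            0, 1, 2, 3, 4, 5, 6, 7, 8,     /* 0..8: same as classic CAN */
            12, 16, 20, 24, 32, 48, 64     /* 9..15: FD-only sizes */
    };

    static uint8_t fd_dlc_to_len(uint8_t dlc)
    {
            return dlc2len[dlc & 0x0f];
    }

    int main(void)
    {
            printf("dlc 15 -> %u bytes\n", fd_dlc_to_len(15)); /* 64 */
            return 0;
    }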
@@ -961,10 +1095,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
reg_esr = priv->read(&regs->esr);
- /* ACK all bus error and state change IRQ sources */
- if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+ /* ACK all bus error, state change and wake IRQ sources */
+ if (reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT)) {
handled = IRQ_HANDLED;
- priv->write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+ priv->write(reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT), &regs->esr);
}
/* state change interrupt or broken error state quirk fix is enabled */
@@ -1019,7 +1153,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
return handled;
}
-static void flexcan_set_bittiming(struct net_device *dev)
+static void flexcan_set_bittiming_ctrl(const struct net_device *dev)
{
const struct flexcan_priv *priv = netdev_priv(dev);
const struct can_bittiming *bt = &priv->can.bittiming;
@@ -1031,10 +1165,7 @@ static void flexcan_set_bittiming(struct net_device *dev)
FLEXCAN_CTRL_RJW(0x3) |
FLEXCAN_CTRL_PSEG1(0x7) |
FLEXCAN_CTRL_PSEG2(0x7) |
- FLEXCAN_CTRL_PROPSEG(0x7) |
- FLEXCAN_CTRL_LPB |
- FLEXCAN_CTRL_SMP |
- FLEXCAN_CTRL_LOM);
+ FLEXCAN_CTRL_PROPSEG(0x7));
reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
@@ -1042,6 +1173,130 @@ static void flexcan_set_bittiming(struct net_device *dev)
FLEXCAN_CTRL_RJW(bt->sjw - 1) |
FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);
+ netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
+ priv->write(reg, &regs->ctrl);
+
+ /* print chip status */
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+ priv->read(&regs->mcr), priv->read(&regs->ctrl));
+}
+
+static void flexcan_set_bittiming_cbt(const struct net_device *dev)
+{
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ struct can_bittiming *dbt = &priv->can.data_bittiming;
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_cbt, reg_fdctrl;
+
+ /* CBT */
+ /* CBT[EPSEG1] is 5 bits wide and CBT[EPROPSEG] is 6 bits
+ * wide. can_calc_bittiming() tries to divide tseg1 equally
+ * between phase_seg1 and prop_seg, which may not fit into
+ * the CBT register. Therefore, if phase_seg1 exceeds the
+ * maximum possible value, move the excess into prop_seg.
+ */
+ if (bt->phase_seg1 > 0x20) {
+ bt->prop_seg += (bt->phase_seg1 - 0x20);
+ bt->phase_seg1 = 0x20;
+ }
+
+ reg_cbt = FLEXCAN_CBT_BTF |
+ FIELD_PREP(FLEXCAN_CBT_EPRESDIV_MASK, bt->brp - 1) |
+ FIELD_PREP(FLEXCAN_CBT_ERJW_MASK, bt->sjw - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPROPSEG_MASK, bt->prop_seg - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPSEG1_MASK, bt->phase_seg1 - 1) |
+ FIELD_PREP(FLEXCAN_CBT_EPSEG2_MASK, bt->phase_seg2 - 1);
+
+ netdev_dbg(dev, "writing cbt=0x%08x\n", reg_cbt);
+ priv->write(reg_cbt, &regs->cbt);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ u32 reg_fdcbt, reg_ctrl2;
+
+ if (bt->brp != dbt->brp)
+ netdev_warn(dev, "Data brp=%d and brp=%d don't match, this may result in a phase error. Consider using different bitrate and/or data bitrate.\n",
+ dbt->brp, bt->brp);
+
+ /* FDCBT */
+ /* FDCBT[FPSEG1] is 3 bits wide and FDCBT[FPROPSEG] is
+ * 5 bits wide. can_calc_bittiming() tries to divide tseg1
+ * equally between phase_seg1 and prop_seg, which may not
+ * fit into the FDCBT register. Therefore, if phase_seg1
+ * exceeds the maximum possible value, move the excess into
+ * prop_seg.
+ */
+ if (dbt->phase_seg1 > 0x8) {
+ dbt->prop_seg += (dbt->phase_seg1 - 0x8);
+ dbt->phase_seg1 = 0x8;
+ }
+
+ reg_fdcbt = priv->read(&regs->fdcbt);
+ reg_fdcbt &= ~(FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, 0x3ff) |
+ FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, 0x7) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, 0x1f) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, 0x7) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, 0x7));
+
+ reg_fdcbt |= FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, dbt->brp - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, dbt->sjw - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, dbt->prop_seg) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, dbt->phase_seg1 - 1) |
+ FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, dbt->phase_seg2 - 1);
+
+ netdev_dbg(dev, "writing fdcbt=0x%08x\n", reg_fdcbt);
+ priv->write(reg_fdcbt, &regs->fdcbt);
+
+ /* CTRL2 */
+ reg_ctrl2 = priv->read(&regs->ctrl2);
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_ISOCANFDEN;
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
+ reg_ctrl2 |= FLEXCAN_CTRL2_ISOCANFDEN;
+
+ netdev_dbg(dev, "writing ctrl2=0x%08x\n", reg_ctrl2);
+ priv->write(reg_ctrl2, &regs->ctrl2);
+ }
+
+ /* FDCTRL */
+ reg_fdctrl = priv->read(&regs->fdctrl);
+ reg_fdctrl &= ~(FLEXCAN_FDCTRL_FDRATE |
+ FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF, 0x1f));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_fdctrl |= FLEXCAN_FDCTRL_FDRATE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
+ /* TDC must be disabled for Loop Back mode */
+ reg_fdctrl &= ~FLEXCAN_FDCTRL_TDCEN;
+ } else {
+ reg_fdctrl |= FLEXCAN_FDCTRL_TDCEN |
+ FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF,
+ ((dbt->phase_seg1 - 1) +
+ dbt->prop_seg + 2) *
+ ((dbt->brp - 1) + 1));
+ }
+ }
+
+ netdev_dbg(dev, "writing fdctrl=0x%08x\n", reg_fdctrl);
+ priv->write(reg_fdctrl, &regs->fdctrl);
+
+ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x ctrl2=0x%08x fdctrl=0x%08x cbt=0x%08x fdcbt=0x%08x\n",
+ __func__,
+ priv->read(&regs->mcr), priv->read(&regs->ctrl),
+ priv->read(&regs->ctrl2), priv->read(&regs->fdctrl),
+ priv->read(&regs->cbt), priv->read(&regs->fdcbt));
+}
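A runnable sketch of the tseg1 redistribution described in the two comments above; the helper name is hypothetical. The TDCOFF value written in the non-loopback branch is ((phase_seg1 - 1) + prop_seg + 2) multiplied by the prescaler, i.e. an offset expressed in minimum time quanta:

    #include <stdio.h>

    /* keep tseg1 (= prop_seg + phase_seg1) constant while honouring
     * the register field's maximum for phase_seg1
     */
    static void clamp_phase_seg1(unsigned int *prop_seg,
                                 unsigned int *phase_seg1,
                                 unsigned int max_phase_seg1)
    {
            if (*phase_seg1 > max_phase_seg1) {
                    *prop_seg += *phase_seg1 - max_phase_seg1;
                    *phase_seg1 = max_phase_seg1;
            }
    }

    int main(void)
    {
            unsigned int prop_seg = 10, phase_seg1 = 40;

            clamp_phase_seg1(&prop_seg, &phase_seg1, 0x20); /* CBT limit */
            printf("prop_seg=%u phase_seg1=%u\n", prop_seg, phase_seg1);
            /* -> prop_seg=18 phase_seg1=32, tseg1 still 50 */
            return 0;
    }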
+
+static void flexcan_set_bittiming(struct net_device *dev)
+{
+ const struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg;
+
+ reg = priv->read(&regs->ctrl);
+ reg &= ~(FLEXCAN_CTRL_LPB | FLEXCAN_CTRL_SMP |
+ FLEXCAN_CTRL_LOM);
+
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
reg |= FLEXCAN_CTRL_LPB;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
@@ -1052,9 +1307,41 @@ static void flexcan_set_bittiming(struct net_device *dev)
netdev_dbg(dev, "writing ctrl=0x%08x\n", reg);
priv->write(reg, &regs->ctrl);
- /* print chip status */
- netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
- priv->read(&regs->mcr), priv->read(&regs->ctrl));
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD)
+ return flexcan_set_bittiming_cbt(dev);
+ else
+ return flexcan_set_bittiming_ctrl(dev);
+}
+
+static void flexcan_ram_init(struct net_device *dev)
+{
+ struct flexcan_priv *priv = netdev_priv(dev);
+ struct flexcan_regs __iomem *regs = priv->regs;
+ u32 reg_ctrl2;
+
+ /* 11.8.3.13 Detection and correction of memory errors:
+ * CTRL2[WRMFRZ] grants write access to all memory positions
+ * that require initialization, ranging from 0x080 to 0xADF
+ * and from 0xF28 to 0xFFF when the CAN FD feature is enabled.
+ * The RXMGMASK, RX14MASK, RX15MASK, and RXFGMASK registers
+ * need to be initialized as well. MCR[RFEN] must not be set
+ * during memory initialization.
+ */
+ reg_ctrl2 = priv->read(&regs->ctrl2);
+ reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ;
+ priv->write(reg_ctrl2, &regs->ctrl2);
+
+ memset_io(&regs->mb[0][0], 0,
+ offsetof(struct flexcan_regs, rx_smb1[3]) -
+ offsetof(struct flexcan_regs, mb[0][0]) + 0x4);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ memset_io(&regs->tx_smb_fd[0], 0,
+ offsetof(struct flexcan_regs, rx_smb1_fd[17]) -
+ offsetof(struct flexcan_regs, tx_smb_fd[0]) + 0x4);
+
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ;
+ priv->write(reg_ctrl2, &regs->ctrl2);
}
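The memset_io() sizes above are inclusive byte spans computed with offsetof(): the last element's offset minus the first element's offset, plus the last element's size (the trailing + 0x4). A self-contained sketch on a toy struct:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_regs {
            uint8_t  mb[2][512];
            uint32_t tail[4];
    };

    int main(void)
    {
            size_t len = offsetof(struct toy_regs, tail[3]) -
                         offsetof(struct toy_regs, mb[0][0]) +
                         sizeof(uint32_t);

            printf("%zu bytes\n", len); /* 1024 + 4 * 4 = 1040 */
            return 0;
    }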
/* flexcan_chip_start
@@ -1081,6 +1368,9 @@ static int flexcan_chip_start(struct net_device *dev)
if (err)
goto out_chip_disable;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_ECC)
+ flexcan_ram_init(dev);
+
flexcan_set_bittiming(dev);
/* MCR
@@ -1127,6 +1417,12 @@ static int flexcan_chip_start(struct net_device *dev)
else
reg_mcr |= FLEXCAN_MCR_SRX_DIS;
+ /* MCR - CAN-FD */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ reg_mcr |= FLEXCAN_MCR_FDEN;
+ else
+ reg_mcr &= ~FLEXCAN_MCR_FDEN;
+
netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
priv->write(reg_mcr, &regs->mcr);
@@ -1169,6 +1465,32 @@ static int flexcan_chip_start(struct net_device *dev)
priv->write(reg_ctrl2, &regs->ctrl2);
}
+ if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ u32 reg_fdctrl;
+
+ reg_fdctrl = priv->read(&regs->fdctrl);
+ reg_fdctrl &= ~(FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, 0x3) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, 0x3));
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
+ reg_fdctrl |=
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1,
+ FLEXCAN_FDCTRL_MBDSR_64) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0,
+ FLEXCAN_FDCTRL_MBDSR_64);
+ } else {
+ reg_fdctrl |=
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1,
+ FLEXCAN_FDCTRL_MBDSR_8) |
+ FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0,
+ FLEXCAN_FDCTRL_MBDSR_8);
+ }
+
+ netdev_dbg(dev, "%s: writing fdctrl=0x%08x",
+ __func__, reg_fdctrl);
+ priv->write(reg_fdctrl, &regs->fdctrl);
+ }
+
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
mb = flexcan_get_mb(priv, i);
@@ -1204,28 +1526,43 @@ static int flexcan_chip_start(struct net_device *dev)
for (i = 0; i < priv->mb_count; i++)
priv->write(0, &regs->rximr[i]);
- /* On Vybrid, disable memory error detection interrupts
- * and freeze mode.
- * This also works around errata e5295 which generates
- * false positive memory errors and put the device in
- * freeze mode.
+ /* On Vybrid, disable non-correctable errors interrupt and
+ * freeze mode. Correctable errors are still corrected when
+ * the hardware supports ECC.
+ *
+ * This also works around errata e5295, which generates false
+ * positive memory errors and puts the device in freeze mode.
*/
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
/* Follow the protocol as described in "Detection
* and Correction of Memory Errors" to write to
- * MECR register
+ * MECR register (step 1 - 5)
+ *
+ * 1. By default, CTRL2[ECRWRE] = 0, MECR[ECRWRDIS] = 1
+ * 2. set CTRL2[ECRWRE]
*/
reg_ctrl2 = priv->read(&regs->ctrl2);
reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
priv->write(reg_ctrl2, &regs->ctrl2);
+ /* 3. clear MECR[ECRWRDIS] */
reg_mecr = priv->read(&regs->mecr);
reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
priv->write(reg_mecr, &regs->mecr);
- reg_mecr |= FLEXCAN_MECR_ECCDIS;
+
+ /* 4. all writes to MECR must keep MECR[ECRWRDIS] cleared */
reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
FLEXCAN_MECR_FANCEI_MSK);
priv->write(reg_mecr, &regs->mecr);
+
+ /* 5. after configuration done, lock MECR by either
+ * setting MECR[ECRWRDIS] or clearing CTRL2[ECRWRE]
+ */
+ reg_mecr |= FLEXCAN_MECR_ECRWRDIS;
+ priv->write(reg_mecr, &regs->mecr);
+
+ reg_ctrl2 &= ~FLEXCAN_CTRL2_ECRWRE;
+ priv->write(reg_ctrl2, &regs->ctrl2);
}
err = flexcan_transceiver_enable(priv);
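A hedged, self-contained model of the guarded-register protocol spelled out in steps 1-5 above: MECR only accepts writes while CTRL2[ECRWRE] is set and MECR[ECRWRDIS] is clear. Bit positions and the reg_read()/reg_write() helpers are illustrative, not the FlexCAN ones:

    #include <stdint.h>
    #include <stdio.h>

    enum { CTRL2, MECR };
    #define ECRWRE   (1u << 29) /* illustrative bit positions */
    #define ECRWRDIS (1u << 31)

    static uint32_t fake_regs[2] = { [MECR] = ECRWRDIS }; /* locked at reset */

    static uint32_t reg_read(int r) { return fake_regs[r]; }
    static void reg_write(int r, uint32_t v) { fake_regs[r] = v; }

    static void update_guarded_reg(uint32_t clear_bits)
    {
            uint32_t ctrl2 = reg_read(CTRL2);
            uint32_t mecr;

            reg_write(CTRL2, ctrl2 | ECRWRE);  /* 2. open the write window */

            mecr = reg_read(MECR) & ~ECRWRDIS; /* 3. unlock MECR */
            reg_write(MECR, mecr);

            mecr &= ~clear_bits;               /* 4. update, lock kept clear */
            reg_write(MECR, mecr);

            reg_write(MECR, mecr | ECRWRDIS);  /* 5. relock MECR... */
            reg_write(CTRL2, ctrl2 & ~ECRWRE); /* ...and close the window */
    }

    int main(void)
    {
            update_guarded_reg(1u << 8);
            printf("mecr=0x%08x\n", reg_read(MECR));
            return 0;
    }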
@@ -1260,18 +1597,23 @@ static int flexcan_chip_start(struct net_device *dev)
return err;
}
-/* flexcan_chip_stop
+/* __flexcan_chip_stop
*
- * this functions is entered with clocks enabled
+ * this function is entered with clocks enabled
*/
-static void flexcan_chip_stop(struct net_device *dev)
+static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->regs;
+ int err;
/* freeze + disable module */
- flexcan_chip_freeze(priv);
- flexcan_chip_disable(priv);
+ err = flexcan_chip_freeze(priv);
+ if (err && !disable_on_error)
+ return err;
+ err = flexcan_chip_disable(priv);
+ if (err && !disable_on_error)
+ goto out_chip_unfreeze;
/* Disable all interrupts */
priv->write(0, &regs->imask2);
@@ -1281,6 +1623,23 @@ static void flexcan_chip_stop(struct net_device *dev)
flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
+
+ return 0;
+
+ out_chip_unfreeze:
+ flexcan_chip_unfreeze(priv);
+
+ return err;
+}
+
+static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, true);
+}
+
+static inline int flexcan_chip_stop(struct net_device *dev)
+{
+ return __flexcan_chip_stop(dev, false);
}
static int flexcan_open(struct net_device *dev)
@@ -1288,6 +1647,12 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) &&
+ (priv->can.ctrlmode & CAN_CTRLMODE_FD)) {
+ netdev_err(dev, "Three Samples mode and CAN-FD mode can't be used together\n");
+ return -EINVAL;
+ }
+
err = pm_runtime_get_sync(priv->dev);
if (err < 0)
return err;
@@ -1300,7 +1665,10 @@ static int flexcan_open(struct net_device *dev)
if (err)
goto out_close;
- priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN;
+ else
+ priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
(sizeof(priv->regs->mb[1]) / priv->mb_size);
@@ -1362,7 +1730,7 @@ static int flexcan_close(struct net_device *dev)
netif_stop_queue(dev);
can_rx_offload_disable(&priv->offload);
- flexcan_chip_stop(dev);
+ flexcan_chip_stop_disable_on_error(dev);
can_rx_offload_del(&priv->offload);
free_irq(dev->irq, dev);
@@ -1477,14 +1845,14 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
struct device_node *gpr_np;
struct flexcan_priv *priv;
phandle phandle;
- u32 out_val[5];
+ u32 out_val[3];
int ret;
if (!np)
return -EINVAL;
/* stop mode property format is:
- * <&gpr req_gpr req_bit ack_gpr ack_bit>.
+ * <&gpr req_gpr req_bit>.
*/
ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
ARRAY_SIZE(out_val));
@@ -1510,13 +1878,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
priv->stm.req_gpr = out_val[1];
priv->stm.req_bit = out_val[2];
- priv->stm.ack_gpr = out_val[3];
- priv->stm.ack_bit = out_val[4];
dev_dbg(&pdev->dev,
- "gpr %s req_gpr=0x02%x req_bit=%u ack_gpr=0x02%x ack_bit=%u\n",
- gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit,
- priv->stm.ack_gpr, priv->stm.ack_bit);
+ "gpr %s req_gpr=0x02%x req_bit=%u\n",
+ gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit);
device_set_wakeup_capable(&pdev->dev, true);
@@ -1531,6 +1896,8 @@ out_put_node:
}
static const struct of_device_id flexcan_of_match[] = {
+ { .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, },
+ { .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, },
{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
{ .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
@@ -1539,6 +1906,7 @@ static const struct of_device_id flexcan_of_match[] = {
{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
{ .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
+ { .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, flexcan_of_match);
@@ -1562,11 +1930,13 @@ static int flexcan_probe(struct platform_device *pdev)
u8 clk_src = 1;
u32 clock_freq = 0;
- reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+ reg_xceiver = devm_regulator_get_optional(&pdev->dev, "xceiver");
if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- else if (IS_ERR(reg_xceiver))
+ else if (PTR_ERR(reg_xceiver) == -ENODEV)
reg_xceiver = NULL;
+ else if (IS_ERR(reg_xceiver))
+ return PTR_ERR(reg_xceiver);
if (pdev->dev.of_node) {
of_property_read_u32(pdev->dev.of_node,
@@ -1608,6 +1978,12 @@ static int flexcan_probe(struct platform_device *pdev)
return -ENODEV;
}
+ if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
+ !(devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)) {
+ dev_err(&pdev->dev, "CAN-FD mode doesn't work with FIFO mode!\n");
+ return -EINVAL;
+ }
+
dev = alloc_candev(sizeof(struct flexcan_priv), 1);
if (!dev)
return -ENOMEM;
@@ -1632,7 +2008,6 @@ static int flexcan_probe(struct platform_device *pdev)
priv->dev = &pdev->dev;
priv->can.clock.freq = clock_freq;
- priv->can.bittiming_const = &flexcan_bittiming_const;
priv->can.do_set_mode = flexcan_set_mode;
priv->can.do_get_berr_counter = flexcan_get_berr_counter;
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
@@ -1645,6 +2020,16 @@ static int flexcan_probe(struct platform_device *pdev)
priv->devtype_data = devtype_data;
priv->reg_xceiver = reg_xceiver;
+ if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) {
+ priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+ priv->can.bittiming_const = &flexcan_fd_bittiming_const;
+ priv->can.data_bittiming_const =
+ &flexcan_fd_data_bittiming_const;
+ } else {
+ priv->can.bittiming_const = &flexcan_bittiming_const;
+ }
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -1655,6 +2040,7 @@ static int flexcan_probe(struct platform_device *pdev)
goto failed_register;
}
+ of_can_transceiver(dev);
devm_can_led_init(dev);
if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) {
@@ -1666,6 +2052,8 @@ static int flexcan_probe(struct platform_device *pdev)
return 0;
failed_register:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
free_candev(dev);
return err;
}
@@ -1685,7 +2073,7 @@ static int __maybe_unused flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
if (netif_running(dev)) {
/* if wakeup is enabled, enter stop mode
@@ -1697,25 +2085,27 @@ static int __maybe_unused flexcan_suspend(struct device *device)
if (err)
return err;
} else {
- err = flexcan_chip_disable(priv);
+ err = flexcan_chip_stop(dev);
if (err)
return err;
- err = pm_runtime_force_suspend(device);
+ err = pinctrl_pm_select_sleep_state(device);
+ if (err)
+ return err;
}
netif_stop_queue(dev);
netif_device_detach(dev);
}
priv->can.state = CAN_STATE_SLEEPING;
- return err;
+ return 0;
}
static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- int err = 0;
+ int err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
@@ -1727,15 +2117,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
if (err)
return err;
} else {
- err = pm_runtime_force_resume(device);
+ err = pinctrl_pm_select_default_state(device);
if (err)
return err;
- err = flexcan_chip_enable(priv);
+ err = flexcan_chip_start(dev);
+ if (err)
+ return err;
}
}
- return err;
+ return 0;
}
static int __maybe_unused flexcan_runtime_suspend(struct device *device)
@@ -1761,8 +2153,16 @@ static int __maybe_unused flexcan_noirq_suspend(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- if (netif_running(dev) && device_may_wakeup(device))
- flexcan_enable_wakeup_irq(priv, true);
+ if (netif_running(dev)) {
+ int err;
+
+ if (device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, true);
+
+ err = pm_runtime_force_suspend(device);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -1772,8 +2172,16 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
- if (netif_running(dev) && device_may_wakeup(device))
- flexcan_enable_wakeup_irq(priv, false);
+ if (netif_running(dev)) {
+ int err;
+
+ err = pm_runtime_force_resume(device);
+ if (err)
+ return err;
+
+ if (device_may_wakeup(device))
+ flexcan_enable_wakeup_irq(priv, false);
+ }
return 0;
}
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index 378200b682fa..39802f107eb1 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -726,7 +726,7 @@ static void grcan_err(struct net_device *dev, u32 sources, u32 status)
txrx = "on rx ";
stats->rx_errors++;
}
- netdev_err(dev, "Fatal AHB buss error %s- halting device\n",
+ netdev_err(dev, "Fatal AHB bus error %s- halting device\n",
txrx);
spin_lock_irqsave(&priv->lock, flags);
@@ -1243,7 +1243,7 @@ static int grcan_poll(struct napi_struct *napi, int budget)
int rx_budget = budget / 2;
int tx_budget = budget - rx_budget;
- /* Half of the budget for receiveing messages */
+ /* Half of the budget for receiving messages */
rx_work_done = grcan_receive(dev, rx_budget);
/* Half of the budget for transmitting messages as that can trigger echo
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index d9216147ca93..48be627c85c2 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -20,5 +20,5 @@ config CAN_M_CAN_TCAN4X5X
tristate "TCAN4X5X M_CAN device"
help
Say Y here if you want support for Texas Instruments TCAN4x5x
- M_CAN controller. This device is a peripherial device that uses the
+ M_CAN controller. This device is a peripheral device that uses the
SPI bus for communication.
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 38ea5e600fb8..e6d0cb9ee02f 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -144,8 +144,6 @@ static int __maybe_unused m_can_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct m_can_classdev *mcan_class = netdev_priv(ndev);
- m_can_class_suspend(dev);
-
clk_disable_unprepare(mcan_class->cclk);
clk_disable_unprepare(mcan_class->hclk);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index e4f4b5c9ebd6..e254e04ae257 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -5,7 +5,7 @@
* Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <kernel@pengutronix.de>
*/
#include <linux/kernel.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 99101d7027a8..640ba1b356ec 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -209,6 +209,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
* since buffer with lower id have higher priority (hell..)
*/
netif_stop_queue(dev);
+ fallthrough;
case 2:
if (buf_id < priv->prev_buf_id) {
priv->cur_pri++;
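fallthrough is the kernel's pseudo-keyword from linux/compiler_attributes.h; it marks an intentional switch fall-through so -Wimplicit-fallthrough does not warn. A userspace approximation:

    #include <stdio.h>

    #ifndef __has_attribute
    # define __has_attribute(x) 0
    #endif
    #if __has_attribute(__fallthrough__)
    # define fallthrough __attribute__((__fallthrough__))
    #else
    # define fallthrough do {} while (0)
    #endif

    static void report(int pending)
    {
            switch (pending) {
            case 3:
                    printf("queue stopped\n");
                    fallthrough; /* deliberately also run the case 2 logic */
            case 2:
                    printf("bump priority\n");
                    break;
            default:
                    printf("ok\n");
            }
    }

    int main(void)
    {
            report(3); /* prints both lines */
            return 0;
    }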
@@ -540,16 +541,12 @@ static int mscan_open(struct net_device *dev)
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs __iomem *regs = priv->reg_base;
- if (priv->clk_ipg) {
- ret = clk_prepare_enable(priv->clk_ipg);
- if (ret)
- goto exit_retcode;
- }
- if (priv->clk_can) {
- ret = clk_prepare_enable(priv->clk_can);
- if (ret)
- goto exit_dis_ipg_clock;
- }
+ ret = clk_prepare_enable(priv->clk_ipg);
+ if (ret)
+ goto exit_retcode;
+ ret = clk_prepare_enable(priv->clk_can);
+ if (ret)
+ goto exit_dis_ipg_clock;
/* common open */
ret = open_candev(dev);
@@ -583,11 +580,9 @@ exit_napi_disable:
napi_disable(&priv->napi);
close_candev(dev);
exit_dis_can_clock:
- if (priv->clk_can)
- clk_disable_unprepare(priv->clk_can);
+ clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
- if (priv->clk_ipg)
- clk_disable_unprepare(priv->clk_ipg);
+ clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
return ret;
}
@@ -606,10 +601,8 @@ static int mscan_close(struct net_device *dev)
close_candev(dev);
free_irq(dev->irq, dev);
- if (priv->clk_can)
- clk_disable_unprepare(priv->clk_can);
- if (priv->clk_ipg)
- clk_disable_unprepare(priv->clk_ipg);
+ clk_disable_unprepare(priv->clk_can);
+ clk_disable_unprepare(priv->clk_ipg);
return 0;
}
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index db41dddd5771..5c180d2f3c3c 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -461,7 +461,7 @@ static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
PCH_ID2_DIR | (0x7ff << 2));
iowrite32(0x0, &priv->regs->ifregs[1].id1);
- /* Claring NewDat, TxRqst & IntPnd */
+ /* Clearing NewDat, TxRqst & IntPnd */
pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
PCH_IF_MCONT_TXRQXT);
@@ -834,7 +834,7 @@ static int pch_can_open(struct net_device *ndev)
struct pch_can_priv *priv = netdev_priv(ndev);
int retval;
- /* Regstering the interrupt. */
+ /* Registering the interrupt. */
retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
ndev->name, ndev);
if (retval) {
@@ -957,8 +957,7 @@ static void pch_can_remove(struct pci_dev *pdev)
free_candev(priv->ndev);
}
-#ifdef CONFIG_PM
-static void pch_can_set_int_custom(struct pch_can_priv *priv)
+static void __maybe_unused pch_can_set_int_custom(struct pch_can_priv *priv)
{
/* Clearing the IE, SIE and EIE bits of Can control register. */
pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
@@ -969,14 +968,14 @@ static void pch_can_set_int_custom(struct pch_can_priv *priv)
}
/* This function retrieves interrupt enabled for the CAN device. */
-static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
+static u32 __maybe_unused pch_can_get_int_enables(struct pch_can_priv *priv)
{
/* Obtaining the status of IE, SIE and EIE interrupt bits. */
return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
}
-static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
- enum pch_ifreg dir)
+static u32 __maybe_unused pch_can_get_rxtx_ir(struct pch_can_priv *priv,
+ u32 buff_num, enum pch_ifreg dir)
{
u32 ie, enable;
@@ -997,8 +996,8 @@ static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
return enable;
}
-static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
- u32 buffer_num, int set)
+static void __maybe_unused pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num, int set)
{
iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
@@ -1013,7 +1012,8 @@ static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
}
-static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
+static u32 __maybe_unused pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
+ u32 buffer_num)
{
u32 link;
@@ -1027,20 +1027,19 @@ static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
return link;
}
-static int pch_can_get_buffer_status(struct pch_can_priv *priv)
+static int __maybe_unused pch_can_get_buffer_status(struct pch_can_priv *priv)
{
return (ioread32(&priv->regs->treq1) & 0xffff) |
(ioread32(&priv->regs->treq2) << 16);
}
-static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused pch_can_suspend(struct device *dev_d)
{
int i;
- int retval;
u32 buf_stat; /* Variable for reading the transmit buffer status. */
int counter = PCH_COUNTER_LIMIT;
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct pch_can_priv *priv = netdev_priv(dev);
/* Stop the CAN controller */
@@ -1058,7 +1057,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
udelay(1);
}
if (!counter)
- dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
+ dev_err(dev_d, "%s -> Transmission time out.\n", __func__);
/* Save interrupt configuration and then disable them */
priv->int_enables = pch_can_get_int_enables(priv);
@@ -1081,35 +1080,16 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
/* Disable all Receive buffers */
pch_can_set_rx_all(priv, 0);
- retval = pci_save_state(pdev);
- if (retval) {
- dev_err(&pdev->dev, "pci_save_state failed.\n");
- } else {
- pci_enable_wake(pdev, PCI_D3hot, 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- }
- return retval;
+ return 0;
}
-static int pch_can_resume(struct pci_dev *pdev)
+static int __maybe_unused pch_can_resume(struct device *dev_d)
{
int i;
- int retval;
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(dev_d);
struct pch_can_priv *priv = netdev_priv(dev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
- retval = pci_enable_device(pdev);
- if (retval) {
- dev_err(&pdev->dev, "pci_enable_device failed.\n");
- return retval;
- }
-
- pci_enable_wake(pdev, PCI_D3hot, 0);
-
priv->can.state = CAN_STATE_ERROR_ACTIVE;
/* Disabling all interrupts. */
@@ -1146,12 +1126,8 @@ static int pch_can_resume(struct pci_dev *pdev)
/* Restore Run Mode */
pch_can_set_run_mode(priv, PCH_CAN_RUN);
- return retval;
+ return 0;
}
-#else
-#define pch_can_suspend NULL
-#define pch_can_resume NULL
-#endif
static int pch_can_get_berr_counter(const struct net_device *dev,
struct can_berr_counter *bec)
@@ -1252,13 +1228,16 @@ probe_exit_endev:
return rc;
}
+static SIMPLE_DEV_PM_OPS(pch_can_pm_ops,
+ pch_can_suspend,
+ pch_can_resume);
+
static struct pci_driver pch_can_pci_driver = {
.name = "pch_can",
.id_table = pch_pci_tbl,
.probe = pch_can_probe,
.remove = pch_can_remove,
- .suspend = pch_can_suspend,
- .resume = pch_can_resume,
+ .driver.pm = &pch_can_pm_ops,
};
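SIMPLE_DEV_PM_OPS() from linux/pm.h bundles the suspend/resume pair into a struct dev_pm_ops, replacing the legacy PCI .suspend/.resume hooks removed above. Roughly, and simplified to the CONFIG_PM_SLEEP case, the macro above expands to:

    static const struct dev_pm_ops pch_can_pm_ops = {
            .suspend  = pch_can_suspend,
            .resume   = pch_can_resume,
            .freeze   = pch_can_suspend,
            .thaw     = pch_can_resume,
            .poweroff = pch_can_suspend,
            .restore  = pch_can_resume,
    };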
module_pci_driver(pch_can_pci_driver);
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 9469d4421afe..0df1cdfa6835 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -116,8 +116,6 @@ MODULE_LICENSE("GPL v2");
#define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */
#define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */
-#define CANFD_OPTIONS_SET (CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD)
-
/* Tx anticipation window (link logical address should be aligned on 2K
* boundary)
*/
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index e8328910a234..3b180269a92d 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -351,6 +351,17 @@ int can_rx_offload_add_fifo(struct net_device *dev,
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight)
+{
+ if (offload->mailbox_read)
+ return -EINVAL;
+
+ return can_rx_offload_init_queue(dev, offload, weight);
+}
+EXPORT_SYMBOL_GPL(can_rx_offload_add_manual);
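A hedged sketch of how a caller without a mailbox_read() callback might use the new helper from its probe and IRQ paths; apart from the exported rx-offload symbols, all names here are hypothetical:

    /* probe: offload->mailbox_read stays NULL in manual mode */
    err = can_rx_offload_add_manual(dev, &priv->offload, NAPI_POLL_WEIGHT);
    if (err)
            return err;
    can_rx_offload_enable(&priv->offload);

    /* IRQ handler: the driver queues skbs itself */
    can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);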
+
void can_rx_offload_enable(struct can_rx_offload *offload)
{
napi_enable(&offload->napi);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 8c0244f51059..4713921bd511 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -97,7 +97,7 @@ MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
/* GPIOICR byte access offsets */
#define PITA_GPOUT 0x18 /* GPx output value */
#define PITA_GPIN 0x19 /* GPx input value */
-#define PITA_GPOEN 0x1A /* configure GPx as ouput pin */
+#define PITA_GPOEN 0x1A /* configure GPx as output pin */
/* I2C GP bits */
#define PITA_GPIN_SCL 0x01 /* Serial Clock Line */
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 5e0d5e8101c8..cf951a783078 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -671,7 +671,7 @@ static int pcan_probe(struct pcmcia_device *pdev)
card->fw_major = pcan_read_reg(card, PCC_FW_MAJOR);
card->fw_minor = pcan_read_reg(card, PCC_FW_MINOR);
- /* display board name and firware version */
+ /* display board name and firmware version */
dev_info(&pdev->dev, "PEAK-System pcmcia card %s fw %d.%d\n",
pdev->prod_id[1] ? pdev->prod_id[1] : "PCAN-PC Card",
card->fw_major, card->fw_minor);
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 16b9eec63490..8afd7d0a1000 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -5,14 +5,14 @@ config CAN_SOFTING
help
Support for CAN cards from Softing Gmbh & some cards
from Vector Gmbh.
- Softing Gmbh CAN cards come with 1 or 2 physical busses.
+ Softing Gmbh CAN cards come with 1 or 2 physical buses.
Those cards typically use Dual Port RAM to communicate
with the host CPU. The interface is then identical for PCI
and PCMCIA cards. This driver operates on a platform device,
which has been created by softing_cs or softing_pci driver.
Warning:
The API of the card does not allow fine control per bus, but
- controls the 2 busses on the card together.
+ controls the 2 buses on the card together.
As such, some actions (start/stop/busoff recovery) on 1 bus
must bring down the other bus too temporarily.
@@ -24,7 +24,7 @@ config CAN_SOFTING_CS
Support for PCMCIA cards from Softing Gmbh & some cards
from Vector Gmbh.
You need firmware for these, which you can get at
- http://developer.berlios.de/projects/socketcan/
+ https://github.com/linux-can/can-firmware
This version of the driver is written against
firmware version 4.6 (softing-fw-4.6-binaries.tar.gz)
In order to use the card as CAN device, you need the Softing generic
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 8f44fdd8804b..ccd649a8e37b 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -273,7 +273,7 @@ int softing_load_app_fw(const char *file, struct softing *card)
goto failed;
}
- /* regualar data */
+ /* regular data */
for (sum = 0, j = 0; j < len; ++j)
sum += dat[j];
/* work in 16bit (target) */
@@ -474,14 +474,14 @@ int softing_startstop(struct net_device *dev, int up)
if (ret)
goto failed;
if (!bus_bitmask_start)
- /* no busses to be brought up */
+ /* no buses to be brought up */
goto card_done;
if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
&& (softing_error_reporting(card->net[0])
!= softing_error_reporting(card->net[1]))) {
dev_alert(&card->pdev->dev,
- "err_reporting flag differs for busses\n");
+ "err_reporting flag differs for buses\n");
goto invalid;
}
error_reporting = 0;
@@ -635,7 +635,7 @@ int softing_startstop(struct net_device *dev, int up)
priv->can.state = CAN_STATE_ERROR_ACTIVE;
open_candev(netdev);
if (dev != netdev) {
- /* notify other busses on the restart */
+ /* notify other buses on the restart */
softing_netdev_rx(netdev, &msg, 0);
++priv->can.can_stats.restarts;
}
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index d1ddf763b188..9d2faaa39ce4 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -170,8 +170,8 @@ static int softing_handle_1(struct softing *card)
msg.can_dlc = CAN_ERR_DLC;
msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
/*
- * service to all busses, we don't know which it was applicable
- * but only service busses that are online
+ * service to all buses, we don't know which it was applicable
+ * but only service buses that are online
*/
for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
netdev = card->net[j];
@@ -339,7 +339,7 @@ static irqreturn_t softing_irq_thread(int irq, void *dev_id)
continue;
priv = netdev_priv(netdev);
if (!canif_is_active(netdev))
- /* it makes no sense to wake dead busses */
+ /* it makes no sense to wake dead buses */
continue;
if (priv->tx.pending >= TX_ECHO_SKB_MAX)
continue;
@@ -374,7 +374,7 @@ static irqreturn_t softing_irq_v1(int irq, void *dev_id)
}
/*
- * netdev/candev inter-operability
+ * netdev/candev interoperability
*/
static int softing_netdev_open(struct net_device *ndev)
{
@@ -447,8 +447,9 @@ static void softing_card_shutdown(struct softing *card)
{
int fw_up = 0;
- if (mutex_lock_interruptible(&card->fw.lock))
+ if (mutex_lock_interruptible(&card->fw.lock)) {
/* return -ERESTARTSYS */;
+ }
fw_up = card->fw.up;
card->fw.up = 0;
diff --git a/drivers/net/can/softing/softing_platform.h b/drivers/net/can/softing/softing_platform.h
index 68a161547644..cd8d7904c5f0 100644
--- a/drivers/net/can/softing/softing_platform.h
+++ b/drivers/net/can/softing/softing_platform.h
@@ -19,7 +19,7 @@ struct softing_platform_data {
* 16bit, shared interrupt
*/
int generation;
- int nbus; /* # busses on device */
+ int nbus; /* # buses on device */
unsigned int freq; /* operating frequency in Hz */
unsigned int max_brp;
unsigned int max_sjw;
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
index f780c79aac6f..f45449210047 100644
--- a/drivers/net/can/spi/Kconfig
+++ b/drivers/net/can/spi/Kconfig
@@ -4,15 +4,15 @@ menu "CAN SPI interfaces"
config CAN_HI311X
tristate "Holt HI311x SPI CAN controllers"
- depends on CAN_DEV && SPI && HAS_DMA
help
Driver for the Holt HI311x SPI CAN controllers.
config CAN_MCP251X
tristate "Microchip MCP251x and MCP25625 SPI CAN controllers"
- depends on HAS_DMA
help
Driver for the Microchip MCP251x and MCP25625 SPI CAN
controllers.
+source "drivers/net/can/spi/mcp251xfd/Kconfig"
+
endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index f115b2c46623..33e3f60bbc10 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -6,3 +6,4 @@
obj-$(CONFIG_CAN_HI311X) += hi311x.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+obj-y += mcp251xfd/
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index d17608870f2d..22d814ae4edc 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -19,6 +19,7 @@
* Copyright 2007
*/
+#include <linux/bitfield.h>
#include <linux/can/core.h>
#include <linux/can/dev.h>
#include <linux/can/led.h>
@@ -27,17 +28,20 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/freezer.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/property.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
-#include <linux/regulator/consumer.h>
/* SPI interface instruction set */
#define INSTRUCTION_WRITE 0x02
@@ -52,6 +56,30 @@
#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07))
/* MPC251x registers */
+#define BFPCTRL 0x0c
+# define BFPCTRL_B0BFM BIT(0)
+# define BFPCTRL_B1BFM BIT(1)
+# define BFPCTRL_BFM(n) (BFPCTRL_B0BFM << (n))
+# define BFPCTRL_BFM_MASK GENMASK(1, 0)
+# define BFPCTRL_B0BFE BIT(2)
+# define BFPCTRL_B1BFE BIT(3)
+# define BFPCTRL_BFE(n) (BFPCTRL_B0BFE << (n))
+# define BFPCTRL_BFE_MASK GENMASK(3, 2)
+# define BFPCTRL_B0BFS BIT(4)
+# define BFPCTRL_B1BFS BIT(5)
+# define BFPCTRL_BFS(n) (BFPCTRL_B0BFS << (n))
+# define BFPCTRL_BFS_MASK GENMASK(5, 4)
+#define TXRTSCTRL 0x0d
+# define TXRTSCTRL_B0RTSM BIT(0)
+# define TXRTSCTRL_B1RTSM BIT(1)
+# define TXRTSCTRL_B2RTSM BIT(2)
+# define TXRTSCTRL_RTSM(n) (TXRTSCTRL_B0RTSM << (n))
+# define TXRTSCTRL_RTSM_MASK GENMASK(2, 0)
+# define TXRTSCTRL_B0RTS BIT(3)
+# define TXRTSCTRL_B1RTS BIT(4)
+# define TXRTSCTRL_B2RTS BIT(5)
+# define TXRTSCTRL_RTS(n) (TXRTSCTRL_B0RTS << (n))
+# define TXRTSCTRL_RTS_MASK GENMASK(5, 3)
#define CANSTAT 0x0e
#define CANCTRL 0x0f
# define CANCTRL_REQOP_MASK 0xe0
@@ -225,6 +253,10 @@ struct mcp251x_priv {
struct regulator *power;
struct regulator *transceiver;
struct clk *clk;
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+ u8 reg_bfpctrl;
+#endif
};
#define MCP251X_IS(_model) \
@@ -290,8 +322,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg)
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
- mcp251x_spi_trans(spi, 3);
- val = priv->spi_rx_buf[2];
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ spi_write_then_read(spi, priv->spi_tx_buf, 2, &val, 1);
+ } else {
+ mcp251x_spi_trans(spi, 3);
+ val = priv->spi_rx_buf[2];
+ }
return val;
}
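On half-duplex controllers a single full-duplex transfer is not available, so the command bytes go out first and the reply is clocked in by a separate read, which spi_write_then_read() wraps. A condensed sketch of the two access patterns used above, with shortened buffers:

    u8 tx[3] = { INSTRUCTION_READ, reg, 0 };
    u8 rx[3], val;

    if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
            /* half duplex: write the command, then read the answer */
            spi_write_then_read(spi, tx, 2, &val, 1);
    } else {
            /* full duplex: one 3-byte transfer, reply lands in byte 2 */
            struct spi_transfer t = {
                    .tx_buf = tx, .rx_buf = rx, .len = 3,
            };

            spi_sync_transfer(spi, &t, 1);
            val = rx[2];
    }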
@@ -303,10 +339,18 @@ static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2)
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
- mcp251x_spi_trans(spi, 4);
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
+ u8 val[2] = { 0 };
- *v1 = priv->spi_rx_buf[2];
- *v2 = priv->spi_rx_buf[3];
+ spi_write_then_read(spi, priv->spi_tx_buf, 2, val, 2);
+ *v1 = val[0];
+ *v2 = val[1];
+ } else {
+ mcp251x_spi_trans(spi, 4);
+
+ *v1 = priv->spi_rx_buf[2];
+ *v2 = priv->spi_rx_buf[3];
+ }
}
static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val)
@@ -345,6 +389,222 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
mcp251x_spi_trans(spi, 4);
}
+static u8 mcp251x_read_stat(struct spi_device *spi)
+{
+ return mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK;
+}
+
+#define mcp251x_read_stat_poll_timeout(addr, val, cond, delay_us, timeout_us) \
+ readx_poll_timeout(mcp251x_read_stat, addr, val, cond, \
+ delay_us, timeout_us)
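readx_poll_timeout() from linux/iopoll.h repeatedly evaluates op(addr), sleeping between polls, until cond holds or timeout_us elapses; it returns 0 on success and -ETIMEDOUT otherwise. A simplified sketch of its expansion, for illustration only (not the real implementation):

    #define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \
    ({                                                                    \
            u64 __end = ktime_get_ns() + (timeout_us) * 1000ULL;          \
            int __ret = 0;                                                \
            for (;;) {                                                    \
                    (val) = op(addr);                                     \
                    if (cond)                                             \
                            break;                                        \
                    if (ktime_get_ns() > __end) {                         \
                            (val) = op(addr); /* one final read */        \
                            __ret = (cond) ? 0 : -ETIMEDOUT;              \
                            break;                                        \
                    }                                                     \
                    usleep_range((sleep_us), (sleep_us) * 2);             \
            }                                                             \
            __ret;                                                        \
    })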
+
+#ifdef CONFIG_GPIOLIB
+enum {
+ MCP251X_GPIO_TX0RTS = 0, /* inputs */
+ MCP251X_GPIO_TX1RTS,
+ MCP251X_GPIO_TX2RTS,
+ MCP251X_GPIO_RX0BF, /* outputs */
+ MCP251X_GPIO_RX1BF,
+};
+
+#define MCP251X_GPIO_INPUT_MASK \
+ GENMASK(MCP251X_GPIO_TX2RTS, MCP251X_GPIO_TX0RTS)
+#define MCP251X_GPIO_OUTPUT_MASK \
+ GENMASK(MCP251X_GPIO_RX1BF, MCP251X_GPIO_RX0BF)
+
+static const char * const mcp251x_gpio_names[] = {
+ [MCP251X_GPIO_TX0RTS] = "TX0RTS", /* inputs */
+ [MCP251X_GPIO_TX1RTS] = "TX1RTS",
+ [MCP251X_GPIO_TX2RTS] = "TX2RTS",
+ [MCP251X_GPIO_RX0BF] = "RX0BF", /* outputs */
+ [MCP251X_GPIO_RX1BF] = "RX1BF",
+};
+
+static inline bool mcp251x_gpio_is_input(unsigned int offset)
+{
+ return offset <= MCP251X_GPIO_TX2RTS;
+}
+
+static int mcp251x_gpio_request(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 val;
+
+ /* nothing to be done for inputs */
+ if (mcp251x_gpio_is_input(offset))
+ return 0;
+
+ val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, val, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl |= val;
+
+ return 0;
+}
+
+static void mcp251x_gpio_free(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 val;
+
+ /* nothing to be done for inputs */
+ if (mcp251x_gpio_is_input(offset))
+ return;
+
+ val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF);
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, val, 0);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~val;
+}
+
+static int mcp251x_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ if (mcp251x_gpio_is_input(offset))
+ return GPIOF_DIR_IN;
+
+ return GPIOF_DIR_OUT;
+}
+
+static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 reg, mask, val;
+
+ if (mcp251x_gpio_is_input(offset)) {
+ reg = TXRTSCTRL;
+ mask = TXRTSCTRL_RTS(offset);
+ } else {
+ reg = BFPCTRL;
+ mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
+ }
+
+ mutex_lock(&priv->mcp_lock);
+ val = mcp251x_read_reg(priv->spi, reg);
+ mutex_unlock(&priv->mcp_lock);
+
+ return !!(val & mask);
+}
+
+static int mcp251x_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *maskp, unsigned long *bitsp)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ unsigned long bits = 0;
+ u8 val;
+
+ mutex_lock(&priv->mcp_lock);
+ if (maskp[0] & MCP251X_GPIO_INPUT_MASK) {
+ val = mcp251x_read_reg(priv->spi, TXRTSCTRL);
+ val = FIELD_GET(TXRTSCTRL_RTS_MASK, val);
+ bits |= FIELD_PREP(MCP251X_GPIO_INPUT_MASK, val);
+ }
+ if (maskp[0] & MCP251X_GPIO_OUTPUT_MASK) {
+ val = mcp251x_read_reg(priv->spi, BFPCTRL);
+ val = FIELD_GET(BFPCTRL_BFS_MASK, val);
+ bits |= FIELD_PREP(MCP251X_GPIO_OUTPUT_MASK, val);
+ }
+ mutex_unlock(&priv->mcp_lock);
+
+ bitsp[0] = bits;
+ return 0;
+}
+
+static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 mask, val;
+
+ mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF);
+ val = value ? mask : 0;
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~mask;
+ priv->reg_bfpctrl |= val;
+}
+
+static void
+mcp251x_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *maskp, unsigned long *bitsp)
+{
+ struct mcp251x_priv *priv = gpiochip_get_data(chip);
+ u8 mask, val;
+
+ mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]);
+ mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask);
+
+ val = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, bitsp[0]);
+ val = FIELD_PREP(BFPCTRL_BFS_MASK, val);
+
+ if (!mask)
+ return;
+
+ mutex_lock(&priv->mcp_lock);
+ mcp251x_write_bits(priv->spi, BFPCTRL, mask, val);
+ mutex_unlock(&priv->mcp_lock);
+
+ priv->reg_bfpctrl &= ~mask;
+ priv->reg_bfpctrl |= val;
+}
+
+static void mcp251x_gpio_restore(struct spi_device *spi)
+{
+ struct mcp251x_priv *priv = spi_get_drvdata(spi);
+
+ mcp251x_write_reg(spi, BFPCTRL, priv->reg_bfpctrl);
+}
+
+static int mcp251x_gpio_setup(struct mcp251x_priv *priv)
+{
+ struct gpio_chip *gpio = &priv->gpio;
+
+ if (!device_property_present(&priv->spi->dev, "gpio-controller"))
+ return 0;
+
+ /* gpiochip handles TX[0..2]RTS and RX[0..1]BF */
+ gpio->label = priv->spi->modalias;
+ gpio->parent = &priv->spi->dev;
+ gpio->owner = THIS_MODULE;
+ gpio->request = mcp251x_gpio_request;
+ gpio->free = mcp251x_gpio_free;
+ gpio->get_direction = mcp251x_gpio_get_direction;
+ gpio->get = mcp251x_gpio_get;
+ gpio->get_multiple = mcp251x_gpio_get_multiple;
+ gpio->set = mcp251x_gpio_set;
+ gpio->set_multiple = mcp251x_gpio_set_multiple;
+ gpio->base = -1;
+ gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names);
+ gpio->names = mcp251x_gpio_names;
+ gpio->can_sleep = true;
+#ifdef CONFIG_OF_GPIO
+ gpio->of_node = priv->spi->dev.of_node;
+#endif
+
+ return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv);
+}
+#else
+static inline void mcp251x_gpio_restore(struct spi_device *spi)
+{
+}
+
+static inline int mcp251x_gpio_setup(struct mcp251x_priv *priv)
+{
+ return 0;
+}
+#endif
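For context, a minimal consumer-side sketch (not part of this patch) of how another driver could claim one of the pins exposed by the gpiochip above; the "status" function name and the DT wiring are hypothetical:

	#include <linux/gpio/consumer.h>

	static int example_claim_rx0bf(struct device *dev)
	{
		struct gpio_desc *pin;

		/* assumes a DT consumer node with a "status-gpios"
		 * property pointing at this mcp251x gpiochip
		 */
		pin = devm_gpiod_get(dev, "status", GPIOD_OUT_LOW);
		if (IS_ERR(pin))
			return PTR_ERR(pin);

		/* the pin sits behind SPI, so use the _cansleep variant */
		gpiod_set_value_cansleep(pin, 1);
		return 0;
	}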
+
static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
int len, int tx_buf_idx)
{
@@ -409,8 +669,16 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
- mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
- memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+ if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
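+			/* On half-duplex controllers the command byte is
+			 * not clocked back into the RX buffer, so the
+			 * payload starts at spi_rx_buf[0] and is copied
+			 * to buf + 1 to keep the full-duplex layout.
+			 */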
+ spi_write_then_read(spi, priv->spi_tx_buf, 1,
+ priv->spi_rx_buf,
+ SPI_TRANSFER_BUF_LEN);
+ memcpy(buf + 1, priv->spi_rx_buf,
+ SPI_TRANSFER_BUF_LEN - 1);
+ } else {
+ mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
+ memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
+ }
}
}
@@ -471,7 +739,8 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
/* May only be called when device is sleeping! */
static int mcp251x_hw_wake(struct spi_device *spi)
{
- unsigned long timeout;
+ u8 value;
+ int ret;
/* Force wakeup interrupt to wake device, but don't execute IST */
disable_irq(spi->irq);
@@ -484,14 +753,12 @@ static int mcp251x_hw_wake(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF);
/* Wait for the device to enter config mode */
- timeout = jiffies + HZ;
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
- CANCTRL_REQOP_CONF) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't enter in config mode\n");
- return -EBUSY;
- }
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret) {
+ dev_err(&spi->dev, "MCP251x didn't enter in config mode\n");
+ return ret;
}
/* Disable and clear pending interrupts */
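The mcp251x_read_stat_poll_timeout() helper is not visible in this hunk; presumably it wraps the generic readx_poll_timeout() from <linux/iopoll.h>, roughly along these lines:

	static u8 mcp251x_read_stat(struct spi_device *spi)
	{
		return mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK;
	}

	#define mcp251x_read_stat_poll_timeout(spi, val, cond, delay_us, timeout_us) \
		readx_poll_timeout(mcp251x_read_stat, spi, val, cond, \
				   delay_us, timeout_us)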
@@ -546,7 +813,8 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
static int mcp251x_set_normal_mode(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- unsigned long timeout;
+ u8 value;
+ int ret;
/* Enable interrupts */
mcp251x_write_reg(spi, CANINTE,
@@ -564,13 +832,12 @@ static int mcp251x_set_normal_mode(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
/* Wait for the device to enter normal mode */
- timeout = jiffies + HZ;
- while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
- schedule();
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
- return -EBUSY;
- }
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == 0,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret) {
+ dev_err(&spi->dev, "MCP251x didn't enter in normal mode\n");
+ return ret;
}
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -614,7 +881,7 @@ static int mcp251x_setup(struct net_device *net, struct spi_device *spi)
static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = spi_get_drvdata(spi);
- unsigned long timeout;
+ u8 value;
int ret;
/* Wait for oscillator startup timer after power up */
@@ -629,19 +896,12 @@ static int mcp251x_hw_reset(struct spi_device *spi)
mdelay(MCP251X_OST_DELAY_MS);
/* Wait for reset to finish */
- timeout = jiffies + HZ;
- while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) !=
- CANCTRL_REQOP_CONF) {
- usleep_range(MCP251X_OST_DELAY_MS * 1000,
- MCP251X_OST_DELAY_MS * 1000 * 2);
-
- if (time_after(jiffies, timeout)) {
- dev_err(&spi->dev,
- "MCP251x didn't enter in conf mode after reset\n");
- return -EBUSY;
- }
- }
- return 0;
+ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF,
+ MCP251X_OST_DELAY_MS * 1000,
+ USEC_PER_SEC);
+ if (ret)
+ dev_err(&spi->dev, "MCP251x didn't enter in conf mode after reset\n");
+ return ret;
}
static int mcp251x_hw_probe(struct spi_device *spi)
@@ -761,6 +1021,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
mcp251x_hw_reset(spi);
mcp251x_setup(net, spi);
+ mcp251x_gpio_restore(spi);
} else {
mcp251x_hw_wake(spi);
}
@@ -1136,6 +1397,10 @@ static int mcp251x_can_probe(struct spi_device *spi)
devm_can_led_init(net);
+ ret = mcp251x_gpio_setup(priv);
+ if (ret)
+ goto error_probe;
+
netdev_info(net, "MCP%x successfully initialized.\n", priv->model);
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/Kconfig b/drivers/net/can/spi/mcp251xfd/Kconfig
new file mode 100644
index 000000000000..f5a147a92cb2
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CAN_MCP251XFD
+ tristate "Microchip MCP251xFD SPI CAN controllers"
+ select REGMAP
+ help
+ Driver for the Microchip MCP251XFD SPI FD-CAN controller
+ family.
+
+config CAN_MCP251XFD_SANITY
+ depends on CAN_MCP251XFD
+ bool "Additional Sanity Checks"
+ help
+ This option enables additional sanity checks in the driver
+ that compare various internal counters with their in-chip
+ counterparts. This comes with a runtime overhead.
+ Disable if unsure.
diff --git a/drivers/net/can/spi/mcp251xfd/Makefile b/drivers/net/can/spi/mcp251xfd/Makefile
new file mode 100644
index 000000000000..cb71244cbe89
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_CAN_MCP251XFD) += mcp251xfd.o
+
+mcp251xfd-objs :=
+mcp251xfd-objs += mcp251xfd-core.o
+mcp251xfd-objs += mcp251xfd-crc16.o
+mcp251xfd-objs += mcp251xfd-regmap.o
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
new file mode 100644
index 000000000000..c3f49543ff26
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -0,0 +1,2927 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+#include "mcp251xfd.h"
+
+#define DEVICE_NAME "mcp251xfd"
+
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = {
+ .quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG |
+ MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX |
+ MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP2517FD,
+};
+
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP2518FD,
+};
+
+/* Autodetect model, start with CRC enabled. */
+static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = {
+ .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX |
+ MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC,
+ .model = MCP251XFD_MODEL_MCP251XFD,
+};
+
+static const struct can_bittiming_const mcp251xfd_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 2,
+ .tseg1_max = 256,
+ .tseg2_min = 1,
+ .tseg2_max = 128,
+ .sjw_max = 128,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
+ .name = DEVICE_NAME,
+ .tseg1_min = 1,
+ .tseg1_max = 32,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 16,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
+{
+ switch (model) {
+ case MCP251XFD_MODEL_MCP2517FD:
+ return "MCP2517FD";
+ case MCP251XFD_MODEL_MCP2518FD:
+ return "MCP2518FD";
+ case MCP251XFD_MODEL_MCP251XFD:
+ return "MCP251xFD";
+ }
+
+ return "<unknown>";
+}
+
+static inline const char *
+mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv)
+{
+ return __mcp251xfd_get_model_str(priv->devtype_data.model);
+}
+
+static const char *mcp251xfd_get_mode_str(const u8 mode)
+{
+ switch (mode) {
+ case MCP251XFD_REG_CON_MODE_MIXED:
+ return "Mixed (CAN FD/CAN 2.0)";
+ case MCP251XFD_REG_CON_MODE_SLEEP:
+ return "Sleep";
+ case MCP251XFD_REG_CON_MODE_INT_LOOPBACK:
+ return "Internal Loopback";
+ case MCP251XFD_REG_CON_MODE_LISTENONLY:
+ return "Listen Only";
+ case MCP251XFD_REG_CON_MODE_CONFIG:
+ return "Configuration";
+ case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK:
+ return "External Loopback";
+ case MCP251XFD_REG_CON_MODE_CAN2_0:
+ return "CAN 2.0";
+ case MCP251XFD_REG_CON_MODE_RESTRICTED:
+ return "Restricted Operation";
+ }
+
+ return "<unknown>";
+}
+
+static inline int mcp251xfd_vdd_enable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_vdd)
+ return 0;
+
+ return regulator_enable(priv->reg_vdd);
+}
+
+static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_vdd)
+ return 0;
+
+ return regulator_disable(priv->reg_vdd);
+}
+
+static inline int
+mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int
+mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_disable(priv->reg_xceiver);
+}
+
+static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
+
+ err = mcp251xfd_vdd_enable(priv);
+ if (err)
+ clk_disable_unprepare(priv->clk);
+
+ /* Wait for oscillator stabilisation time after power up */
+ usleep_range(MCP251XFD_OSC_STAB_SLEEP_US,
+ 2 * MCP251XFD_OSC_STAB_SLEEP_US);
+
+ return err;
+}
+
+static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ err = mcp251xfd_vdd_disable(priv);
+ if (err)
+ return err;
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static inline u8
+mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv,
+ union mcp251xfd_write_reg_buf *write_reg_buf,
+ const u16 reg, const u32 mask, const u32 val)
+{
+ u8 first_byte, last_byte, len;
+ u8 *data;
+ __le32 val_le32;
+
+ first_byte = mcp251xfd_first_byte_set(mask);
+ last_byte = mcp251xfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
+
+ data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+ memcpy(data, &val_le32, len);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(write_reg_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len);
+ put_unaligned_be16(crc, (void *)write_reg_buf + len);
+
+ /* Total length */
+ len += sizeof(write_reg_buf->crc.crc);
+ } else {
+ len += sizeof(write_reg_buf->nocrc.cmd);
+ }
+
+ return len;
+}
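The byte-span helpers used above (mcp251xfd_first_byte_set()/mcp251xfd_last_byte_set()) live in mcp251xfd.h and are not part of this hunk; a sketch of their intended semantics, with hypothetical names:

	/* index of the lowest/highest byte touched by a 32-bit mask */
	static inline u8 example_first_byte_set(u32 mask)
	{
		return (ffs(mask) - 1) / BITS_PER_BYTE;
	}

	static inline u8 example_last_byte_set(u32 mask)
	{
		return (fls(mask) - 1) / BITS_PER_BYTE;
	}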
+
+static inline int
+mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tef_tail)
+{
+ u32 tef_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua);
+ if (err)
+ return err;
+
+ *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_tx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ u8 *tx_tail)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg,
+ MCP251XFD_REG_FIFOSTA(MCP251XFD_TX_FIFO),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_head)
+{
+ u32 fifo_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *rx_tail)
+{
+ u32 fifo_ua;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
+ &fifo_ua);
+ if (err)
+ return err;
+
+ fifo_ua -= ring->base - MCP251XFD_RAM_START;
+ *rx_tail = fifo_ua / ring->obj_size;
+
+ return 0;
+}
+
+static void
+mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_tx_ring *ring,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const u8 rts_buf_len,
+ const u8 n)
+{
+ struct spi_transfer *xfer;
+ u16 addr;
+
+ /* FIFO load */
+ addr = mcp251xfd_get_tx_obj_addr(ring, n);
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd,
+ addr);
+ else
+ mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd,
+ addr);
+
+ xfer = &tx_obj->xfer[0];
+ xfer->tx_buf = &tx_obj->buf;
+ xfer->len = 0; /* actual len is assigned on the fly */
+ xfer->cs_change = 1;
+ xfer->cs_change_delay.value = 0;
+ xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* FIFO request to send */
+ xfer = &tx_obj->xfer[1];
+ xfer->tx_buf = &ring->rts_buf;
+ xfer->len = rts_buf_len;
+
+ /* SPI message */
+ spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer,
+ ARRAY_SIZE(tx_obj->xfer));
+}
+
+static void mcp251xfd_ring_init(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_rx_ring *rx_ring, *prev_rx_ring = NULL;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u32 val;
+ u16 addr;
+ u8 len;
+ int i;
+
+ /* TEF */
+ priv->tef.head = 0;
+ priv->tef.tail = 0;
+
+ /* TX */
+ tx_ring = priv->tx;
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->base = mcp251xfd_get_tef_obj_addr(tx_ring->obj_num);
+
+ /* FIFO request to send */
+ addr = MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO);
+ val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC;
+ len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf,
+ addr, val, val);
+
+ mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i)
+ mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i);
+
+ /* RX */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->head = 0;
+ rx_ring->tail = 0;
+ rx_ring->nr = i;
+ rx_ring->fifo_nr = MCP251XFD_RX_FIFO(i);
+
+ if (!prev_rx_ring)
+ rx_ring->base =
+ mcp251xfd_get_tx_obj_addr(tx_ring,
+ tx_ring->obj_num);
+ else
+ rx_ring->base = prev_rx_ring->base +
+ prev_rx_ring->obj_size *
+ prev_rx_ring->obj_num;
+
+ prev_rx_ring = rx_ring;
+ }
+}
+
+static void mcp251xfd_ring_free(struct mcp251xfd_priv *priv)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) {
+ kfree(priv->rx[i]);
+ priv->rx[i] = NULL;
+ }
+}
+
+static int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_tx_ring *tx_ring;
+ struct mcp251xfd_rx_ring *rx_ring;
+ int tef_obj_size, tx_obj_size, rx_obj_size;
+ int tx_obj_num;
+ int ram_free, i;
+
+ tef_obj_size = sizeof(struct mcp251xfd_hw_tef_obj);
+ /* listen-only mode works like FD mode */
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD)) {
+ tx_obj_num = MCP251XFD_TX_OBJ_NUM_CANFD;
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd);
+ } else {
+ tx_obj_num = MCP251XFD_TX_OBJ_NUM_CAN;
+ tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can);
+ rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can);
+ }
+
+ tx_ring = priv->tx;
+ tx_ring->obj_num = tx_obj_num;
+ tx_ring->obj_size = tx_obj_size;
+
+ ram_free = MCP251XFD_RAM_SIZE - tx_obj_num *
+ (tef_obj_size + tx_obj_size);
+
+ for (i = 0;
+ i < ARRAY_SIZE(priv->rx) && ram_free >= rx_obj_size;
+ i++) {
+ int rx_obj_num;
+
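+		/* Round the FIFO depth down to a power of two, capped
+		 * at 32, e.g. 45 -> 32 (fls(45) == 6, 1 << 5 == 32)
+		 * and 20 -> 16 (fls(20) == 5, 1 << 4 == 16).
+		 */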
+ rx_obj_num = ram_free / rx_obj_size;
+ rx_obj_num = min(1 << (fls(rx_obj_num) - 1), 32);
+
+ rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num,
+ GFP_KERNEL);
+ if (!rx_ring) {
+ mcp251xfd_ring_free(priv);
+ return -ENOMEM;
+ }
+ rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_size = rx_obj_size;
+ priv->rx[i] = rx_ring;
+
+ ram_free -= rx_ring->obj_num * rx_ring->obj_size;
+ }
+ priv->rx_ring_num = i;
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: TEF: %d*%d bytes = %d bytes, TX: %d*%d bytes = %d bytes\n",
+ tx_obj_num, tef_obj_size, tef_obj_size * tx_obj_num,
+ tx_obj_num, tx_obj_size, tx_obj_size * tx_obj_num);
+
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ netdev_dbg(priv->ndev,
+ "FIFO setup: RX-%d: %d*%d bytes = %d bytes\n",
+ i, rx_ring->obj_num, rx_ring->obj_size,
+ rx_ring->obj_size * rx_ring->obj_num);
+ }
+
+ netdev_dbg(priv->ndev,
+ "FIFO setup: free: %d bytes\n",
+ ram_free);
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode)
+{
+ u32 val;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val);
+ if (err)
+ return err;
+
+ *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val);
+
+ return 0;
+}
+
+static int
+__mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
+ const u8 mode_req, bool nowait)
+{
+ u32 con, con_reqop;
+ int err;
+
+ con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req);
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON,
+ MCP251XFD_REG_CON_REQOP_MASK, con_reqop);
+ if (err)
+ return err;
+
+ if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait)
+ return 0;
+
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con,
+ FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK,
+ con) == mode_req,
+ MCP251XFD_POLL_SLEEP_US,
+ MCP251XFD_POLL_TIMEOUT_US);
+ if (err) {
+ u8 mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con);
+
+ netdev_err(priv->ndev,
+ "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode_req), mode_req,
+ mcp251xfd_get_mode_str(mode), mode);
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int
+mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv,
+ const u8 mode_req)
+{
+ return __mcp251xfd_chip_set_mode(priv, mode_req, false);
+}
+
+static inline int
+mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv,
+ const u8 mode_req)
+{
+ return __mcp251xfd_chip_set_mode(priv, mode_req, true);
+}
+
+static inline bool mcp251xfd_osc_invalid(u32 reg)
+{
+ return reg == 0x0 || reg == 0xffffffff;
+}
+
+static int mcp251xfd_chip_clock_enable(const struct mcp251xfd_priv *priv)
+{
+ u32 osc, osc_reference, osc_mask;
+ int err;
+
+ /* Set Power On Defaults for "Clock Output Divisor" and remove
+ * "Oscillator Disable" bit.
+ */
+ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY;
+ osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY;
+
+ /* Note:
+ *
+ * If the controller is in Sleep Mode the following write only
+ * removes the "Oscillator Disable" bit and powers it up. All
+ * other bits are unaffected.
+ */
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ return err;
+
+ /* Wait for "Oscillator Ready" bit */
+ err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc,
+ (osc & osc_mask) == osc_reference,
+ MCP251XFD_OSC_STAB_SLEEP_US,
+ MCP251XFD_OSC_STAB_TIMEOUT_US);
+ if (mcp251xfd_osc_invalid(osc)) {
+ netdev_err(priv->ndev,
+ "Failed to detect %s (osc=0x%08x).\n",
+ mcp251xfd_get_model_str(priv), osc);
+ return -ENODEV;
+ } else if (err == -ETIMEDOUT) {
+ netdev_err(priv->ndev,
+ "Timeout waiting for Oscillator Ready (osc=0x%08x, osc_reference=0x%08x)\n",
+ osc, osc_reference);
+ return -ETIMEDOUT;
+ } else if (err) {
+ return err;
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv)
+{
+ const __be16 cmd = mcp251xfd_cmd_reset();
+ int err;
+
+ /* The Set Mode and SPI Reset commands only seem to work if
+ * the controller is not in Sleep Mode.
+ */
+ err = mcp251xfd_chip_clock_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG);
+ if (err)
+ return err;
+
+ /* spi_write_then_read() works with non-DMA-safe buffers */
+ return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0);
+}
+
+static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv)
+{
+ u32 osc, osc_reference;
+ u8 mode;
+ int err;
+
+ err = mcp251xfd_chip_get_mode(priv, &mode);
+ if (err)
+ return err;
+
+ if (mode != MCP251XFD_REG_CON_MODE_CONFIG) {
+ netdev_info(priv->ndev,
+ "Controller not in Config Mode after reset, but in %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+ return -ETIMEDOUT;
+ }
+
+ osc_reference = MCP251XFD_REG_OSC_OSCRDY |
+ FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+
+ /* check reset defaults of OSC reg */
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (err)
+ return err;
+
+ if (osc != osc_reference) {
+ netdev_info(priv->ndev,
+ "Controller failed to reset. osc=0x%08x, reference value=0x%08x\n",
+ osc, osc_reference);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv)
+{
+ int err, i;
+
+ for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) {
+ if (i)
+ netdev_info(priv->ndev,
+ "Retrying to reset Controller.\n");
+
+ err = mcp251xfd_chip_softreset_do(priv);
+ if (err == -ETIMEDOUT)
+ continue;
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_softreset_check(priv);
+ if (err == -ETIMEDOUT)
+ continue;
+ if (err)
+ return err;
+
+ return 0;
+ }
+
+ if (err)
+ return err;
+
+ return -ETIMEDOUT;
+}
+
+static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
+{
+ u32 osc;
+ int err;
+
+ /* Activate Low Power Mode on Oscillator Disable. This only
+ * works on the MCP2518FD. The MCP2517FD will go into normal
+ * Sleep Mode instead.
+ */
+ osc = MCP251XFD_REG_OSC_LPMEN |
+ FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK,
+ MCP251XFD_REG_OSC_CLKODIV_10);
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc);
+ if (err)
+ return err;
+
+ /* Set Time Base Counter Prescaler to 1.
+ *
+ * This means an overflow of the 32 bit Time Base Counter
+ * register at 40 MHz every 107 seconds.
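+ * (2^32 ticks / 40000000 Hz = 4294967296 / 40000000 ~= 107.4 s)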
+ */
+ return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON,
+ MCP251XFD_REG_TSCON_TBCEN);
+}
+
+static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+{
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+ u32 val = 0;
+ s8 tdco;
+ int err;
+
+ /* CAN Control Register
+ *
+ * - no transmit bandwidth sharing
+ * - config mode
+ * - disable transmit queue
+ * - store in transmit FIFO event
+ * - transition to restricted operation mode on system error
+ * - ESI is transmitted recessive when ESI of message is high or
+ * CAN controller error passive
+ * - restricted retransmission attempts,
+ * use TQXCON_TXAT and FIFOCON_TXAT
+ * - wake-up filter bits T11FILTER
+ * - use CAN bus line filter for wakeup
+ * - protocol exception is treated as a form error
+ * - Do not compare data bytes
+ */
+ val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK,
+ MCP251XFD_REG_CON_MODE_CONFIG) |
+ MCP251XFD_REG_CON_STEF |
+ MCP251XFD_REG_CON_ESIGM |
+ MCP251XFD_REG_CON_RTXAT |
+ FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK,
+ MCP251XFD_REG_CON_WFT_T11FILTER) |
+ MCP251XFD_REG_CON_WAKFIL |
+ MCP251XFD_REG_CON_PXEDIS;
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
+ val |= MCP251XFD_REG_CON_ISOCRCEN;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val);
+ if (err)
+ return err;
+
+ /* Nominal Bit Time */
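+ /* Worked example (assumed config, not from this patch):
+ * 500 kbit/s nominal at a 40 MHz clock with brp = 1 gives
+ * 80 time quanta per bit; prop_seg + phase_seg1 = 63,
+ * phase_seg2 = 16, sjw = 16 then yield BRP = 0, TSEG1 = 62,
+ * TSEG2 = 15, SJW = 15 in the fields below.
+ */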
+ val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) |
+ FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK,
+ bt->prop_seg + bt->phase_seg1 - 1) |
+ FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK,
+ bt->phase_seg2 - 1) |
+ FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1);
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val);
+ if (err)
+ return err;
+
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
+ return 0;
+
+ /* Data Bit Time */
+ val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) |
+ FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK,
+ dbt->prop_seg + dbt->phase_seg1 - 1) |
+ FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK,
+ dbt->phase_seg2 - 1) |
+ FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1);
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val);
+ if (err)
+ return err;
+
+ /* Transmitter Delay Compensation */
+ tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
+ -64, 63);
+ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
+ MCP251XFD_REG_TDC_TDCMOD_AUTO) |
+ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
+
+ return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
+}
+
+static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv)
+{
+ u32 val;
+
+ if (!priv->rx_int)
+ return 0;
+
+ /* Configure GPIOs:
+ * - PIN0: GPIO Input
+ * - PIN1: GPIO Input/RX Interrupt
+ *
+ * PIN1 must be Input, otherwise there is a glitch on the
+ * rx-INT line. It happens between setting the PIN as output
+ * (in the first byte of the SPI transfer) and configuring the
+ * PIN as interrupt (in the last byte of the SPI transfer).
+ */
+ val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 |
+ MCP251XFD_REG_IOCON_TRIS0;
+ return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+}
+
+static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv)
+{
+ u32 val;
+
+ if (!priv->rx_int)
+ return 0;
+
+ /* Configure GPIOs:
+ * - PIN0: GPIO Input
+ * - PIN1: GPIO Input
+ */
+ val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 |
+ MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0;
+ return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val);
+}
+
+static int
+mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fifo_con;
+
+ /* Enable RXOVIE on _all_ RX FIFOs, not just the last one.
+ *
+ * FIFOs hit by an RX MAB overflow with RXOVIE enabled will
+ * generate an RXOVIF; use this to properly detect RX MAB
+ * overflows.
+ */
+ fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_RXTSEN |
+ MCP251XFD_REG_FIFOCON_RXOVIE |
+ MCP251XFD_REG_FIFOCON_TFNRFNIE;
+
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ return regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con);
+}
+
+static int
+mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u32 fltcon;
+
+ fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) |
+ MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr);
+
+ return regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FLTCON(ring->nr >> 2),
+ MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr),
+ fltcon);
+}
+
+static int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ const struct mcp251xfd_rx_ring *rx_ring;
+ u32 val;
+ int err, n;
+
+ /* TEF */
+ val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_TEFCON_TEFTSEN |
+ MCP251XFD_REG_TEFCON_TEFOVIE |
+ MCP251XFD_REG_TEFCON_TEFNEIE;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val);
+ if (err)
+ return err;
+
+ /* FIFO 1 - TX */
+ val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK,
+ tx_ring->obj_num - 1) |
+ MCP251XFD_REG_FIFOCON_TXEN |
+ MCP251XFD_REG_FIFOCON_TXATIE;
+
+ if (priv->can.ctrlmode & (CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD))
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_64);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK,
+ MCP251XFD_REG_FIFOCON_PLSIZE_8);
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT);
+ else
+ val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK,
+ MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED);
+
+ err = regmap_write(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(MCP251XFD_TX_FIFO),
+ val);
+ if (err)
+ return err;
+
+ /* RX FIFOs */
+ mcp251xfd_for_each_rx_ring(priv, rx_ring, n) {
+ err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ void *ram;
+ u32 val = 0;
+ int err;
+
+ ecc->ecc_stat = 0;
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC)
+ val = MCP251XFD_REG_ECCCON_ECCEN;
+
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
+ MCP251XFD_REG_ECCCON_ECCEN, val);
+ if (err)
+ return err;
+
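+ /* Zero the full RAM once, so every word carries valid ECC
+ * parity before it is first read back (assumed rationale).
+ */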
+ ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL);
+ if (!ram)
+ return -ENOMEM;
+
+ err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram,
+ MCP251XFD_RAM_SIZE);
+ kfree(ram);
+
+ return err;
+}
+
+static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+
+ ecc->ecc_stat = 0;
+}
+
+static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv)
+{
+ u8 mode;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ mode = MCP251XFD_REG_CON_MODE_LISTENONLY;
+ else if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ mode = MCP251XFD_REG_CON_MODE_MIXED;
+ else
+ mode = MCP251XFD_REG_CON_MODE_CAN2_0;
+
+ return mode;
+}
+
+static int
+__mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv,
+ bool nowait)
+{
+ u8 mode;
+
+ mode = mcp251xfd_get_normal_mode(priv);
+
+ return __mcp251xfd_chip_set_mode(priv, mode, nowait);
+}
+
+static inline int
+mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv)
+{
+ return __mcp251xfd_chip_set_normal_mode(priv, false);
+}
+
+static inline int
+mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv)
+{
+ return __mcp251xfd_chip_set_normal_mode(priv, true);
+}
+
+static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv)
+{
+ u32 val;
+ int err;
+
+ val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE;
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val);
+ if (err)
+ return err;
+
+ val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val);
+ if (err)
+ return err;
+
+ val = MCP251XFD_REG_INT_CERRIE |
+ MCP251XFD_REG_INT_SERRIE |
+ MCP251XFD_REG_INT_RXOVIE |
+ MCP251XFD_REG_INT_TXATIE |
+ MCP251XFD_REG_INT_SPICRCIE |
+ MCP251XFD_REG_INT_ECCIE |
+ MCP251XFD_REG_INT_TEFIE |
+ MCP251XFD_REG_INT_MODIE |
+ MCP251XFD_REG_INT_RXIE;
+
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ val |= MCP251XFD_REG_INT_IVMIE;
+
+ return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val);
+}
+
+static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv)
+{
+ int err;
+ u32 mask;
+
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0);
+ if (err)
+ return err;
+
+ mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE;
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON,
+ mask, 0x0);
+ if (err)
+ return err;
+
+ return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0);
+}
+
+static int mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
+ const enum can_state state)
+{
+ priv->can.state = state;
+
+ mcp251xfd_chip_interrupts_disable(priv);
+ mcp251xfd_chip_rx_int_disable(priv);
+ return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+}
+
+static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ err = mcp251xfd_chip_softreset(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp251xfd_chip_clock_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp251xfd_set_bittiming(priv);
+ if (err)
+ goto out_chip_stop;
+
+ err = mcp251xfd_chip_rx_int_enable(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_ecc_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ mcp251xfd_ring_init(priv);
+
+ err = mcp251xfd_chip_fifo_init(priv);
+ if (err)
+ goto out_chip_stop;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ err = mcp251xfd_chip_set_normal_mode(priv);
+ if (err)
+ goto out_chip_stop;
+
+ return 0;
+
+ out_chip_stop:
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+
+ return err;
+}
+
+static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ int err;
+
+ switch (mode) {
+ case CAN_MODE_START:
+ err = mcp251xfd_chip_start(priv);
+ if (err)
+ return err;
+
+ err = mcp251xfd_chip_interrupts_enable(priv);
+ if (err) {
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ return err;
+ }
+
+ netif_wake_queue(ndev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int __mcp251xfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ u32 trec;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
+ if (err)
+ return err;
+
+ if (trec & MCP251XFD_REG_TREC_TXBO)
+ bec->txerr = 256;
+ else
+ bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec);
+ bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec);
+
+ return 0;
+}
+
+static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
+ struct can_berr_counter *bec)
+{
+ const struct mcp251xfd_priv *priv = netdev_priv(ndev);
+
+ /* Avoid waking up the controller if the interface is down */
+ if (!(ndev->flags & IFF_UP))
+ return 0;
+
+ /* The controller is powered down during Bus Off, use saved
+ * bec values.
+ */
+ if (priv->can.state == CAN_STATE_BUS_OFF) {
+ *bec = priv->bec;
+ return 0;
+ }
+
+ return __mcp251xfd_get_berr_counter(ndev, bec);
+}
+
+static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ u8 tef_tail_chip, tef_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ if (tef_tail_chip != tef_tail) {
+ netdev_err(priv->ndev,
+ "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n",
+ tef_tail_chip, tef_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring)
+{
+ u8 rx_tail_chip, rx_tail;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
+ return 0;
+
+ err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
+ if (err)
+ return err;
+
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+ if (rx_tail_chip != rx_tail) {
+ netdev_err(priv->ndev,
+ "RX tail of chip (%d) and ours (%d) inconsistent.\n",
+ rx_tail_chip, rx_tail);
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ u32 tef_sta;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
+ if (err)
+ return err;
+
+ if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
+ netdev_err(priv->ndev,
+ "Transmit Event FIFO buffer overflow.\n");
+ return -ENOBUFS;
+ }
+
+ netdev_info(priv->ndev,
+ "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x)\n",
+ tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
+ "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
+ "not empty" : "empty",
+ seq, priv->tef.tail, priv->tef.head, tx_ring->head);
+
+ /* The Sequence Number in the TEF doesn't match our tef_tail. */
+ return -EAGAIN;
+}
+
+static int
+mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_tef_obj *hw_tef_obj)
+{
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct net_device_stats *stats = &priv->ndev->stats;
+ u32 seq, seq_masked, tef_tail_masked;
+ int err;
+
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ hw_tef_obj->flags);
+
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits; this should be enough to detect
+ * not-yet-completed, i.e. old, TEF objects.
+ */
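+ /* e.g. with the 7-bit mask noted above: tef.tail == 130
+ * gives 130 & 0x7f == 2, so only a TEF object with
+ * seq == 2 (mod 128) is accepted here.
+ */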
+ seq_masked = seq &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ tef_tail_masked = priv->tef.tail &
+ field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
+ if (seq_masked != tef_tail_masked)
+ return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ stats->tx_bytes +=
+ can_rx_offload_get_echo_skb(&priv->offload,
+ mcp251xfd_get_tef_tail(priv),
+ hw_tef_obj->ts);
+ stats->tx_packets++;
+
+ /* finally increment the TEF pointer */
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_TEFCON,
+ GENMASK(15, 8),
+ MCP251XFD_REG_TEFCON_UINC);
+ if (err)
+ return err;
+
+ priv->tef.tail++;
+ tx_ring->tail++;
+
+ return mcp251xfd_check_tef_tail(priv);
+}
+
+static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ unsigned int new_head;
+ u8 chip_tx_tail;
+ int err;
+
+ err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ /* chip_tx_tail is the next TX object the HW will send.
+ * The new TEF head must be >= the old head, ...
+ */
+ new_head = round_down(priv->tef.head, tx_ring->obj_num) + chip_tx_tail;
+ if (new_head <= priv->tef.head)
+ new_head += tx_ring->obj_num;
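+ /* e.g. obj_num == 8, tef.head == 13, chip_tx_tail == 2:
+ * round_down(13, 8) + 2 == 10 <= 13, so new_head becomes 18
+ * and is then clamped to tx_ring->head below.
+ */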
+
+ /* ... but it cannot exceed the TX head. */
+ priv->tef.head = min(new_head, tx_ring->head);
+
+ return mcp251xfd_check_tef_tail(priv);
+}
+
+static inline int
+mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_hw_tef_obj *hw_tef_obj,
+ const u8 offset, const u8 len)
+{
+ const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ (offset > tx_ring->obj_num ||
+ len > tx_ring->obj_num ||
+ offset + len > tx_ring->obj_num)) {
+ netdev_err(priv->ndev,
+ "Trying to read to many TEF objects (max=%d, offset=%d, len=%d).\n",
+ tx_ring->obj_num, offset, len);
+ return -ERANGE;
+ }
+
+ return regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_tef_obj_addr(offset),
+ hw_tef_obj,
+ sizeof(*hw_tef_obj) / sizeof(u32) * len);
+}
+
+static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ u8 tef_tail, len, l;
+ int err, i;
+
+ err = mcp251xfd_tef_ring_update(priv);
+ if (err)
+ return err;
+
+ tef_tail = mcp251xfd_get_tef_tail(priv);
+ len = mcp251xfd_get_tef_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv);
+ err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
+ if (err)
+ return err;
+
+ if (l < len) {
+ err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < len; i++) {
+ err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i]);
+ /* -EAGAIN means the Sequence Number in the TEF
+ * doesn't match our tef_tail. This can happen if we
+ * read the TEF objects too early. Leave the loop and
+ * let the interrupt handler call us again.
+ */
+ if (err == -EAGAIN)
+ goto out_netif_wake_queue;
+ if (err)
+ return err;
+ }
+
+ out_netif_wake_queue:
+ mcp251xfd_ecc_tefif_successful(priv);
+
+ if (mcp251xfd_get_tx_free(priv->tx)) {
+ /* Make sure that anybody stopping the queue after
+ * this sees the new tx_ring->tail.
+ */
+ smp_mb();
+ netif_wake_queue(priv->ndev);
+ }
+
+ return 0;
+}
+
+static int
+mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ u32 new_head;
+ u8 chip_rx_head;
+ int err;
+
+ err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head);
+ if (err)
+ return err;
+
+ /* chip_rx_head is the next RX object the HW will fill.
+ * The new RX head must be >= the old head.
+ */
+ new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
+ if (new_head <= ring->head)
+ new_head += ring->obj_num;
+
+ ring->head = new_head;
+
+ return mcp251xfd_check_rx_tail(priv, ring);
+}
+
+static void
+mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ struct sk_buff *skb)
+{
+ struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
+ u32 sid, eid;
+
+ eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
+ sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);
+
+ cfd->can_id = CAN_EFF_FLAG |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
+ } else {
+ cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
+ hw_rx_obj->id);
+ }
+
+ /* CANFD */
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
+ u8 dlc;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
+ cfd->flags |= CANFD_ESI;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
+ cfd->flags |= CANFD_BRS;
+
+ dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
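+		/* can_dlc2len(): DLC 0..8 map 1:1 to the length, DLC
+		 * 9..15 map to 12, 16, 20, 24, 32, 48 and 64 bytes.
+		 */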
+ cfd->len = can_dlc2len(get_canfd_dlc(dlc));
+ } else {
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
+ cfd->can_id |= CAN_RTR_FLAG;
+
+ cfd->len = get_can_dlc(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
+ hw_rx_obj->flags));
+ }
+
+ memcpy(cfd->data, hw_rx_obj->data, cfd->len);
+}
+
+static int
+mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct canfd_frame *cfd;
+ int err;
+
+ if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
+ skb = alloc_canfd_skb(priv->ndev, &cfd);
+ else
+ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);
+
+ if (!cfd) {
+ stats->rx_dropped++;
+ return 0;
+ }
+
+ mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, hw_rx_obj->ts);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ ring->tail++;
+
+ /* finally increment the RX pointer */
+ return regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FIFOCON(ring->fifo_nr),
+ GENMASK(15, 8),
+ MCP251XFD_REG_FIFOCON_UINC);
+}
+
+static inline int
+mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
+ const u8 offset, const u8 len)
+{
+ int err;
+
+ err = regmap_bulk_read(priv->map_rx,
+ mcp251xfd_get_rx_obj_addr(ring, offset),
+ hw_rx_obj,
+ len * ring->obj_size / sizeof(u32));
+
+ return err;
+}
+
+static int
+mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring)
+{
+ struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
+ u8 rx_tail, len;
+ int err, i;
+
+ err = mcp251xfd_rx_ring_update(priv, ring);
+ if (err)
+ return err;
+
+ while ((len = mcp251xfd_get_rx_linear_len(ring))) {
+ rx_tail = mcp251xfd_get_rx_tail(ring);
+
+ err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
+ rx_tail, len);
+ if (err)
+ return err;
+
+ for (i = 0; i < len; i++) {
+ err = mcp251xfd_handle_rxif_one(priv, ring,
+ (void *)hw_rx_obj +
+ i * ring->obj_size);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_rx_ring *ring;
+ int err, n;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, n) {
+ err = mcp251xfd_handle_rxif_ring(priv, ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
+ u32 *timestamp)
+{
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+}
+
+static struct sk_buff *
+mcp251xfd_alloc_can_err_skb(const struct mcp251xfd_priv *priv,
+ struct can_frame **cf, u32 *timestamp)
+{
+ int err;
+
+ err = mcp251xfd_get_timestamp(priv, timestamp);
+ if (err)
+ return NULL;
+
+ return alloc_can_err_skb(priv->ndev, cf);
+}
+
+static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct mcp251xfd_rx_ring *ring;
+ struct sk_buff *skb;
+ struct can_frame *cf;
+ u32 timestamp, rxovif;
+ int err, i;
+
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif);
+ if (err)
+ return err;
+
+ mcp251xfd_for_each_rx_ring(priv, ring, i) {
+ if (!(rxovif & BIT(ring->fifo_nr)))
+ continue;
+
+ /* If SERRIF is active, there was a RX MAB overflow. */
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) {
+ netdev_info(priv->ndev,
+ "RX-%d: MAB overflow detected.\n",
+ ring->nr);
+ } else {
+ netdev_info(priv->ndev,
+ "RX-%d: FIFO overflow.\n", ring->nr);
+ }
+
+ err = regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ MCP251XFD_REG_FIFOSTA_RXOVIF,
+ 0x0);
+ if (err)
+ return err;
+ }
+
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ if (!skb)
+ return 0;
+
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
+{
+ netdev_info(priv->ndev, "%s\n", __func__);
+
+ return 0;
+}
+
+static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ u32 bdiag1, timestamp;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ int err;
+
+ err = mcp251xfd_get_timestamp(priv, &timestamp);
+ if (err)
+ return err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1);
+ if (err)
+ return err;
+
+ /* Write 0s to clear the error bits; don't write 1s to
+ * inactive bits, as that would set them.
+ */
+ err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0);
+ if (err)
+ return err;
+
+ priv->can.can_stats.bus_error++;
+
+ skb = alloc_can_err_skb(priv->ndev, &cf);
+ if (cf)
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ /* Controller misconfiguration */
+ if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM))
+ netdev_err(priv->ndev,
+ "recv'd DLC is larger than PLSIZE of FIFO element.");
+
+ /* RX errors */
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR |
+ MCP251XFD_REG_BDIAG1_NCRCERR)) {
+ netdev_dbg(priv->ndev, "CRC error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR |
+ MCP251XFD_REG_BDIAG1_NSTUFERR)) {
+ netdev_dbg(priv->ndev, "Stuff error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR |
+ MCP251XFD_REG_BDIAG1_NFORMERR)) {
+ netdev_dbg(priv->ndev, "Format error\n");
+
+ stats->rx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ }
+
+ /* TX errors */
+ if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) {
+ netdev_dbg(priv->ndev, "NACK error\n");
+
+ stats->tx_errors++;
+ if (cf) {
+ cf->can_id |= CAN_ERR_ACK;
+ cf->data[2] |= CAN_ERR_PROT_TX;
+ }
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR |
+ MCP251XFD_REG_BDIAG1_NBIT1ERR)) {
+ netdev_dbg(priv->ndev, "Bit1 error\n");
+
+ stats->tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1;
+ }
+ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR |
+ MCP251XFD_REG_BDIAG1_NBIT0ERR)) {
+ netdev_dbg(priv->ndev, "Bit0 error\n");
+
+ stats->tx_errors++;
+ if (cf)
+ cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0;
+ }
+
+ if (!cf)
+ return 0;
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
+{
+ struct net_device_stats *stats = &priv->ndev->stats;
+ struct sk_buff *skb;
+ struct can_frame *cf = NULL;
+ enum can_state new_state, rx_state, tx_state;
+ u32 trec, timestamp;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
+ if (err)
+ return err;
+
+ if (trec & MCP251XFD_REG_TREC_TXBO)
+ tx_state = CAN_STATE_BUS_OFF;
+ else if (trec & MCP251XFD_REG_TREC_TXBP)
+ tx_state = CAN_STATE_ERROR_PASSIVE;
+ else if (trec & MCP251XFD_REG_TREC_TXWARN)
+ tx_state = CAN_STATE_ERROR_WARNING;
+ else
+ tx_state = CAN_STATE_ERROR_ACTIVE;
+
+ if (trec & MCP251XFD_REG_TREC_RXBP)
+ rx_state = CAN_STATE_ERROR_PASSIVE;
+ else if (trec & MCP251XFD_REG_TREC_RXWARN)
+ rx_state = CAN_STATE_ERROR_WARNING;
+ else
+ rx_state = CAN_STATE_ERROR_ACTIVE;
+
+ new_state = max(tx_state, rx_state);
+ if (new_state == priv->can.state)
+ return 0;
+
+ /* The skb allocation might fail, but can_change_state()
+ * handles cf == NULL.
+ */
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ can_change_state(priv->ndev, cf, tx_state, rx_state);
+
+ if (new_state == CAN_STATE_BUS_OFF) {
+ /* As we're going to switch off the chip now, save the
+ * error counters so they can be returned to userspace
+ * if do_get_berr_counter() is called while the chip is
+ * in Bus Off.
+ */
+ err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec);
+ if (err)
+ return err;
+
+ mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF);
+ can_bus_off(priv->ndev);
+ }
+
+ if (!skb)
+ return 0;
+
+ if (new_state != CAN_STATE_BUS_OFF) {
+ struct can_berr_counter bec;
+
+ err = mcp251xfd_get_berr_counter(priv->ndev, &bec);
+ if (err)
+ return err;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
+
+ err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
+ if (err)
+ stats->rx_fifo_errors++;
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
+{
+ const u8 mode_reference = mcp251xfd_get_normal_mode(priv);
+ u8 mode;
+ int err;
+
+ err = mcp251xfd_chip_get_mode(priv, &mode);
+ if (err)
+ return err;
+
+ if (mode == mode_reference) {
+ netdev_dbg(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+ return 0;
+ }
+
+ /* According to MCP2517FD errata DS80000792B 1., during a TX
+ * MAB underflow, the controller will transition to Restricted
+ * Operation Mode or Listen Only Mode (depending on SERR2LOM).
+ *
+ * However this is not always the case. If SERR2LOM is
+ * configured for Restricted Operation Mode (SERR2LOM not set)
+ * the MCP2517FD will sometimes transition to Listen Only Mode
+ * first. When polling this bit we see that it will transition
+ * to Restricted Operation Mode shortly after.
+ */
+ if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) &&
+ (mode == MCP251XFD_REG_CON_MODE_RESTRICTED ||
+ mode == MCP251XFD_REG_CON_MODE_LISTENONLY))
+ netdev_dbg(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+ else
+ netdev_err(priv->ndev,
+ "Controller changed into %s Mode (%u).\n",
+ mcp251xfd_get_mode_str(mode), mode);
+
+ /* After the application requests Normal mode, the Controller
+ * will automatically attempt to retransmit the message that
+ * caused the TX MAB underflow.
+ *
+ * However, if there is an ECC error in the TX-RAM, we first
+ * have to reload the tx-object before requesting Normal
+ * mode. This is done later in mcp251xfd_handle_eccif().
+ */
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) {
+ *set_normal_mode = true;
+ return 0;
+ }
+
+ return mcp251xfd_chip_set_normal_mode_nowait(priv);
+}
+
+static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ struct net_device_stats *stats = &priv->ndev->stats;
+ bool handled = false;
+
+ /* TX MAB underflow
+ *
+ * According to MCP2517FD Errata DS80000792B 1. a TX MAB
+ * underflow is indicated by SERRIF and MODIF.
+ *
+ * In addition to the effects mentioned in the Errata, there
+ * are Bus Errors due to the aborted CAN frame, so a IVMIF
+ * will be seen as well.
+ *
+ * Sometimes there is an ECC error in the TX-RAM, which leads
+ * to a TX MAB underflow.
+ *
+ * However, probably due to a race condition, there is no
+ * associated MODIF pending.
+ *
+ * Further, there are situations, where the SERRIF is caused
+ * by an ECC error in the TX-RAM, but not even the ECCIF is
+ * set. This only seems to happen _after_ the first occurrence
+ * of an ECCIF (which is tracked in ecc->cnt).
+ *
+ * Treat all of these as known system errors.
+ */
+ if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF &&
+ priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) ||
+ priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
+ ecc->cnt) {
+ const char *msg;
+
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF ||
+ ecc->cnt)
+ msg = "TX MAB underflow due to ECC error detected.";
+ else
+ msg = "TX MAB underflow detected.";
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN)
+ netdev_dbg(priv->ndev, "%s\n", msg);
+ else
+ netdev_info(priv->ndev, "%s\n", msg);
+
+ stats->tx_aborted_errors++;
+ stats->tx_errors++;
+ handled = true;
+ }
+
+ /* RX MAB overflow
+ *
+ * According to MCP2517FD Errata DS80000792B 1. a RX MAB
+ * overflow is indicated by SERRIF.
+ *
+ * In addition to the effects mentioned in the Errata, (most
+ * of the times) a RXOVIF is raised, if the FIFO that is being
+ * received into has the RXOVIE activated (and we have enabled
+ * RXOVIE on all FIFOs).
+ *
+ * Sometimes there is no RXOVIF, just a RXIF pending.
+ *
+ * Treat all of these as known system errors.
+ */
+ if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF ||
+ priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) {
+ stats->rx_dropped++;
+ handled = true;
+ }
+
+ if (!handled)
+ netdev_err(priv->ndev,
+ "Unhandled System Error Interrupt (intf=0x%08x)!\n",
+ priv->regs_status.intf);
+
+ return 0;
+}
+
+static int
+mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr)
+{
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u8 chip_tx_tail, tx_tail, offset;
+ u16 addr;
+ int err;
+
+ addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat);
+
+ err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ if (err)
+ return err;
+
+ tx_tail = mcp251xfd_get_tx_tail(tx_ring);
+ offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1);
+
+ /* Bail out if one of the following is met:
+ * - tx_tail information is inconsistent
+ * - for mcp2517fd: offset not 0
+ * - for mcp2518fd: offset not 0 or 1
+ */
+ if (chip_tx_tail != tx_tail ||
+ !(offset == 0 || (offset == 1 && mcp251xfd_is_2518(priv)))) {
+ netdev_err(priv->ndev,
+ "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n",
+ addr, nr, tx_ring->tail, tx_tail, chip_tx_tail,
+ offset);
+ return -EINVAL;
+ }
+
+ netdev_info(priv->ndev,
+ "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n",
+ ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ?
+ "Single" : "Double",
+ addr, nr, tx_ring->tail, tx_tail, offset);
+
+ /* reload tx_obj into controller RAM ... */
+ tx_obj = &tx_ring->obj[nr];
+ err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1);
+ if (err)
+ return err;
+
+ /* ... and trigger retransmit */
+ return mcp251xfd_chip_set_normal_mode(priv);
+}
+
+static int
+mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
+{
+ struct mcp251xfd_ecc *ecc = &priv->ecc;
+ const char *msg;
+ bool in_tx_ram;
+ u32 ecc_stat;
+ u16 addr;
+ u8 nr;
+ int err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT,
+ MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat);
+ if (err)
+ return err;
+
+ /* Check if ECC error occurred in TX-RAM */
+ addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat);
+ err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr);
+ if (!err)
+ in_tx_ram = true;
+ else if (err == -ENOENT)
+ in_tx_ram = false;
+ else
+ return err;
+
+ /* Errata Reference:
+ * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
+ *
+ * ECC single error correction does not work in all cases:
+ *
+ * Fix/Work Around:
+ * Enable single error correction and double error detection
+ * interrupts by setting SECIE and DEDIE. Handle SECIF as a
+ * detection interrupt and do not rely on the error
+ * correction. Instead, handle both interrupts as a
+ * notification that the RAM word at ERRADDR was corrupted.
+ */
+ if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF)
+ msg = "Single ECC Error detected at address";
+ else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF)
+ msg = "Double ECC Error detected at address";
+ else
+ return -EINVAL;
+
+ if (!in_tx_ram) {
+ ecc->ecc_stat = 0;
+
+ netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr);
+ } else {
+ /* Re-occurring error? */
+ if (ecc->ecc_stat == ecc_stat) {
+ ecc->cnt++;
+ } else {
+ ecc->ecc_stat = ecc_stat;
+ ecc->cnt = 1;
+ }
+
+ netdev_info(priv->ndev,
+ "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n",
+ msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : "");
+
+ if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX)
+ return mcp251xfd_handle_eccif_recover(priv, nr);
+ }
+
+ if (set_normal_mode)
+ return mcp251xfd_chip_set_normal_mode_nowait(priv);
+
+ return 0;
+}
+
+static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv)
+{
+ int err;
+ u32 crc;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc);
+ if (err)
+ return err;
+
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC,
+ MCP251XFD_REG_CRC_IF_MASK,
+ ~crc);
+ if (err)
+ return err;
+
+ if (crc & MCP251XFD_REG_CRC_FERRIF)
+ netdev_notice(priv->ndev, "CRC write command format error.\n");
+ else if (crc & MCP251XFD_REG_CRC_CRCERRIF)
+ netdev_notice(priv->ndev,
+ "CRC write error detected. CRC=0x%04lx.\n",
+ FIELD_GET(MCP251XFD_REG_CRC_MASK, crc));
+
+ return 0;
+}
+
+#define mcp251xfd_handle(priv, irq, ...) \
+({ \
+ struct mcp251xfd_priv *_priv = (priv); \
+ int err; \
+\
+ err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \
+ if (err) \
+ netdev_err(_priv->ndev, \
+ "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \
+ __stringify(irq), err); \
+ err; \
+})
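+
+/* For illustration: mcp251xfd_handle(priv, rxif) expands to a call to
+ * mcp251xfd_handle_rxif(priv); if that call returns e.g. -EINVAL, the
+ * macro logs "IRQ handler mcp251xfd_handle_rxif() returned -22." and
+ * the statement expression evaluates to the error code, so the caller
+ * can still propagate it.
+ */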
+
+static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
+{
+ struct mcp251xfd_priv *priv = dev_id;
+ irqreturn_t handled = IRQ_NONE;
+ int err;
+
+ if (priv->rx_int)
+ do {
+ int rx_pending;
+
+ rx_pending = gpiod_get_value_cansleep(priv->rx_int);
+ if (!rx_pending)
+ break;
+
+ err = mcp251xfd_handle(priv, rxif);
+ if (err)
+ goto out_fail;
+
+ handled = IRQ_HANDLED;
+ } while (1);
+
+ do {
+ u32 intf_pending, intf_pending_clearable;
+ bool set_normal_mode = false;
+
+ err = regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT,
+ &priv->regs_status,
+ sizeof(priv->regs_status) /
+ sizeof(u32));
+ if (err)
+ goto out_fail;
+
+ intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK,
+ priv->regs_status.intf) &
+ FIELD_GET(MCP251XFD_REG_INT_IE_MASK,
+ priv->regs_status.intf);
+
+ if (!intf_pending)
+ return handled;
+
+ /* Some interrupts must be ACKed in the
+ * MCP251XFD_REG_INT register.
+ * - First ACK, then handle, to avoid a lost-IRQ race
+ * condition on fast re-occurring interrupts.
+ * - Write "0" to clear active IRQs, "1" to all others,
+ * to avoid a r/m/w race condition on the
+ * MCP251XFD_REG_INT register.
+ */
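+ /* For example, if intf_pending contains MODIF and RXIF, only
+ * MODIF is part of MCP251XFD_REG_INT_IF_CLEARABLE_MASK: the
+ * update below writes "0" to the MODIF bit and "1" to all
+ * other IF bits, leaving the not-yet-handled RXIF untouched.
+ */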
+ intf_pending_clearable = intf_pending &
+ MCP251XFD_REG_INT_IF_CLEARABLE_MASK;
+ if (intf_pending_clearable) {
+ err = regmap_update_bits(priv->map_reg,
+ MCP251XFD_REG_INT,
+ MCP251XFD_REG_INT_IF_MASK,
+ ~intf_pending_clearable);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_MODIF) {
+ err = mcp251xfd_handle(priv, modif, &set_normal_mode);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_RXIF) {
+ err = mcp251xfd_handle(priv, rxif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_TEFIF) {
+ err = mcp251xfd_handle(priv, tefif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_RXOVIF) {
+ err = mcp251xfd_handle(priv, rxovif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_TXATIF) {
+ err = mcp251xfd_handle(priv, txatif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_IVMIF) {
+ err = mcp251xfd_handle(priv, ivmif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_SERRIF) {
+ err = mcp251xfd_handle(priv, serrif);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_ECCIF) {
+ err = mcp251xfd_handle(priv, eccif, set_normal_mode);
+ if (err)
+ goto out_fail;
+ }
+
+ if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) {
+ err = mcp251xfd_handle(priv, spicrcif);
+ if (err)
+ goto out_fail;
+ }
+
+ /* On the MCP2517FD and MCP2518FD, we don't get a
+ * CERRIF IRQ on the transition TX ERROR_WARNING -> TX
+ * ERROR_ACTIVE.
+ */
+ if (intf_pending & MCP251XFD_REG_INT_CERRIF ||
+ priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ err = mcp251xfd_handle(priv, cerrif);
+ if (err)
+ goto out_fail;
+
+ /* In Bus Off we completely shut down the
+ * controller. Every subsequent register read
+ * will read bogus data, and if
+ * MCP251XFD_QUIRK_CRC_REG is enabled the CRC
+ * check will fail, too. So leave IRQ handler
+ * directly.
+ */
+ if (priv->can.state == CAN_STATE_BUS_OFF)
+ return IRQ_HANDLED;
+ }
+
+ handled = IRQ_HANDLED;
+ } while (1);
+
+ out_fail:
+ netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
+ err, priv->regs_status.intf);
+ mcp251xfd_chip_interrupts_disable(priv);
+
+ return handled;
+}
+
+static inline struct mcp251xfd_tx_obj *
+mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
+{
+ u8 tx_head;
+
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+
+ return &tx_ring->obj[tx_head];
+}
+
+static void
+mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj,
+ const struct sk_buff *skb,
+ unsigned int seq)
+{
+ const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
+ struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
+ union mcp251xfd_tx_obj_load_buf *load_buf;
+ u8 dlc;
+ u32 id, flags;
+ int offset, len;
+
+ if (cfd->can_id & CAN_EFF_FLAG) {
+ u32 sid, eid;
+
+ sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
+ eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);
+
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
+ FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);
+
+ flags = MCP251XFD_OBJ_FLAGS_IDE;
+ } else {
+ id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
+ flags = 0;
+ }
+
+ /* Use the MCP2518FD mask even on the MCP2517FD. It does no
+ * harm, as only the lower 7 bits will be transferred into the
+ * TEF object.
+ */
+ dlc = can_len2dlc(cfd->len);
+ flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
+ FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);
+
+ if (cfd->can_id & CAN_RTR_FLAG)
+ flags |= MCP251XFD_OBJ_FLAGS_RTR;
+
+ /* CANFD */
+ if (can_is_canfd_skb(skb)) {
+ if (cfd->flags & CANFD_ESI)
+ flags |= MCP251XFD_OBJ_FLAGS_ESI;
+
+ flags |= MCP251XFD_OBJ_FLAGS_FDF;
+
+ if (cfd->flags & CANFD_BRS)
+ flags |= MCP251XFD_OBJ_FLAGS_BRS;
+ }
+
+ load_buf = &tx_obj->buf;
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
+ hw_tx_obj = &load_buf->crc.hw_tx_obj;
+ else
+ hw_tx_obj = &load_buf->nocrc.hw_tx_obj;
+
+ put_unaligned_le32(id, &hw_tx_obj->id);
+ put_unaligned_le32(flags, &hw_tx_obj->flags);
+
+ /* Clear data at end of CAN frame */
+ offset = round_down(cfd->len, sizeof(u32));
+ len = round_up(can_dlc2len(dlc), sizeof(u32)) - offset;
+ if (MCP251XFD_SANITIZE_CAN && len)
+ memset(hw_tx_obj->data + offset, 0x0, len);
+ memcpy(hw_tx_obj->data, cfd->data, cfd->len);
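+ /* Worked example (illustrative): a CAN FD frame with len == 13
+ * maps to dlc == 10, i.e. 16 data bytes on the wire. Here
+ * offset == 12, so bytes 12..15 are zeroed first; the memcpy()
+ * above then copies the 13 payload bytes, leaving bytes 13..15
+ * sanitized.
+ */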
+
+ /* Number of bytes to be written into the RAM of the controller */
+ len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
+ if (MCP251XFD_SANITIZE_CAN)
+ len += round_up(can_dlc2len(dlc), sizeof(u32));
+ else
+ len += round_up(cfd->len, sizeof(u32));
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
+ u16 crc;
+
+ mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
+ len);
+ /* CRC */
+ len += sizeof(load_buf->crc.cmd);
+ crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
+ put_unaligned_be16(crc, (void *)load_buf + len);
+
+ /* Total length */
+ len += sizeof(load_buf->crc.crc);
+ } else {
+ len += sizeof(load_buf->nocrc.cmd);
+ }
+
+ tx_obj->xfer[0].len = len;
+}
+
+static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_obj *tx_obj)
+{
+ return spi_async(priv->spi, &tx_obj->msg);
+}
+
+static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring)
+{
+ if (mcp251xfd_get_tx_free(tx_ring) > 0)
+ return false;
+
+ netif_stop_queue(priv->ndev);
+
+ /* Memory barrier before checking tx_free (head and tail) */
+ smp_mb();
+
+ if (mcp251xfd_get_tx_free(tx_ring) == 0) {
+ netdev_dbg(priv->ndev,
+ "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
+ tx_ring->head, tx_ring->tail,
+ tx_ring->head - tx_ring->tail);
+
+ return true;
+ }
+
+ netif_start_queue(priv->ndev);
+
+ return false;
+}
+
+static netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ struct mcp251xfd_tx_obj *tx_obj;
+ u8 tx_head;
+ int err;
+
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
+ if (mcp251xfd_tx_busy(priv, tx_ring))
+ return NETDEV_TX_BUSY;
+
+ tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
+ mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);
+
+ /* Stop queue if we occupy the complete TX FIFO */
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ tx_ring->head++;
+ if (tx_ring->head - tx_ring->tail >= tx_ring->obj_num)
+ netif_stop_queue(ndev);
+
+ can_put_echo_skb(skb, ndev, tx_head);
+
+ err = mcp251xfd_tx_obj_write(priv, tx_obj);
+ if (err)
+ goto out_err;
+
+ return NETDEV_TX_OK;
+
+ out_err:
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+
+ return NETDEV_TX_OK;
+}
+
+static int mcp251xfd_open(struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+ const struct spi_device *spi = priv->spi;
+ int err;
+
+ err = pm_runtime_get_sync(ndev->dev.parent);
+ if (err < 0) {
+ pm_runtime_put_noidle(ndev->dev.parent);
+ return err;
+ }
+
+ err = open_candev(ndev);
+ if (err)
+ goto out_pm_runtime_put;
+
+ err = mcp251xfd_ring_alloc(priv);
+ if (err)
+ goto out_close_candev;
+
+ err = mcp251xfd_transceiver_enable(priv);
+ if (err)
+ goto out_mcp251xfd_ring_free;
+
+ err = mcp251xfd_chip_start(priv);
+ if (err)
+ goto out_transceiver_disable;
+
+ can_rx_offload_enable(&priv->offload);
+
+ err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
+ IRQF_ONESHOT, dev_name(&spi->dev),
+ priv);
+ if (err)
+ goto out_can_rx_offload_disable;
+
+ err = mcp251xfd_chip_interrupts_enable(priv);
+ if (err)
+ goto out_free_irq;
+
+ netif_start_queue(ndev);
+
+ return 0;
+
+ out_free_irq:
+ free_irq(spi->irq, priv);
+ out_can_rx_offload_disable:
+ can_rx_offload_disable(&priv->offload);
+ out_transceiver_disable:
+ mcp251xfd_transceiver_disable(priv);
+ out_mcp251xfd_ring_free:
+ mcp251xfd_ring_free(priv);
+ out_close_candev:
+ close_candev(ndev);
+ out_pm_runtime_put:
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ pm_runtime_put(ndev->dev.parent);
+
+ return err;
+}
+
+static int mcp251xfd_stop(struct net_device *ndev)
+{
+ struct mcp251xfd_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ mcp251xfd_chip_interrupts_disable(priv);
+ free_irq(ndev->irq, priv);
+ can_rx_offload_disable(&priv->offload);
+ mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
+ mcp251xfd_transceiver_disable(priv);
+ mcp251xfd_ring_free(priv);
+ close_candev(ndev);
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+}
+
+static const struct net_device_ops mcp251xfd_netdev_ops = {
+ .ndo_open = mcp251xfd_open,
+ .ndo_stop = mcp251xfd_stop,
+ .ndo_start_xmit = mcp251xfd_start_xmit,
+ .ndo_change_mtu = can_change_mtu,
+};
+
+static void
+mcp251xfd_register_quirks(struct mcp251xfd_priv *priv)
+{
+ const struct spi_device *spi = priv->spi;
+ const struct spi_controller *ctlr = spi->controller;
+
+ if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX)
+ priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX;
+}
+
+static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv)
+{
+ const struct net_device *ndev = priv->ndev;
+ const struct mcp251xfd_devtype_data *devtype_data;
+ u32 osc;
+ int err;
+
+ /* The OSC_LPMEN bit is only supported on the MCP2518FD, so use
+ * it to autodetect the model.
+ */
+ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC,
+ MCP251XFD_REG_OSC_LPMEN,
+ MCP251XFD_REG_OSC_LPMEN);
+ if (err)
+ return err;
+
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc);
+ if (err)
+ return err;
+
+ if (osc & MCP251XFD_REG_OSC_LPMEN)
+ devtype_data = &mcp251xfd_devtype_data_mcp2518fd;
+ else
+ devtype_data = &mcp251xfd_devtype_data_mcp2517fd;
+
+ if (!mcp251xfd_is_251X(priv) &&
+ priv->devtype_data.model != devtype_data->model) {
+ netdev_info(ndev,
+ "Detected %s, but firmware specifies a %s. Fixing up.",
+ __mcp251xfd_get_model_str(devtype_data->model),
+ mcp251xfd_get_model_str(priv));
+ }
+ priv->devtype_data = *devtype_data;
+
+ /* We need to preserve the Half Duplex Quirk. */
+ mcp251xfd_register_quirks(priv);
+
+ /* Re-init regmap with quirks of detected model. */
+ return mcp251xfd_regmap_init(priv);
+}
+
+static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv)
+{
+ int err, rx_pending;
+
+ if (!priv->rx_int)
+ return 0;
+
+ err = mcp251xfd_chip_rx_int_enable(priv);
+ if (err)
+ return err;
+
+ /* Check if RX_INT is properly working. The RX_INT should not
+ * be active after a softreset.
+ */
+ rx_pending = gpiod_get_value_cansleep(priv->rx_int);
+
+ err = mcp251xfd_chip_rx_int_disable(priv);
+ if (err)
+ return err;
+
+ if (!rx_pending)
+ return 0;
+
+ netdev_info(priv->ndev,
+ "RX_INT active after softreset, disabling RX_INT support.");
+ devm_gpiod_put(&priv->spi->dev, priv->rx_int);
+ priv->rx_int = NULL;
+
+ return 0;
+}
+
+static int
+mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv,
+ u32 *dev_id, u32 *effective_speed_hz)
+{
+ struct mcp251xfd_map_buf_nocrc *buf_rx;
+ struct mcp251xfd_map_buf_nocrc *buf_tx;
+ struct spi_transfer xfer[2] = { };
+ int err;
+
+ buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL);
+ if (!buf_rx)
+ return -ENOMEM;
+
+ buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL);
+ if (!buf_tx) {
+ err = -ENOMEM;
+ goto out_kfree_buf_rx;
+ }
+
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = sizeof(*dev_id);
+
+ mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID);
+ err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer));
+ if (err)
+ goto out_kfree_buf_tx;
+
+ *dev_id = be32_to_cpup((__be32 *)buf_rx->data);
+ *effective_speed_hz = xfer->effective_speed_hz;
+
+ out_kfree_buf_tx:
+ kfree(buf_tx);
+ out_kfree_buf_rx:
+ kfree(buf_rx);
+
+ return err;
+}
+
+#define MCP251XFD_QUIRK_ACTIVE(quirk) \
+ (priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-')
+
+static int
+mcp251xfd_register_done(const struct mcp251xfd_priv *priv)
+{
+ u32 dev_id, effective_speed_hz;
+ int err;
+
+ err = mcp251xfd_register_get_dev_id(priv, &dev_id,
+ &effective_speed_hz);
+ if (err)
+ return err;
+
+ netdev_info(priv->ndev,
+ "%s rev%lu.%lu (%cRX_INT %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD c:%u.%02uMHz m:%u.%02uMHz r:%u.%02uMHz e:%u.%02uMHz) successfully initialized.\n",
+ mcp251xfd_get_model_str(priv),
+ FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id),
+ FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id),
+ priv->rx_int ? '+' : '-',
+ MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN),
+ MCP251XFD_QUIRK_ACTIVE(CRC_REG),
+ MCP251XFD_QUIRK_ACTIVE(CRC_RX),
+ MCP251XFD_QUIRK_ACTIVE(CRC_TX),
+ MCP251XFD_QUIRK_ACTIVE(ECC),
+ MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX),
+ priv->can.clock.freq / 1000000,
+ priv->can.clock.freq % 1000000 / 1000 / 10,
+ priv->spi_max_speed_hz_orig / 1000000,
+ priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10,
+ priv->spi->max_speed_hz / 1000000,
+ priv->spi->max_speed_hz % 1000000 / 1000 / 10,
+ effective_speed_hz / 1000000,
+ effective_speed_hz % 1000000 / 1000 / 10);
+
+ return 0;
+}
+
+static int mcp251xfd_register(struct mcp251xfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ int err;
+
+ err = mcp251xfd_clks_and_vdd_enable(priv);
+ if (err)
+ return err;
+
+ pm_runtime_get_noresume(ndev->dev.parent);
+ err = pm_runtime_set_active(ndev->dev.parent);
+ if (err)
+ goto out_runtime_put_noidle;
+ pm_runtime_enable(ndev->dev.parent);
+
+ mcp251xfd_register_quirks(priv);
+
+ err = mcp251xfd_chip_softreset(priv);
+ if (err == -ENODEV)
+ goto out_runtime_disable;
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp251xfd_register_chip_detect(priv);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp251xfd_register_check_rx_int(priv);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = register_candev(ndev);
+ if (err)
+ goto out_chip_set_mode_sleep;
+
+ err = mcp251xfd_register_done(priv);
+ if (err)
+ goto out_unregister_candev;
+
+ /* Put controller into sleep mode and let pm_runtime_put()
+ * disable the clocks and vdd. If CONFIG_PM is not enabled,
+ * the clocks and vdd will stay powered.
+ */
+ err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ if (err)
+ goto out_unregister_candev;
+
+ pm_runtime_put(ndev->dev.parent);
+
+ return 0;
+
+ out_unregister_candev:
+ unregister_candev(ndev);
+ out_chip_set_mode_sleep:
+ mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP);
+ out_runtime_disable:
+ pm_runtime_disable(ndev->dev.parent);
+ out_runtime_put_noidle:
+ pm_runtime_put_noidle(ndev->dev.parent);
+ mcp251xfd_clks_and_vdd_disable(priv);
+
+ return err;
+}
+
+static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv)
+{
+ struct net_device *ndev = priv->ndev;
+
+ unregister_candev(ndev);
+
+ pm_runtime_get_sync(ndev->dev.parent);
+ pm_runtime_put_noidle(ndev->dev.parent);
+ mcp251xfd_clks_and_vdd_disable(priv);
+ pm_runtime_disable(ndev->dev.parent);
+}
+
+static const struct of_device_id mcp251xfd_of_match[] = {
+ {
+ .compatible = "microchip,mcp2517fd",
+ .data = &mcp251xfd_devtype_data_mcp2517fd,
+ }, {
+ .compatible = "microchip,mcp2518fd",
+ .data = &mcp251xfd_devtype_data_mcp2518fd,
+ }, {
+ .compatible = "microchip,mcp251xfd",
+ .data = &mcp251xfd_devtype_data_mcp251xfd,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mcp251xfd_of_match);
+
+static const struct spi_device_id mcp251xfd_id_table[] = {
+ {
+ .name = "mcp2517fd",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd,
+ }, {
+ .name = "mcp2518fd",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd,
+ }, {
+ .name = "mcp251xfd",
+ .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
+
+static int mcp251xfd_probe(struct spi_device *spi)
+{
+ const void *match;
+ struct net_device *ndev;
+ struct mcp251xfd_priv *priv;
+ struct gpio_desc *rx_int;
+ struct regulator *reg_vdd, *reg_xceiver;
+ struct clk *clk;
+ u32 freq;
+ int err;
+
+ rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
+ GPIOD_IN);
+ if (PTR_ERR(rx_int) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(rx_int))
+ return PTR_ERR(rx_int);
+
+ reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd");
+ if (PTR_ERR(reg_vdd) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (PTR_ERR(reg_vdd) == -ENODEV)
+ reg_vdd = NULL;
+ else if (IS_ERR(reg_vdd))
+ return PTR_ERR(reg_vdd);
+
+ reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
+ if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (PTR_ERR(reg_xceiver) == -ENODEV)
+ reg_xceiver = NULL;
+ else if (IS_ERR(reg_xceiver))
+ return PTR_ERR(reg_xceiver);
+
+ clk = devm_clk_get(&spi->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&spi->dev, "No Oscillator (clock) defined.\n");
+ return PTR_ERR(clk);
+ }
+ freq = clk_get_rate(clk);
+
+ /* Sanity check */
+ if (freq < MCP251XFD_SYSCLOCK_HZ_MIN ||
+ freq > MCP251XFD_SYSCLOCK_HZ_MAX) {
+ dev_err(&spi->dev,
+ "Oscillator frequency (%u Hz) is too low or high.\n",
+ freq);
+ return -ERANGE;
+ }
+
+ if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) {
+ dev_err(&spi->dev,
+ "Oscillator frequency (%u Hz) is too low and PLL is not supported.\n",
+ freq);
+ return -ERANGE;
+ }
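+
+ /* With MCP251XFD_SYSCLOCK_HZ_MAX == 40 MHz and
+ * MCP251XFD_OSC_PLL_MULTIPLIER == 10, the check above rejects
+ * oscillators of 4 MHz and below, which could only reach a
+ * usable SYSCLK via the (unsupported) 10x PLL.
+ */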
+
+ ndev = alloc_candev(sizeof(struct mcp251xfd_priv),
+ MCP251XFD_TX_OBJ_NUM_MAX);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ ndev->netdev_ops = &mcp251xfd_netdev_ops;
+ ndev->irq = spi->irq;
+ ndev->flags |= IFF_ECHO;
+
+ priv = netdev_priv(ndev);
+ spi_set_drvdata(spi, priv);
+ priv->can.clock.freq = freq;
+ priv->can.do_set_mode = mcp251xfd_set_mode;
+ priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
+ priv->can.bittiming_const = &mcp251xfd_bittiming_const;
+ priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD |
+ CAN_CTRLMODE_FD_NON_ISO;
+ priv->ndev = ndev;
+ priv->spi = spi;
+ priv->rx_int = rx_int;
+ priv->clk = clk;
+ priv->reg_vdd = reg_vdd;
+ priv->reg_xceiver = reg_xceiver;
+
+ match = device_get_match_data(&spi->dev);
+ if (match)
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
+ else
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)
+ spi_get_device_id(spi)->driver_data;
+
+ /* Errata Reference:
+ * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 4.
+ *
+ * The SPI can write corrupted data to the RAM at fast SPI
+ * speeds:
+ *
+ * Simultaneous activity on the CAN bus while writing data to
+ * RAM via the SPI interface, with high SCK frequency, can
+ * lead to corrupted data being written to RAM.
+ *
+ * Fix/Work Around:
+ * Ensure that FSCK is less than or equal to 0.85 *
+ * (FSYSCLK/2).
+ *
+ * Known good and bad combinations are:
+ *
+ * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk Status config
+ *
+ * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 9375000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz good assigned-clocks = <&ccu CLK_SPIx>
+ * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 18750000 Hz 93.75% 600000000 Hz bad assigned-clocks = <&ccu CLK_SPIx>
+ * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz good assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
+ * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 9523809 Hz 95.34% 28571429 Hz bad assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT>
+ * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
+ * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz good default
+ *
+ */
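+ /* Worked example: with a 40 MHz oscillator the assignment below
+ * caps the SPI clock at 40000000 / 2 / 1000 * 850 == 17 MHz,
+ * i.e. 85% of FSYSCLK/2 (20 MHz); dividing by 1000 before
+ * multiplying keeps the intermediate value within 32 bit.
+ */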
+ priv->spi_max_speed_hz_orig = spi->max_speed_hz;
+ spi->max_speed_hz = min(spi->max_speed_hz, freq / 2 / 1000 * 850);
+ spi->bits_per_word = 8;
+ spi->rt = true;
+ err = spi_setup(spi);
+ if (err)
+ goto out_free_candev;
+
+ err = mcp251xfd_regmap_init(priv);
+ if (err)
+ goto out_free_candev;
+
+ err = can_rx_offload_add_manual(ndev, &priv->offload,
+ MCP251XFD_NAPI_WEIGHT);
+ if (err)
+ goto out_free_candev;
+
+ err = mcp251xfd_register(priv);
+ if (err)
+ goto out_free_candev;
+
+ return 0;
+
+ out_free_candev:
+ spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+
+ free_candev(ndev);
+
+ return err;
+}
+
+static int mcp251xfd_remove(struct spi_device *spi)
+{
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct net_device *ndev = priv->ndev;
+
+ can_rx_offload_del(&priv->offload);
+ mcp251xfd_unregister(priv);
+ spi->max_speed_hz = priv->spi_max_speed_hz_orig;
+ free_candev(ndev);
+
+ return 0;
+}
+
+static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device)
+{
+ const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+
+ return mcp251xfd_clks_and_vdd_disable(priv);
+}
+
+static int __maybe_unused mcp251xfd_runtime_resume(struct device *device)
+{
+ const struct mcp251xfd_priv *priv = dev_get_drvdata(device);
+
+ return mcp251xfd_clks_and_vdd_enable(priv);
+}
+
+static const struct dev_pm_ops mcp251xfd_pm_ops = {
+ SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend,
+ mcp251xfd_runtime_resume, NULL)
+};
+
+static struct spi_driver mcp251xfd_driver = {
+ .driver = {
+ .name = DEVICE_NAME,
+ .pm = &mcp251xfd_pm_ops,
+ .of_match_table = mcp251xfd_of_match,
+ },
+ .probe = mcp251xfd_probe,
+ .remove = mcp251xfd_remove,
+ .id_table = mcp251xfd_id_table,
+};
+module_spi_driver(mcp251xfd_driver);
+
+MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
+MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c
new file mode 100644
index 000000000000..a02ca76ac239
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+// Based on:
+//
+// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
+//
+// Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+//
+
+#include "mcp251xfd.h"
+
+/* The standard crc16 in linux/crc16.h unfortunately does not compute
+ * the correct results for this controller (it shifts right, while the
+ * chip's CRC shifts left). So here is an implementation with a table
+ * generated with the help of:
+ *
+ * http://lkml.iu.edu/hypermail/linux/kernel/0508.1/1085.html
+ */
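+
+/* The table implements the CRC-16 polynomial 0x8005
+ * (x^16 + x^15 + x^2 + 1), processed MSB-first; note that
+ * table[1] == 0x8005. An equivalent bit-wise form of
+ * mcp251xfd_crc16_byte() below would be (illustrative sketch only):
+ *
+ *	crc ^= (u16)data << 8;
+ *	for (i = 0; i < 8; i++)
+ *		crc = crc & 0x8000 ? (crc << 1) ^ 0x8005 : crc << 1;
+ */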
+static const u16 mcp251xfd_crc16_table[] = {
+ 0x0000, 0x8005, 0x800f, 0x000a, 0x801b, 0x001e, 0x0014, 0x8011,
+ 0x8033, 0x0036, 0x003c, 0x8039, 0x0028, 0x802d, 0x8027, 0x0022,
+ 0x8063, 0x0066, 0x006c, 0x8069, 0x0078, 0x807d, 0x8077, 0x0072,
+ 0x0050, 0x8055, 0x805f, 0x005a, 0x804b, 0x004e, 0x0044, 0x8041,
+ 0x80c3, 0x00c6, 0x00cc, 0x80c9, 0x00d8, 0x80dd, 0x80d7, 0x00d2,
+ 0x00f0, 0x80f5, 0x80ff, 0x00fa, 0x80eb, 0x00ee, 0x00e4, 0x80e1,
+ 0x00a0, 0x80a5, 0x80af, 0x00aa, 0x80bb, 0x00be, 0x00b4, 0x80b1,
+ 0x8093, 0x0096, 0x009c, 0x8099, 0x0088, 0x808d, 0x8087, 0x0082,
+ 0x8183, 0x0186, 0x018c, 0x8189, 0x0198, 0x819d, 0x8197, 0x0192,
+ 0x01b0, 0x81b5, 0x81bf, 0x01ba, 0x81ab, 0x01ae, 0x01a4, 0x81a1,
+ 0x01e0, 0x81e5, 0x81ef, 0x01ea, 0x81fb, 0x01fe, 0x01f4, 0x81f1,
+ 0x81d3, 0x01d6, 0x01dc, 0x81d9, 0x01c8, 0x81cd, 0x81c7, 0x01c2,
+ 0x0140, 0x8145, 0x814f, 0x014a, 0x815b, 0x015e, 0x0154, 0x8151,
+ 0x8173, 0x0176, 0x017c, 0x8179, 0x0168, 0x816d, 0x8167, 0x0162,
+ 0x8123, 0x0126, 0x012c, 0x8129, 0x0138, 0x813d, 0x8137, 0x0132,
+ 0x0110, 0x8115, 0x811f, 0x011a, 0x810b, 0x010e, 0x0104, 0x8101,
+ 0x8303, 0x0306, 0x030c, 0x8309, 0x0318, 0x831d, 0x8317, 0x0312,
+ 0x0330, 0x8335, 0x833f, 0x033a, 0x832b, 0x032e, 0x0324, 0x8321,
+ 0x0360, 0x8365, 0x836f, 0x036a, 0x837b, 0x037e, 0x0374, 0x8371,
+ 0x8353, 0x0356, 0x035c, 0x8359, 0x0348, 0x834d, 0x8347, 0x0342,
+ 0x03c0, 0x83c5, 0x83cf, 0x03ca, 0x83db, 0x03de, 0x03d4, 0x83d1,
+ 0x83f3, 0x03f6, 0x03fc, 0x83f9, 0x03e8, 0x83ed, 0x83e7, 0x03e2,
+ 0x83a3, 0x03a6, 0x03ac, 0x83a9, 0x03b8, 0x83bd, 0x83b7, 0x03b2,
+ 0x0390, 0x8395, 0x839f, 0x039a, 0x838b, 0x038e, 0x0384, 0x8381,
+ 0x0280, 0x8285, 0x828f, 0x028a, 0x829b, 0x029e, 0x0294, 0x8291,
+ 0x82b3, 0x02b6, 0x02bc, 0x82b9, 0x02a8, 0x82ad, 0x82a7, 0x02a2,
+ 0x82e3, 0x02e6, 0x02ec, 0x82e9, 0x02f8, 0x82fd, 0x82f7, 0x02f2,
+ 0x02d0, 0x82d5, 0x82df, 0x02da, 0x82cb, 0x02ce, 0x02c4, 0x82c1,
+ 0x8243, 0x0246, 0x024c, 0x8249, 0x0258, 0x825d, 0x8257, 0x0252,
+ 0x0270, 0x8275, 0x827f, 0x027a, 0x826b, 0x026e, 0x0264, 0x8261,
+ 0x0220, 0x8225, 0x822f, 0x022a, 0x823b, 0x023e, 0x0234, 0x8231,
+ 0x8213, 0x0216, 0x021c, 0x8219, 0x0208, 0x820d, 0x8207, 0x0202
+};
+
+static inline u16 mcp251xfd_crc16_byte(u16 crc, const u8 data)
+{
+ u8 index = (crc >> 8) ^ data;
+
+ return (crc << 8) ^ mcp251xfd_crc16_table[index];
+}
+
+static u16 mcp251xfd_crc16(u16 crc, u8 const *buffer, size_t len)
+{
+ while (len--)
+ crc = mcp251xfd_crc16_byte(crc, *buffer++);
+
+ return crc;
+}
+
+u16 mcp251xfd_crc16_compute(const void *data, size_t data_size)
+{
+ u16 crc = 0xffff;
+
+ return mcp251xfd_crc16(crc, data, data_size);
+}
+
+u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
+ const void *data, size_t data_size)
+{
+ u16 crc;
+
+ crc = mcp251xfd_crc16_compute(cmd, cmd_size);
+ crc = mcp251xfd_crc16(crc, data, data_size);
+
+ return crc;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
new file mode 100644
index 000000000000..ba25902dd78c
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+//
+// Copyright (c) 2019, 2020 Pengutronix,
+// Marc Kleine-Budde <kernel@pengutronix.de>
+//
+
+#include "mcp251xfd.h"
+
+#include <asm/unaligned.h>
+
+static const struct regmap_config mcp251xfd_regmap_crc;
+
+static int
+mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count)
+{
+ struct spi_device *spi = context;
+
+ return spi_write(spi, data, count);
+}
+
+static int
+mcp251xfd_regmap_nocrc_gather_write(void *context,
+ const void *reg, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len,
+ },
+ };
+
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd))
+ return -EINVAL;
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
+ memcpy(buf_tx->data, val, val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static inline bool mcp251xfd_update_bits_read_reg(unsigned int reg)
+{
+ switch (reg) {
+ case MCP251XFD_REG_INT:
+ case MCP251XFD_REG_TEFCON:
+ case MCP251XFD_REG_FIFOCON(MCP251XFD_RX_FIFO(0)):
+ case MCP251XFD_REG_FLTCON(0):
+ case MCP251XFD_REG_ECCSTAT:
+ case MCP251XFD_REG_CRC:
+ return false;
+ case MCP251XFD_REG_CON:
+ case MCP251XFD_REG_FIFOSTA(MCP251XFD_RX_FIFO(0)):
+ case MCP251XFD_REG_OSC:
+ case MCP251XFD_REG_ECCCON:
+ return true;
+ default:
+ WARN(1, "Status of reg 0x%04x unknown.\n", reg);
+ }
+
+ return true;
+}
+
+static int
+mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx;
+ struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ __le32 orig_le32 = 0, mask_le32, val_le32, tmp_le32;
+ u8 first_byte, last_byte, len;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ mask == 0)
+ return -EINVAL;
+
+ first_byte = mcp251xfd_first_byte_set(mask);
+ last_byte = mcp251xfd_last_byte_set(mask);
+ len = last_byte - first_byte + 1;
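+ /* Example (illustrative): for mask == 0x00ff0000 only byte 2 of
+ * the little-endian register value is affected, so first_byte ==
+ * last_byte == 2 and len == 1; a single data byte is transferred
+ * over SPI.
+ */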
+
+ if (mcp251xfd_update_bits_read_reg(reg)) {
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + len;
+
+ if (MCP251XFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, len);
+ }
+
+ mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, reg + first_byte);
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ memcpy(&orig_le32, buf_rx->data, len);
+ }
+
+ mask_le32 = cpu_to_le32(mask >> BITS_PER_BYTE * first_byte);
+ val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte);
+
+ tmp_le32 = orig_le32 & ~mask_le32;
+ tmp_le32 |= val_le32 & mask_le32;
+
+ mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte);
+ memcpy(buf_tx->data, &tmp_le32, len);
+
+ return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len);
+}
+
+static int
+mcp251xfd_regmap_nocrc_read(void *context,
+ const void *reg, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx;
+ struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx;
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd))
+ return -EINVAL;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = reg;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = val_buf;
+ xfer[1].len = val_len;
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len;
+
+ memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd));
+ if (MCP251XFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len);
+ }
+
+ err = spi_sync(spi, &msg);
+ if (err)
+ return err;
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX))
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static int
+mcp251xfd_regmap_crc_gather_write(void *context,
+ const void *reg_p, size_t reg_len,
+ const void *val, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = buf_tx,
+ .len = sizeof(buf_tx->cmd) + val_len +
+ sizeof(buf_tx->crc),
+ },
+ };
+ u16 reg = *(u16 *)reg_p;
+ u16 crc;
+
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd) +
+ mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE)
+ return -EINVAL;
+
+ mcp251xfd_spi_cmd_write_crc(&buf_tx->cmd, reg, val_len);
+ memcpy(buf_tx->data, val, val_len);
+
+ crc = mcp251xfd_crc16_compute(buf_tx, sizeof(buf_tx->cmd) + val_len);
+ put_unaligned_be16(crc, buf_tx->data + val_len);
+
+ return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+}
+
+static int
+mcp251xfd_regmap_crc_write(void *context,
+ const void *data, size_t count)
+{
+ const size_t data_offset = sizeof(__be16) +
+ mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE;
+
+ return mcp251xfd_regmap_crc_gather_write(context,
+ data, data_offset,
+ data + data_offset,
+ count - data_offset);
+}
+
+static int
+mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv,
+ struct spi_message *msg, unsigned int data_len)
+{
+ const struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx;
+ const struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ u16 crc_received, crc_calculated;
+ int err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ err = spi_sync(priv->spi, msg);
+ if (err)
+ return err;
+
+ crc_received = get_unaligned_be16(buf_rx->data + data_len);
+ crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd,
+ sizeof(buf_tx->cmd),
+ buf_rx->data,
+ data_len);
+ if (crc_received != crc_calculated)
+ return -EBADMSG;
+
+ return 0;
+}
+
+static int
+mcp251xfd_regmap_crc_read(void *context,
+ const void *reg_p, size_t reg_len,
+ void *val_buf, size_t val_len)
+{
+ struct spi_device *spi = context;
+ struct mcp251xfd_priv *priv = spi_get_drvdata(spi);
+ struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx;
+ struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx;
+ struct spi_transfer xfer[2] = { };
+ struct spi_message msg;
+ u16 reg = *(u16 *)reg_p;
+ int i, err;
+
+ BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8));
+ BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8));
+
+ if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) &&
+ reg_len != sizeof(buf_tx->cmd.cmd) +
+ mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE)
+ return -EINVAL;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer[0], &msg);
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].len = sizeof(buf_tx->cmd);
+
+ xfer[1].rx_buf = buf_rx->data;
+ xfer[1].len = val_len + sizeof(buf_tx->crc);
+ spi_message_add_tail(&xfer[1], &msg);
+ } else {
+ xfer[0].tx_buf = buf_tx;
+ xfer[0].rx_buf = buf_rx;
+ xfer[0].len = sizeof(buf_tx->cmd) + val_len +
+ sizeof(buf_tx->crc);
+
+ if (MCP251XFD_SANITIZE_SPI)
+ memset(buf_tx->data, 0x0, val_len +
+ sizeof(buf_tx->crc));
+ }
+
+ mcp251xfd_spi_cmd_read_crc(&buf_tx->cmd, reg, val_len);
+
+ for (i = 0; i < MCP251XFD_READ_CRC_RETRIES_MAX; i++) {
+ err = mcp251xfd_regmap_crc_read_one(priv, &msg, val_len);
+ if (!err)
+ goto out;
+ if (err != -EBADMSG)
+ return err;
+
+ /* MCP251XFD_REG_OSC is the first register we read from.
+ *
+ * The chip may be in deep sleep and this SPI transfer
+ * (i.e. the assertion of the CS) will wake the chip
+ * up. This takes about 3 ms. The CRC of this transfer
+ * is wrong.
+ *
+ * Or there isn't a chip at all, in which case the CRC
+ * will be wrong, too.
+ *
+ * In both cases ignore the CRC and copy the read data
+ * to the caller. It will take care of both cases.
+ */
+ if (reg == MCP251XFD_REG_OSC) {
+ err = 0;
+ goto out;
+ }
+
+ netdev_dbg(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
+ }
+
+ if (err) {
+ netdev_info(priv->ndev,
+ "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n",
+ reg, val_len, (int)val_len, buf_rx->data,
+ get_unaligned_be16(buf_rx->data + val_len));
+
+ return err;
+ }
+ out:
+ memcpy(val_buf, buf_rx->data, val_len);
+
+ return 0;
+}
+
+static const struct regmap_range mcp251xfd_reg_table_yes_range[] = {
+ regmap_reg_range(0x000, 0x2ec), /* CAN FD Controller Module SFR */
+ regmap_reg_range(0x400, 0xbfc), /* RAM */
+ regmap_reg_range(0xe00, 0xe14), /* MCP2517/18FD SFR */
+};
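+
+/* The RAM range above spans 0x400..0xbfc inclusive, i.e. 0x800 ==
+ * 2048 bytes, matching MCP251XFD_RAM_SIZE (SZ_2K) in mcp251xfd.h.
+ */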
+
+static const struct regmap_access_table mcp251xfd_reg_table = {
+ .yes_ranges = mcp251xfd_reg_table_yes_range,
+ .n_yes_ranges = ARRAY_SIZE(mcp251xfd_reg_table_yes_range),
+};
+
+static const struct regmap_config mcp251xfd_regmap_nocrc = {
+ .name = "nocrc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .pad_bits = 0,
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .wr_table = &mcp251xfd_reg_table,
+ .rd_table = &mcp251xfd_reg_table,
+ .cache_type = REGCACHE_NONE,
+ .read_flag_mask = (__force unsigned long)
+ cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ),
+ .write_flag_mask = (__force unsigned long)
+ cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE),
+};
+
+static const struct regmap_bus mcp251xfd_bus_nocrc = {
+ .write = mcp251xfd_regmap_nocrc_write,
+ .gather_write = mcp251xfd_regmap_nocrc_gather_write,
+ .reg_update_bits = mcp251xfd_regmap_nocrc_update_bits,
+ .read = mcp251xfd_regmap_nocrc_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_nocrc, data),
+ .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_nocrc, data),
+};
+
+static const struct regmap_config mcp251xfd_regmap_crc = {
+ .name = "crc",
+ .reg_bits = 16,
+ .reg_stride = 4,
+ .pad_bits = 16, /* keep data bits aligned */
+ .val_bits = 32,
+ .max_register = 0xffc,
+ .wr_table = &mcp251xfd_reg_table,
+ .rd_table = &mcp251xfd_reg_table,
+ .cache_type = REGCACHE_NONE,
+};
+
+static const struct regmap_bus mcp251xfd_bus_crc = {
+ .write = mcp251xfd_regmap_crc_write,
+ .gather_write = mcp251xfd_regmap_crc_gather_write,
+ .read = mcp251xfd_regmap_crc_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_crc, data),
+ .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_crc, data),
+};
+
+static inline bool
+mcp251xfd_regmap_use_nocrc(struct mcp251xfd_priv *priv)
+{
+ return (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) ||
+ (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX));
+}
+
+static inline bool
+mcp251xfd_regmap_use_crc(struct mcp251xfd_priv *priv)
+{
+ return (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) ||
+ (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX);
+}
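+
+/* Both helpers above may be true at the same time: a configuration
+ * with MCP251XFD_QUIRK_CRC_REG set but MCP251XFD_QUIRK_CRC_RX unset
+ * (or vice versa) needs the CRC regmap for one access path and the
+ * no-CRC regmap for the other, so both regmaps get initialized.
+ */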
+
+static int
+mcp251xfd_regmap_init_nocrc(struct mcp251xfd_priv *priv)
+{
+ if (!priv->map_nocrc) {
+ struct regmap *map;
+
+ map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_nocrc,
+ priv->spi, &mcp251xfd_regmap_nocrc);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ priv->map_nocrc = map;
+ }
+
+ if (!priv->map_buf_nocrc_rx) {
+ priv->map_buf_nocrc_rx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_nocrc_rx),
+ GFP_KERNEL);
+ if (!priv->map_buf_nocrc_rx)
+ return -ENOMEM;
+ }
+
+ if (!priv->map_buf_nocrc_tx) {
+ priv->map_buf_nocrc_tx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_nocrc_tx),
+ GFP_KERNEL);
+ if (!priv->map_buf_nocrc_tx)
+ return -ENOMEM;
+ }
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG))
+ priv->map_reg = priv->map_nocrc;
+
+ if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX))
+ priv->map_rx = priv->map_nocrc;
+
+ return 0;
+}
+
+static void mcp251xfd_regmap_destroy_nocrc(struct mcp251xfd_priv *priv)
+{
+ if (priv->map_buf_nocrc_rx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_rx);
+ priv->map_buf_nocrc_rx = NULL;
+ }
+ if (priv->map_buf_nocrc_tx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_tx);
+ priv->map_buf_nocrc_tx = NULL;
+ }
+}
+
+static int
+mcp251xfd_regmap_init_crc(struct mcp251xfd_priv *priv)
+{
+ if (!priv->map_crc) {
+ struct regmap *map;
+
+ map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_crc,
+ priv->spi, &mcp251xfd_regmap_crc);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ priv->map_crc = map;
+ }
+
+ if (!priv->map_buf_crc_rx) {
+ priv->map_buf_crc_rx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_crc_rx),
+ GFP_KERNEL);
+ if (!priv->map_buf_crc_rx)
+ return -ENOMEM;
+ }
+
+ if (!priv->map_buf_crc_tx) {
+ priv->map_buf_crc_tx =
+ devm_kzalloc(&priv->spi->dev,
+ sizeof(*priv->map_buf_crc_tx),
+ GFP_KERNEL);
+ if (!priv->map_buf_crc_tx)
+ return -ENOMEM;
+ }
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)
+ priv->map_reg = priv->map_crc;
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX)
+ priv->map_rx = priv->map_crc;
+
+ return 0;
+}
+
+static void mcp251xfd_regmap_destroy_crc(struct mcp251xfd_priv *priv)
+{
+ if (priv->map_buf_crc_rx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_crc_rx);
+ priv->map_buf_crc_rx = NULL;
+ }
+ if (priv->map_buf_crc_tx) {
+ devm_kfree(&priv->spi->dev, priv->map_buf_crc_tx);
+ priv->map_buf_crc_tx = NULL;
+ }
+}
+
+int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv)
+{
+ int err;
+
+ if (mcp251xfd_regmap_use_nocrc(priv)) {
+ err = mcp251xfd_regmap_init_nocrc(priv);
+
+ if (err)
+ return err;
+ } else {
+ mcp251xfd_regmap_destroy_nocrc(priv);
+ }
+
+ if (mcp251xfd_regmap_use_crc(priv)) {
+ err = mcp251xfd_regmap_init_crc(priv);
+
+ if (err)
+ return err;
+ } else {
+ mcp251xfd_regmap_destroy_crc(priv);
+ }
+
+ return 0;
+}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
new file mode 100644
index 000000000000..fa1246e39980
--- /dev/null
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -0,0 +1,835 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * mcp251xfd - Microchip MCP251xFD Family CAN controller driver
+ *
+ * Copyright (c) 2019 Pengutronix,
+ * Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
+ */
+
+#ifndef _MCP251XFD_H
+#define _MCP251XFD_H
+
+#include <linux/can/core.h>
+#include <linux/can/dev.h>
+#include <linux/can/rx-offload.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+/* MCP251x registers */
+
+/* CAN FD Controller Module SFR */
+#define MCP251XFD_REG_CON 0x00
+#define MCP251XFD_REG_CON_TXBWS_MASK GENMASK(31, 28)
+#define MCP251XFD_REG_CON_ABAT BIT(27)
+#define MCP251XFD_REG_CON_REQOP_MASK GENMASK(26, 24)
+#define MCP251XFD_REG_CON_MODE_MIXED 0
+#define MCP251XFD_REG_CON_MODE_SLEEP 1
+#define MCP251XFD_REG_CON_MODE_INT_LOOPBACK 2
+#define MCP251XFD_REG_CON_MODE_LISTENONLY 3
+#define MCP251XFD_REG_CON_MODE_CONFIG 4
+#define MCP251XFD_REG_CON_MODE_EXT_LOOPBACK 5
+#define MCP251XFD_REG_CON_MODE_CAN2_0 6
+#define MCP251XFD_REG_CON_MODE_RESTRICTED 7
+#define MCP251XFD_REG_CON_OPMOD_MASK GENMASK(23, 21)
+#define MCP251XFD_REG_CON_TXQEN BIT(20)
+#define MCP251XFD_REG_CON_STEF BIT(19)
+#define MCP251XFD_REG_CON_SERR2LOM BIT(18)
+#define MCP251XFD_REG_CON_ESIGM BIT(17)
+#define MCP251XFD_REG_CON_RTXAT BIT(16)
+#define MCP251XFD_REG_CON_BRSDIS BIT(12)
+#define MCP251XFD_REG_CON_BUSY BIT(11)
+#define MCP251XFD_REG_CON_WFT_MASK GENMASK(10, 9)
+#define MCP251XFD_REG_CON_WFT_T00FILTER 0x0
+#define MCP251XFD_REG_CON_WFT_T01FILTER 0x1
+#define MCP251XFD_REG_CON_WFT_T10FILTER 0x2
+#define MCP251XFD_REG_CON_WFT_T11FILTER 0x3
+#define MCP251XFD_REG_CON_WAKFIL BIT(8)
+#define MCP251XFD_REG_CON_PXEDIS BIT(6)
+#define MCP251XFD_REG_CON_ISOCRCEN BIT(5)
+#define MCP251XFD_REG_CON_DNCNT_MASK GENMASK(4, 0)
+
+#define MCP251XFD_REG_NBTCFG 0x04
+#define MCP251XFD_REG_NBTCFG_BRP_MASK GENMASK(31, 24)
+#define MCP251XFD_REG_NBTCFG_TSEG1_MASK GENMASK(23, 16)
+#define MCP251XFD_REG_NBTCFG_TSEG2_MASK GENMASK(14, 8)
+#define MCP251XFD_REG_NBTCFG_SJW_MASK GENMASK(6, 0)
+
+#define MCP251XFD_REG_DBTCFG 0x08
+#define MCP251XFD_REG_DBTCFG_BRP_MASK GENMASK(31, 24)
+#define MCP251XFD_REG_DBTCFG_TSEG1_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_DBTCFG_TSEG2_MASK GENMASK(11, 8)
+#define MCP251XFD_REG_DBTCFG_SJW_MASK GENMASK(3, 0)
+
+#define MCP251XFD_REG_TDC 0x0c
+#define MCP251XFD_REG_TDC_EDGFLTEN BIT(25)
+#define MCP251XFD_REG_TDC_SID11EN BIT(24)
+#define MCP251XFD_REG_TDC_TDCMOD_MASK GENMASK(17, 16)
+#define MCP251XFD_REG_TDC_TDCMOD_AUTO 2
+#define MCP251XFD_REG_TDC_TDCMOD_MANUAL 1
+#define MCP251XFD_REG_TDC_TDCMOD_DISABLED 0
+#define MCP251XFD_REG_TDC_TDCO_MASK GENMASK(14, 8)
+#define MCP251XFD_REG_TDC_TDCV_MASK GENMASK(5, 0)
+
+#define MCP251XFD_REG_TBC 0x10
+
+#define MCP251XFD_REG_TSCON 0x14
+#define MCP251XFD_REG_TSCON_TSRES BIT(18)
+#define MCP251XFD_REG_TSCON_TSEOF BIT(17)
+#define MCP251XFD_REG_TSCON_TBCEN BIT(16)
+#define MCP251XFD_REG_TSCON_TBCPRE_MASK GENMASK(9, 0)
+
+#define MCP251XFD_REG_VEC 0x18
+#define MCP251XFD_REG_VEC_RXCODE_MASK GENMASK(30, 24)
+#define MCP251XFD_REG_VEC_TXCODE_MASK GENMASK(22, 16)
+#define MCP251XFD_REG_VEC_FILHIT_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_VEC_ICODE_MASK GENMASK(6, 0)
+
+#define MCP251XFD_REG_INT 0x1c
+#define MCP251XFD_REG_INT_IF_MASK GENMASK(15, 0)
+#define MCP251XFD_REG_INT_IE_MASK GENMASK(31, 16)
+#define MCP251XFD_REG_INT_IVMIE BIT(31)
+#define MCP251XFD_REG_INT_WAKIE BIT(30)
+#define MCP251XFD_REG_INT_CERRIE BIT(29)
+#define MCP251XFD_REG_INT_SERRIE BIT(28)
+#define MCP251XFD_REG_INT_RXOVIE BIT(27)
+#define MCP251XFD_REG_INT_TXATIE BIT(26)
+#define MCP251XFD_REG_INT_SPICRCIE BIT(25)
+#define MCP251XFD_REG_INT_ECCIE BIT(24)
+#define MCP251XFD_REG_INT_TEFIE BIT(20)
+#define MCP251XFD_REG_INT_MODIE BIT(19)
+#define MCP251XFD_REG_INT_TBCIE BIT(18)
+#define MCP251XFD_REG_INT_RXIE BIT(17)
+#define MCP251XFD_REG_INT_TXIE BIT(16)
+#define MCP251XFD_REG_INT_IVMIF BIT(15)
+#define MCP251XFD_REG_INT_WAKIF BIT(14)
+#define MCP251XFD_REG_INT_CERRIF BIT(13)
+#define MCP251XFD_REG_INT_SERRIF BIT(12)
+#define MCP251XFD_REG_INT_RXOVIF BIT(11)
+#define MCP251XFD_REG_INT_TXATIF BIT(10)
+#define MCP251XFD_REG_INT_SPICRCIF BIT(9)
+#define MCP251XFD_REG_INT_ECCIF BIT(8)
+#define MCP251XFD_REG_INT_TEFIF BIT(4)
+#define MCP251XFD_REG_INT_MODIF BIT(3)
+#define MCP251XFD_REG_INT_TBCIF BIT(2)
+#define MCP251XFD_REG_INT_RXIF BIT(1)
+#define MCP251XFD_REG_INT_TXIF BIT(0)
+/* These IRQ flags must be cleared by SW in the CAN_INT register */
+#define MCP251XFD_REG_INT_IF_CLEARABLE_MASK \
+ (MCP251XFD_REG_INT_IVMIF | MCP251XFD_REG_INT_WAKIF | \
+ MCP251XFD_REG_INT_CERRIF | MCP251XFD_REG_INT_SERRIF | \
+ MCP251XFD_REG_INT_MODIF)
+
+#define MCP251XFD_REG_RXIF 0x20
+#define MCP251XFD_REG_TXIF 0x24
+#define MCP251XFD_REG_RXOVIF 0x28
+#define MCP251XFD_REG_TXATIF 0x2c
+#define MCP251XFD_REG_TXREQ 0x30
+
+#define MCP251XFD_REG_TREC 0x34
+#define MCP251XFD_REG_TREC_TXBO BIT(21)
+#define MCP251XFD_REG_TREC_TXBP BIT(20)
+#define MCP251XFD_REG_TREC_RXBP BIT(19)
+#define MCP251XFD_REG_TREC_TXWARN BIT(18)
+#define MCP251XFD_REG_TREC_RXWARN BIT(17)
+#define MCP251XFD_REG_TREC_EWARN BIT(16)
+#define MCP251XFD_REG_TREC_TEC_MASK GENMASK(15, 8)
+#define MCP251XFD_REG_TREC_REC_MASK GENMASK(7, 0)
+
+#define MCP251XFD_REG_BDIAG0 0x38
+#define MCP251XFD_REG_BDIAG0_DTERRCNT_MASK GENMASK(31, 24)
+#define MCP251XFD_REG_BDIAG0_DRERRCNT_MASK GENMASK(23, 16)
+#define MCP251XFD_REG_BDIAG0_NTERRCNT_MASK GENMASK(15, 8)
+#define MCP251XFD_REG_BDIAG0_NRERRCNT_MASK GENMASK(7, 0)
+
+#define MCP251XFD_REG_BDIAG1 0x3c
+#define MCP251XFD_REG_BDIAG1_DLCMM BIT(31)
+#define MCP251XFD_REG_BDIAG1_ESI BIT(30)
+#define MCP251XFD_REG_BDIAG1_DCRCERR BIT(29)
+#define MCP251XFD_REG_BDIAG1_DSTUFERR BIT(28)
+#define MCP251XFD_REG_BDIAG1_DFORMERR BIT(27)
+#define MCP251XFD_REG_BDIAG1_DBIT1ERR BIT(25)
+#define MCP251XFD_REG_BDIAG1_DBIT0ERR BIT(24)
+#define MCP251XFD_REG_BDIAG1_TXBOERR BIT(23)
+#define MCP251XFD_REG_BDIAG1_NCRCERR BIT(21)
+#define MCP251XFD_REG_BDIAG1_NSTUFERR BIT(20)
+#define MCP251XFD_REG_BDIAG1_NFORMERR BIT(19)
+#define MCP251XFD_REG_BDIAG1_NACKERR BIT(18)
+#define MCP251XFD_REG_BDIAG1_NBIT1ERR BIT(17)
+#define MCP251XFD_REG_BDIAG1_NBIT0ERR BIT(16)
+#define MCP251XFD_REG_BDIAG1_BERR_MASK \
+ (MCP251XFD_REG_BDIAG1_DLCMM | MCP251XFD_REG_BDIAG1_ESI | \
+ MCP251XFD_REG_BDIAG1_DCRCERR | MCP251XFD_REG_BDIAG1_DSTUFERR | \
+ MCP251XFD_REG_BDIAG1_DFORMERR | MCP251XFD_REG_BDIAG1_DBIT1ERR | \
+ MCP251XFD_REG_BDIAG1_DBIT0ERR | MCP251XFD_REG_BDIAG1_TXBOERR | \
+ MCP251XFD_REG_BDIAG1_NCRCERR | MCP251XFD_REG_BDIAG1_NSTUFERR | \
+ MCP251XFD_REG_BDIAG1_NFORMERR | MCP251XFD_REG_BDIAG1_NACKERR | \
+ MCP251XFD_REG_BDIAG1_NBIT1ERR | MCP251XFD_REG_BDIAG1_NBIT0ERR)
+#define MCP251XFD_REG_BDIAG1_EFMSGCNT_MASK GENMASK(15, 0)
+
+#define MCP251XFD_REG_TEFCON 0x40
+#define MCP251XFD_REG_TEFCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_TEFCON_FRESET BIT(10)
+#define MCP251XFD_REG_TEFCON_UINC BIT(8)
+#define MCP251XFD_REG_TEFCON_TEFTSEN BIT(5)
+#define MCP251XFD_REG_TEFCON_TEFOVIE BIT(3)
+#define MCP251XFD_REG_TEFCON_TEFFIE BIT(2)
+#define MCP251XFD_REG_TEFCON_TEFHIE BIT(1)
+#define MCP251XFD_REG_TEFCON_TEFNEIE BIT(0)
+
+#define MCP251XFD_REG_TEFSTA 0x44
+#define MCP251XFD_REG_TEFSTA_TEFOVIF BIT(3)
+#define MCP251XFD_REG_TEFSTA_TEFFIF BIT(2)
+#define MCP251XFD_REG_TEFSTA_TEFHIF BIT(1)
+#define MCP251XFD_REG_TEFSTA_TEFNEIF BIT(0)
+
+#define MCP251XFD_REG_TEFUA 0x48
+
+#define MCP251XFD_REG_TXQCON 0x50
+#define MCP251XFD_REG_TXQCON_PLSIZE_MASK GENMASK(31, 29)
+#define MCP251XFD_REG_TXQCON_PLSIZE_8 0
+#define MCP251XFD_REG_TXQCON_PLSIZE_12 1
+#define MCP251XFD_REG_TXQCON_PLSIZE_16 2
+#define MCP251XFD_REG_TXQCON_PLSIZE_20 3
+#define MCP251XFD_REG_TXQCON_PLSIZE_24 4
+#define MCP251XFD_REG_TXQCON_PLSIZE_32 5
+#define MCP251XFD_REG_TXQCON_PLSIZE_48 6
+#define MCP251XFD_REG_TXQCON_PLSIZE_64 7
+#define MCP251XFD_REG_TXQCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_TXQCON_TXAT_UNLIMITED 3
+#define MCP251XFD_REG_TXQCON_TXAT_THREE_SHOT 1
+#define MCP251XFD_REG_TXQCON_TXAT_ONE_SHOT 0
+#define MCP251XFD_REG_TXQCON_TXAT_MASK GENMASK(22, 21)
+#define MCP251XFD_REG_TXQCON_TXPRI_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_TXQCON_FRESET BIT(10)
+#define MCP251XFD_REG_TXQCON_TXREQ BIT(9)
+#define MCP251XFD_REG_TXQCON_UINC BIT(8)
+#define MCP251XFD_REG_TXQCON_TXEN BIT(7)
+#define MCP251XFD_REG_TXQCON_TXATIE BIT(4)
+#define MCP251XFD_REG_TXQCON_TXQEIE BIT(2)
+#define MCP251XFD_REG_TXQCON_TXQNIE BIT(0)
+
+#define MCP251XFD_REG_TXQSTA 0x54
+#define MCP251XFD_REG_TXQSTA_TXQCI_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_TXQSTA_TXABT BIT(7)
+#define MCP251XFD_REG_TXQSTA_TXLARB BIT(6)
+#define MCP251XFD_REG_TXQSTA_TXERR BIT(5)
+#define MCP251XFD_REG_TXQSTA_TXATIF BIT(4)
+#define MCP251XFD_REG_TXQSTA_TXQEIF BIT(2)
+#define MCP251XFD_REG_TXQSTA_TXQNIF BIT(0)
+
+#define MCP251XFD_REG_TXQUA 0x58
+
+#define MCP251XFD_REG_FIFOCON(x) (0x50 + 0xc * (x))
+#define MCP251XFD_REG_FIFOCON_PLSIZE_MASK GENMASK(31, 29)
+#define MCP251XFD_REG_FIFOCON_PLSIZE_8 0
+#define MCP251XFD_REG_FIFOCON_PLSIZE_12 1
+#define MCP251XFD_REG_FIFOCON_PLSIZE_16 2
+#define MCP251XFD_REG_FIFOCON_PLSIZE_20 3
+#define MCP251XFD_REG_FIFOCON_PLSIZE_24 4
+#define MCP251XFD_REG_FIFOCON_PLSIZE_32 5
+#define MCP251XFD_REG_FIFOCON_PLSIZE_48 6
+#define MCP251XFD_REG_FIFOCON_PLSIZE_64 7
+#define MCP251XFD_REG_FIFOCON_FSIZE_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_FIFOCON_TXAT_MASK GENMASK(22, 21)
+#define MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT 0
+#define MCP251XFD_REG_FIFOCON_TXAT_THREE_SHOT 1
+#define MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED 3
+#define MCP251XFD_REG_FIFOCON_TXPRI_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_FIFOCON_FRESET BIT(10)
+#define MCP251XFD_REG_FIFOCON_TXREQ BIT(9)
+#define MCP251XFD_REG_FIFOCON_UINC BIT(8)
+#define MCP251XFD_REG_FIFOCON_TXEN BIT(7)
+#define MCP251XFD_REG_FIFOCON_RTREN BIT(6)
+#define MCP251XFD_REG_FIFOCON_RXTSEN BIT(5)
+#define MCP251XFD_REG_FIFOCON_TXATIE BIT(4)
+#define MCP251XFD_REG_FIFOCON_RXOVIE BIT(3)
+#define MCP251XFD_REG_FIFOCON_TFERFFIE BIT(2)
+#define MCP251XFD_REG_FIFOCON_TFHRFHIE BIT(1)
+#define MCP251XFD_REG_FIFOCON_TFNRFNIE BIT(0)
+
+#define MCP251XFD_REG_FIFOSTA(x) (0x54 + 0xc * (x))
+#define MCP251XFD_REG_FIFOSTA_FIFOCI_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_FIFOSTA_TXABT BIT(7)
+#define MCP251XFD_REG_FIFOSTA_TXLARB BIT(6)
+#define MCP251XFD_REG_FIFOSTA_TXERR BIT(5)
+#define MCP251XFD_REG_FIFOSTA_TXATIF BIT(4)
+#define MCP251XFD_REG_FIFOSTA_RXOVIF BIT(3)
+#define MCP251XFD_REG_FIFOSTA_TFERFFIF BIT(2)
+#define MCP251XFD_REG_FIFOSTA_TFHRFHIF BIT(1)
+#define MCP251XFD_REG_FIFOSTA_TFNRFNIF BIT(0)
+
+#define MCP251XFD_REG_FIFOUA(x) (0x58 + 0xc * (x))
+
+#define MCP251XFD_REG_FLTCON(x) (0x1d0 + 0x4 * (x))
+#define MCP251XFD_REG_FLTCON_FLTEN3 BIT(31)
+#define MCP251XFD_REG_FLTCON_F3BP_MASK GENMASK(28, 24)
+#define MCP251XFD_REG_FLTCON_FLTEN2 BIT(23)
+#define MCP251XFD_REG_FLTCON_F2BP_MASK GENMASK(20, 16)
+#define MCP251XFD_REG_FLTCON_FLTEN1 BIT(15)
+#define MCP251XFD_REG_FLTCON_F1BP_MASK GENMASK(12, 8)
+#define MCP251XFD_REG_FLTCON_FLTEN0 BIT(7)
+#define MCP251XFD_REG_FLTCON_F0BP_MASK GENMASK(4, 0)
+#define MCP251XFD_REG_FLTCON_FLTEN(x) (BIT(7) << 8 * ((x) & 0x3))
+#define MCP251XFD_REG_FLTCON_FLT_MASK(x) (GENMASK(7, 0) << (8 * ((x) & 0x3)))
+#define MCP251XFD_REG_FLTCON_FBP(x, fifo) ((fifo) << 8 * ((x) & 0x3))
+
+#define MCP251XFD_REG_FLTOBJ(x) (0x1f0 + 0x8 * (x))
+#define MCP251XFD_REG_FLTOBJ_EXIDE BIT(30)
+#define MCP251XFD_REG_FLTOBJ_SID11 BIT(29)
+#define MCP251XFD_REG_FLTOBJ_EID_MASK GENMASK(28, 11)
+#define MCP251XFD_REG_FLTOBJ_SID_MASK GENMASK(10, 0)
+
+#define MCP251XFD_REG_FLTMASK(x) (0x1f4 + 0x8 * (x))
+#define MCP251XFD_REG_MASK_MIDE BIT(30)
+#define MCP251XFD_REG_MASK_MSID11 BIT(29)
+#define MCP251XFD_REG_MASK_MEID_MASK GENMASK(28, 11)
+#define MCP251XFD_REG_MASK_MSID_MASK GENMASK(10, 0)
+
+/* RAM */
+#define MCP251XFD_RAM_START 0x400
+#define MCP251XFD_RAM_SIZE SZ_2K
+
+/* Message Object */
+#define MCP251XFD_OBJ_ID_SID11 BIT(29)
+#define MCP251XFD_OBJ_ID_EID_MASK GENMASK(28, 11)
+#define MCP251XFD_OBJ_ID_SID_MASK GENMASK(10, 0)
+#define MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK GENMASK(31, 9)
+#define MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK GENMASK(15, 9)
+#define MCP251XFD_OBJ_FLAGS_SEQ_MASK MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK
+#define MCP251XFD_OBJ_FLAGS_ESI BIT(8)
+#define MCP251XFD_OBJ_FLAGS_FDF BIT(7)
+#define MCP251XFD_OBJ_FLAGS_BRS BIT(6)
+#define MCP251XFD_OBJ_FLAGS_RTR BIT(5)
+#define MCP251XFD_OBJ_FLAGS_IDE BIT(4)
+#define MCP251XFD_OBJ_FLAGS_DLC GENMASK(3, 0)
+
+#define MCP251XFD_REG_FRAME_EFF_SID_MASK GENMASK(28, 18)
+#define MCP251XFD_REG_FRAME_EFF_EID_MASK GENMASK(17, 0)
+
+/* MCP2517/18FD SFR */
+#define MCP251XFD_REG_OSC 0xe00
+#define MCP251XFD_REG_OSC_SCLKRDY BIT(12)
+#define MCP251XFD_REG_OSC_OSCRDY BIT(10)
+#define MCP251XFD_REG_OSC_PLLRDY BIT(8)
+#define MCP251XFD_REG_OSC_CLKODIV_10 3
+#define MCP251XFD_REG_OSC_CLKODIV_4 2
+#define MCP251XFD_REG_OSC_CLKODIV_2 1
+#define MCP251XFD_REG_OSC_CLKODIV_1 0
+#define MCP251XFD_REG_OSC_CLKODIV_MASK GENMASK(6, 5)
+#define MCP251XFD_REG_OSC_SCLKDIV BIT(4)
+#define MCP251XFD_REG_OSC_LPMEN BIT(3) /* MCP2518FD only */
+#define MCP251XFD_REG_OSC_OSCDIS BIT(2)
+#define MCP251XFD_REG_OSC_PLLEN BIT(0)
+
+#define MCP251XFD_REG_IOCON 0xe04
+#define MCP251XFD_REG_IOCON_INTOD BIT(30)
+#define MCP251XFD_REG_IOCON_SOF BIT(29)
+#define MCP251XFD_REG_IOCON_TXCANOD BIT(28)
+#define MCP251XFD_REG_IOCON_PM1 BIT(25)
+#define MCP251XFD_REG_IOCON_PM0 BIT(24)
+#define MCP251XFD_REG_IOCON_GPIO1 BIT(17)
+#define MCP251XFD_REG_IOCON_GPIO0 BIT(16)
+#define MCP251XFD_REG_IOCON_LAT1 BIT(9)
+#define MCP251XFD_REG_IOCON_LAT0 BIT(8)
+#define MCP251XFD_REG_IOCON_XSTBYEN BIT(6)
+#define MCP251XFD_REG_IOCON_TRIS1 BIT(1)
+#define MCP251XFD_REG_IOCON_TRIS0 BIT(0)
+
+#define MCP251XFD_REG_CRC 0xe08
+#define MCP251XFD_REG_CRC_FERRIE BIT(25)
+#define MCP251XFD_REG_CRC_CRCERRIE BIT(24)
+#define MCP251XFD_REG_CRC_FERRIF BIT(17)
+#define MCP251XFD_REG_CRC_CRCERRIF BIT(16)
+#define MCP251XFD_REG_CRC_IF_MASK GENMASK(17, 16)
+#define MCP251XFD_REG_CRC_MASK GENMASK(15, 0)
+
+#define MCP251XFD_REG_ECCCON 0xe0c
+#define MCP251XFD_REG_ECCCON_PARITY_MASK GENMASK(14, 8)
+#define MCP251XFD_REG_ECCCON_DEDIE BIT(2)
+#define MCP251XFD_REG_ECCCON_SECIE BIT(1)
+#define MCP251XFD_REG_ECCCON_ECCEN BIT(0)
+
+#define MCP251XFD_REG_ECCSTAT 0xe10
+#define MCP251XFD_REG_ECCSTAT_ERRADDR_MASK GENMASK(27, 16)
+#define MCP251XFD_REG_ECCSTAT_IF_MASK GENMASK(2, 1)
+#define MCP251XFD_REG_ECCSTAT_DEDIF BIT(2)
+#define MCP251XFD_REG_ECCSTAT_SECIF BIT(1)
+
+#define MCP251XFD_REG_DEVID 0xe14 /* MCP2518FD only */
+#define MCP251XFD_REG_DEVID_ID_MASK GENMASK(7, 4)
+#define MCP251XFD_REG_DEVID_REV_MASK GENMASK(3, 0)
+
+/* number of TX FIFO objects, depending on CAN mode
+ *
+ * FIFO setup CAN-2.0: tef: 8*12 bytes = 96 bytes, tx: 8*16 bytes = 128 bytes
+ * FIFO setup CAN-FD:  tef: 4*12 bytes = 48 bytes, tx: 4*72 bytes = 288 bytes
+ */
+#define MCP251XFD_TX_OBJ_NUM_CAN 8
+#define MCP251XFD_TX_OBJ_NUM_CANFD 4
+
+#if MCP251XFD_TX_OBJ_NUM_CAN > MCP251XFD_TX_OBJ_NUM_CANFD
+#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CAN
+#else
+#define MCP251XFD_TX_OBJ_NUM_MAX MCP251XFD_TX_OBJ_NUM_CANFD
+#endif
+
+#define MCP251XFD_NAPI_WEIGHT 32
+#define MCP251XFD_TX_FIFO 1
+#define MCP251XFD_RX_FIFO(x) (MCP251XFD_TX_FIFO + 1 + (x))
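+/* FIFO numbering on the chip: FIFO 1 is the TX FIFO and the RX FIFOs
+ * follow it, i.e. MCP251XFD_RX_FIFO(0) == 2, MCP251XFD_RX_FIFO(1) == 3, ...
+ */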
+
+/* SPI commands */
+#define MCP251XFD_SPI_INSTRUCTION_RESET 0x0000
+#define MCP251XFD_SPI_INSTRUCTION_WRITE 0x2000
+#define MCP251XFD_SPI_INSTRUCTION_READ 0x3000
+#define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC 0xa000
+#define MCP251XFD_SPI_INSTRUCTION_READ_CRC 0xb000
+#define MCP251XFD_SPI_INSTRUCTION_WRITE_CRC_SAFE 0xc000
+#define MCP251XFD_SPI_ADDRESS_MASK GENMASK(11, 0)
+
+#define MCP251XFD_SYSCLOCK_HZ_MAX 40000000
+#define MCP251XFD_SYSCLOCK_HZ_MIN 1000000
+#define MCP251XFD_SPICLOCK_HZ_MAX 20000000
+#define MCP251XFD_OSC_PLL_MULTIPLIER 10
+#define MCP251XFD_OSC_STAB_SLEEP_US (3 * USEC_PER_MSEC)
+#define MCP251XFD_OSC_STAB_TIMEOUT_US (10 * MCP251XFD_OSC_STAB_SLEEP_US)
+#define MCP251XFD_POLL_SLEEP_US (10)
+#define MCP251XFD_POLL_TIMEOUT_US (USEC_PER_MSEC)
+#define MCP251XFD_SOFTRESET_RETRIES_MAX 3
+#define MCP251XFD_READ_CRC_RETRIES_MAX 3
+#define MCP251XFD_ECC_CNT_MAX 2
+#define MCP251XFD_SANITIZE_SPI 1
+#define MCP251XFD_SANITIZE_CAN 1
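+
+/* The *_SLEEP_US/*_TIMEOUT_US pairs above suggest a poll-and-timeout
+ * pattern, e.g. with regmap_read_poll_timeout() (usage sketch only, not
+ * part of this header):
+ *
+ *	err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC,
+ *				       osc, osc & MCP251XFD_REG_OSC_OSCRDY,
+ *				       MCP251XFD_OSC_STAB_SLEEP_US,
+ *				       MCP251XFD_OSC_STAB_TIMEOUT_US);
+ */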
+
+/* Silence TX MAB overflow warnings */
+#define MCP251XFD_QUIRK_MAB_NO_WARN BIT(0)
+/* Use CRC to access registers */
+#define MCP251XFD_QUIRK_CRC_REG BIT(1)
+/* Use CRC to access RX/TEF-RAM */
+#define MCP251XFD_QUIRK_CRC_RX BIT(2)
+/* Use CRC to access TX-RAM */
+#define MCP251XFD_QUIRK_CRC_TX BIT(3)
+/* Enable ECC for RAM */
+#define MCP251XFD_QUIRK_ECC BIT(4)
+/* Use Half Duplex SPI transfers */
+#define MCP251XFD_QUIRK_HALF_DUPLEX BIT(5)
+
+struct mcp251xfd_hw_tef_obj {
+ u32 id;
+ u32 flags;
+ u32 ts;
+};
+
+/* The tx_obj_raw version is used in async SPI transfers, i.e. without
+ * regmap. We have to take care of endianness ourselves.
+ */
+struct mcp251xfd_hw_tx_obj_raw {
+ __le32 id;
+ __le32 flags;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp251xfd_hw_tx_obj_can {
+ u32 id;
+ u32 flags;
+ u8 data[sizeof_field(struct can_frame, data)];
+};
+
+struct mcp251xfd_hw_tx_obj_canfd {
+ u32 id;
+ u32 flags;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp251xfd_hw_rx_obj_can {
+ u32 id;
+ u32 flags;
+ u32 ts;
+ u8 data[sizeof_field(struct can_frame, data)];
+};
+
+struct mcp251xfd_hw_rx_obj_canfd {
+ u32 id;
+ u32 flags;
+ u32 ts;
+ u8 data[sizeof_field(struct canfd_frame, data)];
+};
+
+struct mcp251xfd_tef_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ /* u8 obj_num equals tx_ring->obj_num */
+ /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+};
+
+struct __packed mcp251xfd_buf_cmd {
+ __be16 cmd;
+};
+
+struct __packed mcp251xfd_buf_cmd_crc {
+ __be16 cmd;
+ u8 len;
+};
+
+union mcp251xfd_tx_obj_load_buf {
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ struct mcp251xfd_hw_tx_obj_raw hw_tx_obj;
+ } nocrc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd_crc cmd;
+ struct mcp251xfd_hw_tx_obj_raw hw_tx_obj;
+ __be16 crc;
+ } crc;
+} ____cacheline_aligned;
+
+union mcp251xfd_write_reg_buf {
+ struct __packed {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[4];
+ } nocrc;
+ struct __packed {
+ struct mcp251xfd_buf_cmd_crc cmd;
+ u8 data[4];
+ __be16 crc;
+ } crc;
+} ____cacheline_aligned;
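+
+/* Note: the ____cacheline_aligned unions above are handed to the SPI
+ * core and may be mapped for DMA, which is the likely rationale for the
+ * alignment (an assumption; the patch itself does not spell this out).
+ */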
+
+struct mcp251xfd_tx_obj {
+ struct spi_message msg;
+ struct spi_transfer xfer[2];
+ union mcp251xfd_tx_obj_load_buf buf;
+};
+
+struct mcp251xfd_tx_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ u16 base;
+ u8 obj_num;
+ u8 obj_size;
+
+ struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
+ union mcp251xfd_write_reg_buf rts_buf;
+};
+
+struct mcp251xfd_rx_ring {
+ unsigned int head;
+ unsigned int tail;
+
+ u16 base;
+ u8 nr;
+ u8 fifo_nr;
+ u8 obj_num;
+ u8 obj_size;
+
+ struct mcp251xfd_hw_rx_obj_canfd obj[];
+};
+
+struct __packed mcp251xfd_map_buf_nocrc {
+ struct mcp251xfd_buf_cmd cmd;
+ u8 data[256];
+} ____cacheline_aligned;
+
+struct __packed mcp251xfd_map_buf_crc {
+ struct mcp251xfd_buf_cmd_crc cmd;
+ u8 data[256 - 4];
+ __be16 crc;
+} ____cacheline_aligned;
+
+struct mcp251xfd_ecc {
+ u32 ecc_stat;
+ int cnt;
+};
+
+struct mcp251xfd_regs_status {
+ u32 intf;
+};
+
+enum mcp251xfd_model {
+ MCP251XFD_MODEL_MCP2517FD = 0x2517,
+ MCP251XFD_MODEL_MCP2518FD = 0x2518,
+ MCP251XFD_MODEL_MCP251XFD = 0xffff, /* autodetect model */
+};
+
+struct mcp251xfd_devtype_data {
+ enum mcp251xfd_model model;
+ u32 quirks;
+};
+
+struct mcp251xfd_priv {
+ struct can_priv can;
+ struct can_rx_offload offload;
+ struct net_device *ndev;
+
+ struct regmap *map_reg; /* register access */
+ struct regmap *map_rx; /* RX/TEF RAM access */
+
+ struct regmap *map_nocrc;
+ struct mcp251xfd_map_buf_nocrc *map_buf_nocrc_rx;
+ struct mcp251xfd_map_buf_nocrc *map_buf_nocrc_tx;
+
+ struct regmap *map_crc;
+ struct mcp251xfd_map_buf_crc *map_buf_crc_rx;
+ struct mcp251xfd_map_buf_crc *map_buf_crc_tx;
+
+ struct spi_device *spi;
+ u32 spi_max_speed_hz_orig;
+
+ struct mcp251xfd_tef_ring tef;
+ struct mcp251xfd_tx_ring tx[1];
+ struct mcp251xfd_rx_ring *rx[1];
+
+ u8 rx_ring_num;
+
+ struct mcp251xfd_ecc ecc;
+ struct mcp251xfd_regs_status regs_status;
+
+ struct gpio_desc *rx_int;
+ struct clk *clk;
+ struct regulator *reg_vdd;
+ struct regulator *reg_xceiver;
+
+ struct mcp251xfd_devtype_data devtype_data;
+ struct can_berr_counter bec;
+};
+
+#define MCP251XFD_IS(_model) \
+static inline bool \
+mcp251xfd_is_##_model(const struct mcp251xfd_priv *priv) \
+{ \
+ return priv->devtype_data.model == MCP251XFD_MODEL_MCP##_model##FD; \
+}
+
+MCP251XFD_IS(2517);
+MCP251XFD_IS(2518);
+MCP251XFD_IS(251X);
+
+static inline u8 mcp251xfd_first_byte_set(u32 mask)
+{
+ return (mask & 0x0000ffff) ?
+ ((mask & 0x000000ff) ? 0 : 1) :
+ ((mask & 0x00ff0000) ? 2 : 3);
+}
+
+static inline u8 mcp251xfd_last_byte_set(u32 mask)
+{
+ return (mask & 0xffff0000) ?
+ ((mask & 0xff000000) ? 3 : 2) :
+ ((mask & 0x0000ff00) ? 1 : 0);
+}
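+
+/* Worked example (illustrative): for mask = 0x00ffff00 (bits 8..23 set),
+ * mcp251xfd_first_byte_set() returns 1 and mcp251xfd_last_byte_set()
+ * returns 2, so only bytes 1..2 of the register need to be transferred.
+ */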
+
+static inline __be16 mcp251xfd_cmd_reset(void)
+{
+ return cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_RESET);
+}
+
+static inline void
+mcp251xfd_spi_cmd_read_nocrc(struct mcp251xfd_buf_cmd *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ | addr);
+}
+
+static inline void
+mcp251xfd_spi_cmd_write_nocrc(struct mcp251xfd_buf_cmd *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE | addr);
+}
+
+static inline bool mcp251xfd_reg_in_ram(unsigned int reg)
+{
+ static const struct regmap_range range =
+ regmap_reg_range(MCP251XFD_RAM_START,
+ MCP251XFD_RAM_START + MCP251XFD_RAM_SIZE - 4);
+
+ return regmap_reg_in_range(reg, &range);
+}
+
+static inline void
+__mcp251xfd_spi_cmd_crc_set_len(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 len, bool in_ram)
+{
+ /* Number of u32 for RAM access, number of u8 otherwise. */
+ if (in_ram)
+ cmd->len = len >> 2;
+ else
+ cmd->len = len;
+}
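+
+/* Example: a CRC read of one 76-byte CAN-FD RX object from RAM encodes
+ * len as 76 / 4 = 19 (number of u32), while a CRC read of a 4-byte SFR
+ * encodes len as 4 (number of u8).
+ */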
+
+static inline void
+mcp251xfd_spi_cmd_crc_set_len_in_ram(struct mcp251xfd_buf_cmd_crc *cmd, u16 len)
+{
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, true);
+}
+
+static inline void
+mcp251xfd_spi_cmd_crc_set_len_in_reg(struct mcp251xfd_buf_cmd_crc *cmd, u16 len)
+{
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, false);
+}
+
+static inline void
+mcp251xfd_spi_cmd_read_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd, u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ_CRC | addr);
+}
+
+static inline void
+mcp251xfd_spi_cmd_read_crc(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 addr, u16 len)
+{
+ mcp251xfd_spi_cmd_read_crc_set_addr(cmd, addr);
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, mcp251xfd_reg_in_ram(addr));
+}
+
+static inline void
+mcp251xfd_spi_cmd_write_crc_set_addr(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 addr)
+{
+ cmd->cmd = cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE_CRC | addr);
+}
+
+static inline void
+mcp251xfd_spi_cmd_write_crc(struct mcp251xfd_buf_cmd_crc *cmd,
+ u16 addr, u16 len)
+{
+ mcp251xfd_spi_cmd_write_crc_set_addr(cmd, addr);
+ __mcp251xfd_spi_cmd_crc_set_len(cmd, len, mcp251xfd_reg_in_ram(addr));
+}
+
+static inline u8 *
+mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
+ union mcp251xfd_write_reg_buf *write_reg_buf,
+ u16 addr)
+{
+ u8 *data;
+
+ if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) {
+ mcp251xfd_spi_cmd_write_crc_set_addr(&write_reg_buf->crc.cmd,
+ addr);
+ data = write_reg_buf->crc.data;
+ } else {
+ mcp251xfd_spi_cmd_write_nocrc(&write_reg_buf->nocrc.cmd,
+ addr);
+ data = write_reg_buf->nocrc.data;
+ }
+
+ return data;
+}
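+
+/* Usage sketch (illustrative, not taken from this patch): assemble a
+ * register write and let an SPI transfer point at the buffer:
+ *
+ *	union mcp251xfd_write_reg_buf buf;
+ *	u8 *data = mcp251xfd_spi_cmd_write(priv, &buf, MCP251XFD_REG_OSC);
+ *
+ *	put_unaligned_le32(MCP251XFD_REG_OSC_PLLEN, data);
+ */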
+
+static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
+{
+ return MCP251XFD_RAM_START +
+ sizeof(struct mcp251xfd_hw_tef_obj) * n;
+}
+
+static inline u16
+mcp251xfd_get_tx_obj_addr(const struct mcp251xfd_tx_ring *ring, u8 n)
+{
+ return ring->base + ring->obj_size * n;
+}
+
+static inline u16
+mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
+{
+ return ring->base + ring->obj_size * n;
+}
+
+static inline u8 mcp251xfd_get_tef_head(const struct mcp251xfd_priv *priv)
+{
+ return priv->tef.head & (priv->tx->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
+{
+ return priv->tef.tail & (priv->tx->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
+{
+ return priv->tef.head - priv->tef.tail;
+}
+
+static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
+{
+ u8 len;
+
+ len = mcp251xfd_get_tef_len(priv);
+
+ return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
+}
+
+static inline u8 mcp251xfd_get_tx_head(const struct mcp251xfd_tx_ring *ring)
+{
+ return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tx_tail(const struct mcp251xfd_tx_ring *ring)
+{
+ return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_tx_free(const struct mcp251xfd_tx_ring *ring)
+{
+ return ring->obj_num - (ring->head - ring->tail);
+}
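+
+/* head and tail are free-running counters; as long as obj_num is a power
+ * of two, unsigned wrap-around keeps the arithmetic correct. Example:
+ * head = 258, tail = 250, obj_num = 8 -> 8 objects in flight, 0 free,
+ * head index 258 & 7 = 2.
+ */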
+
+static inline int
+mcp251xfd_get_tx_nr_by_addr(const struct mcp251xfd_tx_ring *tx_ring, u8 *nr,
+ u16 addr)
+{
+ if (addr < mcp251xfd_get_tx_obj_addr(tx_ring, 0) ||
+ addr >= mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num))
+ return -ENOENT;
+
+ *nr = (addr - mcp251xfd_get_tx_obj_addr(tx_ring, 0)) /
+ tx_ring->obj_size;
+
+ return 0;
+}
+
+static inline u8 mcp251xfd_get_rx_head(const struct mcp251xfd_rx_ring *ring)
+{
+ return ring->head & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
+{
+ return ring->tail & (ring->obj_num - 1);
+}
+
+static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
+{
+ return ring->head - ring->tail;
+}
+
+static inline u8
+mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
+{
+ u8 len;
+
+ len = mcp251xfd_get_rx_len(ring);
+
+ return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
+}
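+
+/* The "linear" length is the number of objects that can be consumed
+ * without wrapping past the end of the ring, i.e. what one contiguous
+ * SPI burst can fetch. Example: obj_num = 32, tail = 30, len = 6 ->
+ * linear_len = min(6, 32 - 30) = 2.
+ */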
+
+#define mcp251xfd_for_each_tx_obj(ring, _obj, n) \
+ for ((n) = 0, (_obj) = &(ring)->obj[(n)]; \
+ (n) < (ring)->obj_num; \
+ (n)++, (_obj) = &(ring)->obj[(n)])
+
+#define mcp251xfd_for_each_rx_ring(priv, ring, n) \
+ for ((n) = 0, (ring) = *((priv)->rx + (n)); \
+ (n) < (priv)->rx_ring_num; \
+ (n)++, (ring) = *((priv)->rx + (n)))
+
+int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv);
+u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size,
+ const void *data, size_t data_size);
+u16 mcp251xfd_crc16_compute(const void *data, size_t data_size);
+
+#endif
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 94b1491b569f..1d63006c97bc 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -454,7 +454,7 @@ static int ti_hecc_get_berr_counter(const struct net_device *ndev,
/* ti_hecc_xmit: HECC Transmit
*
* The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
- * priority of the mailbox for tranmission is dependent upon priority setting
+ * priority of the mailbox for transmission is dependent upon priority setting
* field in mailbox registers. The mailbox with highest value in priority field
* is transmitted first. Only when two mailboxes have the same value in
* priority field the highest numbered mailbox is transmitted first.
@@ -857,7 +857,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
struct net_device *ndev = (struct net_device *)0;
struct ti_hecc_priv *priv;
struct device_node *np = pdev->dev.of_node;
- struct resource *res, *irq;
+ struct resource *irq;
struct regulator *reg_xceiver;
int err = -ENODEV;
@@ -878,39 +878,22 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
/* handle hecc memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc\n");
- return -EINVAL;
- }
-
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
if (IS_ERR(priv->base)) {
dev_err(&pdev->dev, "hecc ioremap failed\n");
return PTR_ERR(priv->base);
}
/* handle hecc-ram memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hecc-ram");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM hecc-ram\n");
- return -EINVAL;
- }
-
- priv->hecc_ram = devm_ioremap_resource(&pdev->dev, res);
+ priv->hecc_ram = devm_platform_ioremap_resource_byname(pdev,
+ "hecc-ram");
if (IS_ERR(priv->hecc_ram)) {
dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
return PTR_ERR(priv->hecc_ram);
}
/* handle mbx memory */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mbx");
- if (!res) {
- dev_err(&pdev->dev, "can't get IORESOURCE_MEM mbx\n");
- return -EINVAL;
- }
-
- priv->mbx = devm_ioremap_resource(&pdev->dev, res);
+ priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
if (IS_ERR(priv->mbx)) {
dev_err(&pdev->dev, "mbx ioremap failed\n");
return PTR_ERR(priv->mbx);
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index 77fa830fe7dd..bcb331b0c958 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -90,7 +90,7 @@ config CAN_PEAK_USB
tristate "PEAK PCAN-USB/USB Pro interfaces for CAN 2.0b/CAN-FD"
help
This driver supports the PEAK-System Technik USB adapters that enable
- access to the CAN bus, with repect to the CAN 2.0b and/or CAN-FD
+ access to the CAN bus, with respect to the CAN 2.0b and/or CAN-FD
standards, that is:
PCAN-USB single CAN 2.0b channel USB adapter
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index a4b4b742c80c..3005157059ca 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -828,7 +828,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
- /* dev settup */
+ /* dev setup */
strcpy(dev->bt_const.name, "gs_usb");
dev->bt_const.tseg1_min = bt_const->tseg1_min;
dev->bt_const.tseg1_max = bt_const->tseg1_max;
@@ -852,7 +852,7 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
}
- /* can settup */
+ /* can setup */
dev->can.state = CAN_STATE_STOPPED;
dev->can.clock.freq = bt_const->fclk_can;
dev->can.bittiming_const = &dev->bt_const;
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 21faa2ec4632..5857b37dcd96 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -28,7 +28,7 @@
#define MCBA_CTX_FREE MCBA_MAX_TX_URBS
/* RX buffer must be bigger than msg size since at the
- * beggining USB messages are stacked.
+ * beginning USB messages are stacked.
*/
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))
@@ -793,7 +793,7 @@ static int mcba_usb_probe(struct usb_interface *intf,
{
struct net_device *netdev;
struct mcba_priv *priv;
- int err = -ENOMEM;
+ int err;
struct usb_device *usbdev = interface_to_usbdev(intf);
netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 66d0198e7834..63bd2ed96697 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -34,6 +34,23 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \
PCAN_USB_CMD_ARGS_LEN)
+/* PCAN-USB commands */
+#define PCAN_USB_CMD_BITRATE 1
+#define PCAN_USB_CMD_SET_BUS 3
+#define PCAN_USB_CMD_DEVID 4
+#define PCAN_USB_CMD_SN 6
+#define PCAN_USB_CMD_REGISTER 9
+#define PCAN_USB_CMD_EXT_VCC 10
+#define PCAN_USB_CMD_ERR_FR 11
+
+/* PCAN_USB_CMD_SET_BUS number arg */
+#define PCAN_USB_BUS_XCVER 2
+#define PCAN_USB_BUS_SILENT_MODE 3
+
+/* PCAN_USB_CMD_xxx functions */
+#define PCAN_USB_GET 1
+#define PCAN_USB_SET 2
+
/* PCAN-USB command timeout (ms.) */
#define PCAN_USB_COMMAND_TIMEOUT 1000
@@ -66,6 +83,10 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_ERROR_QOVR 0x40
#define PCAN_USB_ERROR_TXQFULL 0x80
+#define PCAN_USB_ERROR_BUS (PCAN_USB_ERROR_BUS_LIGHT | \
+ PCAN_USB_ERROR_BUS_HEAVY | \
+ PCAN_USB_ERROR_BUS_OFF)
+
/* SJA1000 modes */
#define SJA1000_MODE_NORMAL 0x00
#define SJA1000_MODE_INIT 0x01
@@ -85,11 +106,25 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB adapter");
#define PCAN_USB_REC_TS 4
#define PCAN_USB_REC_BUSEVT 5
+/* CAN bus events notifications selection mask */
+#define PCAN_USB_ERR_RXERR 0x02 /* ask for rxerr counter */
+#define PCAN_USB_ERR_TXERR 0x04 /* ask for txerr counter */
+
+/* This mask generates a USB packet each time the state of the bus changes.
+ * In other words, it is used to know which side, rx or tx, is responsible
+ * for the change of the bus state.
+ */
+#define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR)
+
+/* identify bus event packets with rx/tx error counters */
+#define PCAN_USB_ERR_CNT 0x80
+
/* private to PCAN-USB adapter */
struct pcan_usb {
struct peak_usb_device dev;
struct peak_time_ref time_ref;
struct timer_list restart_timer;
+ struct can_berr_counter bec;
};
/* incoming message context for decoding */
@@ -172,7 +207,8 @@ static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode)
[1] = mode,
};
- return pcan_usb_send_cmd(dev, 9, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_REGISTER, PCAN_USB_SET,
+ args);
}
static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
@@ -181,7 +217,8 @@ static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 3, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_XCVER,
+ args);
}
static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
@@ -190,7 +227,18 @@ static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 3, 3, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS,
+ PCAN_USB_BUS_SILENT_MODE, args);
+}
+
+/* send the cmd to be notified from bus errors */
+static int pcan_usb_set_err_frame(struct peak_usb_device *dev, u8 err_mask)
+{
+ u8 args[PCAN_USB_CMD_ARGS_LEN] = {
+ [0] = err_mask,
+ };
+
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_ERR_FR, PCAN_USB_SET, args);
}
static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
@@ -199,7 +247,7 @@ static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff)
[0] = !!onoff,
};
- return pcan_usb_send_cmd(dev, 10, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args);
}
/*
@@ -223,7 +271,7 @@ static int pcan_usb_set_bittiming(struct peak_usb_device *dev,
args[0] = btr1;
args[1] = btr0;
- return pcan_usb_send_cmd(dev, 1, 2, args);
+ return pcan_usb_send_cmd(dev, PCAN_USB_CMD_BITRATE, PCAN_USB_SET, args);
}
/*
@@ -307,7 +355,7 @@ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number)
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
- err = pcan_usb_wait_rsp(dev, 6, 1, args);
+ err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args);
if (err) {
netdev_err(dev->netdev, "getting serial failure: %d\n", err);
} else if (serial_number) {
@@ -328,7 +376,7 @@ static int pcan_usb_get_device_id(struct peak_usb_device *dev, u32 *device_id)
u8 args[PCAN_USB_CMD_ARGS_LEN];
int err;
- err = pcan_usb_wait_rsp(dev, 4, 1, args);
+ err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args);
if (err)
netdev_err(dev->netdev, "getting device id failure: %d\n", err);
else if (device_id)
@@ -426,7 +474,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_BUS_OFF;
break;
}
- if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ if (n & ~PCAN_USB_ERROR_BUS) {
/*
* trick to bypass next comparison and process other
* errors
@@ -450,7 +498,7 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
new_state = CAN_STATE_ERROR_WARNING;
break;
}
- if (n & (PCAN_USB_ERROR_RXQOVR | PCAN_USB_ERROR_QOVR)) {
+ if (n & ~PCAN_USB_ERROR_BUS) {
/*
* trick to bypass next comparison and process other
* errors
@@ -489,29 +537,50 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
case CAN_STATE_ERROR_PASSIVE:
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE |
- CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_PASSIVE :
+ CAN_ERR_CRTL_RX_PASSIVE;
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+
mc->pdev->dev.can.can_stats.error_passive++;
break;
case CAN_STATE_ERROR_WARNING:
cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_TX_WARNING |
- CAN_ERR_CRTL_RX_WARNING;
+ cf->data[1] = (mc->pdev->bec.txerr > mc->pdev->bec.rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
+
mc->pdev->dev.can.can_stats.error_warning++;
break;
case CAN_STATE_ERROR_ACTIVE:
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+
+ /* sync local copies of rxerr/txerr counters */
+ mc->pdev->bec.txerr = 0;
+ mc->pdev->bec.rxerr = 0;
break;
default:
/* CAN_STATE_MAX (trick to handle other errors) */
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- mc->netdev->stats.rx_over_errors++;
- mc->netdev->stats.rx_errors++;
+ if (n & PCAN_USB_ERROR_TXQFULL)
+ netdev_dbg(mc->netdev, "device Tx queue full)\n");
+
+ if (n & PCAN_USB_ERROR_RXQOVR) {
+ netdev_dbg(mc->netdev, "data overrun interrupt\n");
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
+ mc->netdev->stats.rx_over_errors++;
+ mc->netdev->stats.rx_errors++;
+ }
+
+ cf->data[6] = mc->pdev->bec.txerr;
+ cf->data[7] = mc->pdev->bec.rxerr;
new_state = mc->pdev->dev.can.state;
break;
@@ -533,6 +602,30 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
return 0;
}
+/* decode bus event USB packet: the first byte contains rxerr, the second
+ * one contains txerr.
+ */
+static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir)
+{
+ struct pcan_usb *pdev = mc->pdev;
+
+	/* according to the content of the packet */
+ switch (ir) {
+ case PCAN_USB_ERR_CNT:
+
+		/* save rx/tx error counters in the device context */
+ pdev->bec.rxerr = mc->ptr[0];
+ pdev->bec.txerr = mc->ptr[1];
+ break;
+
+ default:
+ /* reserved */
+ break;
+ }
+
+ return 0;
+}
+
/*
* decode non-data usb message
*/
@@ -587,9 +680,10 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
break;
case PCAN_USB_REC_BUSEVT:
- /* error frame/bus event */
- if (n & PCAN_USB_ERROR_TXQFULL)
- netdev_dbg(mc->netdev, "device Tx queue full)\n");
+ /* bus event notifications (get rxerr/txerr) */
+ err = pcan_usb_handle_bus_evt(mc, n);
+ if (err)
+ return err;
break;
default:
netdev_err(mc->netdev, "unexpected function %u\n", f);
@@ -773,20 +867,44 @@ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb,
return 0;
}
+/* socket callback used to copy berr counters values received through USB */
+static int pcan_usb_get_berr_counter(const struct net_device *netdev,
+ struct can_berr_counter *bec)
+{
+ struct peak_usb_device *dev = netdev_priv(netdev);
+ struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+
+ *bec = pdev->bec;
+
+ /* must return 0 */
+ return 0;
+}
+
/*
* start interface
*/
static int pcan_usb_start(struct peak_usb_device *dev)
{
struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev);
+ int err;
/* number of bits used in timestamps read from adapter struct */
peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb);
+ pdev->bec.rxerr = 0;
+ pdev->bec.txerr = 0;
+
+ /* be notified on error counter changes (if requested by user) */
+ if (dev->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) {
+ err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK);
+ if (err)
+ netdev_warn(dev->netdev,
+ "Asking for BERR reporting error %u\n",
+ err);
+ }
+
/* if revision greater than 3, can put silent mode on/off */
if (dev->device_rev > 3) {
- int err;
-
err = pcan_usb_set_silent(dev,
dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY);
if (err)
@@ -873,7 +991,8 @@ const struct peak_usb_adapter pcan_usb = {
.name = "PCAN-USB",
.device_id = PCAN_USB_PRODUCT_ID,
.ctrl_count = 1,
- .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY,
+ .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_BERR_REPORTING,
.clock = {
.freq = PCAN_USB_CRYSTAL_HZ / 2 ,
},
@@ -906,4 +1025,5 @@ const struct peak_usb_adapter pcan_usb = {
.dev_encode_msg = pcan_usb_encode_msg,
.dev_start = pcan_usb_start,
.dev_restart_async = pcan_usb_restart_async,
+ .do_get_berr_counter = pcan_usb_get_berr_counter,
};
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index 47cc1ff5b88e..ab63fd9eb982 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -35,7 +35,7 @@ MODULE_SUPPORTED_DEVICE("PEAK-System PCAN-USB Pro FD adapter");
#define PCAN_UFD_RX_BUFFER_SIZE 2048
#define PCAN_UFD_TX_BUFFER_SIZE 512
-/* read some versions info from the hw devcie */
+/* read some versions info from the hw device */
struct __packed pcan_ufd_fw_info {
__le16 size_of; /* sizeof this */
__le16 type; /* type of this structure */
@@ -796,7 +796,7 @@ static int pcan_usb_fd_start(struct peak_usb_device *dev)
return err;
}
-/* socket callback used to copy berr counters values receieved through USB */
+/* socket callback used to copy berr counters values received through USB */
static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev,
struct can_berr_counter *bec)
{
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 1689ab387612..c7564773fb2b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -186,7 +186,7 @@ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...)
len = pc - pm->rec_ptr;
if (len > 0) {
- *pm->u.rec_cnt = cpu_to_le32(le32_to_cpu(*pm->u.rec_cnt) + 1);
+ le32_add_cpu(pm->u.rec_cnt, 1);
*pm->rec_ptr = id;
pm->rec_ptr = pc;
@@ -973,7 +973,7 @@ int pcan_usb_pro_probe(struct usb_interface *intf)
struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc;
/*
- * below is the list of valid ep addreses. Any other ep address
+ * below is the list of valid ep addresses. Any other ep address
* is considered as not-CAN interface address => no dev created
*/
switch (ep->bEndpointAddress) {
diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
index 81e942f713e6..dc5290b36598 100644
--- a/drivers/net/can/usb/ucan.c
+++ b/drivers/net/can/usb/ucan.c
@@ -1445,7 +1445,7 @@ static int ucan_probe(struct usb_interface *intf,
/* request the device information and store it in ctl_msg_buffer
*
- * note: ucan_ctrl_command_* wrappers connot be used yet
+ * note: ucan_ctrl_command_* wrappers cannot be used yet
* because `up` is initialised in Stage 3
*/
ret = usb_control_msg(udev,
@@ -1494,7 +1494,7 @@ static int ucan_probe(struct usb_interface *intf,
up = netdev_priv(netdev);
- /* initialze data */
+ /* initialize data */
up->udev = udev;
up->intf = intf;
up->netdev = netdev;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 8fa224b28218..62749c67c959 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -88,7 +88,7 @@ enum usb_8dev_cmd {
/* status */
#define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */
-#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occured when sending */
+#define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */
#define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */
#define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error count. has reached 128 */
#define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */
@@ -165,7 +165,7 @@ struct __packed usb_8dev_rx_msg {
/* command frame */
struct __packed usb_8dev_cmd_msg {
u8 begin;
- u8 channel; /* unkown - always 0 */
+ u8 channel; /* unknown - always 0 */
u8 command; /* command to execute */
u8 opt1; /* optional parameter / return value */
u8 opt2; /* optional parameter 2 */
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c1dbab8c896d..6c4d00d2dbdc 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -259,7 +259,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 128,
.sjw_max = 128,
- .brp_min = 1,
+ .brp_min = 2,
.brp_max = 256,
.brp_inc = 1,
};
@@ -272,7 +272,7 @@ static struct can_bittiming_const xcan_data_bittiming_const_canfd2 = {
.tseg2_min = 1,
.tseg2_max = 16,
.sjw_max = 16,
- .brp_min = 1,
+ .brp_min = 2,
.brp_max = 256,
.brp_inc = 1,
};
@@ -1308,7 +1308,7 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
/**
* xcan_interrupt - CAN Isr
* @irq: irq number
- * @dev_id: device id poniter
+ * @dev_id: device id pointer
*
* This is the xilinx CAN Isr. It checks for the type of interrupt
* and invokes the corresponding ISR.
@@ -1369,9 +1369,13 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
static void xcan_chip_stop(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
+ int ret;
/* Disable interrupts and leave the can in configuration mode */
- set_reset_mode(ndev);
+ ret = set_reset_mode(ndev);
+ if (ret < 0)
+ netdev_dbg(ndev, "set_reset_mode() Failed\n");
+
priv->can.state = CAN_STATE_STOPPED;
}
@@ -1667,7 +1671,7 @@ static int xcan_probe(struct platform_device *pdev)
void __iomem *addr;
int ret;
int rx_max, tx_max;
- int hw_tx_max, hw_rx_max;
+ u32 hw_tx_max = 0, hw_rx_max = 0;
const char *hw_tx_max_property;
/* Get the virtual base address for the device */
@@ -1720,7 +1724,7 @@ static int xcan_probe(struct platform_device *pdev)
*/
if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) &&
(devtype->flags & XCAN_FLAG_TXFEMP))
- tx_max = min(hw_tx_max, 2);
+ tx_max = min(hw_tx_max, 2U);
else
tx_max = 1;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 468b3c4273c5..2451f61a38e4 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -33,12 +33,12 @@ config NET_DSA_LANTIQ_GSWIP
the xrx200 / VR9 SoC.
config NET_DSA_MT7530
- tristate "Mediatek MT7530 Ethernet switch support"
+ tristate "MediaTek MT753x and MT7621 Ethernet switch support"
depends on NET_DSA
select NET_DSA_TAG_MTK
help
- This enables support for the Mediatek MT7530 Ethernet switch
- chip.
+ This enables support for the MediaTek MT7530, MT7531, and MT7621
+ Ethernet switch chips.
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index e731db900ee0..288b5a5c3e0d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -17,8 +17,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/gpio.h>
@@ -767,8 +765,11 @@ static int b53_switch_reset(struct b53_device *dev)
usleep_range(1000, 2000);
} while (timeout-- > 0);
- if (timeout == 0)
+ if (timeout == 0) {
+ dev_err(dev->dev,
+ "Timeout waiting for SW_RST to clear!\n");
return -ETIMEDOUT;
+ }
}
b53_read8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, &mgmt);
@@ -976,6 +977,54 @@ int b53_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
EXPORT_SYMBOL(b53_get_sset_count);
+enum b53_devlink_resource_id {
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+};
+
+static u64 b53_devlink_vlan_table_get(void *priv)
+{
+ struct b53_device *dev = priv;
+ struct b53_vlan *vl;
+ unsigned int i;
+ u64 count = 0;
+
+ for (i = 0; i < dev->num_vlans; i++) {
+ vl = &dev->vlans[i];
+ if (vl->members)
+ count++;
+ }
+
+ return count;
+}
+
+int b53_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct b53_device *dev = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params, dev->num_vlans,
+ dev->num_vlans,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VLAN", dev->num_vlans,
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ B53_DEVLINK_PARAM_ID_VLAN_TABLE,
+ b53_devlink_vlan_table_get, dev);
+
+ return 0;
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+EXPORT_SYMBOL(b53_setup_devlink_resources);
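+
+/* Once registered, the occupancy is visible from userspace via devlink,
+ * e.g. (illustrative output; the device handle depends on the platform):
+ *
+ *	$ devlink resource show <bus>/<device>
+ *	  name VLAN size 4096 occ 1 unit entry
+ */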
+
static int b53_setup(struct dsa_switch *ds)
{
struct b53_device *dev = ds->priv;
@@ -991,8 +1040,10 @@ static int b53_setup(struct dsa_switch *ds)
b53_reset_mib(dev);
ret = b53_apply_config(dev);
- if (ret)
+ if (ret) {
dev_err(ds->dev, "failed to apply configuration\n");
+ return ret;
+ }
/* Configure IMP/CPU port, disable all other ports. Enabled
* ports will be configured with .port_enable
@@ -1011,7 +1062,12 @@ static int b53_setup(struct dsa_switch *ds)
*/
ds->vlan_filtering_is_global = true;
- return ret;
+ return b53_setup_devlink_resources(ds);
+}
+
+static void b53_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
}
static void b53_force_link(struct b53_device *dev, int port, int link)
@@ -1318,26 +1374,13 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_phylink_mac_link_up);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct b53_device *dev = ds->priv;
- u16 pvid, new_pvid;
- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
- if (!vlan_filtering) {
- /* Filtering is currently enabled, use the default PVID since
- * the bridge does not expect tagging anymore
- */
- dev->ports[port].pvid = pvid;
- new_pvid = b53_default_pvid(dev);
- } else {
- /* Filtering is currently disabled, restore the previous PVID */
- new_pvid = dev->ports[port].pvid;
- }
-
- if (pvid != new_pvid)
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
- new_pvid);
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
@@ -2140,6 +2183,7 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
+ .teardown = b53_teardown,
.get_strings = b53_get_strings,
.get_ethtool_stats = b53_get_ethtool_stats,
.get_sset_count = b53_get_sset_count,
@@ -2562,6 +2606,9 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
+ ds->configure_vlan_while_not_filtering = true;
+ ds->untag_bridge_pvid = true;
+ dev->vlan_enabled = ds->configure_vlan_while_not_filtering;
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
@@ -2620,8 +2667,9 @@ int b53_switch_detect(struct b53_device *dev)
dev->chip_id = id32;
break;
default:
- pr_err("unsupported switch detected (BCM53%02x/BCM%x)\n",
- id8, id32);
+ dev_err(dev->dev,
+ "unsupported switch detected (BCM53%02x/BCM%x)\n",
+ id8, id32);
return -ENODEV;
}
}
@@ -2651,7 +2699,8 @@ int b53_switch_register(struct b53_device *dev)
if (ret)
return ret;
- pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
+ dev_info(dev->dev, "found switch: %s, rev %i\n",
+ dev->name, dev->core_rev);
return dsa_register_switch(dev->ds);
}
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index e942c60e4365..7c67409bb186 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -91,7 +91,6 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
struct ethtool_eee eee;
- u16 pvid;
};
struct b53_vlan {
@@ -328,6 +327,7 @@ void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_br_egress_floods(struct dsa_switch *ds, int port,
bool unicast, bool multicast);
+int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
void b53_phylink_validate(struct dsa_switch *ds, int port,
unsigned long *supported,
@@ -347,7 +347,8 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause);
-int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
+int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans);
int b53_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void b53_vlan_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 5ebff986a1ac..0b5b2b33b3b6 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -14,6 +14,7 @@
#include <linux/phy_fixed.h>
#include <linux/phylink.h>
#include <linux/mii.h>
+#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
@@ -31,6 +32,49 @@
#include "b53/b53_priv.h"
#include "b53/b53_regs.h"
+/* Return the number of active ports, not counting the IMP (CPU) port */
+static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned int port, count = 0;
+
+ for (port = 0; port < ARRAY_SIZE(priv->port_sts); port++) {
+ if (dsa_is_cpu_port(ds, port))
+ continue;
+ if (priv->port_sts[port].enabled)
+ count++;
+ }
+
+ return count;
+}
+
+static void bcm_sf2_recalc_clock(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ unsigned long new_rate;
+ unsigned int ports_active;
+	/* Frequency in Hz */
+ const unsigned long rate_table[] = {
+ 59220000,
+ 60820000,
+ 62500000,
+ 62500000,
+ };
+
+ ports_active = bcm_sf2_num_active_ports(ds);
+ if (ports_active == 0 || !priv->clk_mdiv)
+ return;
+
+ /* If we overflow our table, just use the recommended operational
+ * frequency
+ */
+ if (ports_active > ARRAY_SIZE(rate_table))
+ new_rate = 90000000;
+ else
+ new_rate = rate_table[ports_active - 1];
+ clk_set_rate(priv->clk_mdiv, new_rate);
+}
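+
+/* Example: with one active user port the MDIV clock is set to 59.22 MHz,
+ * with four active ports to 62.5 MHz, and with more than four ports the
+ * recommended 90 MHz operational frequency is used instead.
+ */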
+
static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
@@ -82,6 +126,8 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
reg &= ~(RX_DIS | TX_DIS);
core_writel(priv, reg, CORE_G_PCTL_PORT(port));
}
+
+ priv->port_sts[port].enabled = true;
}
static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
@@ -167,6 +213,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
if (!dsa_is_user_port(ds, port))
return 0;
+ priv->port_sts[port].enabled = true;
+
+ bcm_sf2_recalc_clock(ds);
+
/* Clear the memory power down */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg &= ~P_TXQ_PSM_VDD(port);
@@ -260,6 +310,10 @@ static void bcm_sf2_port_disable(struct dsa_switch *ds, int port)
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
reg |= P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ priv->port_sts[port].enabled = false;
+
+ bcm_sf2_recalc_clock(ds);
}
@@ -403,6 +457,7 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
{
struct device_node *port;
unsigned int port_num;
+ struct property *prop;
phy_interface_t mode;
int err;
@@ -429,15 +484,27 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
priv->brcm_tag_mask |= 1 << port_num;
+
+ /* Ensure that port 5 is not picked up as a DSA CPU port
+ * flavour but a regular port instead. We should be using
+ * devlink to be able to set the port flavour.
+ */
+ if (port_num == 5 && priv->type == BCM7278_DEVICE_ID) {
+ prop = of_find_property(port, "ethernet", NULL);
+ if (prop)
+ of_remove_property(port, prop);
+ }
}
}
static int bcm_sf2_mdio_register(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct device_node *dn;
+ struct device_node *dn, *child;
+ struct phy_device *phydev;
+ struct property *prop;
static int index;
- int err;
+ int err, reg;
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
@@ -471,7 +538,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
* driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
- priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR);
+ priv->indir_phy_mask |= (1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0);
else
priv->indir_phy_mask = 0;
@@ -480,6 +547,31 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
+	/* Make sure that of_phy_connect() cannot match these nodes by
+	 * removing the 'phandle' and 'linux,phandle' properties, and
+	 * unregister any PHY device that was already registered.
+	 */
+ for_each_available_child_of_node(dn, child) {
+ if (of_property_read_u32(child, "reg", &reg) ||
+ reg >= PHY_MAX_ADDR)
+ continue;
+
+ if (!(priv->indir_phy_mask & BIT(reg)))
+ continue;
+
+ prop = of_find_property(child, "phandle", NULL);
+ if (prop)
+ of_remove_property(child, prop);
+
+ prop = of_find_property(child, "linux,phandle", NULL);
+ if (prop)
+ of_remove_property(child, prop);
+
+ phydev = of_phy_find_device(child);
+ if (phydev)
+ phy_device_remove(phydev);
+ }
+
err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
of_node_put(dn);
@@ -750,6 +842,9 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
bcm_sf2_port_disable(ds, port);
}
+ if (!priv->wol_ports_mask)
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -758,6 +853,9 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret;
+ if (!priv->wol_ports_mask)
+ clk_prepare_enable(priv->clk);
+
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("%s: failed to software reset switch\n", __func__);
@@ -849,7 +947,12 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
b53_configure_vlan(ds);
bcm_sf2_enable_acb(ds);
- return 0;
+ return b53_setup_devlink_resources(ds);
+}
+
+static void bcm_sf2_sw_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
}
/* The SWITCH_CORE register space is managed by b53 but operates on a page +
@@ -986,6 +1089,7 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
+ .teardown = bcm_sf2_sw_teardown,
.get_strings = bcm_sf2_sw_get_strings,
.get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats,
.get_sset_count = bcm_sf2_sw_get_sset_count,
@@ -1189,10 +1293,24 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
base++;
}
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_switch");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ clk_prepare_enable(priv->clk);
+
+ priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv");
+ if (IS_ERR(priv->clk_mdiv)) {
+ ret = PTR_ERR(priv->clk_mdiv);
+ goto out_clk;
+ }
+
+ clk_prepare_enable(priv->clk_mdiv);
+
ret = bcm_sf2_sw_rst(priv);
if (ret) {
pr_err("unable to software reset switch: %d\n", ret);
- return ret;
+ goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, true);
@@ -1200,7 +1318,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
ret = bcm_sf2_mdio_register(ds);
if (ret) {
pr_err("failed to register MDIO bus\n");
- return ret;
+ goto out_clk_mdiv;
}
bcm_sf2_gphy_enable_set(priv->dev->ds, false);
@@ -1267,6 +1385,10 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
out_mdio:
bcm_sf2_mdio_unregister(priv);
+out_clk_mdiv:
+ clk_disable_unprepare(priv->clk_mdiv);
+out_clk:
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -1280,6 +1402,8 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
dsa_unregister_switch(priv->dev->ds);
bcm_sf2_cfp_exit(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
+ clk_disable_unprepare(priv->clk_mdiv);
+ clk_disable_unprepare(priv->clk);
if (priv->type == BCM7278_DEVICE_ID && !IS_ERR(priv->rcdev))
reset_control_assert(priv->rcdev);
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index de386dd96d66..1ed901a68536 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -45,6 +45,7 @@ struct bcm_sf2_hw_params {
struct bcm_sf2_port_status {
unsigned int link;
+ bool enabled;
};
struct bcm_sf2_cfp_priv {
@@ -93,6 +94,9 @@ struct bcm_sf2_priv {
/* Mask of ports enabled for Wake-on-LAN */
u32 wol_ports_mask;
+ struct clk *clk;
+ struct clk *clk_mdiv;
+
/* MoCA port location */
int moca_port;
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index eb600b3dbf26..e38906ae8f23 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -28,6 +28,53 @@ static struct dsa_loop_mib_entry dsa_loop_mibs[] = {
static struct phy_device *phydevs[PHY_MAX_ADDR];
+enum dsa_loop_devlink_resource_id {
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+};
+
+static u64 dsa_loop_devlink_vtu_get(void *priv)
+{
+ struct dsa_loop_priv *ps = priv;
+ unsigned int i, count = 0;
+ struct dsa_loop_vlan *vl;
+
+ for (i = 0; i < ARRAY_SIZE(ps->vlans); i++) {
+ vl = &ps->vlans[i];
+ if (vl->members)
+ count++;
+ }
+
+ return count;
+}
+
+static int dsa_loop_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct dsa_loop_priv *ps = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params, ARRAY_SIZE(ps->vlans),
+ ARRAY_SIZE(ps->vlans),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "VTU", ARRAY_SIZE(ps->vlans),
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ DSA_LOOP_DEVLINK_PARAM_ID_VTU,
+ dsa_loop_devlink_vtu_get, ps);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
static enum dsa_tag_protocol dsa_loop_get_protocol(struct dsa_switch *ds,
int port,
enum dsa_tag_protocol mp)
@@ -48,7 +95,12 @@ static int dsa_loop_setup(struct dsa_switch *ds)
dev_dbg(ds->dev, "%s\n", __func__);
- return 0;
+ return dsa_loop_setup_devlink_resources(ds);
+}
+
+static void dsa_loop_teardown(struct dsa_switch *ds)
+{
+ dsa_devlink_resources_unregister(ds);
}
static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port, int sset)
@@ -138,7 +190,8 @@ static void dsa_loop_port_stp_state_set(struct dsa_switch *ds, int port,
}
static int dsa_loop_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
dev_dbg(ds->dev, "%s: port: %d, vlan_filtering: %d\n",
__func__, port, vlan_filtering);
@@ -243,6 +296,7 @@ static int dsa_loop_port_max_mtu(struct dsa_switch *ds, int port)
static const struct dsa_switch_ops dsa_loop_driver = {
.get_tag_protocol = dsa_loop_get_protocol,
.setup = dsa_loop_setup,
+ .teardown = dsa_loop_teardown,
.get_strings = dsa_loop_get_strings,
.get_ethtool_stats = dsa_loop_get_ethtool_stats,
.get_sset_count = dsa_loop_get_sset_count,
@@ -290,6 +344,7 @@ static int dsa_loop_drv_probe(struct mdio_device *mdiodev)
ds->dev = &mdiodev->dev;
ds->ops = &dsa_loop_driver;
ds->priv = ps;
+ ds->configure_vlan_while_not_filtering = true;
ps->bus = mdiodev->bus;
dev_set_drvdata(&mdiodev->dev, ds);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 521ebc072903..74db81dafee3 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -736,14 +736,23 @@ static int gswip_pce_load_microcode(struct gswip_priv *priv)
}
static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct gswip_priv *priv = ds->priv;
- struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
/* Do not allow changing the VLAN filtering options while in bridge */
- if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering && bridge)
- return -EIO;
+ if (switchdev_trans_ph_prepare(trans)) {
+ struct net_device *bridge = dsa_to_port(ds, port)->bridge_dev;
+
+ if (!bridge)
+ return 0;
+
+ if (!!(priv->port_vlan_filter & BIT(port)) != vlan_filtering)
+ return -EIO;
+
+ return 0;
+ }
if (vlan_filtering) {
/* Use port based VLAN tag */
@@ -781,8 +790,15 @@ static int gswip_setup(struct dsa_switch *ds)
/* disable port fetch/store dma on all ports */
for (i = 0; i < priv->hw_info->max_ports; i++) {
+ struct switchdev_trans trans;
+
+		/* Skip the prepare phase; this shouldn't return an error
+ * during setup.
+ */
+ trans.ph_prepare = false;
+
gswip_port_disable(ds, i);
- gswip_port_vlan_filtering(ds, i, false);
+ gswip_port_vlan_filtering(ds, i, false, &trans);
}
/* enable Switch */
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index f5779e152377..1e101ab56cea 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -782,10 +782,14 @@ static void ksz8795_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz8795_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag)
+ bool flag,
+ struct switchdev_trans *trans)
{
struct ksz_device *dev = ds->priv;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
ksz_cfg(dev, S_MIRROR_CTRL, SW_VLAN_ENABLE, flag);
return 0;
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 2f5506ac7d19..abfd3802bb51 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -493,10 +493,14 @@ static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port)
}
static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool flag)
+ bool flag,
+ struct switchdev_trans *trans)
{
struct ksz_device *dev = ds->priv;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
if (flag) {
ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL,
PORT_VLAN_LOOKUP_VID_0, true);
@@ -1235,6 +1239,9 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
if (p->interface == PHY_INTERFACE_MODE_RGMII_ID ||
p->interface == PHY_INTERFACE_MODE_RGMII_TXID)
data8 |= PORT_RGMII_ID_EG_ENABLE;
+ /* On KSZ9893, disable RGMII in-band status support */
+ if (dev->features & IS_9893)
+ data8 &= ~PORT_MII_MAC_MODE;
p->phydev.speed = SPEED_1000;
break;
}
@@ -1265,6 +1272,8 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
for (i = 0; i < dev->port_cnt; i++) {
if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) {
phy_interface_t interface;
+ const char *prev_msg;
+ const char *prev_mode;
dev->cpu_port = i;
dev->host_mask = (1 << dev->cpu_port);
@@ -1287,11 +1296,19 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds)
p->interface = interface;
}
}
- if (interface && interface != p->interface)
- dev_info(dev->dev,
- "use %s instead of %s\n",
- phy_modes(p->interface),
- phy_modes(interface));
+ if (interface && interface != p->interface) {
+ prev_msg = " instead of ";
+ prev_mode = phy_modes(interface);
+ } else {
+ prev_msg = "";
+ prev_mode = "";
+ }
+ dev_info(dev->dev,
+ "Port%d: using phy mode %s%s%s\n",
+ i,
+ phy_modes(p->interface),
+ prev_msg,
+ prev_mode);
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
@@ -1435,10 +1452,12 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
/* Default capability is gigabit capable. */
dev->features = GBIT_SUPPORT;
+ dev_dbg(dev->dev, "Switch detect: ID=%08x%02x\n", id32, data8);
id_hi = (u8)(id32 >> 16);
id_lo = (u8)(id32 >> 8);
if ((id_lo & 0xf) == 3) {
/* Chip is from KSZ9893 design. */
+ dev_info(dev->dev, "Found KSZ9893\n");
dev->features |= IS_9893;
/* Chip does not support gigabit. */
@@ -1447,6 +1466,7 @@ static int ksz9477_switch_detect(struct ksz_device *dev)
dev->mib_port_cnt = 3;
dev->phy_port_cnt = 2;
} else {
+ dev_info(dev->dev, "Found KSZ9477 or compatible\n");
/* Chip uses new XMII register definitions. */
dev->features |= NEW_XMII;
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 7951f52d860d..4e053a25d077 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -80,6 +80,7 @@ static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9477" },
{ .compatible = "microchip,ksz9897" },
{ .compatible = "microchip,ksz9893" },
+ { .compatible = "microchip,ksz9563" },
{ .compatible = "microchip,ksz9567" },
{},
};
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index c796d42730ba..0ef854911f21 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -103,14 +103,8 @@ void ksz_init_mib_timer(struct ksz_device *dev)
INIT_DELAYED_WORK(&dev->mib_read, ksz_mib_read_work);
- /* Read MIB counters every 30 seconds to avoid overflow. */
- dev->mib_read_interval = msecs_to_jiffies(30000);
-
for (i = 0; i < dev->mib_port_cnt; i++)
dev->dev_ops->port_init_cnt(dev, i);
-
- /* Start the timer 2 seconds later. */
- schedule_delayed_work(&dev->mib_read, msecs_to_jiffies(2000));
}
EXPORT_SYMBOL_GPL(ksz_init_mib_timer);
@@ -143,7 +137,9 @@ void ksz_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
/* Read all MIB counters when the link is going down. */
p->read = true;
- schedule_delayed_work(&dev->mib_read, 0);
+ /* timer started */
+ if (dev->mib_read_interval)
+ schedule_delayed_work(&dev->mib_read, 0);
}
EXPORT_SYMBOL_GPL(ksz_mac_link_down);
@@ -402,8 +398,9 @@ int ksz_switch_register(struct ksz_device *dev,
if (dev->reset_gpio) {
gpiod_set_value_cansleep(dev->reset_gpio, 1);
- mdelay(10);
+ usleep_range(10000, 12000);
gpiod_set_value_cansleep(dev->reset_gpio, 0);
+ usleep_range(100, 1000);
}
mutex_init(&dev->dev_mutex);
@@ -450,6 +447,12 @@ int ksz_switch_register(struct ksz_device *dev,
return ret;
}
+ /* Read MIB counters every 30 seconds to avoid overflow. */
+ dev->mib_read_interval = msecs_to_jiffies(30000);
+
+ /* Start the MIB timer. */
+ schedule_delayed_work(&dev->mib_read, 0);
+
return 0;
}
EXPORT_SYMBOL(ksz_switch_register);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1aaf47a0da2b..de7692b763d8 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -234,6 +234,12 @@ mt7530_write(struct mt7530_priv *priv, u32 reg, u32 val)
}
static u32
+_mt7530_unlocked_read(struct mt7530_dummy_poll *p)
+{
+ return mt7530_mii_read(p->priv, p->reg);
+}
+
+static u32
_mt7530_read(struct mt7530_dummy_poll *p)
{
struct mii_bus *bus = p->priv->bus;
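_mt7530_unlocked_read() gives readx_poll_timeout() an accessor that does not take the MDIO bus lock, for the indirect PHY helpers below that poll MT7531_PHY_IAC while already holding mdio_lock; polling through the locked _mt7530_read() there would deadlock. A self-contained sketch of the idiom with a hypothetical busy flag:

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/iopoll.h>

    #define EXAMPLE_BUSY    BIT(31)         /* hypothetical busy flag */

    struct example_poll { void __iomem *reg; };

    /* No locking here: the caller is assumed to hold the bus lock. */
    static u32 example_unlocked_read(struct example_poll *p)
    {
            return readl(p->reg);
    }

    /* Wait for busy to clear: sample every 20 us, give up after 100 ms. */
    static int example_wait_idle(struct example_poll *p)
    {
            u32 val;

            return readx_poll_timeout(example_unlocked_read, p, val,
                                      !(val & EXAMPLE_BUSY), 20, 100000);
    }
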
@@ -372,8 +378,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
mt7530_write(priv, MT7530_ATA1 + (i * 4), reg[i]);
}
+/* Setup TX circuit including relevant PAD and driving */
static int
-mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
+mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
u32 ncpo1, ssc_delta, trgint, i, xtal;
@@ -387,7 +394,7 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
return -EINVAL;
}
- switch (mode) {
+ switch (interface) {
case PHY_INTERFACE_MODE_RGMII:
trgint = 0;
/* PLL frequency: 125MHz */
@@ -409,7 +416,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
}
break;
default:
- dev_err(priv->dev, "xMII mode %d not supported\n", mode);
+ dev_err(priv->dev, "xMII interface %d not supported\n",
+ interface);
return -EINVAL;
}
@@ -481,6 +489,108 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, int mode)
return 0;
}
+static bool mt7531_dual_sgmii_supported(struct mt7530_priv *priv)
+{
+ u32 val;
+
+ val = mt7530_read(priv, MT7531_TOP_SIG_SR);
+
+ return (val & PAD_DUAL_SGMII_EN) != 0;
+}
+
+static int
+mt7531_pad_setup(struct dsa_switch *ds, phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 top_sig;
+ u32 hwstrap;
+ u32 xtal;
+ u32 val;
+
+ if (mt7531_dual_sgmii_supported(priv))
+ return 0;
+
+ val = mt7530_read(priv, MT7531_CREV);
+ top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
+ hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ if ((val & CHIP_REV_M) > 0)
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
+ HWTRAP_XTAL_FSEL_25MHZ;
+ else
+ xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+
+ /* Step 1 : Disable MT7531 COREPLL */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val &= ~EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 2: switch to XTAL output */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_CLKSW;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Step 3: disable PLLGP and enable program PLLGP */
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= SW_PLLGP;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+
+ /* Step 4: program COREPLL output frequency to 500MHz */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_POSDIV_M;
+ val |= 2 << RG_COREPLL_POSDIV_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ usleep_range(25, 35);
+
+ switch (xtal) {
+ case HWTRAP_XTAL_FSEL_25MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ case HWTRAP_XTAL_FSEL_40MHZ:
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_M;
+ val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ break;
+ }
+
+ /* Set feedback divide ratio update signal to high */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+ /* Wait for at least 16 XTAL clocks */
+ usleep_range(10, 20);
+
+ /* Step 5: set feedback divide ratio update signal to low */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val &= ~RG_COREPLL_SDM_PCW_CHG;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ /* Enable 325M clock for SGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR5, 0xad0000);
+
+ /* Enable 250SSC clock for RGMII */
+ mt7530_write(priv, MT7531_ANA_PLLGP_CR2, 0x4f40000);
+
+ /* Step 6: Enable MT7531 PLL */
+ val = mt7530_read(priv, MT7531_PLLGP_CR0);
+ val |= RG_COREPLL_EN;
+ mt7530_write(priv, MT7531_PLLGP_CR0, val);
+
+ val = mt7530_read(priv, MT7531_PLLGP_EN);
+ val |= EN_COREPLL;
+ mt7530_write(priv, MT7531_PLLGP_EN, val);
+ usleep_range(25, 35);
+
+ return 0;
+}
+
static void
mt7530_mib_reset(struct dsa_switch *ds)
{
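Every step of mt7531_pad_setup() above is the same read-modify-write shape: read the register, clear the field through its mask, OR in the new value at the field's shift, write it back. A generic sketch of that idiom reusing the driver's accessors; the field mask and shift are hypothetical:

    #include <linux/bits.h>

    #define EX_FIELD_M      GENMASK(25, 23) /* hypothetical field mask */
    #define EX_FIELD_S      23              /* and its shift */

    static void example_update_field(struct mt7530_priv *priv, u32 reg, u32 v)
    {
            u32 val = mt7530_read(priv, reg);

            val &= ~EX_FIELD_M;             /* clear the old field */
            val |= v << EX_FIELD_S;         /* insert the new value */
            mt7530_write(priv, reg, val);
    }
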
@@ -505,6 +615,217 @@ static int mt7530_phy_write(struct dsa_switch *ds, int port, int regnum,
return mdiobus_write_nested(priv->bus, port, regnum, val);
}
+static int
+mt7531_ind_c45_phy_read(struct mt7530_priv *priv, int port, int devad,
+ int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 reg, val;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad);
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c45_phy_write(struct mt7530_priv *priv, int port, int devad,
+ int regnum, u32 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ u32 val, reg;
+ int ret;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_ADDR | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | regnum;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL45_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_DEV_ADDR(devad) | data;
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_read(struct mt7530_priv *priv, int port, int regnum)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 val;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ val = MT7531_MDIO_CL22_READ | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum);
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, val | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, val,
+ !(val & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ ret = val & MT7531_MDIO_RW_DATA_MASK;
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_c22_phy_write(struct mt7530_priv *priv, int port, int regnum,
+ u16 data)
+{
+ struct mii_bus *bus = priv->bus;
+ struct mt7530_dummy_poll p;
+ int ret;
+ u32 reg;
+
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7531_PHY_IAC);
+
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+ reg = MT7531_MDIO_CL22_WRITE | MT7531_MDIO_PHY_ADDR(port) |
+ MT7531_MDIO_REG_ADDR(regnum) | data;
+
+ mt7530_mii_write(priv, MT7531_PHY_IAC, reg | MT7531_PHY_ACS_ST);
+
+ ret = readx_poll_timeout(_mt7530_unlocked_read, &p, reg,
+ !(reg & MT7531_PHY_ACS_ST), 20, 100000);
+ if (ret < 0) {
+ dev_err(priv->dev, "poll timeout\n");
+ goto out;
+ }
+
+out:
+ mutex_unlock(&bus->mdio_lock);
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_read(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK);
+ } else {
+ ret = mt7531_ind_c22_phy_read(priv, port, regnum);
+ }
+
+ return ret;
+}
+
+static int
+mt7531_ind_phy_write(struct dsa_switch *ds, int port, int regnum,
+ u16 data)
+{
+ struct mt7530_priv *priv = ds->priv;
+ int devad;
+ int ret;
+
+ if (regnum & MII_ADDR_C45) {
+ devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
+ ret = mt7531_ind_c45_phy_write(priv, port, devad,
+ regnum & MII_REGADDR_C45_MASK,
+ data);
+ } else {
+ ret = mt7531_ind_c22_phy_write(priv, port, regnum, data);
+ }
+
+ return ret;
+}
+
static void
mt7530_get_strings(struct dsa_switch *ds, int port, u32 stringset,
uint8_t *data)
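mt7531_ind_phy_read/write above split a combined register number using the kernel's Clause 45 encoding: MII_ADDR_C45 flags a C45 access, the device address sits at MII_DEVADDR_C45_SHIFT, and the low bits under MII_REGADDR_C45_MASK are the register itself. A sketch of packing and unpacking such a value; the devad and register numbers are illustrative:

    #include <linux/mdio.h>

    static void example_c45_regnum(void)
    {
            int regnum = MII_ADDR_C45 | (7 << MII_DEVADDR_C45_SHIFT) | 0x20;
            int devad, reg;

            if (regnum & MII_ADDR_C45) {
                    devad = (regnum >> MII_DEVADDR_C45_SHIFT) & 0x1f;
                    reg = regnum & MII_REGADDR_C45_MASK;
                    /* devad == 7 and reg == 0x20 at this point */
            }
    }
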
@@ -621,9 +942,18 @@ unlock_exit:
}
static int
-mt7530_cpu_port_enable(struct mt7530_priv *priv,
- int port)
+mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
{
+ struct mt7530_priv *priv = ds->priv;
+ int ret;
+
+ /* Set up the maximum capability of the CPU port first */
+ if (priv->info->cpu_port_config) {
+ ret = priv->info->cpu_port_config(ds, port);
+ if (ret)
+ return ret;
+ }
+
/* Enable Mediatek header mode on the cpu port */
mt7530_write(priv, MT7530_PVC_P(port),
PORT_SPEC_TAG);
@@ -636,7 +966,7 @@ mt7530_cpu_port_enable(struct mt7530_priv *priv,
mt7530_rmw(priv, MT7530_MFC, CPU_MASK, CPU_EN | CPU_PORT(port));
/* CPU port gets connected to all user ports of
- * the switch
+ * the switch.
*/
mt7530_write(priv, MT7530_PCR_P(port),
PCR_MATRIX(dsa_user_ports(priv->ds)));
@@ -959,8 +1289,12 @@ mt7530_vlan_cmd(struct mt7530_priv *priv, enum mt7530_vlan_cmd cmd, u16 vid)
static int
mt7530_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
if (vlan_filtering) {
/* The port is being kept as VLAN-unaware port when bridge is
* set up with vlan_filtering not being set, Otherwise, the
@@ -1130,27 +1464,42 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
+static int mt753x_mirror_port_get(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_GET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_mirror_port_set(unsigned int id, u32 val)
+{
+ return (id == ID_MT7531) ? MT7531_MIRROR_PORT_SET(val) :
+ MIRROR_PORT(val);
+}
+
+static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress)
{
struct mt7530_priv *priv = ds->priv;
+ int monitor_port;
u32 val;
/* Check for existent entry */
if ((ingress ? priv->mirror_rx : priv->mirror_tx) & BIT(port))
return -EEXIST;
- val = mt7530_read(priv, MT7530_MFC);
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
- if (val & MIRROR_EN && MIRROR_PORT(val) != mirror->to_local_port)
+ monitor_port = mt753x_mirror_port_get(priv->id, val);
+ if (val & MT753X_MIRROR_EN(priv->id) &&
+ monitor_port != mirror->to_local_port)
return -EEXIST;
- val |= MIRROR_EN;
- val &= ~MIRROR_MASK;
- val |= mirror->to_local_port;
- mt7530_write(priv, MT7530_MFC, val);
+ val |= MT753X_MIRROR_EN(priv->id);
+ val &= ~MT753X_MIRROR_MASK(priv->id);
+ val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
if (ingress) {
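The mirror path now selects its register and bit layout by chip ID: MT7531 keeps the monitor port and enable bit in CFC, older chips in MFC, and the MT753X_MIRROR_* helpers defined in mt7530.h below hide the difference behind a ternary on priv->id. A condensed sketch of the resulting single code path:

    /* Sketch: enable mirroring to to_port on either register layout,
     * using the MT753X_MIRROR_* helpers added in mt7530.h.
     */
    static void example_set_monitor_port(struct mt7530_priv *priv, int to_port)
    {
            u32 val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));

            val &= ~MT753X_MIRROR_MASK(priv->id);
            val |= mt753x_mirror_port_set(priv->id, to_port);
            val |= MT753X_MIRROR_EN(priv->id);
            mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
    }
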
@@ -1165,7 +1514,7 @@ static int mt7530_port_mirror_add(struct dsa_switch *ds, int port,
return 0;
}
-static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
+static void mt753x_port_mirror_del(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror)
{
struct mt7530_priv *priv = ds->priv;
@@ -1182,9 +1531,9 @@ static void mt7530_port_mirror_del(struct dsa_switch *ds, int port,
mt7530_write(priv, MT7530_PCR_P(port), val);
if (!priv->mirror_rx && !priv->mirror_tx) {
- val = mt7530_read(priv, MT7530_MFC);
- val &= ~MIRROR_EN;
- mt7530_write(priv, MT7530_MFC, val);
+ val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
+ val &= ~MT753X_MIRROR_EN(priv->id);
+ mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
}
}
@@ -1290,9 +1639,11 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
PCR_MATRIX_CLR);
- if (dsa_is_cpu_port(ds, i))
- mt7530_cpu_port_enable(priv, i);
- else
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else
mt7530_port_disable(ds, i);
/* Enable consistent egress tag */
@@ -1352,51 +1703,492 @@ mt7530_setup(struct dsa_switch *ds)
return 0;
}
-static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static int
+mt7531_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct mt7530_dummy_poll p;
+ u32 val, id;
+ int ret, i;
+
+ /* Reset the whole chip through the GPIO pin or the memory-mapped
+ * registers, depending on the type of hardware.
+ */
+ if (priv->mcm) {
+ reset_control_assert(priv->rstc);
+ usleep_range(1000, 1100);
+ reset_control_deassert(priv->rstc);
+ } else {
+ gpiod_set_value_cansleep(priv->reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value_cansleep(priv->reset, 1);
+ }
+
+ /* Wait for the switch to become stable */
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
+ 20, 1000000);
+ if (ret < 0) {
+ dev_err(priv->dev, "reset timeout\n");
+ return ret;
+ }
+
+ id = mt7530_read(priv, MT7531_CREV);
+ id >>= CHIP_NAME_SHIFT;
+
+ if (id != MT7531_ID) {
+ dev_err(priv->dev, "chip %x is not supported\n", id);
+ return -ENODEV;
+ }
+
+ /* Reset the switch through internal reset */
+ mt7530_write(priv, MT7530_SYS_CTRL,
+ SYS_CTRL_PHY_RST | SYS_CTRL_SW_RST |
+ SYS_CTRL_REG_RST);
+
+ if (mt7531_dual_sgmii_supported(priv)) {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5_SGMII;
+
+ /* Let ds->slave_mii_bus be able to access external phy. */
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
+ MT7531_EXT_P_MDC_11);
+ mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
+ MT7531_EXT_P_MDIO_12);
+ } else {
+ priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
+ }
+ dev_dbg(ds->dev, "P5 supports %s interface\n",
+ p5_intf_modes(priv->p5_intf_sel));
+
+ mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
+ MT7531_GPIO0_INTERRUPT);
+
+ /* Let phylink decide the interface later. */
+ priv->p5_interface = PHY_INTERFACE_MODE_NA;
+ priv->p6_interface = PHY_INTERFACE_MODE_NA;
+
+ /* Enable the PHY core PLL. Since no phy_device has been created yet
+ * for phy_[read,write]_mmd_indirect to operate on, do the access
+ * through our own mt7531_ind_c45_phy_[read,write] helpers instead.
+ */
+ val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4);
+ val |= MT7531_PHY_PLL_BYPASS_MODE;
+ val &= ~MT7531_PHY_PLL_OFF;
+ mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
+ CORE_PLL_GROUP4, val);
+
+ /* BPDU to CPU port */
+ mt7530_rmw(priv, MT7531_CFC, MT7531_CPU_PMAP_MASK,
+ BIT(MT7530_CPU_PORT));
+ mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ MT753X_BPDU_CPU_ONLY);
+
+ /* Enable and reset MIB counters */
+ mt7530_mib_reset(ds);
+
+ for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
+ mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
+ PCR_MATRIX_CLR);
+
+ mt7530_set(priv, MT7531_DBG_CNT(i), MT7531_DIS_CLR);
+
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = mt753x_cpu_port_enable(ds, i);
+ if (ret)
+ return ret;
+ } else
+ mt7530_port_disable(ds, i);
+
+ /* Enable consistent egress tag */
+ mt7530_rmw(priv, MT7530_PVC_P(i), PVC_EG_TAG_MASK,
+ PVC_EG_TAG(MT7530_VLAN_EG_CONSISTENT));
+ }
+
+ ds->configure_vlan_while_not_filtering = true;
+
+ /* Flush the FDB table */
+ ret = mt7530_fdb_cmd(priv, MT7530_FDB_FLUSH, NULL);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static bool
+mt7530_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
{
struct mt7530_priv *priv = ds->priv;
- u32 mcr_cur, mcr_new;
switch (port) {
- case 0: /* Internal phy */
- case 1:
- case 2:
- case 3:
- case 4:
+ case 0 ... 4: /* Internal phy */
if (state->interface != PHY_INTERFACE_MODE_GMII)
- return;
+ return false;
break;
case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (priv->p5_interface == state->interface)
- break;
if (!phy_interface_mode_is_rgmii(state->interface) &&
state->interface != PHY_INTERFACE_MODE_MII &&
state->interface != PHY_INTERFACE_MODE_GMII)
- return;
+ return false;
+ break;
+ case 6: /* 1st cpu port */
+ if (state->interface != PHY_INTERFACE_MODE_RGMII &&
+ state->interface != PHY_INTERFACE_MODE_TRGMII)
+ return false;
+ break;
+ default:
+ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
+ port);
+ return false;
+ }
- mt7530_setup_port5(ds, state->interface);
+ return true;
+}
+
+static bool mt7531_is_rgmii_port(struct mt7530_priv *priv, u32 port)
+{
+ return (port == 5) && (priv->p5_intf_sel != P5_INTF_SEL_GMAC5_SGMII);
+}
+
+static bool
+mt7531_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ return false;
+ break;
+ case 5: /* 2nd cpu port supports either rgmii or sgmii/8023z */
+ if (mt7531_is_rgmii_port(priv, port))
+ return phy_interface_mode_is_rgmii(state->interface);
+ fallthrough;
+ case 6: /* 1st cpu port supports sgmii/8023z only */
+ if (state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface))
+ return false;
+ break;
+ default:
+ dev_err(priv->dev, "%s: unsupported port: %i\n", __func__,
+ port);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+mt753x_phy_mode_supported(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_mode_supported(ds, port, state);
+}
+
+static int
+mt753x_pad_setup(struct dsa_switch *ds, const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->pad_setup(ds, state->interface);
+}
+
+static int
+mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ /* Only need to setup port5. */
+ if (port != 5)
+ return 0;
+
+ mt7530_setup_port5(priv->ds, interface);
+
+ return 0;
+}
+
+static int mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface,
+ struct phy_device *phydev)
+{
+ u32 val;
+
+ if (!mt7531_is_rgmii_port(priv, port)) {
+ dev_err(priv->dev, "RGMII mode is not available for port %d\n",
+ port);
+ return -EINVAL;
+ }
+
+ val = mt7530_read(priv, MT7531_CLKGEN_CTRL);
+ val |= GP_CLK_EN;
+ val &= ~GP_MODE_MASK;
+ val |= GP_MODE(MT7531_GP_MODE_RGMII);
+ val &= ~CLK_SKEW_IN_MASK;
+ val |= CLK_SKEW_IN(MT7531_CLK_SKEW_NO_CHG);
+ val &= ~CLK_SKEW_OUT_MASK;
+ val |= CLK_SKEW_OUT(MT7531_CLK_SKEW_NO_CHG);
+ val |= TXCLK_NO_REVERSE | RXCLK_NO_DELAY;
+
+ /* Do not adjust the RGMII delay when a vendor PHY driver is present. */
+ if (!phydev || phy_driver_is_genphy(phydev)) {
+ val &= ~(TXCLK_NO_REVERSE | RXCLK_NO_DELAY);
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= TXCLK_NO_REVERSE;
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val |= TXCLK_NO_REVERSE;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val |= RXCLK_NO_DELAY;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ mt7530_write(priv, MT7531_CLKGEN_CTRL, val);
+
+ return 0;
+}
+
+static void mt7531_sgmii_validate(struct mt7530_priv *priv, int port,
+ unsigned long *supported)
+{
+ /* Port5 supports either RGMII or SGMII.
+ * Port6 supports SGMII only.
+ */
+ switch (port) {
+ case 5:
+ if (mt7531_is_rgmii_port(priv, port))
+ break;
+ fallthrough;
+ case 6:
+ phylink_set(supported, 1000baseX_Full);
+ phylink_set(supported, 2500baseX_Full);
+ phylink_set(supported, 2500baseT_Full);
+ }
+}
+
+static void
+mt7531_sgmii_link_up_force(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mt7530_priv *priv = ds->priv;
+ unsigned int val;
+
+ /* For adjusting speed and duplex of SGMII force mode. */
+ if (interface != PHY_INTERFACE_MODE_SGMII ||
+ phylink_autoneg_inband(mode))
+ return;
+
+ /* SGMII force mode setting */
+ val = mt7530_read(priv, MT7531_SGMII_MODE(port));
+ val &= ~MT7531_SGMII_IF_MODE_MASK;
+
+ switch (speed) {
+ case SPEED_10:
+ val |= MT7531_SGMII_FORCE_SPEED_10;
+ break;
+ case SPEED_100:
+ val |= MT7531_SGMII_FORCE_SPEED_100;
+ break;
+ case SPEED_1000:
+ val |= MT7531_SGMII_FORCE_SPEED_1000;
+ break;
+ }
+
+ /* MT7531 SGMII 1G force mode only works in full duplex,
+ * regardless of whether MT7531_SGMII_FORCE_HALF_DUPLEX is set.
+ */
+ if ((speed == SPEED_10 || speed == SPEED_100) &&
+ duplex != DUPLEX_FULL)
+ val |= MT7531_SGMII_FORCE_HALF_DUPLEX;
+
+ mt7530_write(priv, MT7531_SGMII_MODE(port), val);
+}
+
+static bool mt753x_is_mac_port(u32 port)
+{
+ return (port == 5 || port == 6);
+}
+
+static int mt7531_sgmii_setup_mode_force(struct mt7530_priv *priv, u32 port,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ val = mt7530_read(priv, MT7531_PHYA_CTRL_SIGNAL3(port));
+ val &= ~MT7531_RG_TPHY_SPEED_MASK;
+ /* Set up a 2.5 times faster clock for 2.5 Gbps data speeds with
+ * 8b/10b encoding.
+ */
+ val |= (interface == PHY_INTERFACE_MODE_2500BASEX) ?
+ MT7531_RG_TPHY_SPEED_3_125G : MT7531_RG_TPHY_SPEED_1_25G;
+ mt7530_write(priv, MT7531_PHYA_CTRL_SIGNAL3(port), val);
+
+ mt7530_clear(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ /* MT7531 SGMII 1G and 2.5G force mode only work in full duplex,
+ * regardless of whether MT7531_SGMII_FORCE_HALF_DUPLEX is set.
+ */
+ mt7530_rmw(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_IF_MODE_MASK | MT7531_SGMII_REMOTE_FAULT_DIS,
+ MT7531_SGMII_FORCE_SPEED_1000);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static int mt7531_sgmii_setup_mode_an(struct mt7530_priv *priv, int port,
+ phy_interface_t interface)
+{
+ if (!mt753x_is_mac_port(port))
+ return -EINVAL;
+
+ mt7530_set(priv, MT7531_QPHY_PWR_STATE_CTRL(port),
+ MT7531_SGMII_PHYA_PWD);
+
+ mt7530_rmw(priv, MT7531_PHYA_CTRL_SIGNAL3(port),
+ MT7531_RG_TPHY_SPEED_MASK, MT7531_RG_TPHY_SPEED_1_25G);
+
+ mt7530_set(priv, MT7531_SGMII_MODE(port),
+ MT7531_SGMII_REMOTE_FAULT_DIS |
+ MT7531_SGMII_SPEED_DUPLEX_AN);
+
+ mt7530_rmw(priv, MT7531_PCS_SPEED_ABILITY(port),
+ MT7531_SGMII_TX_CONFIG_MASK, 1);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_ENABLE);
+
+ mt7530_set(priv, MT7531_PCS_CONTROL_1(port), MT7531_SGMII_AN_RESTART);
+
+ mt7530_write(priv, MT7531_QPHY_PWR_STATE_CTRL(port), 0);
+
+ return 0;
+}
+
+static void mt7531_sgmii_restart_an(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 val;
+
+ /* Only restart AN when AN is enabled */
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ if (val & MT7531_SGMII_AN_ENABLE) {
+ val |= MT7531_SGMII_AN_RESTART;
+ mt7530_write(priv, MT7531_PCS_CONTROL_1(port), val);
+ }
+}
+
+static int
+mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mt7530_priv *priv = ds->priv;
+ struct phy_device *phydev;
+ struct dsa_port *dp;
+
+ if (!mt753x_is_mac_port(port)) {
+ dev_err(priv->dev, "port %d is not a MAC port\n", port);
+ return -EINVAL;
+ }
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dp = dsa_to_port(ds, port);
+ phydev = dp->slave->phydev;
+ return mt7531_rgmii_setup(priv, port, interface, phydev);
+ case PHY_INTERFACE_MODE_SGMII:
+ return mt7531_sgmii_setup_mode_an(priv, port, interface);
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (phylink_autoneg_inband(mode))
+ return -EINVAL;
+
+ return mt7531_sgmii_setup_mode_force(priv, port, interface);
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int
+mt753x_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->mac_port_config(ds, port, mode, state->interface);
+}
+
+static void
+mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+ u32 mcr_cur, mcr_new;
+
+ if (!mt753x_phy_mode_supported(ds, port, state))
+ goto unsupported;
+
+ switch (port) {
+ case 0 ... 4: /* Internal phy */
+ if (state->interface != PHY_INTERFACE_MODE_GMII)
+ goto unsupported;
+ break;
+ case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
+ if (priv->p5_interface == state->interface)
+ break;
+
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
+
+ if (priv->p5_intf_sel != P5_DISABLED)
+ priv->p5_interface = state->interface;
break;
case 6: /* 1st cpu port */
if (priv->p6_interface == state->interface)
break;
- if (state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- return;
+ mt753x_pad_setup(ds, state);
- /* Setup TX circuit incluing relevant PAD and driving */
- mt7530_pad_clk_setup(ds, state->interface);
+ if (mt753x_mac_config(ds, port, mode, state) < 0)
+ goto unsupported;
priv->p6_interface = state->interface;
break;
default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
+unsupported:
+ dev_err(ds->dev, "%s: unsupported %s port: %i\n",
+ __func__, phy_modes(state->interface), port);
return;
}
- if (phylink_autoneg_inband(mode)) {
+ if (phylink_autoneg_inband(mode) &&
+ state->interface != PHY_INTERFACE_MODE_SGMII) {
dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
__func__);
return;
@@ -1406,7 +2198,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
- PMCR_BACKPR_EN | PMCR_FORCE_MODE;
+ PMCR_BACKPR_EN | PMCR_FORCE_MODE_ID(priv->id);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
@@ -1416,7 +2208,18 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
mt7530_write(priv, MT7530_PMCR_P(port), mcr_new);
}
-static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void
+mt753x_phylink_mac_an_restart(struct dsa_switch *ds, int port)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (!priv->info->mac_pcs_an_restart)
+ return;
+
+ priv->info->mac_pcs_an_restart(ds, port);
+}
+
+static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface)
{
@@ -1425,7 +2228,19 @@ static void mt7530_phylink_mac_link_down(struct dsa_switch *ds, int port,
mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
}
-static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void mt753x_mac_pcs_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (!priv->info->mac_pcs_link_up)
+ return;
+
+ priv->info->mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+}
+
+static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface,
struct phy_device *phydev,
@@ -1435,8 +2250,19 @@ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
struct mt7530_priv *priv = ds->priv;
u32 mcr;
+ mt753x_mac_pcs_link_up(ds, port, mode, interface, speed, duplex);
+
mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+ /* MT753x MAC works in 1G full duplex mode for all up-clocked
+ * variants.
+ */
+ if (interface == PHY_INTERFACE_MODE_TRGMII ||
+ (phy_interface_mode_is_8023z(interface))) {
+ speed = SPEED_1000;
+ duplex = DUPLEX_FULL;
+ }
+
switch (speed) {
case SPEED_1000:
mcr |= PMCR_FORCE_SPEED_1000;
@@ -1456,66 +2282,107 @@ static void mt7530_phylink_mac_link_up(struct dsa_switch *ds, int port,
mt7530_set(priv, MT7530_PMCR_P(port), mcr);
}
-static void mt7530_phylink_validate(struct dsa_switch *ds, int port,
- unsigned long *supported,
- struct phylink_link_state *state)
+static int
+mt7531_cpu_port_config(struct dsa_switch *ds, int port)
{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mt7530_priv *priv = ds->priv;
+ phy_interface_t interface;
+ int speed;
+ int ret;
switch (port) {
- case 0: /* Internal phy */
- case 1:
- case 2:
- case 3:
- case 4:
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
- break;
- case 5: /* 2nd cpu port with phy of port 0 or 4 / external phy */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- !phy_interface_mode_is_rgmii(state->interface) &&
- state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_GMII)
- goto unsupported;
+ case 5:
+ if (mt7531_is_rgmii_port(priv, port))
+ interface = PHY_INTERFACE_MODE_RGMII;
+ else
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ priv->p5_interface = interface;
break;
- case 6: /* 1st cpu port */
- if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_RGMII &&
- state->interface != PHY_INTERFACE_MODE_TRGMII)
- goto unsupported;
+ case 6:
+ interface = PHY_INTERFACE_MODE_2500BASEX;
+
+ mt7531_pad_setup(ds, interface);
+
+ priv->p6_interface = interface;
break;
default:
- dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
-unsupported:
+ return -EINVAL;
+ }
+
+ if (interface == PHY_INTERFACE_MODE_2500BASEX)
+ speed = SPEED_2500;
+ else
+ speed = SPEED_1000;
+
+ ret = mt7531_mac_config(ds, port, MLO_AN_FIXED, interface);
+ if (ret)
+ return ret;
+ mt7530_write(priv, MT7530_PMCR_P(port),
+ PMCR_CPU_PORT_SETTING(priv->id));
+ mt753x_phylink_mac_link_up(ds, port, MLO_AN_FIXED, interface, NULL,
+ speed, DUPLEX_FULL, true, true);
+
+ return 0;
+}
+
+static void
+mt7530_mac_port_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported)
+{
+ if (port == 5)
+ phylink_set(supported, 1000baseX_Full);
+}
+
+static void mt7531_mac_port_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ mt7531_sgmii_validate(priv, port, supported);
+}
+
+static void
+mt753x_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ struct mt7530_priv *priv = ds->priv;
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !mt753x_phy_mode_supported(ds, port, state)) {
linkmode_zero(supported);
return;
}
phylink_set_port_modes(mask);
- phylink_set(mask, Autoneg);
- if (state->interface == PHY_INTERFACE_MODE_TRGMII) {
- phylink_set(mask, 1000baseT_Full);
- } else {
+ if (state->interface != PHY_INTERFACE_MODE_TRGMII &&
+ !phy_interface_mode_is_8023z(state->interface)) {
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
-
- if (state->interface != PHY_INTERFACE_MODE_MII) {
- /* This switch only supports 1G full-duplex. */
- phylink_set(mask, 1000baseT_Full);
- if (port == 5)
- phylink_set(mask, 1000baseX_Full);
- }
+ phylink_set(mask, Autoneg);
}
+ /* This switch only supports 1G full-duplex. */
+ if (state->interface != PHY_INTERFACE_MODE_MII)
+ phylink_set(mask, 1000baseT_Full);
+
+ priv->info->mac_port_validate(ds, port, mask);
+
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
linkmode_and(supported, supported, mask);
linkmode_and(state->advertising, state->advertising, mask);
+
+ /* We can only operate at 2500BaseX or 1000BaseX. If requested
+ * to advertise both, only report advertising at 2500BaseX.
+ */
+ phylink_helper_basex_speed(state);
}
static int
@@ -1558,12 +2425,96 @@ mt7530_phylink_mac_link_state(struct dsa_switch *ds, int port,
return 1;
}
+static int
+mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ u32 status, val;
+ u16 config_reg;
+
+ status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ if (state->interface == PHY_INTERFACE_MODE_SGMII &&
+ (status & MT7531_SGMII_AN_ENABLE)) {
+ val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
+ config_reg = val >> 16;
+
+ switch (config_reg & LPA_SGMII_SPD_MASK) {
+ case LPA_SGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ case LPA_SGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case LPA_SGMII_10:
+ state->speed = SPEED_10;
+ break;
+ default:
+ dev_err(priv->dev, "invalid sgmii PHY speed\n");
+ state->link = false;
+ return -EINVAL;
+ }
+
+ if (config_reg & LPA_SGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+ }
+
+ return 0;
+}
+
+static int
+mt7531_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ return mt7531_sgmii_pcs_get_state_an(priv, port, state);
+
+ return -EOPNOTSUPP;
+}
+
+static int
+mt753x_phylink_mac_link_state(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->mac_port_get_state(ds, port, state);
+}
+
+static int
+mt753x_setup(struct dsa_switch *ds)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->sw_setup(ds);
+}
+
+static int
+mt753x_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_read(ds, port, regnum);
+}
+
+static int
+mt753x_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ struct mt7530_priv *priv = ds->priv;
+
+ return priv->info->phy_write(ds, port, regnum, val);
+}
+
static const struct dsa_switch_ops mt7530_switch_ops = {
.get_tag_protocol = mtk_get_tag_protocol,
- .setup = mt7530_setup,
+ .setup = mt753x_setup,
.get_strings = mt7530_get_strings,
- .phy_read = mt7530_phy_read,
- .phy_write = mt7530_phy_write,
+ .phy_read = mt753x_phy_read,
+ .phy_write = mt753x_phy_write,
.get_ethtool_stats = mt7530_get_ethtool_stats,
.get_sset_count = mt7530_get_sset_count,
.port_enable = mt7530_port_enable,
@@ -1578,18 +2529,59 @@ static const struct dsa_switch_ops mt7530_switch_ops = {
.port_vlan_prepare = mt7530_port_vlan_prepare,
.port_vlan_add = mt7530_port_vlan_add,
.port_vlan_del = mt7530_port_vlan_del,
- .port_mirror_add = mt7530_port_mirror_add,
- .port_mirror_del = mt7530_port_mirror_del,
- .phylink_validate = mt7530_phylink_validate,
- .phylink_mac_link_state = mt7530_phylink_mac_link_state,
- .phylink_mac_config = mt7530_phylink_mac_config,
- .phylink_mac_link_down = mt7530_phylink_mac_link_down,
- .phylink_mac_link_up = mt7530_phylink_mac_link_up,
+ .port_mirror_add = mt753x_port_mirror_add,
+ .port_mirror_del = mt753x_port_mirror_del,
+ .phylink_validate = mt753x_phylink_validate,
+ .phylink_mac_link_state = mt753x_phylink_mac_link_state,
+ .phylink_mac_config = mt753x_phylink_mac_config,
+ .phylink_mac_an_restart = mt753x_phylink_mac_an_restart,
+ .phylink_mac_link_down = mt753x_phylink_mac_link_down,
+ .phylink_mac_link_up = mt753x_phylink_mac_link_up,
+};
+
+static const struct mt753x_info mt753x_table[] = {
+ [ID_MT7621] = {
+ .id = ID_MT7621,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .phy_mode_supported = mt7530_phy_mode_supported,
+ .mac_port_validate = mt7530_mac_port_validate,
+ .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7530] = {
+ .id = ID_MT7530,
+ .sw_setup = mt7530_setup,
+ .phy_read = mt7530_phy_read,
+ .phy_write = mt7530_phy_write,
+ .pad_setup = mt7530_pad_clk_setup,
+ .phy_mode_supported = mt7530_phy_mode_supported,
+ .mac_port_validate = mt7530_mac_port_validate,
+ .mac_port_get_state = mt7530_phylink_mac_link_state,
+ .mac_port_config = mt7530_mac_config,
+ },
+ [ID_MT7531] = {
+ .id = ID_MT7531,
+ .sw_setup = mt7531_setup,
+ .phy_read = mt7531_ind_phy_read,
+ .phy_write = mt7531_ind_phy_write,
+ .pad_setup = mt7531_pad_setup,
+ .cpu_port_config = mt7531_cpu_port_config,
+ .phy_mode_supported = mt7531_phy_mode_supported,
+ .mac_port_validate = mt7531_mac_port_validate,
+ .mac_port_get_state = mt7531_phylink_mac_link_state,
+ .mac_port_config = mt7531_mac_config,
+ .mac_pcs_an_restart = mt7531_sgmii_restart_an,
+ .mac_pcs_link_up = mt7531_sgmii_link_up_force,
+ },
};
static const struct of_device_id mt7530_of_match[] = {
- { .compatible = "mediatek,mt7621", .data = (void *)ID_MT7621, },
- { .compatible = "mediatek,mt7530", .data = (void *)ID_MT7530, },
+ { .compatible = "mediatek,mt7621", .data = &mt753x_table[ID_MT7621], },
+ { .compatible = "mediatek,mt7530", .data = &mt753x_table[ID_MT7530], },
+ { .compatible = "mediatek,mt7531", .data = &mt753x_table[ID_MT7531], },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, mt7530_of_match);
@@ -1630,8 +2622,21 @@ mt7530_probe(struct mdio_device *mdiodev)
/* Get the hardware identifier from the devicetree node.
* We will need it for some of the clock and regulator setup.
*/
- priv->id = (unsigned int)(unsigned long)
- of_device_get_match_data(&mdiodev->dev);
+ priv->info = of_device_get_match_data(&mdiodev->dev);
+ if (!priv->info)
+ return -EINVAL;
+
+ /* Sanity check if these required device operations are filled
+ * properly.
+ */
+ if (!priv->info->sw_setup || !priv->info->pad_setup ||
+ !priv->info->phy_read || !priv->info->phy_write ||
+ !priv->info->phy_mode_supported ||
+ !priv->info->mac_port_validate ||
+ !priv->info->mac_port_get_state || !priv->info->mac_port_config)
+ return -EINVAL;
+
+ priv->id = priv->info->id;
if (priv->id == ID_MT7530) {
priv->core_pwr = devm_regulator_get(&mdiodev->dev, "core");
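The probe change above swaps a bare ID for the standard OF match-data pattern: each compatible entry points at a const per-chip ops structure, of_device_get_match_data() returns it at probe time, and the driver rejects binding when a required op is missing. A pared-down sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/of_device.h>

    struct example_priv;

    struct example_info {
            unsigned int id;
            int (*setup)(struct example_priv *priv);
    };

    static int example_setup_a(struct example_priv *priv) { return 0; }
    static int example_setup_b(struct example_priv *priv) { return 0; }

    static const struct example_info example_table[] = {
            { .id = 0, .setup = example_setup_a },
            { .id = 1, .setup = example_setup_b },
    };

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,chip-a", .data = &example_table[0] },
            { .compatible = "vendor,chip-b", .data = &example_table[1] },
            { /* sentinel */ },
    };

    static int example_probe(struct device *dev, struct example_priv *priv)
    {
            const struct example_info *info = of_device_get_match_data(dev);

            /* Refuse to bind if match data or a required op is missing. */
            if (!info || !info->setup)
                    return -EINVAL;

            return info->setup(priv);
    }
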
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index 14de60d0b9ca..9278a8e3d04e 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -11,9 +11,10 @@
#define MT7530_NUM_FDB_RECORDS 2048
#define MT7530_ALL_MEMBERS 0xff
-enum {
+enum mt753x_id {
ID_MT7530 = 0,
ID_MT7621 = 1,
+ ID_MT7531 = 2,
};
#define NUM_TRGMII_CTRL 5
@@ -41,6 +42,33 @@ enum {
#define MIRROR_PORT(x) ((x) & 0x7)
#define MIRROR_MASK 0x7
+/* Registers for CPU forward control */
+#define MT7531_CFC 0x4
+#define MT7531_MIRROR_EN BIT(19)
+#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
+#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
+#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
+
+#define MT753X_MIRROR_REG(id) (((id) == ID_MT7531) ? \
+ MT7531_CFC : MT7530_MFC)
+#define MT753X_MIRROR_EN(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_EN : MIRROR_EN)
+#define MT753X_MIRROR_MASK(id) (((id) == ID_MT7531) ? \
+ MT7531_MIRROR_MASK : MIRROR_MASK)
+
+/* Registers for BPDU and PAE frame control */
+#define MT753X_BPC 0x24
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+enum mt753x_bpdu_port_fw {
+ MT753X_BPDU_FOLLOW_MFC,
+ MT753X_BPDU_CPU_EXCLUDE = 4,
+ MT753X_BPDU_CPU_INCLUDE = 5,
+ MT753X_BPDU_CPU_ONLY = 6,
+ MT753X_BPDU_DROP = 7,
+};
+
/* Registers for address table access */
#define MT7530_ATA1 0x74
#define STATIC_EMP 0
@@ -220,10 +248,30 @@ enum mt7530_vlan_port_attr {
#define PMCR_FORCE_LNK BIT(0)
#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
PMCR_FORCE_SPEED_1000)
+#define MT7531_FORCE_LNK BIT(31)
+#define MT7531_FORCE_SPD BIT(30)
+#define MT7531_FORCE_DPX BIT(29)
+#define MT7531_FORCE_RX_FC BIT(28)
+#define MT7531_FORCE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
+ MT7531_FORCE_SPD | \
+ MT7531_FORCE_DPX | \
+ MT7531_FORCE_RX_FC | \
+ MT7531_FORCE_TX_FC)
+#define PMCR_FORCE_MODE_ID(id) (((id) == ID_MT7531) ? \
+ MT7531_FORCE_MODE : \
+ PMCR_FORCE_MODE)
#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+#define PMCR_CPU_PORT_SETTING(id) (PMCR_FORCE_MODE_ID((id)) | \
+ PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | \
+ PMCR_BACKOFF_EN | PMCR_BACKPR_EN | \
+ PMCR_TX_EN | PMCR_RX_EN | \
+ PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
#define PMSR_EEE1G BIT(7)
@@ -237,6 +285,10 @@ enum mt7530_vlan_port_attr {
#define PMSR_DPX BIT(1)
#define PMSR_LINK BIT(0)
+/* Register for port debug count */
+#define MT7531_DBG_CNT(x) (0x3018 + (x) * 0x100)
+#define MT7531_DIS_CLR BIT(31)
+
/* Register for MIB */
#define MT7530_PORT_MIB_COUNTER(x) (0x4000 + (x) * 0x100)
#define MT7530_MIB_CCR 0x4fe0
@@ -254,12 +306,118 @@ enum mt7530_vlan_port_attr {
CCR_RX_OCT_CNT_BAD | \
CCR_TX_OCT_CNT_GOOD | \
CCR_TX_OCT_CNT_BAD)
+
+/* MT7531 SGMII register group */
+#define MT7531_SGMII_REG_BASE 0x5000
+#define MT7531_SGMII_REG(p, r) (MT7531_SGMII_REG_BASE + \
+ ((p) - 5) * 0x1000 + (r))
+
+/* Register for SGMII PCS_CONTROL_1 */
+#define MT7531_PCS_CONTROL_1(p) MT7531_SGMII_REG(p, 0x00)
+#define MT7531_SGMII_LINK_STATUS BIT(18)
+#define MT7531_SGMII_AN_ENABLE BIT(12)
+#define MT7531_SGMII_AN_RESTART BIT(9)
+
+/* Register for SGMII PCS_SPEED_ABILITY */
+#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
+#define MT7531_SGMII_TX_CONFIG_MASK GENMASK(15, 0)
+#define MT7531_SGMII_TX_CONFIG BIT(0)
+
+/* Register for SGMII_MODE */
+#define MT7531_SGMII_MODE(p) MT7531_SGMII_REG(p, 0x20)
+#define MT7531_SGMII_REMOTE_FAULT_DIS BIT(8)
+#define MT7531_SGMII_IF_MODE_MASK GENMASK(5, 1)
+#define MT7531_SGMII_FORCE_DUPLEX BIT(4)
+#define MT7531_SGMII_FORCE_SPEED_MASK GENMASK(3, 2)
+#define MT7531_SGMII_FORCE_SPEED_1000 BIT(3)
+#define MT7531_SGMII_FORCE_SPEED_100 BIT(2)
+#define MT7531_SGMII_FORCE_SPEED_10 0
+#define MT7531_SGMII_SPEED_DUPLEX_AN BIT(1)
+
+enum mt7531_sgmii_force_duplex {
+ MT7531_SGMII_FORCE_FULL_DUPLEX = 0,
+ MT7531_SGMII_FORCE_HALF_DUPLEX = 0x10,
+};
+
+/* Fields of QPHY_PWR_STATE_CTRL */
+#define MT7531_QPHY_PWR_STATE_CTRL(p) MT7531_SGMII_REG(p, 0xe8)
+#define MT7531_SGMII_PHYA_PWD BIT(4)
+
+/* Values of SGMII SPEED */
+#define MT7531_PHYA_CTRL_SIGNAL3(p) MT7531_SGMII_REG(p, 0x128)
+#define MT7531_RG_TPHY_SPEED_MASK (BIT(2) | BIT(3))
+#define MT7531_RG_TPHY_SPEED_1_25G 0x0
+#define MT7531_RG_TPHY_SPEED_3_125G BIT(2)
+
/* Register for system reset */
#define MT7530_SYS_CTRL 0x7000
#define SYS_CTRL_PHY_RST BIT(2)
#define SYS_CTRL_SW_RST BIT(1)
#define SYS_CTRL_REG_RST BIT(0)
+/* Register for PHY Indirect Access Control */
+#define MT7531_PHY_IAC 0x701C
+#define MT7531_PHY_ACS_ST BIT(31)
+#define MT7531_MDIO_REG_ADDR_MASK (0x1f << 25)
+#define MT7531_MDIO_PHY_ADDR_MASK (0x1f << 20)
+#define MT7531_MDIO_CMD_MASK (0x3 << 18)
+#define MT7531_MDIO_ST_MASK (0x3 << 16)
+#define MT7531_MDIO_RW_DATA_MASK (0xffff)
+#define MT7531_MDIO_REG_ADDR(x) (((x) & 0x1f) << 25)
+#define MT7531_MDIO_DEV_ADDR(x) (((x) & 0x1f) << 25)
+#define MT7531_MDIO_PHY_ADDR(x) (((x) & 0x1f) << 20)
+#define MT7531_MDIO_CMD(x) (((x) & 0x3) << 18)
+#define MT7531_MDIO_ST(x) (((x) & 0x3) << 16)
+
+enum mt7531_phy_iac_cmd {
+ MT7531_MDIO_ADDR = 0,
+ MT7531_MDIO_WRITE = 1,
+ MT7531_MDIO_READ = 2,
+ MT7531_MDIO_READ_CL45 = 3,
+};
+
+/* MDIO_ST: MDIO start field */
+enum mt7531_mdio_st {
+ MT7531_MDIO_ST_CL45 = 0,
+ MT7531_MDIO_ST_CL22 = 1,
+};
+
+#define MT7531_MDIO_CL22_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL22_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL22) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
+#define MT7531_MDIO_CL45_ADDR (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_ADDR))
+#define MT7531_MDIO_CL45_READ (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_READ))
+#define MT7531_MDIO_CL45_WRITE (MT7531_MDIO_ST(MT7531_MDIO_ST_CL45) | \
+ MT7531_MDIO_CMD(MT7531_MDIO_WRITE))
+
+/* Register for RGMII clock phase */
+#define MT7531_CLKGEN_CTRL 0x7500
+#define CLK_SKEW_OUT(x) (((x) & 0x3) << 8)
+#define CLK_SKEW_OUT_MASK GENMASK(9, 8)
+#define CLK_SKEW_IN(x) (((x) & 0x3) << 6)
+#define CLK_SKEW_IN_MASK GENMASK(7, 6)
+#define RXCLK_NO_DELAY BIT(5)
+#define TXCLK_NO_REVERSE BIT(4)
+#define GP_MODE(x) (((x) & 0x3) << 1)
+#define GP_MODE_MASK GENMASK(2, 1)
+#define GP_CLK_EN BIT(0)
+
+enum mt7531_gp_mode {
+ MT7531_GP_MODE_RGMII = 0,
+ MT7531_GP_MODE_MII = 1,
+ MT7531_GP_MODE_REV_MII = 2
+};
+
+enum mt7531_clk_skew {
+ MT7531_CLK_SKEW_NO_CHG = 0,
+ MT7531_CLK_SKEW_DLY_100PPS = 1,
+ MT7531_CLK_SKEW_DLY_200PPS = 2,
+ MT7531_CLK_SKEW_REVERSE = 3,
+};
+
/* Register for hw trap status */
#define MT7530_HWTRAP 0x7800
#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
@@ -267,6 +425,16 @@ enum mt7530_vlan_port_attr {
#define HWTRAP_XTAL_40MHZ (BIT(10))
#define HWTRAP_XTAL_20MHZ (BIT(9))
+#define MT7531_HWTRAP 0x7800
+#define HWTRAP_XTAL_FSEL_MASK BIT(7)
+#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
+#define HWTRAP_XTAL_FSEL_40MHZ 0
+/* Unique fields of (M)HWSTRAP for MT7531 */
+#define XTAL_FSEL_S 7
+#define XTAL_FSEL_M BIT(7)
+#define PHY_EN BIT(6)
+#define CHG_STRAP BIT(8)
+
/* Register for hw trap modification */
#define MT7530_MHWTRAP 0x7804
#define MHWTRAP_PHY0_SEL BIT(20)
@@ -281,14 +449,37 @@ enum mt7530_vlan_port_attr {
#define MT7530_TOP_SIG_CTRL 0x7808
#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
+#define MT7531_TOP_SIG_SR 0x780c
+#define PAD_DUAL_SGMII_EN BIT(1)
+#define PAD_MCM_SMI_EN BIT(0)
+
#define MT7530_IO_DRV_CR 0x7810
#define P5_IO_CLK_DRV(x) ((x) & 0x3)
#define P5_IO_DATA_DRV(x) (((x) & 0x3) << 4)
+#define MT7531_CHIP_REV 0x781C
+
+#define MT7531_PLLGP_EN 0x7820
+#define EN_COREPLL BIT(2)
+#define SW_CLKSW BIT(1)
+#define SW_PLLGP BIT(0)
+
#define MT7530_P6ECR 0x7830
#define P6_INTF_MODE_MASK 0x3
#define P6_INTF_MODE(x) ((x) & 0x3)
+#define MT7531_PLLGP_CR0 0x78a8
+#define RG_COREPLL_EN BIT(22)
+#define RG_COREPLL_POSDIV_S 23
+#define RG_COREPLL_POSDIV_M 0x3800000
+#define RG_COREPLL_SDM_PCW_S 1
+#define RG_COREPLL_SDM_PCW_M 0x3ffffe
+#define RG_COREPLL_SDM_PCW_CHG BIT(0)
+
+/* Registers for RGMII and SGMII PLL clock */
+#define MT7531_ANA_PLLGP_CR2 0x78b0
+#define MT7531_ANA_PLLGP_CR5 0x78bc
+
/* Registers for TRGMII on the both side */
#define MT7530_TRGMII_RCK_CTRL 0x7a00
#define RX_RST BIT(31)
@@ -327,10 +518,25 @@ enum mt7530_vlan_port_attr {
#define MT7530_P5RGMIITXCR 0x7b04
#define CSR_RGMII_TXC_CFG(x) ((x) & 0x1f)
+/* Registers for GPIO mode */
+#define MT7531_GPIO_MODE0 0x7c0c
+#define MT7531_GPIO0_MASK GENMASK(3, 0)
+#define MT7531_GPIO0_INTERRUPT 1
+
+#define MT7531_GPIO_MODE1 0x7c10
+#define MT7531_GPIO11_RG_RXD2_MASK GENMASK(15, 12)
+#define MT7531_EXT_P_MDC_11 (2 << 12)
+#define MT7531_GPIO12_RG_RXD3_MASK GENMASK(19, 16)
+#define MT7531_EXT_P_MDIO_12 (2 << 16)
+
#define MT7530_CREV 0x7ffc
#define CHIP_NAME_SHIFT 16
#define MT7530_ID 0x7530
+#define MT7531_CREV 0x781C
+#define CHIP_REV_M 0x0f
+#define MT7531_ID 0x7531
+
/* Registers for core PLL access through mmd indirect */
#define CORE_PLL_GROUP2 0x401
#define RG_SYSPLL_EN_NORMAL BIT(15)
@@ -347,6 +553,10 @@ enum mt7530_vlan_port_attr {
#define RG_SYSPLL_DDSFBK_EN BIT(12)
#define RG_SYSPLL_BIAS_EN BIT(11)
#define RG_SYSPLL_BIAS_LPF_EN BIT(10)
+#define MT7531_PHY_PLL_OFF BIT(5)
+#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
+
+#define MT753X_CTRL_PHY_ADDR 0
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
@@ -425,6 +635,7 @@ enum p5_interface_select {
P5_INTF_SEL_PHY_P0,
P5_INTF_SEL_PHY_P4,
P5_INTF_SEL_GMAC5,
+ P5_INTF_SEL_GMAC5_SGMII,
};
static const char *p5_intf_modes(unsigned int p5_interface)
@@ -438,11 +649,56 @@ static const char *p5_intf_modes(unsigned int p5_interface)
return "PHY P4";
case P5_INTF_SEL_GMAC5:
return "GMAC5";
+ case P5_INTF_SEL_GMAC5_SGMII:
+ return "GMAC5_SGMII";
default:
return "unknown";
}
}
+/* struct mt753x_info - This is the main data structure for holding the specific
+ * part for each supported device
+ * @sw_setup: Holding the handler to a device initialization
+ * @phy_read: Holding the way of reading a PHY port
+ * @phy_write: Holding the way of writing a PHY port
+ * @pad_setup: Holding the way of setting up the bus pad for a certain
+ * MAC port
+ * @cpu_port_config: Holding the way of setting up the CPU port for a
+ * certain device
+ * @phy_mode_supported: Check if the PHY type is supported on a certain
+ * port
+ * @mac_port_validate: Holding the way to set an additional validate type
+ * for a certain MAC port
+ * @mac_port_get_state: Holding the way of getting the MAC/PCS state for a
+ * certain MAC port
+ * @mac_port_config: Holding the way of setting up the PHY attribute to a
+ * certain MAC port
+ * @mac_pcs_an_restart: Holding the way of restarting PCS autonegotiation
+ * for a certain MAC port
+ * @mac_pcs_link_up: Holding the way of setting up the PHY attribute to
+ * the PCS of a certain MAC port
+ */
+struct mt753x_info {
+ enum mt753x_id id;
+
+ int (*sw_setup)(struct dsa_switch *ds);
+ int (*phy_read)(struct dsa_switch *ds, int port, int regnum);
+ int (*phy_write)(struct dsa_switch *ds, int port, int regnum, u16 val);
+ int (*pad_setup)(struct dsa_switch *ds, phy_interface_t interface);
+ int (*cpu_port_config)(struct dsa_switch *ds, int port);
+ bool (*phy_mode_supported)(struct dsa_switch *ds, int port,
+ const struct phylink_link_state *state);
+ void (*mac_port_validate)(struct dsa_switch *ds, int port,
+ unsigned long *supported);
+ int (*mac_port_get_state)(struct dsa_switch *ds, int port,
+ struct phylink_link_state *state);
+ int (*mac_port_config)(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface);
+ void (*mac_pcs_an_restart)(struct dsa_switch *ds, int port);
+ void (*mac_pcs_link_up)(struct dsa_switch *ds, int port,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex);
+};
+
/* struct mt7530_priv - This is the main data structure for holding the state
* of the driver
* @dev: The device pointer
@@ -468,6 +724,7 @@ struct mt7530_priv {
struct regulator *core_pwr;
struct regulator *io_pwr;
struct gpio_desc *reset;
+ const struct mt753x_info *info;
unsigned int id;
bool mcm;
phy_interface_t p6_interface;
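The MT7531_SGMII_REG(p, r) helper above maps each MAC port's SGMII block into its own 0x1000-byte window above 0x5000, so port 5 starts at 0x5000 and port 6 at 0x6000. A quick standalone check of the arithmetic:

    #include <stdio.h>

    #define SGMII_REG_BASE  0x5000
    #define SGMII_REG(p, r) (SGMII_REG_BASE + ((p) - 5) * 0x1000 + (r))

    int main(void)
    {
            printf("PCS_CONTROL_1(5) = 0x%x\n", SGMII_REG(5, 0x00));   /* 0x5000 */
            printf("PCS_CONTROL_1(6) = 0x%x\n", SGMII_REG(6, 0x00));   /* 0x6000 */
            printf("PHYA_CTRL_SIGNAL3(5) = 0x%x\n", SGMII_REG(5, 0x128)); /* 0x5128 */
            return 0;
    }
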
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index aa645ff86f64..4b080b448ce7 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
mv88e6xxx-objs := chip.o
+mv88e6xxx-objs += devlink.o
mv88e6xxx-objs += global1.o
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index f0dbc05e30a4..bd297ae7cf9e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -32,6 +32,7 @@
#include <net/dsa.h>
#include "chip.h"
+#include "devlink.h"
#include "global1.h"
#include "global2.h"
#include "hwtstamp.h"
@@ -1465,21 +1466,21 @@ static int mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_chip *chip,
return chip->info->ops->vtu_loadpurge(chip, entry);
}
-static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *fid_bitmap)
{
- DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_vtu_entry vlan;
int i, err;
+ u16 fid;
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- err = mv88e6xxx_port_get_fid(chip, i, fid);
+ err = mv88e6xxx_port_get_fid(chip, i, &fid);
if (err)
return err;
- set_bit(*fid, fid_bitmap);
+ set_bit(fid, fid_bitmap);
}
/* Set every FID bit used by the VLAN entries */
@@ -1497,6 +1498,18 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
set_bit(vlan.fid, fid_bitmap);
} while (vlan.vid < chip->info->max_vid);
+ return 0;
+}
+
+static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
+{
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ int err;
+
+ err = mv88e6xxx_fid_map(chip, fid_bitmap);
+ if (err)
+ return err;
+
/* The reset value 0x000 is used to indicate that multiple address
* databases are not needed. Return the next positive available.
*/
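With the in-use FIDs gathered into a bitmap by mv88e6xxx_fid_map(), allocating a fresh one reduces to a find-first-zero scan that skips FID 0, which the hardware reserves to mean no separate database. A hedged sketch of that allocation step; the size constant stands in for MV88E6XXX_N_FID:

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    #define EXAMPLE_N_FID   4096    /* stand-in for MV88E6XXX_N_FID */

    /* Pick the lowest unused FID, treating FID 0 as reserved. */
    static int example_pick_fid(unsigned long *fid_bitmap, u16 *fid)
    {
            unsigned long next = find_next_zero_bit(fid_bitmap,
                                                    EXAMPLE_N_FID, 1);

            if (next >= EXAMPLE_N_FID)
                    return -ENOSPC;

            *fid = next;
            set_bit(next, fid_bitmap);
            return 0;
    }
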
@@ -1508,22 +1521,6 @@ static int mv88e6xxx_atu_new(struct mv88e6xxx_chip *chip, u16 *fid)
return mv88e6xxx_g1_atu_flush(chip, *fid, true);
}
-static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
-{
- if (chip->info->ops->atu_get_hash)
- return chip->info->ops->atu_get_hash(chip, hash);
-
- return -EOPNOTSUPP;
-}
-
-static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
-{
- if (chip->info->ops->atu_set_hash)
- return chip->info->ops->atu_set_hash(chip, hash);
-
- return -EOPNOTSUPP;
-}
-
static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
u16 vid_begin, u16 vid_end)
{
@@ -1581,15 +1578,16 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+ bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct mv88e6xxx_chip *chip = ds->priv;
u16 mode = vlan_filtering ? MV88E6XXX_PORT_CTL2_8021Q_MODE_SECURE :
MV88E6XXX_PORT_CTL2_8021Q_MODE_DISABLED;
int err;
- if (!chip->info->max_vid)
- return -EOPNOTSUPP;
+ if (switchdev_trans_ph_prepare(trans))
+ return chip->info->max_vid ? 0 : -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_set_8021q_mode(chip, port, mode);
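Both vlan_filtering hooks in this diff now follow switchdev's two-phase transaction model: the prepare phase only validates (here, whether the chip supports VLANs at all) and must not touch hardware, while the commit phase applies the change and should not fail for anything prepare could have caught. A skeletal sketch with hypothetical names:

    #include <linux/errno.h>
    #include <net/switchdev.h>

    struct example_chip { bool vlan_capable; };

    static int example_apply(struct example_chip *chip, bool enable)
    {
            return 0;       /* the hardware write would go here */
    }

    static int example_vlan_filtering(struct example_chip *chip, bool enable,
                                      struct switchdev_trans *trans)
    {
            if (switchdev_trans_ph_prepare(trans))  /* validate only */
                    return chip->vlan_capable ? 0 : -EOPNOTSUPP;

            return example_apply(chip, enable);     /* commit */
    }
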
@@ -2837,248 +2835,11 @@ static int mv88e6390_setup_errata(struct mv88e6xxx_chip *chip)
return mv88e6xxx_software_reset(chip);
}
-enum mv88e6xxx_devlink_param_id {
- MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
-};
-
-static int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- switch (id) {
- case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
- err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- switch (id) {
- case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
- err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
-
- mv88e6xxx_reg_unlock(chip);
-
- return err;
-}
-
-static const struct devlink_param mv88e6xxx_devlink_params[] = {
- DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
- "ATU_hash", DEVLINK_PARAM_TYPE_U8,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
-};
-
-static int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
-{
- return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
- ARRAY_SIZE(mv88e6xxx_devlink_params));
-}
-
-static void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
-{
- dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
- ARRAY_SIZE(mv88e6xxx_devlink_params));
-}
-
-enum mv88e6xxx_devlink_resource_id {
- MV88E6XXX_RESOURCE_ID_ATU,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
-};
-
-static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
- u16 bin)
-{
- u16 occupancy = 0;
- int err;
-
- mv88e6xxx_reg_lock(chip);
-
- err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
- bin);
- if (err) {
- dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
- goto unlock;
- }
-
- err = mv88e6xxx_g1_atu_get_next(chip, 0);
- if (err) {
- dev_err(chip->dev, "failed to perform ATU get next\n");
- goto unlock;
- }
-
- err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
- if (err) {
- dev_err(chip->dev, "failed to get ATU stats\n");
- goto unlock;
- }
-
- occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
-
-unlock:
- mv88e6xxx_reg_unlock(chip);
-
- return occupancy;
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_0);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_1);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_2);
-}
-
-static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
-{
- struct mv88e6xxx_chip *chip = priv;
-
- return mv88e6xxx_devlink_atu_bin_get(chip,
- MV88E6XXX_G2_ATU_STATS_BIN_3);
-}
-
-static u64 mv88e6xxx_devlink_atu_get(void *priv)
-{
- return mv88e6xxx_devlink_atu_bin_0_get(priv) +
- mv88e6xxx_devlink_atu_bin_1_get(priv) +
- mv88e6xxx_devlink_atu_bin_2_get(priv) +
- mv88e6xxx_devlink_atu_bin_3_get(priv);
-}
-
-static int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
-{
- struct devlink_resource_size_params size_params;
- struct mv88e6xxx_chip *chip = ds->priv;
- int err;
-
- devlink_resource_size_params_init(&size_params,
- mv88e6xxx_num_macs(chip),
- mv88e6xxx_num_macs(chip),
- 1, DEVLINK_RESOURCE_UNIT_ENTRY);
-
- err = dsa_devlink_resource_register(ds, "ATU",
- mv88e6xxx_num_macs(chip),
- MV88E6XXX_RESOURCE_ID_ATU,
- DEVLINK_RESOURCE_ID_PARENT_TOP,
- &size_params);
- if (err)
- goto out;
-
- devlink_resource_size_params_init(&size_params,
- mv88e6xxx_num_macs(chip) / 4,
- mv88e6xxx_num_macs(chip) / 4,
- 1, DEVLINK_RESOURCE_UNIT_ENTRY);
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_0",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_1",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_2",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- err = dsa_devlink_resource_register(ds, "ATU_bin_3",
- mv88e6xxx_num_macs(chip) / 4,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
- MV88E6XXX_RESOURCE_ID_ATU,
- &size_params);
- if (err)
- goto out;
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU,
- mv88e6xxx_devlink_atu_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
- mv88e6xxx_devlink_atu_bin_0_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
- mv88e6xxx_devlink_atu_bin_1_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
- mv88e6xxx_devlink_atu_bin_2_get,
- chip);
-
- dsa_devlink_resource_occ_get_register(ds,
- MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
- mv88e6xxx_devlink_atu_bin_3_get,
- chip);
-
- return 0;
-
-out:
- dsa_devlink_resources_unregister(ds);
- return err;
-}
-
static void mv88e6xxx_teardown(struct dsa_switch *ds)
{
mv88e6xxx_teardown_devlink_params(ds);
dsa_devlink_resources_unregister(ds);
+ mv88e6xxx_teardown_devlink_regions(ds);
}
static int mv88e6xxx_setup(struct dsa_switch *ds)
@@ -3211,7 +2972,18 @@ unlock:
err = mv88e6xxx_setup_devlink_params(ds);
if (err)
- dsa_devlink_resources_unregister(ds);
+ goto out_resources;
+
+ err = mv88e6xxx_setup_devlink_regions(ds);
+ if (err)
+ goto out_params;
+
+ return 0;
+
+out_params:
+ mv88e6xxx_teardown_devlink_params(ds);
+out_resources:
+ dsa_devlink_resources_unregister(ds);
return err;
}
@@ -3329,12 +3101,6 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return 0;
}
-static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
- { .compatible = "marvell,mv88e6xxx-mdio-external",
- .data = (void *)true },
- { },
-};
-
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
@@ -3354,7 +3120,6 @@ static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
struct device_node *np)
{
- const struct of_device_id *match;
struct device_node *child;
int err;
@@ -3372,8 +3137,8 @@ static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
* bus.
*/
for_each_available_child_of_node(np, child) {
- match = of_match_node(mv88e6xxx_mdio_external_match, child);
- if (match) {
+ if (of_device_is_compatible(
+ child, "marvell,mv88e6xxx-mdio-external")) {
err = mv88e6xxx_mdio_register(chip, child, true);
if (err) {
mv88e6xxx_mdios_unregister(chip);
@@ -5614,6 +5379,7 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_ts_info = mv88e6xxx_get_ts_info,
.devlink_param_get = mv88e6xxx_devlink_param_get,
.devlink_param_set = mv88e6xxx_devlink_param_set,
+ .devlink_info_get = mv88e6xxx_devlink_info_get,
};
static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 823ae89e5fca..81c244fc0419 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -238,6 +238,19 @@ struct mv88e6xxx_port {
bool mirror_egress;
unsigned int serdes_irq;
char serdes_irq_name[64];
+ struct devlink_region *region;
+};
+
+enum mv88e6xxx_region_id {
+ MV88E6XXX_REGION_GLOBAL1 = 0,
+ MV88E6XXX_REGION_GLOBAL2,
+ MV88E6XXX_REGION_ATU,
+
+ _MV88E6XXX_REGION_MAX,
+};
+
+struct mv88e6xxx_region_priv {
+ enum mv88e6xxx_region_id id;
};
struct mv88e6xxx_chip {
@@ -334,6 +347,9 @@ struct mv88e6xxx_chip {
/* Array of port structures. */
struct mv88e6xxx_port ports[DSA_MAX_PORTS];
+
+ /* devlink regions */
+ struct devlink_region *regions[_MV88E6XXX_REGION_MAX];
};
struct mv88e6xxx_bus_ops {
@@ -689,4 +705,6 @@ static inline void mv88e6xxx_reg_unlock(struct mv88e6xxx_chip *chip)
mutex_unlock(&chip->reg_lock);
}
+int mv88e6xxx_fid_map(struct mv88e6xxx_chip *chip, unsigned long *bitmap);
+
#endif /* _MV88E6XXX_CHIP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.c b/drivers/net/dsa/mv88e6xxx/devlink.c
new file mode 100644
index 000000000000..10cd1bfd81a0
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+#include <net/dsa.h>
+
+#include "chip.h"
+#include "devlink.h"
+#include "global1.h"
+#include "global2.h"
+#include "port.h"
+
+static int mv88e6xxx_atu_get_hash(struct mv88e6xxx_chip *chip, u8 *hash)
+{
+ if (chip->info->ops->atu_get_hash)
+ return chip->info->ops->atu_get_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+static int mv88e6xxx_atu_set_hash(struct mv88e6xxx_chip *chip, u8 hash)
+{
+ if (chip->info->ops->atu_set_hash)
+ return chip->info->ops->atu_set_hash(chip, hash);
+
+ return -EOPNOTSUPP;
+}
+
+enum mv88e6xxx_devlink_param_id {
+ MV88E6XXX_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+};
+
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_get_hash(chip, &ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ switch (id) {
+ case MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH:
+ err = mv88e6xxx_atu_set_hash(chip, ctx->val.vu8);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static const struct devlink_param mv88e6xxx_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(MV88E6XXX_DEVLINK_PARAM_ID_ATU_HASH,
+ "ATU_hash", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
+
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, mv88e6xxx_devlink_params,
+ ARRAY_SIZE(mv88e6xxx_devlink_params));
+}
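Once registered, the parameter is driven entirely from user space, e.g. devlink dev param set DEV name ATU_hash value 1 cmode runtime, where DEV is the switch's devlink handle. struct devlink_param also has an optional .validate callback (not wired up through the DSA wrappers here); a hypothetical sketch that would reject out-of-range selectors before .set runs, assuming the usual 2-bit hash field:

	/* Hypothetical range check; not part of this series. */
	static int example_atu_hash_validate(struct devlink *dl, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
	{
		return val.vu8 <= 3 ? 0 : -ERANGE;
	}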
+
+enum mv88e6xxx_devlink_resource_id {
+ MV88E6XXX_RESOURCE_ID_ATU,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+};
+
+static u64 mv88e6xxx_devlink_atu_bin_get(struct mv88e6xxx_chip *chip,
+ u16 bin)
+{
+ u16 occupancy = 0;
+ int err;
+
+ mv88e6xxx_reg_lock(chip);
+
+ err = mv88e6xxx_g2_atu_stats_set(chip, MV88E6XXX_G2_ATU_STATS_MODE_ALL,
+ bin);
+ if (err) {
+ dev_err(chip->dev, "failed to set ATU stats kind/bin\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g1_atu_get_next(chip, 0);
+ if (err) {
+ dev_err(chip->dev, "failed to perform ATU get next\n");
+ goto unlock;
+ }
+
+ err = mv88e6xxx_g2_atu_stats_get(chip, &occupancy);
+ if (err) {
+ dev_err(chip->dev, "failed to get ATU stats\n");
+ goto unlock;
+ }
+
+ occupancy &= MV88E6XXX_G2_ATU_STATS_MASK;
+
+unlock:
+ mv88e6xxx_reg_unlock(chip);
+
+ return occupancy;
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_0_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_0);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_1_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_1);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_2_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_2);
+}
+
+static u64 mv88e6xxx_devlink_atu_bin_3_get(void *priv)
+{
+ struct mv88e6xxx_chip *chip = priv;
+
+ return mv88e6xxx_devlink_atu_bin_get(chip,
+ MV88E6XXX_G2_ATU_STATS_BIN_3);
+}
+
+static u64 mv88e6xxx_devlink_atu_get(void *priv)
+{
+ return mv88e6xxx_devlink_atu_bin_0_get(priv) +
+ mv88e6xxx_devlink_atu_bin_1_get(priv) +
+ mv88e6xxx_devlink_atu_bin_2_get(priv) +
+ mv88e6xxx_devlink_atu_bin_3_get(priv);
+}
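The four thin wrappers above exist because dsa_devlink_resource_occ_get_register() passes the callback nothing but the priv pointer it was registered with. An alternative sketch that trades the wrappers for a per-bin cookie (hypothetical; not what the driver does):

	struct example_atu_bin_cookie {
		struct mv88e6xxx_chip *chip;
		u16 bin;
	};

	static u64 example_atu_bin_occ_get(void *priv)
	{
		struct example_atu_bin_cookie *cookie = priv;

		return mv88e6xxx_devlink_atu_bin_get(cookie->chip, cookie->bin);
	}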
+
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds)
+{
+ struct devlink_resource_size_params size_params;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip),
+ mv88e6xxx_num_macs(chip),
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU",
+ mv88e6xxx_num_macs(chip),
+ MV88E6XXX_RESOURCE_ID_ATU,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+ if (err)
+ goto out;
+
+ devlink_resource_size_params_init(&size_params,
+ mv88e6xxx_num_macs(chip) / 4,
+ mv88e6xxx_num_macs(chip) / 4,
+ 1, DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_0",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_1",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_2",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ err = dsa_devlink_resource_register(ds, "ATU_bin_3",
+ mv88e6xxx_num_macs(chip) / 4,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ &size_params);
+ if (err)
+ goto out;
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU,
+ mv88e6xxx_devlink_atu_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_0,
+ mv88e6xxx_devlink_atu_bin_0_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_1,
+ mv88e6xxx_devlink_atu_bin_1_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_2,
+ mv88e6xxx_devlink_atu_bin_2_get,
+ chip);
+
+ dsa_devlink_resource_occ_get_register(ds,
+ MV88E6XXX_RESOURCE_ID_ATU_BIN_3,
+ mv88e6xxx_devlink_atu_bin_3_get,
+ chip);
+
+ return 0;
+
+out:
+ dsa_devlink_resources_unregister(ds);
+ return err;
+}
+
+static int mv88e6xxx_region_global_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct mv88e6xxx_region_priv *region_priv = ops->priv;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 *registers;
+ int i, err;
+
+ registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
+ if (!registers)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+ for (i = 0; i < 32; i++) {
+ switch (region_priv->id) {
+ case MV88E6XXX_REGION_GLOBAL1:
+ err = mv88e6xxx_g1_read(chip, i, &registers[i]);
+ break;
+ case MV88E6XXX_REGION_GLOBAL2:
+ err = mv88e6xxx_g2_read(chip, i, &registers[i]);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ if (err) {
+ kfree(registers);
+ goto out;
+ }
+ }
+ *data = (u8 *)registers;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
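Note the buffer ownership contract visible above: on success the snapshot op hands the buffer to devlink, which frees it later through the region's .destructor (kfree here); on any error the op must free the buffer itself. A minimal conforming op, with example_fill() and EXAMPLE_REGION_SIZE as hypothetical placeholders:

	static int example_region_snapshot(struct devlink *dl,
					   const struct devlink_region_ops *ops,
					   struct netlink_ext_ack *extack,
					   u8 **data)
	{
		u8 *buf = kzalloc(EXAMPLE_REGION_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;

		if (example_fill(buf)) {	/* hypothetical */
			kfree(buf);
			return -EIO;
		}

		*data = buf;	/* devlink now owns buf */
		return 0;
	}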
+
+/* The ATU entry format varies between mv88e6xxx chipset generations.
+ * Define a generic format which covers all current and, hopefully,
+ * future mv88e6xxx generations.
+ */
+
+struct mv88e6xxx_devlink_atu_entry {
+ /* The FID is scattered over multiple registers. */
+ u16 fid;
+ u16 atu_op;
+ u16 atu_data;
+ u16 atu_01;
+ u16 atu_23;
+ u16 atu_45;
+};
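Each of the atu_01/atu_23/atu_45 words carries two bytes of the MAC address, most significant byte in the upper half, mirroring mv88e6xxx_g1_atu_mac_read(). A sketch of recovering the address from a snapshot entry:

	static void example_atu_entry_mac(const struct mv88e6xxx_devlink_atu_entry *e,
					  u8 mac[ETH_ALEN])
	{
		mac[0] = e->atu_01 >> 8;
		mac[1] = e->atu_01 & 0xff;
		mac[2] = e->atu_23 >> 8;
		mac[3] = e->atu_23 & 0xff;
		mac[4] = e->atu_45 >> 8;
		mac[5] = e->atu_45 & 0xff;
	}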
+
+static int mv88e6xxx_region_atu_snapshot_fid(struct mv88e6xxx_chip *chip,
+ int fid,
+ struct mv88e6xxx_devlink_atu_entry *table,
+ int *count)
+{
+ u16 atu_op, atu_data, atu_01, atu_23, atu_45;
+ struct mv88e6xxx_atu_entry addr;
+ int err;
+
+ addr.state = 0;
+ eth_broadcast_addr(addr.mac);
+
+ do {
+ err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
+ if (err)
+ return err;
+
+ if (!addr.state)
+ break;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_OP, &atu_op);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_DATA, &atu_data);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC01, &atu_01);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC23, &atu_23);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_ATU_MAC45, &atu_45);
+ if (err)
+ return err;
+
+ table[*count].fid = fid;
+ table[*count].atu_op = atu_op;
+ table[*count].atu_data = atu_data;
+ table[*count].atu_01 = atu_01;
+ table[*count].atu_23 = atu_23;
+ table[*count].atu_45 = atu_45;
+ (*count)++;
+ } while (!is_broadcast_ether_addr(addr.mac));
+
+ return 0;
+}
+
+static int mv88e6xxx_region_atu_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
+ struct mv88e6xxx_devlink_atu_entry *table;
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int fid = -1, count, err;
+
+	table = kcalloc(mv88e6xxx_num_databases(chip),
+			sizeof(struct mv88e6xxx_devlink_atu_entry),
+			GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+ count = 0;
+
+ mv88e6xxx_reg_lock(chip);
+
+	err = mv88e6xxx_fid_map(chip, fid_bitmap);
+	if (err)
+		goto out;
+
+	while (1) {
+		fid = find_next_bit(fid_bitmap, MV88E6XXX_N_FID, fid + 1);
+		if (fid == MV88E6XXX_N_FID)
+			break;
+
+		err = mv88e6xxx_region_atu_snapshot_fid(chip, fid, table,
+							&count);
+		if (err)
+			goto out;
+	}
+out:
+	mv88e6xxx_reg_unlock(chip);
+
+	if (err) {
+		/* Unlike the success path, devlink never sees the buffer
+		 * on error, so it must be freed here.
+		 */
+		kfree(table);
+		return err;
+	}
+
+	*data = (u8 *)table;
+
+	return 0;
+}
+
+static int mv88e6xxx_region_port_snapshot(struct devlink_port *devlink_port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_port_to_ds(devlink_port);
+ int port = dsa_devlink_port_to_port(devlink_port);
+ struct mv88e6xxx_chip *chip = ds->priv;
+ u16 *registers;
+ int i, err;
+
+ registers = kmalloc_array(32, sizeof(u16), GFP_KERNEL);
+ if (!registers)
+ return -ENOMEM;
+
+ mv88e6xxx_reg_lock(chip);
+ for (i = 0; i < 32; i++) {
+ err = mv88e6xxx_port_read(chip, port, i, &registers[i]);
+ if (err) {
+ kfree(registers);
+ goto out;
+ }
+ }
+ *data = (u8 *)registers;
+out:
+ mv88e6xxx_reg_unlock(chip);
+
+ return err;
+}
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global1_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL1,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global1_ops = {
+ .name = "global1",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global1_priv,
+};
+
+static struct mv88e6xxx_region_priv mv88e6xxx_region_global2_priv = {
+ .id = MV88E6XXX_REGION_GLOBAL2,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_global2_ops = {
+ .name = "global2",
+ .snapshot = mv88e6xxx_region_global_snapshot,
+ .destructor = kfree,
+ .priv = &mv88e6xxx_region_global2_priv,
+};
+
+static struct devlink_region_ops mv88e6xxx_region_atu_ops = {
+ .name = "atu",
+ .snapshot = mv88e6xxx_region_atu_snapshot,
+ .destructor = kfree,
+};
+
+static const struct devlink_port_region_ops mv88e6xxx_region_port_ops = {
+ .name = "port",
+ .snapshot = mv88e6xxx_region_port_snapshot,
+ .destructor = kfree,
+};
+
+struct mv88e6xxx_region {
+ struct devlink_region_ops *ops;
+ u64 size;
+};
+
+static struct mv88e6xxx_region mv88e6xxx_regions[] = {
+	[MV88E6XXX_REGION_GLOBAL1] = {
+		.ops = &mv88e6xxx_region_global1_ops,
+		.size = 32 * sizeof(u16),
+	},
+	[MV88E6XXX_REGION_GLOBAL2] = {
+		.ops = &mv88e6xxx_region_global2_ops,
+		.size = 32 * sizeof(u16),
+	},
+	[MV88E6XXX_REGION_ATU] = {
+		.ops = &mv88e6xxx_region_atu_ops,
+		/* .size calculated at runtime */
+	},
+};
+
+static void
+mv88e6xxx_teardown_devlink_regions_global(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++)
+ dsa_devlink_region_destroy(chip->regions[i]);
+}
+
+static void
+mv88e6xxx_teardown_devlink_regions_port(struct mv88e6xxx_chip *chip,
+ int port)
+{
+ dsa_devlink_region_destroy(chip->ports[port].region);
+}
+
+static int mv88e6xxx_setup_devlink_regions_port(struct dsa_switch *ds,
+ struct mv88e6xxx_chip *chip,
+ int port)
+{
+ struct devlink_region *region;
+
+ region = dsa_devlink_port_region_create(ds,
+ port,
+ &mv88e6xxx_region_port_ops, 1,
+ 32 * sizeof(u16));
+ if (IS_ERR(region))
+ return PTR_ERR(region);
+
+ chip->ports[port].region = region;
+
+ return 0;
+}
+
+static void
+mv88e6xxx_teardown_devlink_regions_ports(struct mv88e6xxx_chip *chip)
+{
+ int port;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++)
+ mv88e6xxx_teardown_devlink_regions_port(chip, port);
+}
+
+static int mv88e6xxx_setup_devlink_regions_ports(struct dsa_switch *ds,
+ struct mv88e6xxx_chip *chip)
+{
+ int port;
+ int err;
+
+ for (port = 0; port < mv88e6xxx_num_ports(chip); port++) {
+ err = mv88e6xxx_setup_devlink_regions_port(ds, chip, port);
+ if (err)
+ goto out;
+ }
+
+ return 0;
+
+out:
+ while (port-- > 0)
+ mv88e6xxx_teardown_devlink_regions_port(chip, port);
+
+ return err;
+}
+
+static int mv88e6xxx_setup_devlink_regions_global(struct dsa_switch *ds,
+ struct mv88e6xxx_chip *chip)
+{
+ struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6xxx_regions); i++) {
+ ops = mv88e6xxx_regions[i].ops;
+ size = mv88e6xxx_regions[i].size;
+
+ if (i == MV88E6XXX_REGION_ATU)
+ size = mv88e6xxx_num_databases(chip) *
+ sizeof(struct mv88e6xxx_devlink_atu_entry);
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region))
+ goto out;
+ chip->regions[i] = region;
+ }
+ return 0;
+
+out:
+ for (j = 0; j < i; j++)
+ dsa_devlink_region_destroy(chip->regions[j]);
+
+ return PTR_ERR(region);
+}
+
+int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ err = mv88e6xxx_setup_devlink_regions_global(ds, chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_setup_devlink_regions_ports(ds, chip);
+ if (err)
+ mv88e6xxx_teardown_devlink_regions_global(chip);
+
+ return err;
+}
+
+void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ mv88e6xxx_teardown_devlink_regions_ports(chip);
+ mv88e6xxx_teardown_devlink_regions_global(chip);
+}
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ int err;
+
+ err = devlink_info_driver_name_put(req, "mv88e6xxx");
+ if (err)
+ return err;
+
+ return devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ chip->info->name);
+}
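With this file in place the regions can be exercised from user space, e.g. devlink region new DEV/global1 snapshot 0 followed by devlink region dump DEV/global1 snapshot 0. The snapshot payload is the raw u16 register array in host byte order, so once the hex dump is converted back to raw bytes a trivial decoder suffices (userspace sketch):

	/* Pretty-print a global1/global2/port snapshot fed on stdin as
	 * raw binary: 32 registers, host byte order.
	 */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t reg;
		int i = 0;

		while (fread(&reg, sizeof(reg), 1, stdin) == 1)
			printf("reg 0x%02x: 0x%04x\n", i++, reg);

		return 0;
	}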
diff --git a/drivers/net/dsa/mv88e6xxx/devlink.h b/drivers/net/dsa/mv88e6xxx/devlink.h
new file mode 100644
index 000000000000..3d72db3dcf95
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/devlink.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/* Marvell 88E6xxx Switch devlink support. */
+
+#ifndef _MV88E6XXX_DEVLINK_H
+#define _MV88E6XXX_DEVLINK_H
+
+int mv88e6xxx_setup_devlink_params(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_params(struct dsa_switch *ds);
+int mv88e6xxx_setup_devlink_resources(struct dsa_switch *ds);
+int mv88e6xxx_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int mv88e6xxx_setup_devlink_regions(struct dsa_switch *ds);
+void mv88e6xxx_teardown_devlink_regions(struct dsa_switch *ds);
+
+int mv88e6xxx_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+#endif /* _MV88E6XXX_DEVLINK_H */
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index a4c488b12e8f..094d17a1d037 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -211,49 +211,20 @@ int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
-EFAULT : 0;
}
-/* Get the start of the PTP header in this skb */
-static u8 *parse_ptp_header(struct sk_buff *skb, unsigned int type)
-{
- u8 *data = skb_mac_header(skb);
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return NULL;
- }
-
- /* Ensure that the entire header is present in this packet. */
- if (skb->len + ETH_HLEN < offset + 34)
- return NULL;
-
- return data + offset;
-}
-
/* Returns a pointer to the PTP header if the caller should time stamp,
* or NULL if the caller should not.
*/
-static u8 *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip, int port,
- struct sk_buff *skb, unsigned int type)
+static struct ptp_header *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip,
+ int port, struct sk_buff *skb,
+ unsigned int type)
{
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- u8 *hdr;
+ struct ptp_header *hdr;
if (!chip->info->ptp_support)
return NULL;
- hdr = parse_ptp_header(skb, type);
+ hdr = ptp_parse_header(skb, type);
if (!hdr)
return NULL;
@@ -275,12 +246,11 @@ static int mv88e6xxx_ts_valid(u16 status)
static int seq_match(struct sk_buff *skb, u16 ts_seqid)
{
unsigned int type = SKB_PTP_TYPE(skb);
- u8 *hdr = parse_ptp_header(skb, type);
- __be16 *seqid;
+ struct ptp_header *hdr;
- seqid = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
+ hdr = ptp_parse_header(skb, type);
- return ts_seqid == ntohs(*seqid);
+ return ts_seqid == ntohs(hdr->sequence_id);
}
static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
@@ -357,9 +327,9 @@ static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
&ps->rx_queue2);
}
-static int is_pdelay_resp(u8 *msgtype)
+static int is_pdelay_resp(const struct ptp_header *hdr)
{
- return (*msgtype & 0xf) == 3;
+ return (hdr->tsmt & 0xf) == 3;
}
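The magic 3 above is the IEEE 1588 messageType, carried in the low nibble of the first header byte (ptp_header::tsmt packs transportSpecific and messageType). For reference, the event message values defined by the standard (later kernels name them PTP_MSGTYPE_*):

	enum {
		EXAMPLE_PTP_MSGTYPE_SYNC	= 0x0,
		EXAMPLE_PTP_MSGTYPE_DELAY_REQ	= 0x1,
		EXAMPLE_PTP_MSGTYPE_PDELAY_REQ	= 0x2,
		EXAMPLE_PTP_MSGTYPE_PDELAY_RESP	= 0x3,
	};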
bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
@@ -367,7 +337,7 @@ bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_port_hwtstamp *ps;
struct mv88e6xxx_chip *chip;
- u8 *hdr;
+ struct ptp_header *hdr;
chip = ds->priv;
ps = &chip->port_hwtstamp[port];
@@ -503,8 +473,7 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
{
struct mv88e6xxx_chip *chip = ds->priv;
struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
- __be16 *seq_ptr;
- u8 *hdr;
+ struct ptp_header *hdr;
if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
return false;
@@ -513,15 +482,13 @@ bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
if (!hdr)
return false;
- seq_ptr = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
-
if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
&ps->state))
return false;
ps->tx_skb = clone;
ps->tx_tstamp_start = jiffies;
- ps->tx_seq_id = be16_to_cpup(seq_ptr);
+ ps->tx_seq_id = be16_to_cpu(hdr->sequence_id);
ptp_schedule_worker(chip->ptp_clock, 0);
return true;
diff --git a/drivers/net/dsa/ocelot/Kconfig b/drivers/net/dsa/ocelot/Kconfig
index 2d23ccef7d0e..c110e82a7973 100644
--- a/drivers/net/dsa/ocelot/Kconfig
+++ b/drivers/net/dsa/ocelot/Kconfig
@@ -8,12 +8,19 @@ config NET_DSA_MSCC_FELIX
select MSCC_OCELOT_SWITCH_LIB
select NET_DSA_TAG_OCELOT
select FSL_ENETC_MDIO
+ select PCS_LYNX
help
- This driver supports network switches from the Vitesse /
- Microsemi / Microchip Ocelot family of switching cores that are
- connected to their host CPU via Ethernet.
- The following switches are supported:
- - VSC9959 (Felix): embedded as a PCIe function of the NXP LS1028A
- ENETC integrated endpoint.
- - VSC9953 (Seville): embedded as a platform device on the
- NXP T1040 SoC.
+ This driver supports the VSC9959 (Felix) switch, which is embedded as
+ a PCIe function of the NXP LS1028A ENETC RCiEP.
+
+config NET_DSA_MSCC_SEVILLE
+ tristate "Ocelot / Seville Ethernet switch support"
+ depends on NET_DSA
+ depends on NET_VENDOR_MICROSEMI
+ depends on HAS_IOMEM
+ select MSCC_OCELOT_SWITCH_LIB
+ select NET_DSA_TAG_OCELOT
+ select PCS_LYNX
+ help
+ This driver supports the VSC9953 (Seville) switch, which is embedded
+ as a platform device on the NXP T1040 SoC.
diff --git a/drivers/net/dsa/ocelot/Makefile b/drivers/net/dsa/ocelot/Makefile
index ec57a5a12330..f6dd131e7491 100644
--- a/drivers/net/dsa/ocelot/Makefile
+++ b/drivers/net/dsa/ocelot/Makefile
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MSCC_FELIX) += mscc_felix.o
+obj-$(CONFIG_NET_DSA_MSCC_SEVILLE) += mscc_seville.o
mscc_felix-objs := \
felix.o \
- felix_vsc9959.o \
+ felix_vsc9959.o
+
+mscc_seville-objs := \
+ felix.o \
seville_vsc9953.o
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 01427cd08448..f791860d495f 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -19,6 +19,7 @@
#include <linux/of_net.h>
#include <linux/pci.h>
#include <linux/of.h>
+#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <net/dsa.h>
#include "felix.h"
@@ -118,13 +119,12 @@ static int felix_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans)
{
struct ocelot *ocelot = ds->priv;
- ocelot_port_vlan_filtering(ocelot, port, enabled);
-
- return 0;
+ return ocelot_port_vlan_filtering(ocelot, port, enabled, trans);
}
static void felix_vlan_add(struct dsa_switch *ds, int port,
@@ -196,27 +196,16 @@ static void felix_phylink_validate(struct dsa_switch *ds, int port,
felix->info->phylink_validate(ocelot, port, supported, state);
}
-static int felix_phylink_mac_pcs_get_state(struct dsa_switch *ds, int port,
- struct phylink_link_state *state)
-{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
-
- if (felix->info->pcs_link_state)
- felix->info->pcs_link_state(ocelot, port, state);
-
- return 0;
-}
-
static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
unsigned int link_an_mode,
const struct phylink_link_state *state)
{
struct ocelot *ocelot = ds->priv;
struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_to_port(ds, port);
- if (felix->info->pcs_config)
- felix->info->pcs_config(ocelot, port, link_an_mode, state);
+ if (felix->pcs[port])
+ phylink_set_pcs(dp->pl, &felix->pcs[port]->pcs);
}
static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
@@ -306,10 +295,6 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
ocelot_fields_write(ocelot, port,
QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
- if (felix->info->pcs_link_up)
- felix->info->pcs_link_up(ocelot, port, link_an_mode, interface,
- speed, duplex);
-
if (felix->info->port_sched_speed_set)
felix->info->port_sched_speed_set(ocelot, port, speed);
}
@@ -449,10 +434,10 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
ocelot->num_stats = felix->info->num_stats;
ocelot->shared_queue_sz = felix->info->shared_queue_sz;
ocelot->num_mact_rows = felix->info->num_mact_rows;
- ocelot->vcap_is2_keys = felix->info->vcap_is2_keys;
- ocelot->vcap_is2_actions= felix->info->vcap_is2_actions;
ocelot->vcap = felix->info->vcap;
ocelot->ops = felix->info->ops;
+ ocelot->inj_prefix = OCELOT_TAG_PREFIX_SHORT;
+ ocelot->xtr_prefix = OCELOT_TAG_PREFIX_SHORT;
port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t),
GFP_KERNEL);
@@ -523,7 +508,7 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return PTR_ERR(target);
}
- template = devm_kzalloc(ocelot->dev, OCELOT_TAG_LEN,
+ template = devm_kzalloc(ocelot->dev, OCELOT_TOTAL_TAG_LEN,
GFP_KERNEL);
if (!template) {
dev_err(ocelot->dev,
@@ -552,22 +537,27 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return 0;
}
-static struct ptp_clock_info ocelot_ptp_clock_info = {
- .owner = THIS_MODULE,
- .name = "felix ptp",
- .max_adj = 0x7fffffff,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = OCELOT_PTP_PINS_NUM,
- .n_pins = OCELOT_PTP_PINS_NUM,
- .pps = 0,
- .gettime64 = ocelot_ptp_gettime64,
- .settime64 = ocelot_ptp_settime64,
- .adjtime = ocelot_ptp_adjtime,
- .adjfine = ocelot_ptp_adjfine,
- .verify = ocelot_ptp_verify,
- .enable = ocelot_ptp_enable,
-};
+/* The CPU port module is connected to the Node Processor Interface (NPI).
+ * This is the mode through which frames are injected from, and extracted
+ * to, an external CPU over Ethernet.
+ */
+static void felix_npi_port_init(struct ocelot *ocelot, int port)
+{
+ ocelot->npi = port;
+
+ ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
+ QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port),
+ QSYS_EXT_CPU_CFG);
+
+ /* NPI port Injection/Extraction configuration */
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR,
+ ocelot->xtr_prefix);
+ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR,
+ ocelot->inj_prefix);
+
+ /* Disable transmission of pause frames */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+}
/* Hardware initialization done here so that we can allocate structures with
* devm without fear of dsa_register_switch returning -EPROBE_DEFER and causing
@@ -590,7 +580,7 @@ static int felix_setup(struct dsa_switch *ds)
return err;
if (ocelot->ptp) {
- err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
+ err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps);
if (err) {
dev_err(ocelot->dev,
"Timestamp initialization failed\n");
@@ -601,11 +591,8 @@ static int felix_setup(struct dsa_switch *ds)
for (port = 0; port < ds->num_ports; port++) {
ocelot_init_port(ocelot, port);
- /* Bring up the CPU port module and configure the NPI port */
if (dsa_is_cpu_port(ds, port))
- ocelot_configure_cpu(ocelot, port,
- OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_LONG);
+ felix_npi_port_init(ocelot, port);
/* Set the default QoS Classification based on PCP and DEI
* bits of vlan tag.
@@ -630,11 +617,6 @@ static int felix_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
ds->configure_vlan_while_not_filtering = true;
- /* It looks like the MAC/PCS interrupt register - PM0_IEVENT (0x8040)
- * isn't instantiated for the Felix PF.
- * In-band AN may take a few ms to complete, so we need to poll.
- */
- ds->pcs_poll = true;
return 0;
}
@@ -705,8 +687,11 @@ static bool felix_txtstamp(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
- if (!ocelot_port_add_txtstamp_skb(ocelot_port, clone))
+ if (ocelot->ptp && (skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP) &&
+ ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ ocelot_port_add_txtstamp_skb(ocelot, port, clone);
return true;
+ }
return false;
}
@@ -793,7 +778,6 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_validate = felix_phylink_validate,
- .phylink_mac_link_state = felix_phylink_mac_pcs_get_state,
.phylink_mac_config = felix_phylink_mac_config,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
@@ -823,31 +807,27 @@ const struct dsa_switch_ops felix_switch_ops = {
.cls_flower_add = felix_cls_flower_add,
.cls_flower_del = felix_cls_flower_del,
.cls_flower_stats = felix_cls_flower_stats,
- .port_setup_tc = felix_port_setup_tc,
+ .port_setup_tc = felix_port_setup_tc,
};
-static int __init felix_init(void)
+struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
- int err;
-
- err = pci_register_driver(&felix_vsc9959_pci_driver);
- if (err)
- return err;
+ struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_switch *ds = felix->ds;
- err = platform_driver_register(&seville_vsc9953_driver);
- if (err)
- return err;
+ if (!dsa_is_user_port(ds, port))
+ return NULL;
- return 0;
+ return dsa_to_port(ds, port)->slave;
}
-module_init(felix_init);
-static void __exit felix_exit(void)
+int felix_netdev_to_port(struct net_device *dev)
{
- pci_unregister_driver(&felix_vsc9959_pci_driver);
- platform_driver_unregister(&seville_vsc9953_driver);
-}
-module_exit(felix_exit);
+ struct dsa_port *dp;
+
+ dp = dsa_port_from_netdev(dev);
+ if (IS_ERR(dp))
+ return -EINVAL;
-MODULE_DESCRIPTION("Felix Switch driver");
-MODULE_LICENSE("GPL v2");
+ return dp->index;
+}
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index 98f14621ac23..4c717324ac2f 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -20,23 +20,13 @@ struct felix_info {
const struct ocelot_stat_layout *stats_layout;
unsigned int num_stats;
int num_ports;
- int num_tx_queues;
- struct vcap_field *vcap_is2_keys;
- struct vcap_field *vcap_is2_actions;
- const struct vcap_props *vcap;
+ int num_tx_queues;
+ struct vcap_props *vcap;
int switch_pci_bar;
int imdio_pci_bar;
+ const struct ptp_clock_info *ptp_caps;
int (*mdio_bus_alloc)(struct ocelot *ocelot);
void (*mdio_bus_free)(struct ocelot *ocelot);
- void (*pcs_config)(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state);
- void (*pcs_link_up)(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex);
- void (*pcs_link_state)(struct ocelot *ocelot, int port,
- struct phylink_link_state *state);
void (*phylink_validate)(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state);
@@ -50,8 +40,6 @@ struct felix_info {
};
extern const struct dsa_switch_ops felix_switch_ops;
-extern struct pci_driver felix_vsc9959_pci_driver;
-extern struct platform_driver seville_vsc9953_driver;
/* DSA glue / front-end for struct ocelot */
struct felix {
@@ -59,20 +47,12 @@ struct felix {
const struct felix_info *info;
struct ocelot ocelot;
struct mii_bus *imdio;
- struct phy_device **pcs;
+ struct lynx_pcs **pcs;
resource_size_t switch_base;
resource_size_t imdio_base;
};
-void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
- struct phylink_link_state *state);
-void vsc9959_pcs_config(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state);
-void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex);
-void vsc9959_mdio_bus_free(struct ocelot *ocelot);
+struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
+int felix_netdev_to_port(struct net_device *dev);
#endif
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 3a9637496407..3e925b8d5306 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -9,15 +9,13 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/packing.h>
+#include <linux/pcs-lynx.h>
#include <net/pkt_sched.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/pci.h>
#include "felix.h"
-#define VSC9959_VCAP_IS2_CNT 1024
-#define VSC9959_VCAP_IS2_ENTRY_WIDTH 376
-#define VSC9959_VCAP_PORT_CNT 6
#define VSC9959_TAS_GCL_ENTRY_MAX 63
static const u32 vsc9959_ana_regmap[] = {
@@ -137,14 +135,27 @@ static const u32 vsc9959_qs_regmap[] = {
REG_RESERVED(QS_INH_DBG),
};
-static const u32 vsc9959_s2_regmap[] = {
- REG(S2_CORE_UPDATE_CTRL, 0x000000),
- REG(S2_CORE_MV_CFG, 0x000004),
- REG(S2_CACHE_ENTRY_DAT, 0x000008),
- REG(S2_CACHE_MASK_DAT, 0x000108),
- REG(S2_CACHE_ACTION_DAT, 0x000208),
- REG(S2_CACHE_CNT_DAT, 0x000308),
- REG(S2_CACHE_TG_DAT, 0x000388),
+static const u32 vsc9959_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
static const u32 vsc9959_qsys_regmap[] = {
@@ -295,15 +306,15 @@ static const u32 vsc9959_sys_regmap[] = {
};
static const u32 vsc9959_ptp_regmap[] = {
- REG(PTP_PIN_CFG, 0x000000),
- REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
- REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
- REG(PTP_PIN_TOD_NSEC, 0x00000c),
- REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
- REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
- REG(PTP_CFG_MISC, 0x0000a0),
- REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
- REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
+ REG(PTP_PIN_CFG, 0x000000),
+ REG(PTP_PIN_TOD_SEC_MSB, 0x000004),
+ REG(PTP_PIN_TOD_SEC_LSB, 0x000008),
+ REG(PTP_PIN_TOD_NSEC, 0x00000c),
+ REG(PTP_PIN_WF_HIGH_PERIOD, 0x000014),
+ REG(PTP_PIN_WF_LOW_PERIOD, 0x000018),
+ REG(PTP_CFG_MISC, 0x0000a0),
+ REG(PTP_CLK_CFG_ADJ_CFG, 0x0000a4),
+ REG(PTP_CLK_CFG_ADJ_FREQ, 0x0000a8),
};
static const u32 vsc9959_gcb_regmap[] = {
@@ -358,7 +369,9 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
[QSYS] = vsc9959_qsys_regmap,
[REW] = vsc9959_rew_regmap,
[SYS] = vsc9959_sys_regmap,
- [S2] = vsc9959_s2_regmap,
+ [S0] = vsc9959_vcap_regmap,
+ [S1] = vsc9959_vcap_regmap,
+ [S2] = vsc9959_vcap_regmap,
[PTP] = vsc9959_ptp_regmap,
[GCB] = vsc9959_gcb_regmap,
[DEV_GMII] = vsc9959_dev_gmii_regmap,
@@ -391,6 +404,16 @@ static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
.end = 0x001ffff,
.name = "sys",
},
+ [S0] = {
+ .start = 0x0040000,
+ .end = 0x00403ff,
+ .name = "s0",
+ },
+ [S1] = {
+ .start = 0x0050000,
+ .end = 0x00503ff,
+ .name = "s1",
+ },
[S2] = {
.start = 0x0060000,
.end = 0x00603ff,
@@ -595,6 +618,113 @@ static const struct ocelot_stat_layout vsc9959_stats_layout[] = {
{ .offset = 0x111, .name = "drop_green_prio_7", },
};
+static const struct vcap_field vsc9959_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 3},
+ [VCAP_ES0_IGR_PORT] = { 3, 3},
+ [VCAP_ES0_RSV] = { 6, 2},
+ [VCAP_ES0_L2_MC] = { 8, 1},
+ [VCAP_ES0_L2_BC] = { 9, 1},
+ [VCAP_ES0_VID] = { 10, 12},
+ [VCAP_ES0_DP] = { 22, 1},
+ [VCAP_ES0_PCP] = { 23, 3},
+};
+
+static const struct vcap_field vsc9959_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 23},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 72, 1},
+};
+
+static const struct vcap_field vsc9959_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 7},
+ [VCAP_IS1_HK_RSV] = { 10, 9},
+ [VCAP_IS1_HK_OAM_Y1731] = { 19, 1},
+ [VCAP_IS1_HK_L2_MC] = { 20, 1},
+ [VCAP_IS1_HK_L2_BC] = { 21, 1},
+ [VCAP_IS1_HK_IP_MC] = { 22, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 23, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 24, 1},
+ [VCAP_IS1_HK_TPID] = { 25, 1},
+ [VCAP_IS1_HK_VID] = { 26, 12},
+ [VCAP_IS1_HK_DEI] = { 38, 1},
+ [VCAP_IS1_HK_PCP] = { 39, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 42, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 90, 1},
+ [VCAP_IS1_HK_ETYPE] = { 91, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {107, 1},
+ [VCAP_IS1_HK_IP4] = {108, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {109, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {110, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {111, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {112, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {118, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {150, 1},
+ [VCAP_IS1_HK_TCP] = {151, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {152, 16},
+ [VCAP_IS1_HK_L4_RNG] = {168, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 42, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 43, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 55, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 56, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 59, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 60, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 61, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 62, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 63, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 69, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {101, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {133, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {141, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {142, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {143, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {151, 32},
+};
+
+static const struct vcap_field vsc9959_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 9},
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
+};
+
static struct vcap_field vsc9959_vcap_is2_keys[] = {
/* Common: 41 bits */
[VCAP_IS2_TYPE] = { 0, 4},
@@ -693,15 +823,32 @@ static struct vcap_field vsc9959_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_CNT] = { 44, 32},
};
-static const struct vcap_props vsc9959_vcap_props[] = {
+static struct vcap_props vsc9959_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 72, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc9959_vcap_es0_keys,
+ .actions = vsc9959_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc9959_vcap_is1_keys,
+ .actions = vsc9959_vcap_is1_actions,
+ },
[VCAP_IS2] = {
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VSC9959_VCAP_IS2_CNT,
- .entry_width = VSC9959_VCAP_IS2_ENTRY_WIDTH,
- .action_count = VSC9959_VCAP_IS2_CNT +
- VSC9959_VCAP_PORT_CNT + 2,
- .action_width = 89,
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
@@ -713,11 +860,29 @@ static const struct vcap_props vsc9959_vcap_props[] = {
.count = 4
},
},
- .counter_words = 4,
- .counter_width = 32,
+ .target = S2,
+ .keys = vsc9959_vcap_is2_keys,
+ .actions = vsc9959_vcap_is2_actions,
},
};
+static const struct ptp_clock_info vsc9959_ptp_caps = {
+ .owner = THIS_MODULE,
+ .name = "felix ptp",
+ .max_adj = 0x7fffffff,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = OCELOT_PTP_PINS_NUM,
+ .n_pins = OCELOT_PTP_PINS_NUM,
+ .pps = 0,
+ .gettime64 = ocelot_ptp_gettime64,
+ .settime64 = ocelot_ptp_settime64,
+ .adjtime = ocelot_ptp_adjtime,
+ .adjfine = ocelot_ptp_adjfine,
+ .verify = ocelot_ptp_verify,
+ .enable = ocelot_ptp_enable,
+};
+
#define VSC9959_INIT_TIMEOUT 50000
#define VSC9959_GCB_RST_SLEEP 100
#define VSC9959_SYS_RAMINIT_SLEEP 80
@@ -726,7 +891,7 @@ static int vsc9959_gcb_soft_rst_status(struct ocelot *ocelot)
{
int val;
- regmap_field_read(ocelot->regfields[GCB_SOFT_RST_SWC_RST], &val);
+ ocelot_field_read(ocelot, GCB_SOFT_RST_SWC_RST, &val);
return val;
}
@@ -736,12 +901,15 @@ static int vsc9959_sys_ram_init_status(struct ocelot *ocelot)
return ocelot_read(ocelot, SYS_RAM_INIT);
}
+/* CORE_ENA is in SYS:SYSTEM:RESET_CFG
+ * RAM_INIT is in SYS:RAM_CTRL:RAM_INIT
+ */
static int vsc9959_reset(struct ocelot *ocelot)
{
int val, err;
/* soft-reset the switch core */
- regmap_field_write(ocelot->regfields[GCB_SOFT_RST_SWC_RST], 1);
+ ocelot_field_write(ocelot, GCB_SOFT_RST_SWC_RST, 1);
err = readx_poll_timeout(vsc9959_gcb_soft_rst_status, ocelot, val, !val,
VSC9959_GCB_RST_SLEEP, VSC9959_INIT_TIMEOUT);
@@ -761,352 +929,11 @@ static int vsc9959_reset(struct ocelot *ocelot)
}
/* enable switch core */
- regmap_field_write(ocelot->regfields[SYS_RESET_CFG_CORE_ENA], 1);
+ ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
}
-/* We enable SGMII AN only when the PHY has managed = "in-band-status" in the
- * device tree. If we are in MLO_AN_PHY mode, we program directly state->speed
- * into the PCS, which is retrieved out-of-band over MDIO. This also has the
- * benefit of working with SGMII fixed-links, like downstream switches, where
- * both link partners attempt to operate as AN slaves and therefore AN never
- * completes. But it also has the disadvantage that some PHY chips don't pass
- * traffic if SGMII AN is enabled but not completed (acknowledged by us), so
- * setting MLO_AN_INBAND is actually required for those.
- */
-static void vsc9959_pcs_config_sgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- int bmsr, bmcr;
-
- /* Some PHYs like VSC8234 don't like it when AN restarts on
- * their system side and they restart line side AN too, going
- * into an endless link up/down loop. Don't restart PCS AN if
- * link is up already.
- * We do check that AN is enabled just in case this is the 1st
- * call, PCS detects a carrier but AN is disabled from power on
- * or by boot loader.
- */
- bmcr = phy_read(pcs, MII_BMCR);
- if (bmcr < 0)
- return;
-
- bmsr = phy_read(pcs, MII_BMSR);
- if (bmsr < 0)
- return;
-
- if ((bmcr & BMCR_ANENABLE) && (bmsr & BMSR_LSTATUS))
- return;
-
- /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
- * for the MAC PCS in order to acknowledge the AN.
- */
- phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII |
- ADVERTISE_LPACK);
-
- phy_write(pcs, ENETC_PCS_IF_MODE,
- ENETC_PCS_IF_MODE_SGMII_EN |
- ENETC_PCS_IF_MODE_USE_SGMII_AN);
-
- /* Adjust link timer for SGMII */
- phy_write(pcs, ENETC_PCS_LINK_TIMER1,
- ENETC_PCS_LINK_TIMER1_VAL);
- phy_write(pcs, ENETC_PCS_LINK_TIMER2,
- ENETC_PCS_LINK_TIMER2_VAL);
-
- phy_set_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-static void vsc9959_pcs_config_usxgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- /* Configure device ability for the USXGMII Replicator */
- phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
- MDIO_USXGMII_2500FULL |
- MDIO_USXGMII_LINK |
- ADVERTISE_SGMII |
- ADVERTISE_LPACK);
-}
-
-void vsc9959_pcs_config(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- const struct phylink_link_state *state)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- /* The PCS does not implement the BMSR register fully, so capability
- * detection via genphy_read_abilities does not work. Since we can get
- * the PHY config word from the LPA register though, there is still
- * value in using the generic phy_resolve_aneg_linkmode function. So
- * populate the supported and advertising link modes manually here.
- */
- linkmode_set_bit_array(phy_basic_ports_array,
- ARRAY_SIZE(phy_basic_ports_array),
- pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, pcs->supported);
- linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, pcs->supported);
- if (pcs->interface == PHY_INTERFACE_MODE_2500BASEX ||
- pcs->interface == PHY_INTERFACE_MODE_USXGMII)
- linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
- pcs->supported);
- if (pcs->interface != PHY_INTERFACE_MODE_2500BASEX)
- linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- pcs->supported);
- phy_advertise_supported(pcs);
-
- if (!phylink_autoneg_inband(link_an_mode))
- return;
-
- switch (pcs->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_config_sgmii(pcs, link_an_mode, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- phydev_err(pcs, "AN not supported on 3.125GHz SerDes lane\n");
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- vsc9959_pcs_config_usxgmii(pcs, link_an_mode, state);
- break;
- default:
- dev_err(ocelot->dev, "Unsupported link mode %s\n",
- phy_modes(pcs->interface));
- }
-}
-
-static void vsc9959_pcs_link_up_sgmii(struct phy_device *pcs,
- unsigned int link_an_mode,
- int speed, int duplex)
-{
- u16 if_mode = ENETC_PCS_IF_MODE_SGMII_EN;
-
- switch (speed) {
- case SPEED_1000:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_1000);
- break;
- case SPEED_100:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_100);
- break;
- case SPEED_10:
- if_mode |= ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_10);
- break;
- default:
- phydev_err(pcs, "Invalid PCS speed %d\n", speed);
- return;
- }
-
- if (duplex == DUPLEX_HALF)
- if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
-
- phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
- phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
- * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
- * auto-negotiation of any link parameters. Electrically it is compatible with
- * a single lane of XAUI.
- * The hardware reference manual wants to call this mode SGMII, but it isn't
- * really, since the fundamental features of SGMII:
- * - Downgrading the link speed by duplicating symbols
- * - Auto-negotiation
- * are not there.
- * The speed is configured at 1000 in the IF_MODE and BMCR MDIO registers
- * because the clock frequency is actually given by a PLL configured in the
- * Reset Configuration Word (RCW).
- * Since there is no difference between fixed speed SGMII w/o AN and 802.3z w/o
- * AN, we call this PHY interface type 2500Base-X. In case a PHY negotiates a
- * lower link speed on line side, the system-side interface remains fixed at
- * 2500 Mbps and we do rate adaptation through pause frames.
- */
-static void vsc9959_pcs_link_up_2500basex(struct phy_device *pcs,
- unsigned int link_an_mode,
- int speed, int duplex)
-{
- u16 if_mode = ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500) |
- ENETC_PCS_IF_MODE_SGMII_EN;
-
- if (duplex == DUPLEX_HALF)
- if_mode |= ENETC_PCS_IF_MODE_DUPLEX_HALF;
-
- phy_write(pcs, ENETC_PCS_IF_MODE, if_mode);
- phy_clear_bits(pcs, MII_BMCR, BMCR_ANENABLE);
-}
-
-void vsc9959_pcs_link_up(struct ocelot *ocelot, int port,
- unsigned int link_an_mode,
- phy_interface_t interface,
- int speed, int duplex)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- if (phylink_autoneg_inband(link_an_mode))
- return;
-
- switch (interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_link_up_sgmii(pcs, link_an_mode, speed, duplex);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- vsc9959_pcs_link_up_2500basex(pcs, link_an_mode, speed,
- duplex);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- phydev_err(pcs, "USXGMII only supports in-band AN for now\n");
- break;
- default:
- dev_err(ocelot->dev, "Unsupported link mode %s\n",
- phy_modes(pcs->interface));
- }
-}
-
-static void vsc9959_pcs_link_state_resolve(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- state->an_complete = pcs->autoneg_complete;
- state->an_enabled = pcs->autoneg;
- state->link = pcs->link;
- state->duplex = pcs->duplex;
- state->speed = pcs->speed;
- /* SGMII AN does not negotiate flow control, but that's ok,
- * since phylink already knows that, and does:
- * link_state.pause |= pl->phy_state.pause;
- */
- state->pause = MLO_PAUSE_NONE;
-
- phydev_dbg(pcs,
- "mode=%s/%s/%s adv=%*pb lpa=%*pb link=%u an_enabled=%u an_complete=%u\n",
- phy_modes(pcs->interface),
- phy_speed_to_str(pcs->speed),
- phy_duplex_to_str(pcs->duplex),
- __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS, pcs->lp_advertising,
- pcs->link, pcs->autoneg, pcs->autoneg_complete);
-}
-
-static void vsc9959_pcs_link_state_sgmii(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int err;
-
- err = genphy_update_link(pcs);
- if (err < 0)
- return;
-
- if (pcs->autoneg_complete) {
- u16 lpa = phy_read(pcs, MII_LPA);
-
- mii_lpa_to_linkmode_lpa_sgmii(pcs->lp_advertising, lpa);
-
- phy_resolve_aneg_linkmode(pcs);
- }
-}
-
-static void vsc9959_pcs_link_state_2500basex(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int err;
-
- err = genphy_update_link(pcs);
- if (err < 0)
- return;
-
- pcs->speed = SPEED_2500;
- pcs->asym_pause = true;
- pcs->pause = true;
-}
-
-static void vsc9959_pcs_link_state_usxgmii(struct phy_device *pcs,
- struct phylink_link_state *state)
-{
- int status, lpa;
-
- status = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_BMSR);
- if (status < 0)
- return;
-
- pcs->autoneg = true;
- pcs->autoneg_complete = !!(status & BMSR_ANEGCOMPLETE);
- pcs->link = !!(status & BMSR_LSTATUS);
-
- if (!pcs->link || !pcs->autoneg_complete)
- return;
-
- lpa = phy_read_mmd(pcs, MDIO_MMD_VEND2, MII_LPA);
- if (lpa < 0)
- return;
-
- switch (lpa & MDIO_USXGMII_SPD_MASK) {
- case MDIO_USXGMII_10:
- pcs->speed = SPEED_10;
- break;
- case MDIO_USXGMII_100:
- pcs->speed = SPEED_100;
- break;
- case MDIO_USXGMII_1000:
- pcs->speed = SPEED_1000;
- break;
- case MDIO_USXGMII_2500:
- pcs->speed = SPEED_2500;
- break;
- default:
- break;
- }
-
- if (lpa & MDIO_USXGMII_FULL_DUPLEX)
- pcs->duplex = DUPLEX_FULL;
- else
- pcs->duplex = DUPLEX_HALF;
-}
-
-void vsc9959_pcs_link_state(struct ocelot *ocelot, int port,
- struct phylink_link_state *state)
-{
- struct felix *felix = ocelot_to_felix(ocelot);
- struct phy_device *pcs = felix->pcs[port];
-
- if (!pcs)
- return;
-
- pcs->speed = SPEED_UNKNOWN;
- pcs->duplex = DUPLEX_UNKNOWN;
- pcs->pause = 0;
- pcs->asym_pause = 0;
-
- switch (pcs->interface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_QSGMII:
- vsc9959_pcs_link_state_sgmii(pcs, state);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- vsc9959_pcs_link_state_2500basex(pcs, state);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- vsc9959_pcs_link_state_usxgmii(pcs, state);
- break;
- default:
- return;
- }
-
- vsc9959_pcs_link_state_resolve(pcs, state);
-}
-
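The handlers removed above are superseded by the shared Lynx PCS driver (drivers/net/pcs/pcs-lynx.c), which performs the same SGMII/2500Base-X/USXGMII resolution for every user of this PCS. As a condensed sketch (not the pcs-lynx implementation verbatim), the USXGMII speed decoding from the removed vsc9959_pcs_link_state_usxgmii() boils down to:

static int usxgmii_lpa_to_speed(u16 lpa)
{
	/* decode the link partner ability word, constants as used above */
	switch (lpa & MDIO_USXGMII_SPD_MASK) {
	case MDIO_USXGMII_10:
		return SPEED_10;
	case MDIO_USXGMII_100:
		return SPEED_100;
	case MDIO_USXGMII_1000:
		return SPEED_1000;
	case MDIO_USXGMII_2500:
		return SPEED_2500;
	default:
		return SPEED_UNKNOWN;
	}
}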
static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
unsigned long *supported,
struct phylink_link_state *state)
@@ -1182,6 +1009,8 @@ static u16 vsc9959_wm_enc(u16 value)
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
};
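The two new ocelot_ops callbacks let common ocelot code translate between a switch port index and its net device (used, for instance, when reporting trapped packets via devlink). A plausible sketch of the felix.c helpers wired in here, assuming they follow the usual DSA accessors (dsa_to_port(), dsa_port_from_netdev()):

static struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
	struct felix *felix = ocelot_to_felix(ocelot);
	struct dsa_switch *ds = felix->ds;

	if (!dsa_is_user_port(ds, port))
		return NULL;

	return dsa_to_port(ds, port)->slave;
}

static int felix_netdev_to_port(struct net_device *dev)
{
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(dev);
	if (IS_ERR(dp))
		return -EINVAL;

	return dp->index;
}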
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
@@ -1197,7 +1026,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
int rc;
felix->pcs = devm_kcalloc(dev, felix->info->num_ports,
- sizeof(struct phy_device *),
+ sizeof(struct lynx_pcs *),
GFP_KERNEL);
if (!felix->pcs) {
dev_err(dev, "failed to allocate array for PCS PHYs\n");
@@ -1248,18 +1077,26 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct phy_device *pcs;
- bool is_c45 = false;
+ struct mdio_device *pcs;
+ struct lynx_pcs *lynx;
- if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_USXGMII)
- is_c45 = true;
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
- pcs = get_phy_device(felix->imdio, port, is_c45);
+ if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
+ continue;
+
+ pcs = mdio_device_create(felix->imdio, port);
if (IS_ERR(pcs))
continue;
- pcs->interface = ocelot_port->phy_mode;
- felix->pcs[port] = pcs;
+ lynx = lynx_pcs_create(pcs);
+ if (!lynx) {
+ mdio_device_free(pcs);
+ continue;
+ }
+
+ felix->pcs[port] = lynx;
dev_info(dev, "Found PCS at internal MDIO address %d\n", port);
}
@@ -1267,18 +1104,19 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return 0;
}
-void vsc9959_mdio_bus_free(struct ocelot *ocelot)
+static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
{
struct felix *felix = ocelot_to_felix(ocelot);
int port;
for (port = 0; port < ocelot->num_phys_ports; port++) {
- struct phy_device *pcs = felix->pcs[port];
+ struct lynx_pcs *pcs = felix->pcs[port];
if (!pcs)
continue;
- put_device(&pcs->mdio.dev);
+ mdio_device_free(pcs->mdio);
+ lynx_pcs_destroy(pcs);
}
mdiobus_unregister(felix->imdio);
}
@@ -1488,6 +1326,8 @@ static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port)
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 *template = ocelot_port->xmit_template;
u64 bypass, dest, src;
+ __be32 *prefix;
+ u8 *injection;
/* Set the source port as the CPU port module and not the
* NPI port
@@ -1496,9 +1336,14 @@ static void vsc9959_xmit_template_populate(struct ocelot *ocelot, int port)
dest = BIT(port);
bypass = true;
- packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+ injection = template + OCELOT_SHORT_PREFIX_LEN;
+ prefix = (__be32 *)template;
+
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+
+ *prefix = cpu_to_be32(0x8880000a);
}
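The injection header fields are written with the generic packing() helper from include/linux/packing.h, which (de)serializes a u64 into an arbitrary bit range of a buffer. For instance, writing the one-hot destination port mask into bits 68..56 of the 128-bit header looks like:

#include <linux/packing.h>

u8 injection[OCELOT_TAG_LEN] = {};
u64 dest = BIT(port);	/* one-hot destination port mask */

/* write 'dest' into bits 68 down to 56 of the 16-byte buffer */
packing(injection, &dest, 68, 56, OCELOT_TAG_LEN, PACK, 0);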
static const struct felix_info felix_info_vsc9959 = {
@@ -1510,8 +1355,6 @@ static const struct felix_info felix_info_vsc9959 = {
.ops = &vsc9959_ops,
.stats_layout = vsc9959_stats_layout,
.num_stats = ARRAY_SIZE(vsc9959_stats_layout),
- .vcap_is2_keys = vsc9959_vcap_is2_keys,
- .vcap_is2_actions = vsc9959_vcap_is2_actions,
.vcap = vsc9959_vcap_props,
.shared_queue_sz = 128 * 1024,
.num_mact_rows = 2048,
@@ -1519,15 +1362,13 @@ static const struct felix_info felix_info_vsc9959 = {
.num_tx_queues = FELIX_NUM_TC,
.switch_pci_bar = 4,
.imdio_pci_bar = 0,
+ .ptp_caps = &vsc9959_ptp_caps,
.mdio_bus_alloc = vsc9959_mdio_bus_alloc,
.mdio_bus_free = vsc9959_mdio_bus_free,
- .pcs_config = vsc9959_pcs_config,
- .pcs_link_up = vsc9959_pcs_link_up,
- .pcs_link_state = vsc9959_pcs_link_state,
.phylink_validate = vsc9959_phylink_validate,
.prevalidate_phy_mode = vsc9959_prevalidate_phy_mode,
- .port_setup_tc = vsc9959_port_setup_tc,
- .port_sched_speed_set = vsc9959_sched_speed_set,
+ .port_setup_tc = vsc9959_port_setup_tc,
+ .port_sched_speed_set = vsc9959_sched_speed_set,
.xmit_template_populate = vsc9959_xmit_template_populate,
};
@@ -1663,9 +1504,13 @@ static struct pci_device_id felix_ids[] = {
};
MODULE_DEVICE_TABLE(pci, felix_ids);
-struct pci_driver felix_vsc9959_pci_driver = {
+static struct pci_driver felix_vsc9959_pci_driver = {
.name = "mscc_felix",
.id_table = felix_ids,
.probe = felix_pci_probe,
.remove = felix_pci_remove,
};
+module_pci_driver(felix_vsc9959_pci_driver);
+
+MODULE_DESCRIPTION("Felix Switch driver");
+MODULE_LICENSE("GPL v2");
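module_pci_driver() replaces the open-coded registration that previously lived outside this file; it expands to the usual init/exit boilerplate, roughly:

static int __init felix_vsc9959_pci_driver_init(void)
{
	return pci_register_driver(&felix_vsc9959_pci_driver);
}
module_init(felix_vsc9959_pci_driver_init);

static void __exit felix_vsc9959_pci_driver_exit(void)
{
	pci_unregister_driver(&felix_vsc9959_pci_driver);
}
module_exit(felix_vsc9959_pci_driver_exit);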
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 9e9fd19e1d00..76576cf0ba8a 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -7,31 +7,17 @@
#include <soc/mscc/ocelot_sys.h>
#include <soc/mscc/ocelot.h>
#include <linux/of_platform.h>
+#include <linux/pcs-lynx.h>
#include <linux/packing.h>
#include <linux/iopoll.h>
#include "felix.h"
-#define VSC9953_VCAP_IS2_CNT 1024
-#define VSC9953_VCAP_IS2_ENTRY_WIDTH 376
-#define VSC9953_VCAP_PORT_CNT 10
-
-#define MSCC_MIIM_REG_STATUS 0x0
-#define MSCC_MIIM_STATUS_STAT_BUSY BIT(3)
-#define MSCC_MIIM_REG_CMD 0x8
-#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
-#define MSCC_MIIM_CMD_OPR_READ BIT(2)
-#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
-#define MSCC_MIIM_CMD_REGAD_SHIFT 20
-#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
-#define MSCC_MIIM_CMD_VLD BIT(31)
-#define MSCC_MIIM_REG_DATA 0xC
-#define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17))
-
-#define MSCC_PHY_REG_PHY_CFG 0x0
-#define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-#define PHY_CFG_PHY_COMMON_RESET BIT(4)
-#define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8))
-#define MSCC_PHY_REG_PHY_STATUS 0x4
+#define MSCC_MIIM_CMD_OPR_WRITE BIT(1)
+#define MSCC_MIIM_CMD_OPR_READ BIT(2)
+#define MSCC_MIIM_CMD_WRDATA_SHIFT 4
+#define MSCC_MIIM_CMD_REGAD_SHIFT 20
+#define MSCC_MIIM_CMD_PHYAD_SHIFT 25
+#define MSCC_MIIM_CMD_VLD BIT(31)
static const u32 vsc9953_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x00b500),
@@ -150,14 +136,27 @@ static const u32 vsc9953_qs_regmap[] = {
REG_RESERVED(QS_INH_DBG),
};
-static const u32 vsc9953_s2_regmap[] = {
- REG(S2_CORE_UPDATE_CTRL, 0x000000),
- REG(S2_CORE_MV_CFG, 0x000004),
- REG(S2_CACHE_ENTRY_DAT, 0x000008),
- REG(S2_CACHE_MASK_DAT, 0x000108),
- REG(S2_CACHE_ACTION_DAT, 0x000208),
- REG(S2_CACHE_CNT_DAT, 0x000308),
- REG(S2_CACHE_TG_DAT, 0x000388),
+static const u32 vsc9953_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG_RESERVED(VCAP_CONST_CORE_CNT),
+ REG_RESERVED(VCAP_CONST_IF_CNT),
};
static const u32 vsc9953_qsys_regmap[] = {
@@ -362,7 +361,9 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
[QSYS] = vsc9953_qsys_regmap,
[REW] = vsc9953_rew_regmap,
[SYS] = vsc9953_sys_regmap,
- [S2] = vsc9953_s2_regmap,
+ [S0] = vsc9953_vcap_regmap,
+ [S1] = vsc9953_vcap_regmap,
+ [S2] = vsc9953_vcap_regmap,
[GCB] = vsc9953_gcb_regmap,
[DEV_GMII] = vsc9953_dev_gmii_regmap,
};
@@ -394,6 +395,16 @@ static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
.end = 0x001ffff,
.name = "sys",
},
+ [S0] = {
+ .start = 0x0040000,
+ .end = 0x00403ff,
+ .name = "s0",
+ },
+ [S1] = {
+ .start = 0x0050000,
+ .end = 0x00503ff,
+ .name = "s1",
+ },
[S2] = {
.start = 0x0060000,
.end = 0x00603ff,
@@ -609,6 +620,112 @@ static const struct ocelot_stat_layout vsc9953_stats_layout[] = {
{ .offset = 0x91, .name = "drop_green_prio_7", },
};
+static const struct vcap_field vsc9953_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4},
+ [VCAP_ES0_IGR_PORT] = { 4, 4},
+ [VCAP_ES0_RSV] = { 8, 2},
+ [VCAP_ES0_L2_MC] = { 10, 1},
+ [VCAP_ES0_L2_BC] = { 11, 1},
+ [VCAP_ES0_VID] = { 12, 12},
+ [VCAP_ES0_DP] = { 24, 1},
+ [VCAP_ES0_PCP] = { 25, 3},
+};
+
+static const struct vcap_field vsc9953_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 24},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
+};
+
+static const struct vcap_field vsc9953_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 11},
+ [VCAP_IS1_HK_RSV] = { 14, 10},
+ /* VCAP_IS1_HK_OAM_Y1731 not supported */
+ [VCAP_IS1_HK_L2_MC] = { 24, 1},
+ [VCAP_IS1_HK_L2_BC] = { 25, 1},
+ [VCAP_IS1_HK_IP_MC] = { 26, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 27, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 28, 1},
+ [VCAP_IS1_HK_TPID] = { 29, 1},
+ [VCAP_IS1_HK_VID] = { 30, 12},
+ [VCAP_IS1_HK_DEI] = { 42, 1},
+ [VCAP_IS1_HK_PCP] = { 43, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 46, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 94, 1},
+ [VCAP_IS1_HK_ETYPE] = { 95, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {111, 1},
+ [VCAP_IS1_HK_IP4] = {112, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {113, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {114, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {115, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {116, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {122, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {154, 1},
+ [VCAP_IS1_HK_TCP] = {155, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {156, 16},
+ [VCAP_IS1_HK_L4_RNG] = {172, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 46, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 47, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 59, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 60, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 63, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 64, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 65, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 66, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 67, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 73, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {105, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {137, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {145, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {146, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {147, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {155, 32},
+};
+
+static const struct vcap_field vsc9953_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 11},
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 40, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 41, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 53, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 55, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 68, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 69, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 72, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 73, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 74, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 76, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 80, 1},
+};
+
static struct vcap_field vsc9953_vcap_is2_keys[] = {
/* Common: 41 bits */
[VCAP_IS2_TYPE] = { 0, 4},
@@ -694,15 +811,32 @@ static struct vcap_field vsc9953_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_CNT] = { 50, 32},
};
-static const struct vcap_props vsc9953_vcap_props[] = {
+static struct vcap_props vsc9953_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc9953_vcap_es0_keys,
+ .actions = vsc9953_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 80, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc9953_vcap_is1_keys,
+ .actions = vsc9953_vcap_is1_actions,
+ },
[VCAP_IS2] = {
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VSC9953_VCAP_IS2_CNT,
- .entry_width = VSC9953_VCAP_IS2_ENTRY_WIDTH,
- .action_count = VSC9953_VCAP_IS2_CNT +
- VSC9953_VCAP_PORT_CNT + 2,
- .action_width = 101,
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
@@ -714,8 +848,9 @@ static const struct vcap_props vsc9953_vcap_props[] = {
.count = 4
},
},
- .counter_words = 4,
- .counter_width = 32,
+ .target = S2,
+ .keys = vsc9953_vcap_is2_keys,
+ .actions = vsc9953_vcap_is2_actions,
},
};
@@ -819,6 +954,10 @@ out:
return err;
}
+/* CORE_ENA, MEM_INIT and MEM_ENA all live in SYS:SYSTEM:RESET_CFG */
static int vsc9953_reset(struct ocelot *ocelot)
{
int val, err;
@@ -834,8 +973,8 @@ static int vsc9953_reset(struct ocelot *ocelot)
}
/* initialize switch mem ~40us */
- ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
+ ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);
err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val,
VSC9953_SYS_RAMINIT_SLEEP,
@@ -846,7 +985,6 @@ static int vsc9953_reset(struct ocelot *ocelot)
}
/* enable switch core */
- ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);
return 0;
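The two hunks above reorder the RAM initialization: the memories must be enabled (MEM_ENA) before the self-clearing MEM_INIT trigger is written, and the core is only enabled once initialization completes. Condensed (the VSC9953_SYS_RAMINIT_TIMEOUT constant is assumed from the unchanged context), the sequence becomes:

ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_ENA, 1);
ocelot_field_write(ocelot, SYS_RESET_CFG_MEM_INIT, 1);

/* MEM_INIT self-clears when the ~40us initialization is done */
err = readx_poll_timeout(vsc9953_sys_ram_init_status, ocelot, val, !val,
			 VSC9953_SYS_RAMINIT_SLEEP,
			 VSC9953_SYS_RAMINIT_TIMEOUT);
if (!err)
	ocelot_field_write(ocelot, SYS_RESET_CFG_CORE_ENA, 1);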
@@ -922,6 +1060,8 @@ static u16 vsc9953_wm_enc(u16 value)
static const struct ocelot_ops vsc9953_ops = {
.reset = vsc9953_reset,
.wm_enc = vsc9953_wm_enc,
+ .port_to_netdev = felix_port_to_netdev,
+ .netdev_to_port = felix_netdev_to_port,
};
static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
@@ -962,18 +1102,27 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
for (port = 0; port < felix->info->num_ports; port++) {
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct phy_device *pcs;
int addr = port + 4;
+ struct mdio_device *pcs;
+ struct lynx_pcs *lynx;
+
+ if (dsa_is_unused_port(felix->ds, port))
+ continue;
if (ocelot_port->phy_mode == PHY_INTERFACE_MODE_INTERNAL)
continue;
- pcs = get_phy_device(felix->imdio, addr, false);
+ pcs = mdio_device_create(felix->imdio, addr);
if (IS_ERR(pcs))
continue;
- pcs->interface = ocelot_port->phy_mode;
- felix->pcs[port] = pcs;
+ lynx = lynx_pcs_create(pcs);
+ if (!lynx) {
+ mdio_device_free(pcs);
+ continue;
+ }
+
+ felix->pcs[port] = lynx;
dev_info(dev, "Found PCS at internal MDIO address %d\n", addr);
}
@@ -981,11 +1130,30 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
return 0;
}
+static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
+{
+ struct felix *felix = ocelot_to_felix(ocelot);
+ int port;
+
+ for (port = 0; port < ocelot->num_phys_ports; port++) {
+ struct lynx_pcs *pcs = felix->pcs[port];
+
+ if (!pcs)
+ continue;
+
+ mdio_device_free(pcs->mdio);
+ lynx_pcs_destroy(pcs);
+ }
+ mdiobus_unregister(felix->imdio);
+}
+
static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u8 *template = ocelot_port->xmit_template;
u64 bypass, dest, src;
+ __be32 *prefix;
+ u8 *injection;
/* Set the source port as the CPU port module and not the
* NPI port
@@ -994,9 +1162,14 @@ static void vsc9953_xmit_template_populate(struct ocelot *ocelot, int port)
dest = BIT(port);
bypass = true;
- packing(template, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
- packing(template, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+ injection = template + OCELOT_SHORT_PREFIX_LEN;
+ prefix = (__be32 *)template;
+
+ packing(injection, &bypass, 127, 127, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &dest, 67, 57, OCELOT_TAG_LEN, PACK, 0);
+ packing(injection, &src, 46, 43, OCELOT_TAG_LEN, PACK, 0);
+
+ *prefix = cpu_to_be32(0x88800005);
}
static const struct felix_info seville_info_vsc9953 = {
@@ -1007,17 +1180,12 @@ static const struct felix_info seville_info_vsc9953 = {
.ops = &vsc9953_ops,
.stats_layout = vsc9953_stats_layout,
.num_stats = ARRAY_SIZE(vsc9953_stats_layout),
- .vcap_is2_keys = vsc9953_vcap_is2_keys,
- .vcap_is2_actions = vsc9953_vcap_is2_actions,
.vcap = vsc9953_vcap_props,
.shared_queue_sz = 2048 * 1024,
.num_mact_rows = 2048,
.num_ports = 10,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
- .mdio_bus_free = vsc9959_mdio_bus_free,
- .pcs_config = vsc9959_pcs_config,
- .pcs_link_up = vsc9959_pcs_link_up,
- .pcs_link_state = vsc9959_pcs_link_state,
+ .mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.prevalidate_phy_mode = vsc9953_prevalidate_phy_mode,
.xmit_template_populate = vsc9953_xmit_template_populate,
@@ -1096,7 +1264,7 @@ static const struct of_device_id seville_of_match[] = {
};
MODULE_DEVICE_TABLE(of, seville_of_match);
-struct platform_driver seville_vsc9953_driver = {
+static struct platform_driver seville_vsc9953_driver = {
.probe = seville_probe,
.remove = seville_remove,
.driver = {
@@ -1104,3 +1272,7 @@ struct platform_driver seville_vsc9953_driver = {
.of_match_table = of_match_ptr(seville_of_match),
},
};
+module_platform_driver(seville_vsc9953_driver);
+
+MODULE_DESCRIPTION("Seville Switch driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index f1e484477e35..53064e0e1618 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -1294,10 +1294,14 @@ qca8k_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int
-qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+qca8k_port_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct qca8k_priv *priv = ds->priv;
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
if (vlan_filtering) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
QCA8K_PORT_LOOKUP_VLAN_MODE,
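The .port_vlan_filtering callback now follows the switchdev two-phase transaction model: validation happens in the prepare phase, hardware writes in the commit phase. The phase check used above is the standard helper from include/net/switchdev.h:

static inline bool switchdev_trans_ph_prepare(struct switchdev_trans *trans)
{
	return trans && trans->ph_prepare;
}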
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index fae188c60191..8e49d4f85d48 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -394,9 +394,10 @@ static int realtek_smi_probe(struct platform_device *pdev)
var = of_device_get_match_data(dev);
np = dev->of_node;
- smi = devm_kzalloc(dev, sizeof(*smi), GFP_KERNEL);
+ smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
if (!smi)
return -ENOMEM;
+ smi->chip_data = (void *)smi + sizeof(*smi);
smi->map = devm_regmap_init(dev, NULL, smi,
&realtek_smi_mdio_regmap_config);
if (IS_ERR(smi->map)) {
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index 9a63b51e1d82..6b6a3dec0984 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -25,6 +25,9 @@ struct rtl8366_mib_counter {
const char *name;
};
+/**
+ * struct rtl8366_vlan_mc - Virtual LAN member configuration
+ */
struct rtl8366_vlan_mc {
u16 vid;
u16 untag;
@@ -68,6 +71,7 @@ struct realtek_smi {
int vlan4k_enabled;
char buf[4096];
+ void *chip_data; /* Per-chip extra variant data */
};
/**
@@ -108,6 +112,7 @@ struct realtek_smi_variant {
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
+ size_t chip_data_sz;
};
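chip_data_sz lets the SMI core carve out variant-specific state in a single devm allocation, tail-appended to struct realtek_smi; a variant driver then simply casts smi->chip_data. Condensed from the probe hunk above and the rtl8366rb usage further below:

/* core side: one allocation covers both structures */
smi = devm_kzalloc(dev, sizeof(*smi) + var->chip_data_sz, GFP_KERNEL);
if (!smi)
	return -ENOMEM;
smi->chip_data = (void *)smi + sizeof(*smi);

/* variant side: recover the tail-allocated per-chip data */
struct rtl8366rb *rb = smi->chip_data;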
/* SMI core calls */
@@ -119,7 +124,6 @@ int realtek_smi_setup_mdio(struct realtek_smi *smi);
int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used);
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid);
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val);
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid);
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
@@ -127,7 +131,8 @@ int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
int rtl8366_init_vlan(struct realtek_smi *smi);
int rtl8366_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering);
+ bool vlan_filtering,
+ struct switchdev_trans *trans);
int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void rtl8366_vlan_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index a8c5a934c3d3..307466b90489 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -36,12 +36,113 @@ int rtl8366_mc_is_used(struct realtek_smi *smi, int mc_index, int *used)
}
EXPORT_SYMBOL_GPL(rtl8366_mc_is_used);
+/**
+ * rtl8366_obtain_mc() - retrieve or allocate a VLAN member configuration
+ * @smi: the Realtek SMI device instance
+ * @vid: the VLAN ID to look up or allocate
+ * @vlanmc: filled in with a copy of the matching (or newly seeded) member
+ * config on success
+ * Return: index of the member config, or a negative error number
+ */
+static int rtl8366_obtain_mc(struct realtek_smi *smi, int vid,
+ struct rtl8366_vlan_mc *vlanmc)
+{
+ struct rtl8366_vlan_4k vlan4k;
+ int ret;
+ int i;
+
+ /* Try to find an existing member config entry for this VID */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vid == vlanmc->vid)
+ return i;
+ }
+
+ /* We have no MC entry for this VID, try to find an empty one */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ ret = smi->ops->get_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "error searching for VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ if (vlanmc->vid == 0 && vlanmc->member == 0) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret) {
+ dev_err(smi->dev, "error looking for 4K VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "created new MC at index %d for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ /* MC table is full, try to find an unused entry and replace it */
+ for (i = 0; i < smi->num_vlan_mc; i++) {
+ int used;
+
+ ret = rtl8366_mc_is_used(smi, i, &used);
+ if (ret)
+ return ret;
+
+ if (!used) {
+ /* Update the entry from the 4K table */
+ ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
+ if (ret)
+ return ret;
+
+ vlanmc->vid = vid;
+ vlanmc->member = vlan4k.member;
+ vlanmc->untag = vlan4k.untag;
+ vlanmc->fid = vlan4k.fid;
+ ret = smi->ops->set_vlan_mc(smi, i, vlanmc);
+ if (ret) {
+ dev_err(smi->dev, "unable to set/update VLAN MC %d for VID %d\n",
+ i, vid);
+ return ret;
+ }
+ dev_dbg(smi->dev, "recycled MC at index %i for VID %d\n",
+ i, vid);
+ return i;
+ }
+ }
+
+ dev_err(smi->dev, "all VLAN member configurations are in use\n");
+ return -ENOSPC;
+}
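Both rtl8366_set_vlan() and rtl8366_set_pvid() below funnel through this helper. The calling pattern is: look up or allocate the MC slot, mutate the copy the helper filled in, then write it back:

struct rtl8366_vlan_mc vlanmc;
int mc;

mc = rtl8366_obtain_mc(smi, vid, &vlanmc);
if (mc < 0)
	return mc;

vlanmc.member |= BIT(port);	/* modify the cached copy */
return smi->ops->set_vlan_mc(smi, mc, &vlanmc);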
+
int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
u32 untag, u32 fid)
{
+ struct rtl8366_vlan_mc vlanmc;
struct rtl8366_vlan_4k vlan4k;
+ int mc;
int ret;
- int i;
+
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
dev_dbg(smi->dev,
"setting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
@@ -63,133 +164,58 @@ int rtl8366_set_vlan(struct realtek_smi *smi, int vid, u32 member,
"resulting VLAN%d 4k members: 0x%02x, untagged: 0x%02x\n",
vid, vlan4k.member, vlan4k.untag);
- /* Try to find an existing MC entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- struct rtl8366_vlan_mc vlanmc;
-
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vid == vlanmc.vid) {
- /* update the MC entry */
- vlanmc.member |= member;
- vlanmc.untag |= untag;
- vlanmc.fid = fid;
-
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
- dev_dbg(smi->dev,
- "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
- vid, vlanmc.member, vlanmc.untag);
+ /* Update the MC entry */
+ vlanmc.member |= member;
+ vlanmc.untag |= untag;
+ vlanmc.fid = fid;
- break;
- }
- }
+ /* Commit updates to the MC entry */
+ ret = smi->ops->set_vlan_mc(smi, mc, &vlanmc);
+ if (ret)
+ dev_err(smi->dev, "failed to commit changes to VLAN MC index %d for VID %d\n",
+ mc, vid);
+ else
+ dev_dbg(smi->dev,
+ "resulting VLAN%d MC members: 0x%02x, untagged: 0x%02x\n",
+ vid, vlanmc.member, vlanmc.untag);
return ret;
}
EXPORT_SYMBOL_GPL(rtl8366_set_vlan);
-int rtl8366_get_pvid(struct realtek_smi *smi, int port, int *val)
-{
- struct rtl8366_vlan_mc vlanmc;
- int ret;
- int index;
-
- ret = smi->ops->get_mc_index(smi, port, &index);
- if (ret)
- return ret;
-
- ret = smi->ops->get_vlan_mc(smi, index, &vlanmc);
- if (ret)
- return ret;
-
- *val = vlanmc.vid;
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_get_pvid);
-
int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
unsigned int vid)
{
struct rtl8366_vlan_mc vlanmc;
- struct rtl8366_vlan_4k vlan4k;
+ int mc;
int ret;
- int i;
-
- /* Try to find an existing MC entry for this VID */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vid == vlanmc.vid) {
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
- }
-
- /* We have no MC entry for this VID, try to find an empty one */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- ret = smi->ops->get_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- if (vlanmc.vid == 0 && vlanmc.member == 0) {
- /* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
- return ret;
-
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
-
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
- }
-
- /* MC table is full, try to find an unused entry and replace it */
- for (i = 0; i < smi->num_vlan_mc; i++) {
- int used;
-
- ret = rtl8366_mc_is_used(smi, i, &used);
- if (ret)
- return ret;
- if (!used) {
- /* Update the entry from the 4K table */
- ret = smi->ops->get_vlan_4k(smi, vid, &vlan4k);
- if (ret)
- return ret;
+ if (!smi->ops->is_vlan_valid(smi, vid))
+ return -EINVAL;
- vlanmc.vid = vid;
- vlanmc.member = vlan4k.member;
- vlanmc.untag = vlan4k.untag;
- vlanmc.fid = vlan4k.fid;
- ret = smi->ops->set_vlan_mc(smi, i, &vlanmc);
- if (ret)
- return ret;
+ /* Find or allocate a member config for this VID */
+ ret = rtl8366_obtain_mc(smi, vid, &vlanmc);
+ if (ret < 0)
+ return ret;
+ mc = ret;
- ret = smi->ops->set_mc_index(smi, port, i);
- return ret;
- }
+ ret = smi->ops->set_mc_index(smi, port, mc);
+ if (ret) {
+ dev_err(smi->dev, "set PVID: failed to set MC index %d for port %d\n",
+ mc, port);
+ return ret;
}
- dev_err(smi->dev,
- "all VLAN member configurations are in use\n");
+ dev_dbg(smi->dev, "set PVID: the PVID for port %d set to %d using existing MC index %d\n",
+ port, vid, mc);
- return -ENOSPC;
+ return 0;
}
EXPORT_SYMBOL_GPL(rtl8366_set_pvid);
@@ -314,15 +340,20 @@ int rtl8366_init_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ struct switchdev_trans *trans)
{
struct realtek_smi *smi = ds->priv;
struct rtl8366_vlan_4k vlan4k;
int ret;
/* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (!smi->ops->is_vlan_valid(smi, port + 1))
+ return -EINVAL;
+
+ return 0;
+ }
dev_info(smi->dev, "%s filtering on port %d\n",
vlan_filtering ? "enable" : "disable",
@@ -389,7 +420,8 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
if (!smi->ops->is_vlan_valid(smi, vid))
return;
- dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
+ dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid_begin,
port,
untagged ? "untagged" : "tagged",
pvid ? " PVID" : "no PVID");
@@ -398,34 +430,29 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
dev_err(smi->dev, "port is DSA or CPU port\n");
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- int pvid_val = 0;
-
- dev_info(smi->dev, "add VLAN %04x\n", vid);
member |= BIT(port);
if (untagged)
untag |= BIT(port);
- /* To ensure that we have a valid MC entry for this VLAN,
- * initialize the port VLAN ID here.
- */
- ret = rtl8366_get_pvid(smi, port, &pvid_val);
- if (ret < 0) {
- dev_err(smi->dev, "could not lookup PVID for port %d\n",
- port);
- return;
- }
- if (pvid_val == 0) {
- ret = rtl8366_set_pvid(smi, port, vid);
- if (ret < 0)
- return;
- }
-
ret = rtl8366_set_vlan(smi, vid, member, untag, 0);
if (ret)
dev_err(smi->dev,
"failed to set up VLAN %04x",
vid);
+
+ if (!pvid)
+ continue;
+
+ ret = rtl8366_set_pvid(smi, port, vid);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to set PVID on port %d to VLAN %04x\n",
+ port, vid);
+ else
+ dev_dbg(smi->dev, "VLAN add: added VLAN %d with PVID on port %d\n",
+ vid, port);
}
}
EXPORT_SYMBOL_GPL(rtl8366_vlan_add);
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index 48f1ff746799..cfe56960f44b 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -35,7 +35,7 @@
#define RTL8366RB_SGCR_MAX_LENGTH_1522 RTL8366RB_SGCR_MAX_LENGTH(0x0)
#define RTL8366RB_SGCR_MAX_LENGTH_1536 RTL8366RB_SGCR_MAX_LENGTH(0x1)
#define RTL8366RB_SGCR_MAX_LENGTH_1552 RTL8366RB_SGCR_MAX_LENGTH(0x2)
-#define RTL8366RB_SGCR_MAX_LENGTH_9216 RTL8366RB_SGCR_MAX_LENGTH(0x3)
+#define RTL8366RB_SGCR_MAX_LENGTH_16000 RTL8366RB_SGCR_MAX_LENGTH(0x3)
#define RTL8366RB_SGCR_EN_VLAN BIT(13)
#define RTL8366RB_SGCR_EN_VLAN_4KTB BIT(14)
@@ -311,6 +311,14 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+};
+
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -712,6 +720,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_smi *smi = ds->priv;
const u16 *jam_table;
+ struct rtl8366rb *rb;
u32 chip_ver = 0;
u32 chip_id = 0;
int jam_size;
@@ -719,6 +728,8 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
int ret;
int i;
+ rb = smi->chip_data;
+
ret = regmap_read(smi->map, RTL8366RB_CHIP_ID_REG, &chip_id);
if (ret) {
dev_err(smi->dev, "unable to read chip id\n");
@@ -868,6 +879,9 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
RTL8366RB_SGCR_MAX_LENGTH_1536);
if (ret)
return ret;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++)
+ /* layer 2 size, see rtl8366rb_change_mtu() */
+ rb->max_mtu[i] = 1532;
/* Enable learning for all ports */
ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
@@ -969,8 +983,10 @@ static enum dsa_tag_protocol rtl8366_get_tag_protocol(struct dsa_switch *ds,
return DSA_TAG_PROTO_RTL4_A;
}
-static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void
+rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface, struct phy_device *phydev,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
{
struct realtek_smi *smi = ds->priv;
int ret;
@@ -978,25 +994,52 @@ static void rtl8366rb_adjust_link(struct dsa_switch *ds, int port,
if (port != smi->cpu_port)
return;
- dev_info(smi->dev, "adjust link on CPU port (%d)\n", port);
+ dev_dbg(smi->dev, "MAC link up on CPU port (%d)\n", port);
/* Force the fixed CPU port into 1Gbit mode, no autonegotiation */
ret = regmap_update_bits(smi->map, RTL8366RB_MAC_FORCE_CTRL_REG,
BIT(port), BIT(port));
- if (ret)
+ if (ret) {
+ dev_err(smi->dev, "failed to force 1Gbit on CPU port\n");
return;
+ }
ret = regmap_update_bits(smi->map, RTL8366RB_PAACR2,
0xFF00U,
RTL8366RB_PAACR_CPU_PORT << 8);
- if (ret)
+ if (ret) {
+ dev_err(smi->dev, "failed to set PAACR on CPU port\n");
return;
+ }
/* Enable the CPU port */
ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
0);
- if (ret)
+ if (ret) {
+ dev_err(smi->dev, "failed to enable the CPU port\n");
return;
+ }
+}
+
+static void
+rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (port != smi->cpu_port)
+ return;
+
+ dev_dbg(smi->dev, "MAC link down on CPU port (%d)\n", port);
+
+ /* Disable the CPU port */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PECR, BIT(port),
+ BIT(port));
+ if (ret) {
+ dev_err(smi->dev, "failed to disable the CPU port\n");
+ return;
+ }
}
static void rb8366rb_set_port_led(struct realtek_smi *smi,
@@ -1077,6 +1120,56 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
rb8366rb_set_port_led(smi, port, false);
}
+static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366rb *rb;
+ unsigned int max_mtu;
+ u32 len;
+ int i;
+
+ /* Cache the per-port MTU setting */
+ rb = smi->chip_data;
+ rb->max_mtu[port] = new_mtu;
+
+ /* Cap the frame length for the entire switch to the largest MTU
+ * set on any one port: the switch-wide limit must accommodate the
+ * biggest MTU in use.
+ *
+ * The first hardware setting, 1522 bytes, fits a 1500-byte IP
+ * packet plus the ethernet header and FCS (1518 bytes) plus the
+ * 4-byte CPU tag. Since this function receives the MTU as an SDU
+ * (i.e. without the DSA tag), the MTU that selects this setting
+ * is 1518 bytes. The same 4-byte DSA tag subtraction applies to
+ * the other settings.
+ */
+ max_mtu = 1518;
+ for (i = 0; i < RTL8366RB_NUM_PORTS; i++) {
+ if (rb->max_mtu[i] > max_mtu)
+ max_mtu = rb->max_mtu[i];
+ }
+ if (max_mtu <= 1518)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1522;
+ else if (max_mtu > 1518 && max_mtu <= 1532)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1536;
+ else if (max_mtu > 1532 && max_mtu <= 1548)
+ len = RTL8366RB_SGCR_MAX_LENGTH_1552;
+ else
+ len = RTL8366RB_SGCR_MAX_LENGTH_16000;
+
+ return regmap_update_bits(smi->map, RTL8366RB_SGCR,
+ RTL8366RB_SGCR_MAX_LENGTH_MASK,
+ len);
+}
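The if/else ladder maps the largest SDU across all ports onto one of the four hardware frame-length settings. Pulled out as an illustrative-only helper (the function name is hypothetical; the register values are the ones defined above):

static u32 rtl8366rb_len_setting(unsigned int max_mtu)
{
	if (max_mtu <= 1518)			/* 1522-byte frames */
		return RTL8366RB_SGCR_MAX_LENGTH_1522;
	if (max_mtu <= 1532)			/* 1536-byte frames */
		return RTL8366RB_SGCR_MAX_LENGTH_1536;
	if (max_mtu <= 1548)			/* 1552-byte frames */
		return RTL8366RB_SGCR_MAX_LENGTH_1552;
	return RTL8366RB_SGCR_MAX_LENGTH_16000;	/* jumbo */
}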
+
+static int rtl8366rb_max_mtu(struct dsa_switch *ds, int port)
+{
+ /* The max MTU is 16000 bytes, so we subtract the CPU tag
+ * and the max presented to the system is 15996 bytes.
+ */
+ return 15996;
+}
+
static int rtl8366rb_get_vlan_4k(struct realtek_smi *smi, u32 vid,
struct rtl8366_vlan_4k *vlan4k)
{
@@ -1255,7 +1348,7 @@ static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan >= max)
+ if (vlan == 0 || vlan > max)
return false;
return true;
@@ -1405,7 +1498,8 @@ static int rtl8366rb_detect(struct realtek_smi *smi)
static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
- .adjust_link = rtl8366rb_adjust_link,
+ .phylink_mac_link_up = rtl8366rb_mac_link_up,
+ .phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
@@ -1415,6 +1509,8 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
+ .port_change_mtu = rtl8366rb_change_mtu,
+ .port_max_mtu = rtl8366rb_max_mtu,
};
static const struct realtek_smi_ops rtl8366rb_smi_ops = {
@@ -1439,5 +1535,6 @@ const struct realtek_smi_variant rtl8366rb_variant = {
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
+ .chip_data_sz = sizeof(struct rtl8366rb),
};
EXPORT_SYMBOL_GPL(rtl8366rb_variant);
diff --git a/drivers/net/dsa/sja1105/Makefile b/drivers/net/dsa/sja1105/Makefile
index c88e56a29db8..a860e3a910be 100644
--- a/drivers/net/dsa/sja1105/Makefile
+++ b/drivers/net/dsa/sja1105/Makefile
@@ -6,6 +6,7 @@ sja1105-objs := \
sja1105_main.o \
sja1105_flower.o \
sja1105_ethtool.o \
+ sja1105_devlink.o \
sja1105_clocking.o \
sja1105_static_config.o \
sja1105_dynamic_config.o \
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index ba70b40a9a95..4ebc4a5a7b35 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -210,15 +210,15 @@ struct sja1105_private {
struct dsa_switch *ds;
struct list_head dsa_8021q_vlans;
struct list_head bridge_vlans;
- struct list_head crosschip_links;
struct sja1105_flow_block flow_block;
struct sja1105_port ports[SJA1105_NUM_PORTS];
/* Serializes transmission of management frames so that
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
- bool expect_dsa_8021q;
+ struct dsa_8021q_context *dsa_8021q_ctx;
enum sja1105_vlan_state vlan_state;
+ struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
struct sja1105_tagger_data tagger_data;
struct sja1105_ptp_data ptp_data;
@@ -245,9 +245,21 @@ enum sja1105_reset_reason {
int sja1105_static_config_reload(struct sja1105_private *priv,
enum sja1105_reset_reason reason);
-
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans);
void sja1105_frame_memory_partitioning(struct sja1105_private *priv);
+/* From sja1105_devlink.c */
+int sja1105_devlink_setup(struct dsa_switch *ds);
+void sja1105_devlink_teardown(struct dsa_switch *ds);
+int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
+
/* From sja1105_spi.c */
int sja1105_xfer_buf(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr,
@@ -258,6 +270,8 @@ int sja1105_xfer_u32(const struct sja1105_private *priv,
int sja1105_xfer_u64(const struct sja1105_private *priv,
sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
struct ptp_system_timestamp *ptp_sts);
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len);
int sja1105_static_config_upload(struct sja1105_private *priv);
int sja1105_inhibit_tx(const struct sja1105_private *priv,
unsigned long port_bitmap, bool tx_inhibited);
diff --git a/drivers/net/dsa/sja1105/sja1105_devlink.c b/drivers/net/dsa/sja1105/sja1105_devlink.c
new file mode 100644
index 000000000000..4a2ec395bcb0
--- /dev/null
+++ b/drivers/net/dsa/sja1105/sja1105_devlink.c
@@ -0,0 +1,262 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
+ * Copyright 2020 NXP Semiconductors
+ */
+#include "sja1105.h"
+
+/* Since devlink regions have a fixed size and the static config has a variable
+ * size, we need to calculate the maximum possible static config size by
+ * creating a dummy config with all table entries populated to the max, and
+ * getting its packed length. This is done dynamically rather than by
+ * hardcoding a number: not all static config tables are implemented yet, so
+ * a hardcoded value would go stale as tables are added.
+ */
+static size_t sja1105_static_config_get_max_size(struct sja1105_private *priv)
+{
+ struct sja1105_static_config config;
+ enum sja1105_blk_idx blk_idx;
+ int rc;
+
+ rc = sja1105_static_config_init(&config,
+ priv->info->static_ops,
+ priv->info->device_id);
+ if (rc)
+ return 0;
+
+ for (blk_idx = 0; blk_idx < BLK_IDX_MAX; blk_idx++) {
+ struct sja1105_table *table = &config.tables[blk_idx];
+
+ table->entry_count = table->ops->max_entry_count;
+ }
+
+ return sja1105_static_config_get_length(&config);
+}
+
+static int
+sja1105_region_static_config_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+ struct sja1105_private *priv = ds->priv;
+ size_t max_len, len;
+
+ len = sja1105_static_config_get_length(&priv->static_config);
+ max_len = sja1105_static_config_get_max_size(priv);
+
+ *data = kcalloc(max_len, sizeof(u8), GFP_KERNEL);
+ if (!*data)
+ return -ENOMEM;
+
+ return static_config_buf_prepare_for_upload(priv, *data, len);
+}
+
+static struct devlink_region_ops sja1105_region_static_config_ops = {
+ .name = "static-config",
+ .snapshot = sja1105_region_static_config_snapshot,
+ .destructor = kfree,
+};
+
+enum sja1105_region_id {
+ SJA1105_REGION_STATIC_CONFIG = 0,
+};
+
+struct sja1105_region {
+ const struct devlink_region_ops *ops;
+ size_t (*get_size)(struct sja1105_private *priv);
+};
+
+static struct sja1105_region sja1105_regions[] = {
+ [SJA1105_REGION_STATIC_CONFIG] = {
+ .ops = &sja1105_region_static_config_ops,
+ .get_size = sja1105_static_config_get_max_size,
+ },
+};
+
+static int sja1105_setup_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+ const struct devlink_region_ops *ops;
+ struct devlink_region *region;
+ u64 size;
+
+ priv->regions = kcalloc(num_regions, sizeof(struct devlink_region *),
+ GFP_KERNEL);
+ if (!priv->regions)
+ return -ENOMEM;
+
+ for (i = 0; i < num_regions; i++) {
+ size = sja1105_regions[i].get_size(priv);
+ ops = sja1105_regions[i].ops;
+
+ region = dsa_devlink_region_create(ds, ops, 1, size);
+ if (IS_ERR(region)) {
+ while (--i >= 0)
+ dsa_devlink_region_destroy(priv->regions[i]);
+ return PTR_ERR(region);
+ }
+
+ priv->regions[i] = region;
+ }
+
+ return 0;
+}
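The error path above follows the standard unwind-on-failure idiom; note the pre-decrement, which ensures only regions that were actually created get destroyed. Schematically, with create()/destroy()/obj as generic stand-ins:

for (i = 0; i < n; i++) {
	obj[i] = create(i);
	if (IS_ERR(obj[i])) {
		int err = PTR_ERR(obj[i]);

		while (--i >= 0)	/* entries 0..i-1 exist */
			destroy(obj[i]);
		return err;
	}
}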
+
+static void sja1105_teardown_devlink_regions(struct dsa_switch *ds)
+{
+ int i, num_regions = ARRAY_SIZE(sja1105_regions);
+ struct sja1105_private *priv = ds->priv;
+
+ for (i = 0; i < num_regions; i++)
+ dsa_devlink_region_destroy(priv->regions[i]);
+
+ kfree(priv->regions);
+}
+
+static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
+ bool *be_vlan)
+{
+ *be_vlan = priv->best_effort_vlan_filtering;
+
+ return 0;
+}
+
+static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
+ bool be_vlan)
+{
+ struct dsa_switch *ds = priv->ds;
+ bool vlan_filtering;
+ int port;
+ int rc = 0;
+
+ priv->best_effort_vlan_filtering = be_vlan;
+
+ rtnl_lock();
+ for (port = 0; port < ds->num_ports; port++) {
+ struct switchdev_trans trans;
+ struct dsa_port *dp;
+
+ if (!dsa_is_user_port(ds, port))
+ continue;
+
+ dp = dsa_to_port(ds, port);
+ vlan_filtering = dsa_port_is_vlan_filtering(dp);
+
+ trans.ph_prepare = true;
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ if (rc)
+ break;
+
+ trans.ph_prepare = false;
+ rc = sja1105_vlan_filtering(ds, port, vlan_filtering, &trans);
+ if (rc)
+ break;
+ }
+ rtnl_unlock();
+
+ return rc;
+}
+
+enum sja1105_devlink_param_id {
+ SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+};
+
+int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_get(priv,
+ &ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct sja1105_private *priv = ds->priv;
+ int err;
+
+ switch (id) {
+ case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
+ err = sja1105_best_effort_vlan_filtering_set(priv,
+ ctx->val.vbool);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static const struct devlink_param sja1105_devlink_params[] = {
+ DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
+ "best_effort_vlan_filtering",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+};
+
+static int sja1105_setup_devlink_params(struct dsa_switch *ds)
+{
+ return dsa_devlink_params_register(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
+{
+ dsa_devlink_params_unregister(ds, sja1105_devlink_params,
+ ARRAY_SIZE(sja1105_devlink_params));
+}
+
+int sja1105_devlink_info_get(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ rc = devlink_info_driver_name_put(req, "sja1105");
+ if (rc)
+ return rc;
+
+ rc = devlink_info_version_fixed_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID,
+ priv->info->name);
+ return rc;
+}
+
+int sja1105_devlink_setup(struct dsa_switch *ds)
+{
+ int rc;
+
+ rc = sja1105_setup_devlink_params(ds);
+ if (rc)
+ return rc;
+
+ rc = sja1105_setup_devlink_regions(ds);
+ if (rc < 0) {
+ sja1105_teardown_devlink_params(ds);
+ return rc;
+ }
+
+ return 0;
+}
+
+void sja1105_devlink_teardown(struct dsa_switch *ds)
+{
+ sja1105_teardown_devlink_params(ds);
+ sja1105_teardown_devlink_regions(ds);
+}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 75247f342124..b777d3f37573 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -97,10 +97,10 @@
#define SJA1105_SIZE_DYN_CMD 4
-#define SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+#define SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD \
SJA1105_SIZE_DYN_CMD
-#define SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD \
+#define SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD \
(SJA1105_SIZE_DYN_CMD + SJA1105_SIZE_VL_LOOKUP_ENTRY)
#define SJA1105ET_SIZE_MAC_CONFIG_DYN_ENTRY \
@@ -183,7 +183,7 @@ static size_t sja1105et_vl_lookup_entry_packing(void *buf, void *entry_ptr,
enum packing_op op)
{
struct sja1105_vl_lookup_entry *entry = entry_ptr;
- const int size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD;
+ const int size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD;
sja1105_packing(buf, &entry->egrmirr, 21, 17, size, op);
sja1105_packing(buf, &entry->ingrmirr, 16, 16, size, op);
@@ -644,7 +644,7 @@ const struct sja1105_dynamic_table_ops sja1105et_dyn_ops[BLK_IDX_MAX_DYN] = {
.cmd_packing = sja1105_vl_lookup_cmd_packing,
.access = OP_WRITE,
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
- .packed_size = SJA1105ET_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .packed_size = SJA1105ET_SIZE_VL_LOOKUP_DYN_CMD,
.addr = 0x35,
},
[BLK_IDX_L2_LOOKUP] = {
@@ -728,7 +728,7 @@ const struct sja1105_dynamic_table_ops sja1105pqrs_dyn_ops[BLK_IDX_MAX_DYN] = {
.cmd_packing = sja1105_vl_lookup_cmd_packing,
.access = (OP_READ | OP_WRITE),
.max_entry_count = SJA1105_MAX_VL_LOOKUP_COUNT,
- .packed_size = SJA1105PQRS_SJA1105_SIZE_VL_LOOKUP_DYN_CMD,
+ .packed_size = SJA1105PQRS_SIZE_VL_LOOKUP_DYN_CMD,
.addr = 0x47,
},
[BLK_IDX_L2_LOOKUP] = {
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 5a28dfb36ec3..4ca029650993 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1880,19 +1880,17 @@ static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
if (dsa_to_port(ds, port)->bridge_dev != br)
continue;
- other_priv->expect_dsa_8021q = true;
- rc = dsa_8021q_crosschip_bridge_join(ds, port, other_ds,
- other_port,
- &priv->crosschip_links);
- other_priv->expect_dsa_8021q = false;
+ rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
+ port,
+ other_priv->dsa_8021q_ctx,
+ other_port);
if (rc)
return rc;
- priv->expect_dsa_8021q = true;
- rc = dsa_8021q_crosschip_bridge_join(other_ds, other_port, ds,
- port,
- &other_priv->crosschip_links);
- priv->expect_dsa_8021q = false;
+ rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
+ other_port,
+ priv->dsa_8021q_ctx,
+ port);
if (rc)
return rc;
}
@@ -1919,33 +1917,24 @@ static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
if (dsa_to_port(ds, port)->bridge_dev != br)
continue;
- other_priv->expect_dsa_8021q = true;
- dsa_8021q_crosschip_bridge_leave(ds, port, other_ds, other_port,
- &priv->crosschip_links);
- other_priv->expect_dsa_8021q = false;
+ dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
+ other_priv->dsa_8021q_ctx,
+ other_port);
- priv->expect_dsa_8021q = true;
- dsa_8021q_crosschip_bridge_leave(other_ds, other_port, ds, port,
- &other_priv->crosschip_links);
- priv->expect_dsa_8021q = false;
+ dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
+ other_port,
+ priv->dsa_8021q_ctx, port);
}
}
static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
{
struct sja1105_private *priv = ds->priv;
- int rc, i;
+ int rc;
- for (i = 0; i < SJA1105_NUM_PORTS; i++) {
- priv->expect_dsa_8021q = true;
- rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
- priv->expect_dsa_8021q = false;
- if (rc < 0) {
- dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
- i, rc);
- return rc;
- }
- }
+ rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
+ if (rc)
+ return rc;
dev_info(ds->dev, "%s switch tagging\n",
enabled ? "Enabled" : "Disabled");
@@ -2149,12 +2138,12 @@ struct sja1105_crosschip_vlan {
bool untagged;
int port;
int other_port;
- struct dsa_switch *other_ds;
+ struct dsa_8021q_context *other_ctx;
};
struct sja1105_crosschip_switch {
struct list_head list;
- struct dsa_switch *other_ds;
+ struct dsa_8021q_context *other_ctx;
};
static int sja1105_commit_pvid(struct sja1105_private *priv)
@@ -2330,8 +2319,8 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
INIT_LIST_HEAD(&crosschip_vlans);
- list_for_each_entry(c, &priv->crosschip_links, list) {
- struct sja1105_private *other_priv = c->other_ds->priv;
+ list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
+ struct sja1105_private *other_priv = c->other_ctx->ds->priv;
if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
continue;
@@ -2341,7 +2330,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
*/
if (!dsa_is_user_port(priv->ds, c->port))
continue;
- if (!dsa_is_user_port(c->other_ds, c->other_port))
+ if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
continue;
/* Search for VLANs on the remote port */
@@ -2376,7 +2365,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
tmp->untagged == v->untagged &&
tmp->port == c->port &&
tmp->other_port == v->port &&
- tmp->other_ds == c->other_ds) {
+ tmp->other_ctx == c->other_ctx) {
already_added = true;
break;
}
@@ -2394,14 +2383,14 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
tmp->vid = v->vid;
tmp->port = c->port;
tmp->other_port = v->port;
- tmp->other_ds = c->other_ds;
+ tmp->other_ctx = c->other_ctx;
tmp->untagged = v->untagged;
list_add(&tmp->list, &crosschip_vlans);
}
}
list_for_each_entry(tmp, &crosschip_vlans, list) {
- struct sja1105_private *other_priv = tmp->other_ds->priv;
+ struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
int upstream = dsa_upstream_port(priv->ds, tmp->port);
int match, subvlan;
u16 rx_vid;
@@ -2418,7 +2407,7 @@ sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
goto out;
}
- rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ds,
+ rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
tmp->other_port,
subvlan);
@@ -2493,11 +2482,11 @@ static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
INIT_LIST_HEAD(&crosschip_switches);
- list_for_each_entry(c, &priv->crosschip_links, list) {
+ list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
bool already_added = false;
list_for_each_entry(s, &crosschip_switches, list) {
- if (s->other_ds == c->other_ds) {
+ if (s->other_ctx == c->other_ctx) {
already_added = true;
break;
}
@@ -2512,12 +2501,12 @@ static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
rc = -ENOMEM;
goto out;
}
- s->other_ds = c->other_ds;
+ s->other_ctx = c->other_ctx;
list_add(&s->list, &crosschip_switches);
}
list_for_each_entry(s, &crosschip_switches, list) {
- struct sja1105_private *other_priv = s->other_ds->priv;
+ struct sja1105_private *other_priv = s->other_ctx->ds->priv;
rc = sja1105_build_vlan_table(other_priv, false);
if (rc)
@@ -2618,16 +2607,6 @@ out:
return rc;
}
-/* Select the list to which we should add this VLAN. */
-static struct list_head *sja1105_classify_vlan(struct sja1105_private *priv,
- u16 vid)
-{
- if (priv->expect_dsa_8021q)
- return &priv->dsa_8021q_vlans;
-
- return &priv->bridge_vlans;
-}
-
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -2642,7 +2621,7 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
* configuration done by dsa_8021q.
*/
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- if (!priv->expect_dsa_8021q && vid_is_dsa_8021q(vid)) {
+ if (vid_is_dsa_8021q(vid)) {
dev_err(ds->dev, "Range 1024-3071 reserved for dsa_8021q operation\n");
return -EBUSY;
}
@@ -2655,7 +2634,8 @@ static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
* which can only be partially reconfigured at runtime (and not the TPID).
* So a switch reset is required.
*/
-static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
+int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
+ struct switchdev_trans *trans)
{
struct sja1105_l2_lookup_params_entry *l2_lookup_params;
struct sja1105_general_params_entry *general_params;
@@ -2667,12 +2647,16 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
u16 tpid, tpid2;
int rc;
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- if (rule->type == SJA1105_RULE_VL) {
- dev_err(ds->dev,
- "Cannot change VLAN filtering state while VL rules are active\n");
- return -EBUSY;
+ if (switchdev_trans_ph_prepare(trans)) {
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type == SJA1105_RULE_VL) {
+ dev_err(ds->dev,
+ "Cannot change VLAN filtering with active VL rules\n");
+ return -EBUSY;
+ }
}
+
+ return 0;
}
if (enabled) {
@@ -2762,6 +2746,54 @@ static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
return sja1105_setup_8021q_tagging(ds, want_tagging);
}
+/* Returns number of VLANs added (0 or 1) on success,
+ * or a negative error code.
+ */
+static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags, struct list_head *vlan_list)
+{
+ bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct sja1105_bridge_vlan *v;
+
+ list_for_each_entry(v, vlan_list, list)
+ if (v->port == port && v->vid == vid &&
+ v->untagged == untagged && v->pvid == pvid)
+ /* Already added */
+ return 0;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL);
+ if (!v) {
+ dev_err(ds->dev, "Out of memory while storing VLAN\n");
+ return -ENOMEM;
+ }
+
+ v->port = port;
+ v->vid = vid;
+ v->untagged = untagged;
+ v->pvid = pvid;
+ list_add(&v->list, vlan_list);
+
+ return 1;
+}
+
+/* Returns number of VLANs deleted (0 or 1) */
+static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
+ struct list_head *vlan_list)
+{
+ struct sja1105_bridge_vlan *v, *n;
+
+ list_for_each_entry_safe(v, n, vlan_list, list) {
+ if (v->port == port && v->vid == vid) {
+ list_del(&v->list);
+ kfree(v);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
static void sja1105_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
@@ -2771,38 +2803,12 @@ static void sja1105_vlan_add(struct dsa_switch *ds, int port,
int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
- struct sja1105_bridge_vlan *v;
- struct list_head *vlan_list;
- bool already_added = false;
-
- vlan_list = sja1105_classify_vlan(priv, vid);
-
- list_for_each_entry(v, vlan_list, list) {
- if (v->port == port && v->vid == vid &&
- v->untagged == untagged && v->pvid == pvid) {
- already_added = true;
- break;
- }
- }
-
- if (already_added)
- continue;
-
- v = kzalloc(sizeof(*v), GFP_KERNEL);
- if (!v) {
- dev_err(ds->dev, "Out of memory while storing VLAN\n");
+ rc = sja1105_vlan_add_one(ds, port, vid, vlan->flags,
+ &priv->bridge_vlans);
+ if (rc < 0)
return;
- }
-
- v->port = port;
- v->vid = vid;
- v->untagged = untagged;
- v->pvid = pvid;
- list_add(&v->list, vlan_list);
-
- vlan_table_changed = true;
+ if (rc > 0)
+ vlan_table_changed = true;
}
if (!vlan_table_changed)
@@ -2819,21 +2825,12 @@ static int sja1105_vlan_del(struct dsa_switch *ds, int port,
struct sja1105_private *priv = ds->priv;
bool vlan_table_changed = false;
u16 vid;
+ int rc;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
- struct sja1105_bridge_vlan *v, *n;
- struct list_head *vlan_list;
-
- vlan_list = sja1105_classify_vlan(priv, vid);
-
- list_for_each_entry_safe(v, n, vlan_list, list) {
- if (v->port == port && v->vid == vid) {
- list_del(&v->list);
- kfree(v);
- vlan_table_changed = true;
- break;
- }
- }
+ rc = sja1105_vlan_del_one(ds, port, vid, &priv->bridge_vlans);
+ if (rc > 0)
+ vlan_table_changed = true;
}
if (!vlan_table_changed)
@@ -2842,105 +2839,36 @@ static int sja1105_vlan_del(struct dsa_switch *ds, int port,
return sja1105_build_vlan_table(priv, true);
}
-static int sja1105_best_effort_vlan_filtering_get(struct sja1105_private *priv,
- bool *be_vlan)
-{
- *be_vlan = priv->best_effort_vlan_filtering;
-
- return 0;
-}
-
-static int sja1105_best_effort_vlan_filtering_set(struct sja1105_private *priv,
- bool be_vlan)
-{
- struct dsa_switch *ds = priv->ds;
- bool vlan_filtering;
- int port;
- int rc;
-
- priv->best_effort_vlan_filtering = be_vlan;
-
- rtnl_lock();
- for (port = 0; port < ds->num_ports; port++) {
- struct dsa_port *dp;
-
- if (!dsa_is_user_port(ds, port))
- continue;
-
- dp = dsa_to_port(ds, port);
- vlan_filtering = dsa_port_is_vlan_filtering(dp);
-
- rc = sja1105_vlan_filtering(ds, port, vlan_filtering);
- if (rc)
- break;
- }
- rtnl_unlock();
-
- return rc;
-}
-
-enum sja1105_devlink_param_id {
- SJA1105_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
-};
-
-static int sja1105_devlink_param_get(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
{
struct sja1105_private *priv = ds->priv;
- int err;
+ int rc;
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_get(priv,
- &ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
+ rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
+ if (rc <= 0)
+ return rc;
- return err;
+ return sja1105_build_vlan_table(priv, true);
}
-static int sja1105_devlink_param_set(struct dsa_switch *ds, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
struct sja1105_private *priv = ds->priv;
- int err;
+ int rc;
- switch (id) {
- case SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING:
- err = sja1105_best_effort_vlan_filtering_set(priv,
- ctx->val.vbool);
- break;
- default:
- err = -EOPNOTSUPP;
- break;
- }
+ rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
+ if (!rc)
+ return 0;
- return err;
+ return sja1105_build_vlan_table(priv, true);
}
-static const struct devlink_param sja1105_devlink_params[] = {
- DSA_DEVLINK_PARAM_DRIVER(SJA1105_DEVLINK_PARAM_ID_BEST_EFFORT_VLAN_FILTERING,
- "best_effort_vlan_filtering",
- DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
+static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
+ .vlan_add = sja1105_dsa_8021q_vlan_add,
+ .vlan_del = sja1105_dsa_8021q_vlan_del,
};
-static int sja1105_setup_devlink_params(struct dsa_switch *ds)
-{
- return dsa_devlink_params_register(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
-static void sja1105_teardown_devlink_params(struct dsa_switch *ds)
-{
- dsa_devlink_params_unregister(ds, sja1105_devlink_params,
- ARRAY_SIZE(sja1105_devlink_params));
-}
-
/* The programming model for the SJA1105 switch is "all-at-once" via static
* configuration tables. Some of these can be dynamically modified at runtime,
* but not the xMII mode parameters table.
@@ -3008,7 +2936,7 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->configure_vlan_while_not_filtering = true;
- rc = sja1105_setup_devlink_params(ds);
+ rc = sja1105_devlink_setup(ds);
if (rc < 0)
return rc;
@@ -3016,7 +2944,11 @@ static int sja1105_setup(struct dsa_switch *ds)
* default, and that means vlan_filtering is 0 since they're not under
* a bridge, so it's safe to set up switch tagging at this time.
*/
- return sja1105_setup_8021q_tagging(ds, true);
+ rtnl_lock();
+ rc = sja1105_setup_8021q_tagging(ds, true);
+ rtnl_unlock();
+
+ return rc;
}
static void sja1105_teardown(struct dsa_switch *ds)
@@ -3035,7 +2967,7 @@ static void sja1105_teardown(struct dsa_switch *ds)
kthread_destroy_worker(sp->xmit_worker);
}
- sja1105_teardown_devlink_params(ds);
+ sja1105_devlink_teardown(ds);
sja1105_flower_teardown(ds);
sja1105_tas_teardown(ds);
sja1105_ptp_clock_unregister(ds);
@@ -3389,6 +3321,7 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.crosschip_bridge_leave = sja1105_crosschip_bridge_leave,
.devlink_param_get = sja1105_devlink_param_get,
.devlink_param_set = sja1105_devlink_param_set,
+ .devlink_info_get = sja1105_devlink_info_get,
};
static const struct of_device_id sja1105_dt_ids[];
@@ -3504,7 +3437,16 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->mgmt_lock);
- INIT_LIST_HEAD(&priv->crosschip_links);
+ priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
+ GFP_KERNEL);
+ if (!priv->dsa_8021q_ctx)
+ return -ENOMEM;
+
+ priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
+ priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
+ priv->dsa_8021q_ctx->ds = ds;
+
+ INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
INIT_LIST_HEAD(&priv->bridge_vlans);
INIT_LIST_HEAD(&priv->dsa_8021q_vlans);
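The sja1105 hunks above move cross-chip bookkeeping into a shared dsa_8021q context and route VLAN programming through a small ops vtable, so the tag_8021q core calls back into the driver instead of poking switch-private lists. A minimal sketch of the wiring, restating only the fields visible in this diff (ops, proto, ds, crosschip_links):

        /* Sketch: handing the driver's VLAN callbacks to the dsa_8021q core.
         * Mirrors the sja1105_probe() hunk above; error paths trimmed.
         */
        static const struct dsa_8021q_ops example_8021q_ops = {
                .vlan_add = sja1105_dsa_8021q_vlan_add, /* fills priv->dsa_8021q_vlans */
                .vlan_del = sja1105_dsa_8021q_vlan_del,
        };

        priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
                                           GFP_KERNEL);
        if (!priv->dsa_8021q_ctx)
                return -ENOMEM;
        priv->dsa_8021q_ctx->ops = &example_8021q_ops;
        priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q); /* outer tag TPID */
        priv->dsa_8021q_ctx->ds = ds;
        INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);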
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 704dcf1d1c01..591c5734747d 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -302,9 +302,8 @@ static int sja1105_status_get(struct sja1105_private *priv,
* for upload requires the recalculation of table CRCs and updating the
* structures with these.
*/
-static int
-static_config_buf_prepare_for_upload(struct sja1105_private *priv,
- void *config_buf, int buf_len)
+int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
+ void *config_buf, int buf_len)
{
struct sja1105_static_config *config = &priv->static_config;
struct sja1105_table_header final_header;
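Dropping static here makes the buffer-preparation helper visible to other compilation units of the driver; that normally comes with a matching declaration in a shared header (presumably sja1105.h, which this diff does not show):

        /* Assumed companion declaration, not visible in this hunk: */
        int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
                                                 void *config_buf, int buf_len);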
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 049cc0158a64..05e15b6e5e2c 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -789,8 +789,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
* it with zeros to ETH_ZLEN for us.
*/
if (skb_shinfo(skb)->nr_frags == 0) {
- skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(skb->len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -800,8 +800,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
int i, len;
len = skb_headlen(skb);
- skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -818,8 +818,8 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
len = skb_frag_size(frag);
frag_addr = skb_frag_address(frag);
- skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
- PCI_DMA_TODEVICE);
+ skb_dma = dma_map_single(&tp->tx_pdev->dev, frag_addr,
+ len, DMA_TO_DEVICE);
txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
txd->len = cpu_to_le16(len);
txd->frag.addr = cpu_to_le32(skb_dma);
@@ -1349,12 +1349,12 @@ typhoon_download_firmware(struct typhoon *tp)
image_data = typhoon_fw->data;
fHdr = (struct typhoon_file_header *) image_data;
- /* Cannot just map the firmware image using pci_map_single() as
+ /* Cannot just map the firmware image using dma_map_single() as
* the firmware is vmalloc()'d and may not be physically contiguous,
- * so we allocate some consistent memory to copy the sections into.
+ * so we allocate some coherent memory to copy the sections into.
*/
err = -ENOMEM;
- dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
+ dpage = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dpage_dma, GFP_ATOMIC);
if (!dpage) {
netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
@@ -1459,7 +1459,7 @@ err_out_irq:
iowrite32(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
iowrite32(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);
- pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dpage, dpage_dma);
err_out:
return err;
@@ -1526,8 +1526,8 @@ typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
*/
skb_dma = (dma_addr_t) le32_to_cpu(tx->frag.addr);
dma_len = le16_to_cpu(tx->len);
- pci_unmap_single(tp->pdev, skb_dma, dma_len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev, skb_dma, dma_len,
+ DMA_TO_DEVICE);
}
tx->flags = 0;
@@ -1608,8 +1608,8 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
skb_reserve(skb, 2);
#endif
- dma_addr = pci_map_single(tp->pdev, skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_addr = dma_map_single(&tp->pdev->dev, skb->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
/* Since no card does 64 bit DAC, the high bits will never
* change from zero.
@@ -1664,20 +1664,19 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile __le32 * read
if (pkt_len < rx_copybreak &&
(new_skb = netdev_alloc_skb(tp->dev, pkt_len + 2)) != NULL) {
skb_reserve(new_skb, 2);
- pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
- PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
skb_copy_to_linear_data(new_skb, skb->data, pkt_len);
- pci_dma_sync_single_for_device(tp->pdev, dma_addr,
- PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev, dma_addr,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
skb_put(new_skb, pkt_len);
typhoon_recycle_rx_skb(tp, idx);
} else {
new_skb = skb;
skb_put(new_skb, pkt_len);
- pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
typhoon_alloc_rx_skb(tp, idx);
}
new_skb->protocol = eth_type_trans(new_skb, tp->dev);
@@ -1791,8 +1790,8 @@ typhoon_free_rx_rings(struct typhoon *tp)
for (i = 0; i < RXENT_ENTRIES; i++) {
struct rxbuff_ent *rxb = &tp->rxbuffers[i];
if (rxb->skb) {
- pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, rxb->dma_addr,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
dev_kfree_skb(rxb->skb);
rxb->skb = NULL;
}
@@ -2305,7 +2304,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_disable;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err < 0) {
err_msg = "No usable DMA configuration";
goto error_out_mwi;
@@ -2354,8 +2353,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* allocate pci dma space for rx and tx descriptor rings
*/
- shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
- &shared_dma);
+ shared = dma_alloc_coherent(&pdev->dev, sizeof(struct typhoon_shared),
+ &shared_dma, GFP_KERNEL);
if (!shared) {
err_msg = "could not allocate DMA memory";
err = -ENOMEM;
@@ -2508,8 +2507,8 @@ error_out_reset:
typhoon_reset(ioaddr, NoWait);
error_out_dma:
- pci_free_consistent(pdev, sizeof(struct typhoon_shared),
- shared, shared_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared), shared,
+ shared_dma);
error_out_remap:
pci_iounmap(pdev, ioaddr);
error_out_regions:
@@ -2536,8 +2535,8 @@ typhoon_remove_one(struct pci_dev *pdev)
pci_restore_state(pdev);
typhoon_reset(tp->ioaddr, NoWait);
pci_iounmap(pdev, tp->ioaddr);
- pci_free_consistent(pdev, sizeof(struct typhoon_shared),
- tp->shared, tp->shared_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct typhoon_shared),
+ tp->shared, tp->shared_dma);
pci_release_regions(pdev);
pci_clear_mwi(pdev);
pci_disable_device(pdev);
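Every typhoon change above is a mechanical move from the legacy pci_* DMA wrappers to the generic DMA API: the device argument becomes &pdev->dev, PCI_DMA_* direction flags become DMA_*, and coherent allocations gain an explicit gfp_t (pci_alloc_consistent() always implied GFP_ATOMIC, which is why the firmware-download path keeps GFP_ATOMIC while probe-time allocation can use GFP_KERNEL). The correspondences in one sketch, with error checks omitted for brevity:

        #include <linux/pci.h>
        #include <linux/dma-mapping.h>

        static void example_dma_api(struct pci_dev *pdev, void *buf, size_t len)
        {
                dma_addr_t h;
                void *p;

                /* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
                h = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
                /* was: pci_unmap_single(pdev, h, len, PCI_DMA_TODEVICE) */
                dma_unmap_single(&pdev->dev, h, len, DMA_TO_DEVICE);

                /* was: pci_alloc_consistent(pdev, PAGE_SIZE, &h), which
                 * hard-coded GFP_ATOMIC; the generic API makes it explicit
                 */
                p = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &h, GFP_KERNEL);
                if (p)
                        dma_free_coherent(&pdev->dev, PAGE_SIZE, p, h);
        }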
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index a00b36f91d9f..2488bfdb9133 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -657,8 +657,10 @@ static void block_input(struct net_device *dev, int count,
outb_p(E8390_RREAD+E8390_START, nic_base + AXNET_CMD);
insw(nic_base + AXNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + AXNET_DATAPORT);
+ xfer_count++;
+ }
}
@@ -1270,10 +1272,12 @@ static void ei_tx_intr(struct net_device *dev)
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
netif_trans_update(dev);
- ei_local->tx2 = -1,
+ ei_local->tx2 = -1;
ei_local->lasttx = 2;
+ } else {
+ ei_local->lasttx = 20;
+ ei_local->txing = 0;
}
- else ei_local->lasttx = 20, ei_local->txing = 0;
}
else if (ei_local->tx2 < 0)
{
@@ -1289,9 +1293,10 @@ static void ei_tx_intr(struct net_device *dev)
netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
+ } else {
+ ei_local->lasttx = 10;
+ ei_local->txing = 0;
}
- else
- ei_local->lasttx = 10, ei_local->txing = 0;
}
// else
// netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
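The axnet hunks are pure style: the comma operator packed two side effects into one statement, which is legal C but easy to misread and flagged by kernel coding style. The braced form keeps one statement per line:

        /* before: both side effects hang off one conditional expression */
        if (count & 0x01)
                buf[count - 1] = inb(nic_base + AXNET_DATAPORT), xfer_count++;

        /* after: explicit block, one statement per line */
        if (count & 0x01) {
                buf[count - 1] = inb(nic_base + AXNET_DATAPORT);
                xfer_count++;
        }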
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index babc92e2692e..e84021282edf 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -50,6 +50,7 @@
*/
+#include <linux/build_bug.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
@@ -112,8 +113,10 @@ static void do_set_multicast_list(struct net_device *dev);
static void __NS8390_init(struct net_device *dev, int startp);
static unsigned version_printed;
-static u32 msg_enable;
-module_param(msg_enable, uint, 0444);
+static int msg_enable;
+static const int default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR);
+module_param(msg_enable, int, 0444);
MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)");
/*
@@ -597,10 +600,12 @@ static void ei_tx_intr(struct net_device *dev)
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
netif_trans_update(dev);
- ei_local->tx2 = -1,
+ ei_local->tx2 = -1;
ei_local->lasttx = 2;
- } else
- ei_local->lasttx = 20, ei_local->txing = 0;
+ } else {
+ ei_local->lasttx = 20;
+ ei_local->txing = 0;
+ }
} else if (ei_local->tx2 < 0) {
if (ei_local->lasttx != 2 && ei_local->lasttx != -2)
pr_err("%s: bogus last_tx_buffer %d, tx2=%d\n",
@@ -612,8 +617,10 @@ static void ei_tx_intr(struct net_device *dev)
netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
- } else
- ei_local->lasttx = 10, ei_local->txing = 0;
+ } else {
+ ei_local->lasttx = 10;
+ ei_local->txing = 0;
+ }
} /* else
netdev_warn(dev, "unexpected TX-done interrupt, lasttx=%d\n",
ei_local->lasttx);
@@ -969,14 +976,14 @@ static void ethdev_setup(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
- if ((msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
- pr_info("%s", version);
-
ether_setup(dev);
spin_lock_init(&ei_local->page_lock);
- ei_local->msg_enable = msg_enable;
+ ei_local->msg_enable = netif_msg_init(msg_enable, default_msg_level);
+
+ if (netif_msg_drv(ei_local) && (version_printed++ == 0))
+ pr_info("%s", version);
}
/**
@@ -1014,8 +1021,7 @@ static void __NS8390_init(struct net_device *dev, int startp)
? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
: 0x48;
- if (sizeof(struct e8390_pkt_hdr) != 4)
- panic("8390.c: header struct mispacked\n");
+ BUILD_BUG_ON(sizeof(struct e8390_pkt_hdr) != 4);
/* Follow National Semi's recommendations for initing the DP83902. */
ei_outb_p(E8390_NODMA+E8390_PAGE0+E8390_STOP, e8390_base+E8390_CMD); /* 0x21 */
ei_outb_p(endcfg, e8390_base + EN0_DCFG); /* 0x48 or 0x49 */
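Besides the same comma-operator cleanup, lib8390 stops consuming the module parameter raw: netif_msg_init() converts a user-supplied debug level into a NETIF_MSG_* bitmap, falling back to the driver defaults when the parameter is out of range, and the version banner is then gated on the resulting bitmap. The BUILD_BUG_ON() change turns the header-packing check from a runtime panic into a compile-time failure. The logging idiom in isolation (the priv layout is illustrative):

        #include <linux/module.h>
        #include <linux/netdevice.h>

        static int debug = -1;  /* out of range, so the defaults below apply */
        module_param(debug, int, 0444);

        struct example_priv {
                u32 msg_enable;
        };

        static void example_init_msg(struct example_priv *p)
        {
                p->msg_enable = netif_msg_init(debug,
                                               NETIF_MSG_DRV | NETIF_MSG_PROBE |
                                               NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR);
                if (netif_msg_drv(p))   /* works on any struct with msg_enable */
                        pr_info("example: driver loaded\n");
        }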
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 164c3ed550bf..9d3b1e0e425c 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1178,8 +1178,10 @@ static void dma_block_input(struct net_device *dev, int count,
outb_p(E8390_RREAD+E8390_START, nic_base + PCNET_CMD);
insw(nic_base + PCNET_DATAPORT,buf,count>>1);
- if (count & 0x01)
- buf[count-1] = inb(nic_base + PCNET_DATAPORT), xfer_count++;
+ if (count & 0x01) {
+ buf[count-1] = inb(nic_base + PCNET_DATAPORT);
+ xfer_count++;
+ }
/* This was for the ALPHA version only, but enough people have been
encountering problems that it is still here. */
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index ba0055bb1614..555299737b51 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -886,7 +886,9 @@ static int netdev_open(struct net_device *dev)
tx_ring_size = ((sizeof(starfire_tx_desc) * TX_RING_SIZE + QUEUE_ALIGN - 1) / QUEUE_ALIGN) * QUEUE_ALIGN;
rx_ring_size = sizeof(struct starfire_rx_desc) * RX_RING_SIZE;
np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
- np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
+ np->queue_mem = dma_alloc_coherent(&np->pci_dev->dev,
+ np->queue_mem_size,
+ &np->queue_mem_dma, GFP_ATOMIC);
if (np->queue_mem == NULL) {
free_irq(irq, dev);
return -ENOMEM;
@@ -1136,9 +1138,11 @@ static void init_ring(struct net_device *dev)
np->rx_info[i].skb = skb;
if (skb == NULL)
break;
- np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_info[i].mapping)) {
+ np->rx_info[i].mapping = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[i].mapping)) {
dev_kfree_skb(skb);
np->rx_info[i].skb = NULL;
break;
@@ -1217,18 +1221,19 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
status |= skb_first_frag_len(skb) | (skb_num_frags(skb) << 16);
np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ skb_first_frag_len(skb),
+ DMA_TO_DEVICE);
} else {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1];
status |= skb_frag_size(this_frag);
np->tx_info[entry].mapping =
- pci_map_single(np->pci_dev,
+ dma_map_single(&np->pci_dev->dev,
skb_frag_address(this_frag),
skb_frag_size(this_frag),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
- if (pci_dma_mapping_error(np->pci_dev,
- np->tx_info[entry].mapping)) {
+ if (dma_mapping_error(&np->pci_dev->dev, np->tx_info[entry].mapping)) {
dev->stats.tx_dropped++;
goto err_out;
}
@@ -1271,18 +1276,16 @@ err_out:
entry = prev_tx % TX_RING_SIZE;
np->tx_info[entry].skb = NULL;
if (i > 0) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
- skb_first_frag_len(skb),
- PCI_DMA_TODEVICE);
+ skb_first_frag_len(skb), DMA_TO_DEVICE);
np->tx_info[entry].mapping = 0;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
for (j = 1; j < i; j++) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
- skb_frag_size(
- &skb_shinfo(skb)->frags[j-1]),
- PCI_DMA_TODEVICE);
+ skb_frag_size(&skb_shinfo(skb)->frags[j - 1]),
+ DMA_TO_DEVICE);
entry++;
}
}
@@ -1356,20 +1359,20 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
u16 entry = (tx_status & 0x7fff) / sizeof(starfire_tx_desc);
struct sk_buff *skb = np->tx_info[entry].skb;
np->tx_info[entry].skb = NULL;
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_first_frag_len(skb),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
np->tx_info[entry].mapping = 0;
np->dirty_tx += np->tx_info[entry].used_slots;
entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
{
int i;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- pci_unmap_single(np->pci_dev,
+ dma_unmap_single(&np->pci_dev->dev,
np->tx_info[entry].mapping,
skb_frag_size(&skb_shinfo(skb)->frags[i]),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
np->dirty_tx++;
entry++;
}
@@ -1461,16 +1464,18 @@ static int __netdev_rx(struct net_device *dev, int *quota)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(np->pci_dev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ pkt_len, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,
- np->rx_info[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ pkt_len, DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
} else {
- pci_unmap_single(np->pci_dev, np->rx_info[entry].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_info[entry].mapping,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
skb = np->rx_info[entry].skb;
skb_put(skb, pkt_len);
np->rx_info[entry].skb = NULL;
@@ -1588,9 +1593,9 @@ static void refill_rx_ring(struct net_device *dev)
if (skb == NULL)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
- pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_info[entry].mapping)) {
+ dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_info[entry].mapping)) {
dev_kfree_skb(skb);
np->rx_info[entry].skb = NULL;
break;
@@ -1963,7 +1968,9 @@ static int netdev_close(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].rxaddr = cpu_to_dma(0xBADF00D0); /* An invalid address. */
if (np->rx_info[i].skb != NULL) {
- pci_unmap_single(np->pci_dev, np->rx_info[i].mapping, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_info[i].mapping,
+ np->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_info[i].skb);
}
np->rx_info[i].skb = NULL;
@@ -1973,9 +1980,8 @@ static int netdev_close(struct net_device *dev)
struct sk_buff *skb = np->tx_info[i].skb;
if (skb == NULL)
continue;
- pci_unmap_single(np->pci_dev,
- np->tx_info[i].mapping,
- skb_first_frag_len(skb), PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_info[i].mapping,
+ skb_first_frag_len(skb), DMA_TO_DEVICE);
np->tx_info[i].mapping = 0;
dev_kfree_skb(skb);
np->tx_info[i].skb = NULL;
@@ -2018,7 +2024,8 @@ static void starfire_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (np->queue_mem)
- pci_free_consistent(pdev, np->queue_mem_size, np->queue_mem, np->queue_mem_dma);
+ dma_free_coherent(&pdev->dev, np->queue_mem_size,
+ np->queue_mem, np->queue_mem_dma);
/* XXX: add wakeup code -- requires firmware for MagicPacket */
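Alongside the same API rename, starfire's mapping-failure checks move from pci_dma_mapping_error() to dma_mapping_error(); the check has to run on every streaming mapping before the handle is used. The pattern from the RX refill hunks, isolated:

        static int example_map_rx(struct device *dev, struct sk_buff *skb,
                                  unsigned int buf_sz, dma_addr_t *mapping)
        {
                *mapping = dma_map_single(dev, skb->data, buf_sz,
                                          DMA_FROM_DEVICE);
                if (dma_mapping_error(dev, *mapping)) {
                        dev_kfree_skb(skb);     /* drop; retry on next refill */
                        return -ENOMEM;
                }
                return 0;
        }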
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index b3b8a8010142..862ea44beea7 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -640,13 +640,11 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct emac_board_info *db = netdev_priv(dev);
int int_status;
- unsigned long flags;
unsigned int reg_val;
/* A real interrupt coming */
- /* holders of db->lock must always block IRQs */
- spin_lock_irqsave(&db->lock, flags);
+ spin_lock(&db->lock);
/* Disable all interrupts */
writel(0, db->membase + EMAC_INT_CTL_REG);
@@ -680,7 +678,7 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
reg_val |= (0xf << 0) | (0x01 << 8);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
- spin_unlock_irqrestore(&db->lock, flags);
+ spin_unlock(&db->lock);
return IRQ_HANDLED;
}
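sun4i-emac can take the plain lock because emac_interrupt() runs in hardirq context with local interrupts already disabled, so the irqsave/irqrestore variant only added overhead there. The invariant from the deleted comment still has to hold: every process-context acquirer of db->lock must disable IRQs, otherwise this handler could deadlock against it. Sketch of the split:

        /* hardirq context: IRQs are already off on this CPU */
        static irqreturn_t example_irq(int irq, void *dev_id)
        {
                struct net_device *dev = dev_id;
                struct emac_board_info *db = netdev_priv(dev);

                spin_lock(&db->lock);           /* no irqsave needed here */
                /* ... ack and re-enable device interrupts ... */
                spin_unlock(&db->lock);
                return IRQ_HANDLED;
        }

        /* process context still needs the IRQ-disabling form:
         *      spin_lock_irqsave(&db->lock, flags);
         *      ...
         *      spin_unlock_irqrestore(&db->lock, flags);
         */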
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 8470c836fa18..1a7e4df9b3e9 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -465,6 +465,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
ap = netdev_priv(dev);
+ ap->ndev = dev;
ap->pdev = pdev;
ap->name = pci_name(pdev);
@@ -1562,10 +1563,10 @@ static void ace_watchdog(struct net_device *data, unsigned int txqueue)
}
-static void ace_tasklet(unsigned long arg)
+static void ace_tasklet(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *) arg;
- struct ace_private *ap = netdev_priv(dev);
+ struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
+ struct net_device *dev = ap->ndev;
int cur_size;
cur_size = atomic_read(&ap->cur_rx_bufs);
@@ -2269,7 +2270,7 @@ static int ace_open(struct net_device *dev)
/*
* Setup the bottom half rx ring refill handler
*/
- tasklet_init(&ap->ace_tasklet, ace_tasklet, (unsigned long)dev);
+ tasklet_setup(&ap->ace_tasklet, ace_tasklet);
return 0;
}
diff --git a/drivers/net/ethernet/alteon/acenic.h b/drivers/net/ethernet/alteon/acenic.h
index c670067b1541..265fa601a258 100644
--- a/drivers/net/ethernet/alteon/acenic.h
+++ b/drivers/net/ethernet/alteon/acenic.h
@@ -633,6 +633,7 @@ struct ace_skb
*/
struct ace_private
{
+ struct net_device *ndev; /* backpointer */
struct ace_info *info;
struct ace_regs __iomem *regs; /* register base */
struct ace_skb *skb;
@@ -776,7 +777,7 @@ static int ace_open(struct net_device *dev);
static netdev_tx_t ace_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static int ace_close(struct net_device *dev);
-static void ace_tasklet(unsigned long dev);
+static void ace_tasklet(struct tasklet_struct *t);
static void ace_dump_trace(struct ace_private *ap);
static void ace_set_multicast_list(struct net_device *dev);
static int ace_change_mtu(struct net_device *dev, int new_mtu);
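The acenic change follows the tree-wide tasklet API conversion: tasklet_setup() replaces tasklet_init(), the callback now receives the tasklet_struct itself, and from_tasklet() (a typed container_of() wrapper) recovers the enclosing private struct. That is why the ndev backpointer is added: the net_device can no longer be smuggled through an unsigned long argument. Restated:

        static void example_tasklet(struct tasklet_struct *t)
        {
                /* expands to container_of(t, struct ace_private, ace_tasklet) */
                struct ace_private *ap = from_tasklet(ap, t, ace_tasklet);
                struct net_device *dev = ap->ndev;

                /* ... refill RX buffers for dev ... */
        }

        /* in ace_open(): */
        tasklet_setup(&ap->ace_tasklet, example_tasklet);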
diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
index b818a169c193..4164eacc5c28 100644
--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
@@ -1,37 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_ADMIN_H_
#define _ENA_ADMIN_H_
+#define ENA_ADMIN_RSS_KEY_PARTS 10
enum ena_admin_aq_opcode {
ENA_ADMIN_CREATE_SQ = 1,
@@ -55,6 +29,7 @@ enum ena_admin_aq_completion_status {
ENA_ADMIN_RESOURCE_BUSY = 7,
};
+/* subcommands for the set/get feature admin commands */
enum ena_admin_aq_feature_id {
ENA_ADMIN_DEVICE_ATTRIBUTES = 1,
ENA_ADMIN_MAX_QUEUES_NUM = 2,
@@ -63,7 +38,7 @@ enum ena_admin_aq_feature_id {
ENA_ADMIN_MAX_QUEUES_EXT = 7,
ENA_ADMIN_RSS_HASH_FUNCTION = 10,
ENA_ADMIN_STATELESS_OFFLOAD_CONFIG = 11,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG = 12,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG = 12,
ENA_ADMIN_MTU = 14,
ENA_ADMIN_RSS_HASH_INPUT = 18,
ENA_ADMIN_INTERRUPT_MODERATION = 20,
@@ -117,6 +92,8 @@ enum ena_admin_completion_policy_type {
enum ena_admin_get_stats_type {
ENA_ADMIN_GET_STATS_TYPE_BASIC = 0,
ENA_ADMIN_GET_STATS_TYPE_EXTENDED = 1,
+ /* extra HW stats for specific network interface */
+ ENA_ADMIN_GET_STATS_TYPE_ENI = 2,
};
enum ena_admin_get_stats_scope {
@@ -193,7 +170,7 @@ struct ena_admin_acq_common_desc {
u16 extended_status;
/* indicates to the driver which AQ entry has been consumed by the
- * device and could be reused
+ * device and could be reused
*/
u16 sq_head_indx;
};
@@ -238,8 +215,8 @@ struct ena_admin_aq_create_sq_cmd {
*/
u8 sq_caps_3;
- /* associated completion queue id. This CQ must be created prior to
- * SQ creation
+ /* associated completion queue id. This CQ must be created prior to SQ
+ * creation
*/
u16 cq_idx;
@@ -378,7 +355,7 @@ struct ena_admin_aq_get_stats_cmd {
u16 queue_idx;
/* device id, value 0xFFFF means mine. only privileged device can get
- * stats of other device
+ * stats of other device
*/
u16 device_id;
};
@@ -410,10 +387,43 @@ struct ena_admin_basic_stats {
u32 tx_drops_high;
};
+/* ENI Statistics Command. */
+struct ena_admin_eni_stats {
+ /* The number of packets shaped due to inbound aggregate BW
+ * allowance being exceeded
+ */
+ u64 bw_in_allowance_exceeded;
+
+ /* The number of packets shaped due to outbound aggregate BW
+ * allowance being exceeded
+ */
+ u64 bw_out_allowance_exceeded;
+
+ /* The number of packets shaped due to PPS allowance being exceeded */
+ u64 pps_allowance_exceeded;
+
+ /* The number of packets shaped due to connection tracking
+ * allowance being exceeded and leading to failure in establishment
+ * of new connections
+ */
+ u64 conntrack_allowance_exceeded;
+
+ /* The number of packets shaped due to linklocal packet rate
+ * allowance being exceeded
+ */
+ u64 linklocal_allowance_exceeded;
+};
+
struct ena_admin_acq_get_stats_resp {
struct ena_admin_acq_common_desc acq_common_desc;
- struct ena_admin_basic_stats basic_stats;
+ union {
+ u64 raw[7];
+
+ struct ena_admin_basic_stats basic_stats;
+
+ struct ena_admin_eni_stats eni_stats;
+ } u;
};
struct ena_admin_get_set_feature_common_desc {
@@ -440,7 +450,9 @@ struct ena_admin_device_attr_feature_desc {
u32 device_version;
- /* bitmap of ena_admin_aq_feature_id */
+ /* bitmap of ena_admin_aq_feature_id, which represents supported
+ * subcommands for the set/get feature admin commands.
+ */
u32 supported_features;
u32 reserved3;
@@ -526,32 +538,30 @@ struct ena_admin_feature_llq_desc {
u32 max_llq_depth;
- /* specify the header locations the device supports. bitfield of
- * enum ena_admin_llq_header_location.
+ /* specify the header locations the device supports. bitfield of enum
+ * ena_admin_llq_header_location.
*/
u16 header_location_ctrl_supported;
/* the header location the driver selected to use. */
u16 header_location_ctrl_enabled;
- /* if inline header is specified - this is the size of descriptor
- * list entry. If header in a separate ring is specified - this is
- * the size of header ring entry. bitfield of enum
- * ena_admin_llq_ring_entry_size. specify the entry sizes the device
- * supports
+ /* if inline header is specified - this is the size of descriptor list
+ * entry. If header in a separate ring is specified - this is the size
+ * of header ring entry. bitfield of enum ena_admin_llq_ring_entry_size.
+ * specify the entry sizes the device supports
*/
u16 entry_size_ctrl_supported;
/* the entry size the driver selected to use. */
u16 entry_size_ctrl_enabled;
- /* valid only if inline header is specified. First entry associated
- * with the packet includes descriptors and header. Rest of the
- * entries occupied by descriptors. This parameter defines the max
- * number of descriptors precedding the header in the first entry.
- * The field is bitfield of enum
- * ena_admin_llq_num_descs_before_header and specify the values the
- * device supports
+ /* valid only if inline header is specified. First entry associated with
+ * the packet includes descriptors and header. Rest of the entries
+ * occupied by descriptors. This parameter defines the max number of
+ * descriptors preceding the header in the first entry. The field is
+ * a bitfield of enum ena_admin_llq_num_descs_before_header and specifies
+ * the values the device supports
*/
u16 desc_num_before_header_supported;
@@ -559,7 +569,7 @@ struct ena_admin_feature_llq_desc {
u16 desc_num_before_header_enabled;
/* valid only if inline was chosen. bitfield of enum
- * ena_admin_llq_stride_ctrl
+ * ena_admin_llq_stride_ctrl
*/
u16 descriptors_stride_ctrl_supported;
@@ -594,8 +604,8 @@ struct ena_admin_queue_ext_feature_fields {
u32 max_tx_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
u16 max_per_packet_tx_descs;
@@ -618,8 +628,8 @@ struct ena_admin_queue_feature_desc {
u32 max_header_size;
- /* Maximum Descriptors number, including meta descriptor, allowed for
- * a single Tx packet
+ /* Maximum Descriptors number, including meta descriptor, allowed for a
+ * single Tx packet
*/
u16 max_packet_tx_descs;
@@ -707,11 +717,11 @@ enum ena_admin_hash_functions {
};
struct ena_admin_feature_rss_flow_hash_control {
- u32 keys_num;
+ u32 key_parts;
u32 reserved;
- u32 key[10];
+ u32 key[ENA_ADMIN_RSS_KEY_PARTS];
};
struct ena_admin_feature_rss_flow_hash_function {
@@ -1007,7 +1017,7 @@ struct ena_admin_set_feat_resp {
struct ena_admin_aenq_common_desc {
u16 group;
- u16 syndrom;
+ u16 syndrome;
/* 0 : phase
* 7:1 : reserved - MBZ
@@ -1031,7 +1041,7 @@ enum ena_admin_aenq_group {
ENA_ADMIN_AENQ_GROUPS_NUM = 5,
};
-enum ena_admin_aenq_notification_syndrom {
+enum ena_admin_aenq_notification_syndrome {
ENA_ADMIN_SUSPEND = 0,
ENA_ADMIN_RESUME = 1,
ENA_ADMIN_UPDATE_HINTS = 2,
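The get-stats response now multiplexes both layouts through one union sized by u64 raw[7]; which member is valid depends on the stats type carried in the command. A hedged consumer sketch (resp, out and stats_type are illustrative names, not part of this diff):

        /* Only the union member matching the requested type holds data;
         * reading the other member after the call would return junk.
         */
        switch (stats_type) {
        case ENA_ADMIN_GET_STATS_TYPE_BASIC:
                memcpy(out, &resp.u.basic_stats, sizeof(resp.u.basic_stats));
                break;
        case ENA_ADMIN_GET_STATS_TYPE_ENI:
                memcpy(out, &resp.u.eni_stats, sizeof(resp.u.eni_stats));
                break;
        }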
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 435bf05a853c..5f8769aa469d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "ena_com.h"
@@ -98,7 +71,7 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
dma_addr_t addr)
{
if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
- pr_err("dma address has more bits that the device supports\n");
+ pr_err("DMA address has more bits that the device supports\n");
return -EINVAL;
}
@@ -108,16 +81,16 @@ static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
return 0;
}
-static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_sq *sq = &queue->sq;
- u16 size = ADMIN_SQ_SIZE(queue->q_depth);
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
- sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
- GFP_KERNEL);
+ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+ &sq->dma_addr, GFP_KERNEL);
if (!sq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -130,16 +103,16 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
+static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
- struct ena_com_admin_cq *cq = &queue->cq;
- u16 size = ADMIN_CQ_SIZE(queue->q_depth);
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
- cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
- GFP_KERNEL);
+ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+ &cq->dma_addr, GFP_KERNEL);
if (!cq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -149,20 +122,20 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
return 0;
}
-static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
+static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
struct ena_aenq_handlers *aenq_handlers)
{
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u32 addr_low, addr_high, aenq_caps;
u16 size;
- dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
- aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
- GFP_KERNEL);
+ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+ &aenq->dma_addr, GFP_KERNEL);
if (!aenq->entries) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -172,18 +145,18 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
- writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
- writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
+ writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
+ writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
aenq_caps = 0;
- aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+ aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
<< ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
- writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+ writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
if (unlikely(!aenq_handlers)) {
- pr_err("aenq handlers pointer is NULL\n");
+ pr_err("AENQ handlers pointer is NULL\n");
return -EINVAL;
}
@@ -199,31 +172,31 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
atomic_dec(&queue->outstanding_cmds);
}
-static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
+static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
u16 command_id, bool capture)
{
- if (unlikely(command_id >= queue->q_depth)) {
- pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
- command_id, queue->q_depth);
+ if (unlikely(command_id >= admin_queue->q_depth)) {
+ pr_err("Command id is larger than the queue size. cmd_id: %u queue size %d\n",
+ command_id, admin_queue->q_depth);
return NULL;
}
- if (unlikely(!queue->comp_ctx)) {
+ if (unlikely(!admin_queue->comp_ctx)) {
pr_err("Completion context is NULL\n");
return NULL;
}
- if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
+ if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
pr_err("Completion context is occupied\n");
return NULL;
}
if (capture) {
- atomic_inc(&queue->outstanding_cmds);
- queue->comp_ctx[command_id].occupied = true;
+ atomic_inc(&admin_queue->outstanding_cmds);
+ admin_queue->comp_ctx[command_id].occupied = true;
}
- return &queue->comp_ctx[command_id];
+ return &admin_queue->comp_ctx[command_id];
}
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
@@ -244,7 +217,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
/* In case of queue FULL */
cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
if (cnt >= admin_queue->q_depth) {
- pr_debug("admin queue is full.\n");
+ pr_debug("Admin queue is full.\n");
admin_queue->stats.out_of_space++;
return ERR_PTR(-ENOSPC);
}
@@ -284,20 +257,21 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
return comp_ctx;
}
-static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
+static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
- size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
+ size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
struct ena_comp_ctx *comp_ctx;
u16 i;
- queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
- if (unlikely(!queue->comp_ctx)) {
- pr_err("memory allocation failed\n");
+ admin_queue->comp_ctx =
+ devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ if (unlikely(!admin_queue->comp_ctx)) {
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
- for (i = 0; i < queue->q_depth; i++) {
- comp_ctx = get_comp_ctxt(queue, i, false);
+ for (i = 0; i < admin_queue->q_depth; i++) {
+ comp_ctx = get_comp_ctxt(admin_queue, i, false);
if (comp_ctx)
init_completion(&comp_ctx->wait_event);
}
@@ -363,7 +337,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
}
if (!io_sq->desc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
}
@@ -389,7 +363,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
if (!io_sq->bounce_buf_ctrl.base_buffer) {
- pr_err("bounce buffer memory allocation failed\n");
+ pr_err("Bounce buffer memory allocation failed\n");
return -ENOMEM;
}
@@ -449,7 +423,7 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
}
if (!io_cq->cdesc_addr.virt_addr) {
- pr_err("memory allocation failed\n");
+ pr_err("Memory allocation failed\n");
return -ENOMEM;
}
@@ -525,7 +499,7 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
static int ena_com_comp_status_to_errno(u8 comp_status)
{
if (unlikely(comp_status != 0))
- pr_err("admin command failed[%u]\n", comp_status);
+ pr_err("Admin command failed[%u]\n", comp_status);
switch (comp_status) {
case ENA_ADMIN_SUCCESS:
@@ -539,6 +513,8 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
case ENA_ADMIN_ILLEGAL_PARAMETER:
case ENA_ADMIN_UNKNOWN_ERROR:
return -EINVAL;
+ case ENA_ADMIN_RESOURCE_BUSY:
+ return -EAGAIN;
}
return -EINVAL;
@@ -603,7 +579,7 @@ err:
return ret;
}
-/**
+/*
* Set the LLQ configurations of the firmware
*
* The driver provides only the enabled feature values to the device,
@@ -717,7 +693,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
/* The desc list entry size should be whole multiply of 8
* This requirement comes from __iowrite64_copy()
*/
- pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
+ pr_err("Illegal entry size %d\n", llq_info->desc_list_entry_size);
return -EINVAL;
}
@@ -858,7 +834,7 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
}
if (unlikely(i == timeout)) {
- pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
+ pr_err("Reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
mmio_read->seq_num, offset, read_resp->req_id,
read_resp->reg_off);
ret = ENA_MMIO_READ_TIMEOUT;
@@ -925,7 +901,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
sizeof(destroy_resp));
if (unlikely(ret && (ret != -ENODEV)))
- pr_err("failed to destroy io sq error: %d\n", ret);
+ pr_err("Failed to destroy io sq error: %d\n", ret);
return ret;
}
@@ -1034,7 +1010,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
&get_cmd.control_buffer.address,
control_buf_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -1081,11 +1057,10 @@ static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
(ena_dev->rss).hash_key;
netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
- /* The key is stored in the device in u32 array
- * as well as the API requires the key to be passed in this
- * format. Thus the size of our array should be divided by 4
+ /* The key buffer is stored in the device in an array of
+ * uint32 elements.
*/
- hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+ hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
@@ -1149,13 +1124,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
int ret;
ret = ena_com_get_feature(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
if (unlikely(ret))
return ret;
if ((get_resp.u.ind_table.min_size > log_size) ||
(get_resp.u.ind_table.max_size < log_size)) {
- pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
+ pr_err("Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1 << log_size, 1 << get_resp.u.ind_table.min_size,
1 << get_resp.u.ind_table.max_size);
return -EINVAL;
@@ -1248,7 +1223,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
&create_cmd.sq_ba,
io_sq->desc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
}
@@ -1277,7 +1252,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
cmd_completion.llq_descriptors_offset);
}
- pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+ pr_debug("Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
return ret;
}
@@ -1390,7 +1365,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
&create_cmd.cq_ba,
io_cq->cdesc_addr.phys_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -1419,7 +1394,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
cmd_completion.numa_node_register_offset);
- pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+ pr_debug("Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
return ret;
}
@@ -1612,12 +1587,12 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
return -ETIME;
}
- pr_info("ena device version: %d.%d\n",
+ pr_info("ENA device version: %d.%d\n",
(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
- pr_info("ena controller version: %d.%d.%d implementation version %d\n",
+ pr_info("ENA controller version: %d.%d.%d implementation version %d\n",
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1640,6 +1615,19 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
return 0;
}
+static void
+ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
+ struct ena_com_admin_queue *admin_queue)
+{
+ if (!admin_queue->comp_ctx)
+ return;
+
+ devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
+
+ admin_queue->comp_ctx = NULL;
+}
+
void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
@@ -1648,9 +1636,8 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
struct ena_com_aenq *aenq = &ena_dev->aenq;
u16 size;
- if (admin_queue->comp_ctx)
- devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
- admin_queue->comp_ctx = NULL;
+ ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);
+
size = ADMIN_SQ_SIZE(admin_queue->q_depth);
if (sq->entries)
dma_free_coherent(ena_dev->dmadev, size, sq->entries,
@@ -1928,6 +1915,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
sizeof(get_resp.u.dev_attr));
+
ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
@@ -2006,10 +1994,10 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
/* ena_handle_specific_aenq_event:
* return the handler that is relevant to the specific event group
*/
-static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
+static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
u16 group)
{
- struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
+ struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
return aenq_handlers->handlers[group];
@@ -2021,11 +2009,11 @@ static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
* handles the aenq incoming events.
* pop events from the queue and apply the specific handler
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
{
struct ena_admin_aenq_entry *aenq_e;
struct ena_admin_aenq_common_desc *aenq_common;
- struct ena_com_aenq *aenq = &dev->aenq;
+ struct ena_com_aenq *aenq = &ena_dev->aenq;
u64 timestamp;
ena_aenq_handler handler_cb;
u16 masked_head, processed = 0;
@@ -2045,12 +2033,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
dma_rmb();
timestamp = (u64)aenq_common->timestamp_low |
- ((u64)aenq_common->timestamp_high << 32);
- pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
- aenq_common->group, aenq_common->syndrom, timestamp);
+ ((u64)aenq_common->timestamp_high << 32);
+
+ pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrome, timestamp);
/* Handle specific event*/
- handler_cb = ena_com_get_specific_aenq_cb(dev,
+ handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
aenq_common->group);
handler_cb(data, aenq_e); /* call the actual event handler*/
@@ -2075,7 +2064,8 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
/* write the aenq doorbell after all AENQ descriptors were read */
mb();
- writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ writel_relaxed((u32)aenq->head,
+ ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2167,6 +2157,21 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
return ret;
}
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats)
+{
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+ memcpy(stats, &ctx.get_resp.u.eni_stats,
+ sizeof(ctx.get_resp.u.eni_stats));
+
+ return ret;
+}
+
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats)
{
@@ -2176,8 +2181,8 @@ int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
memset(&ctx, 0x0, sizeof(ctx));
ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
if (likely(ret == 0))
- memcpy(stats, &ctx.get_resp.basic_stats,
- sizeof(ctx.get_resp.basic_stats));
+ memcpy(stats, &ctx.get_resp.u.basic_stats,
+ sizeof(ctx.get_resp.u.basic_stats));
return ret;
}
@@ -2273,7 +2278,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_key_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2331,7 +2336,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
}
memcpy(hash_key->key, key, key_len);
rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
+ hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
}
break;
case ENA_ADMIN_CRC32:
@@ -2386,7 +2391,8 @@ int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
ena_dev->rss.hash_key;
if (key)
- memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
+ memcpy(key, hash_key->key,
+ (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
return 0;
}
@@ -2442,7 +2448,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->hash_ctrl_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
cmd.control_buffer.length = sizeof(*hash_ctrl);
@@ -2503,7 +2509,7 @@ int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
available_fields = hash_ctrl->selected_fields[i].fields &
hash_ctrl->supported_fields[i].fields;
if (available_fields != hash_ctrl->selected_fields[i].fields) {
- pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
+ pr_err("Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
i, hash_ctrl->supported_fields[i].fields,
hash_ctrl->selected_fields[i].fields);
return -EOPNOTSUPP;
@@ -2541,7 +2547,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
/* Make sure all the fields are supported */
supported_fields = hash_ctrl->supported_fields[proto].fields;
if ((hash_fields & supported_fields) != hash_fields) {
- pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
+ pr_err("Proto %d doesn't support the required fields %x. supports only: %x\n",
proto, hash_fields, supported_fields);
}
@@ -2581,9 +2587,9 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
int ret;
if (!ena_com_check_supported_feature_id(
- ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
+ ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
pr_debug("Feature %d isn't supported\n",
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
return -EOPNOTSUPP;
}
@@ -2598,7 +2604,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
cmd.aq_common_descriptor.flags =
ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
- cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
+ cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
cmd.u.ind_table.size = rss->tbl_log_size;
cmd.u.ind_table.inline_index = 0xFFFFFFFF;
@@ -2606,7 +2612,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
&cmd.control_buffer.address,
rss->rss_ind_tbl_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2636,7 +2642,7 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
sizeof(struct ena_admin_rss_ind_table_entry);
rc = ena_com_get_feature_ex(ena_dev, &get_resp,
- ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
rss->rss_ind_tbl_dma_addr,
tbl_size, 0);
if (unlikely(rc))
@@ -2719,8 +2725,7 @@ int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
host_attr->debug_area_virt_addr =
dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
- &host_attr->debug_area_dma_addr,
- GFP_KERNEL);
+ &host_attr->debug_area_dma_addr, GFP_KERNEL);
if (unlikely(!host_attr->debug_area_virt_addr)) {
host_attr->debug_area_size = 0;
return -ENOMEM;
@@ -2777,7 +2782,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.debug_ba,
host_attr->debug_area_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2785,7 +2790,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
&cmd.u.host_attr.os_info_ba,
host_attr->host_info_dma_addr);
if (unlikely(ret)) {
- pr_err("memory address set failed\n");
+ pr_err("Memory address set failed\n");
return ret;
}
@@ -2904,7 +2909,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
if (unlikely(ena_dev->tx_max_header_size == 0)) {
- pr_err("the size of the LLQ entry is smaller than needed\n");
+ pr_err("The size of the LLQ entry is smaller than needed\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 4287d47b2b0b..55097750d062 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_COM
@@ -536,7 +509,7 @@ void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev);
* This method goes over the async event notification queue and calls the proper
* aenq handler.
*/
-void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data);
+void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data);
/* ena_com_abort_admin_commands - Abort all the outstanding admin commands.
* @ena_dev: ENA communication layer struct
@@ -616,6 +589,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
struct ena_admin_basic_stats *stats);
+/* ena_com_get_eni_stats - Get extended network interface statistics
+ * @ena_dev: ENA communication layer struct
+ * @stats: stats return value
+ *
+ * @return: 0 on success, and a negative value otherwise.
+ */
+int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_admin_eni_stats *stats);
+
/* ena_com_set_dev_mtu - Configure the device mtu.
* @ena_dev: ENA communication layer struct
* @mtu: mtu value
diff --git a/drivers/net/ethernet/amazon/ena/ena_common_defs.h b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
index 8a8ded0de9ac..e210c8a81fc0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_common_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_common_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_COMMON_H_
#define _ENA_COMMON_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ccd440589565..ad30cacc1622 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "ena_eth_com.h"
@@ -45,8 +18,9 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ (head_masked * io_cq->cdesc_entry_size_in_bytes));
- desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+ desc_phase = (READ_ONCE(cdesc->status) &
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
if (desc_phase != expected_phase)
return NULL;
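The comparison above is the standard completion-ring ownership test: the device flips a phase bit on every wrap of the ring, so a descriptor whose phase differs from the expected one has not been written yet, and READ_ONCE() keeps the compiler from caching or tearing the status load. Reduced to its core, with placeholder names:

	/* Sketch of the phase-bit ownership check; the mask and shift
	 * are placeholders, not the driver's definitions.
	 */
	static bool example_cdesc_ready(const u32 *status_field,
					u8 expected_phase)
	{
		u32 status = READ_ONCE(*status_field);
		u8 phase = (status & EXAMPLE_PHASE_MASK) >>
			   EXAMPLE_PHASE_SHIFT;

		/* A matching phase means the device wrote this entry in
		 * the current lap around the ring.
		 */
		return phase == expected_phase;
	}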
@@ -89,7 +63,7 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
}
io_sq->entries_in_tx_burst_left--;
- pr_debug("decreasing entries_in_tx_burst_left of queue %d to %d\n",
+ pr_debug("Decreasing entries_in_tx_burst_left of queue %d to %d\n",
io_sq->qid, io_sq->entries_in_tx_burst_left);
}
@@ -128,12 +102,12 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
if (unlikely((header_offset + header_len) >
llq_info->desc_list_entry_size)) {
- pr_err("trying to write header larger than llq entry can accommodate\n");
+ pr_err("Trying to write header larger than llq entry can accommodate\n");
return -EFAULT;
}
if (unlikely(!bounce_buffer)) {
- pr_err("bounce buffer is NULL\n");
+ pr_err("Bounce buffer is NULL\n");
return -EFAULT;
}
@@ -151,7 +125,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
bounce_buffer = pkt_ctrl->curr_bounce_buf;
if (unlikely(!bounce_buffer)) {
- pr_err("bounce buffer is NULL\n");
+ pr_err("Bounce buffer is NULL\n");
return NULL;
}
@@ -262,8 +236,9 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
ena_com_cq_inc_head(io_cq);
count++;
- last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (READ_ONCE(cdesc->status) &
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
@@ -275,7 +250,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
- pr_debug("ena q_id: %d packets were completed. first desc idx %u descs# %d\n",
+ pr_debug("ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
io_cq->cur_rx_pkt_cdesc_count += count;
@@ -291,6 +266,9 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
meta_desc = get_sq_desc(io_sq);
+ if (unlikely(!meta_desc))
+ return -EFAULT;
+
memset(meta_desc, 0x0, sizeof(struct ena_eth_io_tx_meta_desc));
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_META_DESC_MASK;
@@ -298,7 +276,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_EXT_VALID_MASK;
/* bits 0-9 of the mss */
- meta_desc->word2 |= (ena_meta->mss <<
+ meta_desc->word2 |= ((u32)ena_meta->mss <<
ENA_ETH_IO_TX_META_DESC_MSS_LO_SHIFT) &
ENA_ETH_IO_TX_META_DESC_MSS_LO_MASK;
/* bits 10-13 of the mss */
@@ -308,7 +286,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
/* Extended meta desc */
meta_desc->len_ctrl |= ENA_ETH_IO_TX_META_DESC_ETH_META_TYPE_MASK;
- meta_desc->len_ctrl |= (io_sq->phase <<
+ meta_desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_META_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_META_DESC_PHASE_MASK;
@@ -321,7 +299,7 @@ static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L3_HDR_OFF_MASK;
- meta_desc->word2 |= (ena_meta->l4_hdr_len <<
+ meta_desc->word2 |= ((u32)ena_meta->l4_hdr_len <<
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_SHIFT) &
ENA_ETH_IO_TX_META_DESC_L4_HDR_LEN_IN_WORDS_MASK;
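The (u32) casts added above guard against C's integer promotions: u16 fields such as mss are promoted to signed int before <<, so shifting them toward bit 31 can overflow into the sign bit (undefined behaviour). Casting to u32 first keeps the whole expression unsigned:

	/* Illustrative only: the promotion hazard the casts avoid. */
	static u32 example_pack_mss(u16 mss)
	{
		/* Without the cast, mss becomes a signed int and the
		 * shift may overflow into the sign bit.
		 */
		return ((u32)mss << 20) & 0xfff00000;
	}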
@@ -358,7 +336,7 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
}
static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
- struct ena_eth_io_rx_cdesc_base *cdesc)
+ struct ena_eth_io_rx_cdesc_base *cdesc)
{
ena_rx_ctx->l3_proto = cdesc->status &
ENA_ETH_IO_RX_CDESC_BASE_L3_PROTO_IDX_MASK;
@@ -379,7 +357,7 @@ static void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
(cdesc->status & ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_MASK) >>
ENA_ETH_IO_RX_CDESC_BASE_IPV4_FRAG_SHIFT;
- pr_debug("ena_rx_ctx->l3_proto %d ena_rx_ctx->l4_proto %d\nena_rx_ctx->l3_csum_err %d ena_rx_ctx->l4_csum_err %d\nhash frag %d frag: %d cdesc_status: %x\n",
+ pr_debug("l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
@@ -412,7 +390,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
}
if (unlikely(header_len > io_sq->tx_max_header_size)) {
- pr_err("header size is too large %d max header: %d\n",
+ pr_err("Header size is too large %d max header: %d\n",
header_len, io_sq->tx_max_header_size);
return -EINVAL;
}
@@ -427,7 +405,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx, &have_meta);
if (unlikely(rc)) {
- pr_err("failed to create and store tx meta desc\n");
+ pr_err("Failed to create and store tx meta desc\n");
return rc;
}
@@ -447,16 +425,16 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
if (!have_meta)
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_FIRST_MASK;
- desc->buff_addr_hi_hdr_sz |= (header_len <<
+ desc->buff_addr_hi_hdr_sz |= ((u32)header_len <<
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_SHIFT) &
ENA_ETH_IO_TX_DESC_HEADER_LENGTH_MASK;
- desc->len_ctrl |= (io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
+ desc->len_ctrl |= ((u32)io_sq->phase << ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
desc->len_ctrl |= ENA_ETH_IO_TX_DESC_COMP_REQ_MASK;
/* Bits 0-9 */
- desc->meta_ctrl |= (ena_tx_ctx->req_id <<
+ desc->meta_ctrl |= ((u32)ena_tx_ctx->req_id <<
ENA_ETH_IO_TX_DESC_REQ_ID_LO_SHIFT) &
ENA_ETH_IO_TX_DESC_REQ_ID_LO_MASK;
@@ -502,7 +480,7 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
- desc->len_ctrl |= (io_sq->phase <<
+ desc->len_ctrl |= ((u32)io_sq->phase <<
ENA_ETH_IO_TX_DESC_PHASE_SHIFT) &
ENA_ETH_IO_TX_DESC_PHASE_MASK;
}
@@ -550,7 +528,7 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
return 0;
}
- pr_debug("fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+ pr_debug("Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
nb_hw_desc);
if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
@@ -606,9 +584,9 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
desc->length = ena_buf->len;
desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK |
- ENA_ETH_IO_RX_DESC_LAST_MASK |
- (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
- ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
+ ENA_ETH_IO_RX_DESC_LAST_MASK |
+ (io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK) |
+ ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
desc->req_id = req_id;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index b6592cb93b04..2c16c218818a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_ETH_COM_H_
@@ -167,7 +140,7 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
llq_info->descs_per_entry);
}
- pr_debug("queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
+ pr_debug("Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid,
num_descs, num_entries_needed);
return num_entries_needed > io_sq->entries_in_tx_burst_left;
@@ -178,13 +151,13 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
u16 max_entries_in_tx_burst = io_sq->llq_info.max_entries_in_tx_burst;
u16 tail = io_sq->tail;
- pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
+ pr_debug("Write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
writel(tail, io_sq->db_addr);
if (is_llq_max_tx_burst_exists(io_sq)) {
- pr_debug("reset available entries in tx burst for queue %d to %d\n",
+ pr_debug("Reset available entries in tx burst for queue %d to %d\n",
io_sq->qid, max_entries_in_tx_burst);
io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
index d105c9c56192..332ac0d28ac7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_io_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_ETH_IO_H_
#define _ENA_ETH_IO_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 430275bc0d04..3b2cd28f962d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/pci.h>
@@ -41,12 +14,17 @@ struct ena_stats {
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
.name = #stat, \
- .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
+ .stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
}
#define ENA_STAT_ENTRY(stat, stat_type) { \
.name = #stat, \
- .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
+ .stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \
+}
+
+#define ENA_STAT_HW_ENTRY(stat, stat_type) { \
+ .name = #stat, \
+ .stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \
}
#define ENA_STAT_RX_ENTRY(stat) \
@@ -58,6 +36,9 @@ struct ena_stats {
#define ENA_STAT_GLOBAL_ENTRY(stat) \
ENA_STAT_ENTRY(stat, dev)
+#define ENA_STAT_ENI_ENTRY(stat) \
+ ENA_STAT_HW_ENTRY(stat, eni_stats)
+
static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(tx_timeout),
ENA_STAT_GLOBAL_ENTRY(suspend),
@@ -68,6 +49,14 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};
+static const struct ena_stats ena_stats_eni_strings[] = {
+ ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
+ ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
+};
+
static const struct ena_stats ena_stats_tx_strings[] = {
ENA_STAT_TX_ENTRY(cnt),
ENA_STAT_TX_ENTRY(bytes),
@@ -100,6 +89,11 @@ static const struct ena_stats ena_stats_rx_strings[] = {
ENA_STAT_RX_ENTRY(bad_req_id),
ENA_STAT_RX_ENTRY(empty_rx_ring),
ENA_STAT_RX_ENTRY(csum_unchecked),
+ ENA_STAT_RX_ENTRY(xdp_aborted),
+ ENA_STAT_RX_ENTRY(xdp_drop),
+ ENA_STAT_RX_ENTRY(xdp_pass),
+ ENA_STAT_RX_ENTRY(xdp_tx),
+ ENA_STAT_RX_ENTRY(xdp_invalid),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
@@ -110,10 +104,12 @@ static const struct ena_stats ena_stats_ena_com_strings[] = {
ENA_STAT_ENA_COM_ENTRY(no_completion),
};
-#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
-#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
-#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
-#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+#define ENA_STATS_ARRAY_GLOBAL ARRAY_SIZE(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX ARRAY_SIZE(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
+#define ENA_STATS_ARRAY_ENI(adapter) \
+ (ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)
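ENA_STATS_ARRAY_ENI() sizes the ENI block by multiplying the array size by a bool: eni_stats_supported promotes to 0 or 1, so unsupported devices contribute no strings and no values without a branch. Spelled out, it is equivalent to:

	/* What the macro computes, written as a function. */
	static int example_eni_stats_count(const struct ena_adapter *adapter)
	{
		if (!adapter->eni_stats_supported)
			return 0;
		return ARRAY_SIZE(ena_stats_eni_strings);
	}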
static void ena_safe_update_stat(u64 *src, u64 *dst,
struct u64_stats_sync *syncp)
@@ -134,29 +130,30 @@ static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
u64 *ptr;
int i, j;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
/* Tx stats */
ring = &adapter->tx_ring[i];
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
- ptr = (u64 *)((uintptr_t)&ring->tx_stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset;
ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
}
+ /* XDP TX queues don't have an RX queue counterpart */
+ if (!ENA_IS_XDP_INDEX(adapter, i)) {
+ /* Rx stats */
+ ring = &adapter->rx_ring[i];
- /* Rx stats */
- ring = &adapter->rx_ring[i];
-
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
- ptr = (u64 *)((uintptr_t)&ring->rx_stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&ring->rx_stats +
+ ena_stats->stat_offset;
- ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
+ }
}
}
}
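This rewrite depends on the macro change earlier in the file: stat_offset is now stored in u64 units (offsetof() divided by sizeof(u64)), so a counter can be fetched with plain u64 pointer arithmetic instead of uintptr_t byte math. A minimal reader under that assumption:

	/* Sketch: base points at a struct of consecutive u64 counters,
	 * and offset is expressed in u64 units, as the reworked macros
	 * produce.
	 */
	static u64 example_read_stat(const void *base, int offset_in_u64s)
	{
		return *((const u64 *)base + offset_in_u64s);
	}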
@@ -170,18 +167,17 @@ static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
ena_stats = &ena_stats_ena_com_strings[i];
- ptr = (u64 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&adapter->ena_dev->admin_queue.stats +
+ ena_stats->stat_offset;
*(*data)++ = *ptr;
}
}
-static void ena_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats,
- u64 *data)
+static void ena_get_stats(struct ena_adapter *adapter,
+ u64 *data,
+ bool eni_stats_needed)
{
- struct ena_adapter *adapter = netdev_priv(netdev);
const struct ena_stats *ena_stats;
u64 *ptr;
int i;
@@ -189,16 +185,48 @@ static void ena_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
- ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
- (uintptr_t)ena_stats->stat_offset);
+ ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset;
ena_safe_update_stat(ptr, data++, &adapter->syncp);
}
+ if (eni_stats_needed) {
+ ena_update_hw_stats(adapter);
+ for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+
+ ptr = (u64 *)&adapter->eni_stats +
+ ena_stats->stat_offset;
+
+ ena_safe_update_stat(ptr, data++, &adapter->syncp);
+ }
+ }
+
ena_queue_stats(adapter, &data);
ena_dev_admin_queue_stats(adapter, &data);
}
+static void ena_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ ena_get_stats(adapter, data, adapter->eni_stats_supported);
+}
+
+static int ena_get_sw_stats_count(struct ena_adapter *adapter)
+{
+ return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
+ + adapter->xdp_num_queues * ENA_STATS_ARRAY_TX
+ + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
+static int ena_get_hw_stats_count(struct ena_adapter *adapter)
+{
+ return ENA_STATS_ARRAY_ENI(adapter);
+}
+
int ena_get_sset_count(struct net_device *netdev, int sset)
{
struct ena_adapter *adapter = netdev_priv(netdev);
@@ -206,31 +234,38 @@ int ena_get_sset_count(struct net_device *netdev, int sset)
if (sset != ETH_SS_STATS)
return -EOPNOTSUPP;
- return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
- + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+ return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter);
}
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
const struct ena_stats *ena_stats;
+ bool is_xdp;
int i, j;
- for (i = 0; i < adapter->num_io_queues; i++) {
+ for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
+ is_xdp = ENA_IS_XDP_INDEX(adapter, i);
/* Tx stats */
for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
ena_stats = &ena_stats_tx_strings[j];
snprintf(*data, ETH_GSTRING_LEN,
- "queue_%u_tx_%s", i, ena_stats->name);
+ "queue_%u_%s_%s", i,
+ is_xdp ? "xdp_tx" : "tx", ena_stats->name);
(*data) += ETH_GSTRING_LEN;
}
- /* Rx stats */
- for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
- ena_stats = &ena_stats_rx_strings[j];
- snprintf(*data, ETH_GSTRING_LEN,
- "queue_%u_rx_%s", i, ena_stats->name);
- (*data) += ETH_GSTRING_LEN;
+ if (!is_xdp) {
+ /* RX stats; XDP queues don't have an RX counterpart */
+ for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
+ ena_stats = &ena_stats_rx_strings[j];
+
+ snprintf(*data, ETH_GSTRING_LEN,
+ "queue_%u_rx_%s", i, ena_stats->name);
+ (*data) += ETH_GSTRING_LEN;
+ }
}
}
}
@@ -249,25 +284,43 @@ static void ena_com_dev_strings(u8 **data)
}
}
-static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+static void ena_get_strings(struct ena_adapter *adapter,
+ u8 *data,
+ bool eni_stats_needed)
{
- struct ena_adapter *adapter = netdev_priv(netdev);
const struct ena_stats *ena_stats;
int i;
- if (sset != ETH_SS_STATS)
- return;
-
for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
ena_stats = &ena_stats_global_strings[i];
memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
data += ETH_GSTRING_LEN;
}
+ if (eni_stats_needed) {
+ for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
+ ena_stats = &ena_stats_eni_strings[i];
+ memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
ena_queue_strings(adapter, &data);
ena_com_dev_strings(&data);
}
+static void ena_get_ethtool_strings(struct net_device *netdev,
+ u32 sset,
+ u8 *data)
+{
+ struct ena_adapter *adapter = netdev_priv(netdev);
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ ena_get_strings(adapter, data, adapter->eni_stats_supported);
+}
+
static int ena_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *link_ksettings)
{
@@ -847,7 +900,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.get_ringparam = ena_get_ringparam,
.set_ringparam = ena_set_ringparam,
.get_sset_count = ena_get_sset_count,
- .get_strings = ena_get_strings,
+ .get_strings = ena_get_ethtool_strings,
.get_ethtool_stats = ena_get_ethtool_stats,
.get_rxnfc = ena_get_rxnfc,
.set_rxnfc = ena_set_rxnfc,
@@ -875,7 +928,7 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
int strings_num;
int i, rc;
- strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
+ strings_num = ena_get_sw_stats_count(adapter);
if (strings_num <= 0) {
netif_err(adapter, drv, netdev, "Can't get stats num\n");
return;
@@ -886,7 +939,7 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
GFP_ATOMIC);
if (!strings_buf) {
netif_err(adapter, drv, netdev,
- "failed to alloc strings_buf\n");
+ "Failed to allocate strings_buf\n");
return;
}
@@ -895,13 +948,13 @@ static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
GFP_ATOMIC);
if (!data_buf) {
netif_err(adapter, drv, netdev,
- "failed to allocate data buf\n");
+ "Failed to allocate data buf\n");
devm_kfree(&adapter->pdev->dev, strings_buf);
return;
}
- ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
- ena_get_ethtool_stats(netdev, NULL, data_buf);
+ ena_get_strings(adapter, strings_buf, false);
+ ena_get_stats(adapter, data_buf, false);
/* If there is a buffer, dump stats, otherwise print them to dmesg */
if (buf)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index a3a8edf9a734..e8131dadc22c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -139,7 +112,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
ret = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
if (!ret) {
- netif_dbg(adapter, drv, dev, "set MTU to %d\n", new_mtu);
+ netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
dev->mtu = new_mtu;
} else {
@@ -178,7 +151,7 @@ static int ena_xmit_common(struct net_device *dev,
*/
if (unlikely(rc)) {
netif_err(adapter, tx_queued, dev,
- "failed to prepare tx bufs\n");
+ "Failed to prepare tx bufs\n");
u64_stats_update_begin(&ring->syncp);
ring->tx_stats.prepare_ctx_err++;
u64_stats_update_end(&ring->syncp);
@@ -292,7 +265,7 @@ error_report_dma_error:
u64_stats_update_begin(&xdp_ring->syncp);
xdp_ring->tx_stats.dma_mapping_err++;
u64_stats_update_end(&xdp_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map xdp buff\n");
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map xdp buff\n");
xdp_return_frame_rx_napi(tx_info->xdpf);
tx_info->xdpf = NULL;
@@ -365,6 +338,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
{
struct bpf_prog *xdp_prog;
u32 verdict = XDP_PASS;
+ u64 *xdp_stat;
rcu_read_lock();
xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);
@@ -374,17 +348,31 @@ static int ena_xdp_execute(struct ena_ring *rx_ring,
verdict = bpf_prog_run_xdp(xdp_prog, xdp);
- if (verdict == XDP_TX)
+ if (verdict == XDP_TX) {
ena_xdp_xmit_buff(rx_ring->netdev,
xdp,
rx_ring->qid + rx_ring->adapter->num_io_queues,
rx_info);
- else if (unlikely(verdict == XDP_ABORTED))
+
+ xdp_stat = &rx_ring->rx_stats.xdp_tx;
+ } else if (unlikely(verdict == XDP_ABORTED)) {
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
- else if (unlikely(verdict > XDP_TX))
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ } else if (unlikely(verdict == XDP_DROP)) {
+ xdp_stat = &rx_ring->rx_stats.xdp_drop;
+ } else if (unlikely(verdict == XDP_PASS)) {
+ xdp_stat = &rx_ring->rx_stats.xdp_pass;
+ } else {
bpf_warn_invalid_xdp_action(verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+ }
+
+ u64_stats_update_begin(&rx_ring->syncp);
+ (*xdp_stat)++;
+ u64_stats_update_end(&rx_ring->syncp);
out:
rcu_read_unlock();
+
return verdict;
}
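The restructured ena_xdp_execute() picks one stat bucket per verdict and bumps it under u64_stats_update_begin()/end(), the seqcount protection 32-bit kernels need for 64-bit counters. The core of that pattern, on a placeholder ring struct:

	/* Hedged sketch of per-verdict accounting; the struct mirrors
	 * the shape used above but is not the driver's definition.
	 */
	struct example_ring {
		struct u64_stats_sync syncp;
		u64 xdp_pass, xdp_drop, xdp_tx, xdp_aborted, xdp_invalid;
	};

	static void example_count_verdict(struct example_ring *ring,
					  u32 verdict)
	{
		u64 *stat;

		switch (verdict) {
		case XDP_TX:      stat = &ring->xdp_tx;      break;
		case XDP_PASS:    stat = &ring->xdp_pass;    break;
		case XDP_DROP:    stat = &ring->xdp_drop;    break;
		case XDP_ABORTED: stat = &ring->xdp_aborted; break;
		default:          stat = &ring->xdp_invalid; break;
		}

		u64_stats_update_begin(&ring->syncp);
		(*stat)++;
		u64_stats_update_end(&ring->syncp);
	}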
@@ -549,7 +537,7 @@ static int ena_xdp_set(struct net_device *netdev, struct netdev_bpf *bpf)
if (!old_bpf_prog)
netif_info(adapter, drv, adapter->netdev,
- "xdp program set, changing the max_mtu from %d to %d",
+ "XDP program is set, changing the max_mtu from %d to %d",
prev_mtu, netdev->max_mtu);
} else if (rc == ENA_XDP_CURRENT_MTU_TOO_LARGE) {
@@ -968,7 +956,7 @@ static int ena_alloc_rx_page(struct ena_ring *rx_ring,
return -EIO;
}
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "alloc page %p, rx_info %p\n", page, rx_info);
+ "Allocate page %p, rx_info %p\n", page, rx_info);
rx_info->page = page;
rx_info->page_offset = 0;
@@ -1018,7 +1006,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
GFP_ATOMIC | __GFP_COMP);
if (unlikely(rc < 0)) {
netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
- "failed to alloc buffer for rx queue %d\n",
+ "Failed to allocate buffer for rx queue %d\n",
rx_ring->qid);
break;
}
@@ -1027,7 +1015,7 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
req_id);
if (unlikely(rc)) {
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
- "failed to add buffer for rx queue %d\n",
+ "Failed to add buffer for rx queue %d\n",
rx_ring->qid);
break;
}
@@ -1039,9 +1027,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->rx_stats.refil_partial++;
u64_stats_update_end(&rx_ring->syncp);
- netdev_warn(rx_ring->netdev,
- "refilled rx qid %d with only %d buffers (from %d)\n",
- rx_ring->qid, i, num);
+ netif_warn(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Refilled rx qid %d with only %d buffers (from %d)\n",
+ rx_ring->qid, i, num);
}
/* ena_com_write_sq_doorbell issues a wmb() */
@@ -1082,7 +1070,7 @@ static void ena_refill_all_rx_bufs(struct ena_adapter *adapter)
if (unlikely(rc != bufs_num))
netif_warn(rx_ring->adapter, rx_status, rx_ring->netdev,
- "refilling Queue %d failed. allocated %d buffers from: %d\n",
+ "Refilling Queue %d failed. allocated %d buffers from: %d\n",
i, rc, bufs_num);
}
}
@@ -1140,14 +1128,14 @@ static void ena_free_tx_bufs(struct ena_ring *tx_ring)
continue;
if (print_once) {
- netdev_notice(tx_ring->netdev,
- "free uncompleted tx skb qid %d idx 0x%x\n",
- tx_ring->qid, i);
+ netif_notice(tx_ring->adapter, ifdown, tx_ring->netdev,
+ "Free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
print_once = false;
} else {
- netdev_dbg(tx_ring->netdev,
- "free uncompleted tx skb qid %d idx 0x%x\n",
- tx_ring->qid, i);
+ netif_dbg(tx_ring->adapter, ifdown, tx_ring->netdev,
+ "Free uncompleted tx skb qid %d idx 0x%x\n",
+ tx_ring->qid, i);
}
ena_unmap_tx_buff(tx_ring, tx_info);
@@ -1399,7 +1387,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
return NULL;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "rx allocated small packet. len %d. data_len %d\n",
+ "RX allocated small packet. len %d. data_len %d\n",
skb->len, skb->data_len);
/* sync this buffer for CPU use */
@@ -1436,7 +1424,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
rx_info->page_offset = 0;
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
- "rx skb updated. len %d. data_len %d\n",
+ "RX skb updated. len %d. data_len %d\n",
skb->len, skb->data_len);
rx_info->page = NULL;
@@ -1643,6 +1631,11 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
&next_to_clean);
if (unlikely(!skb)) {
+ /* The page might not actually be freed here, since the
+ * page reference count is incremented in
+ * ena_xdp_xmit_buff(); it is decremented only when a TX
+ * completion is received from the device
+ */
if (xdp_verdict == XDP_TX)
ena_free_rx_page(rx_ring,
&rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id]);
@@ -1770,6 +1763,7 @@ static void ena_unmask_interrupt(struct ena_ring *tx_ring,
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.unmask_interrupt++;
u64_stats_update_end(&tx_ring->syncp);
+
/* It is a shared MSI-X.
* Tx and Rx CQ have pointer to it.
* So we use one of them to reach the intr reg
@@ -1987,7 +1981,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
/* Reserve the max MSI-X vectors we might need */
msix_vecs = ENA_MAX_MSIX_VEC(adapter->max_num_io_queues);
netif_dbg(adapter, probe, adapter->netdev,
- "trying to enable MSI-X, vectors %d\n", msix_vecs);
+ "Trying to enable MSI-X, vectors %d\n", msix_vecs);
irq_cnt = pci_alloc_irq_vectors(adapter->pdev, ENA_MIN_MSIX_VEC,
msix_vecs, PCI_IRQ_MSIX);
@@ -2000,7 +1994,7 @@ static int ena_enable_msix(struct ena_adapter *adapter)
if (irq_cnt != msix_vecs) {
netif_notice(adapter, probe, adapter->netdev,
- "enable only %d MSI-X (out of %d), reduce the number of queues\n",
+ "Enable only %d MSI-X (out of %d), reduce the number of queues\n",
irq_cnt, msix_vecs);
adapter->num_io_queues = irq_cnt - ENA_ADMIN_MSIX_VEC;
}
@@ -2070,12 +2064,12 @@ static int ena_request_mgmnt_irq(struct ena_adapter *adapter)
irq->data);
if (rc) {
netif_err(adapter, probe, adapter->netdev,
- "failed to request admin irq\n");
+ "Failed to request admin irq\n");
return rc;
}
netif_dbg(adapter, probe, adapter->netdev,
- "set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
+ "Set affinity hint of mgmnt irq.to 0x%lx (irq vector: %d)\n",
irq->affinity_hint_mask.bits[0], irq->vector);
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2108,7 +2102,7 @@ static int ena_request_io_irq(struct ena_adapter *adapter)
}
netif_dbg(adapter, ifup, adapter->netdev,
- "set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
+ "Set affinity hint of irq. index %d to 0x%lx (irq vector: %d)\n",
i, irq->affinity_hint_mask.bits[0], irq->vector);
irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);
@@ -2548,7 +2542,7 @@ static int ena_up(struct ena_adapter *adapter)
{
int io_queue_count, rc, i;
- netdev_dbg(adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
io_queue_count = adapter->num_io_queues + adapter->xdp_num_queues;
ena_setup_io_intr(adapter);
@@ -2632,7 +2626,8 @@ static void ena_down(struct ena_adapter *adapter)
rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
if (rc)
- dev_err(&adapter->pdev->dev, "Device reset failed\n");
+ netif_err(adapter, ifdown, adapter->netdev,
+ "Device reset failed\n");
/* stop submitting admin commands on a device that was reset */
ena_com_set_admin_running_state(adapter->ena_dev, false);
}
@@ -2954,7 +2949,7 @@ error_report_dma_error:
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.dma_mapping_err++;
u64_stats_update_end(&tx_ring->syncp);
- netdev_warn(adapter->netdev, "failed to map skb\n");
+ netif_warn(adapter, tx_queued, adapter->netdev, "Failed to map skb\n");
tx_info->skb = NULL;
@@ -3092,13 +3087,14 @@ static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
{
+ struct device *dev = &pdev->dev;
struct ena_admin_host_info *host_info;
int rc;
/* Allocate only the host info */
rc = ena_com_allocate_host_info(ena_dev);
if (rc) {
- pr_err("Cannot allocate host info\n");
+ dev_err(dev, "Cannot allocate host info\n");
return;
}
@@ -3128,9 +3124,9 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
rc = ena_com_set_host_attributes(ena_dev);
if (rc) {
if (rc == -EOPNOTSUPP)
- pr_warn("Cannot set host attributes\n");
+ dev_warn(dev, "Cannot set host attributes\n");
else
- pr_err("Cannot set host attributes\n");
+ dev_err(dev, "Cannot set host attributes\n");
goto err;
}
@@ -3158,7 +3154,8 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
rc = ena_com_allocate_debug_area(adapter->ena_dev, debug_area_size);
if (rc) {
- pr_err("Cannot allocate debug area\n");
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot allocate debug area\n");
return;
}
@@ -3178,6 +3175,19 @@ err:
ena_com_delete_debug_area(adapter->ena_dev);
}
+int ena_update_hw_stats(struct ena_adapter *adapter)
+{
+ int rc = 0;
+
+ rc = ena_com_get_eni_stats(adapter->ena_dev, &adapter->eni_stats);
+ if (rc) {
+ dev_info_once(&adapter->pdev->dev, "Failed to get ENI stats\n");
+ return rc;
+ }
+
+ return 0;
+}
+
static void ena_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
@@ -3349,7 +3359,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_com_mmio_reg_read_request_init(ena_dev);
if (rc) {
- dev_err(dev, "failed to init mmio read less\n");
+ dev_err(dev, "Failed to init mmio read less\n");
return rc;
}
@@ -3367,7 +3377,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_com_validate_version(ena_dev);
if (rc) {
- dev_err(dev, "device version is too low\n");
+ dev_err(dev, "Device version is too low\n");
goto err_mmio_read_less;
}
@@ -3436,7 +3446,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
if (rc) {
- dev_err(&pdev->dev, "ena device init failed\n");
+ dev_err(dev, "ENA device init failed\n");
goto err_admin_init;
}
@@ -3572,9 +3582,10 @@ static int ena_restore_device(struct ena_adapter *adapter)
netif_carrier_on(adapter->netdev);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- dev_err(&pdev->dev, "Device reset completed successfully\n");
adapter->last_keep_alive_jiffies = jiffies;
+ dev_err(&pdev->dev, "Device reset completed successfully\n");
+
return rc;
err_disable_msix:
ena_free_mgmnt_irq(adapter);
@@ -3776,7 +3787,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
u64_stats_update_end(&rx_ring->syncp);
netif_err(adapter, drv, adapter->netdev,
- "trigger refill for ring %d\n", i);
+ "Trigger refill for ring %d\n", i);
napi_schedule(rx_ring->napi);
rx_ring->empty_rx_queue = 0;
@@ -4138,14 +4149,13 @@ static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
*/
static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ena_calc_queue_size_ctx calc_queue_ctx = { 0 };
+ struct ena_calc_queue_size_ctx calc_queue_ctx = {};
struct ena_com_dev_get_features_ctx get_feat_ctx;
struct ena_com_dev *ena_dev = NULL;
struct ena_adapter *adapter;
struct net_device *netdev;
static int adapters_found;
u32 max_num_io_queues;
- char *queue_type_str;
bool wd_state;
int bars, rc;
@@ -4177,7 +4187,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_resource_start(pdev, ENA_REG_BAR),
pci_resource_len(pdev, ENA_REG_BAR));
if (!ena_dev->reg_bar) {
- dev_err(&pdev->dev, "failed to remap regs bar\n");
+ dev_err(&pdev->dev, "Failed to remap regs bar\n");
rc = -EFAULT;
goto err_free_region;
}
@@ -4188,7 +4198,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ena_device_init(ena_dev, pdev, &get_feat_ctx, &wd_state);
if (rc) {
- dev_err(&pdev->dev, "ena device init failed\n");
+ dev_err(&pdev->dev, "ENA device init failed\n");
if (rc == -ETIME)
rc = -EPROBE_DEFER;
goto err_free_region;
@@ -4196,7 +4206,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = ena_map_llq_mem_bar(pdev, ena_dev, bars);
if (rc) {
- dev_err(&pdev->dev, "ena llq bar mapping failed\n");
+ dev_err(&pdev->dev, "ENA llq bar mapping failed\n");
goto err_free_ena_dev;
}
@@ -4296,6 +4306,11 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ena_config_debug_area(adapter);
+ if (!ena_update_hw_stats(adapter))
+ adapter->eni_stats_supported = true;
+ else
+ adapter->eni_stats_supported = false;
+
memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len);
netif_carrier_off(netdev);
@@ -4318,15 +4333,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
- if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
- queue_type_str = "Regular";
- else
- queue_type_str = "Low Latency";
-
dev_info(&pdev->dev,
- "%s found at mem %lx, mac addr %pM, Placement policy: %s\n",
+ "%s found at mem %lx, mac addr %pM\n",
DEVICE_NAME, (long)pci_resource_start(pdev, 0),
- netdev->dev_addr, queue_type_str);
+ netdev->dev_addr);
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
@@ -4456,7 +4466,7 @@ static int __maybe_unused ena_suspend(struct device *dev_d)
rtnl_lock();
if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
dev_err(&pdev->dev,
- "ignoring device reset request as the device is being suspended\n");
+ "Ignoring device reset request as the device is being suspended\n");
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
ena_destroy_device(adapter, true);
@@ -4531,7 +4541,7 @@ static void ena_update_on_link_change(void *adapter_data,
ENA_ADMIN_AENQ_LINK_CHANGE_DESC_LINK_STATUS_MASK;
if (status) {
- netdev_dbg(adapter->netdev, "%s\n", __func__);
+ netif_dbg(adapter, ifup, adapter->netdev, "%s\n", __func__);
set_bit(ENA_FLAG_LINK_UP, &adapter->flags);
if (!test_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags))
netif_carrier_on(adapter->netdev);
@@ -4575,7 +4585,7 @@ static void ena_notification(void *adapter_data,
aenq_e->aenq_common_desc.group,
ENA_ADMIN_NOTIFICATION);
- switch (aenq_e->aenq_common_desc.syndrom) {
+ switch (aenq_e->aenq_common_desc.syndrome) {
case ENA_ADMIN_UPDATE_HINTS:
hints = (struct ena_admin_ena_hw_hints *)
(&aenq_e->inline_data_w4);
@@ -4584,7 +4594,7 @@ static void ena_notification(void *adapter_data,
default:
netif_err(adapter, drv, adapter->netdev,
"Invalid aenq notification link state %d\n",
- aenq_e->aenq_common_desc.syndrom);
+ aenq_e->aenq_common_desc.syndrome);
}
}
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0c8504006247..30eb686749dc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_H
@@ -261,6 +234,11 @@ struct ena_stats_rx {
u64 bad_req_id;
u64 empty_rx_ring;
u64 csum_unchecked;
+ u64 xdp_aborted;
+ u64 xdp_drop;
+ u64 xdp_pass;
+ u64 xdp_tx;
+ u64 xdp_invalid;
};
struct ena_ring {
@@ -405,6 +383,8 @@ struct ena_adapter {
struct u64_stats_sync syncp;
struct ena_stats_dev dev_stats;
+ struct ena_admin_eni_stats eni_stats;
+ bool eni_stats_supported;
/* last queue index that was checked for uncompleted tx packets */
u32 last_monitored_tx_qid;
@@ -422,6 +402,8 @@ void ena_dump_stats_to_dmesg(struct ena_adapter *adapter);
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
+int ena_update_hw_stats(struct ena_adapter *adapter);
+
int ena_update_queue_sizes(struct ena_adapter *adapter,
u32 new_tx_size,
u32 new_rx_size);
diff --git a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
index 426e57e10a7f..3ecdf29160ca 100644
--- a/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
+++ b/drivers/net/ethernet/amazon/ena/ena_pci_id_tbl.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef ENA_PCI_ID_TBL_H_
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index b514bb1b855d..1e007a41a525 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
- * Copyright 2015 - 2016 Amazon.com, Inc. or its affiliates.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _ENA_REGS_H_
#define _ENA_REGS_H_
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index e1fde585fd0d..00ae1081254d 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -657,16 +657,6 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id)
struct net_device *dev = dev_id;
struct lance_private *lp = netdev_priv(dev);
int csr0;
- static int in_interrupt;
-
- if (dev == NULL) {
- DPRINTK( 1, ( "lance_interrupt(): invalid dev_id\n" ));
- return IRQ_NONE;
- }
-
- if (in_interrupt)
- DPRINTK( 2, ( "%s: Re-entering the interrupt handler.\n", dev->name ));
- in_interrupt = 1;
still_more:
flush_cache_all();
@@ -774,7 +764,6 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id)
DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
dev->name, DREG ));
- in_interrupt = 0;
return IRQ_HANDLED;
}
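
The guard deleted above was dead code on two counts: dev_id is always the cookie that was passed to request_irq(), and genirq never re-enters a handler for the same IRQ line, so the NULL check and the (racy, non-atomic) static flag protected nothing. A minimal sketch of the invariant, using a hypothetical handler:

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            /* dev_id is exactly the cookie given to request_irq(); genirq
             * serializes invocations for this line, so no hand-rolled
             * re-entrancy flag is needed. */
            struct net_device *dev = dev_id;

            netdev_dbg(dev, "servicing irq %d\n", irq);
            return IRQ_HANDLED;
    }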
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 4ba75551cb17..2709a2db5657 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -403,9 +403,9 @@ static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
return false;
}
-static void xgbe_ecc_isr_task(unsigned long data)
+static void xgbe_ecc_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
unsigned int ecc_isr;
bool stop = false;
@@ -468,14 +468,14 @@ static irqreturn_t xgbe_ecc_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_ecc);
else
- xgbe_ecc_isr_task((unsigned long)pdata);
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
return IRQ_HANDLED;
}
-static void xgbe_isr_task(unsigned long data)
+static void xgbe_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
@@ -582,7 +582,7 @@ isr_done:
/* If there is not a separate ECC irq, handle it here */
if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
- xgbe_ecc_isr_task((unsigned long)pdata);
+ xgbe_ecc_isr_task(&pdata->tasklet_ecc);
/* If there is not a separate I2C irq, handle it here */
if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
@@ -607,7 +607,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_dev);
else
- xgbe_isr_task((unsigned long)pdata);
+ xgbe_isr_task(&pdata->tasklet_dev);
return IRQ_HANDLED;
}
@@ -991,9 +991,8 @@ static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
unsigned int i;
int ret;
- tasklet_init(&pdata->tasklet_dev, xgbe_isr_task, (unsigned long)pdata);
- tasklet_init(&pdata->tasklet_ecc, xgbe_ecc_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
+ tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
netdev_name(netdev), pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
index 4d9062d35930..22d4fc547a0a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
@@ -274,9 +274,9 @@ static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata,
XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
}
-static void xgbe_i2c_isr_task(unsigned long data)
+static void xgbe_i2c_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_i2c);
struct xgbe_i2c_op_state *state = &pdata->i2c.op_state;
unsigned int isr;
@@ -324,7 +324,7 @@ static irqreturn_t xgbe_i2c_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_i2c);
else
- xgbe_i2c_isr_task((unsigned long)pdata);
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
return IRQ_HANDLED;
}
@@ -369,7 +369,7 @@ static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr)
static irqreturn_t xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_i2c_isr_task((unsigned long)pdata);
+ xgbe_i2c_isr_task(&pdata->tasklet_i2c);
return IRQ_HANDLED;
}
@@ -462,8 +462,7 @@ static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
/* If we have a separate I2C irq, enable it */
if (pdata->dev_irq != pdata->i2c_irq) {
- tasklet_init(&pdata->tasklet_i2c, xgbe_i2c_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_i2c, xgbe_i2c_isr_task);
ret = devm_request_irq(pdata->dev, pdata->i2c_irq,
xgbe_i2c_isr, 0, pdata->i2c_name,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
index 8a3a60bb2688..93ef5a30cb8d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -688,9 +688,9 @@ static void xgbe_an73_isr(struct xgbe_prv_data *pdata)
}
}
-static void xgbe_an_isr_task(unsigned long data)
+static void xgbe_an_isr_task(struct tasklet_struct *t)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_an);
netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
@@ -715,14 +715,14 @@ static irqreturn_t xgbe_an_isr(int irq, void *data)
if (pdata->isr_as_tasklet)
tasklet_schedule(&pdata->tasklet_an);
else
- xgbe_an_isr_task((unsigned long)pdata);
+ xgbe_an_isr_task(&pdata->tasklet_an);
return IRQ_HANDLED;
}
static irqreturn_t xgbe_an_combined_isr(struct xgbe_prv_data *pdata)
{
- xgbe_an_isr_task((unsigned long)pdata);
+ xgbe_an_isr_task(&pdata->tasklet_an);
return IRQ_HANDLED;
}
@@ -1414,8 +1414,7 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata)
/* If we have a separate AN irq, enable it */
if (pdata->dev_irq != pdata->an_irq) {
- tasklet_init(&pdata->tasklet_an, xgbe_an_isr_task,
- (unsigned long)pdata);
+ tasklet_setup(&pdata->tasklet_an, xgbe_an_isr_task);
ret = devm_request_irq(pdata->dev, pdata->an_irq,
xgbe_an_isr, 0, pdata->an_name,
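
All four xgbe conversions above follow the same tasklet_setup()/from_tasklet() pattern: the callback now receives the tasklet_struct itself, and from_tasklet() (a container_of() wrapper) recovers the private structure through the named member, replacing the old unsigned long cookie and cast. A sketch of the shape, using the xgbe names from the hunks above:

    static void example_task(struct tasklet_struct *t)
    {
            /* container_of(t, struct xgbe_prv_data, tasklet_dev) */
            struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);

            /* ... deferred interrupt work on pdata ... */
    }

    /* registration no longer takes a context cookie: */
    tasklet_setup(&pdata->tasklet_dev, example_task);

A side benefit is that the direct call sites (e.g. xgbe_isr_task(&pdata->tasklet_dev)) stay type-safe instead of laundering the pointer through unsigned long.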
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index d35a338120cf..643f5e646740 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -18,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <net/ip.h>
#include <linux/prefetch.h>
@@ -26,7 +27,6 @@
#include "xgene_enet_hw.h"
#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
-#include "../../../phy/mdio-xgene.h"
#define ETHER_MIN_PACKET 64
#define ETHER_STD_PACKET 1518
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 1ab5314c4c1b..de2a9348bc3f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -917,6 +917,57 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
return ret;
}
+static int aq_ethtool_get_phy_tunable(struct net_device *ndev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD: {
+ u16 *val = data;
+
+ *val = aq_nic->aq_nic_cfg.is_media_detect ? AQ_HW_MEDIA_DETECT_CNT : 0;
+ break;
+ }
+ case ETHTOOL_PHY_DOWNSHIFT: {
+ u8 *val = data;
+
+ *val = (u8)aq_nic->aq_nic_cfg.downshift_counter;
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int aq_ethtool_set_phy_tunable(struct net_device *ndev,
+ const struct ethtool_tunable *tuna, const void *data)
+{
+ int err = -EOPNOTSUPP;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ switch (tuna->id) {
+ case ETHTOOL_PHY_EDPD: {
+ const u16 *val = data;
+
+ err = aq_nic_set_media_detect(aq_nic, *val);
+ break;
+ }
+ case ETHTOOL_PHY_DOWNSHIFT: {
+ const u8 *val = data;
+
+ err = aq_nic_set_downshift(aq_nic, *val);
+ break;
+ }
+ default:
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops aq_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -952,4 +1003,6 @@ const struct ethtool_ops aq_ethtool_ops = {
.get_coalesce = aq_ethtool_get_coalesce,
.set_coalesce = aq_ethtool_set_coalesce,
.get_ts_info = aq_ethtool_get_ts_info,
+ .get_phy_tunable = aq_ethtool_get_phy_tunable,
+ .set_phy_tunable = aq_ethtool_set_phy_tunable,
};
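
For reference, ETHTOOL_PHY_EDPD carries a u16 polling interval in milliseconds, with 0 (ETHTOOL_PHY_EDPD_DISABLE) meaning off; since this hardware supports only one fixed interval, the handlers above collapse any non-zero request to AQ_HW_MEDIA_DETECT_CNT. A sketch of that mapping (edpd_msecs_to_fw is a hypothetical helper, not part of the patch):

    static u16 edpd_msecs_to_fw(u16 msecs)
    {
            /* 0 disables energy-detect power down; any other interval is
             * coerced to the single value the firmware supports (6000 ms). */
            return msecs ? AQ_HW_MEDIA_DETECT_CNT : 0;
    }

Userspace reaches these hooks through ethtool's --get-phy-tunable/--set-phy-tunable commands for the downshift and EDPD tunables.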
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index 7df74015fbc9..bed481816ea3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -143,6 +143,8 @@ struct aq_stats_s {
#define AQ_HW_LED_BLINK 0x2U
#define AQ_HW_LED_DEFAULT 0x0U
+#define AQ_HW_MEDIA_DETECT_CNT 6000
+
enum aq_priv_flags {
AQ_HW_LOOPBACK_DMA_SYS,
AQ_HW_LOOPBACK_PKT_SYS,
@@ -386,6 +388,10 @@ struct aq_fw_ops {
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
u32 *supported_rates);
+ int (*set_downshift)(struct aq_hw_s *self, u32 counter);
+
+ int (*set_media_detect)(struct aq_hw_s *self, bool enable);
+
u32 (*get_link_capabilities)(struct aq_hw_s *self);
int (*send_macsec_req)(struct aq_hw_s *self,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index c6bdf1d677d1..0f865daeb36d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -405,6 +405,10 @@ int aq_nic_init(struct aq_nic_s *self)
mutex_unlock(&self->fwreq_mutex);
if (err < 0)
goto err_exit;
+ /* Restore default settings */
+ aq_nic_set_downshift(self, self->aq_nic_cfg.downshift_counter);
+ aq_nic_set_media_detect(self, self->aq_nic_cfg.is_media_detect ?
+ AQ_HW_MEDIA_DETECT_CNT : 0);
err = self->aq_hw_ops->hw_init(self->aq_hw,
aq_nic_get_ndev(self)->dev_addr);
@@ -1398,6 +1402,52 @@ void aq_nic_release_filter(struct aq_nic_s *self, enum aq_rx_filter_type type,
}
}
+int aq_nic_set_downshift(struct aq_nic_s *self, int val)
+{
+ int err = 0;
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ if (!self->aq_fw_ops->set_downshift)
+ return -EOPNOTSUPP;
+
+ if (val > 15) {
+ netdev_err(self->ndev, "downshift counter should be <= 15\n");
+ return -EINVAL;
+ }
+ cfg->downshift_counter = val;
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_downshift(self->aq_hw, cfg->downshift_counter);
+ mutex_unlock(&self->fwreq_mutex);
+
+ return err;
+}
+
+int aq_nic_set_media_detect(struct aq_nic_s *self, int val)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ int err = 0;
+
+ if (!self->aq_fw_ops->set_media_detect)
+ return -EOPNOTSUPP;
+
+ if (val > 0 && val != AQ_HW_MEDIA_DETECT_CNT) {
+ netdev_err(self->ndev, "EDPD on this device could have only fixed value of %d\n",
+ AQ_HW_MEDIA_DETECT_CNT);
+ return -EINVAL;
+ }
+
+ mutex_lock(&self->fwreq_mutex);
+ err = self->aq_fw_ops->set_media_detect(self->aq_hw, !!val);
+ mutex_unlock(&self->fwreq_mutex);
+
+ /* msecs play no role - the configuration is fixed in the PHY */
+ if (!err)
+ cfg->is_media_detect = !!val;
+
+ return err;
+}
+
int aq_nic_setup_tc_mqprio(struct aq_nic_s *self, u32 tcs, u8 *prio_tc_map)
{
struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index eb7d8430f2f5..926cca9a0c83 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -62,6 +62,8 @@ struct aq_nic_cfg_s {
bool is_lro;
bool is_qos;
bool is_ptp;
+ bool is_media_detect;
+ int downshift_counter;
enum aq_tc_mode tc_mode;
u32 priv_flags;
u8 tcs;
@@ -195,6 +197,8 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self,
struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
u32 aq_nic_get_fw_version(struct aq_nic_s *self);
int aq_nic_set_loopback(struct aq_nic_s *self);
+int aq_nic_set_downshift(struct aq_nic_s *self, int val);
+int aq_nic_set_media_detect(struct aq_nic_s *self, int val);
int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
void aq_nic_shutdown(struct aq_nic_s *self);
u8 aq_nic_reserve_filter(struct aq_nic_s *self, enum aq_rx_filter_type type);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 8941ac4df9e3..9f1b15077e7d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -1536,7 +1536,7 @@ static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self,
return aq_hw_err_from_flags(self);
}
-/**
+/*
* @brief Set VLAN filter table
* @details Configure VLAN filter table to accept (and assign the queue) traffic
* for the particular vlan ids.
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 93c06dfa6c55..ee0c22d04935 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -612,6 +612,41 @@ static u32 aq_fw2x_state2_get(struct aq_hw_s *self)
return aq_hw_read_reg(self, HW_ATL_FW2X_MPI_STATE2_ADDR);
}
+static int aq_fw2x_set_downshift(struct aq_hw_s *self, u32 counter)
+{
+ int err = 0;
+ u32 mpi_opts;
+ u32 offset;
+
+ offset = offsetof(struct hw_atl_utils_settings, downshift_retry_count);
+ err = hw_atl_write_fwsettings_dwords(self, offset, &counter, 1);
+ if (err)
+ return err;
+
+ mpi_opts = aq_hw_read_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR);
+ if (counter)
+ mpi_opts |= HW_ATL_FW2X_CTRL_DOWNSHIFT;
+ else
+ mpi_opts &= ~HW_ATL_FW2X_CTRL_DOWNSHIFT;
+ aq_hw_write_reg(self, HW_ATL_FW2X_MPI_CONTROL2_ADDR, mpi_opts);
+
+ return err;
+}
+
+static int aq_fw2x_set_media_detect(struct aq_hw_s *self, bool on)
+{
+ u32 enable;
+ u32 offset;
+
+ if (self->fw_ver_actual < HW_ATL_FW_VER_MEDIA_CONTROL)
+ return -EOPNOTSUPP;
+
+ offset = offsetof(struct hw_atl_utils_settings, media_detect);
+ enable = on;
+
+ return hw_atl_write_fwsettings_dwords(self, offset, &enable, 1);
+}
+
static u32 aq_fw2x_get_link_capabilities(struct aq_hw_s *self)
{
int err = 0;
@@ -692,6 +727,8 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.enable_ptp = aq_fw3x_enable_ptp,
.led_control = aq_fw2x_led_control,
.set_phyloopback = aq_fw2x_set_phyloopback,
+ .set_downshift = aq_fw2x_set_downshift,
+ .set_media_detect = aq_fw2x_set_media_detect,
.adjust_ptp = aq_fw3x_adjust_ptp,
.get_link_capabilities = aq_fw2x_get_link_capabilities,
.send_macsec_req = aq_fw2x_send_macsec_req,
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
index 85628acbcc1d..dd259c8f2f4f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c
@@ -519,6 +519,18 @@ int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self,
return 0;
}
+static int aq_a2_fw_set_downshift(struct aq_hw_s *self, u32 counter)
+{
+ struct link_options_s link_options;
+
+ hw_atl2_shared_buffer_get(self, link_options, link_options);
+ link_options.downshift = !!counter;
+ link_options.downshift_retry = counter;
+ hw_atl2_shared_buffer_write(self, link_options, link_options);
+
+ return hw_atl2_shared_buffer_finish_ack(self);
+}
+
const struct aq_fw_ops aq_a2_fw_ops = {
.init = aq_a2_fw_init,
.deinit = aq_a2_fw_deinit,
@@ -536,4 +548,5 @@ const struct aq_fw_ops aq_a2_fw_ops = {
.set_flow_control = aq_a2_fw_set_flow_control,
.get_flow_control = aq_a2_fw_get_flow_control,
.set_phyloopback = aq_a2_fw_set_phyloopback,
+ .set_downshift = aq_a2_fw_set_downshift,
};
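
The new set_downshift op is a straight instance of the A2 firmware mailbox idiom: read the structure out of the shared buffer, modify it, write it back, then ack so firmware consumes the change. A sketch of the four steps (macro names as in hw_atl2_utils_fw.c; the middle macro argument names the shared-buffer field):

    struct link_options_s link_options;
    int err;

    hw_atl2_shared_buffer_get(self, link_options, link_options);   /* read */
    link_options.downshift = 1U;                                   /* modify */
    link_options.downshift_retry = 4U;
    hw_atl2_shared_buffer_write(self, link_options, link_options); /* write back */
    err = hw_atl2_shared_buffer_finish_ack(self);                  /* ack */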
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index 1c7736b7eaf7..800620b8f10d 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/**
- * emac_arc.c - ARC EMAC specific glue layer
+ * DOC: emac_arc.c - ARC EMAC specific glue layer
*
* Copyright (C) 2014 Romain Perier
*
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 38cce66ef212..dd5c8a9038bb 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -235,6 +235,59 @@
| NETIF_MSG_RX_ERR \
| NETIF_MSG_TX_ERR)
+struct ag71xx_statistic {
+ unsigned short offset;
+ u32 mask;
+ const char name[ETH_GSTRING_LEN];
+};
+
+static const struct ag71xx_statistic ag71xx_statistics[] = {
+ { 0x0080, GENMASK(17, 0), "Tx/Rx 64 Byte", },
+ { 0x0084, GENMASK(17, 0), "Tx/Rx 65-127 Byte", },
+ { 0x0088, GENMASK(17, 0), "Tx/Rx 128-255 Byte", },
+ { 0x008C, GENMASK(17, 0), "Tx/Rx 256-511 Byte", },
+ { 0x0090, GENMASK(17, 0), "Tx/Rx 512-1023 Byte", },
+ { 0x0094, GENMASK(17, 0), "Tx/Rx 1024-1518 Byte", },
+ { 0x0098, GENMASK(17, 0), "Tx/Rx 1519-1522 Byte VLAN", },
+ { 0x009C, GENMASK(23, 0), "Rx Byte", },
+ { 0x00A0, GENMASK(17, 0), "Rx Packet", },
+ { 0x00A4, GENMASK(11, 0), "Rx FCS Error", },
+ { 0x00A8, GENMASK(17, 0), "Rx Multicast Packet", },
+ { 0x00AC, GENMASK(21, 0), "Rx Broadcast Packet", },
+ { 0x00B0, GENMASK(17, 0), "Rx Control Frame Packet", },
+ { 0x00B4, GENMASK(11, 0), "Rx Pause Frame Packet", },
+ { 0x00B8, GENMASK(11, 0), "Rx Unknown OPCode Packet", },
+ { 0x00BC, GENMASK(11, 0), "Rx Alignment Error", },
+ { 0x00C0, GENMASK(15, 0), "Rx Frame Length Error", },
+ { 0x00C4, GENMASK(11, 0), "Rx Code Error", },
+ { 0x00C8, GENMASK(11, 0), "Rx Carrier Sense Error", },
+ { 0x00CC, GENMASK(11, 0), "Rx Undersize Packet", },
+ { 0x00D0, GENMASK(11, 0), "Rx Oversize Packet", },
+ { 0x00D4, GENMASK(11, 0), "Rx Fragments", },
+ { 0x00D8, GENMASK(11, 0), "Rx Jabber", },
+ { 0x00DC, GENMASK(11, 0), "Rx Dropped Packet", },
+ { 0x00E0, GENMASK(23, 0), "Tx Byte", },
+ { 0x00E4, GENMASK(17, 0), "Tx Packet", },
+ { 0x00E8, GENMASK(17, 0), "Tx Multicast Packet", },
+ { 0x00EC, GENMASK(17, 0), "Tx Broadcast Packet", },
+ { 0x00F0, GENMASK(11, 0), "Tx Pause Control Frame", },
+ { 0x00F4, GENMASK(11, 0), "Tx Deferral Packet", },
+ { 0x00F8, GENMASK(11, 0), "Tx Excessive Deferral Packet", },
+ { 0x00FC, GENMASK(11, 0), "Tx Single Collision Packet", },
+ { 0x0100, GENMASK(11, 0), "Tx Multiple Collision", },
+ { 0x0104, GENMASK(11, 0), "Tx Late Collision Packet", },
+ { 0x0108, GENMASK(11, 0), "Tx Excessive Collision Packet", },
+ { 0x010C, GENMASK(12, 0), "Tx Total Collision", },
+ { 0x0110, GENMASK(11, 0), "Tx Pause Frames Honored", },
+ { 0x0114, GENMASK(11, 0), "Tx Drop Frame", },
+ { 0x0118, GENMASK(11, 0), "Tx Jabber Frame", },
+ { 0x011C, GENMASK(11, 0), "Tx FCS Error", },
+ { 0x0120, GENMASK(11, 0), "Tx Control Frame", },
+ { 0x0124, GENMASK(11, 0), "Tx Oversize Frame", },
+ { 0x0128, GENMASK(11, 0), "Tx Undersize Frame", },
+ { 0x012C, GENMASK(11, 0), "Tx Fragment", },
+};
+
#define DESC_EMPTY BIT(31)
#define DESC_MORE BIT(24)
#define DESC_PKTLEN_M 0xfff
@@ -394,6 +447,99 @@ static void ag71xx_int_disable(struct ag71xx *ag, u32 ints)
ag71xx_cb(ag, AG71XX_REG_INT_ENABLE, ints);
}
+static void ag71xx_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ strlcpy(info->driver, "ag71xx", sizeof(info->driver));
+ strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ sizeof(info->bus_info));
+}
+
+static int ag71xx_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *kset)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(ag->phylink, kset);
+}
+
+static int ag71xx_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *kset)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_set(ag->phylink, kset);
+}
+
+static int ag71xx_ethtool_nway_reset(struct net_device *ndev)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_nway_reset(ag->phylink);
+}
+
+static void ag71xx_ethtool_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ phylink_ethtool_get_pauseparam(ag->phylink, pause);
+}
+
+static int ag71xx_ethtool_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+
+ return phylink_ethtool_set_pauseparam(ag->phylink, pause);
+}
+
+static void ag71xx_ethtool_get_strings(struct net_device *netdev, u32 sset,
+ u8 *data)
+{
+ if (sset == ETH_SS_STATS) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ ag71xx_statistics[i].name, ETH_GSTRING_LEN);
+ }
+}
+
+static void ag71xx_ethtool_get_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ag71xx *ag = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ag71xx_statistics); i++)
+ *data++ = ag71xx_rr(ag, ag71xx_statistics[i].offset)
+ & ag71xx_statistics[i].mask;
+}
+
+static int ag71xx_ethtool_get_sset_count(struct net_device *ndev, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return ARRAY_SIZE(ag71xx_statistics);
+ return -EOPNOTSUPP;
+}
+
+static const struct ethtool_ops ag71xx_ethtool_ops = {
+ .get_drvinfo = ag71xx_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = ag71xx_get_link_ksettings,
+ .set_link_ksettings = ag71xx_set_link_ksettings,
+ .nway_reset = ag71xx_ethtool_nway_reset,
+ .get_pauseparam = ag71xx_ethtool_get_pauseparam,
+ .set_pauseparam = ag71xx_ethtool_set_pauseparam,
+ .get_strings = ag71xx_ethtool_get_strings,
+ .get_ethtool_stats = ag71xx_ethtool_get_stats,
+ .get_sset_count = ag71xx_ethtool_get_sset_count,
+};
+
static int ag71xx_mdio_wait_busy(struct ag71xx *ag)
{
struct net_device *ndev = ag->ndev;
@@ -910,6 +1056,8 @@ static void ag71xx_mac_validate(struct phylink_config *config,
phylink_set(mask, MII);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
phylink_set(mask, Autoneg);
phylink_set(mask, 10baseT_Half);
phylink_set(mask, 10baseT_Full);
@@ -960,7 +1108,7 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
bool tx_pause, bool rx_pause)
{
struct ag71xx *ag = netdev_priv(to_net_dev(config->dev));
- u32 cfg2;
+ u32 cfg1, cfg2;
u32 ifctl;
u32 fifo5;
@@ -994,6 +1142,15 @@ static void ag71xx_mac_link_up(struct phylink_config *config,
ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);
+ cfg1 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG1);
+ cfg1 &= ~(MAC_CFG1_TFC | MAC_CFG1_RFC);
+ if (tx_pause)
+ cfg1 |= MAC_CFG1_TFC;
+
+ if (rx_pause)
+ cfg1 |= MAC_CFG1_RFC;
+ ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, cfg1);
+
ag71xx_hw_start(ag);
}
@@ -1769,6 +1926,7 @@ static int ag71xx_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &ag71xx_netdev_ops;
+ ndev->ethtool_ops = &ag71xx_ethtool_ops;
INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
timer_setup(&ag->oom_timer, ag71xx_oom_timer_handler, 0);
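
Each entry in the ag71xx_statistics table pairs a MIB register offset with the documented width of that counter, and ag71xx_ethtool_get_stats() masks every read accordingly, since the counters are narrower than their 32-bit registers. For example (a sketch, using the "Rx Packet" entry from the table):

    /* 18-bit counter at 0x00A0: GENMASK(17, 0) keeps bits 17..0, so the
     * value wraps modulo 1 << 18. */
    u32 rx_packets = ag71xx_rr(ag, 0x00A0) & GENMASK(17, 0);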
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index decab9a8e4a8..0c12cf7bda50 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -204,7 +204,7 @@ static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
/**
* atl1c_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list embedded in the driver's private adapter structure
*/
static void atl1c_phy_config(struct timer_list *t)
{
@@ -220,7 +220,6 @@ static void atl1c_phy_config(struct timer_list *t)
void atl1c_reinit_locked(struct atl1c_adapter *adapter)
{
- WARN_ON(in_interrupt());
atl1c_down(adapter);
atl1c_up(adapter);
clear_bit(__AT_RESETTING, &adapter->flags);
@@ -346,6 +345,7 @@ static void atl1c_del_timer(struct atl1c_adapter *adapter)
/**
* atl1c_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of hanging tx queue
*/
static void atl1c_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -826,16 +826,16 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
return;
if (buffer_info->dma) {
if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE)
- pci_driection = PCI_DMA_FROMDEVICE;
+ pci_driection = DMA_FROM_DEVICE;
else
- pci_driection = PCI_DMA_TODEVICE;
+ pci_driection = DMA_TO_DEVICE;
if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
- pci_unmap_single(pdev, buffer_info->dma,
- buffer_info->length, pci_driection);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, pci_driection);
else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, pci_driection);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, pci_driection);
}
if (buffer_info->skb)
dev_consume_skb_any(buffer_info->skb);
@@ -846,6 +846,7 @@ static inline void atl1c_clean_buffer(struct pci_dev *pdev,
/**
* atl1c_clean_tx_ring - Free Tx-skb
* @adapter: board private structure
+ * @type: type of transmit queue
*/
static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
enum atl1c_trans_queue type)
@@ -933,9 +934,8 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- pci_free_consistent(pdev, adapter->ring_header.size,
- adapter->ring_header.desc,
- adapter->ring_header.dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_header.size,
+ adapter->ring_header.desc, adapter->ring_header.dma);
adapter->ring_header.desc = NULL;
/* Note: just free tdp_ring.buffer_info,
@@ -1717,10 +1717,9 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
- mapping = pci_map_single(pdev, vir_addr,
- buffer_info->length,
- PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, mapping))) {
+ mapping = dma_map_single(&pdev->dev, vir_addr,
+ buffer_info->length, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
dev_kfree_skb(skb);
buffer_info->skb = NULL;
buffer_info->length = 0;
@@ -1831,8 +1830,8 @@ rrs_checked:
rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) &
RRS_RX_RFD_INDEX_MASK;
buffer_info = &rfd_ring->buffer_info[rfd_index];
- pci_unmap_single(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
skb = buffer_info->skb;
} else {
/* TODO */
@@ -1863,6 +1862,8 @@ rrs_checked:
/**
* atl1c_clean - NAPI Rx polling callback
+ * @napi: napi info
+ * @budget: limit of packets to clean
*/
static int atl1c_clean(struct napi_struct *napi, int budget)
{
@@ -2106,10 +2107,10 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = map_len;
- buffer_info->dma = pci_map_single(adapter->pdev,
- skb->data, hdr_len, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)))
+ buffer_info->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, hdr_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE,
@@ -2131,10 +2132,10 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
buffer_info->length = buf_len - mapped_len;
buffer_info->dma =
- pci_map_single(adapter->pdev, skb->data + mapped_len,
- buffer_info->length, PCI_DMA_TODEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev,
- buffer_info->dma)))
+ dma_map_single(&adapter->pdev->dev,
+ skb->data + mapped_len,
+ buffer_info->length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)))
goto err_dma;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
@@ -2542,8 +2543,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
+ (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
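
The atl1c hunks above, like the atl1e/atl1/atl2 conversions that follow, are a mechanical migration from the legacy PCI DMA wrappers to the generic DMA API; every wrapper maps 1:1 onto a dma_*() call that takes &pdev->dev:

    /* legacy PCI wrapper               generic DMA API replacement
     * pci_map_single(pdev, ...)        dma_map_single(&pdev->dev, ...)
     * pci_unmap_page(pdev, ...)        dma_unmap_page(&pdev->dev, ...)
     * pci_dma_mapping_error(pdev, a)   dma_mapping_error(&pdev->dev, a)
     * pci_alloc_consistent(...)        dma_alloc_coherent(..., GFP_KERNEL)
     * pci_free_consistent(...)         dma_free_coherent(...)
     * pci_set_dma_mask(pdev, m)        dma_set_mask(&pdev->dev, m)
     * PCI_DMA_TODEVICE/_FROMDEVICE     DMA_TO_DEVICE/DMA_FROM_DEVICE
     */

The one semantic nuance is that pci_alloc_consistent() hard-coded GFP_ATOMIC, so passing GFP_KERNEL is only valid because these probe/setup paths may sleep.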
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 223ef846123e..098b0328e3cb 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -111,7 +111,7 @@ static inline void atl1e_irq_reset(struct atl1e_adapter *adapter)
/**
* atl1e_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list embedded in the driver's private adapter structure
*/
static void atl1e_phy_config(struct timer_list *t)
{
@@ -127,8 +127,6 @@ static void atl1e_phy_config(struct timer_list *t)
void atl1e_reinit_locked(struct atl1e_adapter *adapter)
{
-
- WARN_ON(in_interrupt());
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
atl1e_down(adapter);
@@ -196,7 +194,7 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
/**
* atl1e_link_chg_task - deal with link change event Out of interrupt context
- * @netdev: network interface device structure
+ * @work: work struct with driver info
*/
static void atl1e_link_chg_task(struct work_struct *work)
{
@@ -246,6 +244,7 @@ static void atl1e_cancel_work(struct atl1e_adapter *adapter)
/**
* atl1e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: the index of the hanging queue
*/
static void atl1e_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -654,11 +653,13 @@ static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter)
tx_buffer = &tx_ring->tx_buffer[index];
if (tx_buffer->dma) {
if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
- pci_unmap_single(pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
- pci_unmap_page(pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
tx_buffer->dma = 0;
}
}
@@ -774,8 +775,8 @@ static void atl1e_free_ring_resources(struct atl1e_adapter *adapter)
atl1e_clean_rx_ring(adapter);
if (adapter->ring_vir_addr) {
- pci_free_consistent(pdev, adapter->ring_size,
- adapter->ring_vir_addr, adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
adapter->ring_vir_addr = NULL;
}
@@ -810,11 +811,12 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
/* real ring DMA buffer */
size = adapter->ring_size;
- adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size,
- &adapter->ring_dma);
+ adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev,
+ adapter->ring_size,
+ &adapter->ring_dma, GFP_KERNEL);
if (adapter->ring_vir_addr == NULL) {
netdev_err(adapter->netdev,
- "pci_alloc_consistent failed, size = D%d\n", size);
+ "dma_alloc_coherent failed, size = D%d\n", size);
return -ENOMEM;
}
@@ -870,8 +872,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
return 0;
failed:
if (adapter->ring_vir_addr != NULL) {
- pci_free_consistent(pdev, adapter->ring_size,
- adapter->ring_vir_addr, adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
adapter->ring_vir_addr = NULL;
}
return err;
@@ -1233,11 +1235,15 @@ static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter)
tx_buffer = &tx_ring->tx_buffer[next_to_clean];
if (tx_buffer->dma) {
if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE)
- pci_unmap_single(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE)
- pci_unmap_page(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
tx_buffer->dma = 0;
}
@@ -1495,6 +1501,8 @@ fatal_err:
/**
* atl1e_clean - NAPI Rx polling callback
+ * @napi: napi info
+ * @budget: number of packets to clean
*/
static int atl1e_clean(struct napi_struct *napi, int budget)
{
@@ -1710,8 +1718,9 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd);
tx_buffer->length = map_len;
- tx_buffer->dma = pci_map_single(adapter->pdev,
- skb->data, hdr_len, PCI_DMA_TODEVICE);
+ tx_buffer->dma = dma_map_single(&adapter->pdev->dev,
+ skb->data, hdr_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma))
return -ENOSPC;
@@ -1739,8 +1748,9 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ?
MAX_TX_BUF_LEN : (buf_len - mapped_len);
tx_buffer->dma =
- pci_map_single(adapter->pdev, skb->data + mapped_len,
- map_len, PCI_DMA_TODEVICE);
+ dma_map_single(&adapter->pdev->dev,
+ skb->data + mapped_len, map_len,
+ DMA_TO_DEVICE);
if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
/* We need to unwind the mappings we've done */
@@ -1749,8 +1759,10 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
while (adapter->tx_ring.next_to_use != ring_end) {
tpd = atl1e_get_tpd(adapter);
tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
- pci_unmap_single(adapter->pdev, tx_buffer->dma,
- tx_buffer->length, PCI_DMA_TODEVICE);
+ dma_unmap_single(&adapter->pdev->dev,
+ tx_buffer->dma,
+ tx_buffer->length,
+ DMA_TO_DEVICE);
}
/* Reset the tx rings next pointer */
adapter->tx_ring.next_to_use = ring_start;
@@ -2300,8 +2312,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
- (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+ if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) ||
+ (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
goto err_dma;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index b35fcfcd692d..eaf96d002fa5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -1050,11 +1050,11 @@ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
+ sizeof(struct stats_msg_block)
+ 40;
- ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
- &ring_header->dma);
+ ring_header->desc = dma_alloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
if (netif_msg_drv(adapter))
- dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
+ dev_err(&pdev->dev, "dma_alloc_coherent failed\n");
goto err_nomem;
}
@@ -1136,8 +1136,8 @@ static void atl1_clean_rx_ring(struct atl1_adapter *adapter)
for (i = 0; i < rfd_ring->count; i++) {
buffer_info = &rfd_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
}
if (buffer_info->skb) {
@@ -1175,8 +1175,8 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
for (i = 0; i < tpd_ring->count; i++) {
buffer_info = &tpd_ring->buffer_info[i];
if (buffer_info->dma) {
- pci_unmap_page(pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
}
@@ -1217,8 +1217,8 @@ static void atl1_free_ring_resources(struct atl1_adapter *adapter)
atl1_clean_rx_ring(adapter);
kfree(tpd_ring->buffer_info);
- pci_free_consistent(pdev, ring_header->size, ring_header->desc,
- ring_header->dma);
+ dma_free_coherent(&pdev->dev, ring_header->size, ring_header->desc,
+ ring_header->dma);
tpd_ring->buffer_info = NULL;
tpd_ring->desc = NULL;
@@ -1866,9 +1866,9 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
buffer_info->length = (u16) adapter->rx_buffer_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(pdev, page, offset,
+ buffer_info->dma = dma_map_page(&pdev->dev, page, offset,
adapter->rx_buffer_len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len);
rfd_desc->coalese = 0;
@@ -1992,8 +1992,8 @@ rrd_ok:
}
/* Good Receive */
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_FROMDEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_FROM_DEVICE);
buffer_info->dma = 0;
skb = buffer_info->skb;
length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);
@@ -2062,8 +2062,8 @@ static int atl1_intr_tx(struct atl1_adapter *adapter)
while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
if (buffer_info->dma) {
- pci_unmap_page(adapter->pdev, buffer_info->dma,
- buffer_info->length, PCI_DMA_TODEVICE);
+ dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
+ buffer_info->length, DMA_TO_DEVICE);
buffer_info->dma = 0;
}
@@ -2210,9 +2210,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->length = hdr_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(adapter->pdev, page,
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
offset, hdr_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
@@ -2235,9 +2235,10 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
offset = offset_in_page(skb->data +
(hdr_len + i * ATL1_MAX_TX_BUF_LEN));
- buffer_info->dma = pci_map_page(adapter->pdev,
- page, offset, buffer_info->length,
- PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+ page, offset,
+ buffer_info->length,
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2247,8 +2248,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
buffer_info->length = buf_len;
page = virt_to_page(skb->data);
offset = offset_in_page(skb->data);
- buffer_info->dma = pci_map_page(adapter->pdev, page,
- offset, buf_len, PCI_DMA_TODEVICE);
+ buffer_info->dma = dma_map_page(&adapter->pdev->dev, page,
+ offset, buf_len,
+ DMA_TO_DEVICE);
if (++next_to_use == tpd_ring->count)
next_to_use = 0;
}
@@ -2550,7 +2552,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
/**
* atl1_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list embedded in the driver's private adapter structure
*/
static void atl1_phy_config(struct timer_list *t)
{
@@ -2922,7 +2924,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* various kernel subsystems to support the mechanics required by a
* fixed-high-32-bit system.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
goto err_dma;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index c915852b8892..7b80d924632a 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -281,8 +281,8 @@ static s32 atl2_setup_ring_resources(struct atl2_adapter *adapter)
adapter->txs_ring_size * 4 + 7 + /* dword align */
adapter->rxd_ring_size * 1536 + 127; /* 128bytes align */
- adapter->ring_vir_addr = pci_alloc_consistent(pdev, size,
- &adapter->ring_dma);
+ adapter->ring_vir_addr = dma_alloc_coherent(&pdev->dev, size,
+ &adapter->ring_dma, GFP_KERNEL);
if (!adapter->ring_vir_addr)
return -ENOMEM;
@@ -663,8 +663,8 @@ static int atl2_request_irq(struct atl2_adapter *adapter)
static void atl2_free_ring_resources(struct atl2_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- pci_free_consistent(pdev, adapter->ring_size, adapter->ring_vir_addr,
- adapter->ring_dma);
+ dma_free_coherent(&pdev->dev, adapter->ring_size,
+ adapter->ring_vir_addr, adapter->ring_dma);
}
/**
@@ -994,6 +994,7 @@ static int atl2_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* atl2_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of the hanging transmit queue
*/
static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -1005,7 +1006,7 @@ static void atl2_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/**
* atl2_watchdog - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list embedded in the driver's private adapter structure
*/
static void atl2_watchdog(struct timer_list *t)
{
@@ -1030,7 +1031,7 @@ static void atl2_watchdog(struct timer_list *t)
/**
* atl2_phy_config - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: timer_list embedded in the driver's private adapter structure
*/
static void atl2_phy_config(struct timer_list *t)
{
@@ -1085,7 +1086,6 @@ err_up:
static void atl2_reinit_locked(struct atl2_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__ATL2_RESETTING, &adapter->flags))
msleep(1);
atl2_down(adapter);
@@ -1235,6 +1235,7 @@ static int atl2_check_link(struct atl2_adapter *adapter)
/**
* atl2_link_chg_task - deal with link change event Out of interrupt context
+ * @work: pointer to work struct with private info
*/
static void atl2_link_chg_task(struct work_struct *work)
{
@@ -1328,8 +1329,8 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* until the kernel has the proper infrastructure to support 64-bit DMA
* on these devices.
*/
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) &&
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "atl2: No usable DMA configuration, aborting\n");
err = -EIO;
goto err_dma;
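
The @t kernel-doc fix-ups above track the timer API: callbacks receive the timer_list itself and recover their private structure with from_timer() (a container_of() wrapper), so nothing is cast through unsigned long any more. A sketch of the shape (field names as in atl2):

    static void example_watchdog(struct timer_list *t)
    {
            /* recover the adapter that embeds this timer_list */
            struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);

            schedule_work(&adapter->link_chg_task);
            mod_timer(&adapter->watchdog_timer, round_jiffies(jiffies + 4 * HZ));
    }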
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 6fb620e25208..74c1778d841e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2210,12 +2210,12 @@ static void b44_adjust_link(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
- bool status_changed = 0;
+ bool status_changed = false;
BUG_ON(!phydev);
if (bp->old_link != phydev->link) {
- status_changed = 1;
+ status_changed = true;
bp->old_link = phydev->link;
}
@@ -2223,11 +2223,11 @@ static void b44_adjust_link(struct net_device *dev)
if (phydev->link) {
if ((phydev->duplex == DUPLEX_HALF) &&
(bp->flags & B44_FLAG_FULL_DUPLEX)) {
- status_changed = 1;
+ status_changed = true;
bp->flags &= ~B44_FLAG_FULL_DUPLEX;
} else if ((phydev->duplex == DUPLEX_FULL) &&
!(bp->flags & B44_FLAG_FULL_DUPLEX)) {
- status_changed = 1;
+ status_changed = true;
bp->flags |= B44_FLAG_FULL_DUPLEX;
}
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0762d5d1a810..0fdd19d99d99 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -20,6 +20,7 @@
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
+#include <linux/clk.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -186,6 +187,11 @@ static int bcm_sysport_set_features(struct net_device *dev,
netdev_features_t features)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
/* Read CRC forward */
if (!priv->is_lite)
@@ -197,6 +203,8 @@ static int bcm_sysport_set_features(struct net_device *dev,
bcm_sysport_set_rx_csum(dev, features);
bcm_sysport_set_tx_csum(dev, features);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -1940,6 +1948,8 @@ static int bcm_sysport_open(struct net_device *dev)
unsigned int i;
int ret;
+ clk_prepare_enable(priv->clk);
+
/* Reset UniMAC */
umac_reset(priv);
@@ -1970,7 +1980,8 @@ static int bcm_sysport_open(struct net_device *dev)
0, priv->phy_interface);
if (!phydev) {
netdev_err(dev, "could not attach to PHY\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto out_clk_disable;
}
/* Reset house keeping link status */
@@ -2048,6 +2059,8 @@ out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
phy_disconnect(phydev);
+out_clk_disable:
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2106,6 +2119,8 @@ static int bcm_sysport_stop(struct net_device *dev)
/* Disconnect from PHY */
phy_disconnect(dev->phydev);
+ clk_disable_unprepare(priv->clk);
+
return 0;
}
@@ -2487,6 +2502,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize private members */
priv = netdev_priv(dev);
+ priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
/* Allocate number of TX rings */
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
sizeof(struct bcm_sysport_tx_ring),
@@ -2566,6 +2585,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (!ret)
device_set_wakeup_capable(&pdev->dev, 1);
+ priv->wol_clk = devm_clk_get_optional(&pdev->dev, "sw_sysportwol");
+ if (IS_ERR(priv->wol_clk))
+ return PTR_ERR(priv->wol_clk);
+
/* Set the needed headroom once and for all */
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
dev->needed_headroom += sizeof(struct bcm_tsb);
@@ -2590,6 +2613,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
goto err_deregister_notifier;
}
+ clk_prepare_enable(priv->clk);
+
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
"Broadcom SYSTEMPORT%s " REV_FMT
@@ -2598,6 +2623,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
priv->irq0, priv->irq1, txq, rxq);
+ clk_disable_unprepare(priv->clk);
+
return 0;
err_deregister_notifier:
@@ -2751,8 +2778,12 @@ static int __maybe_unused bcm_sysport_suspend(struct device *d)
bcm_sysport_fini_rx_ring(priv);
/* Get prepared for Wake-on-LAN */
- if (device_may_wakeup(d) && priv->wolopts)
+ if (device_may_wakeup(d) && priv->wolopts) {
+ clk_prepare_enable(priv->wol_clk);
ret = bcm_sysport_suspend_to_wol(priv);
+ }
+
+ clk_disable_unprepare(priv->clk);
return ret;
}
@@ -2767,6 +2798,10 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
if (!netif_running(dev))
return 0;
+ clk_prepare_enable(priv->clk);
+ if (priv->wolopts)
+ clk_disable_unprepare(priv->wol_clk);
+
umac_reset(priv);
/* Disable the UniMAC RX/TX */
@@ -2846,6 +2881,7 @@ out_free_rx_ring:
out_free_tx_rings:
for (i = 0; i < dev->num_tx_queues; i++)
bcm_sysport_fini_tx_ring(priv, i);
+ clk_disable_unprepare(priv->clk);
return ret;
}
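
The clock handling added above relies on the optional-clock idiom: devm_clk_get_optional() returns NULL rather than an error when the named clock is not described, and every clk_*() call accepts a NULL clock as a no-op, so the driver needs no further special-casing on platforms without a gated SYSTEMPORT clock. A condensed sketch:

    priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport");
    if (IS_ERR(priv->clk))                  /* real errors still propagate */
            return PTR_ERR(priv->clk);

    ret = clk_prepare_enable(priv->clk);    /* no-op when clk is NULL */
    if (ret)
            return ret;
    /* ... touch registers ... */
    clk_disable_unprepare(priv->clk);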
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 6d80735fbc7f..3a5cb6f128f5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -770,6 +770,8 @@ struct bcm_sysport_priv {
u32 wolopts;
u8 sopass[SOPASS_MAX];
unsigned int wol_irq_disabled:1;
+ struct clk *clk;
+ struct clk *wol_clk;
/* MIB related fields */
struct bcm_sysport_mib mib;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e3d92e4f2193..1a6ec1a12d53 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -504,6 +504,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
* @len_on_bd: total length of the first packet for the
* aggregation.
* @pkt_len: length of all segments
+ * @num_of_coalesced_segs: count of segments
*
* Approximate value of the MSS for this aggregation calculated using
* the first packet of it.
@@ -1958,6 +1959,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
* bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
*
* @bp: Driver handle
+ * @include_cnic: handle cnic case
*
* We currently support at most 16 Tx queues for each CoS, thus we will
* allocate a multiple of 16 for ETH L2 rings according to the value of the
@@ -4229,8 +4231,8 @@ void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
/**
* bnx2x_setup_tc - routine to configure net_device for multi tc
*
- * @netdev: net device to configure
- * @tc: number of traffic classes to enable
+ * @dev: net device to configure
+ * @num_tc: number of traffic classes to enable
*
* callback connected to the ndo_setup_tc function pointer
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 7e4c93be4451..d8b1824c334d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -825,9 +825,9 @@ static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
int i;
for_each_rx_queue_cnic(bp, i) {
- napi_hash_del(&bnx2x_fp(bp, i, napi));
- netif_napi_del(&bnx2x_fp(bp, i, napi));
+ __netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+ synchronize_net();
}
static inline void bnx2x_del_all_napi(struct bnx2x *bp)
@@ -835,9 +835,9 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
int i;
for_each_eth_queue(bp, i) {
- napi_hash_del(&bnx2x_fp(bp, i, napi));
- netif_napi_del(&bnx2x_fp(bp, i, napi));
+ __netif_napi_del(&bnx2x_fp(bp, i, napi));
}
+ synchronize_net();
}
int bnx2x_set_int_mode(struct bnx2x *bp);
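
Both loops above switch from per-instance netif_napi_del() (which includes an RCU grace period as part of the napi hash removal) to the split __netif_napi_del(), deferring a single synchronize_net() until every queue's NAPI has been unhooked - one grace period per teardown instead of one per queue. The resulting shape:

    for_each_eth_queue(bp, i)
            __netif_napi_del(&bnx2x_fp(bp, i, napi));
    synchronize_net();      /* one grace period covers all deletions */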
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 7cea33803f7f..32245bbe88a8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -839,8 +839,9 @@ static bool bnx2x_is_wreg_in_chip(struct bnx2x *bp,
/**
* bnx2x_read_pages_regs - read "paged" registers
*
- * @bp device handle
- * @p output buffer
+ * @bp: device handle
+ * @p: output buffer
+ * @preset: the preset value
*
* Reads "paged" memories: memories that may only be read by first writing to a
* specific address ("write address") and then reading from a specific address
@@ -3561,6 +3562,7 @@ static void bnx2x_get_channels(struct net_device *dev,
* bnx2x_change_num_queues - change the number of RSS queues.
*
* @bp: bnx2x private structure
+ * @num_rss: rss count
*
* Re-configure interrupt mode to get the new number of MSI-X
* vectors and re-add NAPI objects.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3c543dd7a8f3..28069b290862 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3086,9 +3086,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
/**
* bnx2x_get_common_flags - Return common flags
*
- * @bp device handle
- * @fp queue handle
- * @zero_stats TRUE if statistics zeroing is needed
+ * @bp: device handle
+ * @fp: queue handle
+ * @zero_stats: TRUE if statistics zeroing is needed
*
* Return the flags that are common for the Tx-only and not normal connections.
*/
@@ -6313,11 +6313,11 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
case FW_MSG_CODE_DRV_LOAD_COMMON:
case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
bnx2x_init_internal_common(bp);
- /* no break */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_PORT:
/* nothing to do */
- /* no break */
+ fallthrough;
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
/* internal memory per function is
@@ -12390,7 +12390,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
}
if (CHIP_IS_E1(bp))
- bp->dropless_fc = 0;
+ bp->dropless_fc = false;
else
bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
@@ -13591,8 +13591,8 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
/**
* bnx2x_get_num_none_def_sbs - return the number of non-default SBs
- *
- * @dev: pci device
+ * @pdev: pci device
+ * @cnic_cnt: count
*
*/
static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
@@ -14451,9 +14451,7 @@ module_exit(bnx2x_cleanup);
/**
* bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
- *
* @bp: driver handle
- * @set: set or clear the CAM entry
*
* This function will wait until the ramrod completion returns.
* Return 0 if success, -ENODEV if ramrod doesn't return.
@@ -15412,7 +15410,7 @@ static int bnx2x_hwtstamp_ioctl(struct bnx2x *bp, struct ifreq *ifr)
return -EINVAL;
}
- bp->hwtstamp_ioctl_called = 1;
+ bp->hwtstamp_ioctl_called = true;
bp->tx_type = config.tx_type;
bp->rx_filter = config.rx_filter;
@@ -15494,7 +15492,7 @@ void bnx2x_init_ptp(struct bnx2x *bp)
bnx2x_init_cyclecounter(bp);
timecounter_init(&bp->timecounter, &bp->cyclecounter,
ktime_to_ns(ktime_get_real()));
- bp->timecounter_init_done = 1;
+ bp->timecounter_init_done = true;
}
DP(BNX2X_MSG_PTP, "PTP initialization ended successfully\n");
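
The fallthrough conversions above replace comment-based annotations with the pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((__fallthrough__)) on compilers that support it and lets -Wimplicit-fallthrough flag only the unannotated cases. A sketch of the annotated switch shape:

    switch (load_code) {
    case FW_MSG_CODE_DRV_LOAD_COMMON:
            bnx2x_init_internal_common(bp);
            fallthrough;
    case FW_MSG_CODE_DRV_LOAD_PORT:
            /* nothing to do */
            fallthrough;
    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
            break;
    }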
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index e26f4da5a6d7..6cd1523ad9e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -37,10 +37,12 @@
/**
* bnx2x_exe_queue_init - init the Exe Queue object
*
+ * @bp: driver handle
* @o: pointer to the object
* @exe_len: length
* @owner: pointer to the owner
* @validate: validate function pointer
+ * @remove: remove function pointer
* @optimize: optimize function pointer
* @exec: execute function pointer
* @get: get function pointer
@@ -103,7 +105,7 @@ static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
*
* @bp: driver handle
* @o: queue
- * @cmd: new command to add
+ * @elem: new command to add
* @restore: true - do not optimize the command
*
* If the element is optimized or is illegal, frees it.
@@ -277,7 +279,7 @@ static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
*
* @bp: device handle
* @state: state which is to be cleared
- * @state_p: state buffer
+ * @pstate: state buffer
*
*/
static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
@@ -424,8 +426,8 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
* @bp: device handle
* @o: vlan_mac object
*
- * @details: Non-blocking implementation; should be called under execution
- * queue lock.
+ * Context: Non-blocking implementation; should be called under execution
+ * queue lock.
*/
static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
@@ -445,7 +447,7 @@ static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under execution queue lock; notice it might release
+ * Context: Should be called under execution queue lock; notice it might release
* and reclaim it during its run.
*/
static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
@@ -475,7 +477,7 @@ static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
* @o: vlan_mac object
* @ramrod_flags: ramrod flags of missed execution
*
- * @details Should be called under execution queue lock.
+ * Context: Should be called under execution queue lock.
*/
static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o,
@@ -493,7 +495,7 @@ static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under execution queue lock. Notice if a pending
+ * Context: Should be called under execution queue lock. Notice if a pending
* execution exists, it would perform it - possibly releasing and
* reclaiming the execution queue lock.
*/
@@ -516,7 +518,7 @@ static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under the execution queue lock. May sleep. May
+ * Context: Should be called under the execution queue lock. May sleep. May
* release and reclaim execution queue lock during its run.
*/
static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
@@ -536,7 +538,7 @@ static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details May sleep. Claims and releases execution queue lock during its run.
+ * Context: May sleep. Claims and releases execution queue lock during its run.
*/
int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *o)
@@ -556,7 +558,7 @@ int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Should be called under execution queue lock. Notice if a pending
+ * Context: Should be called under execution queue lock. Notice if a pending
* execution exists, it would be performed if this was the last
* reader. possibly releasing and reclaiming the execution queue lock.
*/
@@ -591,7 +593,7 @@ static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
* @bp: device handle
* @o: vlan_mac object
*
- * @details Notice if a pending execution exists, it would be performed if this
+ * Context: Notice if a pending execution exists, it would be performed if this
* was the last reader. Claims and releases the execution queue lock
* during its run.
*/
@@ -968,7 +970,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
*
* @bp: device handle
* @o: queue
- * @type:
+ * @type: the type of echo
* @cam_offset: offset in cam memory
* @hdr: pointer to a header to setup
*
@@ -1608,8 +1610,8 @@ static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
*
* @bp: device handle
* @o: bnx2x_vlan_mac_obj
- * @cqe:
- * @cont: if true schedule next execution chunk
+ * @cqe: completion element
+ * @ramrod_flags: if set schedule next execution chunk
*
*/
static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
@@ -1656,7 +1658,7 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
* bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
*
* @bp: device handle
- * @o: bnx2x_qable_obj
+ * @qo: bnx2x_qable_obj
* @elem: bnx2x_exeq_elem
*/
static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
@@ -1714,10 +1716,10 @@ static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
* bnx2x_vlan_mac_get_registry_elem - prepare a registry element
*
* @bp: device handle
- * @o:
- * @elem:
- * @restore:
- * @re:
+ * @o: vlan object
+ * @elem: execution queue element
+ * @restore: whether to restore the element
+ * @re: resulting registry element
*
* prepare a registry element according to the current command request.
*/
@@ -1768,9 +1770,9 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
* bnx2x_execute_vlan_mac - execute vlan mac command
*
* @bp: device handle
- * @qo:
- * @exe_chunk:
- * @ramrod_flags:
+ * @qo: bnx2x_qable_obj pointer
+ * @exe_chunk: chunk of commands to execute
+ * @ramrod_flags: execution flags
*
* go and send a ramrod!
*/
@@ -2006,8 +2008,8 @@ int bnx2x_config_vlan_mac(struct bnx2x *bp,
* bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
*
* @bp: device handle
- * @o:
- * @vlan_mac_flags:
+ * @o: vlan object info
+ * @vlan_mac_flags: vlan flags
* @ramrod_flags: execution flags to be used for this deletion
*
* if the last operation has completed successfully and there are no
@@ -2767,7 +2769,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
/**
* bnx2x_mcast_get_next_bin - get the next set bin (index)
*
- * @o:
+ * @o: multicast object info
* @last: index to start looking from (including)
*
* returns the next found (set) bin or a negative value if none is found.
@@ -2892,7 +2894,7 @@ static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
* bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
*
* @bp: device handle
- * @o:
+ * @o: multicast object info
* @start_bin: index in the registry to start from (including)
* @rdata_idx: index in the ramrod data to start from
*
@@ -3202,11 +3204,11 @@ static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
}
/**
- * bnx2x_mcast_handle_current_cmd -
+ * bnx2x_mcast_handle_current_cmd - send command if room
*
* @bp: device handle
- * @p:
- * @cmd:
+ * @p: ramrod mcast info
+ * @cmd: command
* @start_cnt: first line in the ramrod data that may be used
*
* This function is called iff there is enough place for the current command in
@@ -3323,7 +3325,7 @@ static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
* bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
*
* @bp: device handle
- * @p:
+ * @p: ramrod parameters
* @len: number of rules to handle
*/
static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
@@ -3684,7 +3686,7 @@ static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
* bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
*
* @bp: device handle
- * @p:
+ * @p: ramrod parameters
* @len: number of rules to handle
*/
static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
@@ -3711,7 +3713,7 @@ static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
* bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
*
* @bp: device handle
- * @o:
+ * @o: multicast info
* @start_idx: index in the registry to start from
* @rdata_idx: index in the ramrod data to start from
*
@@ -3798,10 +3800,10 @@ static inline int bnx2x_mcast_handle_pending_cmds_e1(
/**
* bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
*
- * @fw_hi:
- * @fw_mid:
- * @fw_lo:
- * @mac:
+ * @fw_hi: MAC address high word
+ * @fw_mid: MAC address middle word
+ * @fw_lo: MAC address low word
+ * @mac: MAC address buffer
*/
static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
__le16 *fw_lo, u8 *mac)
@@ -3818,7 +3820,7 @@ static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
* bnx2x_mcast_refresh_registry_e1 -
*
* @bp: device handle
- * @cnt:
+ * @o: multicast info
*
* Check the ramrod data first entry flag to see if it's a DELETE or ADD command
* and update the registry correspondingly: if ADD - allocate a memory and add
@@ -4311,7 +4313,7 @@ static bool bnx2x_credit_pool_get_entry_always_true(
/**
* bnx2x_init_credit_pool - initialize credit pool internals.
*
- * @p:
+ * @p: credit pool
* @base: Base entry in the CAM to use.
* @credit: pool size.
*
@@ -4725,8 +4727,8 @@ static int bnx2x_queue_wait_comp(struct bnx2x *bp,
* bnx2x_queue_comp_cmd - complete the state change command.
*
* @bp: device handle
- * @o:
- * @cmd:
+ * @o: queue info
+ * @cmd: completed command
*
* Checks that the arrived completion is expected.
*/
@@ -5477,8 +5479,8 @@ static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
* bnx2x_queue_chk_transition - check state machine of a regular Queue
*
* @bp: device handle
- * @o:
- * @params:
+ * @o: queue info
+ * @params: queue state change parameters
*
* (not Forwarding)
* It both checks if the requested command is legal in a current
@@ -5735,8 +5737,8 @@ static int bnx2x_func_wait_comp(struct bnx2x *bp,
* bnx2x_func_state_change_comp - complete the state machine transition
*
* @bp: device handle
- * @o:
- * @cmd:
+ * @o: function info
+ * @cmd: completed command
*
* Called on state change transition. Completes the state
* machine transition only - no HW interaction.
@@ -5776,8 +5778,8 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
* bnx2x_func_comp_cmd - complete the state change command
*
* @bp: device handle
- * @o:
- * @cmd:
+ * @o: function info
+ * @cmd: more info
*
* Checks that the arrived completion is expected.
*/
@@ -5796,8 +5798,8 @@ static int bnx2x_func_comp_cmd(struct bnx2x *bp,
* bnx2x_func_chk_transition - perform function state machine transition
*
* @bp: device handle
- * @o:
- * @params:
+ * @o: function info
+ * @params: state parameters
*
* It both checks if the requested command is legal in a current
* state and, if it's legal, sets a `next_state' in the object
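The hunks above all apply the same kernel-doc convention: every parameter gets a one-line description, and free-form locking notes move from the non-standard @details tag into the standard Context: section. For reference, a minimal well-formed comment in that style looks like this (the function and wording are illustrative, not taken from the driver):

/**
 * foo_queue_drain - flush all pending elements from a queue
 * @bp: device handle
 * @q: queue to drain
 *
 * Context: Should be called under the execution queue lock. May release
 *          and reclaim the lock during its run.
 *
 * Return: number of elements drained, or a negative errno.
 */
static int foo_queue_drain(struct bnx2x *bp, struct foo_queue *q);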
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7b7e8b7883c8..fa147865e33f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,6 +69,7 @@
#include "bnxt_debugfs.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
+#define BNXT_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_HW)
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
@@ -254,6 +255,7 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
};
static struct workqueue_struct *bnxt_pf_wq;
@@ -1172,7 +1174,10 @@ static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
- set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+ else
+ set_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event);
bnxt_queue_sp_work(bp);
}
rxr->rx_next_cons = 0xffff;
@@ -1738,8 +1743,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
- netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
- cons, rxr->rx_next_cons);
+ /* 0xffff is a forced error; don't print it */
+ if (rxr->rx_next_cons != 0xffff)
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
@@ -1772,9 +1779,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
- netdev_warn(bp->dev, "RX buffer error %x\n",
- rx_err);
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
+ !(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
+ netdev_warn_once(bp->dev, "RX buffer error %x\n",
+ rx_err);
bnxt_sched_reset(bp, rxr);
}
}
@@ -1941,19 +1949,43 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx)
return val;
}
+static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id)
+{
+ int i;
+
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ u16 grp_idx = bp->rx_ring[i].bnapi->index;
+ struct bnxt_ring_grp_info *grp_info;
+
+ grp_info = &bp->grp_info[grp_idx];
+ if (grp_info->agg_fw_ring_id == ring_id)
+ return grp_idx;
+ }
+ return INVALID_HW_RING_ID;
+}
+
#define BNXT_GET_EVENT_PORT(data) \
((data) & \
ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+#define BNXT_EVENT_RING_TYPE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK)
+
+#define BNXT_EVENT_RING_TYPE_RX(data2) \
+ (BNXT_EVENT_RING_TYPE(data2) == \
+ ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX)
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
u16 event_id = le16_to_cpu(cmpl->event_id);
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ u32 data2 = le32_to_cpu(cmpl->event_data2);
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
struct bnxt_link_info *link_info = &bp->link_info;
if (BNXT_VF(bp))
@@ -1983,7 +2015,6 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
u16 port_id = BNXT_GET_EVENT_PORT(data1);
if (BNXT_VF(bp))
@@ -2000,9 +2031,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
goto async_event_process_exit;
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
- case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
- u32 data1 = le32_to_cpu(cmpl->event_data1);
-
+ case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+ if (netif_msg_hw(bp))
+ netdev_warn(bp->dev, "Received RESET_NOTIFY event, data1: 0x%x, data2: 0x%x\n",
+ data1, data2);
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2022,10 +2054,8 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
break;
- }
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
- u32 data1 = le32_to_cpu(cmpl->event_data1);
if (!fw_health)
goto async_event_process_exit;
@@ -2052,6 +2082,28 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
goto async_event_process_exit;
}
+ case ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG: {
+ struct bnxt_rx_ring_info *rxr;
+ u16 grp_idx;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5)
+ goto async_event_process_exit;
+
+ netdev_warn(bp->dev, "Ring monitor event, ring type %lu id 0x%x\n",
+ BNXT_EVENT_RING_TYPE(data2), data1);
+ if (!BNXT_EVENT_RING_TYPE_RX(data2))
+ goto async_event_process_exit;
+
+ grp_idx = bnxt_agg_ring_id_to_grp_idx(bp, data1);
+ if (grp_idx == INVALID_HW_RING_ID) {
+ netdev_warn(bp->dev, "Unknown RX agg ring id 0x%x\n",
+ data1);
+ goto async_event_process_exit;
+ }
+ rxr = bp->bnapi[grp_idx]->rx_ring;
+ bnxt_sched_reset(bp, rxr);
+ goto async_event_process_exit;
+ }
default:
goto async_event_process_exit;
}
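The new ring-monitor handler only acts when the low bits of event_data2 identify an RX ring; everything else falls through to async_event_process_exit. A standalone sketch of that decode, with the mask and RX type value assumed for illustration (in the driver they come from the generated bnxt_hsi.h constants):

#include <stdint.h>
#include <stdio.h>

#define RING_TYPE_MASK 0xffU /* assumed: low byte of data2 holds the type */
#define RING_TYPE_RX   0x1U  /* assumed: value firmware uses for RX rings */

int main(void)
{
	uint32_t data1 = 0x1c2; /* aggregation ring id reported by firmware */
	uint32_t data2 = 0x001; /* ring type in the low byte */

	if ((data2 & RING_TYPE_MASK) == RING_TYPE_RX)
		printf("schedule reset for RX agg ring 0x%x\n", data1);
	return 0;
}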
@@ -2250,7 +2302,7 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
bnapi->tx_pkts = 0;
}
- if (bnapi->events & BNXT_RX_EVENT) {
+ if ((bnapi->events & BNXT_RX_EVENT) && !(bnapi->in_reset)) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
if (bnapi->events & BNXT_AGG_EVENT)
@@ -2540,93 +2592,91 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
}
}
-static void bnxt_free_rx_skbs(struct bnxt *bp)
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
{
- int i, max_idx, max_agg_idx;
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct pci_dev *pdev = bp->pdev;
-
- if (!bp->rx_ring)
- return;
+ struct bnxt_tpa_idx_map *map;
+ int i, max_idx, max_agg_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_tpa_idx_map *map;
- int j;
-
- if (rxr->rx_tpa) {
- for (j = 0; j < bp->max_tpa; j++) {
- struct bnxt_tpa_info *tpa_info =
- &rxr->rx_tpa[j];
- u8 *data = tpa_info->data;
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
- if (!data)
- continue;
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
- dma_unmap_single_attrs(&pdev->dev,
- tpa_info->mapping,
- bp->rx_buf_use_size,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
+ if (!data)
+ continue;
- tpa_info->data = NULL;
+ dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
- kfree(data);
- }
- }
+ tpa_info->data = NULL;
- for (j = 0; j < max_idx; j++) {
- struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
- dma_addr_t mapping = rx_buf->mapping;
- void *data = rx_buf->data;
+ kfree(data);
+ }
- if (!data)
- continue;
+skip_rx_tpa_free:
+ for (i = 0; i < max_idx; i++) {
+ struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
+ dma_addr_t mapping = rx_buf->mapping;
+ void *data = rx_buf->data;
- rx_buf->data = NULL;
+ if (!data)
+ continue;
- if (BNXT_RX_PAGE_MODE(bp)) {
- mapping -= bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- page_pool_recycle_direct(rxr->page_pool, data);
- } else {
- dma_unmap_single_attrs(&pdev->dev, mapping,
- bp->rx_buf_use_size,
- bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
- kfree(data);
- }
+ rx_buf->data = NULL;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ mapping -= bp->rx_dma_offset;
+ dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
+ bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ page_pool_recycle_direct(rxr->page_pool, data);
+ } else {
+ dma_unmap_single_attrs(&pdev->dev, mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+ kfree(data);
}
+ }
+ for (i = 0; i < max_agg_idx; i++) {
+ struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
+ struct page *page = rx_agg_buf->page;
- for (j = 0; j < max_agg_idx; j++) {
- struct bnxt_sw_rx_agg_bd *rx_agg_buf =
- &rxr->rx_agg_ring[j];
- struct page *page = rx_agg_buf->page;
-
- if (!page)
- continue;
+ if (!page)
+ continue;
- dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
- BNXT_RX_PAGE_SIZE,
- PCI_DMA_FROMDEVICE,
- DMA_ATTR_WEAK_ORDERING);
+ dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
+ BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
+ DMA_ATTR_WEAK_ORDERING);
- rx_agg_buf->page = NULL;
- __clear_bit(j, rxr->rx_agg_bmap);
+ rx_agg_buf->page = NULL;
+ __clear_bit(i, rxr->rx_agg_bmap);
- __free_page(page);
- }
- if (rxr->rx_page) {
- __free_page(rxr->rx_page);
- rxr->rx_page = NULL;
- }
- map = rxr->rx_tpa_idx_map;
- if (map)
- memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+ __free_page(page);
+ }
+ if (rxr->rx_page) {
+ __free_page(rxr->rx_page);
+ rxr->rx_page = NULL;
}
+ map = rxr->rx_tpa_idx_map;
+ if (map)
+ memset(map->agg_idx_bmap, 0, sizeof(map->agg_idx_bmap));
+}
+
+static void bnxt_free_rx_skbs(struct bnxt *bp)
+{
+ int i;
+
+ if (!bp->rx_ring)
+ return;
+
+ for (i = 0; i < bp->rx_nr_rings; i++)
+ bnxt_free_one_rx_ring_skbs(bp, i);
}
static void bnxt_free_skbs(struct bnxt *bp)
@@ -3165,31 +3215,16 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct net_device *dev = bp->dev;
- struct bnxt_rx_ring_info *rxr;
- struct bnxt_ring_struct *ring;
- u32 prod, type;
+ u32 prod;
int i;
- type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
-
- if (NET_IP_ALIGN == 2)
- type |= RX_BD_FLAGS_SOP;
-
- rxr = &bp->rx_ring[ring_nr];
- ring = &rxr->rx_ring_struct;
- bnxt_init_rxbd_pages(ring, type);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
+ if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -3197,22 +3232,13 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
- ring->fw_ring_id = INVALID_HW_RING_ID;
-
- ring = &rxr->rx_agg_ring_struct;
- ring->fw_ring_id = INVALID_HW_RING_ID;
if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
return 0;
- type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
- RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
-
- bnxt_init_rxbd_pages(ring, type);
-
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
- if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
+ if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
@@ -3221,30 +3247,58 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
}
rxr->rx_agg_prod = prod;
- if (bp->flags & BNXT_FLAG_TPA) {
- if (rxr->rx_tpa) {
- u8 *data;
- dma_addr_t mapping;
+ if (rxr->rx_tpa) {
+ dma_addr_t mapping;
+ u8 *data;
- for (i = 0; i < bp->max_tpa; i++) {
- data = __bnxt_alloc_rx_data(bp, &mapping,
- GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ for (i = 0; i < bp->max_tpa; i++) {
+ data = __bnxt_alloc_rx_data(bp, &mapping, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- rxr->rx_tpa[i].data = data;
- rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
- rxr->rx_tpa[i].mapping = mapping;
- }
- } else {
- netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
- return -ENOMEM;
+ rxr->rx_tpa[i].data = data;
+ rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
+ rxr->rx_tpa[i].mapping = mapping;
}
}
-
return 0;
}
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+ struct bnxt_ring_struct *ring;
+ u32 type;
+
+ type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
+
+ if (NET_IP_ALIGN == 2)
+ type |= RX_BD_FLAGS_SOP;
+
+ rxr = &bp->rx_ring[ring_nr];
+ ring = &rxr->rx_ring_struct;
+ bnxt_init_rxbd_pages(ring, type);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ ring = &rxr->rx_agg_ring_struct;
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+
+ if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
+ type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
+ RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
+
+ bnxt_init_rxbd_pages(ring, type);
+ }
+
+ return bnxt_alloc_one_rx_ring(bp, ring_nr);
+}
+
static void bnxt_init_cp_rings(struct bnxt *bp)
{
int i, j;
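Splitting bnxt_init_one_rx_ring() this way means descriptor setup (bnxt_init_rxbd_pages(), XDP program refs, fw_ring_id init) runs once, while the buffer-filling half, bnxt_alloc_one_rx_ring(), can be re-run on its own by the per-ring reset path added later in this patch. A runnable sketch of the resulting call shape, with stubs standing in for the driver helpers:

#include <stdio.h>

/* Stubs standing in for the driver helpers (illustrative only). */
static void init_descriptors(int ring) { printf("setup ring %d\n", ring); }
static void free_ring_bufs(int ring)   { printf("free ring %d\n", ring); }
static void alloc_ring_bufs(int ring)  { printf("fill ring %d\n", ring); }

/* Open path: one-time setup, then the reusable buffer fill. */
static void init_one_rx_ring(int ring)
{
	init_descriptors(ring);
	alloc_ring_bufs(ring);
}

int main(void)
{
	init_one_rx_ring(0);
	/* A per-ring reset reuses only the buffer half of the split. */
	free_ring_bufs(0);
	alloc_ring_bufs(0);
	return 0;
}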
@@ -4269,6 +4323,8 @@ static int bnxt_hwrm_to_stderr(u32 hwrm_err)
switch (hwrm_err) {
case HWRM_ERR_CODE_SUCCESS:
return 0;
+ case HWRM_ERR_CODE_RESOURCE_LOCKED:
+ return -EROFS;
case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED:
return -EACCES;
case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR:
@@ -5343,13 +5399,16 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
* VLAN_STRIP_CAP properly.
*/
if ((flags & VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP) ||
- ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (BNXT_CHIP_P5_THOR(bp) &&
!(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)))
bp->fw_cap |= BNXT_FW_CAP_VLAN_RX_STRIP;
bp->max_tpa_v2 = le16_to_cpu(resp->max_aggs_supported);
- if (bp->max_tpa_v2)
- bp->hw_ring_stats_size =
- sizeof(struct ctx_hw_stats_ext);
+ if (bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5_THOR(bp))
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5;
+ else
+ bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2;
+ }
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -6639,6 +6698,8 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
+ if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED)
+ bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
@@ -7333,6 +7394,77 @@ hwrm_cfa_adv_qcaps_exit:
return rc;
}
+static int __bnxt_alloc_fw_health(struct bnxt *bp)
+{
+ if (bp->fw_health)
+ return 0;
+
+ bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+ if (!bp->fw_health)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnxt_alloc_fw_health(struct bnxt *bp)
+{
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ rc = __bnxt_alloc_fw_health(bp);
+ if (rc) {
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+ }
+
+ return 0;
+}
+
+static void __bnxt_map_fw_health_reg(struct bnxt *bp, u32 reg)
+{
+ writel(reg & BNXT_GRC_BASE_MASK, bp->bar0 +
+ BNXT_GRCPF_REG_WINDOW_BASE_OUT +
+ BNXT_FW_HEALTH_WIN_MAP_OFF);
+}
+
+static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
+{
+ void __iomem *hs;
+ u32 status_loc;
+ u32 reg_type;
+ u32 sig;
+
+ __bnxt_map_fw_health_reg(bp, HCOMM_STATUS_STRUCT_LOC);
+ hs = bp->bar0 + BNXT_FW_HEALTH_WIN_OFF(HCOMM_STATUS_STRUCT_LOC);
+
+ sig = readl(hs + offsetof(struct hcomm_status, sig_ver));
+ if ((sig & HCOMM_STATUS_SIGNATURE_MASK) != HCOMM_STATUS_SIGNATURE_VAL) {
+ if (bp->fw_health)
+ bp->fw_health->status_reliable = false;
+ return;
+ }
+
+ if (__bnxt_alloc_fw_health(bp)) {
+ netdev_warn(bp->dev, "no memory for firmware status checks\n");
+ return;
+ }
+
+ status_loc = readl(hs + offsetof(struct hcomm_status, fw_status_loc));
+ bp->fw_health->regs[BNXT_FW_HEALTH_REG] = status_loc;
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(status_loc);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC) {
+ __bnxt_map_fw_health_reg(bp, status_loc);
+ bp->fw_health->mapped_regs[BNXT_FW_HEALTH_REG] =
+ BNXT_FW_HEALTH_WIN_OFF(status_loc);
+ }
+
+ bp->fw_health->status_reliable = true;
+}
+
static int bnxt_map_fw_health_regs(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -7349,14 +7481,12 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
reg_base = reg & BNXT_GRC_BASE_MASK;
if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
return -ERANGE;
- fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_BASE +
- (reg & BNXT_GRC_OFFSET_MASK);
+ fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
if (reg_base == 0xffffffff)
return 0;
- writel(reg_base, bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT +
- BNXT_FW_HEALTH_WIN_MAP_OFF);
+ __bnxt_map_fw_health_reg(bp, reg_base);
return 0;
}
@@ -7432,6 +7562,16 @@ static int bnxt_hwrm_func_reset(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
}
+static void bnxt_nvm_cfg_ver_get(struct bnxt *bp)
+{
+ struct hwrm_nvm_get_dev_info_output nvm_info;
+
+ if (!bnxt_hwrm_nvm_get_dev_info(bp, &nvm_info))
+ snprintf(bp->nvm_cfg_ver, FW_VER_STR_LEN, "%d.%d.%d",
+ nvm_info.nvm_cfg_ver_maj, nvm_info.nvm_cfg_ver_min,
+ nvm_info.nvm_cfg_ver_upd);
+}
+
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
int rc = 0;
@@ -8635,10 +8775,9 @@ static void bnxt_del_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
- napi_hash_del(&bnapi->napi);
- netif_napi_del(&bnapi->napi);
+ __netif_napi_del(&bnapi->napi);
}
- /* We called napi_hash_del() before netif_napi_del(), we need
+ /* Since we called __netif_napi_del(), we need
* to respect an RCU grace period before freeing napi structures.
*/
synchronize_net();
@@ -8694,14 +8833,19 @@ static void bnxt_enable_napi(struct bnxt *bp)
int i;
for (i = 0; i < bp->cp_nr_rings; i++) {
- struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
- bp->bnapi[i]->in_reset = false;
+ struct bnxt_napi *bnapi = bp->bnapi[i];
+ struct bnxt_cp_ring_info *cpr;
+
+ cpr = &bnapi->cp_ring;
+ if (bnapi->in_reset)
+ cpr->sw_stats.rx.rx_resets++;
+ bnapi->in_reset = false;
- if (bp->bnapi[i]->rx_ring) {
+ if (bnapi->rx_ring) {
INIT_WORK(&cpr->dim.work, bnxt_dim_work);
cpr->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
}
- napi_enable(&bp->bnapi[i]->napi);
+ napi_enable(&bnapi->napi);
}
}
@@ -8735,6 +8879,30 @@ void bnxt_tx_enable(struct bnxt *bp)
netif_carrier_on(bp->dev);
}
+static char *bnxt_report_fec(struct bnxt_link_info *link_info)
+{
+ u8 active_fec = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
+
+ switch (active_fec) {
+ default:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE:
+ return "None";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
+ return "Clause 74 BaseR";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
+ return "Clause 91 RS(528,514)";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
+ return "Clause 91 RS544_1XN";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
+ return "Clause 91 RS(544,514)";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
+ return "Clause 91 RS272_1XN";
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
+ return "Clause 91 RS(272,257)";
+ }
+}
+
static void bnxt_report_link(struct bnxt *bp)
{
if (bp->link_info.link_up) {
@@ -8744,6 +8912,11 @@ static void bnxt_report_link(struct bnxt *bp)
u16 fec;
netif_carrier_on(bp->dev);
+ speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
+ if (speed == SPEED_UNKNOWN) {
+ netdev_info(bp->dev, "NIC Link is Up, speed unknown\n");
+ return;
+ }
if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
duplex = "full";
else
@@ -8756,7 +8929,6 @@ static void bnxt_report_link(struct bnxt *bp)
flow_ctrl = "ON - receive";
else
flow_ctrl = "none";
- speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
if (bp->flags & BNXT_FLAG_EEE_CAP)
@@ -8765,16 +8937,25 @@ static void bnxt_report_link(struct bnxt *bp)
"not active");
fec = bp->link_info.fec_cfg;
if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
- netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
+ netdev_info(bp->dev, "FEC autoneg %s encoding: %s\n",
(fec & BNXT_FEC_AUTONEG) ? "on" : "off",
- (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
- (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
+ bnxt_report_fec(&bp->link_info));
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
}
}
+static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
+{
+ if (!resp->supported_speeds_auto_mode &&
+ !resp->supported_speeds_force_mode &&
+ !resp->supported_pam4_speeds_auto_mode &&
+ !resp->supported_pam4_speeds_force_mode)
+ return true;
+ return false;
+}
+
static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
{
int rc = 0;
@@ -8822,9 +9003,24 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET)
bp->fw_cap |= BNXT_FW_CAP_PORT_STATS_NO_RESET;
+ if (bp->hwrm_spec_code >= 0x10a01) {
+ if (bnxt_phy_qcaps_no_speed(resp)) {
+ link_info->phy_state = BNXT_PHY_STATE_DISABLED;
+ netdev_warn(bp->dev, "Ethernet link disabled\n");
+ } else if (link_info->phy_state == BNXT_PHY_STATE_DISABLED) {
+ link_info->phy_state = BNXT_PHY_STATE_ENABLED;
+ netdev_info(bp->dev, "Ethernet link enabled\n");
+ /* Phy re-enabled, reprobe the speeds */
+ link_info->support_auto_speeds = 0;
+ link_info->support_pam4_auto_speeds = 0;
+ }
+ }
if (resp->supported_speeds_auto_mode)
link_info->support_auto_speeds =
le16_to_cpu(resp->supported_speeds_auto_mode);
+ if (resp->supported_pam4_speeds_auto_mode)
+ link_info->support_pam4_auto_speeds =
+ le16_to_cpu(resp->supported_pam4_speeds_auto_mode);
bp->port_count = resp->port_cnt;
@@ -8833,14 +9029,21 @@ hwrm_phy_qcaps_exit:
return rc;
}
-static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
+static bool bnxt_support_dropped(u16 advertising, u16 supported)
+{
+ u16 diff = advertising ^ supported;
+
+ return ((supported | diff) != supported);
+}
+
+int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
struct hwrm_port_phy_qcfg_input req = {0};
struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
u8 link_up = link_info->link_up;
- u16 diff;
+ bool support_changed = false;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
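bnxt_support_dropped() relies on a small bit trick: XOR leaves exactly the differing bits set, so OR-ing those back into 'supported' changes it only when 'advertising' carries a bit that 'supported' lacks. A self-contained check of that behavior:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same predicate as bnxt_support_dropped(): true iff 'advertising'
 * contains a speed bit that 'supported' does not.
 */
static bool support_dropped(uint16_t advertising, uint16_t supported)
{
	uint16_t diff = advertising ^ supported;

	return (supported | diff) != supported;
}

int main(void)
{
	/* advertising 0x6, supported 0x4: diff = 0x2,
	 * supported | diff = 0x6 != 0x4 -> a speed was dropped.
	 */
	assert(support_dropped(0x6, 0x4));
	/* supported being a superset of advertising is fine */
	assert(!support_dropped(0x4, 0x6));
	return 0;
}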
@@ -8867,10 +9070,17 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
else
link_info->link_speed = 0;
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
+ link_info->force_pam4_link_speed =
+ le16_to_cpu(resp->force_pam4_link_speed);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
+ link_info->support_pam4_speeds = le16_to_cpu(resp->support_pam4_speeds);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
+ link_info->auto_pam4_link_speeds =
+ le16_to_cpu(resp->auto_pam4_link_speed_mask);
link_info->lp_auto_link_speeds =
le16_to_cpu(resp->link_partner_adv_speeds);
+ link_info->lp_auto_pam4_link_speeds =
+ resp->link_partner_pam4_adv_speeds;
link_info->preemphasis = le32_to_cpu(resp->preemphasis);
link_info->phy_ver[0] = resp->phy_maj;
link_info->phy_ver[1] = resp->phy_min;
@@ -8919,9 +9129,10 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
}
link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
- if (bp->hwrm_spec_code >= 0x10504)
+ if (bp->hwrm_spec_code >= 0x10504) {
link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
-
+ link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
+ }
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -8939,17 +9150,21 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
if (!BNXT_PHY_CFG_ABLE(bp))
return 0;
- diff = link_info->support_auto_speeds ^ link_info->advertising;
- if ((link_info->support_auto_speeds | diff) !=
- link_info->support_auto_speeds) {
- /* An advertised speed is no longer supported, so we need to
- * update the advertisement settings. Caller holds RTNL
- * so we can modify link settings.
- */
+ /* Check if any advertised speeds are no longer supported. The caller
+ * holds the link_lock mutex, so we can modify link_info settings.
+ */
+ if (bnxt_support_dropped(link_info->advertising,
+ link_info->support_auto_speeds)) {
link_info->advertising = link_info->support_auto_speeds;
- if (link_info->autoneg & BNXT_AUTONEG_SPEED)
- bnxt_hwrm_set_link_setting(bp, true, false);
+ support_changed = true;
+ }
+ if (bnxt_support_dropped(link_info->advertising_pam4,
+ link_info->support_pam4_auto_speeds)) {
+ link_info->advertising_pam4 = link_info->support_pam4_auto_speeds;
+ support_changed = true;
}
+ if (support_changed && (link_info->autoneg & BNXT_AUTONEG_SPEED))
+ bnxt_hwrm_set_link_setting(bp, true, false);
return 0;
}
@@ -9008,27 +9223,30 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
}
}
-static void bnxt_hwrm_set_link_common(struct bnxt *bp,
- struct hwrm_port_phy_cfg_input *req)
+static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
- u8 autoneg = bp->link_info.autoneg;
- u16 fw_link_speed = bp->link_info.req_link_speed;
- u16 advertising = bp->link_info.advertising;
-
- if (autoneg & BNXT_AUTONEG_SPEED) {
- req->auto_mode |=
- PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
-
- req->enables |= cpu_to_le32(
- PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
- req->auto_link_speed_mask = cpu_to_le16(advertising);
-
+ if (bp->link_info.autoneg & BNXT_AUTONEG_SPEED) {
+ req->auto_mode |= PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
+ if (bp->link_info.advertising) {
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
+ req->auto_link_speed_mask = cpu_to_le16(bp->link_info.advertising);
+ }
+ if (bp->link_info.advertising_pam4) {
+ req->enables |=
+ cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK);
+ req->auto_link_pam4_speed_mask =
+ cpu_to_le16(bp->link_info.advertising_pam4);
+ }
req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
- req->flags |=
- cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
} else {
- req->force_link_speed = cpu_to_le16(fw_link_speed);
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
+ if (bp->link_info.req_signal_mode == BNXT_SIG_MODE_PAM4) {
+ req->force_pam4_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+ req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED);
+ } else {
+ req->force_link_speed = cpu_to_le16(bp->link_info.req_link_speed);
+ }
}
/* tell chimp that the setting takes effect immediately */
@@ -9424,14 +9642,19 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
if (BNXT_AUTO_MODE(link_info->auto_mode))
update_link = true;
- if (link_info->req_link_speed != link_info->force_link_speed)
+ if (link_info->req_signal_mode == BNXT_SIG_MODE_NRZ &&
+ link_info->req_link_speed != link_info->force_link_speed)
+ update_link = true;
+ else if (link_info->req_signal_mode == BNXT_SIG_MODE_PAM4 &&
+ link_info->req_link_speed != link_info->force_pam4_link_speed)
update_link = true;
if (link_info->req_duplex != link_info->duplex_setting)
update_link = true;
} else {
if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
update_link = true;
- if (link_info->advertising != link_info->auto_link_speeds)
+ if (link_info->advertising != link_info->auto_link_speeds ||
+ link_info->advertising_pam4 != link_info->auto_pam4_link_speeds)
update_link = true;
}
@@ -10362,6 +10585,23 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
}
}
+static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ struct hwrm_ring_reset_input req = {0};
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ struct bnxt_cp_ring_info *cpr;
+ u16 cp_ring_id;
+
+ cpr = &bnapi->cp_ring;
+ cp_ring_id = cpr->cp_ring_struct.fw_ring_id;
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1);
+ req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP;
+ req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id);
+ return hwrm_send_message_silent(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+}
+
static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
if (!silent)
@@ -10497,6 +10737,55 @@ static void bnxt_reset(struct bnxt *bp, bool silent)
bnxt_rtnl_unlock_sp(bp);
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_rx_ring_reset(struct bnxt *bp)
+{
+ int i;
+
+ bnxt_rtnl_lock_sp(bp);
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ bnxt_rtnl_unlock_sp(bp);
+ return;
+ }
+ /* Disable and flush TPA before resetting the RX ring */
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, false);
+ for (i = 0; i < bp->rx_nr_rings; i++) {
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ if (!rxr->bnapi->in_reset)
+ continue;
+
+ rc = bnxt_hwrm_rx_ring_reset(bp, i);
+ if (rc) {
+ if (rc == -EINVAL || rc == -EOPNOTSUPP)
+ netdev_info_once(bp->dev, "RX ring reset not supported by firmware, falling back to global reset\n");
+ else
+ netdev_warn(bp->dev, "RX ring reset failed, rc = %d, falling back to global reset\n",
+ rc);
+ bnxt_reset_task(bp, true);
+ break;
+ }
+ bnxt_free_one_rx_ring_skbs(bp, i);
+ rxr->rx_prod = 0;
+ rxr->rx_agg_prod = 0;
+ rxr->rx_sw_agg_prod = 0;
+ rxr->rx_next_cons = 0;
+ rxr->bnapi->in_reset = false;
+ bnxt_alloc_one_rx_ring(bp, i);
+ cpr = &rxr->bnapi->cp_ring;
+ cpr->sw_stats.rx.rx_resets++;
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ }
+ if (bp->flags & BNXT_FLAG_TPA)
+ bnxt_set_tpa(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_fw_reset_close(struct bnxt *bp)
{
bnxt_ulp_stop(bp);
@@ -10691,8 +10980,15 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
}
link_info->advertising = link_info->auto_link_speeds;
+ link_info->advertising_pam4 = link_info->auto_pam4_link_speeds;
} else {
link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_NRZ;
+ if (link_info->force_pam4_link_speed) {
+ link_info->req_link_speed =
+ link_info->force_pam4_link_speed;
+ link_info->req_signal_mode = BNXT_SIG_MODE_PAM4;
+ }
link_info->req_duplex = link_info->duplex_setting;
}
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
@@ -10778,6 +11074,9 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
+ if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
+ bnxt_rx_ring_reset(bp);
+
if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
@@ -10882,21 +11181,19 @@ static void bnxt_init_dflt_coal(struct bnxt *bp)
bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
}
-static void bnxt_alloc_fw_health(struct bnxt *bp)
+static int bnxt_fw_reset_via_optee(struct bnxt *bp)
{
- if (bp->fw_health)
- return;
+#ifdef CONFIG_TEE_BNXT_FW
+ int rc = tee_bnxt_fw_load();
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
- !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
- return;
+ if (rc)
+ netdev_err(bp->dev, "Failed FW reset via OP-TEE, rc=%d\n", rc);
- bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
- if (!bp->fw_health) {
- netdev_warn(bp->dev, "Failed to allocate fw_health\n");
- bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
- }
+ return rc;
+#else
+ netdev_err(bp->dev, "OP-TEE not supported\n");
+ return -ENODEV;
+#endif
}
static int bnxt_fw_init_one_p1(struct bnxt *bp)
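bnxt_fw_reset_via_optee() also tidies up the CONFIG_TEE_BNXT_FW handling: the #ifdef now lives in exactly one helper, so both call sites (bnxt_fw_init_one_p1() and bnxt_reset_all()) stay free of conditional compilation. A minimal userspace sketch of the pattern, with HAVE_OPTIONAL_FEATURE and do_feature() standing in for the real config symbol and TEE call:

#include <errno.h>
#include <stdio.h>

static int maybe_do_feature(void)
{
#ifdef HAVE_OPTIONAL_FEATURE
	return do_feature();            /* real call, built only when enabled */
#else
	fprintf(stderr, "feature not supported\n");
	return -ENODEV;                 /* uniform error when compiled out */
#endif
}

int main(void)
{
	return maybe_do_feature() ? 1 : 0;
}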
@@ -10905,8 +11202,24 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
bp->fw_cap = 0;
rc = bnxt_hwrm_ver_get(bp);
- if (rc)
- return rc;
+ bnxt_try_map_fw_health_reg(bp);
+ if (rc) {
+ if (bp->fw_health && bp->fw_health->status_reliable) {
+ u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+ netdev_err(bp->dev,
+ "Firmware not responding, status: 0x%x\n",
+ sts);
+ if (sts & FW_STATUS_REG_CRASHED_NO_MASTER) {
+ netdev_warn(bp->dev, "Firmware recover via OP-TEE requested\n");
+ rc = bnxt_fw_reset_via_optee(bp);
+ if (!rc)
+ rc = bnxt_hwrm_ver_get(bp);
+ }
+ }
+ if (rc)
+ return rc;
+ }
if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) {
rc = bnxt_alloc_kong_hwrm_resources(bp);
@@ -10920,6 +11233,8 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
if (rc)
return rc;
}
+ bnxt_nvm_cfg_ver_get(bp);
+
rc = bnxt_hwrm_func_reset(bp);
if (rc)
return -ENODEV;
@@ -10945,11 +11260,14 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
netdev_warn(bp->dev, "hwrm query adv flow mgnt failure rc: %d\n",
rc);
- bnxt_alloc_fw_health(bp);
- rc = bnxt_hwrm_error_recovery_qcfg(bp);
- if (rc)
- netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
- rc);
+ if (bnxt_alloc_fw_health(bp)) {
+ netdev_warn(bp->dev, "no memory for firmware error recovery\n");
+ } else {
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
+ rc);
+ }
rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
if (rc)
@@ -11075,12 +11393,8 @@ static void bnxt_reset_all(struct bnxt *bp)
int i, rc;
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
-#ifdef CONFIG_TEE_BNXT_FW
- rc = tee_bnxt_fw_load();
- if (rc)
- netdev_err(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ bnxt_fw_reset_via_optee(bp);
bp->fw_reset_timestamp = jiffies;
-#endif
return;
}
@@ -11199,7 +11513,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (time_after(jiffies, bp->fw_reset_timestamp +
(bp->fw_reset_max_dsecs * HZ / 10))) {
netdev_err(bp->dev, "Firmware reset aborted\n");
- goto fw_reset_abort;
+ goto fw_reset_abort_status;
}
bnxt_queue_fw_reset_work(bp, HZ / 5);
return;
@@ -11233,6 +11547,13 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
return;
+fw_reset_abort_status:
+ if (bp->fw_health->status_reliable ||
+ (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
+ u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+ netdev_err(bp->dev, "fw_health_status 0x%x\n", sts);
+ }
fw_reset_abort:
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
@@ -12203,6 +12524,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
if (bnxt_vf_pciid(ent->driver_data))
@@ -12234,8 +12556,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- if (BNXT_CHIP_P5(bp))
+ if (BNXT_CHIP_P5(bp)) {
bp->flags |= BNXT_FLAG_CHIP_P5;
+ if (BNXT_CHIP_SR2(bp))
+ bp->flags |= BNXT_FLAG_CHIP_SR2;
+ }
rc = bnxt_alloc_rss_indir_tbl(bp);
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 0ef89dabfd61..21ef1c21f602 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -907,6 +907,7 @@ struct bnxt_rx_ring_info {
struct bnxt_rx_sw_stats {
u64 rx_l4_csum_errors;
+ u64 rx_resets;
u64 rx_buf_errors;
};
@@ -1142,50 +1143,6 @@ struct bnxt_ntuple_filter {
#define BNXT_FLTR_UPDATE 1
};
-struct hwrm_port_phy_qcfg_output_compat {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 link;
- u8 link_signal_mode;
- __le16 link_speed;
- u8 duplex_cfg;
- u8 pause;
- __le16 support_speeds;
- __le16 force_link_speed;
- u8 auto_mode;
- u8 auto_pause;
- __le16 auto_link_speed;
- __le16 auto_link_speed_mask;
- u8 wirespeed;
- u8 lpbk;
- u8 force_pause;
- u8 module_status;
- __le32 preemphasis;
- u8 phy_maj;
- u8 phy_min;
- u8 phy_bld;
- u8 phy_type;
- u8 media_type;
- u8 xcvr_pkg_type;
- u8 eee_config_phy_addr;
- u8 parallel_detect;
- __le16 link_partner_adv_speeds;
- u8 link_partner_adv_auto_mode;
- u8 link_partner_adv_pause;
- __le16 adv_eee_link_speed_mask;
- __le16 link_partner_adv_eee_link_speed_mask;
- __le32 xcvr_identifier_type_tx_lpi_timer;
- __le16 fec_cfg;
- u8 duplex_state;
- u8 option_flags;
- char phy_vendor_name[16];
- char phy_vendor_partnumber[16];
- u8 unused_0[7];
- u8 valid;
-};
-
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1196,7 +1153,10 @@ struct bnxt_link_info {
#define BNXT_LINK_SIGNAL PORT_PHY_QCFG_RESP_LINK_SIGNAL
#define BNXT_LINK_LINK PORT_PHY_QCFG_RESP_LINK_LINK
u8 wire_speed;
- u8 loop_back;
+ u8 phy_state;
+#define BNXT_PHY_STATE_ENABLED 0
+#define BNXT_PHY_STATE_DISABLED 1
+
u8 link_up;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
@@ -1232,6 +1192,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
#define BNXT_LINK_SPEED_100GB PORT_PHY_QCFG_RESP_LINK_SPEED_100GB
u16 support_speeds;
+ u16 support_pam4_speeds;
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
@@ -1243,24 +1204,51 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
#define BNXT_LINK_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB
+ u16 auto_pam4_link_speeds;
+#define BNXT_LINK_PAM4_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G
+#define BNXT_LINK_PAM4_SPEED_MSK_100GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G
+#define BNXT_LINK_PAM4_SPEED_MSK_200GB PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G
u16 support_auto_speeds;
+ u16 support_pam4_auto_speeds;
u16 lp_auto_link_speeds;
+ u16 lp_auto_pam4_link_speeds;
u16 force_link_speed;
+ u16 force_pam4_link_speed;
u32 preemphasis;
u8 module_status;
+ u8 active_fec_sig_mode;
u16 fec_cfg;
+#define BNXT_FEC_NONE PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED
+#define BNXT_FEC_AUTONEG_CAP PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED
#define BNXT_FEC_AUTONEG PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED
+#define BNXT_FEC_ENC_BASE_R_CAP \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED
#define BNXT_FEC_ENC_BASE_R PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED
-#define BNXT_FEC_ENC_RS PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED
+#define BNXT_FEC_ENC_RS_CAP \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED
+#define BNXT_FEC_ENC_LLRS_CAP \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED)
+#define BNXT_FEC_ENC_RS \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED)
+#define BNXT_FEC_ENC_LLRS \
+ (PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED | \
+ PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED)
/* copy of requested setting from ethtool cmd */
u8 autoneg;
#define BNXT_AUTONEG_SPEED 1
#define BNXT_AUTONEG_FLOW_CTRL 2
+ u8 req_signal_mode;
+#define BNXT_SIG_MODE_NRZ PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ
+#define BNXT_SIG_MODE_PAM4 PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
u16 advertising; /* user adv setting */
+ u16 advertising_pam4;
bool force_link_chng;
bool phy_retry;
@@ -1272,6 +1260,49 @@ struct bnxt_link_info {
struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
};
+#define BNXT_FEC_RS544_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE)
+
+#define BNXT_FEC_RS544_OFF \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE)
+
+#define BNXT_FEC_RS272_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE)
+
+#define BNXT_FEC_RS272_OFF \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE)
+
+#define BNXT_PAM4_SUPPORTED(link_info) \
+ ((link_info)->support_pam4_speeds)
+
+#define BNXT_FEC_RS_ON(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ (BNXT_PAM4_SUPPORTED(link_info) ? \
+ (BNXT_FEC_RS544_ON | BNXT_FEC_RS272_OFF) : 0))
+
+#define BNXT_FEC_LLRS_ON \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE | \
+ PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ BNXT_FEC_RS272_ON | BNXT_FEC_RS544_OFF)
+
+#define BNXT_FEC_RS_OFF(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE | \
+ (BNXT_PAM4_SUPPORTED(link_info) ? \
+ (BNXT_FEC_RS544_OFF | BNXT_FEC_RS272_OFF) : 0))
+
+#define BNXT_FEC_BASE_R_ON(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE | \
+ BNXT_FEC_RS_OFF(link_info))
+
+#define BNXT_FEC_ALL_OFF(link_info) \
+ (PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE | \
+ BNXT_FEC_RS_OFF(link_info))
+
#define BNXT_MAX_QUEUE 8
struct bnxt_queue_info {
@@ -1464,6 +1495,7 @@ struct bnxt_fw_health {
u8 enabled:1;
u8 master:1;
u8 fatal:1;
+ u8 status_reliable:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1491,6 +1523,9 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_HEALTH_WIN_BASE 0x3000
#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
+#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
+ ((reg) & BNXT_GRC_OFFSET_MASK))
+
#define BNXT_FW_STATUS_HEALTHY 0x8000
#define BNXT_FW_STATUS_SHUTDOWN 0x100000
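BNXT_FW_HEALTH_WIN_OFF() is the read-side companion of __bnxt_map_fw_health_reg(): the page-aligned upper bits of a GRC address get written to the window-map register, and the remaining low bits become an offset from the fixed window base in BAR0. A standalone sketch of the address split (the two mask values are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>

#define WIN_BASE    0x3000       /* BNXT_FW_HEALTH_WIN_BASE from this patch */
#define BASE_MASK   0xfffff000U  /* assumed: selects the 4KB-aligned page */
#define OFFSET_MASK 0x00000fffU  /* assumed: offset within that page */

int main(void)
{
	uint32_t reg = 0x0003045cU; /* hypothetical GRC register address */

	/* value written to the window-map register ... */
	printf("map page 0x%08x\n", reg & BASE_MASK);
	/* ... and where the register then appears in BAR0 */
	printf("read BAR0 + 0x%04x\n", WIN_BASE + (reg & OFFSET_MASK));
	return 0;
}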
@@ -1535,6 +1570,8 @@ struct bnxt {
u8 chip_rev;
+#define CHIP_NUM_58818 0xd818
+
#define BNXT_CHIP_NUM_5730X(chip_num) \
((chip_num) >= CHIP_NUM_57301 && \
(chip_num) <= CHIP_NUM_57304)
@@ -1613,6 +1650,7 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
+ #define BNXT_FLAG_CHIP_SR2 0x80000
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_DSN_VALID 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
@@ -1630,20 +1668,27 @@ struct bnxt {
#define BNXT_NPAR(bp) ((bp)->port_partition_type)
#define BNXT_MH(bp) ((bp)->flags & BNXT_FLAG_MULTI_HOST)
#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp) && !BNXT_MH(bp))
-#define BNXT_PHY_CFG_ABLE(bp) (BNXT_SINGLE_PF(bp) || \
- ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG))
+#define BNXT_PHY_CFG_ABLE(bp) ((BNXT_SINGLE_PF(bp) || \
+ ((bp)->fw_cap & BNXT_FW_CAP_SHARED_PORT_CFG)) && \
+ (bp)->link_info.phy_state == BNXT_PHY_STATE_ENABLED)
#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
#define BNXT_RX_PAGE_MODE(bp) ((bp)->flags & BNXT_FLAG_RX_PAGE_MODE)
#define BNXT_SUPPORTS_TPA(bp) (!BNXT_CHIP_TYPE_NITRO_A0(bp) && \
(!((bp)->flags & BNXT_FLAG_CHIP_P5) || \
(bp)->max_tpa_v2) && !is_kdump_kernel())
-/* Chip class phase 5 */
-#define BNXT_CHIP_P5(bp) \
+#define BNXT_CHIP_SR2(bp) \
+ ((bp)->chip_num == CHIP_NUM_58818)
+
+#define BNXT_CHIP_P5_THOR(bp) \
((bp)->chip_num == CHIP_NUM_57508 || \
(bp)->chip_num == CHIP_NUM_57504 || \
(bp)->chip_num == CHIP_NUM_57502)
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(bp) \
+ (BNXT_CHIP_P5_THOR(bp) || BNXT_CHIP_SR2(bp))
+
/* Chip class phase 4.x */
#define BNXT_CHIP_P4(bp) \
(BNXT_CHIP_NUM_57X1X((bp)->chip_num) || \
@@ -1777,6 +1822,7 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
#define BNXT_FW_CAP_PORT_STATS_NO_RESET 0x10000000
+ #define BNXT_FW_CAP_RING_MONITOR 0x40000000
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -1810,6 +1856,7 @@ struct bnxt {
#define PHY_VER_STR_LEN (FW_VER_STR_LEN - BC_HWRM_STR_LEN)
char fw_ver_str[FW_VER_STR_LEN];
char hwrm_ver_supp[FW_VER_STR_LEN];
+ char nvm_cfg_ver[FW_VER_STR_LEN];
u64 fw_ver_code;
#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
((u64)(maj) << 48 | (u64)(min) << 32 | (u64)(bld) << 16 | (rsv))
@@ -1935,6 +1982,20 @@ struct bnxt {
struct device *hwmon_dev;
};
+#define BNXT_NUM_RX_RING_STATS 8
+#define BNXT_NUM_TX_RING_STATS 8
+#define BNXT_NUM_TPA_RING_STATS 4
+#define BNXT_NUM_TPA_RING_STATS_P5 5
+#define BNXT_NUM_TPA_RING_STATS_P5_SR2 6
+
+#define BNXT_RING_STATS_SIZE_P5 \
+ ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
+ BNXT_NUM_TPA_RING_STATS_P5) * 8)
+
+#define BNXT_RING_STATS_SIZE_P5_SR2 \
+ ((BNXT_NUM_RX_RING_STATS + BNXT_NUM_TX_RING_STATS + \
+ BNXT_NUM_TPA_RING_STATS_P5_SR2) * 8)
+
#define BNXT_GET_RING_STATS64(sw, counter) \
(*((sw) + offsetof(struct ctx_hw_stats, counter) / 8))
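The two new size macros are plain arithmetic: the counters are 64-bit, so the per-ring stats block is just the counter count times eight bytes. A quick check of the values they expand to:

#include <assert.h>

int main(void)
{
	/* 8 RX + 8 TX + 5 TPA counters, 8 bytes each */
	assert((8 + 8 + 5) * 8 == 168); /* BNXT_RING_STATS_SIZE_P5 */
	/* the SR2 layout adds one more TPA counter */
	assert((8 + 8 + 6) * 8 == 176); /* BNXT_RING_STATS_SIZE_P5_SR2 */
	return 0;
}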
@@ -2114,6 +2175,7 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 3a854195d5b0..184b6d0513b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -17,15 +17,13 @@
#include "bnxt_ethtool.h"
static int
-bnxt_dl_flash_update(struct devlink *dl, const char *filename,
- const char *region, struct netlink_ext_ack *extack)
+bnxt_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
- if (region)
- return -EOPNOTSUPP;
-
if (!BNXT_PF(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"flash update not supported from a VF");
@@ -33,15 +31,12 @@ bnxt_dl_flash_update(struct devlink *dl, const char *filename,
}
devlink_flash_update_begin_notify(dl);
- devlink_flash_update_status_notify(dl, "Preparing to flash", region, 0,
- 0);
- rc = bnxt_flash_package_from_file(bp->dev, filename, 0);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+ rc = bnxt_flash_package_from_file(bp->dev, params->file_name, 0);
if (!rc)
- devlink_flash_update_status_notify(dl, "Flashing done", region,
- 0, 0);
+ devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
else
- devlink_flash_update_status_notify(dl, "Flashing failed",
- region, 0, 0);
+ devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0);
devlink_flash_update_end_notify(dl);
return rc;
}
@@ -387,15 +382,41 @@ static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp,
return rc;
}
+static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
+ enum bnxt_dl_version_type type, const char *key,
+ char *buf)
+{
+ if (!strlen(buf))
+ return 0;
+
+ if ((bp->flags & BNXT_FLAG_CHIP_P5) &&
+ (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
+ !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
+ return 0;
+
+ switch (type) {
+ case BNXT_VERSION_FIXED:
+ return devlink_info_version_fixed_put(req, key, buf);
+ case BNXT_VERSION_RUNNING:
+ return devlink_info_version_running_put(req, key, buf);
+ case BNXT_VERSION_STORED:
+ return devlink_info_version_stored_put(req, key, buf);
+ }
+ return 0;
+}
+
+#define HWRM_FW_VER_STR_LEN 16
+
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
+ struct hwrm_nvm_get_dev_info_output nvm_dev_info;
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
union devlink_param_value nvm_cfg_ver;
struct hwrm_ver_get_output *ver_resp;
char mgmt_ver[FW_VER_STR_LEN];
char roce_ver[FW_VER_STR_LEN];
- char fw_ver[FW_VER_STR_LEN];
+ char ncsi_ver[FW_VER_STR_LEN];
char buf[32];
int rc;
@@ -403,10 +424,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
if (rc)
return rc;
- if (strlen(bp->board_partno)) {
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
- bp->board_partno);
+ if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
+ sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
+ bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
+ bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
+ rc = devlink_info_serial_number_put(req, buf);
if (rc)
return rc;
}
@@ -417,54 +439,56 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
return rc;
}
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
+ bp->board_partno);
+ if (rc)
+ return rc;
+
sprintf(buf, "%X", bp->chip_num);
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
if (rc)
return rc;
ver_resp = &bp->ver_resp;
sprintf(buf, "%X", ver_resp->chip_rev);
- rc = devlink_info_version_fixed_put(req,
- DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
+ DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
if (rc)
return rc;
- if (BNXT_PF(bp)) {
- sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
- bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
- bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
- rc = devlink_info_serial_number_put(req, buf);
- if (rc)
- return rc;
- }
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ bp->nvm_cfg_ver);
+ if (rc)
+ return rc;
- if (strlen(ver_resp->active_pkg_name)) {
- rc =
- devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW,
- ver_resp->active_pkg_name);
- if (rc)
- return rc;
- }
+ buf[0] = 0;
+ strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc)
+ return rc;
if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &nvm_cfg_ver)) {
u32 ver = nvm_cfg_ver.vu32;
sprintf(buf, "%X.%X.%X", (ver >> 16) & 0xF, (ver >> 8) & 0xF,
ver & 0xF);
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_PSID, buf);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
+ buf);
if (rc)
return rc;
}
if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
- snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);
- snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);
@@ -472,11 +496,11 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
} else {
- snprintf(fw_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);
- snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);
@@ -484,29 +508,60 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
}
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, fw_ver);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
if (rc)
return rc;
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
- bp->hwrm_ver_supp);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
+ bp->hwrm_ver_supp);
if (rc)
return rc;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5)) {
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, mgmt_ver);
- if (rc)
- return rc;
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc)
+ return rc;
- rc = devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
- if (rc)
- return rc;
- }
- return 0;
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
+ if (rc ||
+ !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ return 0;
+
+ buf[0] = 0;
+ strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW, buf);
+ if (rc)
+ return rc;
+
+ snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
+ nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
+ if (rc)
+ return rc;
+
+ snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
+ nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
+ if (rc)
+ return rc;
+
+ snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
+ nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
+ nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
+ return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
}
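bnxt_dl_info_put() folds the three devlink version buckets behind one call: "fixed" versions identify the board, "running" versions describe the firmware currently executing, and "stored" versions describe what sits in NVM and takes effect after the next reset. A minimal sketch of the underlying helpers, using a hypothetical foo driver and made-up version strings:

static int foo_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	int rc;

	rc = devlink_info_version_fixed_put(req,
			DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, "BCM957XXX");
	if (rc)
		return rc;
	rc = devlink_info_version_running_put(req,
			DEVLINK_INFO_VERSION_GENERIC_FW, "218.0.152.0");
	if (rc)
		return rc;
	/* differs from the "running" entry only until the device is reset */
	return devlink_info_version_stored_put(req,
			DEVLINK_INFO_VERSION_GENERIC_FW, "219.0.112.0");
}

All three then appear as separate fixed/running/stored sections in the output of "devlink dev info".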
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d5c8bd49383a..d22cab5d6856 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -60,6 +60,12 @@ struct bnxt_dl_nvm_param {
u8 dl_num_bytes;
};
+enum bnxt_dl_version_type {
+ BNXT_VERSION_FIXED,
+ BNXT_VERSION_RUNNING,
+ BNXT_VERSION_STORED,
+};
+
void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
void bnxt_dl_health_recovery_done(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index fecdfd875af1..53687bc7fcf5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -11,6 +11,7 @@
#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
+#include <linux/linkmode.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
@@ -172,10 +173,12 @@ static const char * const bnxt_ring_tpa2_stats_str[] = {
"rx_tpa_pkt",
"rx_tpa_bytes",
"rx_tpa_errors",
+ "rx_tpa_events",
};
static const char * const bnxt_rx_sw_stats_str[] = {
"rx_l4_csum_errors",
+ "rx_resets",
"rx_buf_errors",
};
@@ -462,9 +465,12 @@ static const struct {
static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
if (BNXT_SUPPORTS_TPA(bp)) {
- if (bp->max_tpa_v2)
- return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
- return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
+ if (bp->max_tpa_v2) {
+ if (BNXT_CHIP_P5_THOR(bp))
+ return BNXT_NUM_TPA_RING_STATS_P5;
+ return BNXT_NUM_TPA_RING_STATS_P5_SR2;
+ }
+ return BNXT_NUM_TPA_RING_STATS;
}
return 0;
}
@@ -796,7 +802,7 @@ static void bnxt_get_channels(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
int max_rx_rings, max_tx_rings, tcs;
- int max_tx_sch_inputs;
+ int max_tx_sch_inputs, tx_grps;
/* Get the most up-to-date max_tx_sch_inputs. */
if (netif_running(dev) && BNXT_NEW_RM(bp))
@@ -806,6 +812,12 @@ static void bnxt_get_channels(struct net_device *dev,
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
+
+ tcs = netdev_get_num_tc(dev);
+ tx_grps = max(tcs, 1);
+ if (bp->tx_nr_rings_xdp)
+ tx_grps++;
+ max_tx_rings /= tx_grps;
channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);
if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
@@ -815,7 +827,6 @@ static void bnxt_get_channels(struct net_device *dev,
if (max_tx_sch_inputs)
max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
- tcs = netdev_get_num_tc(dev);
if (tcs > 1)
max_tx_rings /= tcs;
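Worked example for the ring-group arithmetic in the first hunk above: with 12 usable TX rings, two traffic classes (tcs = 2) and an XDP TX ring set, tx_grps is max(2, 1) + 1 = 3, so max_tx_rings drops to 12 / 3 = 4 before the max_combined clamp; with no TCs and no XDP, tx_grps stays at 1 and the limit is unchanged.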
@@ -1503,6 +1514,53 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB; \
}
+#define BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_50GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 50000baseCR_Full); \
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_100GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100000baseCR2_Full);\
+ if ((fw_speeds) & BNXT_LINK_PAM4_SPEED_MSK_200GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 200000baseCR4_Full);\
+}
+
+#define BNXT_ETHTOOL_TO_FW_PAM4_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 50000baseCR_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_50GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100000baseCR2_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_100GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 200000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_PAM4_SPEED_MSK_200GB; \
+}
+
+static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ u16 fec_cfg = link_info->fec_cfg;
+
+ if ((fec_cfg & BNXT_FEC_NONE) || !(fec_cfg & BNXT_FEC_AUTONEG)) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ lk_ksettings->link_modes.advertising);
+ return;
+ }
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (fec_cfg & BNXT_FEC_ENC_RS)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ lk_ksettings->link_modes.advertising);
+ if (fec_cfg & BNXT_FEC_ENC_LLRS)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ lk_ksettings->link_modes.advertising);
+}
+
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
@@ -1513,6 +1571,9 @@ static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
fw_pause = link_info->auto_pause_setting;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
+ fw_speeds = link_info->advertising_pam4;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, advertising);
+ bnxt_fw_to_ethtool_advertised_fec(link_info, lk_ksettings);
}
static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
@@ -1526,6 +1587,29 @@ static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
lp_advertising);
+ fw_speeds = link_info->lp_auto_pam4_link_speeds;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, lp_advertising);
+}
+
+static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
+{
+ u16 fec_cfg = link_info->fec_cfg;
+
+ if (fec_cfg & BNXT_FEC_NONE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ lk_ksettings->link_modes.supported);
+ return;
+ }
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ lk_ksettings->link_modes.supported);
+ if (fec_cfg & BNXT_FEC_ENC_RS_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ lk_ksettings->link_modes.supported);
+ if (fec_cfg & BNXT_FEC_ENC_LLRS_CAP)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ lk_ksettings->link_modes.supported);
}
static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
@@ -1534,14 +1618,18 @@ static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
u16 fw_speeds = link_info->support_speeds;
BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
+ fw_speeds = link_info->support_pam4_speeds;
+ BNXT_FW_TO_ETHTOOL_PAM4_SPDS(fw_speeds, lk_ksettings, supported);
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
Asym_Pause);
- if (link_info->support_auto_speeds)
+ if (link_info->support_auto_speeds ||
+ link_info->support_pam4_auto_speeds)
ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
Autoneg);
+ bnxt_fw_to_ethtool_support_fec(link_info, lk_ksettings);
}
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
@@ -1632,55 +1720,86 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
return 0;
}
-static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
+static int bnxt_force_link_speed(struct net_device *dev, u32 ethtool_speed)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
+ u16 support_pam4_spds = link_info->support_pam4_speeds;
u16 support_spds = link_info->support_speeds;
- u32 fw_speed = 0;
+ u8 sig_mode = BNXT_SIG_MODE_NRZ;
+ u16 fw_speed = 0;
switch (ethtool_speed) {
case SPEED_100:
if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB;
break;
case SPEED_1000:
if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case SPEED_2500:
if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB;
break;
case SPEED_10000:
if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
break;
case SPEED_20000:
if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB;
break;
case SPEED_25000:
if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
break;
case SPEED_40000:
if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
break;
case SPEED_50000:
- if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_50GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+ } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_50GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
case SPEED_100000:
- if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
- fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_100GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
+ } else if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_100GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
- default:
- netdev_err(dev, "unsupported speed!\n");
+ case SPEED_200000:
+ if (support_pam4_spds & BNXT_LINK_PAM4_SPEED_MSK_200GB) {
+ fw_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
+ sig_mode = BNXT_SIG_MODE_PAM4;
+ }
break;
}
- return fw_speed;
+
+ if (!fw_speed) {
+ netdev_err(dev, "unsupported speed!\n");
+ return -EINVAL;
+ }
+
+ if (link_info->req_link_speed == fw_speed &&
+ link_info->req_signal_mode == sig_mode &&
+ link_info->autoneg == 0)
+ return -EALREADY;
+
+ link_info->req_link_speed = fw_speed;
+ link_info->req_signal_mode = sig_mode;
+ link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
+ link_info->autoneg = 0;
+ link_info->advertising = 0;
+ link_info->advertising_pam4 = 0;
+
+ return 0;
}
u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
@@ -1712,7 +1831,6 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
bool set_pause = false;
- u16 fw_advertising = 0;
u32 speed;
int rc = 0;
@@ -1721,19 +1839,23 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
- BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
+ link_info->advertising = 0;
+ link_info->advertising_pam4 = 0;
+ BNXT_ETHTOOL_TO_FW_SPDS(link_info->advertising, lk_ksettings,
advertising);
+ BNXT_ETHTOOL_TO_FW_PAM4_SPDS(link_info->advertising_pam4,
+ lk_ksettings, advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
- if (!fw_advertising)
+ if (!link_info->advertising && !link_info->advertising_pam4) {
link_info->advertising = link_info->support_auto_speeds;
- else
- link_info->advertising = fw_advertising;
+ link_info->advertising_pam4 =
+ link_info->support_pam4_auto_speeds;
+ }
/* any change to autoneg will cause link change, therefore the
* driver should put back the original pause setting in autoneg
*/
set_pause = true;
} else {
- u16 fw_speed;
u8 phy_type = link_info->phy_type;
if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
@@ -1749,15 +1871,12 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
goto set_setting_exit;
}
speed = base->speed;
- fw_speed = bnxt_get_fw_speed(dev, speed);
- if (!fw_speed) {
- rc = -EINVAL;
+ rc = bnxt_force_link_speed(dev, speed);
+ if (rc) {
+ if (rc == -EALREADY)
+ rc = 0;
goto set_setting_exit;
}
- link_info->req_link_speed = fw_speed;
- link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
- link_info->autoneg = 0;
- link_info->advertising = 0;
}
if (netif_running(dev))
@@ -1768,6 +1887,110 @@ set_setting_exit:
return rc;
}
+static int bnxt_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fec)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ u8 active_fec;
+ u16 fec_cfg;
+
+ link_info = &bp->link_info;
+ fec_cfg = link_info->fec_cfg;
+ active_fec = link_info->active_fec_sig_mode &
+ PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
+ if (fec_cfg & BNXT_FEC_NONE) {
+ fec->fec = ETHTOOL_FEC_NONE;
+ fec->active_fec = ETHTOOL_FEC_NONE;
+ return 0;
+ }
+ if (fec_cfg & BNXT_FEC_AUTONEG)
+ fec->fec |= ETHTOOL_FEC_AUTO;
+ if (fec_cfg & BNXT_FEC_ENC_BASE_R)
+ fec->fec |= ETHTOOL_FEC_BASER;
+ if (fec_cfg & BNXT_FEC_ENC_RS)
+ fec->fec |= ETHTOOL_FEC_RS;
+ if (fec_cfg & BNXT_FEC_ENC_LLRS)
+ fec->fec |= ETHTOOL_FEC_LLRS;
+
+ switch (active_fec) {
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_BASER;
+ break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_RS;
+ break;
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE:
+ case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE:
+ fec->active_fec |= ETHTOOL_FEC_LLRS;
+ break;
+ }
+ return 0;
+}
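The active_fec_sig_mode byte read here packs the signalling mode into the low nibble and the active FEC into the high nibble (see the PORT_PHY_QCFG_RESP_* masks defined later in this patch for bnxt_hsi.h). A small illustrative fragment, with a made-up register value:

	u8 fec_sig = 0x21;	/* example value only */
	u8 sig_mode = fec_sig & PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK;
				/* 0x1: PAM4 signalling */
	u8 active_fec = fec_sig & PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK;
				/* 0x20: Clause 91 RS-FEC active */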
+
+static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info,
+ u32 fec)
+{
+ u32 fw_fec = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE;
+
+ if (fec & ETHTOOL_FEC_BASER)
+ fw_fec |= BNXT_FEC_BASE_R_ON(link_info);
+ else if (fec & ETHTOOL_FEC_RS)
+ fw_fec |= BNXT_FEC_RS_ON(link_info);
+ else if (fec & ETHTOOL_FEC_LLRS)
+ fw_fec |= BNXT_FEC_LLRS_ON;
+ return fw_fec;
+}
+
+static int bnxt_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info;
+ u32 new_cfg, fec = fecparam->fec;
+ u16 fec_cfg;
+ int rc;
+
+ link_info = &bp->link_info;
+ fec_cfg = link_info->fec_cfg;
+ if (fec_cfg & BNXT_FEC_NONE)
+ return -EOPNOTSUPP;
+
+ if (fec & ETHTOOL_FEC_OFF) {
+ new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE |
+ BNXT_FEC_ALL_OFF(link_info);
+ goto apply_fec;
+ }
+ if (((fec & ETHTOOL_FEC_AUTO) && !(fec_cfg & BNXT_FEC_AUTONEG_CAP)) ||
+ ((fec & ETHTOOL_FEC_RS) && !(fec_cfg & BNXT_FEC_ENC_RS_CAP)) ||
+ ((fec & ETHTOOL_FEC_LLRS) && !(fec_cfg & BNXT_FEC_ENC_LLRS_CAP)) ||
+ ((fec & ETHTOOL_FEC_BASER) && !(fec_cfg & BNXT_FEC_ENC_BASE_R_CAP)))
+ return -EINVAL;
+
+ if (fec & ETHTOOL_FEC_AUTO) {
+ if (!link_info->autoneg)
+ return -EINVAL;
+ new_cfg = PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE;
+ } else {
+ new_cfg = bnxt_ethtool_forced_fec_to_fw(link_info, fec);
+ }
+
+apply_fec:
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ /* update current settings */
+ if (!rc) {
+ mutex_lock(&bp->link_lock);
+ bnxt_update_link(bp, false);
+ mutex_unlock(&bp->link_lock);
+ }
+ return rc;
+}
+
static void bnxt_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
@@ -1781,6 +2004,22 @@ static void bnxt_get_pauseparam(struct net_device *dev,
epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}
+static void bnxt_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *epstat)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u64 *rx, *tx;
+
+ if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_PORT_STATS))
+ return;
+
+ rx = bp->port_stats.sw_stats;
+ tx = bp->port_stats.sw_stats + BNXT_TX_PORT_STATS_BYTE_OFFSET / 8;
+
+ epstat->rx_pause_frames = BNXT_GET_RX_PORT_STATS64(rx, rx_pause_frames);
+ epstat->tx_pause_frames = BNXT_GET_TX_PORT_STATS64(tx, tx_pause_frames);
+}
+
static int bnxt_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
@@ -1833,6 +2072,22 @@ static u32 bnxt_get_link(struct net_device *dev)
return bp->link_info.link_up;
}
+int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info)
+{
+ struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_nvm_get_dev_info_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc)
+ memcpy(nvm_dev_info, resp, sizeof(*resp));
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static void bnxt_print_admin_err(struct bnxt *bp)
{
netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
@@ -3059,7 +3314,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
u8 test_mask = 0;
int rc = 0, i;
- if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+ if (!bp->num_tests || !BNXT_PF(bp))
return;
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
@@ -3072,9 +3327,9 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
do_ext_lpbk = true;
if (etest->flags & ETH_TEST_FL_OFFLINE) {
- if (bp->pf.active_vfs) {
+ if (bp->pf.active_vfs || !BNXT_SINGLE_PF(bp)) {
etest->flags |= ETH_TEST_FL_FAILED;
- netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
+ netdev_warn(dev, "Offline tests cannot be run with active VFs or on shared PF\n");
return;
}
offline = true;
@@ -3590,7 +3845,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
bnxt_get_pkgver(dev);
bp->num_tests = 0;
- if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
+ if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp))
return;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
@@ -3657,6 +3912,9 @@ const struct ethtool_ops bnxt_ethtool_ops = {
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
+ .get_fecparam = bnxt_get_fecparam,
+ .set_fecparam = bnxt_set_fecparam,
+ .get_pause_stats = bnxt_get_pause_stats,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 34f44ddfad79..fa6fbde52bea 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -92,6 +92,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev);
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
+int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
+ struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
u32 install_type);
void bnxt_ethtool_init(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index c4af6bf15e36..2d3e962bdac3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -213,7 +213,10 @@ struct cmd_nums {
#define HWRM_PORT_PHY_MDIO_BUS_ACQUIRE 0xb7UL
#define HWRM_PORT_PHY_MDIO_BUS_RELEASE 0xb8UL
#define HWRM_PORT_QSTATS_EXT_PFC_WD 0xb9UL
- #define HWRM_PORT_ECN_QSTATS 0xbaUL
+ #define HWRM_RESERVED7 0xbaUL
+ #define HWRM_PORT_TX_FIR_CFG 0xbbUL
+ #define HWRM_PORT_TX_FIR_QCFG 0xbcUL
+ #define HWRM_PORT_ECN_QSTATS 0xbdUL
#define HWRM_FW_RESET 0xc0UL
#define HWRM_FW_QSTATUS 0xc1UL
#define HWRM_FW_HEALTH_CHECK 0xc2UL
@@ -370,6 +373,9 @@ struct cmd_nums {
#define HWRM_TF_SESSION_RESC_FLUSH 0x2cfUL
#define HWRM_TF_TBL_TYPE_GET 0x2daUL
#define HWRM_TF_TBL_TYPE_SET 0x2dbUL
+ #define HWRM_TF_TBL_TYPE_BULK_GET 0x2dcUL
+ #define HWRM_TF_CTXT_MEM_ALLOC 0x2e2UL
+ #define HWRM_TF_CTXT_MEM_FREE 0x2e3UL
#define HWRM_TF_CTXT_MEM_RGTR 0x2e4UL
#define HWRM_TF_CTXT_MEM_UNRGTR 0x2e5UL
#define HWRM_TF_EXT_EM_QCAPS 0x2e6UL
@@ -384,6 +390,8 @@ struct cmd_nums {
#define HWRM_TF_TCAM_FREE 0x2fbUL
#define HWRM_TF_GLOBAL_CFG_SET 0x2fcUL
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
+ #define HWRM_TF_IF_TBL_SET 0x2feUL
+ #define HWRM_TF_IF_TBL_GET 0x2ffUL
#define HWRM_SV 0x400UL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
@@ -447,6 +455,7 @@ struct ret_codes {
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS 0xeUL
#define HWRM_ERR_CODE_HWRM_ERROR 0xfUL
#define HWRM_ERR_CODE_BUSY 0x10UL
+ #define HWRM_ERR_CODE_RESOURCE_LOCKED 0x11UL
#define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL
#define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL
#define HWRM_ERR_CODE_CMD_NOT_SUPPORTED 0xffffUL
@@ -478,8 +487,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_RSVD 54
-#define HWRM_VERSION_STR "1.10.1.54"
+#define HWRM_VERSION_RSVD 68
+#define HWRM_VERSION_STR "1.10.1.68"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -675,6 +684,7 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL
#define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL
#define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG 0xaUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL
#define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL
@@ -851,6 +861,32 @@ struct hwrm_async_event_cmpl_error_recovery {
#define ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED 0x2UL
};
+/* hwrm_async_event_cmpl_ring_monitor_msg (size:128b/16B) */
+struct hwrm_async_event_cmpl_ring_monitor_msg {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG 0xaUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_ID_RING_MONITOR_MSG
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_TX 0x0UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_RX 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL 0x2UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_LAST ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_CMPL
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_V 0x1UL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_RING_MONITOR_MSG_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
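A short sketch of consuming this completion, assuming cmpl points at a received struct hwrm_async_event_cmpl_ring_monitor_msg; the extraction just applies the mask/shift pair defined above:

	u32 data2 = le32_to_cpu(cmpl->event_data2);
	u8 ring_type = (data2 & ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_MASK) >>
		       ASYNC_EVENT_CMPL_RING_MONITOR_MSG_EVENT_DATA2_DISABLE_RING_TYPE_SFT;
	/* 0x0 TX, 0x1 RX, 0x2 CMPL, per the values above */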
+
/* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */
struct hwrm_async_event_cmpl_vf_cfg_change {
__le16 type;
@@ -975,6 +1011,28 @@ struct hwrm_async_event_cmpl_eem_cache_flush_done {
#define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0
};
+/* hwrm_async_event_cmpl_deferred_response (size:128b/16B) */
+struct hwrm_async_event_cmpl_deferred_response {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE 0x40UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_LAST ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_ID_DEFERRED_RESPONSE
+ __le32 event_data2;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_MASK 0xffffUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_EVENT_DATA2_SEQ_ID_SFT 0
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_V 0x1UL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_DEFERRED_RESPONSE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1214,7 +1272,13 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_SCHQ_SUPPORTED 0x40UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED 0x80UL
u8 max_schqs;
- u8 unused_1[2];
+ u8 mpc_chnls_cap;
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RCE 0x2UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TE_CFA 0x4UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_RE_CFA 0x8UL
+ #define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_PRIMATE 0x10UL
+ u8 unused_1;
u8 valid;
};
@@ -1250,6 +1314,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_PREBOOT_LEGACY_L2_RINGS 0x100UL
#define FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED 0x200UL
#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED 0x400UL
+ #define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED 0x800UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1341,7 +1406,13 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_SVIF_INFO_SVIF_MASK 0x7fffUL
#define FUNC_QCFG_RESP_SVIF_INFO_SVIF_SFT 0
#define FUNC_QCFG_RESP_SVIF_INFO_SVIF_VALID 0x8000UL
- u8 unused_2[7];
+ u8 mpc_chnls;
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TCE_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RCE_ENABLED 0x2UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_TE_CFA_ENABLED 0x4UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_RE_CFA_ENABLED 0x8UL
+ #define FUNC_QCFG_RESP_MPC_CHNLS_PRIMATE_ENABLED 0x10UL
+ u8 unused_2[6];
u8 valid;
};
@@ -1405,6 +1476,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_ENABLES_ADMIN_LINK_STATE 0x400000UL
#define FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT 0x800000UL
#define FUNC_CFG_REQ_ENABLES_SCHQ_ID 0x1000000UL
+ #define FUNC_CFG_REQ_ENABLES_MPC_CHNLS 0x2000000UL
__le16 mtu;
__le16 mru;
__le16 num_rsscos_ctxs;
@@ -1479,7 +1551,18 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_OPTIONS_RSVD_SFT 4
__le16 num_mcast_filters;
__le16 schq_id;
- u8 unused_0[6];
+ __le16 mpc_chnls;
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_ENABLE 0x1UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TCE_DISABLE 0x2UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_ENABLE 0x4UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RCE_DISABLE 0x8UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_ENABLE 0x10UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_TE_CFA_DISABLE 0x20UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_ENABLE 0x40UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_RE_CFA_DISABLE 0x80UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_ENABLE 0x100UL
+ #define FUNC_CFG_REQ_MPC_CHNLS_PRIMATE_DISABLE 0x200UL
+ u8 unused_0[4];
};
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -1559,7 +1642,7 @@ struct hwrm_func_qstats_ext_input {
u8 unused_1[4];
};
-/* hwrm_func_qstats_ext_output (size:1472b/184B) */
+/* hwrm_func_qstats_ext_output (size:1536b/192B) */
struct hwrm_func_qstats_ext_output {
__le16 error_code;
__le16 req_type;
@@ -1586,6 +1669,7 @@ struct hwrm_func_qstats_ext_output {
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
u8 unused_0[7];
u8 valid;
};
@@ -2412,25 +2496,29 @@ struct hwrm_port_phy_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
- #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
- #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
- #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_ENABLE 0x20000UL
- #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_2XN_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESET_PHY 0x1UL
+ #define PORT_PHY_CFG_REQ_FLAGS_DEPRECATED 0x2UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
+ #define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -2573,7 +2661,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:832b/104B) */
+/* hwrm_port_phy_qcfg_output (size:768b/96B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -2584,10 +2672,22 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 link_signal_mode;
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_NRZ 0x0UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4 0x1UL
- #define PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_LINK_SIGNAL_MODE_PAM4
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -2809,21 +2909,21 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
__le16 fec_cfg;
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_SUPPORTED 0x200UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ENABLED 0x400UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ACTIVE 0x800UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ACTIVE 0x1000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ACTIVE 0x2000UL
- #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_2XN_ACTIVE 0x4000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
@@ -2845,11 +2945,10 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
#define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
- __le16 link_partner_pam4_adv_speeds;
+ u8 link_partner_pam4_adv_speeds;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
- u8 unused_0[7];
u8 valid;
};
@@ -3293,6 +3392,47 @@ struct hwrm_port_lpbk_qstats_output {
u8 valid;
};
+/* hwrm_port_ecn_qstats_input (size:256b/32B) */
+struct hwrm_port_ecn_qstats_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 ecn_stat_buf_size;
+ u8 flags;
+ #define PORT_ECN_QSTATS_REQ_FLAGS_UNUSED 0x0UL
+ #define PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ #define PORT_ECN_QSTATS_REQ_FLAGS_LAST PORT_ECN_QSTATS_REQ_FLAGS_COUNTER_MASK
+ u8 unused_0[3];
+ __le64 ecn_stat_host_addr;
+};
+
+/* hwrm_port_ecn_qstats_output (size:128b/16B) */
+struct hwrm_port_ecn_qstats_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 ecn_stat_buf_size;
+ u8 mark_en;
+ u8 unused_0[4];
+ u8 valid;
+};
+
+/* port_stats_ecn (size:512b/64B) */
+struct port_stats_ecn {
+ __le64 mark_cnt_cos0;
+ __le64 mark_cnt_cos1;
+ __le64 mark_cnt_cos2;
+ __le64 mark_cnt_cos3;
+ __le64 mark_cnt_cos4;
+ __le64 mark_cnt_cos5;
+ __le64 mark_cnt_cos6;
+ __le64 mark_cnt_cos7;
+};
+
/* hwrm_port_clr_stats_input (size:192b/24B) */
struct hwrm_port_clr_stats_input {
__le16 req_type;
@@ -3387,8 +3527,9 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_FLAGS_AUTONEG_LPBK_SUPPORTED 0x4UL
#define PORT_PHY_QCAPS_RESP_FLAGS_SHARED_PHY_CFG_SUPPORTED 0x8UL
#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET 0x10UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xe0UL
- #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 5
+ #define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED 0x20UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xc0UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 6
u8 port_cnt;
#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
#define PORT_PHY_QCAPS_RESP_PORT_CNT_1 0x1UL
@@ -5365,6 +5506,7 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID 0x80UL
#define RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID 0x100UL
#define RING_ALLOC_REQ_ENABLES_SCHQ_ID 0x200UL
+ #define RING_ALLOC_REQ_ENABLES_MPC_CHNLS_TYPE 0x400UL
u8 ring_type;
#define RING_ALLOC_REQ_RING_TYPE_L2_CMPL 0x0UL
#define RING_ALLOC_REQ_RING_TYPE_TX 0x1UL
@@ -5424,7 +5566,14 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_INT_MODE_MSIX 0x2UL
#define RING_ALLOC_REQ_INT_MODE_POLL 0x3UL
#define RING_ALLOC_REQ_INT_MODE_LAST RING_ALLOC_REQ_INT_MODE_POLL
- u8 unused_4[3];
+ u8 mpc_chnls_type;
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TCE 0x0UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RCE 0x1UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_TE_CFA 0x2UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_RE_CFA 0x3UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE 0x4UL
+ #define RING_ALLOC_REQ_MPC_CHNLS_TYPE_LAST RING_ALLOC_REQ_MPC_CHNLS_TYPE_PRIMATE
+ u8 unused_4[2];
__le64 cq_handle;
};
@@ -6661,7 +6810,7 @@ struct hwrm_cfa_vfr_alloc_output {
u8 valid;
};
-/* hwrm_cfa_vfr_free_input (size:384b/48B) */
+/* hwrm_cfa_vfr_free_input (size:448b/56B) */
struct hwrm_cfa_vfr_free_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -6669,6 +6818,9 @@ struct hwrm_cfa_vfr_free_input {
__le16 target_id;
__le64 resp_addr;
char vfr_name[32];
+ __le16 vf_id;
+ __le16 reserved;
+ u8 unused_0[4];
};
/* hwrm_cfa_vfr_free_output (size:128b/16B) */
@@ -6970,7 +7122,7 @@ struct ctx_hw_stats {
__le64 tpa_aborts;
};
-/* ctx_hw_stats_ext (size:1344b/168B) */
+/* ctx_hw_stats_ext (size:1408b/176B) */
struct ctx_hw_stats_ext {
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
@@ -6993,6 +7145,7 @@ struct ctx_hw_stats_ext {
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
};
/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
@@ -7065,16 +7218,16 @@ struct hwrm_stat_ctx_query_output {
__le64 tx_ucast_pkts;
__le64 tx_mcast_pkts;
__le64 tx_bcast_pkts;
- __le64 tx_err_pkts;
- __le64 tx_drop_pkts;
+ __le64 tx_discard_pkts;
+ __le64 tx_error_pkts;
__le64 tx_ucast_bytes;
__le64 tx_mcast_bytes;
__le64 tx_bcast_bytes;
__le64 rx_ucast_pkts;
__le64 rx_mcast_pkts;
__le64 rx_bcast_pkts;
- __le64 rx_err_pkts;
- __le64 rx_drop_pkts;
+ __le64 rx_discard_pkts;
+ __le64 rx_error_pkts;
__le64 rx_ucast_bytes;
__le64 rx_mcast_bytes;
__le64 rx_bcast_bytes;
@@ -7099,7 +7252,7 @@ struct hwrm_stat_ext_ctx_query_input {
u8 unused_0[3];
};
-/* hwrm_stat_ext_ctx_query_output (size:1472b/184B) */
+/* hwrm_stat_ext_ctx_query_output (size:1536b/192B) */
struct hwrm_stat_ext_ctx_query_output {
__le16 error_code;
__le16 req_type;
@@ -7126,6 +7279,7 @@ struct hwrm_stat_ext_ctx_query_output {
__le64 rx_tpa_pkt;
__le64 rx_tpa_bytes;
__le64 rx_tpa_errors;
+ __le64 rx_tpa_events;
u8 unused_0[7];
u8 valid;
};
@@ -7702,6 +7856,77 @@ struct hwrm_dbg_read_direct_output {
u8 valid;
};
+/* hwrm_dbg_qcaps_input (size:192b/24B) */
+struct hwrm_dbg_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ u8 unused_0[6];
+};
+
+/* hwrm_dbg_qcaps_output (size:192b/24B) */
+struct hwrm_dbg_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_component_disable_caps;
+ #define DBG_QCAPS_RESP_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM 0x1UL
+ __le32 flags;
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_NVM 0x1UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR 0x2UL
+ #define DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR 0x4UL
+ u8 unused_1[3];
+ u8 valid;
+};
+
+/* hwrm_dbg_qcfg_input (size:192b/24B) */
+struct hwrm_dbg_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 flags;
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_MASK 0x3UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_SFT 0
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_NVM 0x0UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_HOST_DDR 0x1UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR 0x2UL
+ #define DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_LAST DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR
+ __le32 coredump_component_disable_flags;
+ #define DBG_QCFG_REQ_COREDUMP_COMPONENT_DISABLE_FLAGS_NVRAM 0x1UL
+};
+
+/* hwrm_dbg_qcfg_output (size:256b/32B) */
+struct hwrm_dbg_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ u8 unused_0[2];
+ __le32 coredump_size;
+ __le32 flags;
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG 0x1UL
+ #define DBG_QCFG_RESP_FLAGS_UART_LOG_SECONDARY 0x2UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE 0x4UL
+ #define DBG_QCFG_RESP_FLAGS_FW_TRACE_SECONDARY 0x8UL
+ #define DBG_QCFG_RESP_FLAGS_DEBUG_NOTIFY 0x10UL
+ #define DBG_QCFG_RESP_FLAGS_JTAG_DEBUG 0x20UL
+ __le16 async_cmpl_ring;
+ u8 unused_2[2];
+ __le32 crashdump_size;
+ u8 unused_3[3];
+ u8 valid;
+};
+
/* coredump_segment_record (size:128b/16B) */
struct coredump_segment_record {
__le16 component_id;
@@ -8048,7 +8273,7 @@ struct hwrm_nvm_get_dev_info_input {
__le64 resp_addr;
};
-/* hwrm_nvm_get_dev_info_output (size:256b/32B) */
+/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
struct hwrm_nvm_get_dev_info_output {
__le16 error_code;
__le16 req_type;
@@ -8063,6 +8288,22 @@ struct hwrm_nvm_get_dev_info_output {
u8 nvm_cfg_ver_maj;
u8 nvm_cfg_ver_min;
u8 nvm_cfg_ver_upd;
+ u8 flags;
+ #define NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID 0x1UL
+ char pkg_name[16];
+ __le16 hwrm_fw_major;
+ __le16 hwrm_fw_minor;
+ __le16 hwrm_fw_build;
+ __le16 hwrm_fw_patch;
+ __le16 mgmt_fw_major;
+ __le16 mgmt_fw_minor;
+ __le16 mgmt_fw_build;
+ __le16 mgmt_fw_patch;
+ __le16 roce_fw_major;
+ __le16 roce_fw_minor;
+ __le16 roce_fw_build;
+ __le16 roce_fw_patch;
+ u8 unused_0[7];
u8 valid;
};
@@ -8381,6 +8622,16 @@ struct hwrm_selftest_irq_output {
u8 valid;
};
+/* db_push_info (size:64b/8B) */
+struct db_push_info {
+ u32 push_size_push_index;
+ #define DB_PUSH_INFO_PUSH_INDEX_MASK 0xffffffUL
+ #define DB_PUSH_INFO_PUSH_INDEX_SFT 0
+ #define DB_PUSH_INFO_PUSH_SIZE_MASK 0x1f000000UL
+ #define DB_PUSH_INFO_PUSH_SIZE_SFT 24
+ u32 reserved32;
+};
+
/* fw_status_reg (size:32b/4B) */
struct fw_status_reg {
u32 fw_status;
@@ -8393,6 +8644,32 @@ struct fw_status_reg {
#define FW_STATUS_REG_CRASHDUMP_ONGOING 0x40000UL
#define FW_STATUS_REG_CRASHDUMP_COMPLETE 0x80000UL
#define FW_STATUS_REG_SHUTDOWN 0x100000UL
-};
+ #define FW_STATUS_REG_CRASHED_NO_MASTER 0x200000UL
+};
+
+/* hcomm_status (size:64b/8B) */
+struct hcomm_status {
+ u32 sig_ver;
+ #define HCOMM_STATUS_VER_MASK 0xffUL
+ #define HCOMM_STATUS_VER_SFT 0
+ #define HCOMM_STATUS_VER_LATEST 0x1UL
+ #define HCOMM_STATUS_VER_LAST HCOMM_STATUS_VER_LATEST
+ #define HCOMM_STATUS_SIGNATURE_MASK 0xffffff00UL
+ #define HCOMM_STATUS_SIGNATURE_SFT 8
+ #define HCOMM_STATUS_SIGNATURE_VAL (0x484353UL << 8)
+ #define HCOMM_STATUS_SIGNATURE_LAST HCOMM_STATUS_SIGNATURE_VAL
+ u32 fw_status_loc;
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_MASK 0x3UL
+ #define HCOMM_STATUS_TRUE_ADDR_SPACE_SFT 0
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_PCIE_CFG 0x0UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_GRC 0x1UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR0 0x2UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1 0x3UL
+ #define HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_LAST HCOMM_STATUS_FW_STATUS_LOC_ADDR_SPACE_BAR1
+ #define HCOMM_STATUS_TRUE_OFFSET_MASK 0xfffffffcUL
+ #define HCOMM_STATUS_TRUE_OFFSET_SFT 2
+};
+
+#define HCOMM_STATUS_STRUCT_LOC 0x31001F0UL
#endif /* _BNXT_HSI_H_ */
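The hcomm_status block is meant to be discovered at the fixed GRC offset HCOMM_STATUS_STRUCT_LOC; a hedged sketch of validating it and locating the firmware status register, with an illustrative helper name:

static bool foo_hcomm_status_decode(const struct hcomm_status *hs,
				    u32 *addr_space, u32 *offset)
{
	if ((hs->sig_ver & HCOMM_STATUS_SIGNATURE_MASK) !=
	    HCOMM_STATUS_SIGNATURE_VAL)
		return false;	/* no "HCS" signature, block not present */
	*addr_space = hs->fw_status_loc & HCOMM_STATUS_TRUE_ADDR_SPACE_MASK;
	*offset = hs->fw_status_loc & HCOMM_STATUS_TRUE_OFFSET_MASK;
	return true;
}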
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index cc2ee4d0bd18..23b80aa171dd 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1029,7 +1029,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 84536292b031..f7f10cfb3476 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3009,10 +3009,10 @@ static int cnic_service_bnx2(void *data, void *status_blk)
return cnic_service_bnx2_queues(dev);
}
-static void cnic_service_bnx2_msix(unsigned long data)
+static void cnic_service_bnx2_msix(struct tasklet_struct *t)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
- struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_dev *dev = cp->dev;
cp->last_status_idx = cnic_service_bnx2_queues(dev);
@@ -3134,10 +3134,10 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
return last_status;
}
-static void cnic_service_bnx2x_bh(unsigned long data)
+static void cnic_service_bnx2x_bh(struct tasklet_struct *t)
{
- struct cnic_dev *dev = (struct cnic_dev *) data;
- struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_local *cp = from_tasklet(cp, t, cnic_irq_task);
+ struct cnic_dev *dev = cp->dev;
struct bnx2x *bp = netdev_priv(dev->netdev);
u32 status_idx, new_status_idx;
@@ -4458,8 +4458,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
cp->last_status_idx = cp->status_blk.bnx2->status_idx;
- tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
- (unsigned long) dev);
+ tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2_msix);
err = cnic_request_irq(dev);
if (err)
return err;
@@ -4868,8 +4867,7 @@ static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
struct cnic_eth_dev *ethdev = cp->ethdev;
int err = 0;
- tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
- (unsigned long) dev);
+ tasklet_setup(&cp->cnic_irq_task, cnic_service_bnx2x_bh);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
err = cnic_request_irq(dev);
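Both conversions in this file follow the same tasklet API migration: the callback now receives the tasklet itself and recovers its container with from_tasklet(), a container_of() wrapper, instead of casting an unsigned long back to a pointer. A self-contained sketch with illustrative foo_* names:

struct foo_priv {
	struct tasklet_struct irq_task;
};

static void foo_irq_task(struct tasklet_struct *t)
{
	struct foo_priv *priv = from_tasklet(priv, t, irq_task);

	/* bottom-half work using priv */
}

static void foo_init(struct foo_priv *priv)
{
	tasklet_setup(&priv->irq_task, foo_irq_task);	/* no data cookie needed */
}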
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 09fb9315d1ae..06f221c44802 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -102,14 +102,10 @@ bfa_cee_get_stats_isr(struct bfa_cee *cee, enum bfa_status status)
}
/**
- * bfa_cee_get_attr_isr()
+ * bfa_cee_reset_stats_isr - CEE ISR for reset-stats responses from f/w
*
- * @brief CEE ISR for reset-stats responses from f/w
- *
- * @param[in] cee - Pointer to the CEE module
- * status - Return status from the f/w
- *
- * @return void
+ * @cee: Pointer to the CEE module
+ * @status: Return status from the f/w
*/
static void
bfa_cee_reset_stats_isr(struct bfa_cee *cee, enum bfa_status status)
@@ -148,9 +144,12 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
- * bfa_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
+ * bfa_nw_cee_get_attr - Send the request to the f/w to fetch CEE attributes.
*
* @cee: Pointer to the CEE module data structure.
+ * @attr: buffer that receives the requested CEE attributes
+ * @cbfn: callback invoked when the attribute fetch completes
+ * @cbarg: argument passed back to @cbfn
*
* Return: status
*/
@@ -181,7 +180,9 @@ bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
}
/**
- * bfa_cee_isrs - Handles Mail-box interrupts for CEE module.
+ * bfa_cee_isr - Handles Mail-box interrupts for CEE module.
+ * @cbarg: callback argument; holds a pointer to the CEE module data structure.
+ * @m: pointer to the received mailbox message
*/
static void
@@ -210,6 +211,7 @@ bfa_cee_isr(void *cbarg, struct bfi_mbmsg *m)
/**
* bfa_cee_notify - CEE module heart-beat failure handler.
*
+ * @arg: notifier argument; holds a pointer to the CEE module data structure.
* @event: IOC event type
*/
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b9dd06b12945..cd933817a0b8 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -269,7 +269,7 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
break;
case IOC_E_PFFAILED:
- /* !!! fall through !!! */
+ fallthrough;
case IOC_E_HWERROR:
ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
@@ -365,7 +365,8 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
case IOC_E_PFFAILED:
case IOC_E_HWERROR:
bfa_ioc_hb_stop(ioc);
- /* !!! fall through !!! */
+ fallthrough;
+
case IOC_E_HBFAIL:
if (ioc->iocpf.auto_recover)
bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
@@ -1763,7 +1764,7 @@ bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
return BFI_IOC_IMG_VER_INCOMP;
}
-/**
+/*
* Returns TRUE if driver is willing to work with current smem f/w version.
*/
bool
@@ -2469,6 +2470,7 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
*
* @ioc: memory for IOC
* @bfa: driver instance structure
+ * @cbfn: callback function
*/
void
bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
@@ -2500,7 +2502,9 @@ bfa_nw_ioc_detach(struct bfa_ioc *ioc)
/**
* bfa_nw_ioc_pci_init - Setup IOC PCI properties.
*
+ * @ioc: memory for IOC
* @pcidev: PCI device information for this IOC
+ * @clscode: class code
*/
void
bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
@@ -2569,6 +2573,7 @@ bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
/**
* bfa_nw_ioc_mem_claim - Initialize IOC dma memory
*
+ * @ioc: memory for IOC
* @dm_kva: kernel virtual address of IOC dma memory
* @dm_pa: physical address of IOC dma memory
*/
@@ -2636,6 +2641,8 @@ bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
*
* @ioc: IOC instance
* @cmd: Mailbox command
+ * @cbfn: callback function
+ * @cbarg: arguments to callback
*
* Waits if mailbox is busy. Responsibility of caller to serialize
*/
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index cc80bbbefe87..7e4e831d720f 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3277,7 +3277,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
{
int err, mtu;
struct bnad *bnad = netdev_priv(netdev);
- u32 rx_count = 0, frame, new_frame;
+ u32 frame, new_frame;
mutex_lock(&bnad->conf_mutex);
@@ -3293,12 +3293,9 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
/* only when transition is over 4K */
if ((frame <= 4096 && new_frame > 4096) ||
(frame > 4096 && new_frame <= 4096))
- rx_count = bnad_reinit_rx(bnad);
+ bnad_reinit_rx(bnad);
}
- /* rx_count > 0 - new rx created
- * - Linux set err = 0 and return
- */
err = bnad_mtu_set(bnad, new_frame);
if (err)
err = -EBUSY;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 4f1b41569260..5de47f6fde5a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -7,6 +7,7 @@
#ifndef _MACB_H
#define _MACB_H
+#include <linux/clk.h>
#include <linux/phylink.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
@@ -365,6 +366,8 @@
#define MACB_ISR_RLE_SIZE 1
#define MACB_TXERR_OFFSET 6 /* EN TX frame corrupt from error interrupt */
#define MACB_TXERR_SIZE 1
+#define MACB_RM9200_TBRE_OFFSET 6 /* EN may send new frame interrupt (RM9200) */
+#define MACB_RM9200_TBRE_SIZE 1
#define MACB_TCOMP_OFFSET 7 /* Enable transmit complete interrupt */
#define MACB_TCOMP_SIZE 1
#define MACB_ISR_LINK_OFFSET 9 /* Enable link change interrupt */
@@ -1204,10 +1207,10 @@ struct macb {
phy_interface_t phy_interface;
- /* AT91RM9200 transmit */
- struct sk_buff *skb; /* holds skb until xmit interrupt completes */
- dma_addr_t skb_physaddr; /* phys addr from pci_map_single */
- int skb_length; /* saved skb length for pci_unmap_single */
+ /* AT91RM9200 transmit queue (1 on wire + 1 queued) */
+ struct macb_tx_skb rm9200_txq[2];
+ unsigned int rm9200_tx_tail;
+ unsigned int rm9200_tx_len;
unsigned int max_tx_length;
u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];
@@ -1298,4 +1301,14 @@ static inline bool gem_has_ptp(struct macb *bp)
return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
}
+/**
+ * struct macb_platform_data - platform data for MACB Ethernet used for PCI registration
+ * @pclk: platform clock
+ * @hclk: AHB clock
+ */
+struct macb_platform_data {
+ struct clk *pclk;
+ struct clk *hclk;
+};
+
#endif /* _MACB_H */
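The new rm9200_txq[2]/rm9200_tx_tail/rm9200_tx_len fields replace the single saved skb with a two-slot ring: one frame on the wire plus one queued behind it. A minimal userspace model of the index arithmetic the driver uses below, with illustrative names; the `& 1` masks work only because the ring has exactly two slots:

#include <assert.h>

/* Two-slot ring model: tail is the next free slot, len counts frames
 * still owned by the hardware, and the oldest in-flight frame sits at
 * (tail - len) & 1 -- the same arithmetic at91ether_start_xmit() and
 * at91ether_interrupt() use.
 */
struct demo_ring {
	unsigned int tail;
	unsigned int len;
};

/* Claim a slot for a new frame; caller must have checked len < 2. */
static unsigned int demo_push(struct demo_ring *r)
{
	unsigned int desc = r->tail;

	assert(r->len < 2);
	r->tail = (desc + 1) & 1;
	r->len++;
	return desc;
}

/* Retire the oldest completed frame; caller must have checked len > 0. */
static unsigned int demo_pop(struct demo_ring *r)
{
	unsigned int desc = (r->tail - r->len) & 1;

	assert(r->len > 0);
	r->len--;
	return desc;
}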
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 9179f7b0b900..883e47c5b1a7 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -23,7 +23,6 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
-#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
@@ -458,9 +457,9 @@ static void macb_init_buffers(struct macb *bp)
/**
* macb_set_tx_clk() - Set a clock to a new frequency
- * @clk Pointer to the clock to change
- * @rate New frequency in Hz
- * @dev Pointer to the struct net_device
+ * @clk: Pointer to the clock to change
+ * @speed: New frequency in Hz
+ * @dev: Pointer to the struct net_device
*/
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
@@ -1465,9 +1464,9 @@ static int macb_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void macb_hresp_error_task(unsigned long data)
+static void macb_hresp_error_task(struct tasklet_struct *t)
{
- struct macb *bp = (struct macb *)data;
+ struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
struct net_device *dev = bp->dev;
struct macb_queue *queue;
unsigned int q;
@@ -3909,6 +3908,7 @@ static int at91ether_start(struct macb *lp)
MACB_BIT(ISR_TUND) |
MACB_BIT(ISR_RLE) |
MACB_BIT(TCOMP) |
+ MACB_BIT(RM9200_TBRE) |
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
@@ -3925,6 +3925,7 @@ static void at91ether_stop(struct macb *lp)
MACB_BIT(ISR_TUND) |
MACB_BIT(ISR_RLE) |
MACB_BIT(TCOMP) |
+ MACB_BIT(RM9200_TBRE) |
MACB_BIT(ISR_ROVR) |
MACB_BIT(HRESP));
@@ -3994,24 +3995,34 @@ static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ unsigned long flags;
- if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
- netif_stop_queue(dev);
+ if (lp->rm9200_tx_len < 2) {
+ int desc = lp->rm9200_tx_tail;
/* Store packet information (to free when Tx completed) */
- lp->skb = skb;
- lp->skb_length = skb->len;
- lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
- skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
+ lp->rm9200_txq[desc].skb = skb;
+ lp->rm9200_txq[desc].size = skb->len;
+ lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
netdev_err(dev, "%s: DMA mapping error\n", __func__);
return NETDEV_TX_OK;
}
+ spin_lock_irqsave(&lp->lock, flags);
+
+ lp->rm9200_tx_tail = (desc + 1) & 1;
+ lp->rm9200_tx_len++;
+ if (lp->rm9200_tx_len > 1)
+ netif_stop_queue(dev);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
+
/* Set address of the data in the Transmit Address register */
- macb_writel(lp, TAR, lp->skb_physaddr);
+ macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
/* Set length of the packet in the Transmit Control register */
macb_writel(lp, TCR, skb->len);
@@ -4074,6 +4085,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
struct net_device *dev = dev_id;
struct macb *lp = netdev_priv(dev);
u32 intstatus, ctl;
+ unsigned int desc;
+ unsigned int qlen;
+ u32 tsr;
/* MAC Interrupt Status register indicates what interrupts are pending.
* It is automatically cleared once read.
@@ -4085,20 +4099,39 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
at91ether_rx(dev);
/* Transmit complete */
- if (intstatus & MACB_BIT(TCOMP)) {
+ if (intstatus & (MACB_BIT(TCOMP) | MACB_BIT(RM9200_TBRE))) {
/* The TCOM bit is set even if the transmission failed */
if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
dev->stats.tx_errors++;
- if (lp->skb) {
- dev_consume_skb_irq(lp->skb);
- lp->skb = NULL;
- dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
- lp->skb_length, DMA_TO_DEVICE);
+ spin_lock(&lp->lock);
+
+ tsr = macb_readl(lp, TSR);
+
+ /* we have three possibilities here:
+ * - all pending packets transmitted (TGO, implies BNQ)
+ * - only first packet transmitted (!TGO && BNQ)
+ * - two frames pending (!TGO && !BNQ)
+ * Note that TGO ("transmit go") is called "IDLE" on RM9200.
+ */
+ qlen = (tsr & MACB_BIT(TGO)) ? 0 :
+ (tsr & MACB_BIT(RM9200_BNQ)) ? 1 : 2;
+
+ while (lp->rm9200_tx_len > qlen) {
+ desc = (lp->rm9200_tx_tail - lp->rm9200_tx_len) & 1;
+ dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
+ lp->rm9200_txq[desc].skb = NULL;
+ dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
+ lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
dev->stats.tx_packets++;
- dev->stats.tx_bytes += lp->skb_length;
+ dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
+ lp->rm9200_tx_len--;
}
- netif_wake_queue(dev);
+
+ if (lp->rm9200_tx_len < 2 && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+
+ spin_unlock(&lp->lock);
}
/* Work-around for EMAC Errata section 41.3.1 */
@@ -4559,8 +4592,7 @@ static int macb_probe(struct platform_device *pdev)
goto err_out_unregister_mdio;
}
- tasklet_init(&bp->hresp_err_tasklet, macb_hresp_error_task,
- (unsigned long)bp);
+ tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
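macb_hresp_error_task() above moves from the old tasklet_init() convention, which smuggled the context through an unsigned long cast, to tasklet_setup()/from_tasklet(). A sketch of the pattern, assuming the 5.9+ tasklet API; the structure and field names are illustrative:

#include <linux/interrupt.h>

struct demo_priv {
	struct tasklet_struct err_tasklet;
	int pending;
};

/* The callback now receives the tasklet itself; from_tasklet() is a
 * type-safe container_of() wrapper that recovers the parent structure
 * from the embedded tasklet_struct.
 */
static void demo_err_task(struct tasklet_struct *t)
{
	struct demo_priv *priv = from_tasklet(priv, t, err_tasklet);

	priv->pending = 0;
}

static void demo_init(struct demo_priv *priv)
{
	/* Previously: tasklet_init(&priv->err_tasklet, fn, (unsigned long)priv); */
	tasklet_setup(&priv->err_tasklet, demo_err_task);
}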
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index cd7d0332cba3..353393dea639 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/**
- * Cadence GEM PCI wrapper.
+ * DOC: Cadence GEM PCI wrapper.
*
* Copyright (C) 2016 Cadence Design Systems - https://www.cadence.com
*
@@ -13,7 +13,6 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include "macb.h"
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 05a3d067c3fc..bbb453c6a5f7 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1246,6 +1246,8 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
/**
* xgmac_tx_timeout
* @dev : Pointer to net device structure
+ * @txqueue: index of the hung transmit queue
+ *
* Description: this function is called when a packet transmission fails to
* complete within a reasonable tmrate. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index 81ff9ac73f9a..9fd717b9cf69 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -86,7 +86,7 @@ EXPORT_SYMBOL(cavium_ptp_put);
/**
* cavium_ptp_adjfine() - Adjust ptp frequency
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @scaled_ppm: how much to adjust by, in parts per million, but with a
* 16 bit binary fractional field
*/
@@ -134,7 +134,7 @@ static int cavium_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
/**
* cavium_ptp_adjtime() - Adjust ptp time
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @delta: how much to adjust by, in nanosecs
*/
static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
@@ -155,7 +155,7 @@ static int cavium_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
/**
* cavium_ptp_gettime() - Get hardware clock time with adjustment
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @ts: timespec
*/
static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
@@ -177,7 +177,7 @@ static int cavium_ptp_gettime(struct ptp_clock_info *ptp_info,
/**
* cavium_ptp_settime() - Set hardware clock time. Reset adjustment
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @ts: timespec
*/
static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
@@ -199,7 +199,7 @@ static int cavium_ptp_settime(struct ptp_clock_info *ptp_info,
/**
* cavium_ptp_enable() - Request to enable or disable an ancillary feature.
- * @ptp: PTP clock info
+ * @ptp_info: PTP clock info
* @rq: request
* @on: is it on
*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 50b533ff58e6..2a6d1cadac9e 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -25,7 +25,8 @@
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
+#include "cn68xx_device.h"
#include "cn68xx_regs.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index e40c64b79f66..9ef172976b35 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -32,8 +32,8 @@
#define OCTNIC_MAX_SG MAX_SKB_FRAGS
/**
- * \brief Delete gather lists
- * @param lio per-network private data
+ * lio_delete_glists - Delete gather lists
+ * @lio: per-network private data
*/
void lio_delete_glists(struct lio *lio)
{
@@ -73,8 +73,10 @@ void lio_delete_glists(struct lio *lio)
}
/**
- * \brief Setup gather lists
- * @param lio per-network private data
+ * lio_setup_glists - Setup gather lists
+ * @oct: octeon_device
+ * @lio: per-network private data
+ * @num_iqs: count of iqs to allocate
*/
int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
@@ -521,12 +523,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
}
/**
- * \brief Setup output queue
- * @param oct octeon device
- * @param q_no which queue
- * @param num_descs how many descriptors
- * @param desc_size size of each descriptor
- * @param app_ctx application context
+ * octeon_setup_droq - Setup output queue
+ * @oct: octeon device
+ * @q_no: which queue
+ * @num_descs: how many descriptors
+ * @desc_size: size of each descriptor
+ * @app_ctx: application context
*/
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
int desc_size, void *app_ctx)
@@ -555,16 +557,17 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
return ret_val;
}
-/** Routine to push packets arriving on Octeon interface upto network layer.
- * @param oct_id - octeon device id.
- * @param skbuff - skbuff struct to be passed to network layer.
- * @param len - size of total data received.
- * @param rh - Control header associated with the packet
- * @param param - additional control data with the packet
- * @param arg - farg registered in droq_ops
+/**
+ * liquidio_push_packet - Routine to push packets arriving on Octeon interface up to the network layer.
+ * @octeon_id: octeon device id.
+ * @skbuff: skbuff struct to be passed to network layer.
+ * @len: size of total data received.
+ * @rh: Control header associated with the packet
+ * @param: additional control data with the packet
+ * @arg: farg registered in droq_ops
*/
static void
-liquidio_push_packet(u32 octeon_id __attribute__((unused)),
+liquidio_push_packet(u32 __maybe_unused octeon_id,
void *skbuff,
u32 len,
union octeon_rh *rh,
@@ -698,8 +701,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
}
/**
- * \brief wrapper for calling napi_schedule
- * @param param parameters to pass to napi_schedule
+ * napi_schedule_wrapper - wrapper for calling napi_schedule
+ * @param: parameters to pass to napi_schedule
*
* Used when scheduling on different CPUs
*/
@@ -711,8 +714,8 @@ static void napi_schedule_wrapper(void *param)
}
/**
- * \brief callback when receive interrupt occurs and we are in NAPI mode
- * @param arg pointer to octeon output queue
+ * liquidio_napi_drv_callback - callback when receive interrupt occurs and we are in NAPI mode
+ * @arg: pointer to octeon output queue
*/
static void liquidio_napi_drv_callback(void *arg)
{
@@ -737,9 +740,9 @@ static void liquidio_napi_drv_callback(void *arg)
}
/**
- * \brief Entry point for NAPI polling
- * @param napi NAPI structure
- * @param budget maximum number of items to process
+ * liquidio_napi_poll - Entry point for NAPI polling
+ * @napi: NAPI structure
+ * @budget: maximum number of items to process
*/
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
@@ -792,9 +795,11 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
}
/**
- * \brief Setup input and output queues
- * @param octeon_dev octeon device
- * @param ifidx Interface index
+ * liquidio_setup_io_queues - Setup input and output queues
+ * @octeon_dev: octeon device
+ * @ifidx: Interface index
+ * @num_iqs: input io queue count
+ * @num_oqs: output io queue count
*
* Note: Queues are with respect to the octeon device. Thus
* an input queue is for egress packets, and output queues
@@ -927,7 +932,7 @@ int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
}
irqreturn_t
-liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+liquidio_msix_intr_handler(int __maybe_unused irq, void *dev)
{
struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
struct octeon_device *oct = ioq_vector->oct_dev;
@@ -943,8 +948,8 @@ liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
}
/**
- * \brief Droq packet processor sceduler
- * @param oct octeon device
+ * liquidio_schedule_droq_pkt_handlers - Droq packet processor scheduler
+ * @oct: octeon device
*/
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
@@ -972,13 +977,12 @@ static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
}
/**
- * \brief Interrupt handler for octeon
- * @param irq unused
- * @param dev octeon device
+ * liquidio_legacy_intr_handler - Interrupt handler for octeon
+ * @irq: unused
+ * @dev: octeon device
*/
static
-irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
- void *dev)
+irqreturn_t liquidio_legacy_intr_handler(int __maybe_unused irq, void *dev)
{
struct octeon_device *oct = (struct octeon_device *)dev;
irqreturn_t ret;
@@ -999,8 +1003,9 @@ irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
}
/**
- * \brief Setup interrupt for octeon device
- * @param oct octeon device
+ * octeon_setup_interrupt - Setup interrupt for octeon device
+ * @oct: octeon device
+ * @num_ioqs: number of queues
*
* Enable interrupt in Octeon device as given in the PCI interrupt mask.
*/
@@ -1083,7 +1088,7 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
num_ioq_vectors = oct->num_msix_irqs;
- /** For PF, there is one non-ioq interrupt handler */
+ /* For PF, there is one non-ioq interrupt handler */
if (OCTEON_CN23XX_PF(oct)) {
num_ioq_vectors -= 1;
@@ -1126,13 +1131,13 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
dev_err(&oct->pci_dev->dev,
"Request_irq failed for MSIX interrupt Error: %d\n",
irqret);
- /** Freeing the non-ioq irq vector here . */
+ /* Freeing the non-ioq irq vector here . */
free_irq(msix_entries[num_ioq_vectors].vector,
oct);
while (i) {
i--;
- /** clearing affinity mask. */
+ /* clearing affinity mask. */
irq_set_affinity_hint(
msix_entries[i].vector,
NULL);
@@ -1197,8 +1202,9 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
}
/**
- * \brief Net device change_mtu
- * @param netdev network device
+ * liquidio_change_mtu - Net device change_mtu
+ * @netdev: network device
+ * @new_mtu: the new max transmit unit size
*/
int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
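The lio_core.c hunks above, like most of the liquidio changes that follow, convert Doxygen-style `\brief`/`@param` comments to kernel-doc. A minimal sketch of the target format, with illustrative names:

struct demo_ring;	/* illustrative forward declaration */

/**
 * demo_refill_ring - refill a receive ring with fresh buffers
 * @ring: ring to refill
 * @budget: maximum number of buffers to post
 *
 * The opening line is "name - summary"; every parameter gets an
 * "@name:" line; a blank line separates the header from the body.
 *
 * Return: number of buffers actually posted.
 */
int demo_refill_ring(struct demo_ring *ring, int budget);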
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8e0ed01e7f03..7d00d3a8ded4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -69,9 +69,9 @@ MODULE_PARM_DESC(console_bitmask,
"Bitmask indicating which consoles have debug output redirected to syslog.");
/**
- * \brief determines if a given console has debug enabled.
- * @param console console to check
- * @returns 1 = enabled. 0 otherwise
+ * octeon_console_debug_enabled - determines if a given console has debug enabled.
+ * @console: console to check
+ * Return: 1 = enabled. 0 otherwise
*/
static int octeon_console_debug_enabled(u32 console)
{
@@ -126,7 +126,7 @@ union tx_info {
} s;
};
-/** Octeon device properties to be used by the NIC module.
+/* Octeon device properties to be used by the NIC module.
* Each octeon device in the system will be represented
* by this structure in the NIC module.
*/
@@ -161,13 +161,13 @@ static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
-static void octeon_droq_bh(unsigned long pdev)
+static void octeon_droq_bh(struct tasklet_struct *t)
{
int q_no;
int reschedule = 0;
- struct octeon_device *oct = (struct octeon_device *)pdev;
- struct octeon_device_priv *oct_priv =
- (struct octeon_device_priv *)oct->priv;
+ struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
+ droq_tasklet);
+ struct octeon_device *oct = oct_priv->dev;
for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
@@ -222,8 +222,8 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
}
/**
- * \brief Forces all IO queues off on a given device
- * @param oct Pointer to Octeon device
+ * force_io_queues_off - Forces all IO queues off on a given device
+ * @oct: Pointer to Octeon device
*/
static void force_io_queues_off(struct octeon_device *oct)
{
@@ -238,8 +238,8 @@ static void force_io_queues_off(struct octeon_device *oct)
}
/**
- * \brief Cause device to go quiet so it can be safely removed/reset/etc
- * @param oct Pointer to Octeon device
+ * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
+ * @oct: Pointer to Octeon device
*/
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
@@ -283,8 +283,8 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
}
/**
- * \brief Cleanup PCI AER uncorrectable error status
- * @param dev Pointer to PCI device
+ * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
+ * @dev: Pointer to PCI device
*/
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
@@ -303,8 +303,8 @@ static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
/**
- * \brief Stop all PCI IO to a given device
- * @param dev Pointer to Octeon device
+ * stop_pci_io - Stop all PCI IO to a given device
+ * @oct: Pointer to Octeon device
*/
static void stop_pci_io(struct octeon_device *oct)
{
@@ -332,9 +332,9 @@ static void stop_pci_io(struct octeon_device *oct)
}
/**
- * \brief called when PCI error is detected
- * @param pdev Pointer to PCI device
- * @param state The current pci connection state
+ * liquidio_pcie_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
@@ -362,11 +362,10 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
}
/**
- * \brief mmio handler
- * @param pdev Pointer to PCI device
+ * liquidio_pcie_mmio_enabled - mmio handler
+ * @pdev: Pointer to PCI device
*/
-static pci_ers_result_t liquidio_pcie_mmio_enabled(
- struct pci_dev *pdev __attribute__((unused)))
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -376,14 +375,13 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(
}
/**
- * \brief called after the pci bus has been reset.
- * @param pdev Pointer to PCI device
+ * liquidio_pcie_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
*
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the octeon_resume routine.
*/
-static pci_ers_result_t liquidio_pcie_slot_reset(
- struct pci_dev *pdev __attribute__((unused)))
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -393,14 +391,14 @@ static pci_ers_result_t liquidio_pcie_slot_reset(
}
/**
- * \brief called when traffic can start flowing again.
- * @param pdev Pointer to PCI device
+ * liquidio_pcie_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
*
* This callback is called when the error recovery driver tells us that
* its OK to resume normal operation. Implementation resembles the
* second-half of the octeon_resume routine.
*/
-static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
+static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
/* Nothing to be done here. */
}
@@ -447,7 +445,7 @@ static struct pci_driver liquidio_pci_driver = {
};
/**
- * \brief register PCI driver
+ * liquidio_init_pci - register PCI driver
*/
static int liquidio_init_pci(void)
{
@@ -455,7 +453,7 @@ static int liquidio_init_pci(void)
}
/**
- * \brief unregister PCI driver
+ * liquidio_deinit_pci - unregister PCI driver
*/
static void liquidio_deinit_pci(void)
{
@@ -463,9 +461,9 @@ static void liquidio_deinit_pci(void)
}
/**
- * \brief Check Tx queue status, and take appropriate action
- * @param lio per-network private data
- * @returns 0 if full, number of queues woken up otherwise
+ * check_txq_status - Check Tx queue status, and take appropriate action
+ * @lio: per-network private data
+ * Return: 0 if full, number of queues woken up otherwise
*/
static inline int check_txq_status(struct lio *lio)
{
@@ -491,8 +489,8 @@ static inline int check_txq_status(struct lio *lio)
}
/**
- * \brief Print link information
- * @param netdev network device
+ * print_link_info - Print link information
+ * @netdev: network device
*/
static void print_link_info(struct net_device *netdev)
{
@@ -513,8 +511,8 @@ static void print_link_info(struct net_device *netdev)
}
/**
- * \brief Routine to notify MTU change
- * @param work work_struct data structure
+ * octnet_link_status_change - Routine to notify MTU change
+ * @work: work_struct data structure
*/
static void octnet_link_status_change(struct work_struct *work)
{
@@ -531,8 +529,8 @@ static void octnet_link_status_change(struct work_struct *work)
}
/**
- * \brief Sets up the mtu status change work
- * @param netdev network device
+ * setup_link_status_change_wq - Sets up the mtu status change work
+ * @netdev: network device
*/
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
@@ -563,9 +561,9 @@ static inline void cleanup_link_status_change_wq(struct net_device *netdev)
}
/**
- * \brief Update link status
- * @param netdev network device
- * @param ls link status structure
+ * update_link_status - Update link status
+ * @netdev: network device
+ * @ls: link status structure
*
* Called on receipt of a link status response from the core application to
* update each interface's link status.
@@ -663,10 +661,9 @@ static void lio_sync_octeon_time(struct work_struct *work)
}
/**
- * setup_sync_octeon_time_wq - Sets up the work to periodically update
- * local time to octeon firmware
+ * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
*
- * @netdev - network device which should send time update to firmware
+ * @netdev: network device which should send time update to firmware
**/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
@@ -690,10 +687,12 @@ static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
}
/**
- * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
- * to periodically update local time to octeon firmware
+ * cleanup_sync_octeon_time_wq - destroy wq
*
- * @netdev - network device which should send time update to firmware
+ * @netdev: network device which should send time update to firmware
+ *
+ * Stop scheduling and destroy the work created to periodically update local
+ * time to octeon firmware.
**/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
@@ -828,13 +827,12 @@ static int liquidio_watchdog(void *param)
}
/**
- * \brief PCI probe handler
- * @param pdev PCI device structure
- * @param ent unused
+ * liquidio_probe - PCI probe handler
+ * @pdev: PCI device structure
+ * @ent: unused
*/
static int
-liquidio_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent __attribute__((unused)))
+liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
struct octeon_device *oct_dev = NULL;
struct handshake *hs;
@@ -924,8 +922,8 @@ static bool fw_type_is_auto(void)
}
/**
- * \brief PCI FLR for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_flr - PCI FLR for each Octeon device.
+ * @oct: octeon device
*/
static void octeon_pci_flr(struct octeon_device *oct)
{
@@ -951,9 +949,8 @@ static void octeon_pci_flr(struct octeon_device *oct)
}
/**
- *\brief Destroy resources associated with octeon device
- * @param pdev PCI device structure
- * @param ent unused
+ * octeon_destroy_resources - Destroy resources associated with octeon device
+ * @oct: octeon device
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
@@ -1152,9 +1149,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
/**
- * \brief Send Rx control command
- * @param lio per-network private data
- * @param start_stop whether to start or stop
+ * send_rx_ctrl_cmd - Send Rx control command
+ * @lio: per-network private data
+ * @start_stop: whether to start or stop
*/
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
@@ -1210,9 +1207,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
}
/**
- * \brief Destroy NIC device interface
- * @param oct octeon device
- * @param ifidx which interface to destroy
+ * liquidio_destroy_nic_device - Destroy NIC device interface
+ * @oct: octeon device
+ * @ifidx: which interface to destroy
*
* Cleanup associated with each interface for an Octeon device when NIC
* module is being unloaded or if initialization fails during load.
@@ -1272,8 +1269,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
}
/**
- * \brief Stop complete NIC functionality
- * @param oct octeon device
+ * liquidio_stop_nic_module - Stop complete NIC functionality
+ * @oct: octeon device
*/
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
@@ -1313,8 +1310,8 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
}
/**
- * \brief Cleans up resources at unload time
- * @param pdev PCI device structure
+ * liquidio_remove - Cleans up resources at unload time
+ * @pdev: PCI device structure
*/
static void liquidio_remove(struct pci_dev *pdev)
{
@@ -1346,8 +1343,8 @@ static void liquidio_remove(struct pci_dev *pdev)
}
/**
- * \brief Identify the Octeon device and to map the BAR address space
- * @param oct octeon device
+ * octeon_chip_specific_setup - Identify the Octeon device and map the BAR address space
+ * @oct: octeon device
*/
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
@@ -1390,8 +1387,8 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
}
/**
- * \brief PCI initialization for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_os_setup - PCI initialization for each Octeon device.
+ * @oct: octeon device
*/
static int octeon_pci_os_setup(struct octeon_device *oct)
{
@@ -1414,8 +1411,8 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
}
/**
- * \brief Unmap and free network buffer
- * @param buf buffer
+ * free_netbuf - Unmap and free network buffer
+ * @buf: buffer
*/
static void free_netbuf(void *buf)
{
@@ -1434,8 +1431,8 @@ static void free_netbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer
- * @param buf buffer
+ * free_netsgbuf - Unmap and free gather buffer
+ * @buf: buffer
*/
static void free_netsgbuf(void *buf)
{
@@ -1474,8 +1471,8 @@ static void free_netsgbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer with response
- * @param buf buffer
+ * free_netsgbuf_with_resp - Unmap and free gather buffer with response
+ * @buf: buffer
*/
static void free_netsgbuf_with_resp(void *buf)
{
@@ -1518,9 +1515,9 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * \brief Adjust ptp frequency
- * @param ptp PTP clock info
- * @param ppb how much to adjust by, in parts-per-billion
+ * liquidio_ptp_adjfreq - Adjust ptp frequency
+ * @ptp: PTP clock info
+ * @ppb: how much to adjust by, in parts-per-billion
*/
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
@@ -1555,9 +1552,9 @@ static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
}
/**
- * \brief Adjust ptp time
- * @param ptp PTP clock info
- * @param delta how much to adjust by, in nanosecs
+ * liquidio_ptp_adjtime - Adjust ptp time
+ * @ptp: PTP clock info
+ * @delta: how much to adjust by, in nanosecs
*/
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
@@ -1572,9 +1569,9 @@ static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
/**
- * \brief Get hardware clock time, including any adjustment
- * @param ptp PTP clock info
- * @param ts timespec
+ * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
+ * @ptp: PTP clock info
+ * @ts: timespec
*/
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
struct timespec64 *ts)
@@ -1595,9 +1592,9 @@ static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
}
/**
- * \brief Set hardware clock time. Reset adjustment
- * @param ptp PTP clock info
- * @param ts timespec
+ * liquidio_ptp_settime - Set hardware clock time. Reset adjustment
+ * @ptp: PTP clock info
+ * @ts: timespec
*/
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts)
@@ -1618,22 +1615,22 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * \brief Check if PTP is enabled
- * @param ptp PTP clock info
- * @param rq request
- * @param on is it on
+ * liquidio_ptp_enable - Check if PTP is enabled
+ * @ptp: PTP clock info
+ * @rq: request
+ * @on: is it on
*/
static int
-liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
- struct ptp_clock_request *rq __attribute__((unused)),
- int on __attribute__((unused)))
+liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
+ struct ptp_clock_request __maybe_unused *rq,
+ int __maybe_unused on)
{
return -EOPNOTSUPP;
}
/**
- * \brief Open PTP clock source
- * @param netdev network device
+ * oct_ptp_open - Open PTP clock source
+ * @netdev: network device
*/
static void oct_ptp_open(struct net_device *netdev)
{
@@ -1665,8 +1662,8 @@ static void oct_ptp_open(struct net_device *netdev)
}
/**
- * \brief Init PTP clock
- * @param oct octeon device
+ * liquidio_ptp_init - Init PTP clock
+ * @oct: octeon device
*/
static void liquidio_ptp_init(struct octeon_device *oct)
{
@@ -1682,8 +1679,8 @@ static void liquidio_ptp_init(struct octeon_device *oct)
}
/**
- * \brief Load firmware to device
- * @param oct octeon device
+ * load_firmware - Load firmware to device
+ * @oct: octeon device
*
* Maps device to firmware filename, requests firmware, and downloads it
*/
@@ -1721,8 +1718,8 @@ static int load_firmware(struct octeon_device *oct)
}
/**
- * \brief Poll routine for checking transmit queue status
- * @param work work_struct data structure
+ * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
+ * @work: work_struct data structure
*/
static void octnet_poll_check_txq_status(struct work_struct *work)
{
@@ -1738,8 +1735,8 @@ static void octnet_poll_check_txq_status(struct work_struct *work)
}
/**
- * \brief Sets up the txq poll check
- * @param netdev network device
+ * setup_tx_poll_fn - Sets up the txq poll check
+ * @netdev: network device
*/
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
@@ -1771,8 +1768,8 @@ static inline void cleanup_tx_poll_fn(struct net_device *netdev)
}
/**
- * \brief Net device open for LiquidIO
- * @param netdev network device
+ * liquidio_open - Net device open for LiquidIO
+ * @netdev: network device
*/
static int liquidio_open(struct net_device *netdev)
{
@@ -1831,8 +1828,8 @@ static int liquidio_open(struct net_device *netdev)
}
/**
- * \brief Net device stop for LiquidIO
- * @param netdev network device
+ * liquidio_stop - Net device stop for LiquidIO
+ * @netdev: network device
*/
static int liquidio_stop(struct net_device *netdev)
{
@@ -1896,8 +1893,8 @@ static int liquidio_stop(struct net_device *netdev)
}
/**
- * \brief Converts a mask based on net device flags
- * @param netdev network device
+ * get_new_flags - Converts a mask based on net device flags
+ * @netdev: network device
*
* This routine generates a octnet_ifflags mask from the net device flags
* received from the OS.
@@ -1929,8 +1926,8 @@ static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
}
/**
- * \brief Net device set_multicast_list
- * @param netdev network device
+ * liquidio_set_mcast_list - Net device set_multicast_list
+ * @netdev: network device
*/
static void liquidio_set_mcast_list(struct net_device *netdev)
{
@@ -1977,8 +1974,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
}
/**
- * \brief Net device set_mac_address
- * @param netdev network device
+ * liquidio_set_mac - Net device set_mac_address
+ * @netdev: network device
+ * @p: pointer to sockaddr
*/
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
@@ -2096,10 +2094,9 @@ liquidio_get_stats64(struct net_device *netdev,
}
/**
- * \brief Handler for SIOCSHWTSTAMP ioctl
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
+ * @netdev: network device
+ * @ifr: interface request
*/
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
@@ -2154,10 +2151,10 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
}
/**
- * \brief ioctl handler
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * liquidio_ioctl - ioctl handler
+ * @netdev: network device
+ * @ifr: interface request
+ * @cmd: command
*/
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -2174,9 +2171,10 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
}
/**
- * \brief handle a Tx timestamp response
- * @param status response status
- * @param buf pointer to skb
+ * handle_timestamp - handle a Tx timestamp response
+ * @oct: octeon device
+ * @status: response status
+ * @buf: pointer to skb
*/
static void handle_timestamp(struct octeon_device *oct,
u32 status,
@@ -2217,10 +2215,12 @@ static void handle_timestamp(struct octeon_device *oct,
tx_buffer_free(skb);
}
-/* \brief Send a data packet that will be timestamped
- * @param oct octeon device
- * @param ndata pointer to network data
- * @param finfo pointer to private network data
+/**
+ * send_nic_timestamp_pkt - Send a data packet that will be timestamped
+ * @oct: octeon device
+ * @ndata: pointer to network data
+ * @finfo: pointer to private network data
+ * @xmit_more: more is coming
*/
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
@@ -2276,10 +2276,12 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
return retval;
}
-/** \brief Transmit networks packets to the Octeon interface
- * @param skbuff skbuff struct to be passed to network layer.
- * @param netdev pointer to network device
- * @returns whether the packet was transmitted to the device okay or not
+/**
+ * liquidio_xmit - Transmit networks packets to the Octeon interface
+ * @skb: skbuff struct to be passed to network layer.
+ * @netdev: pointer to network device
+ *
+ * Return: whether the packet was transmitted to the device okay or not
* (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/
static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -2524,8 +2526,10 @@ lio_xmit_failed:
return NETDEV_TX_OK;
}
-/** \brief Network device Tx timeout
- * @param netdev pointer to network device
+/**
+ * liquidio_tx_timeout - Network device Tx timeout
+ * @netdev: pointer to network device
+ * @txqueue: index of the hung transmit queue
*/
static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -2597,12 +2601,12 @@ static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
return ret;
}
-/** Sending command to enable/disable RX checksum offload
- * @param netdev pointer to network device
- * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
- * @param rx_cmd_bit OCTNET_CMD_RXCSUM_ENABLE/
- * OCTNET_CMD_RXCSUM_DISABLE
- * @returns SUCCESS or FAILURE
+/**
+ * liquidio_set_rxcsum_command - Sending command to enable/disable RX checksum offload
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
+ * Return: SUCCESS or FAILURE
*/
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
u8 rx_cmd)
@@ -2632,13 +2636,14 @@ static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
return ret;
}
-/** Sending command to add/delete VxLAN UDP port to firmware
- * @param netdev pointer to network device
- * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
- * @param vxlan_port VxLAN port to be added or deleted
- * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
+/**
+ * liquidio_vxlan_port_command - Sending command to add/delete VxLAN UDP port to firmware
+ * @netdev: pointer to network device
+ * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @vxlan_port: VxLAN port to be added or deleted
+ * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
* OCTNET_CMD_VXLAN_PORT_DEL
- * @returns SUCCESS or FAILURE
+ * Return: SUCCESS or FAILURE
*/
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
u16 vxlan_port, u8 vxlan_cmd_bit)
@@ -2698,10 +2703,11 @@ static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
},
};
-/** \brief Net device fix features
- * @param netdev pointer to network device
- * @param request features requested
- * @returns updated features list
+/**
+ * liquidio_fix_features - Net device fix features
+ * @netdev: pointer to network device
+ * @request: features requested
+ * Return: updated features list
*/
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
netdev_features_t request)
@@ -2737,9 +2743,10 @@ static netdev_features_t liquidio_fix_features(struct net_device *netdev,
return request;
}
-/** \brief Net device set features
- * @param netdev pointer to network device
- * @param features features to enable/disable
+/**
+ * liquidio_set_features - Net device set features
+ * @netdev: pointer to network device
+ * @features: features to enable/disable
*/
static int liquidio_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -3224,7 +3231,8 @@ static const struct net_device_ops lionetdevops = {
.ndo_get_port_parent_id = liquidio_get_port_parent_id,
};
-/** \brief Entry point for the liquidio module
+/**
+ * liquidio_init - Entry point for the liquidio module
*/
static int __init liquidio_init(void)
{
@@ -3307,8 +3315,8 @@ nic_info_err:
}
/**
- * \brief Setup network interfaces
- * @param octeon_dev octeon device
+ * setup_nic_devices - Setup network interfaces
+ * @octeon_dev: octeon device
*
* Called during init time for each device. It assumes the NIC
* is already up and running. The link information for each
@@ -3872,8 +3880,8 @@ static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
#endif
/**
- * \brief initialize the NIC
- * @param oct octeon device
+ * liquidio_init_nic_module - initialize the NIC
+ * @oct: octeon device
*
* This initialization routine is called once the Octeon device application is
* up and running
@@ -3928,9 +3936,10 @@ octnet_init_failure:
}
/**
- * \brief starter callback that invokes the remaining initialization work after
- * the NIC is up and running.
- * @param octptr work struct work_struct
+ * nic_starter - finish init
+ * @work: work struct work_struct
+ *
+ * starter callback that invokes the remaining initialization work after the NIC is up and running.
*/
static void nic_starter(struct work_struct *work)
{
@@ -4023,8 +4032,8 @@ octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
}
/**
- * \brief Device initialization for each Octeon device that is probed
- * @param octeon_dev octeon device
+ * octeon_device_init - Device initialization for each Octeon device that is probed
+ * @octeon_dev: octeon device
*/
static int octeon_device_init(struct octeon_device *octeon_dev)
{
@@ -4193,8 +4202,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Initialize the tasklet that handles output queue packet processing.*/
dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
- tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
- (unsigned long)octeon_dev);
+ tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
/* Setup the interrupt handler and record the INT SUM register address
*/
@@ -4298,16 +4306,17 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
complete(&handshake[octeon_dev->octeon_id].init);
atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
+ oct_priv->dev = octeon_dev;
return 0;
}
/**
- * \brief Debug console print function
- * @param octeon_dev octeon device
- * @param console_num console number
- * @param prefix first portion of line to display
- * @param suffix second portion of line to display
+ * octeon_dbg_console_print - Debug console print function
+ * @oct: octeon device
+ * @console_num: console number
+ * @prefix: first portion of line to display
+ * @suffix: second portion of line to display
*
* The OCTEON debug console outputs entire lines (excluding '\n').
* Normally, the line will be passed in the 'prefix' parameter.
@@ -4330,7 +4339,7 @@ static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
}
/**
- * \brief Exits the module
+ * liquidio_exit - Exits the module
*/
static void __exit liquidio_exit(void)
{
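Several handlers above swap `__attribute__((unused))` for the kernel's `__maybe_unused` macro, which reads naturally inside a parameter list. A sketch with an illustrative handler; only the annotation is the point here:

#include <linux/interrupt.h>

/* __maybe_unused (from <linux/compiler_attributes.h>) silences unused
 * warnings without promising the parameter is never used; here irq is
 * required by the irq_handler_t signature but not needed by the body.
 */
static irqreturn_t demo_intr(int __maybe_unused irq, void *dev_id)
{
	return dev_id ? IRQ_HANDLED : IRQ_NONE;
}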
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 8c5879e31240..103440f97bc8 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -99,8 +99,8 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
}
/**
- * \brief Cause device to go quiet so it can be safely removed/reset/etc
- * @param oct Pointer to Octeon device
+ * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
+ * @oct: Pointer to Octeon device
*/
static void pcierror_quiesce_device(struct octeon_device *oct)
{
@@ -143,8 +143,8 @@ static void pcierror_quiesce_device(struct octeon_device *oct)
}
/**
- * \brief Cleanup PCI AER uncorrectable error status
- * @param dev Pointer to PCI device
+ * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
+ * @dev: Pointer to PCI device
*/
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
@@ -163,8 +163,8 @@ static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
}
/**
- * \brief Stop all PCI IO to a given device
- * @param dev Pointer to Octeon device
+ * stop_pci_io - Stop all PCI IO to a given device
+ * @oct: Pointer to Octeon device
*/
static void stop_pci_io(struct octeon_device *oct)
{
@@ -205,9 +205,9 @@ static void stop_pci_io(struct octeon_device *oct)
}
/**
- * \brief called when PCI error is detected
- * @param pdev Pointer to PCI device
- * @param state The current pci connection state
+ * liquidio_pcie_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
*
* This function is called after a PCI bus error affecting
* this device has been detected.
@@ -256,8 +256,8 @@ static struct pci_driver liquidio_vf_pci_driver = {
};
/**
- * \brief Print link information
- * @param netdev network device
+ * print_link_info - Print link information
+ * @netdev: network device
*/
static void print_link_info(struct net_device *netdev)
{
@@ -278,8 +278,8 @@ static void print_link_info(struct net_device *netdev)
}
/**
- * \brief Routine to notify MTU change
- * @param work work_struct data structure
+ * octnet_link_status_change - Routine to notify MTU change
+ * @work: work_struct data structure
*/
static void octnet_link_status_change(struct work_struct *work)
{
@@ -296,8 +296,8 @@ static void octnet_link_status_change(struct work_struct *work)
}
/**
- * \brief Sets up the mtu status change work
- * @param netdev network device
+ * setup_link_status_change_wq - Sets up the mtu status change work
+ * @netdev: network device
*/
static int setup_link_status_change_wq(struct net_device *netdev)
{
@@ -328,9 +328,9 @@ static void cleanup_link_status_change_wq(struct net_device *netdev)
}
/**
- * \brief Update link status
- * @param netdev network device
- * @param ls link status structure
+ * update_link_status - Update link status
+ * @netdev: network device
+ * @ls: link status structure
*
* Called on receipt of a link status response from the core application to
* update each interface's link status.
@@ -374,13 +374,13 @@ static void update_link_status(struct net_device *netdev,
}
/**
- * \brief PCI probe handler
- * @param pdev PCI device structure
- * @param ent unused
+ * liquidio_vf_probe - PCI probe handler
+ * @pdev: PCI device structure
+ * @ent: unused
*/
static int
liquidio_vf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent __attribute__((unused)))
+ const struct pci_device_id __maybe_unused *ent)
{
struct octeon_device *oct_dev = NULL;
@@ -416,8 +416,8 @@ liquidio_vf_probe(struct pci_dev *pdev,
}
/**
- * \brief PCI FLR for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_flr - PCI FLR for each Octeon device.
+ * @oct: octeon device
*/
static void octeon_pci_flr(struct octeon_device *oct)
{
@@ -437,9 +437,8 @@ static void octeon_pci_flr(struct octeon_device *oct)
}
/**
- *\brief Destroy resources associated with octeon device
- * @param pdev PCI device structure
- * @param ent unused
+ * octeon_destroy_resources - Destroy resources associated with octeon device
+ * @oct: octeon device
*/
static void octeon_destroy_resources(struct octeon_device *oct)
{
@@ -592,9 +591,9 @@ static void octeon_destroy_resources(struct octeon_device *oct)
}
/**
- * \brief Send Rx control command
- * @param lio per-network private data
- * @param start_stop whether to start or stop
+ * send_rx_ctrl_cmd - Send Rx control command
+ * @lio: per-network private data
+ * @start_stop: whether to start or stop
*/
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
@@ -644,9 +643,9 @@ static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
}
/**
- * \brief Destroy NIC device interface
- * @param oct octeon device
- * @param ifidx which interface to destroy
+ * liquidio_destroy_nic_device - Destroy NIC device interface
+ * @oct: octeon device
+ * @ifidx: which interface to destroy
*
* Cleanup associated with each interface for an Octeon device when NIC
* module is being unloaded or if initialization fails during load.
@@ -704,8 +703,8 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
}
/**
- * \brief Stop complete NIC functionality
- * @param oct octeon device
+ * liquidio_stop_nic_module - Stop complete NIC functionality
+ * @oct: octeon device
*/
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
@@ -737,8 +736,8 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
}
/**
- * \brief Cleans up resources at unload time
- * @param pdev PCI device structure
+ * liquidio_vf_remove - Cleans up resources at unload time
+ * @pdev: PCI device structure
*/
static void liquidio_vf_remove(struct pci_dev *pdev)
{
@@ -763,8 +762,8 @@ static void liquidio_vf_remove(struct pci_dev *pdev)
}
/**
- * \brief PCI initialization for each Octeon device.
- * @param oct octeon device
+ * octeon_pci_os_setup - PCI initialization for each Octeon device.
+ * @oct: octeon device
*/
static int octeon_pci_os_setup(struct octeon_device *oct)
{
@@ -792,8 +791,8 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
}
/**
- * \brief Unmap and free network buffer
- * @param buf buffer
+ * free_netbuf - Unmap and free network buffer
+ * @buf: buffer
*/
static void free_netbuf(void *buf)
{
@@ -812,8 +811,8 @@ static void free_netbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer
- * @param buf buffer
+ * free_netsgbuf - Unmap and free gather buffer
+ * @buf: buffer
*/
static void free_netsgbuf(void *buf)
{
@@ -853,8 +852,8 @@ static void free_netsgbuf(void *buf)
}
/**
- * \brief Unmap and free gather buffer with response
- * @param buf buffer
+ * free_netsgbuf_with_resp - Unmap and free gather buffer with response
+ * @buf: buffer
*/
static void free_netsgbuf_with_resp(void *buf)
{
@@ -897,8 +896,8 @@ static void free_netsgbuf_with_resp(void *buf)
}
/**
- * \brief Net device open for LiquidIO
- * @param netdev network device
+ * liquidio_open - Net device open for LiquidIO
+ * @netdev: network device
*/
static int liquidio_open(struct net_device *netdev)
{
@@ -941,8 +940,8 @@ static int liquidio_open(struct net_device *netdev)
}
/**
- * \brief Net device stop for LiquidIO
- * @param netdev network device
+ * liquidio_stop - Net device stop for LiquidIO
+ * @netdev: network device
*/
static int liquidio_stop(struct net_device *netdev)
{
@@ -991,8 +990,8 @@ static int liquidio_stop(struct net_device *netdev)
}
/**
- * \brief Converts a mask based on net device flags
- * @param netdev network device
+ * get_new_flags - Converts a mask based on net device flags
+ * @netdev: network device
*
* This routine generates a octnet_ifflags mask from the net device flags
* received from the OS.
@@ -1060,8 +1059,8 @@ static void liquidio_set_uc_list(struct net_device *netdev)
}
/**
- * \brief Net device set_multicast_list
- * @param netdev network device
+ * liquidio_set_mcast_list - Net device set_multicast_list
+ * @netdev: network device
*/
static void liquidio_set_mcast_list(struct net_device *netdev)
{
@@ -1110,8 +1109,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
}
/**
- * \brief Net device set_mac_address
- * @param netdev network device
+ * liquidio_set_mac - Net device set_mac_address
+ * @netdev: network device
+ * @p: opaque pointer to sockaddr
*/
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
@@ -1229,10 +1229,9 @@ liquidio_get_stats64(struct net_device *netdev,
}
/**
- * \brief Handler for SIOCSHWTSTAMP ioctl
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
+ * @netdev: network device
+ * @ifr: interface request
*/
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
@@ -1287,10 +1286,10 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
}
/**
- * \brief ioctl handler
- * @param netdev network device
- * @param ifr interface request
- * @param cmd command
+ * liquidio_ioctl - ioctl handler
+ * @netdev: network device
+ * @ifr: interface request
+ * @cmd: command
*/
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -1339,10 +1338,10 @@ static void handle_timestamp(struct octeon_device *oct, u32 status, void *buf)
tx_buffer_free(skb);
}
-/* \brief Send a data packet that will be timestamped
- * @param oct octeon device
- * @param ndata pointer to network data
- * @param finfo pointer to private network data
+/* send_nic_timestamp_pkt - Send a data packet that will be timestamped
+ * @oct: octeon device
+ * @ndata: pointer to network data
+ * @finfo: pointer to private network data
*/
static int send_nic_timestamp_pkt(struct octeon_device *oct,
struct octnic_data_pkt *ndata,
@@ -1393,9 +1392,10 @@ static int send_nic_timestamp_pkt(struct octeon_device *oct,
return retval;
}
-/** \brief Transmit networks packets to the Octeon interface
- * @param skbuff skbuff struct to be passed to network layer.
- * @param netdev pointer to network device
+/**
+ * liquidio_xmit - Transmit networks packets to the Octeon interface
+ * @skb: skbuff struct to be passed to network layer.
+ * @netdev: pointer to network device
* @returns whether the packet was transmitted to the device okay or not
* (NETDEV_TX_OK or NETDEV_TX_BUSY)
*/
@@ -1623,8 +1623,10 @@ lio_xmit_failed:
return NETDEV_TX_OK;
}
-/** \brief Network device Tx timeout
- * @param netdev pointer to network device
+/**
+ * liquidio_tx_timeout - Network device Tx timeout
+ * @netdev: pointer to network device
+ * @txqueue: index of the hung transmit queue
*/
static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -1917,8 +1919,8 @@ nic_info_err:
}
/**
- * \brief Setup network interfaces
- * @param octeon_dev octeon device
+ * setup_nic_devices - Setup network interfaces
+ * @octeon_dev: octeon device
*
* Called during init time for each device. It assumes the NIC
* is already up and running. The link information for each
@@ -2229,8 +2231,8 @@ setup_nic_dev_done:
}
/**
- * \brief initialize the NIC
- * @param oct octeon device
+ * liquidio_init_nic_module - initialize the NIC
+ * @oct: octeon device
*
* This initialization routine is called once the Octeon device application is
* up and running
@@ -2270,8 +2272,8 @@ octnet_init_failure:
}
/**
- * \brief Device initialization for each Octeon device that is probed
- * @param octeon_dev octeon device
+ * octeon_device_init - Device initialization for each Octeon device that is probed
+ * @oct: octeon device
*/
static int octeon_device_init(struct octeon_device *oct)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 0d2831d10f65..28feabec8fbb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -15,7 +15,7 @@
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more details.
***********************************************************************/
-/**
+/*
* @file octeon_console.c
*/
#include <linux/moduleparam.h>
@@ -131,7 +131,7 @@ struct octeon_pci_console_desc {
/* Implicit storage for console_addr_array */
};
-/**
+/*
* This function is the implementation of the get macros defined
* for individual structure members. The argument are generated
* by the macros inorder to read only the needed memory.
@@ -160,7 +160,7 @@ static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct,
}
}
-/**
+/*
* This function retrieves the string name of a named block. It is
* more complicated than a simple memcpy() since the named block
* descriptor may not be directly accessible.
@@ -182,7 +182,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
/* See header file for descriptions of functions */
-/**
+/*
* Check the version information on the bootmem descriptor
*
* @param exact_match
@@ -323,7 +323,7 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
return result;
}
-/**
+/*
* Find a named block on the remote Octeon
*
* @param name Name of block to find
@@ -707,7 +707,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num,
return ret;
}
-/**
+/*
* Removes all consoles
*
* @param oct octeon device
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index ac32facaa427..387a57cbfb73 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1307,7 +1307,7 @@ struct octeon_config *octeon_get_conf(struct octeon_device *oct)
/* scratch register address is same in all the OCT-II and CN70XX models */
#define CNXX_SLI_SCRATCH1 0x3C0
-/** Get the octeon device pointer.
+/* Get the octeon device pointer.
* @param octeon_id - The id for which the octeon device pointer is required.
* @return Success: Octeon device pointer.
* @return Failure: NULL.
@@ -1324,7 +1324,7 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
{
u64 val64;
unsigned long flags;
- u32 val32, addrhi;
+ u32 addrhi;
spin_lock_irqsave(&oct->pci_win_lock, flags);
@@ -1339,10 +1339,10 @@ u64 lio_pci_readq(struct octeon_device *oct, u64 addr)
writel(addrhi, oct->reg_list.pci_win_rd_addr_hi);
/* Read back to preserve ordering of writes */
- val32 = readl(oct->reg_list.pci_win_rd_addr_hi);
+ readl(oct->reg_list.pci_win_rd_addr_hi);
writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo);
- val32 = readl(oct->reg_list.pci_win_rd_addr_lo);
+ readl(oct->reg_list.pci_win_rd_addr_lo);
val64 = readq(oct->reg_list.pci_win_rd_data);
@@ -1355,7 +1355,6 @@ void lio_pci_writeq(struct octeon_device *oct,
u64 val,
u64 addr)
{
- u32 val32;
unsigned long flags;
spin_lock_irqsave(&oct->pci_win_lock, flags);
@@ -1365,7 +1364,7 @@ void lio_pci_writeq(struct octeon_device *oct,
/* The write happens when the LSB is written. So write MSB first. */
writel(val >> 32, oct->reg_list.pci_win_wr_data_hi);
/* Read the MSB to ensure ordering of writes. */
- val32 = readl(oct->reg_list.pci_win_wr_data_hi);
+ readl(oct->reg_list.pci_win_wr_data_hi);
writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo);
@@ -1411,7 +1410,7 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
return ret;
}
-/** Get the octeon id assigned to the octeon device passed as argument.
+/* Get the octeon id assigned to the octeon device passed as argument.
* This function is exported to other modules.
* @param dev - octeon device pointer passed as a void *.
* @return octeon device id
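
The lio_pci_readq()/lio_pci_writeq() hunks above drop the unused val32 variable
but keep the readl() itself: the read-back is what forces the preceding posted
writel() to complete before the next access. A minimal sketch of the pattern,
with hypothetical register offsets:

static void example_write64(void __iomem *regs, u64 val)
{
	/* MMIO writes are posted; the read-back flushes them in order. */
	writel(upper_32_bits(val), regs + EXAMPLE_REG_HI);
	readl(regs + EXAMPLE_REG_HI);	/* value intentionally discarded */
	writel(lower_32_bits(val), regs + EXAMPLE_REG_LO);
}
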
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 017169023cca..d4080bddcb6b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -280,13 +280,10 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->recv_buf_list = (struct octeon_recv_buffer *)
- vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
- numa_node);
+ droq->recv_buf_list = vzalloc_node(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE),
+ numa_node);
if (!droq->recv_buf_list)
- droq->recv_buf_list = (struct octeon_recv_buffer *)
- vzalloc(array_size(droq->max_count,
- OCT_DROQ_RECVBUF_SIZE));
+ droq->recv_buf_list = vzalloc(array_size(droq->max_count, OCT_DROQ_RECVBUF_SIZE));
if (!droq->recv_buf_list) {
dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
goto init_droq_fail;
@@ -777,7 +774,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
return 0;
}
-/**
+/*
* Utility function to poll for packets. check_hw_for_packets must be
* called before calling this routine.
*/
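
The octeon_init_droq() hunk also drops the casts on the vzalloc_node() and
vzalloc() return values, since void * converts implicitly in C. The
NUMA-local-then-global fallback it keeps is a common allocation pattern; a
sketch under hypothetical names:

static void *example_alloc_ring(size_t count, size_t elem_size, int numa_node)
{
	/* Prefer memory on the device's NUMA node, but any node
	 * beats failing the allocation outright.
	 */
	void *buf = vzalloc_node(array_size(count, elem_size), numa_node);

	if (!buf)
		buf = vzalloc(array_size(count, elem_size));
	return buf;
}
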
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 614d07be7181..ad685f5d0a13 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -28,7 +28,7 @@
/**
* octeon_mbox_read:
- * @oct: Pointer mailbox
+ * @mbox: Pointer mailbox
*
* Reads the 8-bytes of data from the mbox register
* Writes back the acknowledgement indicating completion of read
@@ -285,7 +285,8 @@ static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
}
/**
- *octeon_mbox_process_message:
+ * octeon_mbox_process_message - process the received mbox message
+ * @mbox: mailbox
*
* Process the received mbox message.
*/
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index 073d0647b439..5b4cb725f60f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -39,6 +39,7 @@ struct octeon_device_priv {
/** Tasklet structures for this device. */
struct tasklet_struct droq_tasklet;
unsigned long napi_mask;
+ struct octeon_device *dev;
};
/** This structure is used by NIC driver to store information required
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 4c85ae643b7b..7ccab36143c1 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -22,6 +22,7 @@
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
+#include "octeon_mem_ops.h"
#define MEMOPS_IDX BAR1_INDEX_DYNAMIC_MAP
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 6cb2162a75d4..5e50bb19bf26 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -315,9 +315,9 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
netif_wake_queue(p->netdev);
}
-static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
+static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
{
- struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
+ struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
octeon_mgmt_clean_tx_buffers(p);
octeon_mgmt_enable_tx_irq(p);
}
@@ -1491,8 +1491,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
skb_queue_head_init(&p->tx_list);
skb_queue_head_init(&p->rx_list);
- tasklet_init(&p->tx_clean_tasklet,
- octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
+ tasklet_setup(&p->tx_clean_tasklet,
+ octeon_mgmt_clean_tx_tasklet);
netdev->priv_flags |= IFF_UNICAST_FLT;
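
The octeon_mgmt conversion above is one instance of the tree-wide tasklet API
change: tasklet_setup() replaces tasklet_init(), and the callback recovers its
containing object with from_tasklet(), a container_of() wrapper, instead of
casting an unsigned long argument. A minimal sketch with hypothetical names:

struct example_priv {
	struct tasklet_struct tx_tasklet;
	/* ... */
};

static void example_tx_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() on the embedded member. */
	struct example_priv *p = from_tasklet(p, t, tx_tasklet);

	/* ... process p ... */
}

static void example_init(struct example_priv *p)
{
	tasklet_setup(&p->tx_tasklet, example_tx_tasklet);
}
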
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 063e560d9c1b..f3b7b443f964 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -985,9 +985,9 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
*
* As of now only CQ errors are handled
*/
-static void nicvf_handle_qs_err(unsigned long data)
+static void nicvf_handle_qs_err(struct tasklet_struct *t)
{
- struct nicvf *nic = (struct nicvf *)data;
+ struct nicvf *nic = from_tasklet(nic, t, qs_err_task);
struct queue_set *qs = nic->qs;
int qidx;
u64 status;
@@ -1493,12 +1493,10 @@ int nicvf_open(struct net_device *netdev)
}
/* Init tasklet for handling Qset err interrupt */
- tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
- (unsigned long)nic);
+ tasklet_setup(&nic->qs_err_task, nicvf_handle_qs_err);
/* Init RBDR tasklet which will refill RBDR */
- tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
- (unsigned long)nic);
+ tasklet_setup(&nic->rbdr_task, nicvf_rbdr_task);
INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
/* Configure CPI algorithm */
@@ -2067,8 +2065,8 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
mode |= BGX_XCAST_MCAST_FILTER;
/* here we need to copy mc addrs */
if (netdev_mc_count(netdev)) {
- mc_list = kmalloc(offsetof(typeof(*mc_list),
- mc[netdev_mc_count(netdev)]),
+ mc_list = kmalloc(struct_size(mc_list, mc,
+ netdev_mc_count(netdev)),
GFP_ATOMIC);
if (unlikely(!mc_list))
return;
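
The kmalloc() hunk above swaps the open-coded offsetof(..., mc[n]) sizing for
struct_size(), which computes the size of a structure with a trailing flexible
array member and saturates instead of wrapping on overflow. A sketch of the
idiom with a hypothetical structure:

struct example_list {
	unsigned int count;
	u64 items[];		/* flexible array member */
};

static struct example_list *example_alloc(unsigned int n, gfp_t gfp)
{
	/* struct_size(p, items, n) == sizeof(*p) + n * sizeof(p->items[0]),
	 * saturating to SIZE_MAX on overflow so the allocation fails cleanly.
	 */
	struct example_list *p = kmalloc(struct_size(p, items, n), gfp);

	if (p)
		p->count = n;
	return p;
}
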
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index a45223f0cca5..7a141ce32e86 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -460,9 +460,9 @@ void nicvf_rbdr_work(struct work_struct *work)
}
/* In Softirq context, alloc rcv buffers in atomic mode */
-void nicvf_rbdr_task(unsigned long data)
+void nicvf_rbdr_task(struct tasklet_struct *t)
{
- struct nicvf *nic = (struct nicvf *)data;
+ struct nicvf *nic = from_tasklet(nic, t, rbdr_task);
nicvf_refill_rbdr(nic, GFP_ATOMIC);
if (nic->rb_alloc_fail) {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 2460451fc48f..8453defc296c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -348,7 +348,7 @@ void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic,
struct cqe_rx_t *cqe_rx, bool xdp);
-void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_task(struct tasklet_struct *t);
void nicvf_rbdr_work(struct work_struct *work);
void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index f6f3ef9a93cf..87cc0ef68b31 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -134,4 +134,6 @@ config CHELSIO_LIB
help
Common library for Chelsio drivers.
+source "drivers/net/ethernet/chelsio/inline_crypto/Kconfig"
+
endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
index c0f978d2e8a7..1a6fd8b2bb7d 100644
--- a/drivers/net/ethernet/chelsio/Makefile
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_CHELSIO_T4) += cxgb4/
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
obj-$(CONFIG_CHELSIO_LIB) += libcxgb/
+obj-$(CONFIG_CHELSIO_INLINE_CRYPTO) += inline_crypto/
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 99736796e1a0..0e4a0f413960 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -997,17 +997,17 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_pdev;
}
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
pci_using_dac = 1;
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("%s: unable to obtain 64-bit DMA for "
- "consistent allocations\n", pci_name(pdev));
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
+ pci_name(pdev));
err = -ENODEV;
goto out_disable_pdev;
}
- } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+ } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
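
The cxgb2 probe hunk is part of the migration from the legacy PCI DMA wrappers
to the generic DMA API: pci_set_dma_mask(pdev, ...) becomes
dma_set_mask(&pdev->dev, ...), pci_alloc_consistent() becomes
dma_alloc_coherent() with an explicit GFP flag, and PCI_DMA_FROMDEVICE /
PCI_DMA_TODEVICE map to DMA_FROM_DEVICE / DMA_TO_DEVICE. A sketch of the usual
probe-time shape (dma_set_mask_and_coherent() combines the two calls; names
hypothetical):

static int example_probe_dma(struct pci_dev *pdev)
{
	int err;

	/* Try 64-bit DMA first, then fall back to 32-bit. */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "no usable DMA configuration\n");
	return err;
}
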
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 47b5c8e2104b..2d9c2b5a690a 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -239,8 +239,10 @@ struct sched {
unsigned int num; /* num skbs in per port queues */
struct sched_port p[MAX_NPORTS];
struct tasklet_struct sched_tsk;/* tasklet used to run scheduler */
+ struct sge *sge;
};
-static void restart_sched(unsigned long);
+
+static void restart_sched(struct tasklet_struct *t);
/*
@@ -378,7 +380,8 @@ static int tx_sched_init(struct sge *sge)
return -ENOMEM;
pr_debug("tx_sched_init\n");
- tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+ tasklet_setup(&s->sched_tsk, restart_sched);
+ s->sge = sge;
sge->tx_sched = s;
for (i = 0; i < MAX_NPORTS; i++) {
@@ -509,9 +512,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
while (q->credits--) {
struct freelQ_ce *ce = &q->centries[cidx];
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
dev_kfree_skb(ce->skb);
ce->skb = NULL;
if (++cidx == q->size)
@@ -529,8 +531,8 @@ static void free_rx_resources(struct sge *sge)
if (sge->respQ.entries) {
size = sizeof(struct respQ_e) * sge->respQ.size;
- pci_free_consistent(pdev, size, sge->respQ.entries,
- sge->respQ.dma_addr);
+ dma_free_coherent(&pdev->dev, size, sge->respQ.entries,
+ sge->respQ.dma_addr);
}
for (i = 0; i < SGE_FREELQ_N; i++) {
@@ -542,8 +544,8 @@ static void free_rx_resources(struct sge *sge)
}
if (q->entries) {
size = sizeof(struct freelQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
+ dma_free_coherent(&pdev->dev, size, q->entries,
+ q->dma_addr);
}
}
}
@@ -564,7 +566,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
q->size = p->freelQ_size[i];
q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
size = sizeof(struct freelQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ q->entries = dma_alloc_coherent(&pdev->dev, size,
+ &q->dma_addr, GFP_KERNEL);
if (!q->entries)
goto err_no_mem;
@@ -601,7 +604,8 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
sge->respQ.credits = 0;
size = sizeof(struct respQ_e) * sge->respQ.size;
sge->respQ.entries =
- pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+ dma_alloc_coherent(&pdev->dev, size, &sge->respQ.dma_addr,
+ GFP_KERNEL);
if (!sge->respQ.entries)
goto err_no_mem;
return 0;
@@ -624,9 +628,10 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
ce = &q->centries[cidx];
while (n--) {
if (likely(dma_unmap_len(ce, dma_len))) {
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
dma_unmap_len(ce, dma_len),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
if (q->sop)
q->sop = 0;
}
@@ -663,8 +668,8 @@ static void free_tx_resources(struct sge *sge)
}
if (q->entries) {
size = sizeof(struct cmdQ_e) * q->size;
- pci_free_consistent(pdev, size, q->entries,
- q->dma_addr);
+ dma_free_coherent(&pdev->dev, size, q->entries,
+ q->dma_addr);
}
}
}
@@ -689,7 +694,8 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
q->stop_thres = 0;
spin_lock_init(&q->lock);
size = sizeof(struct cmdQ_e) * q->size;
- q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
+ q->entries = dma_alloc_coherent(&pdev->dev, size,
+ &q->dma_addr, GFP_KERNEL);
if (!q->entries)
goto err_no_mem;
@@ -837,8 +843,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
break;
skb_reserve(skb, q->dma_offset);
- mapping = pci_map_single(pdev, skb->data, dma_len,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&pdev->dev, skb->data, dma_len,
+ DMA_FROM_DEVICE);
skb_reserve(skb, sge->rx_pkt_pad);
ce->skb = skb;
@@ -1049,15 +1055,15 @@ static inline struct sk_buff *get_packet(struct adapter *adapter,
goto use_orig_buf;
skb_put(skb, len);
- pci_dma_sync_single_for_cpu(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(ce->skb, skb->data, len);
- pci_dma_sync_single_for_device(pdev,
- dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len),
+ DMA_FROM_DEVICE);
recycle_fl_buf(fl, fl->cidx);
return skb;
}
@@ -1068,8 +1074,8 @@ use_orig_buf:
return NULL;
}
- pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
skb = ce->skb;
prefetch(skb->data);
@@ -1091,8 +1097,9 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
struct freelQ_ce *ce = &fl->centries[fl->cidx];
struct sk_buff *skb = ce->skb;
- pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
- dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ dma_unmap_addr(ce, dma_addr),
+ dma_unmap_len(ce, dma_len), DMA_FROM_DEVICE);
pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
@@ -1209,8 +1216,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
e = e1 = &q->entries[pidx];
ce = &q->centries[pidx];
- mapping = pci_map_single(adapter->pdev, skb->data,
- skb_headlen(skb), PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&adapter->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
desc_mapping = mapping;
desc_len = skb_headlen(skb);
@@ -1301,9 +1308,10 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
* Called from tasklet. Checks the scheduler for any
* pending skbs that can be sent.
*/
-static void restart_sched(unsigned long arg)
+static void restart_sched(struct tasklet_struct *t)
{
- struct sge *sge = (struct sge *) arg;
+ struct sched *s = from_tasklet(s, t, sched_tsk);
+ struct sge *sge = s->sge;
struct adapter *adapter = sge->adapter;
struct cmdQ *q = &sge->cmdQ[0];
struct sk_buff *skb;
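
restart_sched() shows the other half of the tasklet conversion: when the
callback needs an object that does not itself embed the tasklet_struct, the
embedding structure grows a back-pointer (here sched->sge, mirroring the
octeon_device_priv->dev field added earlier). A sketch with hypothetical names:

struct example_sched {
	struct tasklet_struct tsk;
	struct example_engine *engine;	/* back-pointer for the callback */
};

static void example_restart(struct tasklet_struct *t)
{
	struct example_sched *s = from_tasklet(s, t, tsk);
	struct example_engine *engine = s->engine;

	/* ... operate on engine ... */
}

static void example_sched_init(struct example_sched *s,
			       struct example_engine *engine)
{
	tasklet_setup(&s->tsk, example_restart);
	s->engine = engine;	/* was passed via tasklet_init()'s data arg */
}
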
diff --git a/drivers/net/ethernet/chelsio/cxgb3/adapter.h b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
index 087ff0ffb597..f80fbd81b609 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/adapter.h
@@ -313,6 +313,7 @@ void t3_os_link_fault(struct adapter *adapter, int port_id, int state);
void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
void t3_sge_start(struct adapter *adap);
+void t3_sge_stop_dma(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_start_sge_timers(struct adapter *adap);
void t3_stop_sge_timers(struct adapter *adap);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
index dadf11e3dddb..9d591f0ddfc5 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/ael1002.c
@@ -815,17 +815,12 @@ static const struct cphy_ops ael2020_ops = {
int t3_ael2020_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops)
{
- int err;
-
cphy_init(phy, adapter, phy_addr, &ael2020_ops, mdio_ops,
SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_FIBRE |
SUPPORTED_IRQ, "10GBASE-R");
msleep(125);
- err = set_phy_regs(phy, ael2020_reset_regs);
- if (err)
- return err;
- return 0;
+ return set_phy_regs(phy, ael2020_reset_regs);
}
/*
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 387c357e1b8e..84ad7261e243 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -148,7 +148,7 @@ struct workqueue_struct *cxgb3_wq;
/**
* link_report - show link status and link speed/duplex
- * @p: the port whose settings are to be reported
+ * @dev: the port whose settings are to be reported
*
* Shows the link status, speed, and duplex of a port.
*/
@@ -304,8 +304,8 @@ void t3_os_link_changed(struct adapter *adapter, int port_id, int link_stat,
/**
* t3_os_phymod_changed - handle PHY module changes
- * @phy: the PHY reporting the module change
- * @mod_type: new module type
+ * @adap: the adapter associated with the link change
+ * @port_id: the port index whose link status has changed
*
* This is the OS-dependent handler for PHY module changes. It is
* invoked when a PHY module is removed or inserted for any OS-specific
@@ -1200,7 +1200,7 @@ static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
/**
* cxgb_up - enable the adapter
- * @adapter: adapter being enabled
+ * @adap: adapter being enabled
*
* Called when the first port is enabled, this function performs the
* actions necessary to make an adapter operational, such as completing
@@ -2996,7 +2996,7 @@ void t3_fatal_err(struct adapter *adapter)
unsigned int fw_status[4];
if (adapter->flags & FULL_INIT_DONE) {
- t3_sge_stop(adapter);
+ t3_sge_stop_dma(adapter);
t3_write_reg(adapter, A_XGM_TX_CTRL, 0);
t3_write_reg(adapter, A_XGM_RX_CTRL, 0);
t3_write_reg(adapter, XGM_REG(A_XGM_TX_CTRL, 1), 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 6dabbf1502c7..e18e9ce27f94 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -372,7 +372,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
/**
* free_rx_bufs - free the Rx buffers on an SGE free list
* @pdev: the PCI device associated with the adapter
- * @rxq: the SGE free list to clean up
+ * @q: the SGE free list to clean up
*
* Release the buffers on an SGE free-buffer Rx queue. HW fetching from
* this queue should be stopped before calling this function.
@@ -493,7 +493,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
/**
* refill_fl - refill an SGE free-buffer list
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the free-list to refill
* @n: the number of new buffers to allocate
* @gfp: the gfp flags for allocating new buffers
@@ -568,7 +568,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
/**
* recycle_rx_buf - recycle a receive buffer
- * @adapter: the adapter
+ * @adap: the adapter
* @q: the SGE free list
* @idx: index of buffer to recycle
*
@@ -825,6 +825,7 @@ use_orig_buf:
* get_packet_pg - return the next ingress packet buffer from a free list
* @adap: the adapter that received the packet
* @fl: the SGE free list holding the packet
+ * @q: the queue
* @len: the packet length including any SGE padding
* @drop_thres: # of remaining buffers before we start dropping packets
*
@@ -1173,6 +1174,7 @@ static void write_wr_hdr_sgl(unsigned int ndesc, struct sk_buff *skb,
* @q: the Tx queue
* @ndesc: number of descriptors the packet will occupy
* @compl: the value of the COMPL bit to use
+ * @addr: address
*
* Generate a TX_PKT work request to send the supplied packet.
*/
@@ -1516,14 +1518,14 @@ static int ctrl_xmit(struct adapter *adap, struct sge_txq *q,
/**
* restart_ctrlq - restart a suspended control queue
- * @qs: the queue set cotaining the control queue
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(unsigned long data)
+static void restart_ctrlq(struct tasklet_struct *t)
{
struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_CTRL].qresume_tsk);
struct sge_txq *q = &qs->txq[TXQ_CTRL];
spin_lock(&q->lock);
@@ -1622,6 +1624,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
* @pidx: index of the first Tx descriptor to write
* @gen: the generation value to use
* @ndesc: number of descriptors the packet will occupy
+ * @addr: the address
*
* Write an offload work request to send the supplied packet. The packet
* data already carry the work request with most fields populated.
@@ -1733,14 +1736,14 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
/**
* restart_offloadq - restart a suspended offload queue
- * @qs: the queue set cotaining the offload queue
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_offloadq(unsigned long data)
+static void restart_offloadq(struct tasklet_struct *t)
{
struct sk_buff *skb;
- struct sge_qset *qs = (struct sge_qset *)data;
+ struct sge_qset *qs = from_tasklet(qs, t, txq[TXQ_OFLD].qresume_tsk);
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
@@ -1883,7 +1886,7 @@ static inline void deliver_partial_bundle(struct t3cdev *tdev,
/**
* ofld_poll - NAPI handler for offload packets in interrupt mode
- * @dev: the network device doing the polling
+ * @napi: the network device doing the polling
* @budget: polling budget
*
* The NAPI handler for offload packets when a response queue is serviced
@@ -2007,7 +2010,7 @@ static void restart_tx(struct sge_qset *qs)
/**
* cxgb3_arp_process - process an ARP request probing a private IP address
- * @adapter: the adapter
+ * @pi: the port info
* @skb: the skbuff containing the ARP request
*
* Check if the ARP request is probing the private IP address
@@ -2069,7 +2072,8 @@ static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
* @adap: the adapter
* @rq: the response queue that received the packet
* @skb: the packet
- * @pad: amount of padding at the start of the buffer
+ * @pad: padding
+ * @lro: large receive offload
*
* Process an ingress Ethernet packet and deliver it to the stack.
* The padding is 2 if the packet was delivered in an Rx buffer and 0
@@ -2239,7 +2243,7 @@ static inline void handle_rsp_cntrl_info(struct sge_qset *qs, u32 flags)
/**
* check_ring_db - check if we need to ring any doorbells
- * @adapter: the adapter
+ * @adap: the adapter
* @qs: the queue set whose Tx queues are to be examined
* @sleeping: indicates which Tx queue sent GTS
*
@@ -2372,10 +2376,7 @@ no_mem:
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
- prefetch(addr);
-#if L1_CACHE_BYTES < 128
- prefetch(addr + L1_CACHE_BYTES);
-#endif
+ net_prefetch(addr);
__refill_fl(adap, fl);
if (lro > 0) {
lro_add_page(adap, qs, fl,
@@ -2902,7 +2903,7 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
/**
* sge_timer_tx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
+ * @t: a timer list containing the SGE queue set to maintain
*
* Runs periodically from a timer to perform maintenance of an SGE queue
* set. It performs two tasks:
@@ -2946,7 +2947,7 @@ static void sge_timer_tx(struct timer_list *t)
/**
* sge_timer_rx - perform periodic maintenance of an SGE qset
- * @data: the SGE queue set to maintain
+ * @t: the timer list containing the SGE queue set to maintain
*
* a) Replenishes Rx queues that have run out due to memory shortage.
* Normally new Rx buffers are added when existing ones are consumed but
@@ -3024,7 +3025,7 @@ void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
* @irq_vec_idx: the IRQ vector index for response queue interrupts
* @p: configuration parameters for this queue set
* @ntxq: number of Tx queues for the queue set
- * @netdev: net device associated with this queue set
+ * @dev: net device associated with this queue set
* @netdevq: net device TX queue associated with this queue set
*
* Allocate resources and initialize an SGE queue set. A queue set
@@ -3084,10 +3085,8 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
skb_queue_head_init(&q->txq[i].sendq);
}
- tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq,
- (unsigned long)q);
- tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq,
- (unsigned long)q);
+ tasklet_setup(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq);
+ tasklet_setup(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq);
q->fl[0].gen = q->fl[1].gen = 1;
q->fl[0].size = p->fl_size;
@@ -3271,30 +3270,40 @@ void t3_sge_start(struct adapter *adap)
}
/**
- * t3_sge_stop - disable SGE operation
+ * t3_sge_stop_dma - Disable SGE DMA engine operation
* @adap: the adapter
*
- * Disables the DMA engine. This can be called in emeregencies (e.g.,
- * from error interrupts) or from normal process context. In the latter
- * case it also disables any pending queue restart tasklets. Note that
- * if it is called in interrupt context it cannot disable the restart
- * tasklets as it cannot wait, however the tasklets will have no effect
- * since the doorbells are disabled and the driver will call this again
- * later from process context, at which time the tasklets will be stopped
- * if they are still running.
+ * Can be invoked from interrupt context, e.g. from an error handler.
+ *
+ * Note that this function cannot disable the restart of tasklets as
+ * it cannot wait if called from interrupt context; however, the
+ * tasklets will have no effect since the doorbells are disabled. The
+ * driver will call t3_sge_stop() later from process context, at
+ * which time the tasklets will be stopped if they are still running.
*/
-void t3_sge_stop(struct adapter *adap)
+void t3_sge_stop_dma(struct adapter *adap)
{
t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0);
- if (!in_interrupt()) {
- int i;
+}
- for (i = 0; i < SGE_QSETS; ++i) {
- struct sge_qset *qs = &adap->sge.qs[i];
+/**
+ * t3_sge_stop - disable SGE operation completely
+ * @adap: the adapter
+ *
+ * Called from process context. Disables the DMA engine and any
+ * pending queue restart tasklets.
+ */
+void t3_sge_stop(struct adapter *adap)
+{
+ int i;
- tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
- tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
- }
+ t3_sge_stop_dma(adap);
+
+ for (i = 0; i < SGE_QSETS; ++i) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ tasklet_kill(&qs->txq[TXQ_OFLD].qresume_tsk);
+ tasklet_kill(&qs->txq[TXQ_CTRL].qresume_tsk);
}
}
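
The rewrite above replaces an in_interrupt() check, which cannot reliably
identify the calling context, with two entry points whose requirements are
explicit: a DMA-only stop that any context may call, and a full stop that only
process context may call because tasklet_kill() can sleep. The split in sketch
form (all names hypothetical):

/* Safe from any context: only clears the DMA enable bit. */
static void example_stop_dma(struct example_adapter *adap)
{
	example_clear_reg_field(adap, EXAMPLE_SG_CONTROL, EXAMPLE_GLOBALENABLE);
}

/* Process context only: tasklet_kill() may sleep. */
static void example_stop(struct example_adapter *adap)
{
	int i;

	example_stop_dma(adap);
	for (i = 0; i < EXAMPLE_NQSETS; i++)
		tasklet_kill(&adap->qs[i].resume_tsk);
}
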
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 0a9f2c596624..7ff31d1026fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -2195,7 +2195,7 @@ static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
/**
* clear_sge_ctxt - completely clear an SGE context
- * @adapter: the adapter
+ * @adap: the adapter
* @id: the context id
* @type: the context type
*
@@ -2484,6 +2484,7 @@ int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
* @adapter: the adapter
* @id: the context id
* @op: the operation to perform
+ * @credits: credit value to write
*
* Perform the selected operation on an SGE completion queue context.
* The caller is responsible for ensuring only one context operation
@@ -2885,7 +2886,7 @@ static void init_cong_ctrl(unsigned short *a, unsigned short *b)
* t3_load_mtus - write the MTU and congestion control HW tables
* @adap: the adapter
* @mtus: the unrestricted values for the MTU table
- * @alphs: the values for the congestion control alpha parameter
+ * @alpha: the values for the congestion control alpha parameter
* @beta: the values for the congestion control beta parameter
* @mtu_cap: the maximum permitted effective MTU
*
@@ -2966,7 +2967,7 @@ static void ulp_config(struct adapter *adap, const struct tp_params *p)
/**
* t3_set_proto_sram - set the contents of the protocol sram
- * @adapter: the adapter
+ * @adap: the adapter
* @data: the protocol image
*
* Write the contents of the protocol SRAM.
@@ -3483,7 +3484,7 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
- * @ai: information about the current card
+ * @caps: information about the current card
*
* Initializes the SW state maintained for each link, including the link's
* capabilities and default speed/duplex/flow-control/autonegotiation
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9cb8b229c1b3..3352dad6ca99 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -146,6 +146,11 @@ enum {
CXGB4_ETHTOOL_FLASH_BOOTCFG = 4
};
+enum cxgb4_netdev_tls_ops {
+ CXGB4_TLSDEV_OPS = 1,
+ CXGB4_XFRMDEV_OPS
+};
+
struct cxgb4_bootcfg_data {
__le16 signature;
__u8 reserved[2];
@@ -1196,6 +1201,12 @@ struct adapter {
struct cxgb4_tc_u32_table *tc_u32;
struct chcr_ktls chcr_ktls;
struct chcr_stats_debug chcr_stats;
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ struct ch_ktls_stats_debug ch_ktls_stats;
+#endif
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ struct ch_ipsec_stats_debug ch_ipsec_stats;
+#endif
/* TC flower offload */
bool tc_flower_initialized;
@@ -2100,7 +2111,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q,
void cxgb4_eosw_txq_free_desc(struct adapter *adap, struct sge_eosw_txq *txq,
u32 ndesc);
int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc);
-void cxgb4_ethofld_restart(unsigned long data);
+void cxgb4_ethofld_restart(struct tasklet_struct *t);
int cxgb4_ethofld_rx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct pkt_gl *si);
void free_txq(struct adapter *adap, struct sge_txq *q);
@@ -2169,7 +2180,7 @@ void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
void cxgb4_quiesce_rx(struct sge_rspq *q);
int cxgb4_port_mirror_alloc(struct net_device *dev);
void cxgb4_port_mirror_free(struct net_device *dev);
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable);
#endif
#endif /* __CXGB4_H__ */
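
The "#ifdef CONFIG_CHELSIO_TLS_DEVICE" guards become
"#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)" throughout this series: a plain
#ifdef only matches a built-in (=y) option, while IS_ENABLED() is true for
both =y and =m, which matters now that the TLS and IPsec code lives in
separate, typically modular, drivers. In sketch form (CONFIG_EXAMPLE_FEATURE
is hypothetical):

#include <linux/kconfig.h>

#ifdef CONFIG_EXAMPLE_FEATURE
/* Compiled only when the option is built-in (=y); for =m the
 * preprocessor sees CONFIG_EXAMPLE_FEATURE_MODULE instead.
 */
#endif

#if IS_ENABLED(CONFIG_EXAMPLE_FEATURE)
/* Compiled when the option is built-in (=y) or modular (=m). */
#endif
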
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 05f33b7e3677..0273f40b85f7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -3527,6 +3527,10 @@ DEFINE_SHOW_ATTRIBUTE(meminfo);
static int chcr_stats_show(struct seq_file *seq, void *v)
{
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ struct ch_ktls_port_stats_debug *ktls_port;
+ int i = 0;
+#endif
struct adapter *adap = seq->private;
seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
@@ -3542,52 +3546,45 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
atomic_read(&adap->chcr_stats.error));
seq_printf(seq, "Fallback: %10u \n",
atomic_read(&adap->chcr_stats.fallback));
- seq_printf(seq, "IPSec PDU: %10u\n",
- atomic_read(&adap->chcr_stats.ipsec_cnt));
seq_printf(seq, "TLS PDU Tx: %10u\n",
atomic_read(&adap->chcr_stats.tls_pdu_tx));
seq_printf(seq, "TLS PDU Rx: %10u\n",
atomic_read(&adap->chcr_stats.tls_pdu_rx));
seq_printf(seq, "TLS Keys (DDR) Count: %10u\n",
atomic_read(&adap->chcr_stats.tls_key));
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ seq_puts(seq, "\nChelsio Inline IPsec Crypto Accelerator Stats\n");
+ seq_printf(seq, "IPSec PDU: %10u\n",
+ atomic_read(&adap->ch_ipsec_stats.ipsec_cnt));
+#endif
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
seq_printf(seq, "Tx TLS offload refcount: %20u\n",
refcount_read(&adap->chcr_ktls.ktls_refcount));
- seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_ctx));
- seq_printf(seq, "Tx connection created: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_connection_open));
- seq_printf(seq, "Tx connection failed: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_connection_fail));
- seq_printf(seq, "Tx connection closed: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_connection_close));
- seq_printf(seq, "Packets passed for encryption : %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_packets));
- seq_printf(seq, "Bytes passed for encryption : %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_encrypted_bytes));
seq_printf(seq, "Tx records send: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_send_records));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records));
seq_printf(seq, "Tx partial start of records: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_start_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_start_pkts));
seq_printf(seq, "Tx partial middle of records: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_middle_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_middle_pkts));
seq_printf(seq, "Tx partial end of record: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_end_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_end_pkts));
seq_printf(seq, "Tx complete records: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_complete_pkts));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
seq_printf(seq, "TX trim pkts : %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_trimmed_pkts));
- seq_printf(seq, "Tx out of order packets: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_ooo));
- seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_skip_no_sync_data));
- seq_printf(seq, "Tx drop not synced packets: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_drop_no_sync_data));
- seq_printf(seq, "Tx drop bypass req: %20llu\n",
- atomic64_read(&adap->chcr_stats.ktls_tx_drop_bypass_req));
+ atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
+ while (i < MAX_NPORTS) {
+ ktls_port = &adap->ch_ktls_stats.ktls_port[i];
+ seq_printf(seq, "Port %d\n", i);
+ seq_printf(seq, "Tx connection created: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_open));
+ seq_printf(seq, "Tx connection failed: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_fail));
+ seq_printf(seq, "Tx connection closed: %20llu\n",
+ atomic64_read(&ktls_port->ktls_tx_connection_close));
+ i++;
+ }
#endif
-
return 0;
}
DEFINE_SHOW_ATTRIBUTE(chcr_stats);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 9f3173f86eed..61ea3ec5c3fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -117,15 +117,7 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"vlan_insertions ",
"gro_packets ",
"gro_merged ",
-};
-
-static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
- "db_drop ",
- "db_full ",
- "db_empty ",
- "write_coal_success ",
- "write_coal_fail ",
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
"tx_tls_encrypted_packets",
"tx_tls_encrypted_bytes ",
"tx_tls_ctx ",
@@ -136,6 +128,14 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
#endif
};
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+ "db_drop ",
+ "db_full ",
+ "db_empty ",
+ "write_coal_success ",
+ "write_coal_fail ",
+};
+
static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
"-------Loopback----------- ",
"octets_ok ",
@@ -257,15 +257,7 @@ struct queue_port_stats {
u64 vlan_ins;
u64 gro_pkts;
u64 gro_merged;
-};
-
-struct adapter_stats {
- u64 db_drop;
- u64 db_full;
- u64 db_empty;
- u64 wc_success;
- u64 wc_fail;
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
u64 tx_tls_encrypted_packets;
u64 tx_tls_encrypted_bytes;
u64 tx_tls_ctx;
@@ -276,12 +268,23 @@ struct adapter_stats {
#endif
};
+struct adapter_stats {
+ u64 db_drop;
+ u64 db_full;
+ u64 db_empty;
+ u64 wc_success;
+ u64 wc_fail;
+};
+
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
{
const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ const struct ch_ktls_port_stats_debug *ktls_stats;
+#endif
struct sge_eohw_txq *eohw_tx;
unsigned int i;
@@ -306,6 +309,21 @@ static void collect_sge_port_stats(const struct adapter *adap,
s->vlan_ins += eohw_tx->vlan_ins;
}
}
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
+ s->tx_tls_encrypted_packets =
+ atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
+ s->tx_tls_encrypted_bytes =
+ atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
+ s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
+ s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
+ s->tx_tls_skip_no_sync_data =
+ atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
+ s->tx_tls_drop_no_sync_data =
+ atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
+ s->tx_tls_drop_bypass_req =
+ atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
+#endif
}
static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
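
The ethtool hunks move the kTLS counters from the adapter-wide strings/struct
pair into the per-queue-port pair. The two arrays are walked in parallel, so
the string table and the stats structure must keep the same entry order and
count. A defensive sketch using the kernel's usual compile-time guard (names
hypothetical):

static const char example_stat_strings[][ETH_GSTRING_LEN] = {
	"rx_packets",
	"tx_packets",
};

struct example_stats {
	u64 rx_packets;
	u64 tx_packets;
};

static void example_check_stats_layout(void)
{
	/* Fail the build if the two tables ever drift apart. */
	BUILD_BUG_ON(ARRAY_SIZE(example_stat_strings) !=
		     sizeof(struct example_stats) / sizeof(u64));
}
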
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 481498585ead..6ec5f2f26f05 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -604,17 +604,14 @@ int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
/* If the new rule wants to get inserted into
* HPFILTER region, but its prio is greater
* than the rule with the highest prio in HASH
- * region, then reject the rule.
- */
- if (t->tc_hash_tids_max_prio &&
- tc_prio > t->tc_hash_tids_max_prio)
- break;
-
- /* If there's not enough slots available
- * in HPFILTER region, then move on to
- * normal FILTER region immediately.
+ * region, or if there are not enough slots
+ * available in HPFILTER region, then skip
+ * trying to insert this rule into HPFILTER
+ * region and directly go to the next region.
*/
- if (ftid + n > t->nhpftids) {
+ if ((t->tc_hash_tids_max_prio &&
+ tc_prio > t->tc_hash_tids_max_prio) ||
+ (ftid + n) > t->nhpftids) {
ftid = t->nhpftids;
continue;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index de078a5bf23e..a952fe198eb9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -66,7 +66,7 @@
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>
#include <net/xfrm.h>
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
#include <net/tls.h>
#endif
@@ -6396,7 +6396,50 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
}
#endif /* CONFIG_PCI_IOV */
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) || IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int chcr_offload_state(struct adapter *adap,
+ enum cxgb4_netdev_tls_ops op_val)
+{
+ switch (op_val) {
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+ case CXGB4_TLSDEV_OPS:
+ if (!adap->uld[CXGB4_ULD_KTLS].handle) {
+ dev_dbg(adap->pdev_dev, "ch_ktls driver is not loaded\n");
+ return -EOPNOTSUPP;
+ }
+ if (!adap->uld[CXGB4_ULD_KTLS].tlsdev_ops) {
+ dev_dbg(adap->pdev_dev,
+ "ch_ktls driver has no registered tlsdev_ops\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ case CXGB4_XFRMDEV_OPS:
+ if (!adap->uld[CXGB4_ULD_IPSEC].handle) {
+ dev_dbg(adap->pdev_dev, "ch_ipsec driver is not loaded\n");
+ return -EOPNOTSUPP;
+ }
+ if (!adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops) {
+ dev_dbg(adap->pdev_dev,
+ "ch_ipsec driver has no registered xfrmdev_ops\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+ default:
+ dev_dbg(adap->pdev_dev,
+ "driver has no support for offload %d\n", op_val);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_CHELSIO_TLS_DEVICE || CONFIG_CHELSIO_IPSEC_INLINE */
+
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
enum tls_offload_ctx_dir direction,
@@ -6404,30 +6447,21 @@ static int cxgb4_ktls_dev_add(struct net_device *netdev, struct sock *sk,
u32 tcp_sn)
{
struct adapter *adap = netdev2adap(netdev);
- int ret = 0;
+ int ret;
mutex_lock(&uld_mutex);
- if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
- dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
- ret = -EOPNOTSUPP;
- goto out_unlock;
- }
-
- if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
- dev_err(adap->pdev_dev,
- "chcr driver has no registered tlsdev_ops()\n");
- ret = -EOPNOTSUPP;
+ ret = chcr_offload_state(adap, CXGB4_TLSDEV_OPS);
+ if (ret)
goto out_unlock;
- }
ret = cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_ENABLE);
if (ret)
goto out_unlock;
- ret = adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_add(netdev, sk,
- direction,
- crypto_info,
- tcp_sn);
+ ret = adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_add(netdev, sk,
+ direction,
+ crypto_info,
+ tcp_sn);
/* if there is a failure, clear the refcount */
if (ret)
cxgb4_set_ktls_feature(adap,
@@ -6444,19 +6478,11 @@ static void cxgb4_ktls_dev_del(struct net_device *netdev,
struct adapter *adap = netdev2adap(netdev);
mutex_lock(&uld_mutex);
- if (!adap->uld[CXGB4_ULD_CRYPTO].handle) {
- dev_err(adap->pdev_dev, "chcr driver is not loaded\n");
+ if (chcr_offload_state(adap, CXGB4_TLSDEV_OPS))
goto out_unlock;
- }
- if (!adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops) {
- dev_err(adap->pdev_dev,
- "chcr driver has no registered tlsdev_ops\n");
- goto out_unlock;
- }
-
- adap->uld[CXGB4_ULD_CRYPTO].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
- direction);
+ adap->uld[CXGB4_ULD_KTLS].tlsdev_ops->tls_dev_del(netdev, tls_ctx,
+ direction);
cxgb4_set_ktls_feature(adap, FW_PARAMS_PARAM_DEV_KTLS_HW_DISABLE);
out_unlock:
@@ -6469,6 +6495,114 @@ static const struct tlsdev_ops cxgb4_ktls_ops = {
};
#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+
+static int cxgb4_xfrm_add_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+ int ret;
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return -EBUSY;
+ }
+ ret = chcr_offload_state(adap, CXGB4_XFRMDEV_OPS);
+ if (ret)
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_add(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+
+ return ret;
+}
+
+static void cxgb4_xfrm_del_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static void cxgb4_xfrm_free_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_free(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static bool cxgb4_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+ bool ret = false;
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return ret;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ ret = adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_offload_ok(skb, x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+ return ret;
+}
+
+static void cxgb4_advance_esn_state(struct xfrm_state *x)
+{
+ struct adapter *adap = netdev2adap(x->xso.dev);
+
+ if (!mutex_trylock(&uld_mutex)) {
+ dev_dbg(adap->pdev_dev,
+ "crypto uld critical resource is under use\n");
+ return;
+ }
+ if (chcr_offload_state(adap, CXGB4_XFRMDEV_OPS))
+ goto out_unlock;
+
+ adap->uld[CXGB4_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_advance_esn(x);
+
+out_unlock:
+ mutex_unlock(&uld_mutex);
+}
+
+static const struct xfrmdev_ops cxgb4_xfrmdev_ops = {
+ .xdo_dev_state_add = cxgb4_xfrm_add_state,
+ .xdo_dev_state_delete = cxgb4_xfrm_del_state,
+ .xdo_dev_state_free = cxgb4_xfrm_free_state,
+ .xdo_dev_offload_ok = cxgb4_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = cxgb4_advance_esn_state,
+};
+
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
@@ -6721,14 +6855,22 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
-#if defined(CONFIG_CHELSIO_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (pi->adapter->params.crypto & FW_CAPS_CONFIG_TLS_HW) {
netdev->hw_features |= NETIF_F_HW_TLS_TX;
netdev->tlsdev_ops = &cxgb4_ktls_ops;
/* initialize the refcount */
refcount_set(&pi->adapter->chcr_ktls.ktls_refcount, 0);
}
-#endif
+#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+ if (pi->adapter->params.crypto & FW_CAPS_CONFIG_IPSEC_INLINE) {
+ netdev->hw_enc_features |= NETIF_F_HW_ESP;
+ netdev->features |= NETIF_F_HW_ESP;
+ netdev->xfrmdev_ops = &cxgb4_xfrmdev_ops;
+ }
+#endif /* CONFIG_CHELSIO_IPSEC_INLINE */
+
netdev->priv_flags |= IFF_UNICAST_FLT;
/* MTU range: 81 - 9600 */
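
Each xfrmdev_ops wrapper added above follows one shape: take uld_mutex with
mutex_trylock(), since these hooks may run in contexts where blocking on the
ULD lock is not acceptable, verify via chcr_offload_state() that the offload
driver is loaded and registered its ops, then forward the call. The skeleton,
reduced to hypothetical essentials:

static void example_xfrm_del_state(struct xfrm_state *x)
{
	struct example_adapter *adap = example_netdev2adap(x->xso.dev);

	/* Do not block: bail out if the ULD lock is contended. */
	if (!mutex_trylock(&example_uld_mutex))
		return;

	/* Dispatch only if the offload driver registered its ops. */
	if (example_offload_state(adap, EXAMPLE_XFRMDEV_OPS))
		goto out_unlock;

	adap->uld[EXAMPLE_ULD_IPSEC].xfrmdev_ops->xdo_dev_state_delete(x);

out_unlock:
	mutex_unlock(&example_uld_mutex);
}
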
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index f642c1b475c4..1b88bd1c2dbe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -60,6 +60,89 @@ static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
};
+static const struct cxgb4_natmode_config cxgb4_natmode_config_array[] = {
+ /* Default supported NAT modes */
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_NONE,
+ .natmode = NAT_MODE_NONE,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP,
+ .natmode = NAT_MODE_DIP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT,
+ .natmode = NAT_MODE_DIP_DP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_DIP_DP_SIP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_DP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_SIP | CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T5,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP |
+ CXGB4_ACTION_NATMODE_DPORT |
+ CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_ALL,
+ },
+ /* T6+ can ignore L4 ports when they're disabled. */
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_SIP_SP,
+ },
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SPORT,
+ .natmode = NAT_MODE_DIP_DP_SP,
+ },
+ {
+ .chip = CHELSIO_T6,
+ .flags = CXGB4_ACTION_NATMODE_DIP | CXGB4_ACTION_NATMODE_SIP,
+ .natmode = NAT_MODE_ALL,
+ },
+};
+
+static void cxgb4_action_natmode_tweak(struct ch_filter_specification *fs,
+ u8 natmode_flags)
+{
+ u8 i = 0;
+
+ /* Translate the enabled NAT 4-tuple fields to one of the
+ * hardware supported NAT mode configurations. This ensures
+ * that we pick a valid combination, where the disabled fields
+ * do not get overwritten to 0.
+ */
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+ if (cxgb4_natmode_config_array[i].flags == natmode_flags) {
+ fs->nat_mode = cxgb4_natmode_config_array[i].natmode;
+ return;
+ }
+ }
+}
+
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
@@ -289,7 +372,8 @@ static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
}
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
- u32 mask, u32 offset, u8 htype)
+ u32 mask, u32 offset, u8 htype,
+ u8 *natmode_flags)
{
switch (htype) {
case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
@@ -314,60 +398,94 @@ static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
switch (offset) {
case PEDIT_IP4_SRC:
offload_pedit(fs, val, mask, IP4_SRC);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP4_DST:
offload_pedit(fs, val, mask, IP4_DST);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
switch (offset) {
case PEDIT_IP6_SRC_31_0:
offload_pedit(fs, val, mask, IP6_SRC_31_0);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_63_32:
offload_pedit(fs, val, mask, IP6_SRC_63_32);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_95_64:
offload_pedit(fs, val, mask, IP6_SRC_95_64);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_SRC_127_96:
offload_pedit(fs, val, mask, IP6_SRC_127_96);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
break;
case PEDIT_IP6_DST_31_0:
offload_pedit(fs, val, mask, IP6_DST_31_0);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_63_32:
offload_pedit(fs, val, mask, IP6_DST_63_32);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_95_64:
offload_pedit(fs, val, mask, IP6_DST_95_64);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
case PEDIT_IP6_DST_127_96:
offload_pedit(fs, val, mask, IP6_DST_127_96);
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
- else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ } else {
fs->nat_lport = val >> 16;
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ }
}
- fs->nat_mode = NAT_MODE_ALL;
break;
case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
- if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK) {
fs->nat_fport = val;
- else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ } else {
fs->nat_lport = val >> 16;
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
+ }
}
- fs->nat_mode = NAT_MODE_ALL;
+ break;
+ }
+}
+
+static int cxgb4_action_natmode_validate(struct adapter *adap, u8 natmode_flags,
+ struct netlink_ext_ack *extack)
+{
+ u8 i = 0;
+
+ /* Extract the NAT mode to enable based on what 4-tuple fields
+ * are enabled to be overwritten. This ensures that the
+ * disabled fields don't get overwritten to 0.
+ */
+ for (i = 0; i < ARRAY_SIZE(cxgb4_natmode_config_array); i++) {
+ const struct cxgb4_natmode_config *c;
+
+ c = &cxgb4_natmode_config_array[i];
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= c->chip &&
+ natmode_flags == c->flags)
+ return 0;
}
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported NAT mode 4-tuple combination");
+ return -EOPNOTSUPP;
}
void cxgb4_process_flow_actions(struct net_device *in,
@@ -375,6 +493,7 @@ void cxgb4_process_flow_actions(struct net_device *in,
struct ch_filter_specification *fs)
{
struct flow_action_entry *act;
+ u8 natmode_flags = 0;
int i;
flow_action_for_each(i, act, actions) {
@@ -426,7 +545,8 @@ void cxgb4_process_flow_actions(struct net_device *in,
val = act->mangle.val;
offset = act->mangle.offset;
- process_pedit_field(fs, val, mask, offset, htype);
+ process_pedit_field(fs, val, mask, offset, htype,
+ &natmode_flags);
}
break;
case FLOW_ACTION_QUEUE:
@@ -438,6 +558,9 @@ void cxgb4_process_flow_actions(struct net_device *in,
break;
}
}
+ if (natmode_flags)
+ cxgb4_action_natmode_tweak(fs, natmode_flags);
+
}
static bool valid_l4_mask(u32 mask)
@@ -454,7 +577,8 @@ static bool valid_l4_mask(u32 mask)
}
static bool valid_pedit_action(struct net_device *dev,
- const struct flow_action_entry *act)
+ const struct flow_action_entry *act,
+ u8 *natmode_flags)
{
u32 mask, offset;
u8 htype;
@@ -479,7 +603,10 @@ static bool valid_pedit_action(struct net_device *dev,
case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
switch (offset) {
case PEDIT_IP4_SRC:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
case PEDIT_IP4_DST:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -493,10 +620,13 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_IP6_SRC_63_32:
case PEDIT_IP6_SRC_95_64:
case PEDIT_IP6_SRC_127_96:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SIP;
+ break;
case PEDIT_IP6_DST_31_0:
case PEDIT_IP6_DST_63_32:
case PEDIT_IP6_DST_95_64:
case PEDIT_IP6_DST_127_96:
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -512,6 +642,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -527,6 +661,10 @@ static bool valid_pedit_action(struct net_device *dev,
__func__);
return false;
}
+ if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
+ *natmode_flags |= CXGB4_ACTION_NATMODE_SPORT;
+ else
+ *natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
netdev_err(dev, "%s: Unsupported pedit field\n",
@@ -546,10 +684,12 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
struct netlink_ext_ack *extack,
u8 matchall_filter)
{
+ struct adapter *adap = netdev2adap(dev);
struct flow_action_entry *act;
bool act_redir = false;
bool act_pedit = false;
bool act_vlan = false;
+ u8 natmode_flags = 0;
int i;
if (!flow_action_basic_hw_stats_check(actions, extack))
@@ -563,7 +703,6 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
break;
case FLOW_ACTION_MIRRED:
case FLOW_ACTION_REDIRECT: {
- struct adapter *adap = netdev2adap(dev);
struct net_device *n_dev, *target_dev;
bool found = false;
unsigned int i;
@@ -620,7 +759,8 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
}
break;
case FLOW_ACTION_MANGLE: {
- bool pedit_valid = valid_pedit_action(dev, act);
+ bool pedit_valid = valid_pedit_action(dev, act,
+ &natmode_flags);
if (!pedit_valid)
return -EOPNOTSUPP;
@@ -642,6 +782,15 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
return -EINVAL;
}
+ if (act_pedit) {
+ int ret;
+
+ ret = cxgb4_action_natmode_validate(adap, natmode_flags,
+ extack);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
index 6296e1d5a12b..3a2fa00c8cde 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h
@@ -108,6 +108,21 @@ struct ch_tc_pedit_fields {
#define PEDIT_TCP_SPORT_DPORT 0x0
#define PEDIT_UDP_SPORT_DPORT 0x0
+enum cxgb4_action_natmode_flags {
+ CXGB4_ACTION_NATMODE_NONE = 0,
+ CXGB4_ACTION_NATMODE_DIP = (1 << 0),
+ CXGB4_ACTION_NATMODE_SIP = (1 << 1),
+ CXGB4_ACTION_NATMODE_DPORT = (1 << 2),
+ CXGB4_ACTION_NATMODE_SPORT = (1 << 3),
+};
+
+/* TC PEDIT action to NATMODE translation entry */
+struct cxgb4_natmode_config {
+ enum chip_type chip;
+ u8 flags;
+ u8 natmode;
+};
+
void cxgb4_process_flow_actions(struct net_device *in,
struct flow_action *actions,
struct ch_filter_specification *fs);
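The cxgb4_natmode_config entries declared above pair a minimum chip revision with a bitmask of rewritten 4-tuple fields and the NAT mode to program; cxgb4_action_natmode_validate() earlier in this patch simply scans the table for an exact flags match. A minimal, self-contained sketch of that lookup pattern follows — the table entries, field macros and mode values here are hypothetical, the real array lives in cxgb4_tc_flower.c:

#include <stdio.h>
#include <stddef.h>

/* mirrors enum cxgb4_action_natmode_flags */
#define NAT_DIP   (1u << 0)
#define NAT_SIP   (1u << 1)
#define NAT_DPORT (1u << 2)
#define NAT_SPORT (1u << 3)

struct natmode_cfg {
	int chip;            /* minimum chip revision supporting the combo */
	unsigned int flags;  /* exact set of fields being rewritten */
	unsigned int mode;   /* hardware NAT mode to program */
};

/* hypothetical entries; the real table enumerates every supported combo */
static const struct natmode_cfg tbl[] = {
	{ 5, NAT_SIP,                                   1 },
	{ 5, NAT_DIP | NAT_DPORT,                       2 },
	{ 6, NAT_SIP | NAT_DIP | NAT_SPORT | NAT_DPORT, 3 },
};

static int natmode_validate(int chip, unsigned int flags)
{
	for (size_t i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
		if (chip >= tbl[i].chip && flags == tbl[i].flags)
			return 0;
	return -1;	/* the driver returns -EOPNOTSUPP here */
}

int main(void)
{
	unsigned int all = NAT_SIP | NAT_DIP | NAT_SPORT | NAT_DPORT;

	printf("%d\n", natmode_validate(6, all));       /* 0: supported */
	printf("%d\n", natmode_validate(5, NAT_SPORT)); /* -1: rejected */
	return 0;
}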
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index ae7123a9de8e..6c259de96f96 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -114,8 +114,7 @@ static int cxgb4_init_eosw_txq(struct net_device *dev,
eosw_txq->cred = adap->params.ofldq_wr_cred;
eosw_txq->hwqid = hwqid;
eosw_txq->netdev = dev;
- tasklet_init(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart,
- (unsigned long)eosw_txq);
+ tasklet_setup(&eosw_txq->qresume_tsk, cxgb4_ethofld_restart);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 08439e215efe..743af9e654aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -663,7 +663,7 @@ static int uld_attach(struct adapter *adap, unsigned int uld)
return 0;
}
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static bool cxgb4_uld_in_use(struct adapter *adap)
{
const struct tid_info *t = &adap->tids;
@@ -690,8 +690,8 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
* ULD is/are already active, return failure.
*/
if (cxgb4_uld_in_use(adap)) {
- dev_warn(adap->pdev_dev,
- "ULD connections (tid/stid) active. Can't enable kTLS\n");
+ dev_dbg(adap->pdev_dev,
+ "ULD connections (tid/stid) active. Can't enable kTLS\n");
return -EINVAL;
}
ret = t4_set_params(adap, adap->mbox, adap->pf,
@@ -699,7 +699,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
if (ret)
return ret;
refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
- pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
+ pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
} else {
/* ktls settings already up, just increment refcount. */
refcount_inc(&adap->chcr_ktls.ktls_refcount);
@@ -716,7 +716,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
0, 1, &params, &params);
if (ret)
return ret;
- pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
+ pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index a963fd0b4540..b169776ab484 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -40,9 +40,11 @@
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/atomic.h>
+#include <net/tls.h>
#include "cxgb4.h"
#define MAX_ULD_QSETS 16
+#define MAX_ULD_NPORTS 4
/* CPL message priority levels */
enum {
@@ -302,7 +304,9 @@ enum cxgb4_uld {
CXGB4_ULD_ISCSI,
CXGB4_ULD_ISCSIT,
CXGB4_ULD_CRYPTO,
+ CXGB4_ULD_IPSEC,
CXGB4_ULD_TLS,
+ CXGB4_ULD_KTLS,
CXGB4_ULD_MAX
};
@@ -361,28 +365,11 @@ struct cxgb4_virt_res { /* virtualized HW resources */
struct cxgb4_range ppod_edram;
};
-struct chcr_stats_debug {
- atomic_t cipher_rqst;
- atomic_t digest_rqst;
- atomic_t aead_rqst;
- atomic_t complete;
- atomic_t error;
- atomic_t fallback;
- atomic_t ipsec_cnt;
- atomic_t tls_pdu_tx;
- atomic_t tls_pdu_rx;
- atomic_t tls_key;
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+struct ch_ktls_port_stats_debug {
atomic64_t ktls_tx_connection_open;
atomic64_t ktls_tx_connection_fail;
atomic64_t ktls_tx_connection_close;
- atomic64_t ktls_tx_send_records;
- atomic64_t ktls_tx_end_pkts;
- atomic64_t ktls_tx_start_pkts;
- atomic64_t ktls_tx_middle_pkts;
- atomic64_t ktls_tx_retransmit_pkts;
- atomic64_t ktls_tx_complete_pkts;
- atomic64_t ktls_tx_trimmed_pkts;
atomic64_t ktls_tx_encrypted_packets;
atomic64_t ktls_tx_encrypted_bytes;
atomic64_t ktls_tx_ctx;
@@ -390,10 +377,38 @@ struct chcr_stats_debug {
atomic64_t ktls_tx_skip_no_sync_data;
atomic64_t ktls_tx_drop_no_sync_data;
atomic64_t ktls_tx_drop_bypass_req;
+};
+struct ch_ktls_stats_debug {
+ struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
+ atomic64_t ktls_tx_send_records;
+ atomic64_t ktls_tx_end_pkts;
+ atomic64_t ktls_tx_start_pkts;
+ atomic64_t ktls_tx_middle_pkts;
+ atomic64_t ktls_tx_retransmit_pkts;
+ atomic64_t ktls_tx_complete_pkts;
+ atomic64_t ktls_tx_trimmed_pkts;
+};
#endif
+
+struct chcr_stats_debug {
+ atomic_t cipher_rqst;
+ atomic_t digest_rqst;
+ atomic_t aead_rqst;
+ atomic_t complete;
+ atomic_t error;
+ atomic_t fallback;
+ atomic_t tls_pdu_tx;
+ atomic_t tls_pdu_rx;
+ atomic_t tls_key;
};
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
+struct ch_ipsec_stats_debug {
+ atomic_t ipsec_cnt;
+};
+#endif
+
#define OCQ_WIN_OFFSET(pdev, vres) \
(pci_resource_len((pdev), 2) - roundup_pow_of_two((vres)->ocq.size))
@@ -470,9 +485,12 @@ struct cxgb4_uld_info {
struct napi_struct *napi);
void (*lro_flush)(struct t4_lro_mgr *);
int (*tx_handler)(struct sk_buff *skb, struct net_device *dev);
-#if IS_ENABLED(CONFIG_TLS_DEVICE)
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
const struct tlsdev_ops *tlsdev_ops;
#endif
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+ const struct xfrmdev_ops *xfrmdev_ops;
+#endif
};
void cxgb4_uld_enable(struct adapter *adap);
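The #ifdef CONFIG_CHELSIO_TLS_DEVICE to #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) conversions in this header matter because the symbol becomes tristate in the new Kconfig further down: with CONFIG_FOO=m the preprocessor only defines CONFIG_FOO_MODULE, so a bare #ifdef would silently compile the shared declarations out. A short illustration of the difference:

#include <linux/kconfig.h>

#ifdef CONFIG_CHELSIO_TLS_DEVICE
/* reached only when the option is built into the kernel image (=y) */
#endif

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
/* reached when the option is built-in (=y) or modular (=m), which is
 * what shared headers need once ch_ktls can be built as a module
 */
#endif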
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 869431a1eedd..a9e9c7ae565d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1416,14 +1416,14 @@ static netdev_tx_t cxgb4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
pi = netdev_priv(dev);
adap = pi->adapter;
ssi = skb_shinfo(skb);
-#ifdef CONFIG_CHELSIO_IPSEC_INLINE
+#if IS_ENABLED(CONFIG_CHELSIO_IPSEC_INLINE)
if (xfrm_offload(skb) && !ssi->gso_size)
- return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+ return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev);
#endif /* CHELSIO_IPSEC_INLINE */
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
if (skb->decrypted)
- return adap->uld[CXGB4_ULD_CRYPTO].tx_handler(skb, dev);
+ return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev);
#endif /* CHELSIO_TLS_DEVICE */
qidx = skb_get_queue_mapping(skb);
@@ -2660,15 +2660,15 @@ static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
/**
* restart_ctrlq - restart a suspended control queue
- * @data: the control queue to restart
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx control queue.
*/
-static void restart_ctrlq(unsigned long data)
+static void restart_ctrlq(struct tasklet_struct *t)
{
struct sk_buff *skb;
unsigned int written = 0;
- struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+ struct sge_ctrl_txq *q = from_tasklet(q, t, qresume_tsk);
spin_lock(&q->sendq.lock);
reclaim_completed_tx_imm(&q->q);
@@ -2961,13 +2961,13 @@ static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
/**
* restart_ofldq - restart a suspended offload queue
- * @data: the offload queue to restart
+ * @t: pointer to the tasklet associated with this handler
*
* Resumes transmission on a suspended Tx offload queue.
*/
-static void restart_ofldq(unsigned long data)
+static void restart_ofldq(struct tasklet_struct *t)
{
- struct sge_uld_txq *q = (struct sge_uld_txq *)data;
+ struct sge_uld_txq *q = from_tasklet(q, t, qresume_tsk);
spin_lock(&q->sendq.lock);
q->full = 0; /* the queue actually is completely empty now */
@@ -3887,9 +3887,10 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
return work_done;
}
-void cxgb4_ethofld_restart(unsigned long data)
+void cxgb4_ethofld_restart(struct tasklet_struct *t)
{
- struct sge_eosw_txq *eosw_txq = (struct sge_eosw_txq *)data;
+ struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
+ qresume_tsk);
int pktcount;
spin_lock(&eosw_txq->lock);
@@ -4580,7 +4581,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+ tasklet_setup(&txq->qresume_tsk, restart_ctrlq);
txq->full = 0;
return 0;
}
@@ -4670,7 +4671,7 @@ int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
txq->q.q_type = CXGB4_TXQ_ULD;
txq->adap = adap;
skb_queue_head_init(&txq->sendq);
- tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+ tasklet_setup(&txq->qresume_tsk, restart_ofldq);
txq->full = 0;
txq->mapping_err = 0;
return 0;
@@ -4872,9 +4873,6 @@ void t4_sge_stop(struct adapter *adap)
int i;
struct sge *s = &adap->sge;
- if (in_interrupt()) /* actions below require waiting */
- return;
-
if (s->rx_timer.function)
del_timer_sync(&s->rx_timer);
if (s->tx_timer.function)
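The tasklet changes in this file (and the matching one-liner in cxgb4_tc_mqprio.c above) are the standard conversion to the tasklet API introduced in v5.9: the callback receives the tasklet pointer and recovers its container via from_tasklet(), a container_of() wrapper, instead of casting an unsigned long cookie. A minimal sketch of the pattern, with hypothetical names:

#include <linux/interrupt.h>

struct my_txq {
	int stopped;
	struct tasklet_struct qresume_tsk;
};

/* new-style callback: the tasklet itself is the argument */
static void my_txq_restart(struct tasklet_struct *t)
{
	/* from_tasklet(var, t, member) expands to
	 * container_of(t, typeof(*var), member)
	 */
	struct my_txq *q = from_tasklet(q, t, qresume_tsk);

	q->stopped = 0;
}

static void my_txq_init(struct my_txq *q)
{
	/* replaces tasklet_init(&q->qresume_tsk, fn, (unsigned long)q) */
	tasklet_setup(&q->qresume_tsk, my_txq_restart);
}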
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index fa3367966f4b..98d01a7497ec 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -4745,9 +4745,11 @@ static void le_intr_handler(struct adapter *adap)
static struct intr_info t6_le_intr_info[] = {
{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { CMDTIDERR_F, "LE cmd tid error", -1, 1 },
{ TCAMINTPERR_F, "LE parity error", -1, 1 },
{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { HASHTBLMEMCRCERR_F, "LE hash table mem crc error", -1, 0 },
{ 0 }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 065c01c654ff..b11a172b5174 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -3017,6 +3017,14 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
+#define HASHTBLMEMCRCERR_S 27
+#define HASHTBLMEMCRCERR_V(x) ((x) << HASHTBLMEMCRCERR_S)
+#define HASHTBLMEMCRCERR_F HASHTBLMEMCRCERR_V(1U)
+
+#define CMDTIDERR_S 22
+#define CMDTIDERR_V(x) ((x) << CMDTIDERR_S)
+#define CMDTIDERR_F CMDTIDERR_V(1U)
+
#define T6_UNKNOWNCMD_S 3
#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
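The new HASHTBLMEMCRCERR and CMDTIDERR definitions follow the driver's _S/_V/_F register-field convention: _S is the bit position, _V(x) shifts a value into place, and _F is the single-bit mask tested against interrupt-cause registers such as those listed in t6_le_intr_info above. Applied to a hypothetical field:

#define MYERR_S    22                  /* bit position within the register */
#define MYERR_V(x) ((x) << MYERR_S)    /* shift a value into the field */
#define MYERR_F    MYERR_V(1U)         /* one-bit mask for flag fields */

/* usage: test whether an interrupt-cause register has the bit raised */
static inline int myerr_raised(unsigned int cause)
{
	return (cause & MYERR_F) != 0;
}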
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index e2fe78e2e242..2820a0bb971b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2017,33 +2017,14 @@ static void mboxlog_stop(struct seq_file *seq, void *v)
{
}
-static const struct seq_operations mboxlog_seq_ops = {
+static const struct seq_operations mboxlog_sops = {
.start = mboxlog_start,
.next = mboxlog_next,
.stop = mboxlog_stop,
.show = mboxlog_show
};
-static int mboxlog_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &mboxlog_seq_ops);
-
- if (!res) {
- struct seq_file *seq = file->private_data;
-
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations mboxlog_fops = {
- .owner = THIS_MODULE,
- .open = mboxlog_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
+DEFINE_SEQ_ATTRIBUTE(mboxlog);
/*
* Show SGE Queue Set information. We display QPL Queues Sets per line.
*/
@@ -2171,31 +2152,14 @@ static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
-static const struct seq_operations sge_qinfo_seq_ops = {
+static const struct seq_operations sge_qinfo_sops = {
.start = sge_queue_start,
.next = sge_queue_next,
.stop = sge_queue_stop,
.show = sge_qinfo_show
};
-static int sge_qinfo_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &sge_qinfo_seq_ops);
-
- if (!res) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations sge_qinfo_debugfs_fops = {
- .owner = THIS_MODULE,
- .open = sge_qinfo_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(sge_qinfo);
/*
* Show SGE Queue Set statistics. We display QPL Queues Sets per line.
@@ -2317,31 +2281,14 @@ static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
}
-static const struct seq_operations sge_qstats_seq_ops = {
+static const struct seq_operations sge_qstats_sops = {
.start = sge_qstats_start,
.next = sge_qstats_next,
.stop = sge_qstats_stop,
.show = sge_qstats_show
};
-static int sge_qstats_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &sge_qstats_seq_ops);
-
- if (res == 0) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations sge_qstats_proc_fops = {
- .owner = THIS_MODULE,
- .open = sge_qstats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(sge_qstats);
/*
* Show PCI-E SR-IOV Virtual Function Resource Limits.
@@ -2415,31 +2362,14 @@ static void interfaces_stop(struct seq_file *seq, void *v)
{
}
-static const struct seq_operations interfaces_seq_ops = {
+static const struct seq_operations interfaces_sops = {
.start = interfaces_start,
.next = interfaces_next,
.stop = interfaces_stop,
.show = interfaces_show
};
-static int interfaces_open(struct inode *inode, struct file *file)
-{
- int res = seq_open(file, &interfaces_seq_ops);
-
- if (res == 0) {
- struct seq_file *seq = file->private_data;
- seq->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations interfaces_proc_fops = {
- .owner = THIS_MODULE,
- .open = interfaces_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(interfaces);
/*
* /sys/kernel/debugfs/cxgb4vf/ files list.
@@ -2452,10 +2382,10 @@ struct cxgb4vf_debugfs_entry {
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
{ "mboxlog", 0444, &mboxlog_fops },
- { "sge_qinfo", 0444, &sge_qinfo_debugfs_fops },
- { "sge_qstats", 0444, &sge_qstats_proc_fops },
+ { "sge_qinfo", 0444, &sge_qinfo_fops },
+ { "sge_qstats", 0444, &sge_qstats_fops },
{ "resources", 0444, &resources_fops },
- { "interfaces", 0444, &interfaces_proc_fops },
+ { "interfaces", 0444, &interfaces_fops },
};
/*
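Each DEFINE_SEQ_ATTRIBUTE(name) above replaces a hand-written open function plus file_operations: the macro (added in v5.9) expects a seq_operations named name_sops, generates name_open() — which passes inode->i_private through to seq->private exactly as the deleted open helpers did — and emits the name_fops that the debugfs table now references. A condensed sketch of the naming contract, with a hypothetical "demo" file:

#include <linux/seq_file.h>

static void *demo_start(struct seq_file *seq, loff_t *pos)
{
	return *pos ? NULL : SEQ_START_TOKEN;
}

static void *demo_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void demo_stop(struct seq_file *seq, void *v)
{
}

static int demo_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "private=%p\n", seq->private);
	return 0;
}

/* must be named demo_sops for DEFINE_SEQ_ATTRIBUTE(demo) to find it */
static const struct seq_operations demo_sops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};

/* expands to demo_open() plus demo_fops (open/read/llseek/release) */
DEFINE_SEQ_ATTRIBUTE(demo);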
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Kconfig b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
new file mode 100644
index 000000000000..7dfa57348d54
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Kconfig
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Chelsio inline crypto configuration
+#
+
+config CHELSIO_INLINE_CRYPTO
+ bool "Chelsio Inline Crypto support"
+ depends on CHELSIO_T4
+ default y
+ help
+ Enable support for inline crypto.
+ Allows enabling or disabling individual drivers from the list
+ of inline crypto drivers below.
+
+if CHELSIO_INLINE_CRYPTO
+
+config CRYPTO_DEV_CHELSIO_TLS
+ tristate "Chelsio Crypto Inline TLS Driver"
+ depends on CHELSIO_T4
+ depends on TLS_TOE
+ help
+ Support Chelsio Inline TLS with the Chelsio crypto accelerator.
+ Enables inline TLS support for Tx and Rx.
+
+ To compile this driver as a module, choose M here: the module
+ will be called chtls.
+
+config CHELSIO_IPSEC_INLINE
+ tristate "Chelsio IPSec XFRM Tx crypto offload"
+ depends on CHELSIO_T4
+ depends on XFRM_OFFLOAD
+ depends on INET_ESP_OFFLOAD || INET6_ESP_OFFLOAD
+ help
+ Support Chelsio Inline IPsec with the Chelsio crypto accelerator.
+ Enables inline IPsec support for Tx.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ch_ipsec.
+
+config CHELSIO_TLS_DEVICE
+ tristate "Chelsio Inline KTLS Offload"
+ depends on CHELSIO_T4
+ depends on TLS
+ depends on TLS_DEVICE
+ help
+ This flag enables support for kernel TLS offload on the Chelsio T6
+ crypto accelerator. CONFIG_CHELSIO_TLS_DEVICE can be enabled only
+ if CONFIG_TLS and CONFIG_TLS_DEVICE are enabled.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ch_ktls.
+
+endif # CHELSIO_INLINE_CRYPTO
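With the split above, each inline-crypto driver is selectable independently under the new CHELSIO_INLINE_CRYPTO menu. A plausible .config fragment building all three as modules — assuming the dependency symbols named in the help texts (TLS_TOE, TLS/TLS_DEVICE, XFRM_OFFLOAD and the ESP offloads) are already enabled:

CONFIG_CHELSIO_T4=m
CONFIG_CHELSIO_INLINE_CRYPTO=y
CONFIG_CRYPTO_DEV_CHELSIO_TLS=m
# builds chtls.ko
CONFIG_CHELSIO_IPSEC_INLINE=m
# builds ch_ipsec.ko
CONFIG_CHELSIO_TLS_DEVICE=m
# builds ch_ktls.ko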
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/Makefile
new file mode 100644
index 000000000000..27e6d7e2f1eb
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_CRYPTO_DEV_CHELSIO_TLS) += chtls/
+obj-$(CONFIG_CHELSIO_IPSEC_INLINE) += ch_ipsec/
+obj-$(CONFIG_CHELSIO_TLS_DEVICE) += ch_ktls/
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile
new file mode 100644
index 000000000000..efdcaaebc455
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4 \
+ -I $(srctree)/drivers/crypto/chelsio
+
+obj-$(CONFIG_CHELSIO_IPSEC_INLINE) += ch_ipsec.o
+ch_ipsec-objs := chcr_ipsec.o
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
index 967babd67a51..072299b14b8d 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.c
@@ -35,7 +35,7 @@
* Atul Gupta (atul.gupta@chelsio.com)
*/
-#define pr_fmt(fmt) "chcr:" fmt
+#define pr_fmt(fmt) "ch_ipsec: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
@@ -60,9 +60,7 @@
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
-#include "chcr_core.h"
-#include "chcr_algo.h"
-#include "chcr_crypto.h"
+#include "chcr_ipsec.h"
/*
* Max Tx descriptor space we allow for an Ethernet packet to be inlined
@@ -71,39 +69,80 @@
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE 8
-static int chcr_xfrm_add_state(struct xfrm_state *x);
-static void chcr_xfrm_del_state(struct xfrm_state *x);
-static void chcr_xfrm_free_state(struct xfrm_state *x);
-static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
-static void chcr_advance_esn_state(struct xfrm_state *x);
-
-static const struct xfrmdev_ops chcr_xfrmdev_ops = {
- .xdo_dev_state_add = chcr_xfrm_add_state,
- .xdo_dev_state_delete = chcr_xfrm_del_state,
- .xdo_dev_state_free = chcr_xfrm_free_state,
- .xdo_dev_offload_ok = chcr_ipsec_offload_ok,
- .xdo_dev_state_advance_esn = chcr_advance_esn_state,
+static LIST_HEAD(uld_ctx_list);
+static DEFINE_MUTEX(dev_mutex);
+
+static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state);
+static int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev);
+static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop);
+static void ch_ipsec_advance_esn_state(struct xfrm_state *x);
+static void ch_ipsec_xfrm_free_state(struct xfrm_state *x);
+static void ch_ipsec_xfrm_del_state(struct xfrm_state *x);
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x);
+
+static const struct xfrmdev_ops ch_ipsec_xfrmdev_ops = {
+ .xdo_dev_state_add = ch_ipsec_xfrm_add_state,
+ .xdo_dev_state_delete = ch_ipsec_xfrm_del_state,
+ .xdo_dev_state_free = ch_ipsec_xfrm_free_state,
+ .xdo_dev_offload_ok = ch_ipsec_offload_ok,
+ .xdo_dev_state_advance_esn = ch_ipsec_advance_esn_state,
};
-/* Add offload xfrms to Chelsio Interface */
-void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
+static struct cxgb4_uld_info ch_ipsec_uld_info = {
+ .name = CHIPSEC_DRV_MODULE_NAME,
+ .nrxq = MAX_ULD_QSETS,
+ /* Max ntxq will be derived from the fw config file */
+ .rxq_size = 1024,
+ .add = ch_ipsec_uld_add,
+ .state_change = ch_ipsec_uld_state_change,
+ .tx_handler = ch_ipsec_xmit,
+ .xfrmdev_ops = &ch_ipsec_xfrmdev_ops,
+};
+
+static void *ch_ipsec_uld_add(const struct cxgb4_lld_info *infop)
+{
+ struct ipsec_uld_ctx *u_ctx;
+
+ pr_info_once("%s - version %s\n", CHIPSEC_DRV_DESC,
+ CHIPSEC_DRV_VERSION);
+ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+ if (!u_ctx) {
+ u_ctx = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ u_ctx->lldi = *infop;
+out:
+ return u_ctx;
+}
+
+static int ch_ipsec_uld_state_change(void *handle, enum cxgb4_state new_state)
{
- struct net_device *netdev = NULL;
- int i;
-
- for (i = 0; i < lld->nports; i++) {
- netdev = lld->ports[i];
- if (!netdev)
- continue;
- netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
- netdev->hw_enc_features |= NETIF_F_HW_ESP;
- netdev->features |= NETIF_F_HW_ESP;
- netdev_change_features(netdev);
+ struct ipsec_uld_ctx *u_ctx = handle;
+
+ pr_debug("new_state %u\n", new_state);
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
+ mutex_lock(&dev_mutex);
+ list_add_tail(&u_ctx->entry, &uld_ctx_list);
+ mutex_unlock(&dev_mutex);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ case CXGB4_STATE_DOWN:
+ case CXGB4_STATE_DETACH:
+ pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
+ list_del(&u_ctx->entry);
+ break;
+ default:
+ break;
}
+
+ return 0;
}
-static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
- struct ipsec_sa_entry *sa_entry)
+static int ch_ipsec_setauthsize(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
{
int hmac_ctrl;
int authsize = x->aead->alg_icv_len / 8;
@@ -126,8 +165,8 @@ static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
return hmac_ctrl;
}
-static inline int chcr_ipsec_setkey(struct xfrm_state *x,
- struct ipsec_sa_entry *sa_entry)
+static int ch_ipsec_setkey(struct xfrm_state *x,
+ struct ipsec_sa_entry *sa_entry)
{
int keylen = (x->aead->alg_key_len + 7) / 8;
unsigned char *key = x->aead->alg_key;
@@ -185,65 +224,65 @@ out:
}
/*
- * chcr_xfrm_add_state
+ * ch_ipsec_xfrm_add_state
* returns 0 on success, negative error if failed to send message to FPGA,
* positive error if FPGA returned a bad response
*/
-static int chcr_xfrm_add_state(struct xfrm_state *x)
+static int ch_ipsec_xfrm_add_state(struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
int res = 0;
if (x->props.aalgo != SADB_AALG_NONE) {
- pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
+ pr_debug("Cannot offload authenticated xfrm states\n");
return -EINVAL;
}
if (x->props.calgo != SADB_X_CALG_NONE) {
- pr_debug("CHCR: Cannot offload compressed xfrm states\n");
+ pr_debug("Cannot offload compressed xfrm states\n");
return -EINVAL;
}
if (x->props.family != AF_INET &&
x->props.family != AF_INET6) {
- pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
+ pr_debug("Only IPv4/6 xfrm state offloaded\n");
return -EINVAL;
}
if (x->props.mode != XFRM_MODE_TRANSPORT &&
x->props.mode != XFRM_MODE_TUNNEL) {
- pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
+ pr_debug("Only transport and tunnel xfrm offload\n");
return -EINVAL;
}
if (x->id.proto != IPPROTO_ESP) {
- pr_debug("CHCR: Only ESP xfrm state offloaded\n");
+ pr_debug("Only ESP xfrm state offloaded\n");
return -EINVAL;
}
if (x->encap) {
- pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
+ pr_debug("Encapsulated xfrm state not offloaded\n");
return -EINVAL;
}
if (!x->aead) {
- pr_debug("CHCR: Cannot offload xfrm states without aead\n");
+ pr_debug("Cannot offload xfrm states without aead\n");
return -EINVAL;
}
if (x->aead->alg_icv_len != 128 &&
x->aead->alg_icv_len != 96) {
- pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
+ pr_debug("Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
return -EINVAL;
}
if ((x->aead->alg_key_len != 128 + 32) &&
(x->aead->alg_key_len != 256 + 32)) {
- pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ pr_debug("cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
return -EINVAL;
}
if (x->tfcpad) {
- pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
+ pr_debug("Cannot offload xfrm states with tfc padding\n");
return -EINVAL;
}
if (!x->geniv) {
- pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
+ pr_debug("Cannot offload xfrm states without geniv\n");
return -EINVAL;
}
if (strcmp(x->geniv, "seqiv")) {
- pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
+ pr_debug("Cannot offload xfrm states with geniv other than seqiv\n");
return -EINVAL;
}
@@ -253,24 +292,24 @@ static int chcr_xfrm_add_state(struct xfrm_state *x)
goto out;
}
- sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
+ sa_entry->hmac_ctrl = ch_ipsec_setauthsize(x, sa_entry);
if (x->props.flags & XFRM_STATE_ESN)
sa_entry->esn = 1;
- chcr_ipsec_setkey(x, sa_entry);
+ ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
try_module_get(THIS_MODULE);
out:
return res;
}
-static void chcr_xfrm_del_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_del_state(struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static void chcr_xfrm_free_state(struct xfrm_state *x)
+static void ch_ipsec_xfrm_free_state(struct xfrm_state *x)
{
struct ipsec_sa_entry *sa_entry;
@@ -282,7 +321,7 @@ static void chcr_xfrm_free_state(struct xfrm_state *x)
module_put(THIS_MODULE);
}
-static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+static bool ch_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
if (x->props.family == AF_INET) {
/* Offload with IP options is not supported yet */
@@ -296,15 +335,15 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
return true;
}
-static void chcr_advance_esn_state(struct xfrm_state *x)
+static void ch_ipsec_advance_esn_state(struct xfrm_state *x)
{
/* do nothing */
if (!x->xso.offload_handle)
return;
}
-static inline int is_eth_imm(const struct sk_buff *skb,
- struct ipsec_sa_entry *sa_entry)
+static int is_eth_imm(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry)
{
unsigned int kctx_len;
int hdrlen;
@@ -322,9 +361,9 @@ static inline int is_eth_imm(const struct sk_buff *skb,
return 0;
}
-static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
- struct ipsec_sa_entry *sa_entry,
- bool *immediate)
+static unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
+ struct ipsec_sa_entry *sa_entry,
+ bool *immediate)
{
unsigned int kctx_len;
unsigned int flits;
@@ -365,7 +404,7 @@ static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
return flits;
}
-inline void *copy_esn_pktxt(struct sk_buff *skb,
+static void *copy_esn_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos,
struct ipsec_sa_entry *sa_entry)
@@ -419,7 +458,7 @@ inline void *copy_esn_pktxt(struct sk_buff *skb,
return pos;
}
-inline void *copy_cpltx_pktxt(struct sk_buff *skb,
+static void *copy_cpltx_pktxt(struct sk_buff *skb,
struct net_device *dev,
void *pos,
struct ipsec_sa_entry *sa_entry)
@@ -463,10 +502,10 @@ inline void *copy_cpltx_pktxt(struct sk_buff *skb,
return pos;
}
-inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
- struct net_device *dev,
- void *pos,
- struct ipsec_sa_entry *sa_entry)
+static void *copy_key_cpltx_pktxt(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ struct ipsec_sa_entry *sa_entry)
{
struct _key_ctx *key_ctx;
int left, eoq, key_len;
@@ -511,11 +550,11 @@ inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
return pos;
}
-inline void *chcr_crypto_wreq(struct sk_buff *skb,
- struct net_device *dev,
- void *pos,
- int credits,
- struct ipsec_sa_entry *sa_entry)
+static void *ch_ipsec_crypto_wreq(struct sk_buff *skb,
+ struct net_device *dev,
+ void *pos,
+ int credits,
+ struct ipsec_sa_entry *sa_entry)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -538,7 +577,7 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
unsigned int kctx_len = sa_entry->kctx_len;
int qid = q->q.cntxt_id;
- atomic_inc(&adap->chcr_stats.ipsec_cnt);
+ atomic_inc(&adap->ch_ipsec_stats.ipsec_cnt);
flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
ndesc = DIV_ROUND_UP(flits, 2);
@@ -636,13 +675,13 @@ inline void *chcr_crypto_wreq(struct sk_buff *skb,
* Returns the number of Tx descriptors needed for the supplied number
* of flits.
*/
-static inline unsigned int flits_to_desc(unsigned int n)
+static unsigned int flits_to_desc(unsigned int n)
{
WARN_ON(n > SGE_MAX_WR_LEN / 8);
return DIV_ROUND_UP(n, 8);
}
-static inline unsigned int txq_avail(const struct sge_txq *q)
+static unsigned int txq_avail(const struct sge_txq *q)
{
return q->size - 1 - q->in_use;
}
@@ -653,7 +692,7 @@ static void eth_txq_stop(struct sge_eth_txq *q)
q->q.stops++;
}
-static inline void txq_advance(struct sge_txq *q, unsigned int n)
+static void txq_advance(struct sge_txq *q, unsigned int n)
{
q->in_use += n;
q->pidx += n;
@@ -662,9 +701,9 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
}
/*
- * chcr_ipsec_xmit called from ULD Tx handler
+ * ch_ipsec_xmit called from ULD Tx handler
*/
-int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
+int ch_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xfrm_state *x = xfrm_input_state(skb);
unsigned int last_desc, ndesc, flits = 0;
@@ -725,8 +764,8 @@ out_free: dev_kfree_skb_any(skb);
before = (u64 *)pos;
end = (u64 *)pos + flits;
/* Setup IPSec CPL */
- pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
- credits, sa_entry);
+ pos = (void *)ch_ipsec_crypto_wreq(skb, dev, (void *)pos,
+ credits, sa_entry);
if (before > (u64 *)pos) {
left = (u8 *)end - (u8 *)q->q.stat;
end = (void *)q->q.desc + left;
@@ -752,3 +791,35 @@ out_free: dev_kfree_skb_any(skb);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
return NETDEV_TX_OK;
}
+
+static int __init ch_ipsec_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_IPSEC, &ch_ipsec_uld_info);
+
+ return 0;
+}
+
+static void __exit ch_ipsec_exit(void)
+{
+ struct ipsec_uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+ adap = pci_get_drvdata(u_ctx->lldi.pdev);
+ atomic_set(&adap->ch_ipsec_stats.ipsec_cnt, 0);
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_IPSEC);
+}
+
+module_init(ch_ipsec_init);
+module_exit(ch_ipsec_exit);
+
+MODULE_DESCRIPTION("Crypto IPSEC for Chelsio Terminator cards.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(CHIPSEC_DRV_VERSION);
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
new file mode 100644
index 000000000000..1d110d2edd64
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ipsec/chcr_ipsec.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2018 Chelsio Communications, Inc. */
+
+#ifndef __CHCR_IPSEC_H__
+#define __CHCR_IPSEC_H__
+
+#include <crypto/algapi.h>
+#include "t4_hw.h"
+#include "cxgb4.h"
+#include "t4_msg.h"
+#include "cxgb4_uld.h"
+
+#include "chcr_core.h"
+#include "chcr_algo.h"
+#include "chcr_crypto.h"
+
+#define CHIPSEC_DRV_MODULE_NAME "ch_ipsec"
+#define CHIPSEC_DRV_VERSION "1.0.0.0-ko"
+#define CHIPSEC_DRV_DESC "Chelsio T6 Crypto IPsec Offload Driver"
+
+struct ipsec_uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+};
+
+struct chcr_ipsec_req {
+ struct ulp_txpkt ulptx;
+ struct ulptx_idata sc_imm;
+ struct cpl_tx_sec_pdu sec_cpl;
+ struct _key_ctx key_ctx;
+};
+
+struct chcr_ipsec_wr {
+ struct fw_ulptx_wr wreq;
+ struct chcr_ipsec_req req;
+};
+
+#define ESN_IV_INSERT_OFFSET 12
+struct chcr_ipsec_aadiv {
+ __be32 spi;
+ u8 seq_no[8];
+ u8 iv[8];
+};
+
+struct ipsec_sa_entry {
+ int hmac_ctrl;
+ u16 esn;
+ u16 resv;
+ unsigned int enckey_len;
+ unsigned int kctx_len;
+ unsigned int authsize;
+ __be32 key_ctx_hdr;
+ char salt[MAX_SALT];
+ char key[2 * AES_MAX_KEY_SIZE];
+};
+
+#endif /* __CHCR_IPSEC_H__ */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile
new file mode 100644
index 000000000000..5e7d161c3199
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I $(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_CHELSIO_TLS_DEVICE) += ch_ktls.o
+ch_ktls-objs := chcr_ktls.o
diff --git a/drivers/crypto/chelsio/chcr_common.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
index 33f589cbfba1..38319f4c3121 100644
--- a/drivers/crypto/chelsio/chcr_common.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_common.h
@@ -18,28 +18,6 @@
#define CHCR_SCMD_AUTH_MODE_GHASH 4
#define AES_BLOCK_LEN 16
-enum chcr_state {
- CHCR_INIT = 0,
- CHCR_ATTACH,
- CHCR_DETACH,
-};
-
-struct chcr_dev {
- spinlock_t lock_chcr_dev; /* chcr dev structure lock */
- enum chcr_state state;
- atomic_t inflight;
- int wqretry;
- struct delayed_work detach_work;
- struct completion detach_comp;
- unsigned char tx_channel_id;
-};
-
-struct uld_ctx {
- struct list_head entry;
- struct cxgb4_lld_info lldi;
- struct chcr_dev dev;
-};
-
struct ktls_key_ctx {
__be32 ctx_hdr;
u8 salt[CHCR_MAX_SALT];
@@ -77,8 +55,6 @@ struct ktls_key_ctx {
KEY_CONTEXT_SALT_PRESENT_F | \
KEY_CONTEXT_CTX_LEN_V((ctx_len)))
-struct uld_ctx *assign_chcr_device(void);
-
static inline void *chcr_copy_to_txd(const void *src, const struct sge_txq *q,
void *pos, int length)
{
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index c5cce024886a..5195f692f14d 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -1,10 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/skbuff.h>
+#include <linux/module.h>
#include <linux/highmem.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <linux/netdevice.h>
#include "chcr_ktls.h"
-#include "clip_tbl.h"
+
+static LIST_HEAD(uld_ctx_list);
+static DEFINE_MUTEX(dev_mutex);
static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info);
/*
@@ -117,60 +125,6 @@ out:
return ret;
}
-static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
- int new_state)
-{
- /* This function can be called from both rx (interrupt context) and tx
- * queue contexts.
- */
- spin_lock_bh(&tx_info->lock);
- switch (tx_info->connection_state) {
- case KTLS_CONN_CLOSED:
- tx_info->connection_state = new_state;
- break;
-
- case KTLS_CONN_ACT_OPEN_REQ:
- /* only go forward if state is greater than current state. */
- if (new_state <= tx_info->connection_state)
- break;
- /* update to the next state and also initialize TCB */
- tx_info->connection_state = new_state;
- fallthrough;
- case KTLS_CONN_ACT_OPEN_RPL:
- /* if we are stuck in this state, means tcb init might not
- * received by HW, try sending it again.
- */
- if (!chcr_init_tcb_fields(tx_info))
- tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
- break;
-
- case KTLS_CONN_SET_TCB_REQ:
- /* only go forward if state is greater than current state. */
- if (new_state <= tx_info->connection_state)
- break;
- /* update to the next state and check if l2t_state is valid */
- tx_info->connection_state = new_state;
- fallthrough;
- case KTLS_CONN_SET_TCB_RPL:
- /* Check if l2t state is valid, then move to ready state. */
- if (cxgb4_check_l2t_valid(tx_info->l2te)) {
- tx_info->connection_state = KTLS_CONN_TX_READY;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ctx);
- }
- break;
-
- case KTLS_CONN_TX_READY:
- /* nothing to be done here */
- break;
-
- default:
- pr_err("unknown KTLS connection state\n");
- break;
- }
- spin_unlock_bh(&tx_info->lock);
-
- return tx_info->connection_state;
-}
/*
* chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
* @sk - tcp socket.
@@ -290,27 +244,17 @@ static int chcr_setup_connection(struct sock *sk,
return -EINVAL;
tx_info->atid = atid;
- tx_info->ip_family = sk->sk_family;
- if (sk->sk_family == AF_INET) {
- tx_info->ip_family = AF_INET;
+ if (tx_info->ip_family == AF_INET) {
ret = chcr_ktls_act_open_req(sk, tx_info, atid);
#if IS_ENABLED(CONFIG_IPV6)
} else {
- if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
- tx_info->ip_family = AF_INET;
- ret = chcr_ktls_act_open_req(sk, tx_info, atid);
- } else {
- tx_info->ip_family = AF_INET6;
- ret = cxgb4_clip_get(tx_info->netdev,
- (const u32 *)
- &sk->sk_v6_rcv_saddr.s6_addr,
- 1);
- if (ret)
- goto out;
- ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
- }
+ ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
+ &sk->sk_v6_rcv_saddr,
+ 1);
+ if (ret)
+ return ret;
+ ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
#endif
}
@@ -318,16 +262,21 @@ static int chcr_setup_connection(struct sock *sk,
* success, if any other return type clear atid and return that failure.
*/
if (ret) {
- if (ret == NET_XMIT_CN)
+ if (ret == NET_XMIT_CN) {
ret = 0;
- else
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(tx_info->netdev,
+ (const u32 *)
+ &sk->sk_v6_rcv_saddr,
+ 1);
+#endif
cxgb4_free_atid(t, atid);
- goto out;
+ }
}
- /* update the connection state */
- chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
-out:
return ret;
}
@@ -381,22 +330,17 @@ static int chcr_ktls_mark_tcb_close(struct chcr_ktls_info *tx_info)
* @tls_ctx - tls context.
* @direction - TX/RX crypto direction
*/
-void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction)
+static void chcr_ktls_dev_del(struct net_device *netdev,
+ struct tls_context *tls_ctx,
+ enum tls_offload_ctx_dir direction)
{
struct chcr_ktls_ofld_ctx_tx *tx_ctx =
chcr_get_ktls_tx_context(tls_ctx);
struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
- struct sock *sk;
+ struct ch_ktls_port_stats_debug *port_stats;
if (!tx_info)
return;
- sk = tx_info->sk;
-
- spin_lock(&tx_info->lock);
- tx_info->connection_state = KTLS_CONN_CLOSED;
- spin_unlock(&tx_info->lock);
/* clear l2t entry */
if (tx_info->l2te)
@@ -405,8 +349,8 @@ void chcr_ktls_dev_del(struct net_device *netdev,
#if IS_ENABLED(CONFIG_IPV6)
/* clear clip entry */
if (tx_info->ip_family == AF_INET6)
- cxgb4_clip_release(netdev,
- (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+ cxgb4_clip_release(netdev, (const u32 *)
+ &tx_info->sk->sk_v6_rcv_saddr,
1);
#endif
@@ -418,7 +362,8 @@ void chcr_ktls_dev_del(struct net_device *netdev,
tx_info->tid, tx_info->ip_family);
}
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_connection_close);
+ port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_close);
kvfree(tx_info);
tx_ctx->chcr_info = NULL;
/* release module refcount */
@@ -434,12 +379,13 @@ void chcr_ktls_dev_del(struct net_device *netdev,
* @direction - TX/RX crypto direction
* return: SUCCESS/FAILURE.
*/
-int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn)
+static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
+ enum tls_offload_ctx_dir direction,
+ struct tls_crypto_info *crypto_info,
+ u32 start_offload_tcp_sn)
{
struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
struct chcr_ktls_info *tx_info;
struct dst_entry *dst;
@@ -453,30 +399,23 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
pi = netdev_priv(netdev);
adap = pi->adapter;
+ port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
+ atomic64_inc(&port_stats->ktls_tx_connection_open);
+
if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
pr_err("not expecting for RX direction\n");
- ret = -EINVAL;
goto out;
}
- if (tx_ctx->chcr_info) {
- ret = -EINVAL;
+
+ if (tx_ctx->chcr_info)
goto out;
- }
tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
- if (!tx_info) {
- ret = -ENOMEM;
+ if (!tx_info)
goto out;
- }
-
- spin_lock_init(&tx_info->lock);
-
- /* clear connection state */
- spin_lock(&tx_info->lock);
- tx_info->connection_state = KTLS_CONN_CLOSED;
- spin_unlock(&tx_info->lock);
tx_info->sk = sk;
+ spin_lock_init(&tx_info->lock);
/* initialize tid and atid to -1; 0 is also a valid id. */
tx_info->tid = -1;
tx_info->atid = -1;
@@ -487,10 +426,12 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
tx_info->tx_chan = pi->tx_chan;
tx_info->smt_idx = pi->smt_idx;
tx_info->port_id = pi->port_id;
+ tx_info->prev_ack = 0;
+ tx_info->prev_win = 0;
tx_info->rx_qid = chcr_get_first_rx_qid(adap);
if (unlikely(tx_info->rx_qid < 0))
- goto out2;
+ goto free_tx_info;
tx_info->prev_seq = start_offload_tcp_sn;
tx_info->tcp_start_seq_number = start_offload_tcp_sn;
@@ -498,18 +439,22 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
/* save crypto keys */
ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
if (ret < 0)
- goto out2;
+ goto free_tx_info;
/* get peer ip */
if (sk->sk_family == AF_INET) {
memcpy(daaddr, &sk->sk_daddr, 4);
+ tx_info->ip_family = AF_INET;
#if IS_ENABLED(CONFIG_IPV6)
} else {
if (!sk->sk_ipv6only &&
- ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
+ ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
memcpy(daaddr, &sk->sk_daddr, 4);
- else
+ tx_info->ip_family = AF_INET;
+ } else {
memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+ tx_info->ip_family = AF_INET6;
+ }
#endif
}
@@ -517,13 +462,13 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
dst = sk_dst_get(sk);
if (!dst) {
pr_err("DST entry not found\n");
- goto out2;
+ goto free_tx_info;
}
n = dst_neigh_lookup(dst, daaddr);
if (!n || !n->dev) {
pr_err("neighbour not found\n");
dst_release(dst);
- goto out2;
+ goto free_tx_info;
}
tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
@@ -532,31 +477,86 @@ int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
if (!tx_info->l2te) {
pr_err("l2t entry not found\n");
- goto out2;
+ goto free_tx_info;
}
- tx_ctx->chcr_info = tx_info;
+ /* Driver must not be removed while any connection exists */
+ if (!try_module_get(THIS_MODULE))
+ goto free_l2t;
+ init_completion(&tx_info->completion);
/* create a filter and call cxgb4_l2t_send to send the packet out, which
* will take care of updating l2t entry in hw if not already done.
*/
- ret = chcr_setup_connection(sk, tx_info);
- if (ret)
- goto out2;
+ tx_info->open_state = CH_KTLS_OPEN_PENDING;
- /* Driver shouldn't be removed until any single connection exists */
- if (!try_module_get(THIS_MODULE)) {
- ret = -EINVAL;
- goto out2;
+ if (chcr_setup_connection(sk, tx_info))
+ goto put_module;
+
+ /* Wait for reply */
+ wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
+ spin_lock_bh(&tx_info->lock);
+ if (tx_info->open_state) {
+ /* need to wait for hw response, can't free tx_info yet. */
+ if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
+ tx_info->pending_close = true;
+ /* the lock is released in the cleanup path below */
+ goto put_module;
}
+ spin_unlock_bh(&tx_info->lock);
+
+ /* initialize tcb */
+ reinit_completion(&tx_info->completion);
+ /* mark it pending for hw response */
+ tx_info->open_state = CH_KTLS_OPEN_PENDING;
+
+ if (chcr_init_tcb_fields(tx_info))
+ goto free_tid;
+
+ /* Wait for reply */
+ wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
+ spin_lock_bh(&tx_info->lock);
+ if (tx_info->open_state) {
+ /* need to wait for hw response, can't free tx_info yet. */
+ tx_info->pending_close = true;
+ /* the lock is released in the cleanup path below */
+ goto free_tid;
+ }
+ spin_unlock_bh(&tx_info->lock);
+
+ if (!cxgb4_check_l2t_valid(tx_info->l2te))
+ goto free_tid;
+
+ atomic64_inc(&port_stats->ktls_tx_ctx);
+ tx_ctx->chcr_info = tx_info;
- atomic64_inc(&adap->chcr_stats.ktls_tx_connection_open);
return 0;
-out2:
- kvfree(tx_info);
+
+free_tid:
+ chcr_ktls_mark_tcb_close(tx_info);
+#if IS_ENABLED(CONFIG_IPV6)
+ /* clear clip entry */
+ if (tx_info->ip_family == AF_INET6)
+ cxgb4_clip_release(netdev, (const u32 *)
+ &sk->sk_v6_rcv_saddr,
+ 1);
+#endif
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tx_info->tid, tx_info->ip_family);
+
+put_module:
+ /* release module refcount */
+ module_put(THIS_MODULE);
+free_l2t:
+ cxgb4_l2t_release(tx_info->l2te);
+free_tx_info:
+ if (tx_info->pending_close)
+ spin_unlock_bh(&tx_info->lock);
+ else
+ kvfree(tx_info);
out:
- atomic64_inc(&adap->chcr_stats.ktls_tx_connection_fail);
- return ret;
+ atomic64_inc(&port_stats->ktls_tx_connection_fail);
+ return -1;
}
/*
@@ -603,7 +603,8 @@ static int chcr_init_tcb_fields(struct chcr_ktls_info *tx_info)
/*
* chcr_ktls_cpl_act_open_rpl: connection reply received from TP.
*/
-int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
+static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
+ unsigned char *input)
{
const struct cpl_act_open_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
@@ -618,27 +619,46 @@ int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input)
tx_info = lookup_atid(t, atid);
if (!tx_info || tx_info->atid != atid) {
- pr_err("tx_info or atid is not correct\n");
+ pr_err("%s: incorrect tx_info or atid\n", __func__);
return -1;
}
+ cxgb4_free_atid(t, atid);
+ tx_info->atid = -1;
+
+ spin_lock(&tx_info->lock);
+ /* HW response is very close, finish pending cleanup */
+ if (tx_info->pending_close) {
+ spin_unlock(&tx_info->lock);
+ if (!status) {
+ /* it's a late success; the tcb is established,
+ * so mark it closed.
+ */
+ chcr_ktls_mark_tcb_close(tx_info);
+ cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+ tid, tx_info->ip_family);
+ }
+ kvfree(tx_info);
+ return 0;
+ }
+
if (!status) {
tx_info->tid = tid;
cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
-
- cxgb4_free_atid(t, atid);
- tx_info->atid = -1;
- /* update the connection state */
- chcr_ktls_update_connection_state(tx_info,
- KTLS_CONN_ACT_OPEN_RPL);
+ tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+ } else {
+ tx_info->open_state = CH_KTLS_OPEN_FAILURE;
}
+ spin_unlock(&tx_info->lock);
+
+ complete(&tx_info->completion);
return 0;
}
/*
* chcr_ktls_cpl_set_tcb_rpl: TCB reply received from TP.
*/
-int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
+static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
{
const struct cpl_set_tcb_rpl *p = (void *)input;
struct chcr_ktls_info *tx_info = NULL;
@@ -649,12 +669,22 @@ int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
t = &adap->tids;
tx_info = lookup_tid(t, tid);
+
if (!tx_info || tx_info->tid != tid) {
- pr_err("tx_info or atid is not correct\n");
+ pr_err("%s: incorrect tx_info or tid\n", __func__);
return -1;
}
- /* update the connection state */
- chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
+
+ spin_lock(&tx_info->lock);
+ if (tx_info->pending_close) {
+ spin_unlock(&tx_info->lock);
+ kvfree(tx_info);
+ return 0;
+ }
+ tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+ spin_unlock(&tx_info->lock);
+
+ complete(&tx_info->completion);
return 0;
}
@@ -756,6 +786,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
u64 tcp_ack, u64 tcp_win)
{
bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
+ struct ch_ktls_port_stats_debug *port_stats;
u32 len, cpl = 0, ndesc, wr_len;
struct fw_ulptx_wr *wr;
int credits;
@@ -789,12 +820,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
/* reset snd una if it's a re-transmit pkt */
if (tcp_seq != tx_info->prev_seq) {
/* reset snd_una */
+ port_stats =
+ &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
TCB_SND_UNA_RAW_W,
TCB_SND_UNA_RAW_V
(TCB_SND_UNA_RAW_M),
TCB_SND_UNA_RAW_V(0), 0);
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_ooo);
+ atomic64_inc(&port_stats->ktls_tx_ooo);
cpl++;
}
/* update ack */
@@ -1222,7 +1255,7 @@ static int chcr_ktls_xmit_wr_complete(struct sk_buff *skb,
chcr_txq_advance(&q->q, ndesc);
cxgb4_ring_tx_db(adap, &q->q, ndesc);
- atomic64_inc(&adap->chcr_stats.ktls_tx_send_records);
+ atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records);
return 0;
}
@@ -1633,7 +1666,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
/* check if it is a complete record */
if (tls_end_offset == record->len) {
nskb = skb;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_complete_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_complete_pkts);
} else {
dev_kfree_skb_any(skb);
@@ -1651,7 +1684,7 @@ static int chcr_end_part_handler(struct chcr_ktls_info *tx_info,
*/
if (chcr_ktls_update_snd_una(tx_info, q))
goto out;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_end_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_end_pkts);
}
if (chcr_ktls_xmit_wr_complete(nskb, tx_info, q, tcp_seq,
@@ -1722,7 +1755,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
/* free the last trimmed portion */
dev_kfree_skb_any(skb);
skb = tmp_skb;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_trimmed_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_trimmed_pkts);
}
data_len = skb->data_len;
/* check if the middle record's start point is 16 byte aligned. CTR
@@ -1794,7 +1827,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
*/
if (chcr_ktls_update_snd_una(tx_info, q))
goto out;
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_middle_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_middle_pkts);
} else {
/* Else means, its a partial first part of the record. Check if
* its only the header, don't need to send for encryption then.
@@ -1809,7 +1842,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info,
}
return 0;
}
- atomic64_inc(&tx_info->adap->chcr_stats.ktls_tx_start_pkts);
+ atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_start_pkts);
}
if (chcr_ktls_xmit_wr_short(skb, tx_info, q, tcp_seq, tcp_push_no_fin,
@@ -1825,18 +1858,18 @@ out:
}
/* nic tls TX handler */
-int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
+static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct ch_ktls_port_stats_debug *port_stats;
struct chcr_ktls_ofld_ctx_tx *tx_ctx;
+ struct ch_ktls_stats_debug *stats;
struct tcphdr *th = tcp_hdr(skb);
int data_len, qidx, ret = 0, mss;
struct tls_record_info *record;
- struct chcr_stats_debug *stats;
struct chcr_ktls_info *tx_info;
u32 tls_end_offset, tcp_seq;
struct tls_context *tls_ctx;
struct sk_buff *local_skb;
- int new_connection_state;
struct sge_eth_txq *q;
struct adapter *adap;
unsigned long flags;
@@ -1859,15 +1892,6 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(!tx_info))
goto out;
- /* check the connection state, we don't need to pass new connection
- * state, state machine will check and update the new state if it is
- * stuck due to responses not received from HW.
- * Start the tx handling only if state is KTLS_CONN_TX_READY.
- */
- new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
- if (new_connection_state != KTLS_CONN_TX_READY)
- goto out;
-
/* don't touch the original skb, make a new skb to extract each records
* and send them separately.
*/
@@ -1877,7 +1901,8 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
adap = tx_info->adap;
- stats = &adap->chcr_stats;
+ stats = &adap->ch_ktls_stats;
+ port_stats = &stats->ktls_port[tx_info->port_id];
qidx = skb->queue_mapping;
q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
@@ -1923,13 +1948,13 @@ int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
*/
if (unlikely(!record)) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
+ atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
goto out;
}
if (unlikely(tls_record_is_start_marker(record))) {
spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
- atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
+ atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
goto out;
}
@@ -2000,9 +2025,8 @@ clear_ref:
} while (data_len > 0);
tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
-
- atomic64_inc(&stats->ktls_tx_encrypted_packets);
- atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+ atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
+ atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
/* tcp finish is set, send a separate tcp msg including all the options
* as well.
@@ -2014,4 +2038,117 @@ out:
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
-#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+
+static void *chcr_ktls_uld_add(const struct cxgb4_lld_info *lldi)
+{
+ struct chcr_ktls_uld_ctx *u_ctx;
+
+ pr_info_once("%s - version %s\n", CHCR_KTLS_DRV_DESC,
+ CHCR_KTLS_DRV_VERSION);
+ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+ if (!u_ctx) {
+ u_ctx = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ u_ctx->lldi = *lldi;
+out:
+ return u_ctx;
+}
+
+static const struct tlsdev_ops chcr_ktls_ops = {
+ .tls_dev_add = chcr_ktls_dev_add,
+ .tls_dev_del = chcr_ktls_dev_del,
+};
+
+static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
+ [CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
+ [CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
+};
+
+static int chcr_ktls_uld_rx_handler(void *handle, const __be64 *rsp,
+ const struct pkt_gl *pgl)
+{
+ const struct cpl_act_open_rpl *rpl = (struct cpl_act_open_rpl *)rsp;
+ struct chcr_ktls_uld_ctx *u_ctx = handle;
+ u8 opcode = rpl->ot.opcode;
+ struct adapter *adap;
+
+ adap = pci_get_drvdata(u_ctx->lldi.pdev);
+
+ if (!work_handlers[opcode]) {
+ pr_err("Unsupported opcode %d received\n", opcode);
+ return 0;
+ }
+
+ work_handlers[opcode](adap, (unsigned char *)&rsp[1]);
+ return 0;
+}
+
+static int chcr_ktls_uld_state_change(void *handle, enum cxgb4_state new_state)
+{
+ struct chcr_ktls_uld_ctx *u_ctx = handle;
+
+ switch (new_state) {
+ case CXGB4_STATE_UP:
+ pr_info("%s: Up\n", pci_name(u_ctx->lldi.pdev));
+ mutex_lock(&dev_mutex);
+ list_add_tail(&u_ctx->entry, &uld_ctx_list);
+ mutex_unlock(&dev_mutex);
+ break;
+ case CXGB4_STATE_START_RECOVERY:
+ case CXGB4_STATE_DOWN:
+ case CXGB4_STATE_DETACH:
+ pr_info("%s: Down\n", pci_name(u_ctx->lldi.pdev));
+ mutex_lock(&dev_mutex);
+ list_del(&u_ctx->entry);
+ mutex_unlock(&dev_mutex);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct cxgb4_uld_info chcr_ktls_uld_info = {
+ .name = CHCR_KTLS_DRV_MODULE_NAME,
+ .nrxq = 1,
+ .rxq_size = 1024,
+ .add = chcr_ktls_uld_add,
+ .tx_handler = chcr_ktls_xmit,
+ .rx_handler = chcr_ktls_uld_rx_handler,
+ .state_change = chcr_ktls_uld_state_change,
+ .tlsdev_ops = &chcr_ktls_ops,
+};
+
+static int __init chcr_ktls_init(void)
+{
+ cxgb4_register_uld(CXGB4_ULD_KTLS, &chcr_ktls_uld_info);
+ return 0;
+}
+
+static void __exit chcr_ktls_exit(void)
+{
+ struct chcr_ktls_uld_ctx *u_ctx, *tmp;
+ struct adapter *adap;
+
+ pr_info("driver unloaded\n");
+
+ mutex_lock(&dev_mutex);
+ list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+ adap = pci_get_drvdata(u_ctx->lldi.pdev);
+ memset(&adap->ch_ktls_stats, 0, sizeof(adap->ch_ktls_stats));
+ list_del(&u_ctx->entry);
+ kfree(u_ctx);
+ }
+ mutex_unlock(&dev_mutex);
+ cxgb4_unregister_uld(CXGB4_ULD_KTLS);
+}
+
+module_init(chcr_ktls_init);
+module_exit(chcr_ktls_exit);
+
+MODULE_DESCRIPTION("Chelsio NIC TLS ULD driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(CHCR_KTLS_DRV_VERSION);
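[Editor's note] The new ULD rx path above dispatches CPL replies through an opcode-indexed table (work_handlers). A standalone sketch of the same pattern, showing why only a NULL-slot check is needed before the indirect call; the opcode values and handler names here are illustrative, not taken from the driver:

/* Illustrative sketch of the opcode dispatch used by
 * chcr_ktls_uld_rx_handler(); handler names and opcode values
 * are examples only.
 */
#include <linux/kernel.h>

#define NUM_OPCODES 256	/* a u8 opcode can never overrun this table */

typedef int (*handler_func)(void *adap, unsigned char *input);

static int open_rpl_handler(void *adap, unsigned char *input) { return 0; }
static int tcb_rpl_handler(void *adap, unsigned char *input)  { return 0; }

static handler_func handlers[NUM_OPCODES] = {
	[0x25] = open_rpl_handler,	/* e.g. CPL_ACT_OPEN_RPL */
	[0x3a] = tcb_rpl_handler,	/* e.g. CPL_SET_TCB_RPL */
};

static int dispatch(u8 opcode, void *adap, unsigned char *input)
{
	if (!handlers[opcode]) {	/* unpopulated slot, not an OOB risk */
		pr_err("unsupported opcode %d\n", opcode);
		return 0;
	}
	return handlers[opcode](adap, input);
}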
diff --git a/drivers/crypto/chelsio/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
index 5cbd84b1da05..c1651b1431a0 100644
--- a/drivers/crypto/chelsio/chcr_ktls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h
@@ -4,14 +4,17 @@
#ifndef __CHCR_KTLS_H__
#define __CHCR_KTLS_H__
-#ifdef CONFIG_CHELSIO_TLS_DEVICE
-#include <net/tls.h>
#include "cxgb4.h"
#include "t4_msg.h"
#include "t4_tcb.h"
#include "l2t.h"
#include "chcr_common.h"
#include "cxgb4_uld.h"
+#include "clip_tbl.h"
+
+#define CHCR_KTLS_DRV_MODULE_NAME "ch_ktls"
+#define CHCR_KTLS_DRV_VERSION "1.0.0.0-ko"
+#define CHCR_KTLS_DRV_DESC "Chelsio NIC TLS ULD Driver"
#define CHCR_TCB_STATE_CLOSED 0
#define CHCR_KTLS_KEY_CTX_LEN 16
@@ -24,22 +27,20 @@
#define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
sizeof(struct cpl_tx_sec_pdu))
-enum chcr_ktls_conn_state {
- KTLS_CONN_CLOSED,
- KTLS_CONN_ACT_OPEN_REQ,
- KTLS_CONN_ACT_OPEN_RPL,
- KTLS_CONN_SET_TCB_REQ,
- KTLS_CONN_SET_TCB_RPL,
- KTLS_CONN_TX_READY,
+enum ch_ktls_open_state {
+ CH_KTLS_OPEN_SUCCESS = 0,
+ CH_KTLS_OPEN_PENDING = 1,
+ CH_KTLS_OPEN_FAILURE = 2,
};
struct chcr_ktls_info {
struct sock *sk;
- spinlock_t lock; /* state machine lock */
+ spinlock_t lock; /* lock for pending_close */
struct ktls_key_ctx key_ctx;
struct adapter *adap;
struct l2t_entry *l2te;
struct net_device *netdev;
+ struct completion completion;
u64 iv;
u64 record_no;
int tid;
@@ -55,13 +56,14 @@ struct chcr_ktls_info {
u32 tcp_start_seq_number;
u32 scmd0_short_seqno_numivs;
u32 scmd0_short_ivgen_hdrlen;
- enum chcr_ktls_conn_state connection_state;
u16 prev_win;
u8 tx_chan;
u8 smt_idx;
u8 port_id;
u8 ip_family;
u8 first_qset;
+ enum ch_ktls_open_state open_state;
+ bool pending_close;
};
struct chcr_ktls_ofld_ctx_tx {
@@ -69,6 +71,11 @@ struct chcr_ktls_ofld_ctx_tx {
struct chcr_ktls_info *chcr_info;
};
+struct chcr_ktls_uld_ctx {
+ struct list_head entry;
+ struct cxgb4_lld_info lldi;
+};
+
static inline struct chcr_ktls_ofld_ctx_tx *
chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
{
@@ -82,22 +89,12 @@ chcr_get_ktls_tx_context(struct tls_context *tls_ctx)
static inline int chcr_get_first_rx_qid(struct adapter *adap)
{
/* u_ctx is saved in adap, fetch it */
- struct uld_ctx *u_ctx = adap->uld[CXGB4_ULD_CRYPTO].handle;
+ struct chcr_ktls_uld_ctx *u_ctx = adap->uld[CXGB4_ULD_KTLS].handle;
if (!u_ctx)
return -1;
return u_ctx->lldi.rxq_ids[0];
}
-int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input);
-int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev);
-int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
- enum tls_offload_ctx_dir direction,
- struct tls_crypto_info *crypto_info,
- u32 start_offload_tcp_sn);
-void chcr_ktls_dev_del(struct net_device *netdev,
- struct tls_context *tls_ctx,
- enum tls_offload_ctx_dir direction);
-#endif /* CONFIG_CHELSIO_TLS_DEVICE */
+typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
#endif /* __CHCR_KTLS_H__ */
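[Editor's note] The completion/open_state/pending_close fields added above replace the removed connection_state machine: the submitter can block on a completion that the CPL reply handlers signal. A minimal sketch of that handshake, assuming a hypothetical send_act_open_req() helper that is not part of this driver:

/* Minimal sketch of the handshake the new fields enable;
 * send_act_open_req() is hypothetical.
 */
static int ktls_open_and_wait(struct chcr_ktls_info *tx_info)
{
	init_completion(&tx_info->completion);
	tx_info->open_state = CH_KTLS_OPEN_PENDING;

	if (send_act_open_req(tx_info))		/* hypothetical helper */
		return -EIO;

	/* the CPL_ACT_OPEN_RPL handler records SUCCESS/FAILURE in
	 * open_state and then signals complete(&tx_info->completion)
	 */
	wait_for_completion(&tx_info->completion);

	return tx_info->open_state == CH_KTLS_OPEN_SUCCESS ? 0 : -EBUSY;
}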
diff --git a/drivers/crypto/chelsio/chtls/Makefile b/drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile
index bc11495acdb3..bc11495acdb3 100644
--- a/drivers/crypto/chelsio/chtls/Makefile
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/Makefile
diff --git a/drivers/crypto/chelsio/chtls/chtls.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
index 459442704eb1..2d3dfdd2a716 100644
--- a/drivers/crypto/chelsio/chtls/chtls.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls.h
@@ -32,6 +32,94 @@
#include "chcr_core.h"
#include "chcr_crypto.h"
+#define CHTLS_DRV_VERSION "1.0.0.0-ko"
+
+#define TLS_KEYCTX_RXFLIT_CNT_S 24
+#define TLS_KEYCTX_RXFLIT_CNT_V(x) ((x) << TLS_KEYCTX_RXFLIT_CNT_S)
+
+#define TLS_KEYCTX_RXPROT_VER_S 20
+#define TLS_KEYCTX_RXPROT_VER_M 0xf
+#define TLS_KEYCTX_RXPROT_VER_V(x) ((x) << TLS_KEYCTX_RXPROT_VER_S)
+
+#define TLS_KEYCTX_RXCIPH_MODE_S 16
+#define TLS_KEYCTX_RXCIPH_MODE_M 0xf
+#define TLS_KEYCTX_RXCIPH_MODE_V(x) ((x) << TLS_KEYCTX_RXCIPH_MODE_S)
+
+#define TLS_KEYCTX_RXAUTH_MODE_S 12
+#define TLS_KEYCTX_RXAUTH_MODE_M 0xf
+#define TLS_KEYCTX_RXAUTH_MODE_V(x) ((x) << TLS_KEYCTX_RXAUTH_MODE_S)
+
+#define TLS_KEYCTX_RXCIAU_CTRL_S 11
+#define TLS_KEYCTX_RXCIAU_CTRL_V(x) ((x) << TLS_KEYCTX_RXCIAU_CTRL_S)
+
+#define TLS_KEYCTX_RX_SEQCTR_S 9
+#define TLS_KEYCTX_RX_SEQCTR_M 0x3
+#define TLS_KEYCTX_RX_SEQCTR_V(x) ((x) << TLS_KEYCTX_RX_SEQCTR_S)
+
+#define TLS_KEYCTX_RX_VALID_S 8
+#define TLS_KEYCTX_RX_VALID_V(x) ((x) << TLS_KEYCTX_RX_VALID_S)
+
+#define TLS_KEYCTX_RXCK_SIZE_S 3
+#define TLS_KEYCTX_RXCK_SIZE_M 0x7
+#define TLS_KEYCTX_RXCK_SIZE_V(x) ((x) << TLS_KEYCTX_RXCK_SIZE_S)
+
+#define TLS_KEYCTX_RXMK_SIZE_S 0
+#define TLS_KEYCTX_RXMK_SIZE_M 0x7
+#define TLS_KEYCTX_RXMK_SIZE_V(x) ((x) << TLS_KEYCTX_RXMK_SIZE_S)
+
+#define KEYCTX_TX_WR_IV_S 55
+#define KEYCTX_TX_WR_IV_M 0x1ffULL
+#define KEYCTX_TX_WR_IV_V(x) ((x) << KEYCTX_TX_WR_IV_S)
+#define KEYCTX_TX_WR_IV_G(x) \
+ (((x) >> KEYCTX_TX_WR_IV_S) & KEYCTX_TX_WR_IV_M)
+
+#define KEYCTX_TX_WR_AAD_S 47
+#define KEYCTX_TX_WR_AAD_M 0xffULL
+#define KEYCTX_TX_WR_AAD_V(x) ((x) << KEYCTX_TX_WR_AAD_S)
+#define KEYCTX_TX_WR_AAD_G(x) (((x) >> KEYCTX_TX_WR_AAD_S) & \
+ KEYCTX_TX_WR_AAD_M)
+
+#define KEYCTX_TX_WR_AADST_S 39
+#define KEYCTX_TX_WR_AADST_M 0xffULL
+#define KEYCTX_TX_WR_AADST_V(x) ((x) << KEYCTX_TX_WR_AADST_S)
+#define KEYCTX_TX_WR_AADST_G(x) \
+ (((x) >> KEYCTX_TX_WR_AADST_S) & KEYCTX_TX_WR_AADST_M)
+
+#define KEYCTX_TX_WR_CIPHER_S 30
+#define KEYCTX_TX_WR_CIPHER_M 0x1ffULL
+#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
+#define KEYCTX_TX_WR_CIPHER_G(x) \
+ (((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)
+
+#define KEYCTX_TX_WR_CIPHERST_S 23
+#define KEYCTX_TX_WR_CIPHERST_M 0x7f
+#define KEYCTX_TX_WR_CIPHERST_V(x) ((x) << KEYCTX_TX_WR_CIPHERST_S)
+#define KEYCTX_TX_WR_CIPHERST_G(x) \
+ (((x) >> KEYCTX_TX_WR_CIPHERST_S) & KEYCTX_TX_WR_CIPHERST_M)
+
+#define KEYCTX_TX_WR_AUTH_S 14
+#define KEYCTX_TX_WR_AUTH_M 0x1ff
+#define KEYCTX_TX_WR_AUTH_V(x) ((x) << KEYCTX_TX_WR_AUTH_S)
+#define KEYCTX_TX_WR_AUTH_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTH_S) & KEYCTX_TX_WR_AUTH_M)
+
+#define KEYCTX_TX_WR_AUTHST_S 7
+#define KEYCTX_TX_WR_AUTHST_M 0x7f
+#define KEYCTX_TX_WR_AUTHST_V(x) ((x) << KEYCTX_TX_WR_AUTHST_S)
+#define KEYCTX_TX_WR_AUTHST_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTHST_S) & KEYCTX_TX_WR_AUTHST_M)
+
+#define KEYCTX_TX_WR_AUTHIN_S 0
+#define KEYCTX_TX_WR_AUTHIN_M 0x7f
+#define KEYCTX_TX_WR_AUTHIN_V(x) ((x) << KEYCTX_TX_WR_AUTHIN_S)
+#define KEYCTX_TX_WR_AUTHIN_G(x) \
+ (((x) >> KEYCTX_TX_WR_AUTHIN_S) & KEYCTX_TX_WR_AUTHIN_M)
+
+struct sge_opaque_hdr {
+ void *dev;
+ dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
#define MAX_IVS_PAGE 256
#define TLS_KEY_CONTEXT_SZ 64
#define CIPHER_BLOCK_SIZE 16
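[Editor's note] The *_S/_M/_V/_G macros added above follow the usual cxgb4 convention: shift, mask, "value" (compose a field into a word), "get" (extract it back). A small self-contained demonstration with one of the fields defined in this hunk:

/* pack/unpack demo for the KEYCTX_TX_WR_CIPHER field; compiles
 * and runs as plain userspace C
 */
#include <assert.h>
#include <stdint.h>

#define KEYCTX_TX_WR_CIPHER_S	30
#define KEYCTX_TX_WR_CIPHER_M	0x1ffULL
#define KEYCTX_TX_WR_CIPHER_V(x) ((x) << KEYCTX_TX_WR_CIPHER_S)
#define KEYCTX_TX_WR_CIPHER_G(x) \
	(((x) >> KEYCTX_TX_WR_CIPHER_S) & KEYCTX_TX_WR_CIPHER_M)

int main(void)
{
	uint64_t wr = 0;

	wr |= KEYCTX_TX_WR_CIPHER_V(0x1aULL);		/* pack a 9-bit field */
	assert(KEYCTX_TX_WR_CIPHER_G(wr) == 0x1a);	/* unpack it back */
	return 0;
}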
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index 05520dccd906..05520dccd906 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index 47ba81e42f5d..47ba81e42f5d 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
diff --git a/drivers/crypto/chelsio/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
index f1820aca0d33..f1820aca0d33 100644
--- a/drivers/crypto/chelsio/chtls/chtls_hw.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
index 2e9acae1cba3..2e9acae1cba3 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_io.c
diff --git a/drivers/crypto/chelsio/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 66d247efd561..9098b3eed4da 100644
--- a/drivers/crypto/chelsio/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -638,4 +638,4 @@ module_exit(chtls_unregister);
MODULE_DESCRIPTION("Chelsio TLS Inline driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
-MODULE_VERSION(DRV_VERSION);
+MODULE_VERSION(CHTLS_DRV_VERSION);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.h b/drivers/net/ethernet/cirrus/cs89x0.h
index 91423b70bb45..210f9ec9af4b 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.h
+++ b/drivers/net/ethernet/cirrus/cs89x0.h
@@ -459,7 +459,3 @@
#define PNP_CNF_INT 0x70
#define PNP_CNF_DMA 0x74
#define PNP_CNF_MEM 0x48
-
-#define BIT0 1
-#define BIT15 0x8000
-
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 18f3aeb88f22..c67a16a48d62 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -169,6 +169,7 @@ struct enic {
u16 num_vfs;
#endif
spinlock_t enic_api_lock;
+ bool enic_api_busy;
struct enic_port_profile *pp;
/* work queue cache line section */
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
index b161f24522b8..3bdc74fba1e3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_api.c
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright 2013 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
@@ -34,6 +34,12 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
struct vnic_dev *vdev = enic->vdev;
spin_lock(&enic->enic_api_lock);
+ while (enic->enic_api_busy) {
+ spin_unlock(&enic->enic_api_lock);
+ cpu_relax();
+ spin_lock(&enic->enic_api_lock);
+ }
+
spin_lock_bh(&enic->devcmd_lock);
vnic_dev_cmd_proxy_by_index_start(vdev, vf);
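[Editor's note] The loop added above releases enic_api_lock and spins with cpu_relax() until the reset path clears enic_api_busy; the flag (set under the same lock by enic_set_api_busy(), further down in this series) lets enic_reset() fence off proxied devcmds without holding a spinlock across operations that may sleep. A generic sketch of the handoff, assuming the flag is always modified under the given lock as in this change:

/* busy-flag handoff sketch; returns with the lock held and
 * *busy guaranteed false
 */
#include <linux/spinlock.h>

static void api_wait_not_busy(spinlock_t *lock, bool *busy)
{
	spin_lock(lock);
	while (*busy) {
		spin_unlock(lock);
		cpu_relax();	/* let the reset path make progress */
		spin_lock(lock);
	}
}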
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 4d8e0aa447fb..a4dd52bba2c3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright 2013 Cisco Systems, Inc. All rights reserved.
*
* This program is free software; you may redistribute it and/or modify
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 552d89fdf54a..fb269d587b74 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -326,11 +326,11 @@ static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
struct enic *enic = vnic_dev_priv(wq->vdev);
if (buf->sop)
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
else
- pci_unmap_page(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_TO_DEVICE);
if (buf->os_buf)
dev_kfree_skb_any(buf->os_buf);
@@ -574,8 +574,8 @@ static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -605,8 +605,8 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -693,8 +693,9 @@ static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
*/
while (frag_len_left) {
len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
- dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev,
+ skb->data + offset, len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
@@ -752,8 +753,8 @@ static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
dma_addr_t dma_addr;
int err = 0;
- dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
- PCI_DMA_TODEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, head_len,
+ DMA_TO_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr)))
return -ENOMEM;
@@ -1222,8 +1223,8 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
if (!buf->os_buf)
return;
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(buf->os_buf);
buf->os_buf = NULL;
}
@@ -1248,8 +1249,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
if (!skb)
return -ENOMEM;
- dma_addr = pci_map_single(enic->pdev, skb->data, len,
- PCI_DMA_FROMDEVICE);
+ dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
+ DMA_FROM_DEVICE);
if (unlikely(enic_dma_map_check(enic, dma_addr))) {
dev_kfree_skb(skb);
return -ENOMEM;
@@ -1281,8 +1282,8 @@ static bool enic_rxcopybreak(struct net_device *netdev, struct sk_buff **skb,
new_skb = netdev_alloc_skb_ip_align(netdev, len);
if (!new_skb)
return false;
- pci_dma_sync_single_for_cpu(enic->pdev, buf->dma_addr, len,
- DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&enic->pdev->dev, buf->dma_addr, len,
+ DMA_FROM_DEVICE);
memcpy(new_skb->data, (*skb)->data, len);
*skb = new_skb;
@@ -1331,8 +1332,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
enic->rq_truncated_pkts++;
}
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
buf->os_buf = NULL;
@@ -1346,8 +1347,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
if (!enic_rxcopybreak(netdev, &skb, buf, bytes_written)) {
buf->os_buf = NULL;
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr,
+ buf->len, DMA_FROM_DEVICE);
}
prefetch(skb->data - NET_IP_ALIGN);
@@ -1420,8 +1421,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
/* Buffer overflow
*/
- pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
buf->os_buf = NULL;
}
@@ -2106,8 +2107,6 @@ static int enic_dev_wait(struct vnic_dev *vdev,
int done;
int err;
- BUG_ON(in_interrupt());
-
err = start(vdev, arg);
if (err)
return err;
@@ -2178,9 +2177,9 @@ int __enic_set_rsskey(struct enic *enic)
dma_addr_t rss_key_buf_pa;
int i, kidx, bidx, err;
- rss_key_buf_va = pci_zalloc_consistent(enic->pdev,
- sizeof(union vnic_rss_key),
- &rss_key_buf_pa);
+ rss_key_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+ sizeof(union vnic_rss_key),
+ &rss_key_buf_pa, GFP_ATOMIC);
if (!rss_key_buf_va)
return -ENOMEM;
@@ -2195,8 +2194,8 @@ int __enic_set_rsskey(struct enic *enic)
sizeof(union vnic_rss_key));
spin_unlock_bh(&enic->devcmd_lock);
- pci_free_consistent(enic->pdev, sizeof(union vnic_rss_key),
- rss_key_buf_va, rss_key_buf_pa);
+ dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_key),
+ rss_key_buf_va, rss_key_buf_pa);
return err;
}
@@ -2215,8 +2214,9 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
unsigned int i;
int err;
- rss_cpu_buf_va = pci_alloc_consistent(enic->pdev,
- sizeof(union vnic_rss_cpu), &rss_cpu_buf_pa);
+ rss_cpu_buf_va = dma_alloc_coherent(&enic->pdev->dev,
+ sizeof(union vnic_rss_cpu),
+ &rss_cpu_buf_pa, GFP_ATOMIC);
if (!rss_cpu_buf_va)
return -ENOMEM;
@@ -2229,8 +2229,8 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
sizeof(union vnic_rss_cpu));
spin_unlock_bh(&enic->devcmd_lock);
- pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
- rss_cpu_buf_va, rss_cpu_buf_pa);
+ dma_free_coherent(&enic->pdev->dev, sizeof(union vnic_rss_cpu),
+ rss_cpu_buf_va, rss_cpu_buf_pa);
return err;
}
@@ -2295,6 +2295,13 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
rss_hash_bits, rss_base_cpu, rss_enable);
}
+static void enic_set_api_busy(struct enic *enic, bool busy)
+{
+ spin_lock(&enic->enic_api_lock);
+ enic->enic_api_busy = busy;
+ spin_unlock(&enic->enic_api_lock);
+}
+
static void enic_reset(struct work_struct *work)
{
struct enic *enic = container_of(work, struct enic, reset);
@@ -2304,7 +2311,9 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_stop(enic->netdev);
enic_dev_soft_reset(enic);
enic_reset_addr_lists(enic);
@@ -2312,7 +2321,10 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
@@ -2324,7 +2336,9 @@ static void enic_tx_hang_reset(struct work_struct *work)
rtnl_lock();
- spin_lock(&enic->enic_api_lock);
+ /* Stop any activity from infiniband */
+ enic_set_api_busy(enic, true);
+
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2333,7 +2347,10 @@ static void enic_tx_hang_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
- spin_unlock(&enic->enic_api_lock);
+
+ /* Allow infiniband to fiddle with the device again */
+ enic_set_api_busy(enic, false);
+
call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
@@ -2527,13 +2544,15 @@ static void enic_dev_deinit(struct enic *enic)
{
unsigned int i;
- for (i = 0; i < enic->rq_count; i++) {
- napi_hash_del(&enic->napi[i]);
- netif_napi_del(&enic->napi[i]);
- }
+ for (i = 0; i < enic->rq_count; i++)
+ __netif_napi_del(&enic->napi[i]);
+
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
- netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+ __netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
+
+ /* observe RCU grace period after __netif_napi_del() calls */
+ synchronize_net();
enic_free_vnic_resources(enic);
enic_clear_intr_mode(enic);
@@ -2699,21 +2718,21 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* fail to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_release_regions;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
"for consistent allocations, aborting\n", 32);
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(47));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
"for consistent allocations, aborting\n", 47);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 901e44b0b795..45015931b335 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -193,9 +193,10 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
{
vnic_dev_desc_ring_size(ring, desc_count, desc_size);
- ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
- ring->size_unaligned,
- &ring->base_addr_unaligned);
+ ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
+ ring->size_unaligned,
+ &ring->base_addr_unaligned,
+ GFP_KERNEL);
if (!ring->descs_unaligned) {
vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
@@ -218,10 +219,9 @@ int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
if (ring->descs) {
- pci_free_consistent(vdev->pdev,
- ring->size_unaligned,
- ring->descs_unaligned,
- ring->base_addr_unaligned);
+ dma_free_coherent(&vdev->pdev->dev, ring->size_unaligned,
+ ring->descs_unaligned,
+ ring->base_addr_unaligned);
ring->descs = NULL;
}
}
@@ -551,9 +551,9 @@ int vnic_dev_fw_info(struct vnic_dev *vdev,
int err = 0;
if (!vdev->fw_info) {
- vdev->fw_info = pci_zalloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_fw_info),
- &vdev->fw_info_pa);
+ vdev->fw_info = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_fw_info),
+ &vdev->fw_info_pa, GFP_ATOMIC);
if (!vdev->fw_info)
return -ENOMEM;
@@ -603,8 +603,9 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int wait = 1000;
if (!vdev->stats) {
- vdev->stats = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_stats), &vdev->stats_pa);
+ vdev->stats = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats),
+ &vdev->stats_pa, GFP_ATOMIC);
if (!vdev->stats)
return -ENOMEM;
}
@@ -852,9 +853,9 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
return -EINVAL;
}
- notify_addr = pci_alloc_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- &notify_pa);
+ notify_addr = dma_alloc_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_notify),
+ &notify_pa, GFP_ATOMIC);
if (!notify_addr)
return -ENOMEM;
@@ -882,10 +883,9 @@ static int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
int vnic_dev_notify_unset(struct vnic_dev *vdev)
{
if (vdev->notify) {
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- vdev->notify,
- vdev->notify_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify, vdev->notify_pa);
}
return vnic_dev_notify_unsetcmd(vdev);
@@ -1046,18 +1046,17 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
{
if (vdev) {
if (vdev->notify)
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_notify),
- vdev->notify,
- vdev->notify_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_notify),
+ vdev->notify, vdev->notify_pa);
if (vdev->stats)
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_stats),
- vdev->stats, vdev->stats_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_stats),
+ vdev->stats, vdev->stats_pa);
if (vdev->fw_info)
- pci_free_consistent(vdev->pdev,
- sizeof(struct vnic_devcmd_fw_info),
- vdev->fw_info, vdev->fw_info_pa);
+ dma_free_coherent(&vdev->pdev->dev,
+ sizeof(struct vnic_devcmd_fw_info),
+ vdev->fw_info, vdev->fw_info_pa);
if (vdev->devcmd2)
vnic_dev_deinit_devcmd2(vdev);
@@ -1127,7 +1126,7 @@ int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
void *prov_buf;
int ret;
- prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
+ prov_buf = dma_alloc_coherent(&vdev->pdev->dev, len, &prov_pa, GFP_ATOMIC);
if (!prov_buf)
return -ENOMEM;
@@ -1137,7 +1136,7 @@ int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO2, &a0, &a1, wait);
- pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
+ dma_free_coherent(&vdev->pdev->dev, len, prov_buf, prov_pa);
return ret;
}
@@ -1217,7 +1216,8 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
tlv_size = sizeof(struct filter) +
sizeof(struct filter_action) +
2 * sizeof(struct filter_tlv);
- tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
+ tlv_va = dma_alloc_coherent(&vdev->pdev->dev, tlv_size,
+ &tlv_pa, GFP_ATOMIC);
if (!tlv_va)
return -ENOMEM;
tlv = tlv_va;
@@ -1240,7 +1240,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
*entry = (u16)a0;
- pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
+ dma_free_coherent(&vdev->pdev->dev, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
a0 = *entry;
ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
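[Editor's note] The vnic_dev.c conversions above replace pci_alloc_consistent()/pci_free_consistent() — which always implied GFP_ATOMIC — with dma_alloc_coherent()/dma_free_coherent(), where the GFP flags are explicit. Passing GFP_ATOMIC preserves the old behavior on paths that may not sleep, while ring allocation in process context uses GFP_KERNEL. A minimal before/after sketch:

#include <linux/dma-mapping.h>

/* old: buf = pci_alloc_consistent(pdev, size, &dma);   (implicit GFP_ATOMIC)
 * new: flags are explicit; prefer GFP_KERNEL wherever sleeping is allowed
 */
static void *alloc_devcmd_buf(struct device *dev, size_t size,
			      dma_addr_t *dma, gfp_t flags)
{
	return dma_alloc_coherent(dev, size, dma, flags);
}

static void free_devcmd_buf(struct device *dev, size_t size,
			    void *buf, dma_addr_t dma)
{
	dma_free_coherent(dev, size, buf, dma);
}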
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index ffec0f3dd957..8df6f081f244 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -85,6 +85,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
* struct gmac_queue_page - page buffer per-page info
+ * @page: the page struct
+ * @mapping: the dma address handle
*/
struct gmac_queue_page {
struct page *page;
@@ -509,7 +511,6 @@ static int gmac_init(struct net_device *netdev)
.rel_threshold = 0,
} };
union gmac_config0 tmp;
- u32 val;
config0.bits.max_len = gmac_pick_rx_max_len(netdev->mtu);
tmp.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
@@ -519,7 +520,7 @@ static int gmac_init(struct net_device *netdev)
writel(config2.bits32, port->gmac_base + GMAC_CONFIG2);
writel(config3.bits32, port->gmac_base + GMAC_CONFIG3);
- val = readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
+ readl(port->dma_base + GMAC_AHB_WEIGHT_REG);
writel(ahb_weight.bits32, port->dma_base + GMAC_AHB_WEIGHT_REG);
writel(hw_weigh.bits32,
@@ -539,12 +540,6 @@ static int gmac_init(struct net_device *netdev)
return 0;
}
-static void gmac_uninit(struct net_device *netdev)
-{
- if (netdev->phydev)
- phy_disconnect(netdev->phydev);
-}
-
static int gmac_setup_txqs(struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
@@ -1768,15 +1763,6 @@ static int gmac_open(struct net_device *netdev)
struct gemini_ethernet_port *port = netdev_priv(netdev);
int err;
- if (!netdev->phydev) {
- err = gmac_setup_phy(netdev);
- if (err) {
- netif_err(port, ifup, netdev,
- "PHY init failed: %d\n", err);
- return err;
- }
- }
-
err = request_irq(netdev->irq, gmac_irq,
IRQF_SHARED, netdev->name, netdev);
if (err) {
@@ -2122,9 +2108,8 @@ static void gmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *rp)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
- union gmac_config0 config0;
- config0.bits32 = readl(port->gmac_base + GMAC_CONFIG0);
+ readl(port->gmac_base + GMAC_CONFIG0);
rp->rx_max_pending = 1 << 15;
rp->rx_mini_max_pending = 0;
@@ -2209,7 +2194,6 @@ static void gmac_get_drvinfo(struct net_device *netdev,
static const struct net_device_ops gmac_351x_ops = {
.ndo_init = gmac_init,
- .ndo_uninit = gmac_uninit,
.ndo_open = gmac_open,
.ndo_stop = gmac_stop,
.ndo_start_xmit = gmac_start_xmit,
@@ -2295,8 +2279,10 @@ static irqreturn_t gemini_port_irq(int irq, void *data)
static void gemini_port_remove(struct gemini_ethernet_port *port)
{
- if (port->netdev)
+ if (port->netdev) {
+ phy_disconnect(port->netdev->phydev);
unregister_netdev(port->netdev);
+ }
clk_disable_unprepare(port->pclk);
geth_cleanup_freeq(port->geth);
}
@@ -2505,6 +2491,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
if (ret)
goto unprepare;
+ ret = gmac_setup_phy(netdev);
+ if (ret) {
+ netdev_err(netdev,
+ "PHY init failed\n");
+ goto unprepare;
+ }
+
ret = register_netdev(netdev);
if (ret)
goto unprepare;
@@ -2513,10 +2506,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
"irq %d, DMA @ 0x%pap, GMAC @ 0x%pap\n",
port->irq, &dmares->start,
&gmacres->start);
- ret = gmac_setup_phy(netdev);
- if (ret)
- netdev_info(netdev,
- "PHY init failed, deferring to ifup time\n");
return 0;
unprepare:
@@ -2529,6 +2518,7 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
gemini_port_remove(port);
+
return 0;
}
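[Editor's note] The gemini change above connects the PHY once at probe time and disconnects it in gemini_port_remove(), instead of (re)connecting at ndo_open and tearing down via the now-removed ndo_uninit hook. A minimal sketch of that pairing with the generic phylib API; the link handler and probe helper names are mine:

#include <linux/phy.h>

static void my_handle_link(struct net_device *ndev) { /* adjust MAC to PHY */ }

static int my_probe_phy(struct net_device *ndev, const char *bus_id,
			phy_interface_t iface)
{
	struct phy_device *phydev;

	phydev = phy_connect(ndev, bus_id, my_handle_link, iface);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);	/* probe fails, as in the new code */
	return 0;
}

static void my_remove(struct net_device *ndev)
{
	phy_disconnect(ndev->phydev);	/* pairs with phy_connect() at probe */
	unregister_netdev(ndev);
}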
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 2610efe4f873..d9f6c19940ef 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -443,21 +443,23 @@ static void de_rx (struct de_private *de)
}
if (!copying_skb) {
- pci_unmap_single(de->pdev, mapping,
- buflen, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&de->pdev->dev, mapping, buflen,
+ DMA_FROM_DEVICE);
skb_put(skb, len);
mapping =
de->rx_skb[rx_tail].mapping =
- pci_map_single(de->pdev, copy_skb->data,
- buflen, PCI_DMA_FROMDEVICE);
+ dma_map_single(&de->pdev->dev, copy_skb->data,
+ buflen, DMA_FROM_DEVICE);
de->rx_skb[rx_tail].skb = copy_skb;
} else {
- pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&de->pdev->dev, mapping, len,
+ DMA_FROM_DEVICE);
skb_reserve(copy_skb, RX_OFFSET);
skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
len);
- pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&de->pdev->dev, mapping,
+ len, DMA_FROM_DEVICE);
/* We'll reuse the original ring buffer. */
skb = copy_skb;
@@ -554,13 +556,15 @@ static void de_tx (struct de_private *de)
goto next;
if (unlikely(skb == DE_SETUP_SKB)) {
- pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
- sizeof(de->setup_frame), PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[tx_tail].mapping,
+ sizeof(de->setup_frame),
+ DMA_TO_DEVICE);
goto next;
}
- pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev, de->tx_skb[tx_tail].mapping,
+ skb->len, DMA_TO_DEVICE);
if (status & LastFrag) {
if (status & TxError) {
@@ -620,7 +624,8 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
txd = &de->tx_ring[entry];
len = skb->len;
- mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&de->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
if (entry == (DE_TX_RING_SIZE - 1))
flags |= RingEnd;
if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
@@ -763,8 +768,8 @@ static void __de_set_rx_mode (struct net_device *dev)
de->tx_skb[entry].skb = DE_SETUP_SKB;
de->tx_skb[entry].mapping = mapping =
- pci_map_single (de->pdev, de->setup_frame,
- sizeof (de->setup_frame), PCI_DMA_TODEVICE);
+ dma_map_single(&de->pdev->dev, de->setup_frame,
+ sizeof(de->setup_frame), DMA_TO_DEVICE);
/* Put the setup frame on the Tx list. */
txd = &de->tx_ring[entry];
@@ -1279,8 +1284,10 @@ static int de_refill_rx (struct de_private *de)
if (!skb)
goto err_out;
- de->rx_skb[i].mapping = pci_map_single(de->pdev,
- skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ de->rx_skb[i].mapping = dma_map_single(&de->pdev->dev,
+ skb->data,
+ de->rx_buf_sz,
+ DMA_FROM_DEVICE);
de->rx_skb[i].skb = skb;
de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
@@ -1313,7 +1320,8 @@ static int de_init_rings (struct de_private *de)
static int de_alloc_rings (struct de_private *de)
{
- de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
+ de->rx_ring = dma_alloc_coherent(&de->pdev->dev, DE_RING_BYTES,
+ &de->ring_dma, GFP_KERNEL);
if (!de->rx_ring)
return -ENOMEM;
de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
@@ -1333,8 +1341,9 @@ static void de_clean_rings (struct de_private *de)
for (i = 0; i < DE_RX_RING_SIZE; i++) {
if (de->rx_skb[i].skb) {
- pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
- de->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->rx_skb[i].mapping, de->rx_buf_sz,
+ DMA_FROM_DEVICE);
dev_kfree_skb(de->rx_skb[i].skb);
}
}
@@ -1344,15 +1353,15 @@ static void de_clean_rings (struct de_private *de)
if ((skb) && (skb != DE_DUMMY_SKB)) {
if (skb != DE_SETUP_SKB) {
de->dev->stats.tx_dropped++;
- pci_unmap_single(de->pdev,
- de->tx_skb[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[i].mapping,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
} else {
- pci_unmap_single(de->pdev,
- de->tx_skb[i].mapping,
- sizeof(de->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&de->pdev->dev,
+ de->tx_skb[i].mapping,
+ sizeof(de->setup_frame),
+ DMA_TO_DEVICE);
}
}
}
@@ -1364,7 +1373,8 @@ static void de_clean_rings (struct de_private *de)
static void de_free_rings (struct de_private *de)
{
de_clean_rings(de);
- pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
+ dma_free_coherent(&de->pdev->dev, DE_RING_BYTES, de->rx_ring,
+ de->ring_dma);
de->rx_ring = NULL;
de->tx_ring = NULL;
}
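[Editor's note] From here on, the tulip-family drivers get the same mechanical conversion: legacy pci_map_single()/pci_unmap_single() with PCI_DMA_* directions become dma_map_single()/dma_unmap_single() on &pdev->dev with DMA_* directions. A minimal sketch of the mapped RX buffer lifecycle, including the dma_mapping_error() check that every streaming mapping should have (only some of these drivers perform it):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* map an RX buffer for the device; on success *mapping is valid
 * until rx_buf_unmap() is called
 */
static int rx_buf_map(struct device *dev, struct sk_buff *skb,
		      size_t len, dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	return dma_mapping_error(dev, *mapping) ? -ENOMEM : 0;
}

static void rx_buf_unmap(struct device *dev, dma_addr_t mapping, size_t len)
{
	dma_unmap_single(dev, mapping, len, DMA_FROM_DEVICE);
}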
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index f9dd1aa9f2da..683e328b5461 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4925,11 +4925,11 @@ mii_get_oui(u_char phyaddr, u_long ioaddr)
u_char breg[2];
} a;
int i, r2, r3, ret=0;*/
- int r2, r3;
+ int r2;
/* Read r2 and r3 */
r2 = mii_rd(MII_ID0, phyaddr, ioaddr);
- r3 = mii_rd(MII_ID1, phyaddr, ioaddr);
+ mii_rd(MII_ID1, phyaddr, ioaddr);
/* SEEQ and Cypress way * /
/ * Shuffle r2 and r3 * /
a.reg=0;
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c3b4abff48b5..87a27fe2992d 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -380,7 +380,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
@@ -422,15 +422,17 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
db = netdev_priv(dev);
/* Allocate Tx/Rx descriptor memory */
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ &db->desc_pool_dma_ptr, GFP_KERNEL);
if (!db->desc_pool_ptr) {
err = -ENOMEM;
goto err_out_res;
}
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
- TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ &db->buf_pool_dma_ptr, GFP_KERNEL);
if (!db->buf_pool_ptr) {
err = -ENOMEM;
goto err_out_free_desc;
@@ -492,11 +494,12 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err_out_unmap:
pci_iounmap(pdev, db->ioaddr);
err_out_free_buf:
- pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_desc:
- pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
- db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
pci_release_regions(pdev);
err_out_disable:
@@ -519,11 +522,12 @@ static void dmfe_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(db->pdev, db->ioaddr);
- pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
- db->desc_pool_dma_ptr);
- pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
free_netdev(dev); /* free board information */
}
@@ -955,8 +959,8 @@ static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2),
- RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
@@ -1329,8 +1333,8 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev,
- skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
@@ -1544,8 +1548,8 @@ static void allocate_rx_buffer(struct net_device *dev)
if ( ( skb = netdev_alloc_skb(dev, RX_ALLOC_SIZE) ) == NULL )
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data,
- RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb->data,
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index c1ca0765d56d..54560f9a1651 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -74,8 +74,8 @@ int tulip_refill_rx(struct net_device *dev)
if (skb == NULL)
break;
- mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
if (dma_mapping_error(&tp->pdev->dev, mapping)) {
dev_kfree_skb(skb);
tp->rx_buffers[entry].skb = NULL;
@@ -210,9 +210,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
if (pkt_len < tulip_rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
@@ -222,9 +223,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
- pci_dma_sync_single_for_device(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
@@ -240,8 +242,10 @@ int tulip_poll(struct napi_struct *napi, int budget)
}
#endif
- pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
@@ -436,9 +440,10 @@ static int tulip_rx(struct net_device *dev)
if (pkt_len < tulip_rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
#if ! defined(__alpha__)
skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data,
pkt_len);
@@ -448,9 +453,10 @@ static int tulip_rx(struct net_device *dev)
tp->rx_buffers[entry].skb->data,
pkt_len);
#endif
- pci_dma_sync_single_for_device(tp->pdev,
- tp->rx_buffers[entry].mapping,
- pkt_len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ pkt_len,
+ DMA_FROM_DEVICE);
} else { /* Pass up the skb already on the Rx ring. */
char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
pkt_len);
@@ -466,8 +472,9 @@ static int tulip_rx(struct net_device *dev)
}
#endif
- pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->rx_buffers[entry].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[entry].skb = NULL;
tp->rx_buffers[entry].mapping = 0;
@@ -597,10 +604,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (tp->tx_buffers[entry].skb == NULL) {
/* test because dummy frames not mapped */
if (tp->tx_buffers[entry].mapping)
- pci_unmap_single(tp->pdev,
- tp->tx_buffers[entry].mapping,
- sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ DMA_TO_DEVICE);
continue;
}
@@ -629,9 +636,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
dev->stats.tx_packets++;
}
- pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
tp->tx_buffers[entry].skb->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
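[Editor's note] The copybreak path in tulip_poll()/tulip_rx() above keeps the original ring buffer mapped: it syncs the buffer to the CPU, copies the small packet into a fresh skb, then syncs ownership back to the device so the ring slot can be reused; only packets at or above the threshold are unmapped and passed up directly. A sketch of that decision with the converted DMA calls (the helper and its parameters are mine):

/* small packets are copied so the mapped ring buffer is reused;
 * large ones are unmapped and handed to the stack
 */
static struct sk_buff *rx_take_packet(struct device *dev,
				      struct sk_buff *ring_skb,
				      dma_addr_t mapping, int pkt_len,
				      int copybreak, int buf_sz,
				      struct net_device *ndev)
{
	struct sk_buff *skb;

	if (pkt_len < copybreak &&
	    (skb = netdev_alloc_skb_ip_align(ndev, pkt_len))) {
		dma_sync_single_for_cpu(dev, mapping, pkt_len,
					DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, ring_skb->data, pkt_len);
		skb_put(skb, pkt_len);
		dma_sync_single_for_device(dev, mapping, pkt_len,
					   DMA_FROM_DEVICE);
		return skb;	/* ring buffer stays mapped for reuse */
	}

	dma_unmap_single(dev, mapping, buf_sz, DMA_FROM_DEVICE);
	skb_put(ring_skb, pkt_len);
	return ring_skb;	/* caller must refill the ring slot */
}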
diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c
index dcf21a36a9cf..011604787b8e 100644
--- a/drivers/net/ethernet/dec/tulip/media.c
+++ b/drivers/net/ethernet/dec/tulip/media.c
@@ -319,13 +319,8 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
case 5: case 6: {
- u16 setup[5];
-
new_csr6 = 0; /* FIXME */
- for (i = 0; i < 5; i++)
- setup[i] = get_u16(&p[i*2 + 1]);
-
if (startup && mtable->has_reset) {
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 3a8659c5da06..e7b0d7de40fd 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -350,9 +350,9 @@ static void tulip_up(struct net_device *dev)
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
- mapping = pci_map_single(tp->pdev, tp->setup_frame,
+ mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tp->tx_buffers[tp->cur_tx].skb = NULL;
tp->tx_buffers[tp->cur_tx].mapping = mapping;
@@ -630,8 +630,8 @@ static void tulip_init_ring(struct net_device *dev)
tp->rx_buffers[i].skb = skb;
if (skb == NULL)
break;
- mapping = pci_map_single(tp->pdev, skb->data,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
tp->rx_buffers[i].mapping = mapping;
tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
@@ -664,8 +664,8 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tp->cur_tx % TX_RING_SIZE;
tp->tx_buffers[entry].skb = skb;
- mapping = pci_map_single(tp->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
tp->tx_buffers[entry].mapping = mapping;
tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
@@ -716,16 +716,17 @@ static void tulip_clean_tx_ring(struct tulip_private *tp)
if (tp->tx_buffers[entry].skb == NULL) {
/* test because dummy frames not mapped */
if (tp->tx_buffers[entry].mapping)
- pci_unmap_single(tp->pdev,
- tp->tx_buffers[entry].mapping,
- sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ sizeof(tp->setup_frame),
+ DMA_TO_DEVICE);
continue;
}
- pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
- tp->tx_buffers[entry].skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[entry].mapping,
+ tp->tx_buffers[entry].skb->len,
+ DMA_TO_DEVICE);
/* Free the original skb. */
dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
@@ -795,8 +796,8 @@ static void tulip_free_ring (struct net_device *dev)
/* An invalid address. */
tp->rx_ring[i].buffer1 = cpu_to_le32(0xBADF00D0);
if (skb) {
- pci_unmap_single(tp->pdev, mapping, PKT_BUF_SZ,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
dev_kfree_skb (skb);
}
}
@@ -805,8 +806,9 @@ static void tulip_free_ring (struct net_device *dev)
struct sk_buff *skb = tp->tx_buffers[i].skb;
if (skb != NULL) {
- pci_unmap_single(tp->pdev, tp->tx_buffers[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&tp->pdev->dev,
+ tp->tx_buffers[i].mapping, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb (skb);
}
tp->tx_buffers[i].skb = NULL;
@@ -1149,9 +1151,10 @@ static void set_rx_mode(struct net_device *dev)
tp->tx_buffers[entry].skb = NULL;
tp->tx_buffers[entry].mapping =
- pci_map_single(tp->pdev, tp->setup_frame,
+ dma_map_single(&tp->pdev->dev,
+ tp->setup_frame,
sizeof(tp->setup_frame),
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
/* Put the setup frame on the Tx list. */
if (entry == TX_RING_SIZE-1)
tx_flags |= DESC_RING_WRAP; /* Wrap ring. */
@@ -1422,10 +1425,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp = netdev_priv(dev);
tp->dev = dev;
- tp->rx_ring = pci_alloc_consistent(pdev,
- sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
- &tp->rx_ring_dma);
+ tp->rx_ring = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ &tp->rx_ring_dma, GFP_KERNEL);
if (!tp->rx_ring)
goto err_out_mtable;
tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
@@ -1757,10 +1760,10 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_free_ring:
- pci_free_consistent (pdev,
- sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
err_out_mtable:
kfree (tp->mtable);
@@ -1878,10 +1881,10 @@ static void tulip_remove_one(struct pci_dev *pdev)
tp = netdev_priv(dev);
unregister_netdev(dev);
- pci_free_consistent (pdev,
- sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
- sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
- tp->rx_ring, tp->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+ sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+ tp->rx_ring, tp->rx_ring_dma);
kfree (tp->mtable);
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index f942399f0f32..13e73ed15ef0 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -282,7 +282,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
return -ENOMEM;
SET_NETDEV_DEV(dev, &pdev->dev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
@@ -317,11 +317,15 @@ static int uli526x_init_one(struct pci_dev *pdev,
/* Allocate Tx/Rx descriptor memory */
err = -ENOMEM;
- db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ db->desc_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ &db->desc_pool_dma_ptr, GFP_KERNEL);
if (!db->desc_pool_ptr)
goto err_out_release;
- db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ db->buf_pool_ptr = dma_alloc_coherent(&pdev->dev,
+ TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ &db->buf_pool_dma_ptr, GFP_KERNEL);
if (!db->buf_pool_ptr)
goto err_out_free_tx_desc;
@@ -401,11 +405,12 @@ static int uli526x_init_one(struct pci_dev *pdev,
err_out_unmap:
pci_iounmap(pdev, db->ioaddr);
err_out_free_tx_buf:
- pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
err_out_free_tx_desc:
- pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
- db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_release:
pci_release_regions(pdev);
err_out_disable:
@@ -424,11 +429,11 @@ static void uli526x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
pci_iounmap(pdev, db->ioaddr);
- pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
- DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
- db->desc_pool_dma_ptr);
- pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
- db->buf_pool_ptr, db->buf_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev,
+ sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
+ dma_free_coherent(&db->pdev->dev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
pci_release_regions(pdev);
pci_disable_device(pdev);
free_netdev(dev);
@@ -810,7 +815,8 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
db->rx_avail_cnt--;
db->interval_rx_cnt++;
- pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&db->pdev->dev, le32_to_cpu(rxptr->rdes2),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE);
if ( (rdes0 & 0x300) != 0x300) {
/* A packet without First/Last flag */
/* reuse this SKB */
@@ -1234,10 +1240,8 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
rxptr->rx_skb_ptr = skb;
- rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
- skb_tail_pointer(skb),
- RX_ALLOC_SIZE,
- PCI_DMA_FROMDEVICE));
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb_tail_pointer(skb),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
db->rx_avail_cnt++;
@@ -1409,10 +1413,8 @@ static void allocate_rx_buffer(struct net_device *dev)
if (skb == NULL)
break;
rxptr->rx_skb_ptr = skb; /* FIXME (?) */
- rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev,
- skb_tail_pointer(skb),
- RX_ALLOC_SIZE,
- PCI_DMA_FROMDEVICE));
+ rxptr->rdes2 = cpu_to_le32(dma_map_single(&db->pdev->dev, skb_tail_pointer(skb),
+ RX_ALLOC_SIZE, DMA_FROM_DEVICE));
wmb();
rxptr->rdes0 = cpu_to_le32(0x80000000);
rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 5a43be327f58..89cbdc1f4857 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -364,7 +364,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
irq = pdev->irq;
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_warn("Device %s disabled due to DMA limitations\n",
pci_name(pdev));
return -EIO;
@@ -630,9 +630,10 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
+ netdev_dbg(dev, "%s() irq %d\n", __func__, irq);
- if((i=alloc_ringdesc(dev)))
+ i = alloc_ringdesc(dev);
+ if (i)
goto out_err;
spin_lock_irq(&np->lock);
@@ -642,7 +643,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- netdev_dbg(dev, "Done netdev_open()\n");
+ netdev_dbg(dev, "Done %s()\n", __func__);
/* Set the timer to check for link beat. */
timer_setup(&np->timer, netdev_timer, 0);
@@ -802,8 +803,9 @@ static void init_rxtx_rings(struct net_device *dev)
np->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
- np->rx_buf_sz,PCI_DMA_FROMDEVICE);
+ np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
np->rx_ring[i].buffer1 = np->rx_addr[i];
np->rx_ring[i].status = DescOwned;
@@ -833,20 +835,17 @@ static void free_rxtx_rings(struct netdev_private* np)
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].status = 0;
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->rx_addr[i],
- np->rx_skbuff[i]->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i],
+ np->rx_skbuff[i]->len,
+ DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
np->rx_skbuff[i] = NULL;
}
for (i = 0; i < TX_RING_SIZE; i++) {
if (np->tx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->tx_addr[i],
- np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i],
+ np->tx_skbuff[i]->len, DMA_TO_DEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
}
np->tx_skbuff[i] = NULL;
@@ -964,10 +963,10 @@ static int alloc_ringdesc(struct net_device *dev)
np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
- np->rx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct w840_rx_desc)*RX_RING_SIZE +
- sizeof(struct w840_tx_desc)*TX_RING_SIZE,
- &np->ring_dma_addr);
+ np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+ sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+ &np->ring_dma_addr, GFP_KERNEL);
if(!np->rx_ring)
return -ENOMEM;
init_rxtx_rings(dev);
@@ -976,10 +975,10 @@ static int alloc_ringdesc(struct net_device *dev)
static void free_ringdesc(struct netdev_private *np)
{
- pci_free_consistent(np->pci_dev,
- sizeof(struct w840_rx_desc)*RX_RING_SIZE +
- sizeof(struct w840_tx_desc)*TX_RING_SIZE,
- np->rx_ring, np->ring_dma_addr);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct w840_rx_desc) * RX_RING_SIZE +
+ sizeof(struct w840_tx_desc) * TX_RING_SIZE,
+ np->rx_ring, np->ring_dma_addr);
}
@@ -994,8 +993,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
/* Calculate the next Tx descriptor entry. */
entry = np->cur_tx % TX_RING_SIZE;
- np->tx_addr[entry] = pci_map_single(np->pci_dev,
- skb->data,skb->len, PCI_DMA_TODEVICE);
+ np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
np->tx_skbuff[entry] = skb;
np->tx_ring[entry].buffer1 = np->tx_addr[entry];
@@ -1078,9 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
np->stats.tx_packets++;
}
/* Free the original skb. */
- pci_unmap_single(np->pci_dev,np->tx_addr[entry],
- np->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
+ np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
np->tx_q_bytes -= np->tx_skbuff[entry]->len;
dev_kfree_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
@@ -1217,18 +1215,21 @@ static int netdev_rx(struct net_device *dev)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(np->pci_dev,np->rx_addr[entry],
- np->rx_skbuff[entry]->len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_addr[entry],
+ np->rx_skbuff[entry]->len,
+ DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
@@ -1258,9 +1259,10 @@ static int netdev_rx(struct net_device *dev)
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- np->rx_addr[entry] = pci_map_single(np->pci_dev,
- skb->data,
- np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
+ skb->data,
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
np->rx_ring[entry].buffer1 = np->rx_addr[entry];
}
wmb();
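
The rx_copybreak branch converted above is the usual small-packet copy optimization: short frames are copied into a fresh skb so the mapped receive buffer can be reused, which requires bracketing the CPU access with ownership-transfer syncs; only large frames give up the mapping entirely. Condensed, with dev, rx_addr, rx_buf and buf_len standing in for the ring state:

	if (pkt_len < rx_copybreak) {
		/* CPU is about to read the DMA buffer: claim it ... */
		dma_sync_single_for_cpu(dev, rx_addr, buf_len, DMA_FROM_DEVICE);
		skb_copy_to_linear_data(skb, rx_buf->data, pkt_len);
		/* ... then hand it back to the device for reuse */
		dma_sync_single_for_device(dev, rx_addr, buf_len, DMA_FROM_DEVICE);
	} else {
		/* large frame: unmap for good and pass the buffer up */
		dma_unmap_single(dev, rx_addr, buf_len, DMA_FROM_DEVICE);
	}
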
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index be6d8a9ada27..734acb834c98 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -7,7 +7,6 @@
*/
-#define DRV_NAME "DL2000/TC902x-based linux driver"
#include "dl2k.h"
#include <linux/dma-mapping.h>
@@ -223,13 +222,15 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata (pdev, dev);
- ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_iounmap;
np->tx_ring = ring_space;
np->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_unmap_tx;
np->rx_ring = ring_space;
@@ -280,9 +281,11 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_unmap_rx:
- pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
@@ -436,8 +439,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = np->rx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, desc_to_dma(&np->rx_ring[i]),
- skb->len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->rx_ring[i]),
+ skb->len, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
np->rx_skbuff[i] = NULL;
}
@@ -447,8 +451,9 @@ static void free_list(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
skb = np->tx_skbuff[i];
if (skb) {
- pci_unmap_single(np->pdev, desc_to_dma(&np->tx_ring[i]),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->tx_ring[i]),
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
np->tx_skbuff[i] = NULL;
}
@@ -505,9 +510,8 @@ static int alloc_list(struct net_device *dev)
sizeof(struct netdev_desc));
/* Rubicon now supports 40 bits of addressing space. */
np->rx_ring[i].fraginfo =
- cpu_to_le64(pci_map_single(
- np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
np->rx_ring[i].fraginfo |= cpu_to_le64((u64)np->rx_buf_sz << 48);
}
@@ -673,9 +677,8 @@ rio_timer (struct timer_list *t)
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
- cpu_to_le64 (pci_map_single
- (np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
@@ -729,9 +732,8 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
((u64)np->vlan << 32) |
((u64)skb->priority << 45);
}
- txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
- skb->len,
- PCI_DMA_TODEVICE));
+ txdesc->fraginfo = cpu_to_le64 (dma_map_single(&np->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE));
txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);
/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
@@ -828,9 +830,9 @@ rio_free_tx (struct net_device *dev, int irq)
if (!(np->tx_ring[entry].status & cpu_to_le64(TFDDone)))
break;
skb = np->tx_skbuff[entry];
- pci_unmap_single (np->pdev,
- desc_to_dma(&np->tx_ring[entry]),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(&np->tx_ring[entry]), skb->len,
+ DMA_TO_DEVICE);
if (irq)
dev_consume_skb_irq(skb);
else
@@ -950,25 +952,25 @@ receive_packet (struct net_device *dev)
/* Small skbuffs for short packets */
if (pkt_len > copy_thresh) {
- pci_unmap_single (np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put (skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
- pci_dma_sync_single_for_cpu(np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data (skb,
np->rx_skbuff[entry]->data,
pkt_len);
skb_put (skb, pkt_len);
- pci_dma_sync_single_for_device(np->pdev,
- desc_to_dma(desc),
- np->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pdev->dev,
+ desc_to_dma(desc),
+ np->rx_buf_sz,
+ DMA_FROM_DEVICE);
}
skb->protocol = eth_type_trans (skb, dev);
#if 0
@@ -1001,9 +1003,8 @@ receive_packet (struct net_device *dev)
}
np->rx_skbuff[entry] = skb;
np->rx_ring[entry].fraginfo =
- cpu_to_le64 (pci_map_single
- (np->pdev, skb->data, np->rx_buf_sz,
- PCI_DMA_FROMDEVICE));
+ cpu_to_le64(dma_map_single(&np->pdev->dev, skb->data,
+ np->rx_buf_sz, DMA_FROM_DEVICE));
}
np->rx_ring[entry].fraginfo |=
cpu_to_le64((u64)np->rx_buf_sz << 48);
@@ -1797,10 +1798,10 @@ rio_remove1 (struct pci_dev *pdev)
struct netdev_private *np = netdev_priv(dev);
unregister_netdev (dev);
- pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring,
- np->rx_ring_dma);
- pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
- np->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring,
+ np->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring,
+ np->tx_ring_dma);
#ifdef MEM_MAPPING
pci_iounmap(pdev, np->ioaddr);
#endif
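
dl2k packs both the DMA address (the hardware supports 40 address bits) and the buffer length into the single 64-bit fraginfo descriptor word, with the length in bits 63:48; the conversion swaps the mapping call but keeps that layout intact. A sketch of the packing, assuming rx_buf_sz fits in 16 bits:

	dma_addr_t addr = dma_map_single(&pdev->dev, skb->data,
					 rx_buf_sz, DMA_FROM_DEVICE);

	desc->fraginfo = cpu_to_le64(addr);			/* address bits */
	desc->fraginfo |= cpu_to_le64((u64)rx_buf_sz << 48);	/* length field */
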
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index b3f8597e77aa..e3a8858915b3 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -367,6 +367,7 @@ struct netdev_private {
dma_addr_t tx_ring_dma;
dma_addr_t rx_ring_dma;
struct timer_list timer; /* Media monitoring timer. */
+ struct net_device *ndev; /* backpointer */
/* ethtool extra stats */
struct {
u64 tx_multiple_collisions;
@@ -429,8 +430,8 @@ static void init_ring(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static int reset_tx (struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
-static void rx_poll(unsigned long data);
-static void tx_poll(unsigned long data);
+static void rx_poll(struct tasklet_struct *t);
+static void tx_poll(struct tasklet_struct *t);
static void refill_rx (struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
@@ -531,14 +532,15 @@ static int sundance_probe1(struct pci_dev *pdev,
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
np = netdev_priv(dev);
+ np->ndev = dev;
np->base = ioaddr;
np->pci_dev = pdev;
np->chip_id = chip_idx;
np->msg_enable = (1 << debug) - 1;
spin_lock_init(&np->lock);
spin_lock_init(&np->statlock);
- tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev);
- tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev);
+ tasklet_setup(&np->rx_tasklet, rx_poll);
+ tasklet_setup(&np->tx_tasklet, tx_poll);
ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE,
&ring_dma, GFP_KERNEL);
@@ -1054,10 +1056,9 @@ static void init_ring(struct net_device *dev)
}
}
-static void tx_poll (unsigned long data)
+static void tx_poll(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_tasklet(np, t, tx_tasklet);
unsigned head = np->cur_task % TX_RING_SIZE;
struct netdev_desc *txdesc =
&np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE];
@@ -1312,10 +1313,10 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}
-static void rx_poll(unsigned long data)
+static void rx_poll(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct netdev_private *np = netdev_priv(dev);
+ struct netdev_private *np = from_tasklet(np, t, rx_tasklet);
+ struct net_device *dev = np->ndev;
int entry = np->cur_rx % RX_RING_SIZE;
int boguscnt = np->budget;
void __iomem *ioaddr = np->base;
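
sundance moves to the modern tasklet API: tasklet_setup() registers a callback that receives the tasklet itself, and from_tasklet() (a typed container_of() wrapper) recovers the enclosing private struct, which is why the new np->ndev backpointer is needed to reach the net_device from rx_poll(). The general shape of such a conversion, with my_priv and my_tasklet as placeholder names:

	static void my_poll(struct tasklet_struct *t)
	{
		/* container_of(t, struct my_priv, my_tasklet) underneath */
		struct my_priv *priv = from_tasklet(priv, t, my_tasklet);
		struct net_device *dev = priv->ndev;	/* via backpointer */

		/* ... poll work ... */
	}

	/* at init, replacing tasklet_init(&t, fn, (unsigned long)dev): */
	tasklet_setup(&priv->my_tasklet, my_poll);
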
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index db98274501a0..48c6eb142dcc 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -507,23 +507,20 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
- u32 tx_status, irq_enable;
- unsigned int len, i, tx_cmd, wrsz;
+ unsigned int i, tx_cmd, wrsz;
unsigned long flags;
unsigned int *bufp;
+ u32 irq_enable;
- tx_status = dnet_readl(bp, TX_STATUS);
+ dnet_readl(bp, TX_STATUS);
pr_debug("start_xmit: len %u head %p data %p\n",
skb->len, skb->head, skb->data);
dnet_print_skb(skb);
- /* frame size (words) */
- len = (skb->len + 3) >> 2;
-
spin_lock_irqsave(&bp->lock, flags);
- tx_status = dnet_readl(bp, TX_STATUS);
+ dnet_readl(bp, TX_STATUS);
bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL);
wrsz = (u32) skb->len + 3;
@@ -545,7 +542,7 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) {
netif_stop_queue(dev);
- tx_status = dnet_readl(bp, INTR_SRC);
+ dnet_readl(bp, INTR_SRC);
irq_enable = dnet_readl(bp, INTR_ENB);
irq_enable |= DNET_INTR_ENB_TX_FIFOAE;
dnet_writel(bp, irq_enable, INTR_ENB);
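
Note that only the dead stores are removed in dnet: the dnet_readl() calls themselves stay, presumably because reading TX_STATUS and INTR_SRC may have side effects on the hardware (acknowledging or clearing state), so the conversion conservatively keeps the access and discards just the unused result:

	/* before: tx_status = dnet_readl(bp, TX_STATUS);  (value never read) */
	dnet_readl(bp, TX_STATUS);	/* access kept, result discarded */
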
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index a817ca661c1f..0981fe9652e5 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -177,6 +177,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* struct ethoc - driver-private device structure
* @iobase: pointer to I/O memory region
* @membase: pointer to buffer memory region
+ * @big_endian: true if the device uses big-endian register accesses
* @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
@@ -189,7 +190,10 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @msg_enable: device state flags
* @lock: device lock
* @mdio: MDIO bus for PHY access
+ * @clk: optional clock, used to derive the MDIO bus frequency
 * @phy_id: address of attached PHY
+ * @old_link: previously recorded link state
+ * @old_duplex: previously recorded duplex mode
*/
struct ethoc {
void __iomem *iobase;
@@ -1015,7 +1019,7 @@ static const struct net_device_ops ethoc_netdev_ops = {
/**
* ethoc_probe - initialize OpenCores ethernet MAC
- * pdev: platform device
+ * @pdev: platform device
*/
static int ethoc_probe(struct platform_device *pdev)
{
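
The ethoc hunks are pure kernel-doc repairs: every struct member mentioned in a '/**' block needs a matching '@name:' line, and function parameters need the leading '@' (the old 'pdev:' line was invisible to the parser and triggered a warning). A minimal well-formed example:

	/**
	 * struct foo - example device state
	 * @iobase: mapped register window
	 * @clk: optional clock
	 */
	struct foo {
		void __iomem *iobase;
		struct clk *clk;
	};

	/**
	 * foo_probe - bind the example driver
	 * @pdev: platform device being probed
	 */
	static int foo_probe(struct platform_device *pdev);
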
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index fdff3b4723ba..06cc863f4dd6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -87,7 +87,7 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
#define DPAA_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
NETIF_MSG_LINK | NETIF_MSG_IFUP | \
- NETIF_MSG_IFDOWN)
+ NETIF_MSG_IFDOWN | NETIF_MSG_HW)
#define DPAA_INGRESS_CS_THRESHOLD 0x10000000
/* Ingress congestion threshold on FMan ports
diff --git a/drivers/net/ethernet/freescale/dpaa2/Kconfig b/drivers/net/ethernet/freescale/dpaa2/Kconfig
index feea797cde02..cfd369cf4c8c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Kconfig
+++ b/drivers/net/ethernet/freescale/dpaa2/Kconfig
@@ -3,6 +3,7 @@ config FSL_DPAA2_ETH
tristate "Freescale DPAA2 Ethernet"
depends on FSL_MC_BUS && FSL_MC_DPIO
select PHYLINK
+ select PCS_LYNX
help
This is the DPAA2 Ethernet driver supporting Freescale SoCs
with DPAA2 (DataPath Acceleration Architecture v2).
diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile
index 6e7f33c956bf..146cb3540e61 100644
--- a/drivers/net/ethernet/freescale/dpaa2/Makefile
+++ b/drivers/net/ethernet/freescale/dpaa2/Makefile
@@ -6,7 +6,7 @@
obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
-fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpaa2-eth-devlink.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
index 83dee575c2fa..84de0644168d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-dcb.c
@@ -17,12 +17,12 @@ static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
return 0;
}
-static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
+static inline bool dpaa2_eth_is_prio_enabled(u8 pfc_en, u8 tc)
{
return !!(pfc_en & (1 << tc));
}
-static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
+static int dpaa2_eth_set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
{
struct dpni_congestion_notification_cfg cfg = {0};
int i, err;
@@ -33,7 +33,7 @@ static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
cfg.message_ctx = 0ULL;
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
- if (is_prio_enabled(pfc_en, i)) {
+ if (dpaa2_eth_is_prio_enabled(pfc_en, i)) {
cfg.threshold_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
cfg.threshold_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
} else {
@@ -93,7 +93,7 @@ static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
}
/* Configure congestion notifications for the enabled priorities */
- err = set_pfc_cn(priv, pfc->pfc_en);
+ err = dpaa2_eth_set_pfc_cn(priv, pfc->pfc_en);
if (err)
return err;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
index 56d9927fbfda..b87db0846e10 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c
@@ -42,24 +42,7 @@ static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_cpu_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_cpu_ops = {
- .open = dpaa2_dbg_cpu_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_cpu);
static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
{
@@ -106,24 +89,7 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_fqs_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_fq_ops = {
- .open = dpaa2_dbg_fqs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_fqs);
static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
{
@@ -151,24 +117,7 @@ static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset)
return 0;
}
-static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file)
-{
- int err;
- struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private;
-
- err = single_open(file, dpaa2_dbg_ch_show, priv);
- if (err < 0)
- netdev_err(priv->net_dev, "single_open() failed\n");
-
- return err;
-}
-
-static const struct file_operations dpaa2_dbg_ch_ops = {
- .open = dpaa2_dbg_ch_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_ch);
void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
{
@@ -179,13 +128,13 @@ void dpaa2_dbg_add(struct dpaa2_eth_priv *priv)
priv->dbg.dir = dir;
/* per-cpu stats file */
- debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_ops);
+ debugfs_create_file("cpu_stats", 0444, dir, priv, &dpaa2_dbg_cpu_fops);
/* per-fq stats file */
- debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fq_ops);
+ debugfs_create_file("fq_stats", 0444, dir, priv, &dpaa2_dbg_fqs_fops);
/* per-fq stats file */
- debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_ops);
+ debugfs_create_file("ch_stats", 0444, dir, priv, &dpaa2_dbg_ch_fops);
}
void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv)
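
DEFINE_SHOW_ATTRIBUTE(name) replaces each hand-rolled open helper and file_operations table: given a name##_show() function it generates a name##_open() wrapper around single_open() plus a const struct file_operations named name##_fops, which is why the debugfs_create_file() calls above now reference dpaa2_dbg_cpu_fops and friends. Approximately, DEFINE_SHOW_ATTRIBUTE(dpaa2_dbg_cpu) expands to:

	static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file)
	{
		return single_open(file, dpaa2_dbg_cpu_show, inode->i_private);
	}

	static const struct file_operations dpaa2_dbg_cpu_fops = {
		.owner		= THIS_MODULE,
		.open		= dpaa2_dbg_cpu_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};
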
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
new file mode 100644
index 000000000000..833696245565
--- /dev/null
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP
+ */
+#include "dpaa2-eth.h"
+
+#define DPAA2_ETH_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, 0)
+
+static const struct devlink_trap_group dpaa2_eth_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(PARSER_ERROR_DROPS, 0),
+};
+
+static const struct devlink_trap dpaa2_eth_traps_arr[] = {
+ DPAA2_ETH_TRAP_DROP(VXLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(LLC_SNAP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(VLAN_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(PPPOE_PPP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(MPLS_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ARP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_1_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IP_N_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GRE_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(UDP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(TCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(IPSEC_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(SCTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(DCCP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(GTP_PARSING, PARSER_ERROR_DROPS),
+ DPAA2_ETH_TRAP_DROP(ESP_PARSING, PARSER_ERROR_DROPS),
+};
+
+static int dpaa2_eth_dl_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ char buf[10];
+ int err;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ scnprintf(buf, 10, "%d.%d", priv->dpni_ver_major, priv->dpni_ver_minor);
+ err = devlink_info_version_running_put(req, "dpni", buf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static struct dpaa2_eth_trap_item *
+dpaa2_eth_dl_trap_item_lookup(struct dpaa2_eth_priv *priv, u16 trap_id)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data = priv->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_traps_arr); i++) {
+ if (dpaa2_eth_traps_arr[i].id == trap_id)
+ return &dpaa2_eth_trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr)
+{
+ struct dpaa2_faf_error_bit {
+ int position;
+ enum devlink_trap_generic_id trap_id;
+ } faf_bits[] = {
+ { .position = 5, .trap_id = DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING },
+ { .position = 20, .trap_id = DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING },
+ { .position = 24, .trap_id = DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING },
+ { .position = 26, .trap_id = DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING },
+ { .position = 29, .trap_id = DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING },
+ { .position = 31, .trap_id = DEVLINK_TRAP_GENERIC_ID_ARP_PARSING },
+ { .position = 52, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING },
+ { .position = 61, .trap_id = DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING },
+ { .position = 67, .trap_id = DEVLINK_TRAP_GENERIC_ID_GRE_PARSING },
+ { .position = 71, .trap_id = DEVLINK_TRAP_GENERIC_ID_UDP_PARSING },
+ { .position = 76, .trap_id = DEVLINK_TRAP_GENERIC_ID_TCP_PARSING },
+ { .position = 80, .trap_id = DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING },
+ { .position = 82, .trap_id = DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING },
+ { .position = 84, .trap_id = DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING },
+ { .position = 88, .trap_id = DEVLINK_TRAP_GENERIC_ID_GTP_PARSING },
+ { .position = 90, .trap_id = DEVLINK_TRAP_GENERIC_ID_ESP_PARSING },
+ };
+ u64 faf_word;
+ u64 mask;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(faf_bits); i++) {
+ if (faf_bits[i].position < 32) {
+ /* Low part of FAF.
+ * position ranges from 31 to 0, mask from 0 to 31.
+ */
+ mask = 1ull << (31 - faf_bits[i].position);
+ faf_word = __le32_to_cpu(fapr->faf_lo);
+ } else {
+ /* High part of FAF.
+ * position ranges from 95 to 32, mask from 0 to 63.
+ */
+ mask = 1ull << (63 - (faf_bits[i].position - 32));
+ faf_word = __le64_to_cpu(fapr->faf_hi);
+ }
+ if (faf_word & mask)
+ return dpaa2_eth_dl_trap_item_lookup(priv, faf_bits[i].trap_id);
+ }
+ return NULL;
+}
+
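+/* The FAF (frame attribute flags) field is a 96-bit bit string split across
+ * a 32-bit low word and a 64-bit high word, with position 0 the most
+ * significant bit, so the lookup above flips the documented position into a
+ * shift from the other end. Two worked cases from the table:
+ *
+ *	VLAN parsing error, position 24 (low word, positions 0..31):
+ *		mask = 1ull << (31 - 24)        -> bit 7 of faf_lo
+ *
+ *	IP_N parsing error, position 61 (high word, positions 32..95):
+ *		mask = 1ull << (63 - (61 - 32)) -> bit 34 of faf_hi
+ */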
+static int dpaa2_eth_dl_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct dpaa2_eth_trap_item *dpaa2_eth_trap_item;
+
+ dpaa2_eth_trap_item = dpaa2_eth_dl_trap_item_lookup(priv, trap->id);
+ if (WARN_ON(!dpaa2_eth_trap_item))
+ return -ENOENT;
+
+ dpaa2_eth_trap_item->trap_ctx = trap_ctx;
+
+ return 0;
+}
+
+static int dpaa2_eth_dl_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ /* No support for changing the action of an independent packet trap,
+ * only per trap group - parser error drops
+ */
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change trap action independently of group");
+ return -EOPNOTSUPP;
+}
+
+static int dpaa2_eth_dl_trap_group_action_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_devlink_priv *dl_priv = devlink_priv(devlink);
+ struct dpaa2_eth_priv *priv = dl_priv->dpaa2_priv;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpni_error_cfg err_cfg = {0};
+ int err;
+
+ if (group->id != DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS)
+ return -EOPNOTSUPP;
+
+ /* Configure handling of frames marked as errors from the parser */
+ err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
+ err_cfg.set_frame_annotation = 1;
+
+ switch (action) {
+ case DEVLINK_TRAP_ACTION_DROP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
+ break;
+ case DEVLINK_TRAP_ACTION_TRAP:
+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, &err_cfg);
+ if (err) {
+ dev_err(dev, "dpni_set_errors_behavior failed\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops dpaa2_eth_devlink_ops = {
+ .info_get = dpaa2_eth_dl_info_get,
+ .trap_init = dpaa2_eth_dl_trap_init,
+ .trap_action_set = dpaa2_eth_dl_trap_action_set,
+ .trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
+};
+
+int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ struct dpaa2_eth_devlink_priv *dl_priv;
+ int err;
+
+ priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv));
+ if (!priv->devlink) {
+ dev_err(dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+ dl_priv = devlink_priv(priv->devlink);
+ dl_priv->dpaa2_priv = priv;
+
+ err = devlink_register(priv->devlink, dev);
+ if (err) {
+ dev_err(dev, "devlink_register() = %d\n", err);
+ goto devlink_free;
+ }
+
+ return 0;
+
+devlink_free:
+ devlink_free(priv->devlink);
+
+ return err;
+}
+
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_unregister(priv->devlink);
+ devlink_free(priv->devlink);
+}
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devlink_port_register(priv->devlink, devlink_port, 0);
+ if (err)
+ return err;
+
+ devlink_port_type_eth_set(devlink_port, priv->net_dev);
+
+ return 0;
+}
+
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv)
+{
+ struct devlink_port *devlink_port = &priv->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
+}
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv)
+{
+ struct dpaa2_eth_trap_data *dpaa2_eth_trap_data;
+ struct net_device *net_dev = priv->net_dev;
+ struct device *dev = net_dev->dev.parent;
+ int err;
+
+ dpaa2_eth_trap_data = kzalloc(sizeof(*dpaa2_eth_trap_data), GFP_KERNEL);
+ if (!dpaa2_eth_trap_data)
+ return -ENOMEM;
+ priv->trap_data = dpaa2_eth_trap_data;
+
+ dpaa2_eth_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(dpaa2_eth_traps_arr),
+ sizeof(struct dpaa2_eth_trap_item),
+ GFP_KERNEL);
+ if (!dpaa2_eth_trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto trap_data_free;
+ }
+
+ err = devlink_trap_groups_register(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ if (err) {
+ dev_err(dev, "devlink_trap_groups_register() = %d\n", err);
+ goto trap_items_arr_free;
+ }
+
+ err = devlink_traps_register(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr), priv);
+ if (err) {
+ dev_err(dev, "devlink_traps_register() = %d\n", err);
+ goto trap_groups_unregister;
+ }
+
+ return 0;
+
+trap_groups_unregister:
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+trap_items_arr_free:
+ kfree(dpaa2_eth_trap_data->trap_items_arr);
+trap_data_free:
+ kfree(dpaa2_eth_trap_data);
+ priv->trap_data = NULL;
+
+ return err;
+}
+
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv)
+{
+ devlink_traps_unregister(priv->devlink, dpaa2_eth_traps_arr,
+ ARRAY_SIZE(dpaa2_eth_traps_arr));
+ devlink_trap_groups_unregister(priv->devlink, dpaa2_eth_trap_groups_arr,
+ ARRAY_SIZE(dpaa2_eth_trap_groups_arr));
+ kfree(priv->trap_data->trap_items_arr);
+ kfree(priv->trap_data);
+}
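
Taken together the new file implements the full devlink life cycle for the driver. A plausible probe-time ordering (the actual call sites live in dpaa2-eth.c, so this ordering is an assumption, and error unwinding is elided):

	/* assumed probe order; remove unwinds in reverse */
	err = dpaa2_eth_dl_register(priv);		/* devlink_alloc + devlink_register */
	err = dpaa2_eth_dl_traps_register(priv);	/* groups first, then traps */
	err = dpaa2_eth_dl_port_add(priv);		/* physical port, eth type set */
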
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index cf5383bb8331..cf9400a9886d 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -11,10 +11,11 @@
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
-#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include <linux/fsl/ptp_qoriq.h>
+#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
@@ -30,6 +31,9 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
+struct ptp_qoriq *dpaa2_ptp;
+EXPORT_SYMBOL(dpaa2_ptp);
+
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
dma_addr_t iova_addr)
{
@@ -40,9 +44,9 @@ static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
return phys_to_virt(phys_addr);
}
-static void validate_rx_csum(struct dpaa2_eth_priv *priv,
- u32 fd_status,
- struct sk_buff *skb)
+static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
+ u32 fd_status,
+ struct sk_buff *skb)
{
skb_checksum_none_assert(skb);
@@ -62,9 +66,9 @@ static void validate_rx_csum(struct dpaa2_eth_priv *priv,
/* Free a received FD.
* Not to be used for Tx conf FDs or on any other paths.
*/
-static void free_rx_fd(struct dpaa2_eth_priv *priv,
- const struct dpaa2_fd *fd,
- void *vaddr)
+static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
+ const struct dpaa2_fd *fd,
+ void *vaddr)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t addr = dpaa2_fd_get_addr(fd);
@@ -100,9 +104,9 @@ free_buf:
}
/* Build a linear skb based on a single-buffer frame descriptor */
-static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
- const struct dpaa2_fd *fd,
- void *fd_vaddr)
+static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ void *fd_vaddr)
{
struct sk_buff *skb = NULL;
u16 fd_offset = dpaa2_fd_get_offset(fd);
@@ -121,9 +125,9 @@ static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
}
/* Build a non linear (fragmented) skb based on a S/G table */
-static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_sg_entry *sgt)
+static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_sg_entry *sgt)
{
struct sk_buff *skb = NULL;
struct device *dev = priv->net_dev->dev.parent;
@@ -204,7 +208,8 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
/* Free buffers acquired from the buffer pool or which were meant to
* be released in the pool
*/
-static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
+static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
+ int count)
{
struct device *dev = priv->net_dev->dev.parent;
void *vaddr;
@@ -218,9 +223,9 @@ static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
}
}
-static void xdp_release_buf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- dma_addr_t addr)
+static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ dma_addr_t addr)
{
int retries = 0;
int err;
@@ -238,7 +243,7 @@ static void xdp_release_buf(struct dpaa2_eth_priv *priv,
}
if (err) {
- free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
+ dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
ch->buf_count -= ch->xdp.drop_cnt;
}
@@ -274,9 +279,9 @@ static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
return total_enqueued;
}
-static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq *fq)
+static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *fq)
{
struct rtnl_link_stats64 *percpu_stats;
struct dpaa2_fd *fds;
@@ -295,17 +300,17 @@ static void xdp_tx_flush(struct dpaa2_eth_priv *priv,
ch->stats.xdp_tx++;
}
for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
- xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
+ dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
percpu_stats->tx_errors++;
ch->stats.xdp_tx_err++;
}
fq->xdp_tx_fds.num = 0;
}
-static void xdp_enqueue(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_fd *fd,
- void *buf_start, u16 queue_id)
+static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_fd *fd,
+ void *buf_start, u16 queue_id)
{
struct dpaa2_faead *faead;
struct dpaa2_fd *dest_fd;
@@ -333,13 +338,13 @@ static void xdp_enqueue(struct dpaa2_eth_priv *priv,
if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
return;
- xdp_tx_flush(priv, ch, fq);
+ dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}
-static u32 run_xdp(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq *rx_fq,
- struct dpaa2_fd *fd, void *vaddr)
+static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq *rx_fq,
+ struct dpaa2_fd *fd, void *vaddr)
{
dma_addr_t addr = dpaa2_fd_get_addr(fd);
struct bpf_prog *xdp_prog;
@@ -372,7 +377,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
case XDP_PASS:
break;
case XDP_TX:
- xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
+ dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
break;
default:
bpf_warn_invalid_xdp_action(xdp_act);
@@ -381,7 +386,7 @@ static u32 run_xdp(struct dpaa2_eth_priv *priv,
trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
fallthrough;
case XDP_DROP:
- xdp_release_buf(priv, ch, addr);
+ dpaa2_eth_xdp_release_buf(priv, ch, addr);
ch->stats.xdp_drop++;
break;
case XDP_REDIRECT:
@@ -441,7 +446,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
if (fd_format == dpaa2_fd_single) {
- xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
+ xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
if (xdp_act != XDP_PASS) {
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
@@ -450,13 +455,13 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- skb = build_linear_skb(ch, fd, vaddr);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
} else if (fd_format == dpaa2_fd_sg) {
WARN_ON(priv->xdp_prog);
dma_unmap_page(dev, addr, priv->rx_buf_size,
DMA_BIDIRECTIONAL);
- skb = build_frag_skb(priv, ch, buf_data);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
free_pages((unsigned long)vaddr, 0);
percpu_extras->rx_sg_frames++;
percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
@@ -485,7 +490,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
/* Check if we need to validate the L4 csum */
if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
status = le32_to_cpu(fas->status);
- validate_rx_csum(priv, status, skb);
+ dpaa2_eth_validate_rx_csum(priv, status, skb);
}
skb->protocol = eth_type_trans(skb, priv->net_dev);
@@ -499,19 +504,71 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
return;
err_build_skb:
- free_rx_fd(priv, fd, vaddr);
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
percpu_stats->rx_dropped++;
}
+/* Processing of Rx frames received on the error FQ
+ * We check the error bits, report the frame through the matching
+ * devlink trap (if any) and then free it
+ */
+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ const struct dpaa2_fd *fd,
+ struct dpaa2_eth_fq *fq __always_unused)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ dma_addr_t addr = dpaa2_fd_get_addr(fd);
+ u8 fd_format = dpaa2_fd_get_format(fd);
+ struct rtnl_link_stats64 *percpu_stats;
+ struct dpaa2_eth_trap_item *trap_item;
+ struct dpaa2_fapr *fapr;
+ struct sk_buff *skb;
+ void *buf_data;
+ void *vaddr;
+
+ vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
+ dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+
+ buf_data = vaddr + dpaa2_fd_get_offset(fd);
+
+ if (fd_format == dpaa2_fd_single) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
+ } else if (fd_format == dpaa2_fd_sg) {
+ dma_unmap_page(dev, addr, priv->rx_buf_size,
+ DMA_BIDIRECTIONAL);
+ skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
+ free_pages((unsigned long)vaddr, 0);
+ } else {
+ /* We don't support any other format */
+ dpaa2_eth_free_rx_fd(priv, fd, vaddr);
+ goto err_frame_format;
+ }
+
+ fapr = dpaa2_get_fapr(vaddr, false);
+ trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
+ if (trap_item)
+ devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
+ &priv->devlink_port, NULL);
+ consume_skb(skb);
+
+err_frame_format:
+ percpu_stats = this_cpu_ptr(priv->percpu_stats);
+ percpu_stats->rx_errors++;
+ ch->buf_count--;
+}
+
/* Consume all frames pull-dequeued into the store. This is the simplest way to
* make sure we don't accidentally issue another volatile dequeue which would
* overwrite (leak) frames already in the store.
*
* Observance of NAPI budget is not our concern, leaving that to the caller.
*/
-static int consume_frames(struct dpaa2_eth_channel *ch,
- struct dpaa2_eth_fq **src)
+static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
+ struct dpaa2_eth_fq **src)
{
struct dpaa2_eth_priv *priv = ch->priv;
struct dpaa2_eth_fq *fq = NULL;
@@ -559,11 +616,57 @@ static int consume_frames(struct dpaa2_eth_channel *ch,
return cleaned;
}
+static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
+ u8 *msgtype, u8 *twostep, u8 *udp,
+ u16 *correction_offset,
+ u16 *origintimestamp_offset)
+{
+ unsigned int ptp_class;
+ struct ptp_header *hdr;
+ unsigned int type;
+ u8 *base;
+
+ ptp_class = ptp_classify_raw(skb);
+ if (ptp_class == PTP_CLASS_NONE)
+ return -EINVAL;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return -EINVAL;
+
+ *msgtype = ptp_get_msgtype(hdr, ptp_class);
+ *twostep = hdr->flag_field[0] & 0x2;
+
+ type = ptp_class & PTP_CLASS_PMASK;
+ if (type == PTP_CLASS_IPV4 ||
+ type == PTP_CLASS_IPV6)
+ *udp = 1;
+ else
+ *udp = 0;
+
+ base = skb_mac_header(skb);
+ *correction_offset = (u8 *)&hdr->correction - base;
+ *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
+
+ return 0;
+}
+
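+/* dpaa2_eth_ptp_parse() boils a PTP event packet down to the two byte
+ * offsets the hardware needs, both relative to the MAC header. For an
+ * untagged PTP-over-Ethernet Sync frame the numbers work out as below,
+ * given that correctionField sits at byte 8 of the 34-byte common header:
+ *
+ *	base = skb_mac_header(skb)
+ *	PTP header starts at base + 14 (Ethernet header)
+ *
+ *	*correction_offset      = 14 + 8  = 22
+ *	*origintimestamp_offset = 14 + 34 = 48	(field right after the
+ *						 common header)
+ */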
/* Configure the egress frame annotation for timestamp update */
-static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
+static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fd *fd,
+ void *buf_start,
+ struct sk_buff *skb)
{
+ struct ptp_tstamp origin_timestamp;
+ struct dpni_single_step_cfg cfg;
+ u8 msgtype, twostep, udp;
struct dpaa2_faead *faead;
+ struct dpaa2_fas *fas;
+ struct timespec64 ts;
+ u16 offset1, offset2;
u32 ctrl, frc;
+ __le64 *ns;
+ u8 *data;
/* Mark the egress frame annotation area as valid */
frc = dpaa2_fd_get_frc(fd);
@@ -579,12 +682,52 @@ static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
faead = dpaa2_get_faead(buf_start, true);
faead->ctrl = cpu_to_le32(ctrl);
+
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2) ||
+ msgtype != 0 || twostep) {
+ WARN_ONCE(1, "Bad packet for one-step timestamping\n");
+ return;
+ }
+
+ /* Mark the frame annotation status as valid */
+ frc = dpaa2_fd_get_frc(fd);
+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
+
+ /* Mark the PTP flag for one step timestamping */
+ fas = dpaa2_get_fas(buf_start, true);
+ fas->status = cpu_to_le32(DPAA2_FAS_PTP);
+
+ dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
+ ns = dpaa2_get_ts(buf_start, true);
+ *ns = cpu_to_le64(timespec64_to_ns(&ts) /
+ DPAA2_PTP_CLK_PERIOD_NS);
+
+ /* Update current time to PTP message originTimestamp field */
+ ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
+ data = skb_mac_header(skb);
+ *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
+ *(__be32 *)(data + offset2 + 2) =
+ htonl(origin_timestamp.sec_lsb);
+ *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
+
+ cfg.en = 1;
+ cfg.ch_update = udp;
+ cfg.offset = offset1;
+ cfg.peer_delay = 0;
+
+ if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
+ &cfg))
+ WARN_ONCE(1, "Failed to set single step register");
+ }
}
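
The three stores above fill in the 10-byte PTP Timestamp format in place: a 48-bit secondsField written as a 16-bit high part and a 32-bit low part, followed by the 32-bit nanosecondsField, matching how struct ptp_tstamp splits the value:

	/*
	 * originTimestamp layout at data + offset2:
	 *	+0: __be16 sec_msb	(seconds, upper 16 bits)
	 *	+2: __be32 sec_lsb	(seconds, lower 32 bits)
	 *	+6: __be32 nsec		(nanoseconds)
	 */
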
/* Create a frame descriptor based on a fragmented skb */
-static int build_sg_fd(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
void *sgt_buf = NULL;
@@ -606,7 +749,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
return -EINVAL;
- scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
+ scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
if (unlikely(!scl))
return -ENOMEM;
@@ -653,6 +796,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
* skb backpointer in the software annotation area. We'll need
* all of them on Tx Conf.
*/
+ *swa_addr = (void *)sgt_buf;
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SG;
swa->sg.skb = skb;
@@ -672,9 +816,6 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, sgt_buf);
-
return 0;
dma_map_single_failed:
@@ -692,9 +833,10 @@ dma_map_sg_failed:
* enough for the HW requirements, thus instead of realloc-ing the skb we
* create a SG frame descriptor with only one entry.
*/
-static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_sgt_cache *sgt_cache;
@@ -732,6 +874,7 @@ static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
dpaa2_sg_set_final(sgt, true);
/* Store the skb backpointer in the SGT buffer */
+ *swa_addr = (void *)sgt_buf;
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
@@ -750,9 +893,6 @@ static int build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_len(fd, skb->len);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, sgt_buf);
-
return 0;
sgt_map_failed:
@@ -767,16 +907,17 @@ data_map_failed:
}
/* Create a frame descriptor based on a linear skb */
-static int build_single_fd(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb,
- struct dpaa2_fd *fd)
+static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
+ struct sk_buff *skb,
+ struct dpaa2_fd *fd,
+ void **swa_addr)
{
struct device *dev = priv->net_dev->dev.parent;
u8 *buffer_start, *aligned_start;
struct dpaa2_eth_swa *swa;
dma_addr_t addr;
- buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
+ buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
/* If there's enough room to align the FD address, do it.
* It will help hardware optimize accesses.
@@ -790,6 +931,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* (in the private data area) such that we can release it
* on Tx confirm
*/
+ *swa_addr = (void *)buffer_start;
swa = (struct dpaa2_eth_swa *)buffer_start;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
@@ -806,9 +948,6 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
dpaa2_fd_set_format(fd, dpaa2_fd_single);
dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
- enable_tx_tstamp(fd, buffer_start);
-
return 0;
}
@@ -819,9 +958,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv,
* This can be called either from dpaa2_eth_tx_conf() or on the error path of
* dpaa2_eth_tx().
*/
-static void free_tx_fd(const struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq,
- const struct dpaa2_fd *fd, bool in_napi)
+static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq,
+ const struct dpaa2_fd *fd, bool in_napi)
{
struct device *dev = priv->net_dev->dev.parent;
dma_addr_t fd_addr, sg_addr;
@@ -892,7 +1031,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
}
/* Get the timestamp value */
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ if (skb->cb[0] == TX_TSTAMP) {
struct skb_shared_hwtstamps shhwtstamps;
__le64 *ts = dpaa2_get_ts(buffer_start, true);
u64 ns;
@@ -902,6 +1041,8 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
shhwtstamps.hwtstamp = ns_to_ktime(ns);
skb_tstamp_tx(skb, &shhwtstamps);
+ } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ mutex_unlock(&priv->onestep_tstamp_lock);
}
/* Free SGT buffer allocated on tx */
@@ -921,7 +1062,8 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv,
napi_consume_skb(skb, in_napi);
}
-static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
+ struct net_device *net_dev)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_fd fd;
@@ -934,11 +1076,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
u32 fd_len;
u8 prio = 0;
int err, i;
+ void *swa;
percpu_stats = this_cpu_ptr(priv->percpu_stats);
percpu_extras = this_cpu_ptr(priv->percpu_extras);
- needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
+ needed_headroom = dpaa2_eth_needed_headroom(skb);
/* We'll be holding a back-reference to the skb until Tx Confirmation;
* we don't want that overwritten by a concurrent Tx with a cloned skb.
@@ -954,17 +1097,17 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
memset(&fd, 0, sizeof(fd));
if (skb_is_nonlinear(skb)) {
- err = build_sg_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
} else if (skb_headroom(skb) < needed_headroom) {
- err = build_sg_fd_single_buf(priv, skb, &fd);
+ err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
percpu_extras->tx_sg_frames++;
percpu_extras->tx_sg_bytes += skb->len;
percpu_extras->tx_converted_sg_frames++;
percpu_extras->tx_converted_sg_bytes += skb->len;
} else {
- err = build_single_fd(priv, skb, &fd);
+ err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
}
if (unlikely(err)) {
@@ -972,6 +1115,9 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
goto err_build_fd;
}
+ if (skb->cb[0])
+ dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);
+
/* Tracing point */
trace_dpaa2_tx_fd(net_dev, &fd);
@@ -1010,7 +1156,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0)) {
percpu_stats->tx_errors++;
/* Clean up everything, including freeing the skb */
- free_tx_fd(priv, fq, &fd, false);
+ dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
netdev_tx_completed_queue(nq, 1, fd_len);
} else {
percpu_stats->tx_packets++;
@@ -1025,6 +1171,63 @@ err_build_fd:
return NETDEV_TX_OK;
}
+static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
+{
+ struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
+ tx_onestep_tstamp);
+ struct sk_buff *skb;
+
+ while (true) {
+ skb = skb_dequeue(&priv->tx_skbs);
+ if (!skb)
+ return;
+
+ /* Lock just before transmitting the one-step timestamping
+ * packet, and release the lock in dpaa2_eth_free_tx_fd once
+ * hardware confirms the packet has been sent, or during
+ * cleanup on transmit failure.
+ */
+ mutex_lock(&priv->onestep_tstamp_lock);
+ __dpaa2_eth_tx(skb, priv->net_dev);
+ }
+}
+
+static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
+ u8 msgtype, twostep, udp;
+ u16 offset1, offset2;
+
+ /* Utilize skb->cb[0] for timestamping request per skb */
+ skb->cb[0] = 0;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
+ if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
+ skb->cb[0] = TX_TSTAMP;
+ else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
+ skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
+ }
+
+ /* TX for one-step timestamping PTP Sync packet */
+ if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
+ if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
+ &offset1, &offset2))
+ if (msgtype == 0 && twostep == 0) {
+ skb_queue_tail(&priv->tx_skbs, skb);
+ queue_work(priv->dpaa2_ptp_wq,
+ &priv->tx_onestep_tstamp);
+ return NETDEV_TX_OK;
+ }
+ /* Fall back to two-step timestamping if this is not a
+ * one-step PTP Sync packet
+ */
+ skb->cb[0] = TX_TSTAMP;
+ }
+
+ /* TX for other packets */
+ return __dpaa2_eth_tx(skb, net_dev);
+}
+
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
struct dpaa2_eth_channel *ch __always_unused,
@@ -1045,7 +1248,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
- free_tx_fd(priv, fq, fd, true);
+ dpaa2_eth_free_tx_fd(priv, fq, fd, true);
if (likely(!fd_errors))
return;
@@ -1059,7 +1262,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_stats->tx_errors++;
}
-static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
+static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1082,7 +1285,7 @@ static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
return 0;
}
-static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
+static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
int err;
@@ -1106,8 +1309,8 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
/* Perform a single release command to add buffers
* to the specified buffer pool
*/
-static int add_bufs(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch, u16 bpid)
+static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch, u16 bpid)
{
struct device *dev = priv->net_dev->dev.parent;
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
@@ -1155,7 +1358,7 @@ release_bufs:
* not much else we can do about it
*/
if (err) {
- free_bufs(priv, buf_array, i);
+ dpaa2_eth_free_bufs(priv, buf_array, i);
return 0;
}
@@ -1173,7 +1376,7 @@ err_alloc:
return 0;
}
-static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
+static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
int i, j;
int new_count;
@@ -1181,7 +1384,7 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
for (j = 0; j < priv->num_channels; j++) {
for (i = 0; i < DPAA2_ETH_NUM_BUFS;
i += DPAA2_ETH_BUFS_PER_CMD) {
- new_count = add_bufs(priv, priv->channel[j], bpid);
+ new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
priv->channel[j]->buf_count += new_count;
if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
@@ -1193,11 +1396,11 @@ static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
return 0;
}
-/**
+/*
* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
*/
-static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
+static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
int retries = 0;
@@ -1213,17 +1416,17 @@ static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
return;
}
- free_bufs(priv, buf_array, ret);
+ dpaa2_eth_free_bufs(priv, buf_array, ret);
retries = 0;
} while (ret);
}
-static void drain_pool(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
int i;
- drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
- drain_bufs(priv, 1);
+ dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
+ dpaa2_eth_drain_bufs(priv, 1);
for (i = 0; i < priv->num_channels; i++)
priv->channel[i]->buf_count = 0;
@@ -1232,9 +1435,9 @@ static void drain_pool(struct dpaa2_eth_priv *priv)
/* Function is called from softirq context only, so we don't need to guard
* the access to percpu count
*/
-static int refill_pool(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch,
- u16 bpid)
+static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *ch,
+ u16 bpid)
{
int new_count;
@@ -1242,7 +1445,7 @@ static int refill_pool(struct dpaa2_eth_priv *priv,
return 0;
do {
- new_count = add_bufs(priv, ch, bpid);
+ new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
if (unlikely(!new_count)) {
/* Out of memory; abort for now, we'll try later on */
break;
@@ -1272,7 +1475,7 @@ static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
}
}
-static int pull_channel(struct dpaa2_eth_channel *ch)
+static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
int err;
int dequeues = -1;
@@ -1319,14 +1522,14 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
ch->rx_list = &rx_list;
do {
- err = pull_channel(ch);
+ err = dpaa2_eth_pull_channel(ch);
if (unlikely(err))
break;
/* Refill pool if appropriate */
- refill_pool(priv, ch, priv->bpid);
+ dpaa2_eth_refill_pool(priv, ch, priv->bpid);
- store_cleaned = consume_frames(ch, &fq);
+ store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
if (store_cleaned <= 0)
break;
if (fq->type == DPAA2_RX_FQ) {
@@ -1375,12 +1578,12 @@ out:
if (ch->xdp.res & XDP_REDIRECT)
xdp_do_flush_map();
else if (rx_cleaned && ch->xdp.res & XDP_TX)
- xdp_tx_flush(priv, ch, &priv->fq[flowid]);
+ dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
return work_done;
}
-static void enable_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *ch;
int i;
@@ -1391,7 +1594,7 @@ static void enable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void disable_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *ch;
int i;
@@ -1465,7 +1668,7 @@ set_cgtd:
priv->rx_cgtd_enabled = td.enable;
}
-static int link_state_update(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
struct dpni_link_state state = {0};
bool tx_pause;
@@ -1517,7 +1720,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
int err;
- err = seed_pool(priv, priv->bpid);
+ err = dpaa2_eth_seed_pool(priv, priv->bpid);
if (err) {
/* Not much to do; the buffer pool, though not filled up,
* may still contain some buffers which would enable us
@@ -1541,7 +1744,7 @@ static int dpaa2_eth_open(struct net_device *net_dev)
*/
netif_carrier_off(net_dev);
}
- enable_ch_napi(priv);
+ dpaa2_eth_enable_ch_napi(priv);
err = dpni_enable(priv->mc_io, 0, priv->mc_token);
if (err < 0) {
@@ -1549,30 +1752,19 @@ static int dpaa2_eth_open(struct net_device *net_dev)
goto enable_err;
}
- if (!priv->mac) {
- /* If the DPMAC object has already processed the link up
- * interrupt, we have to learn the link state ourselves.
- */
- err = link_state_update(priv);
- if (err < 0) {
- netdev_err(net_dev, "Can't update link state\n");
- goto link_state_err;
- }
- } else {
+ if (priv->mac)
phylink_start(priv->mac->phylink);
- }
return 0;
-link_state_err:
enable_err:
- disable_ch_napi(priv);
- drain_pool(priv);
+ dpaa2_eth_disable_ch_napi(priv);
+ dpaa2_eth_drain_pool(priv);
return err;
}
/* Total number of in-flight frames on ingress queues */
-static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
+static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_fq *fq;
u32 fcnt = 0, bcnt = 0, total = 0;
@@ -1591,13 +1783,13 @@ static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
return total;
}
-static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
int retries = 10;
u32 pending;
do {
- pending = ingress_fq_count(priv);
+ pending = dpaa2_eth_ingress_fq_count(priv);
if (pending)
msleep(100);
} while (pending && --retries);
@@ -1605,7 +1797,7 @@ static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
#define DPNI_TX_PENDING_VER_MAJOR 7
#define DPNI_TX_PENDING_VER_MINOR 13
-static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
union dpni_statistics stats;
int retries = 10;
@@ -1651,7 +1843,7 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
* on WRIOP. After it finishes, wait until all remaining frames on Rx
* and Tx conf queues are consumed on NAPI poll.
*/
- wait_for_egress_fq_empty(priv);
+ dpaa2_eth_wait_for_egress_fq_empty(priv);
do {
dpni_disable(priv->mc_io, 0, priv->mc_token);
@@ -1667,11 +1859,11 @@ static int dpaa2_eth_stop(struct net_device *net_dev)
*/
}
- wait_for_ingress_fq_empty(priv);
- disable_ch_napi(priv);
+ dpaa2_eth_wait_for_ingress_fq_empty(priv);
+ dpaa2_eth_disable_ch_napi(priv);
/* Empty the buffer pool */
- drain_pool(priv);
+ dpaa2_eth_drain_pool(priv);
/* Empty the Scatter-Gather Buffer cache */
dpaa2_eth_sgt_cache_drain(priv);
@@ -1725,8 +1917,8 @@ static void dpaa2_eth_get_stats(struct net_device *net_dev,
/* Copy mac unicast addresses from @net_dev to @priv.
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
-static void add_uc_hw_addr(const struct net_device *net_dev,
- struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
{
struct netdev_hw_addr *ha;
int err;
@@ -1744,8 +1936,8 @@ static void add_uc_hw_addr(const struct net_device *net_dev,
/* Copy mac multicast addresses from @net_dev to @priv
* Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
*/
-static void add_mc_hw_addr(const struct net_device *net_dev,
- struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
+ struct dpaa2_eth_priv *priv)
{
struct netdev_hw_addr *ha;
int err;
@@ -1810,7 +2002,7 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
if (err)
netdev_warn(net_dev, "Can't clear uc filters\n");
- add_uc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Finally, clear uc promisc and set mc promisc as requested. */
err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
@@ -1833,8 +2025,8 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
if (err)
netdev_warn(net_dev, "Can't clear mac filters\n");
- add_mc_hw_addr(net_dev, priv);
- add_uc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_mc_hw_addr(net_dev, priv);
+ dpaa2_eth_add_uc_hw_addr(net_dev, priv);
/* Now we can clear both ucast and mcast promisc, without risking
* to drop legitimate frames anymore.
@@ -1868,14 +2060,14 @@ static int dpaa2_eth_set_features(struct net_device *net_dev,
if (changed & NETIF_F_RXCSUM) {
enable = !!(features & NETIF_F_RXCSUM);
- err = set_rx_csum(priv, enable);
+ err = dpaa2_eth_set_rx_csum(priv, enable);
if (err)
return err;
}
if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
- err = set_tx_csum(priv, enable);
+ err = dpaa2_eth_set_tx_csum(priv, enable);
if (err)
return err;
}
@@ -1888,15 +2080,17 @@ static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct dpaa2_eth_priv *priv = netdev_priv(dev);
struct hwtstamp_config config;
+ if (!dpaa2_ptp)
+ return -EINVAL;
+
if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
return -EFAULT;
switch (config.tx_type) {
case HWTSTAMP_TX_OFF:
- priv->tx_tstamp = false;
- break;
case HWTSTAMP_TX_ON:
- priv->tx_tstamp = true;
+ case HWTSTAMP_TX_ONESTEP_SYNC:
+ priv->tx_tstamp_type = config.tx_type;
break;
default:
return -ERANGE;
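
For reference, the new HWTSTAMP_TX_ONESTEP_SYNC option accepted above is selected from user space through the standard SIOCSHWTSTAMP ioctl, which lands in dpaa2_eth_ts_ioctl(). A minimal user-space sketch, with the interface name and error handling as assumptions:

/* Hedged user-space sketch: request one-step SYNC Tx timestamping. */
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

static int enable_onestep_sync(const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ONESTEP_SYNC,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr = { 0 };
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;
	ret = ioctl(fd, SIOCSHWTSTAMP, &ifr); /* driver validates and applies cfg */
	close(fd);
	return ret;
}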
@@ -1944,7 +2138,7 @@ static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
return true;
}
-static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
+static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
int mfl, err;
@@ -1978,7 +2172,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
if (!xdp_mtu_valid(priv, new_mtu))
return -EINVAL;
- err = set_rx_mfl(priv, new_mtu, true);
+ err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
if (err)
return err;
@@ -1987,7 +2181,7 @@ out:
return 0;
}
-static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
+static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
struct dpni_buffer_layout buf_layout = {0};
int err;
@@ -2013,7 +2207,7 @@ static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
return 0;
}
-static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
+static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
struct dpaa2_eth_priv *priv = netdev_priv(dev);
struct dpaa2_eth_channel *ch;
@@ -2039,10 +2233,10 @@ static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
* so we are sure no old format buffers will be used from now on.
*/
if (need_update) {
- err = set_rx_mfl(priv, dev->mtu, !!prog);
+ err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
if (err)
goto out_err;
- err = update_rx_buffer_headroom(priv, !!prog);
+ err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
if (err)
goto out_err;
}
@@ -2079,7 +2273,7 @@ static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
- return setup_xdp(dev, xdp->prog);
+ return dpaa2_eth_setup_xdp(dev, xdp->prog);
default:
return -EINVAL;
}
@@ -2091,7 +2285,6 @@ static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
struct xdp_frame *xdpf,
struct dpaa2_fd *fd)
{
- struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
unsigned int needed_headroom;
struct dpaa2_eth_swa *swa;
@@ -2101,7 +2294,7 @@ static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
/* We require a minimum headroom to be able to transmit the frame.
* Otherwise return an error and let the original net_device handle it
*/
- needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
+ needed_headroom = dpaa2_eth_needed_headroom(NULL);
if (xdpf->headroom < needed_headroom)
return -EINVAL;
@@ -2316,7 +2509,7 @@ static const struct net_device_ops dpaa2_eth_ops = {
.ndo_setup_tc = dpaa2_eth_setup_tc,
};
-static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
+static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
struct dpaa2_eth_channel *ch;
@@ -2329,7 +2522,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
}
/* Allocate and configure a DPCON object */
-static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
+static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
{
struct fsl_mc_device *dpcon;
struct device *dev = priv->net_dev->dev.parent;
@@ -2373,16 +2566,15 @@ free:
return ERR_PTR(err);
}
-static void free_dpcon(struct dpaa2_eth_priv *priv,
- struct fsl_mc_device *dpcon)
+static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
+ struct fsl_mc_device *dpcon)
{
dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
fsl_mc_object_free(dpcon);
}
-static struct dpaa2_eth_channel *
-alloc_channel(struct dpaa2_eth_priv *priv)
+static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
{
struct dpaa2_eth_channel *channel;
struct dpcon_attr attr;
@@ -2393,7 +2585,7 @@ alloc_channel(struct dpaa2_eth_priv *priv)
if (!channel)
return NULL;
- channel->dpcon = setup_dpcon(priv);
+ channel->dpcon = dpaa2_eth_setup_dpcon(priv);
if (IS_ERR(channel->dpcon)) {
err = PTR_ERR(channel->dpcon);
goto err_setup;
@@ -2413,23 +2605,23 @@ alloc_channel(struct dpaa2_eth_priv *priv)
return channel;
err_get_attr:
- free_dpcon(priv, channel->dpcon);
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
err_setup:
kfree(channel);
return ERR_PTR(err);
}
-static void free_channel(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *channel)
+static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_channel *channel)
{
- free_dpcon(priv, channel->dpcon);
+ dpaa2_eth_free_dpcon(priv, channel->dpcon);
kfree(channel);
}
/* DPIO setup: allocate and configure QBMan channels, setup core affinity
* and register data availability notifications
*/
-static int setup_dpio(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
{
struct dpaa2_io_notification_ctx *nctx;
struct dpaa2_eth_channel *channel;
@@ -2449,7 +2641,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
cpumask_clear(&priv->dpio_cpumask);
for_each_online_cpu(i) {
/* Try to allocate a channel */
- channel = alloc_channel(priv);
+ channel = dpaa2_eth_alloc_channel(priv);
if (IS_ERR_OR_NULL(channel)) {
err = PTR_ERR_OR_ZERO(channel);
if (err != -EPROBE_DEFER)
@@ -2462,7 +2654,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
nctx = &channel->nctx;
nctx->is_cdan = 1;
- nctx->cb = cdan_cb;
+ nctx->cb = dpaa2_eth_cdan_cb;
nctx->id = channel->ch_id;
nctx->desired_cpu = i;
@@ -2510,14 +2702,14 @@ static int setup_dpio(struct dpaa2_eth_priv *priv)
err_set_cdan:
dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
- free_channel(priv, channel);
+ dpaa2_eth_free_channel(priv, channel);
err_alloc_ch:
if (err == -EPROBE_DEFER) {
for (i = 0; i < priv->num_channels; i++) {
channel = priv->channel[i];
nctx = &channel->nctx;
dpaa2_io_service_deregister(channel->dpio, nctx, dev);
- free_channel(priv, channel);
+ dpaa2_eth_free_channel(priv, channel);
}
priv->num_channels = 0;
return err;
@@ -2534,7 +2726,7 @@ err_alloc_ch:
return 0;
}
-static void free_dpio(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_channel *ch;
@@ -2544,12 +2736,12 @@ static void free_dpio(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
- free_channel(priv, ch);
+ dpaa2_eth_free_channel(priv, ch);
}
}
-static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
- int cpu)
+static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
+ int cpu)
{
struct device *dev = priv->net_dev->dev.parent;
int i;
@@ -2566,7 +2758,7 @@ static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
return priv->channel[0];
}
-static void set_fq_affinity(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpaa2_eth_fq *fq;
@@ -2583,6 +2775,7 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
fq = &priv->fq[i];
switch (fq->type) {
case DPAA2_RX_FQ:
+ case DPAA2_RX_ERR_FQ:
fq->target_cpu = rx_cpu;
rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
if (rx_cpu >= nr_cpu_ids)
@@ -2597,13 +2790,13 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
default:
dev_err(dev, "Unknown FQ type: %d\n", fq->type);
}
- fq->channel = get_affine_channel(priv, fq->target_cpu);
+ fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
}
update_xps(priv);
}
-static void setup_fqs(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
{
int i, j;
@@ -2626,12 +2819,16 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
}
}
+ /* We have exactly one Rx error queue per DPNI */
+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
+
/* For each FQ, decide on which core to process incoming frames */
- set_fq_affinity(priv);
+ dpaa2_eth_set_fq_affinity(priv);
}
/* Allocate and configure one buffer pool for each interface */
-static int setup_dpbp(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv)
{
int err;
struct fsl_mc_device *dpbp_dev;
@@ -2690,15 +2887,15 @@ err_open:
return err;
}
-static void free_dpbp(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv)
{
- drain_pool(priv);
+ dpaa2_eth_drain_pool(priv);
dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
fsl_mc_object_free(priv->dpbp_dev);
}
-static int set_buffer_layout(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_buffer_layout buf_layout = {0};
@@ -2723,8 +2920,10 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
/* tx buffer */
buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
buf_layout.pass_timestamp = true;
+ buf_layout.pass_frame_status = true;
buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
- DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX, &buf_layout);
if (err) {
@@ -2733,7 +2932,8 @@ static int set_buffer_layout(struct dpaa2_eth_priv *priv)
}
/* tx-confirm buffer */
- buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+ buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_TX_CONFIRM, &buf_layout);
if (err) {
@@ -2815,7 +3015,7 @@ static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
return 0;
}
-static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
{
if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
@@ -2824,7 +3024,7 @@ static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}
-static int set_pause(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_link_cfg link_cfg = {0};
@@ -2851,7 +3051,7 @@ static int set_pause(struct dpaa2_eth_priv *priv)
return 0;
}
-static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
{
struct dpni_queue_id qid = {0};
struct dpaa2_eth_fq *fq;
@@ -2893,7 +3093,7 @@ out_err:
}
/* Configure ingress classification based on VLAN PCP */
-static int set_vlan_qos(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpkg_profile_cfg kg_cfg = {0};
@@ -3005,7 +3205,7 @@ out_free_tbl:
}
/* Configure the DPNI object this interface is associated with */
-static int setup_dpni(struct fsl_mc_device *ls_dev)
+static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
{
struct device *dev = &ls_dev->dev;
struct dpaa2_eth_priv *priv;
@@ -3053,20 +3253,20 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
goto close;
}
- err = set_buffer_layout(priv);
+ err = dpaa2_eth_set_buffer_layout(priv);
if (err)
goto close;
- set_enqueue_mode(priv);
+ dpaa2_eth_set_enqueue_mode(priv);
/* Enable pause frame support */
if (dpaa2_eth_has_pause_support(priv)) {
- err = set_pause(priv);
+ err = dpaa2_eth_set_pause(priv);
if (err)
goto close;
}
- err = set_vlan_qos(priv);
+ err = dpaa2_eth_set_vlan_qos(priv);
if (err && err != -EOPNOTSUPP)
goto close;
@@ -3086,7 +3286,7 @@ close:
return err;
}
-static void free_dpni(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
{
int err;
@@ -3098,8 +3298,8 @@ static void free_dpni(struct dpaa2_eth_priv *priv)
dpni_close(priv->mc_io, 0, priv->mc_token);
}
-static int setup_rx_flow(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq)
+static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
@@ -3150,8 +3350,8 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
return 0;
}
-static int setup_tx_flow(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_fq *fq)
+static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_queue queue;
@@ -3198,6 +3398,38 @@ static int setup_tx_flow(struct dpaa2_eth_priv *priv,
return 0;
}
+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
+ struct dpaa2_eth_fq *fq)
+{
+ struct device *dev = priv->net_dev->dev.parent;
+ struct dpni_queue q = { { 0 } };
+ struct dpni_queue_id qid;
+ u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
+ int err;
+
+ err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
+ if (err) {
+ dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ fq->fqid = qid.fqid;
+
+ q.destination.id = fq->channel->dpcon_id;
+ q.destination.type = DPNI_DEST_DPCON;
+ q.destination.priority = 1;
+ q.user_context = (u64)(uintptr_t)fq;
+ err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
+ DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
+ if (err) {
+ dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
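The consume callback installed for this queue, dpaa2_eth_rx_err(), lives outside this hunk. On the dequeue side, the fq pointer stashed in user_context above is recovered and dispatched the same way as for the other queue types; a sketch of that pattern, with the exact call site an assumption based on the driver's existing frame-consumption loop:

	/* Sketch: recover the FQ from the dequeue descriptor and invoke
	 * its consume callback -- dpaa2_eth_rx_err() for the Rx error FQ.
	 */
	fd = dpaa2_dq_fd(dq);
	fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
	fq->consume(priv, ch, fd, fq);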
/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
{
@@ -3266,7 +3498,7 @@ static const struct dpaa2_eth_dist_fields dist_fields[] = {
};
/* Configure the Rx hash key using the legacy API */
-static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_tc_dist_cfg dist_cfg;
@@ -3291,7 +3523,7 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
}
/* Configure the Rx hash key using the new API */
-static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
@@ -3311,13 +3543,19 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dev_err(dev, "dpni_set_rx_hash_dist failed\n");
break;
}
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
}
return err;
}
/* Configure the Rx flow classification key */
-static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
+static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
@@ -3337,6 +3575,12 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dev_err(dev, "dpni_set_rx_fs_dist failed\n");
break;
}
+
+ /* If the flow steering / hashing key is shared between all
+ * traffic classes, install it just once
+ */
+ if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
+ break;
}
return err;
@@ -3452,11 +3696,11 @@ static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
if (type == DPAA2_ETH_RX_DIST_HASH) {
if (dpaa2_eth_has_legacy_dist(priv))
- err = config_legacy_hash_key(priv, key_iova);
+ err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
else
- err = config_hash_key(priv, key_iova);
+ err = dpaa2_eth_config_hash_key(priv, key_iova);
} else {
- err = config_cls_key(priv, key_iova);
+ err = dpaa2_eth_config_cls_key(priv, key_iova);
}
dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
@@ -3531,7 +3775,7 @@ out:
/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
* frame queues and channels
*/
-static int bind_dpni(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3579,10 +3823,13 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_fqs; i++) {
switch (priv->fq[i].type) {
case DPAA2_RX_FQ:
- err = setup_rx_flow(priv, &priv->fq[i]);
+ err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
break;
case DPAA2_TX_CONF_FQ:
- err = setup_tx_flow(priv, &priv->fq[i]);
+ err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
+ break;
+ case DPAA2_RX_ERR_FQ:
+ err = setup_rx_err_flow(priv, &priv->fq[i]);
break;
default:
dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
@@ -3603,7 +3850,7 @@ static int bind_dpni(struct dpaa2_eth_priv *priv)
}
/* Allocate rings for storing incoming frame descriptors */
-static int alloc_rings(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3630,7 +3877,7 @@ err_ring:
return -ENOMEM;
}
-static void free_rings(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
{
int i;
@@ -3638,7 +3885,7 @@ static void free_rings(struct dpaa2_eth_priv *priv)
dpaa2_io_store_destroy(priv->channel[i]->store);
}
-static int set_mac_addr(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
@@ -3703,7 +3950,7 @@ static int set_mac_addr(struct dpaa2_eth_priv *priv)
return 0;
}
-static int netdev_init(struct net_device *net_dev)
+static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
@@ -3716,7 +3963,7 @@ static int netdev_init(struct net_device *net_dev)
net_dev->netdev_ops = &dpaa2_eth_ops;
net_dev->ethtool_ops = &dpaa2_ethtool_ops;
- err = set_mac_addr(priv);
+ err = dpaa2_eth_set_mac_addr(priv);
if (err)
return err;
@@ -3771,13 +4018,13 @@ static int netdev_init(struct net_device *net_dev)
return 0;
}
-static int poll_link_state(void *arg)
+static int dpaa2_eth_poll_link_state(void *arg)
{
struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
int err;
while (!kthread_should_stop()) {
- err = link_state_update(priv);
+ err = dpaa2_eth_link_state_update(priv);
if (unlikely(err))
return err;
@@ -3847,11 +4094,11 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
}
if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
- link_state_update(netdev_priv(net_dev));
+ dpaa2_eth_link_state_update(netdev_priv(net_dev));
if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
- set_mac_addr(netdev_priv(net_dev));
- update_tx_fqids(priv);
+ dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
+ dpaa2_eth_update_tx_fqids(priv);
rtnl_lock();
if (priv->mac)
@@ -3864,7 +4111,7 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
return IRQ_HANDLED;
}
-static int setup_irqs(struct fsl_mc_device *ls_dev)
+static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
int err = 0;
struct fsl_mc_device_irq *irq;
@@ -3910,7 +4157,7 @@ free_mc_irq:
return err;
}
-static void add_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
int i;
struct dpaa2_eth_channel *ch;
@@ -3923,7 +4170,7 @@ static void add_ch_napi(struct dpaa2_eth_priv *priv)
}
}
-static void del_ch_napi(struct dpaa2_eth_priv *priv)
+static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
int i;
struct dpaa2_eth_channel *ch;
@@ -3958,6 +4205,19 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
priv->iommu_domain = iommu_get_domain_for_dev(dev);
+ priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
+ priv->rx_tstamp = false;
+
+ priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
+ if (!priv->dpaa2_ptp_wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
+
+ skb_queue_head_init(&priv->tx_skbs);
+
/* Obtain a MC portal */
err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
&priv->mc_io);
@@ -3970,26 +4230,26 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
/* MC objects initialization and configuration */
- err = setup_dpni(dpni_dev);
+ err = dpaa2_eth_setup_dpni(dpni_dev);
if (err)
goto err_dpni_setup;
- err = setup_dpio(priv);
+ err = dpaa2_eth_setup_dpio(priv);
if (err)
goto err_dpio_setup;
- setup_fqs(priv);
+ dpaa2_eth_setup_fqs(priv);
- err = setup_dpbp(priv);
+ err = dpaa2_eth_setup_dpbp(priv);
if (err)
goto err_dpbp_setup;
- err = bind_dpni(priv);
+ err = dpaa2_eth_bind_dpni(priv);
if (err)
goto err_bind;
/* Add a NAPI context for each channel */
- add_ch_napi(priv);
+ dpaa2_eth_add_ch_napi(priv);
/* Percpu statistics */
priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
@@ -4012,21 +4272,21 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
goto err_alloc_sgt_cache;
}
- err = netdev_init(net_dev);
+ err = dpaa2_eth_netdev_init(net_dev);
if (err)
goto err_netdev_init;
/* Configure checksum offload based on current interface flags */
- err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
+ err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
if (err)
goto err_csum;
- err = set_tx_csum(priv, !!(net_dev->features &
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
+ err = dpaa2_eth_set_tx_csum(priv,
+ !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
if (err)
goto err_csum;
- err = alloc_rings(priv);
+ err = dpaa2_eth_alloc_rings(priv);
if (err)
goto err_alloc_rings;
@@ -4039,10 +4299,10 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
#endif
- err = setup_irqs(dpni_dev);
+ err = dpaa2_eth_setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
- priv->poll_thread = kthread_run(poll_link_state, priv,
+ priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
"%s_poll_link", net_dev->name);
if (IS_ERR(priv->poll_thread)) {
dev_err(dev, "Error starting polling thread\n");
@@ -4055,6 +4315,18 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_connect_mac;
+ err = dpaa2_eth_dl_register(priv);
+ if (err)
+ goto err_dl_register;
+
+ err = dpaa2_eth_dl_traps_register(priv);
+ if (err)
+ goto err_dl_trap_register;
+
+ err = dpaa2_eth_dl_port_add(priv);
+ if (err)
+ goto err_dl_port_add;
+
err = register_netdev(net_dev);
if (err < 0) {
dev_err(dev, "register_netdev() failed\n");
@@ -4069,6 +4341,12 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
return 0;
err_netdev_reg:
+ dpaa2_eth_dl_port_del(priv);
+err_dl_port_add:
+ dpaa2_eth_dl_traps_unregister(priv);
+err_dl_trap_register:
+ dpaa2_eth_dl_unregister(priv);
+err_dl_register:
dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
if (priv->do_link_poll)
@@ -4076,7 +4354,7 @@ err_connect_mac:
else
fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
- free_rings(priv);
+ dpaa2_eth_free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
@@ -4086,16 +4364,18 @@ err_alloc_sgt_cache:
err_alloc_percpu_extras:
free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
- del_ch_napi(priv);
+ dpaa2_eth_del_ch_napi(priv);
err_bind:
- free_dpbp(priv);
+ dpaa2_eth_free_dpbp(priv);
err_dpbp_setup:
- free_dpio(priv);
+ dpaa2_eth_free_dpio(priv);
err_dpio_setup:
- free_dpni(priv);
+ dpaa2_eth_free_dpni(priv);
err_dpni_setup:
fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
+ destroy_workqueue(priv->dpaa2_ptp_wq);
+err_wq_alloc:
dev_set_drvdata(dev, NULL);
free_netdev(net_dev);
@@ -4121,20 +4401,24 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
unregister_netdev(net_dev);
+ dpaa2_eth_dl_port_del(priv);
+ dpaa2_eth_dl_traps_unregister(priv);
+ dpaa2_eth_dl_unregister(priv);
+
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
else
fsl_mc_free_irqs(ls_dev);
- free_rings(priv);
+ dpaa2_eth_free_rings(priv);
free_percpu(priv->sgt_cache);
free_percpu(priv->percpu_stats);
free_percpu(priv->percpu_extras);
- del_ch_napi(priv);
- free_dpbp(priv);
- free_dpio(priv);
- free_dpni(priv);
+ dpaa2_eth_del_ch_napi(priv);
+ dpaa2_eth_free_dpbp(priv);
+ dpaa2_eth_free_dpio(priv);
+ dpaa2_eth_free_dpni(priv);
fsl_mc_portal_free(priv->mc_io);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index 7f3c41dc98f2..d236b8695c39 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -10,6 +10,8 @@
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
+#include <linux/net_tstamp.h>
+#include <net/devlink.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
@@ -180,6 +182,49 @@ struct dpaa2_fas {
*/
#define DPAA2_TS_OFFSET 0x8
+/* Frame annotation parse results */
+struct dpaa2_fapr {
+ /* 64-bit word 1 */
+ __le32 faf_lo;
+ __le16 faf_ext;
+ __le16 nxt_hdr;
+ /* 64-bit word 2 */
+ __le64 faf_hi;
+ /* 64-bit word 3 */
+ u8 last_ethertype_offset;
+ u8 vlan_tci_offset_n;
+ u8 vlan_tci_offset_1;
+ u8 llc_snap_offset;
+ u8 eth_offset;
+ u8 ip1_pid_offset;
+ u8 shim_offset_2;
+ u8 shim_offset_1;
+ /* 64-bit word 4 */
+ u8 l5_offset;
+ u8 l4_offset;
+ u8 gre_offset;
+ u8 l3_offset_n;
+ u8 l3_offset_1;
+ u8 mpls_offset_n;
+ u8 mpls_offset_1;
+ u8 pppoe_offset;
+ /* 64-bit word 5 */
+ __le16 running_sum;
+ __le16 gross_running_sum;
+ u8 ipv6_frag_offset;
+ u8 nxt_hdr_offset;
+ u8 routing_hdr_offset_2;
+ u8 routing_hdr_offset_1;
+ /* 64-bit word 6 */
+ u8 reserved[5]; /* Soft-parsing context */
+ u8 ip_proto_offset_n;
+ u8 nxt_hdr_frag_offset;
+ u8 parse_error_code;
+};
+
+#define DPAA2_FAPR_OFFSET 0x10
+#define DPAA2_FAPR_SIZE sizeof(struct dpaa2_fapr)
+
/* Frame annotation egress action descriptor */
#define DPAA2_FAEAD_OFFSET 0x58
@@ -194,6 +239,24 @@ struct dpaa2_faead {
#define DPAA2_FAEAD_EBDDV 0x00002000
#define DPAA2_FAEAD_UPD 0x00000010
+struct ptp_tstamp {
+ u16 sec_msb;
+ u32 sec_lsb;
+ u32 nsec;
+};
+
+static inline void ns_to_ptp_tstamp(struct ptp_tstamp *tstamp, u64 ns)
+{
+ u64 sec, nsec;
+
+ sec = ns;
+ nsec = do_div(sec, 1000000000);
+
+ tstamp->sec_lsb = sec & 0xFFFFFFFF;
+ tstamp->sec_msb = (sec >> 32) & 0xFFFF;
+ tstamp->nsec = nsec;
+}
+
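A quick worked example of the split performed by ns_to_ptp_tstamp(), using an assumed input value:

	/* 1700000000123456789 ns = 1700000000 s + 123456789 ns.
	 * The seconds value fits in 32 bits, so sec_msb ends up 0.
	 */
	struct ptp_tstamp ts;

	ns_to_ptp_tstamp(&ts, 1700000000123456789ULL);
	/* ts.sec_msb == 0, ts.sec_lsb == 0x6553f100, ts.nsec == 123456789 */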
/* Accessors for the hardware annotation fields that we use */
static inline void *dpaa2_get_hwa(void *buf_addr, bool swa)
{
@@ -210,6 +273,11 @@ static inline __le64 *dpaa2_get_ts(void *buf_addr, bool swa)
return dpaa2_get_hwa(buf_addr, swa) + DPAA2_TS_OFFSET;
}
+static inline struct dpaa2_fapr *dpaa2_get_fapr(void *buf_addr, bool swa)
+{
+ return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAPR_OFFSET;
+}
+
static inline struct dpaa2_faead *dpaa2_get_faead(void *buf_addr, bool swa)
{
return dpaa2_get_hwa(buf_addr, swa) + DPAA2_FAEAD_OFFSET;
@@ -324,8 +392,10 @@ struct dpaa2_eth_ch_stats {
#define DPAA2_ETH_MAX_RX_QUEUES \
(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES 16
+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
- DPAA2_ETH_MAX_TX_QUEUES)
+ DPAA2_ETH_MAX_TX_QUEUES + \
+ DPAA2_ETH_MAX_RX_ERR_QUEUES)
#define DPAA2_ETH_MAX_NETDEV_QUEUES \
(DPAA2_ETH_MAX_TX_QUEUES * DPAA2_ETH_MAX_TCS)
@@ -334,6 +404,7 @@ struct dpaa2_eth_ch_stats {
enum dpaa2_eth_fq_type {
DPAA2_RX_FQ = 0,
DPAA2_TX_CONF_FQ,
+ DPAA2_RX_ERR_FQ
};
struct dpaa2_eth_priv;
@@ -407,6 +478,15 @@ struct dpaa2_eth_sgt_cache {
u16 count;
};
+struct dpaa2_eth_trap_item {
+ void *trap_ctx;
+};
+
+struct dpaa2_eth_trap_data {
+ struct dpaa2_eth_trap_item *trap_items_arr;
+ struct dpaa2_eth_priv *priv;
+};
+
/* Driver private data */
struct dpaa2_eth_priv {
struct net_device *net_dev;
@@ -433,8 +513,8 @@ struct dpaa2_eth_priv {
u16 bpid;
struct iommu_domain *iommu_domain;
- bool tx_tstamp; /* Tx timestamping enabled */
- bool rx_tstamp; /* Rx timestamping enabled */
+ enum hwtstamp_tx_types tx_tstamp_type; /* Tx timestamping type */
+ bool rx_tstamp; /* Rx timestamping enabled */
u16 tx_qdid;
struct fsl_mc_io *mc_io;
@@ -473,8 +553,29 @@ struct dpaa2_eth_priv {
#endif
struct dpaa2_mac *mac;
+ struct workqueue_struct *dpaa2_ptp_wq;
+ struct work_struct tx_onestep_tstamp;
+ struct sk_buff_head tx_skbs;
+ /* The one-step timestamping configuration in hardware
+ * registers can only be changed while no one-step
+ * timestamping frames are in flight, so a mutex is used
+ * to make sure the last one-step packet has released the
+ * lock on the Tx confirmation path before the next such
+ * packet is transmitted.
+ */
+ struct mutex onestep_tstamp_lock;
+ struct devlink *devlink;
+ struct dpaa2_eth_trap_data *trap_data;
+ struct devlink_port devlink_port;
};
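The worker scheduled through tx_onestep_tstamp is expected to drain tx_skbs one frame at a time under the mutex described above. A sketch of that shape; the __dpaa2_eth_tx() helper name is an assumption:

	static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
	{
		struct dpaa2_eth_priv *priv = container_of(work,
				struct dpaa2_eth_priv, tx_onestep_tstamp);
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&priv->tx_skbs))) {
			/* Taken here, released on the Tx confirmation path
			 * once the one-step frame has left the hardware.
			 */
			mutex_lock(&priv->onestep_tstamp_lock);
			__dpaa2_eth_tx(skb, priv->net_dev);
		}
	}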
+struct dpaa2_eth_devlink_priv {
+ struct dpaa2_eth_priv *dpaa2_priv;
+};
+
+#define TX_TSTAMP 0x1
+#define TX_TSTAMP_ONESTEP_SYNC 0x2
+
#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \
| RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \
| RXH_L4_B_2_3)
@@ -491,6 +592,7 @@ struct dpaa2_eth_priv {
extern const struct ethtool_ops dpaa2_ethtool_ops;
extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
static inline int dpaa2_eth_cmp_dpni_ver(struct dpaa2_eth_priv *priv,
u16 ver_major, u16 ver_minor)
@@ -560,9 +662,7 @@ static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
return !!(link_options & DPNI_LINK_OPT_PAUSE);
}
-static inline
-unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
- struct sk_buff *skb)
+static inline unsigned int dpaa2_eth_needed_headroom(struct sk_buff *skb)
{
unsigned int headroom = DPAA2_ETH_SWA_SIZE;
@@ -579,7 +679,7 @@ unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
return 0;
/* If we have Tx timestamping, need 128B hardware annotation */
- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
+ if (skb->cb[0])
headroom += DPAA2_ETH_TX_HWA_SIZE;
return headroom;
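
A sketch of how the xmit path is expected to populate skb->cb[0] from the TX_TSTAMP* flags defined earlier in this header; the exact placement inside dpaa2_eth_tx() is an assumption:

	skb->cb[0] = 0;
	if (priv->tx_tstamp_type == HWTSTAMP_TX_ON &&
	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb->cb[0] = TX_TSTAMP;
	else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
		skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;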
@@ -604,4 +704,15 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
+int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_port_del(struct dpaa2_eth_priv *priv);
+
+int dpaa2_eth_dl_traps_register(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_traps_unregister(struct dpaa2_eth_priv *priv);
+
+struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv,
+ struct dpaa2_fapr *fapr);
#endif /* __DPAA2_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 8356f1fbbee1..f981a523e13a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#include <linux/net_tstamp.h>
@@ -316,8 +317,8 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev,
dpaa2_mac_get_ethtool_stats(priv->mac, data + i);
}
-static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -345,9 +346,9 @@ static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask,
return 0;
}
-static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
- struct ethtool_usrip4_spec *uip_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
+ struct ethtool_usrip4_spec *uip_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
u32 tmp_value, tmp_mask;
@@ -400,9 +401,9 @@ static int prep_uip_rule(struct ethtool_usrip4_spec *uip_value,
return 0;
}
-static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
- struct ethtool_tcpip4_spec *l4_mask,
- void *key, void *mask, u8 l4_proto, u64 *fields)
+static int dpaa2_eth_prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
+ struct ethtool_tcpip4_spec *l4_mask,
+ void *key, void *mask, u8 l4_proto, u64 *fields)
{
int off;
@@ -451,9 +452,9 @@ static int prep_l4_rule(struct ethtool_tcpip4_spec *l4_value,
return 0;
}
-static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
- struct ethtool_flow_ext *ext_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -470,9 +471,9 @@ static int prep_ext_rule(struct ethtool_flow_ext *ext_value,
return 0;
}
-static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
- struct ethtool_flow_ext *ext_mask,
- void *key, void *mask, u64 *fields)
+static int dpaa2_eth_prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
+ struct ethtool_flow_ext *ext_mask,
+ void *key, void *mask, u64 *fields)
{
int off;
@@ -486,32 +487,32 @@ static int prep_mac_ext_rule(struct ethtool_flow_ext *ext_value,
return 0;
}
-static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
- u64 *fields)
+static int dpaa2_eth_prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key,
+ void *mask, u64 *fields)
{
int err;
switch (fs->flow_type & 0xFF) {
case ETHER_FLOW:
- err = prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
- key, mask, fields);
+ err = dpaa2_eth_prep_eth_rule(&fs->h_u.ether_spec, &fs->m_u.ether_spec,
+ key, mask, fields);
break;
case IP_USER_FLOW:
- err = prep_uip_rule(&fs->h_u.usr_ip4_spec,
- &fs->m_u.usr_ip4_spec, key, mask, fields);
+ err = dpaa2_eth_prep_uip_rule(&fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec, key, mask, fields);
break;
case TCP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
- key, mask, IPPROTO_TCP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.tcp_ip4_spec, &fs->m_u.tcp_ip4_spec,
+ key, mask, IPPROTO_TCP, fields);
break;
case UDP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
- key, mask, IPPROTO_UDP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.udp_ip4_spec, &fs->m_u.udp_ip4_spec,
+ key, mask, IPPROTO_UDP, fields);
break;
case SCTP_V4_FLOW:
- err = prep_l4_rule(&fs->h_u.sctp_ip4_spec,
- &fs->m_u.sctp_ip4_spec, key, mask,
- IPPROTO_SCTP, fields);
+ err = dpaa2_eth_prep_l4_rule(&fs->h_u.sctp_ip4_spec,
+ &fs->m_u.sctp_ip4_spec, key, mask,
+ IPPROTO_SCTP, fields);
break;
default:
return -EOPNOTSUPP;
@@ -521,14 +522,14 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
return err;
if (fs->flow_type & FLOW_EXT) {
- err = prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
+ err = dpaa2_eth_prep_ext_rule(&fs->h_ext, &fs->m_ext, key, mask, fields);
if (err)
return err;
}
if (fs->flow_type & FLOW_MAC_EXT) {
- err = prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key, mask,
- fields);
+ err = dpaa2_eth_prep_mac_ext_rule(&fs->h_ext, &fs->m_ext, key,
+ mask, fields);
if (err)
return err;
}
@@ -536,9 +537,9 @@ static int prep_cls_rule(struct ethtool_rx_flow_spec *fs, void *key, void *mask,
return 0;
}
-static int do_cls_rule(struct net_device *net_dev,
- struct ethtool_rx_flow_spec *fs,
- bool add)
+static int dpaa2_eth_do_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *fs,
+ bool add)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
@@ -561,7 +562,7 @@ static int do_cls_rule(struct net_device *net_dev,
return -ENOMEM;
/* Fill the key and mask memory areas */
- err = prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
+ err = dpaa2_eth_prep_cls_rule(fs, key_buf, key_buf + rule_cfg.key_size, &fields);
if (err)
goto free_mem;
@@ -617,7 +618,7 @@ static int do_cls_rule(struct net_device *net_dev,
err = dpni_remove_fs_entry(priv->mc_io, 0,
priv->mc_token, i,
&rule_cfg);
- if (err)
+ if (err || priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
break;
}
@@ -629,7 +630,7 @@ free_mem:
return err;
}
-static int num_rules(struct dpaa2_eth_priv *priv)
+static int dpaa2_eth_num_cls_rules(struct dpaa2_eth_priv *priv)
{
int i, rules = 0;
@@ -640,9 +641,9 @@ static int num_rules(struct dpaa2_eth_priv *priv)
return rules;
}
-static int update_cls_rule(struct net_device *net_dev,
- struct ethtool_rx_flow_spec *new_fs,
- unsigned int location)
+static int dpaa2_eth_update_cls_rule(struct net_device *net_dev,
+ struct ethtool_rx_flow_spec *new_fs,
+ unsigned int location)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpaa2_eth_cls_rule *rule;
@@ -658,13 +659,14 @@ static int update_cls_rule(struct net_device *net_dev,
/* If a rule is present at the specified location, delete it. */
if (rule->in_use) {
- err = do_cls_rule(net_dev, &rule->fs, false);
+ err = dpaa2_eth_do_cls_rule(net_dev, &rule->fs, false);
if (err)
return err;
rule->in_use = 0;
- if (!dpaa2_eth_fs_mask_enabled(priv) && !num_rules(priv))
+ if (!dpaa2_eth_fs_mask_enabled(priv) &&
+ !dpaa2_eth_num_cls_rules(priv))
priv->rx_cls_fields = 0;
}
@@ -672,7 +674,7 @@ static int update_cls_rule(struct net_device *net_dev,
if (!new_fs)
return err;
- err = do_cls_rule(net_dev, new_fs, true);
+ err = dpaa2_eth_do_cls_rule(net_dev, new_fs, true);
if (err)
return err;
@@ -702,7 +704,7 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev,
break;
case ETHTOOL_GRXCLSRLCNT:
rxnfc->rule_cnt = 0;
- rxnfc->rule_cnt = num_rules(priv);
+ rxnfc->rule_cnt = dpaa2_eth_num_cls_rules(priv);
rxnfc->data = max_rules;
break;
case ETHTOOL_GRXCLSRULE:
@@ -744,10 +746,10 @@ static int dpaa2_eth_set_rxnfc(struct net_device *net_dev,
err = dpaa2_eth_set_hash(net_dev, rxnfc->data);
break;
case ETHTOOL_SRXCLSRLINS:
- err = update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
+ err = dpaa2_eth_update_cls_rule(net_dev, &rxnfc->fs, rxnfc->fs.location);
break;
case ETHTOOL_SRXCLSRLDEL:
- err = update_cls_rule(net_dev, NULL, rxnfc->fs.location);
+ err = dpaa2_eth_update_cls_rule(net_dev, NULL, rxnfc->fs.location);
break;
default:
err = -EOPNOTSUPP;
@@ -762,6 +764,9 @@ EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
+ if (!dpaa2_ptp)
+ return ethtool_op_get_ts_info(dev, info);
+
info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
@@ -769,7 +774,8 @@ static int dpaa2_eth_get_ts_info(struct net_device *dev,
info->phc_index = dpaa2_phc_index;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ (1 << HWTSTAMP_TX_ON) |
+ (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index 3ee236c5fc37..90cd243070d7 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -15,6 +15,18 @@ static int phy_mode(enum dpmac_eth_if eth_if, phy_interface_t *if_mode)
case DPMAC_ETH_IF_RGMII:
*if_mode = PHY_INTERFACE_MODE_RGMII;
break;
+ case DPMAC_ETH_IF_USXGMII:
+ *if_mode = PHY_INTERFACE_MODE_USXGMII;
+ break;
+ case DPMAC_ETH_IF_QSGMII:
+ *if_mode = PHY_INTERFACE_MODE_QSGMII;
+ break;
+ case DPMAC_ETH_IF_SGMII:
+ *if_mode = PHY_INTERFACE_MODE_SGMII;
+ break;
+ case DPMAC_ETH_IF_XFI:
+ *if_mode = PHY_INTERFACE_MODE_10GBASER;
+ break;
default:
return -EINVAL;
}
@@ -67,6 +79,10 @@ static bool dpaa2_mac_phy_mode_mismatch(struct dpaa2_mac *mac,
phy_interface_t interface)
{
switch (interface) {
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -95,6 +111,17 @@ static void dpaa2_mac_validate(struct phylink_config *config,
phylink_set(mask, Asym_Pause);
switch (state->interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_10GBASER:
+ case PHY_INTERFACE_MODE_USXGMII:
+ phylink_set(mask, 10000baseT_Full);
+ if (state->interface == PHY_INTERFACE_MODE_10GBASER)
+ break;
+ phylink_set(mask, 5000baseT_Full);
+ phylink_set(mask, 2500baseT_Full);
+ fallthrough;
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -227,6 +254,51 @@ out:
return fixed;
}
+static int dpaa2_pcs_create(struct dpaa2_mac *mac,
+ struct device_node *dpmac_node, int id)
+{
+ struct mdio_device *mdiodev;
+ struct device_node *node;
+
+ node = of_parse_phandle(dpmac_node, "pcs-handle", 0);
+ if (!node) {
+ /* do not error out on old DTS files */
+ netdev_warn(mac->net_dev, "pcs-handle node not found\n");
+ return 0;
+ }
+
+ if (!of_device_is_available(node)) {
+ netdev_err(mac->net_dev, "pcs-handle node not available\n");
+ return -ENODEV;
+ }
+
+ mdiodev = of_mdio_find_device(node);
+ of_node_put(node);
+ if (!mdiodev)
+ return -EPROBE_DEFER;
+
+ mac->pcs = lynx_pcs_create(mdiodev);
+ if (!mac->pcs) {
+ netdev_err(mac->net_dev, "lynx_pcs_create() failed\n");
+ put_device(&mdiodev->dev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void dpaa2_pcs_destroy(struct dpaa2_mac *mac)
+{
+ struct lynx_pcs *pcs = mac->pcs;
+
+ if (pcs) {
+ struct device *dev = &pcs->mdio->dev;
+ lynx_pcs_destroy(pcs);
+ put_device(dev);
+ mac->pcs = NULL;
+ }
+}
+
int dpaa2_mac_connect(struct dpaa2_mac *mac)
{
struct fsl_mc_device *dpmac_dev = mac->mc_dev;
@@ -278,6 +350,13 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
goto err_put_node;
}
+ if (attr.link_type == DPMAC_LINK_TYPE_PHY &&
+ attr.eth_if != DPMAC_ETH_IF_RGMII) {
+ err = dpaa2_pcs_create(mac, dpmac_node, attr.id);
+ if (err)
+ goto err_put_node;
+ }
+
mac->phylink_config.dev = &net_dev->dev;
mac->phylink_config.type = PHYLINK_NETDEV;
@@ -286,10 +365,13 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
&dpaa2_mac_phylink_ops);
if (IS_ERR(phylink)) {
err = PTR_ERR(phylink);
- goto err_put_node;
+ goto err_pcs_destroy;
}
mac->phylink = phylink;
+ if (mac->pcs)
+ phylink_set_pcs(mac->phylink, &mac->pcs->pcs);
+
err = phylink_of_phy_connect(mac->phylink, dpmac_node, 0);
if (err) {
netdev_err(net_dev, "phylink_of_phy_connect() = %d\n", err);
@@ -302,6 +384,8 @@ int dpaa2_mac_connect(struct dpaa2_mac *mac)
err_phylink_destroy:
phylink_destroy(mac->phylink);
+err_pcs_destroy:
+ dpaa2_pcs_destroy(mac);
err_put_node:
of_node_put(dpmac_node);
err_close_dpmac:
@@ -316,6 +400,8 @@ void dpaa2_mac_disconnect(struct dpaa2_mac *mac)
phylink_disconnect_phy(mac->phylink);
phylink_destroy(mac->phylink);
+ dpaa2_pcs_destroy(mac);
+
dpmac_close(mac->mc_io, 0, mac->mc_dev->mc_handle);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
index 2130d9c7d40e..955a52856210 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
@@ -7,6 +7,7 @@
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phylink.h>
+#include <linux/pcs-lynx.h>
#include "dpmac.h"
#include "dpmac-cmd.h"
@@ -21,6 +22,7 @@ struct dpaa2_mac {
struct phylink *phylink;
phy_interface_t if_mode;
enum dpmac_link_type if_link_type;
+ struct lynx_pcs *pcs;
};
bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index cc1b7f85e433..32b5faa87bb8 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -2,6 +2,7 @@
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016-2018 NXP
+ * Copyright 2020 NXP
*/
#include <linux/module.h>
@@ -9,7 +10,6 @@
#include <linux/of_address.h>
#include <linux/msi.h>
#include <linux/fsl/mc.h>
-#include <linux/fsl/ptp_qoriq.h>
#include "dpaa2-ptp.h"
@@ -201,6 +201,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_free_threaded_irq;
dpaa2_phc_index = ptp_qoriq->phc_index;
+ dpaa2_ptp = ptp_qoriq;
dev_set_drvdata(dev, ptp_qoriq);
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
index df2458a5e9ef..e1023538b4c3 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
@@ -1,14 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright 2018 NXP
+ * Copyright 2020 NXP
*/
#ifndef __RTC_H
#define __RTC_H
+#include <linux/fsl/ptp_qoriq.h>
+
#include "dprtc.h"
#include "dprtc-cmd.h"
extern int dpaa2_phc_index;
+extern struct ptp_qoriq *dpaa2_ptp;
#endif
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
index 3c06f5fb5759..90453dc7baef 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni-cmd.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#ifndef _FSL_DPNI_CMD_H
#define _FSL_DPNI_CMD_H
@@ -92,6 +93,9 @@
#define DPNI_CMDID_SET_RX_HASH_DIST DPNI_CMD(0x274)
#define DPNI_CMDID_GET_LINK_CFG DPNI_CMD(0x278)
+#define DPNI_CMDID_SET_SINGLE_STEP_CFG DPNI_CMD(0x279)
+#define DPNI_CMDID_GET_SINGLE_STEP_CFG DPNI_CMD(0x27a)
+
/* Macros for accessing command fields smaller than 1byte */
#define DPNI_MASK(field) \
GENMASK(DPNI_##field##_SHIFT + DPNI_##field##_SIZE - 1, \
@@ -641,4 +645,21 @@ struct dpni_cmd_set_tx_shaping {
u8 coupled;
};
+#define DPNI_PTP_ENABLE_SHIFT 0
+#define DPNI_PTP_ENABLE_SIZE 1
+#define DPNI_PTP_CH_UPDATE_SHIFT 1
+#define DPNI_PTP_CH_UPDATE_SIZE 1
+
+struct dpni_cmd_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+};
+
+struct dpni_rsp_single_step_cfg {
+ __le16 flags;
+ __le16 offset;
+ __le32 peer_delay;
+};
+
#endif /* _FSL_DPNI_CMD_H */
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.c b/drivers/net/ethernet/freescale/dpaa2/dpni.c
index 68ed4c41b282..6ea7db66a632 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -1999,3 +2000,81 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
+
+/**
+ * dpni_get_single_step_cfg() - return current configuration for
+ * single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ */
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_rsp_single_step_cfg *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* read command response */
+ rsp_params = (struct dpni_rsp_single_step_cfg *)cmd.params;
+ ptp_cfg->offset = le16_to_cpu(rsp_params->offset);
+ ptp_cfg->en = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_ENABLE) ? 1 : 0;
+ ptp_cfg->ch_update = dpni_get_field(le16_to_cpu(rsp_params->flags),
+ PTP_CH_UPDATE) ? 1 : 0;
+ ptp_cfg->peer_delay = le32_to_cpu(rsp_params->peer_delay);
+
+ return err;
+}
+
+/**
+ * dpni_set_single_step_cfg() - enable/disable and configure single step PTP
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPNI object
+ * @ptp_cfg: ptp single step configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ *
+ * The function takes effect only when the dpni object is connected to a
+ * dpmac object. If the dpni is not connected to a dpmac, the configuration
+ * is stored internally and applied once the connection is made.
+ */
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg)
+{
+ struct dpni_cmd_single_step_cfg *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ u16 flags;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_SINGLE_STEP_CFG,
+ cmd_flags, token);
+ cmd_params = (struct dpni_cmd_single_step_cfg *)cmd.params;
+ cmd_params->offset = cpu_to_le16(ptp_cfg->offset);
+ cmd_params->peer_delay = cpu_to_le32(ptp_cfg->peer_delay);
+
+ flags = le16_to_cpu(cmd_params->flags);
+ dpni_set_field(flags, PTP_ENABLE, !!ptp_cfg->en);
+ dpni_set_field(flags, PTP_CH_UPDATE, !!ptp_cfg->ch_update);
+ cmd_params->flags = cpu_to_le16(flags);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpni.h b/drivers/net/ethernet/freescale/dpaa2/dpni.h
index 39387991a1f9..e7b9e195b534 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpni.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpni.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
+ * Copyright 2020 NXP
*/
#ifndef __FSL_DPNI_H
#define __FSL_DPNI_H
@@ -74,6 +75,10 @@ struct fsl_mc_io;
* Disables the flow steering table.
*/
#define DPNI_OPT_NO_FS 0x000020
+/**
+ * Flow steering table is shared between all traffic classes
+ */
+#define DPNI_OPT_SHARED_FS 0x001000
int dpni_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
@@ -1079,4 +1084,34 @@ int dpni_set_tx_shaping(struct fsl_mc_io *mc_io,
const struct dpni_tx_shaping_cfg *tx_er_shaper,
int coupled);
+/**
+ * struct dpni_single_step_cfg - configure single step PTP (IEEE 1588)
+ * @en: enable single step PTP. When enabled, PTPv1 functionality
+ * will not work. If the field is zero, the offset and ch_update
+ * parameters are ignored
+ * @offset: start offset from the beginning of the frame where the
+ * timestamp field is found. The offset must account for all MAC
+ * headers, VLAN tags and any other protocol headers
+ * @ch_update: when set, the UDP checksum will be updated inside the packet
+ * @peer_delay: for peer-to-peer transparent clocks, add this value to the
+ * correction field in addition to the transient time update.
+ * The value is expressed in nanoseconds.
+ */
+struct dpni_single_step_cfg {
+ u8 en;
+ u8 ch_update;
+ u16 offset;
+ u32 peer_delay;
+};
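A hedged example of driving the new MC command; the timestamp offset is left as an assumption since it depends on the PTP encapsulation in use:

	struct dpni_single_step_cfg cfg = {
		.en = 1,		/* one-step PTP on */
		.ch_update = 1,		/* rewrite the UDP checksum */
		.offset = ts_offset,	/* frame offset of the timestamp
					 * field (encapsulation dependent)
					 */
		.peer_delay = 0,
	};

	err = dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg);
	if (err)
		netdev_err(priv->net_dev, "dpni_set_single_step_cfg() failed\n");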
+
+int dpni_set_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
+int dpni_get_single_step_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpni_single_step_cfg *ptp_cfg);
+
#endif /* __FSL_DPNI_H */
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index 37b804f8bd76..0fa18b00c49b 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -3,7 +3,8 @@ config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI && PCI_MSI
select FSL_ENETC_MDIO
- select PHYLIB
+ select PHYLINK
+ select PCS_LYNX
select DIMLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
@@ -15,7 +16,7 @@ config FSL_ENETC
config FSL_ENETC_VF
tristate "ENETC VF driver"
depends on PCI && PCI_MSI
- select PHYLIB
+ select PHYLINK
select DIMLIB
help
This driver supports NXP ENETC gigabit ethernet controller PCIe
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index f78ca7b343d2..52be6e315752 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -4,7 +4,6 @@
#include "enetc.h"
#include <linux/tcp.h>
#include <linux/udp.h>
-#include <linux/of_mdio.h>
#include <linux/vmalloc.h>
/* ENETC overhead: optional extension BD + 1 BD gap */
@@ -1392,38 +1391,24 @@ static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
}
-static void adjust_link(struct net_device *ndev)
+static int enetc_phylink_connect(struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (priv->active_offloads & ENETC_F_QBV)
- enetc_sched_speed_set(ndev);
-
- phy_print_status(phydev);
-}
-
-static int enetc_phy_connect(struct net_device *ndev)
-{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev;
struct ethtool_eee edata;
+ int err;
- if (!priv->phy_node)
+ if (!priv->phylink)
return 0; /* phy-less mode */
- phydev = of_phy_connect(ndev, priv->phy_node, &adjust_link,
- 0, priv->if_mode);
- if (!phydev) {
+ err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
+ if (err) {
dev_err(&ndev->dev, "could not attach to PHY\n");
- return -ENODEV;
+ return err;
}
- phy_attached_info(phydev);
-
/* disable EEE autoneg, until ENETC driver supports it */
memset(&edata, 0, sizeof(struct ethtool_eee));
- phy_ethtool_set_eee(phydev, &edata);
+ phylink_ethtool_set_eee(priv->phylink, &edata);
return 0;
}
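
The phylink instance consumed here is created elsewhere in the PF probe path, which this diff does not show. A minimal sketch of that creation step; the "pf" container, the "np" device-tree node, the phylink_config field placement and the ops structure name are all assumptions:

	struct phylink *phylink;

	phylink = phylink_create(&pf->phylink_config, of_fwnode_handle(np),
				 pf->if_mode, &enetc_mac_phylink_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);
	priv->phylink = phylink;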
@@ -1443,8 +1428,8 @@ void enetc_start(struct net_device *ndev)
enable_irq(irq);
}
- if (ndev->phydev)
- phy_start(ndev->phydev);
+ if (priv->phylink)
+ phylink_start(priv->phylink);
else
netif_carrier_on(ndev);
@@ -1460,7 +1445,7 @@ int enetc_open(struct net_device *ndev)
if (err)
return err;
- err = enetc_phy_connect(ndev);
+ err = enetc_phylink_connect(ndev);
if (err)
goto err_phy_connect;
@@ -1490,8 +1475,8 @@ err_set_queues:
err_alloc_rx:
enetc_free_tx_resources(priv);
err_alloc_tx:
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
+ if (priv->phylink)
+ phylink_disconnect_phy(priv->phylink);
err_phy_connect:
enetc_free_irqs(priv);
@@ -1514,8 +1499,8 @@ void enetc_stop(struct net_device *ndev)
napi_disable(&priv->int_vector[i]->napi);
}
- if (ndev->phydev)
- phy_stop(ndev->phydev);
+ if (priv->phylink)
+ phylink_stop(priv->phylink);
else
netif_carrier_off(ndev);
@@ -1529,8 +1514,8 @@ int enetc_close(struct net_device *ndev)
enetc_stop(ndev);
enetc_clear_bdrs(priv);
- if (ndev->phydev)
- phy_disconnect(ndev->phydev);
+ if (priv->phylink)
+ phylink_disconnect_phy(priv->phylink);
enetc_free_rxtx_rings(priv);
enetc_free_rx_resources(priv);
enetc_free_tx_resources(priv);
@@ -1780,6 +1765,7 @@ static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
if (cmd == SIOCSHWTSTAMP)
return enetc_hwtstamp_set(ndev, rq);
@@ -1787,9 +1773,10 @@ int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return enetc_hwtstamp_get(ndev, rq);
#endif
- if (!ndev->phydev)
+ if (!priv->phylink)
return -EOPNOTSUPP;
- return phy_mii_ioctl(ndev->phydev, rq, cmd);
+
+ return phylink_mii_ioctl(priv->phylink, rq, cmd);
}
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
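Taken together, the hunks above swap the phylib attach/adjust_link pair for phylink's start/stop pairing while keeping the phy-less fallback. A condensed sketch of the resulting pattern (names are illustrative, not driver code):

/* Condensed sketch of the open/stop flow the conversion above arrives at.
 * Without a phylink instance the carrier is forced so the stack still
 * transmits on an assumed always-up internal link.
 */
static void example_start(struct net_device *ndev, struct phylink *pl)
{
        if (pl)
                phylink_start(pl);      /* link resolution drives the carrier */
        else
                netif_carrier_on(ndev);
}

static void example_stop(struct net_device *ndev, struct phylink *pl)
{
        if (pl)
                phylink_stop(pl);       /* must run before disconnecting the PHY */
        else
                netif_carrier_off(ndev);
}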
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index d309803cfeb6..dd0fb0c066d7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -9,7 +9,7 @@
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/phy.h>
+#include <linux/phylink.h>
#include <linux/dim.h>
#include "enetc_hw.h"
@@ -264,8 +264,7 @@ struct enetc_ndev_priv {
struct psfp_cap psfp_cap;
- struct device_node *phy_node;
- phy_interface_t if_mode;
+ struct phylink *phylink;
int ic_mode;
u32 tx_ictt;
};
@@ -323,7 +322,7 @@ int enetc_send_cmd(struct enetc_si *si, struct enetc_cbd *cbd);
#ifdef CONFIG_FSL_ENETC_QOS
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
-void enetc_sched_speed_set(struct net_device *ndev);
+void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data);
int enetc_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
@@ -388,7 +387,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
#else
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
-#define enetc_sched_speed_set(ndev) (void)0
+#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_txtime(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_psfp(ndev, type_data) -EOPNOTSUPP
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 1dab83fbca77..8ed1ebd5a183 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -686,6 +686,28 @@ static int enetc_set_wol(struct net_device *dev,
return ret;
}
+static int enetc_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ if (!priv->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static int enetc_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(dev);
+
+ if (!priv->phylink)
+ return -EOPNOTSUPP;
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
static const struct ethtool_ops enetc_pf_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -704,8 +726,8 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_ringparam = enetc_get_ringparam,
.get_coalesce = enetc_get_coalesce,
.set_coalesce = enetc_set_coalesce,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = enetc_get_link_ksettings,
+ .set_link_ksettings = enetc_set_link_ksettings,
.get_link = ethtool_op_get_link,
.get_ts_info = enetc_get_ts_info,
.get_wol = enetc_get_wol,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 177334f0adb1..419306342ac5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -482,8 +482,7 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw,
- phy_interface_t phy_mode)
+static void enetc_configure_port_mac(struct enetc_hw *hw)
{
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
@@ -492,12 +491,14 @@ static void enetc_configure_port_mac(struct enetc_hw *hw,
enetc_port_wr(hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
- ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
enetc_port_wr(hw, ENETC_PM1_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
- ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC |
- ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
+}
+
+static void enetc_mac_config(struct enetc_hw *hw, phy_interface_t phy_mode)
+{
/* set auto-speed for RGMII */
if (enetc_port_rd(hw, ENETC_PM0_IF_MODE) & ENETC_PMO_IFM_RG ||
phy_interface_mode_is_rgmii(phy_mode))
@@ -507,6 +508,17 @@ static void enetc_configure_port_mac(struct enetc_hw *hw,
enetc_port_wr(hw, ENETC_PM0_IF_MODE, ENETC_PM0_IFM_XGMII);
}
+static void enetc_mac_enable(struct enetc_hw *hw, bool en)
+{
+ u32 val = enetc_port_rd(hw, ENETC_PM0_CMD_CFG);
+
+ val &= ~(ENETC_PM0_TX_EN | ENETC_PM0_RX_EN);
+ val |= en ? (ENETC_PM0_TX_EN | ENETC_PM0_RX_EN) : 0;
+
+ enetc_port_wr(hw, ENETC_PM0_CMD_CFG, val);
+ enetc_port_wr(hw, ENETC_PM1_CMD_CFG, val);
+}
+
static void enetc_configure_port_pmac(struct enetc_hw *hw)
{
u32 temp;
@@ -527,7 +539,7 @@ static void enetc_configure_port(struct enetc_pf *pf)
enetc_configure_port_pmac(hw);
- enetc_configure_port_mac(hw, pf->if_mode);
+ enetc_configure_port_mac(hw);
enetc_port_si_configure(pf->si);
@@ -733,11 +745,10 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
}
-static int enetc_mdio_probe(struct enetc_pf *pf)
+static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
{
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
- struct device_node *np;
struct mii_bus *bus;
int err;
@@ -754,20 +765,12 @@ static int enetc_mdio_probe(struct enetc_pf *pf)
mdio_priv->mdio_base = ENETC_EMDIO_BASE;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
- np = of_get_child_by_name(dev->of_node, "mdio");
- if (!np) {
- dev_err(dev, "MDIO node missing\n");
- return -EINVAL;
- }
-
err = of_mdiobus_register(bus, np);
if (err) {
- of_node_put(np);
dev_err(dev, "cannot register MDIO bus\n");
return err;
}
- of_node_put(np);
pf->mdio = bus;
return 0;
@@ -779,69 +782,12 @@ static void enetc_mdio_remove(struct enetc_pf *pf)
mdiobus_unregister(pf->mdio);
}
-static int enetc_of_get_phy(struct enetc_pf *pf)
-{
- struct device *dev = &pf->si->pdev->dev;
- struct device_node *np = dev->of_node;
- struct device_node *mdio_np;
- int err;
-
- pf->phy_node = of_parse_phandle(np, "phy-handle", 0);
- if (!pf->phy_node) {
- if (!of_phy_is_fixed_link(np)) {
- dev_err(dev, "PHY not specified\n");
- return -ENODEV;
- }
-
- err = of_phy_register_fixed_link(np);
- if (err < 0) {
- dev_err(dev, "fixed link registration failed\n");
- return err;
- }
-
- pf->phy_node = of_node_get(np);
- }
-
- mdio_np = of_get_child_by_name(np, "mdio");
- if (mdio_np) {
- of_node_put(mdio_np);
- err = enetc_mdio_probe(pf);
- if (err) {
- of_node_put(pf->phy_node);
- return err;
- }
- }
-
- err = of_get_phy_mode(np, &pf->if_mode);
- if (err) {
- dev_err(dev, "missing phy type\n");
- of_node_put(pf->phy_node);
- if (of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- else
- enetc_mdio_remove(pf);
-
- return -EINVAL;
- }
-
- return 0;
-}
-
-static void enetc_of_put_phy(struct enetc_pf *pf)
-{
- struct device_node *np = pf->si->pdev->dev.of_node;
-
- if (np && of_phy_is_fixed_link(np))
- of_phy_deregister_fixed_link(np);
- if (pf->phy_node)
- of_node_put(pf->phy_node);
-}
-
-static int enetc_imdio_init(struct enetc_pf *pf, bool is_c45)
+static int enetc_imdio_create(struct enetc_pf *pf)
{
struct device *dev = &pf->si->pdev->dev;
struct enetc_mdio_priv *mdio_priv;
- struct phy_device *pcs;
+ struct lynx_pcs *pcs_lynx;
+ struct mdio_device *pcs;
struct mii_bus *bus;
int err;
@@ -865,15 +811,23 @@ static int enetc_imdio_init(struct enetc_pf *pf, bool is_c45)
goto free_mdio_bus;
}
- pcs = get_phy_device(bus, 0, is_c45);
+ pcs = mdio_device_create(bus, 0);
if (IS_ERR(pcs)) {
err = PTR_ERR(pcs);
- dev_err(dev, "cannot get internal PCS PHY (%d)\n", err);
+ dev_err(dev, "cannot create pcs (%d)\n", err);
+ goto unregister_mdiobus;
+ }
+
+ pcs_lynx = lynx_pcs_create(pcs);
+ if (!pcs_lynx) {
+ mdio_device_free(pcs);
+ err = -ENOMEM;
+ dev_err(dev, "cannot create lynx pcs (%d)\n", err);
goto unregister_mdiobus;
}
pf->imdio = bus;
- pf->pcs = pcs;
+ pf->pcs = pcs_lynx;
return 0;
@@ -886,91 +840,168 @@ free_mdio_bus:
static void enetc_imdio_remove(struct enetc_pf *pf)
{
- if (pf->pcs)
- put_device(&pf->pcs->mdio.dev);
+ if (pf->pcs) {
+ mdio_device_free(pf->pcs->mdio);
+ lynx_pcs_destroy(pf->pcs);
+ }
if (pf->imdio) {
mdiobus_unregister(pf->imdio);
mdiobus_free(pf->imdio);
}
}
-static void enetc_configure_sgmii(struct phy_device *pcs)
+static bool enetc_port_has_pcs(struct enetc_pf *pf)
+{
+ return (pf->if_mode == PHY_INTERFACE_MODE_SGMII ||
+ pf->if_mode == PHY_INTERFACE_MODE_2500BASEX ||
+ pf->if_mode == PHY_INTERFACE_MODE_USXGMII);
+}
+
+static int enetc_mdiobus_create(struct enetc_pf *pf)
+{
+ struct device *dev = &pf->si->pdev->dev;
+ struct device_node *mdio_np;
+ int err;
+
+ mdio_np = of_get_child_by_name(dev->of_node, "mdio");
+ if (mdio_np) {
+ err = enetc_mdio_probe(pf, mdio_np);
+
+ of_node_put(mdio_np);
+ if (err)
+ return err;
+ }
+
+ if (enetc_port_has_pcs(pf)) {
+ err = enetc_imdio_create(pf);
+ if (err) {
+ enetc_mdio_remove(pf);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void enetc_mdiobus_destroy(struct enetc_pf *pf)
{
- /* SGMII spec requires tx_config_Reg[15:0] to be exactly 0x4001
- * for the MAC PCS in order to acknowledge the AN.
- */
- phy_write(pcs, MII_ADVERTISE, ADVERTISE_SGMII | ADVERTISE_LPACK);
+ enetc_mdio_remove(pf);
+ enetc_imdio_remove(pf);
+}
- phy_write(pcs, ENETC_PCS_IF_MODE,
- ENETC_PCS_IF_MODE_SGMII_EN |
- ENETC_PCS_IF_MODE_USE_SGMII_AN);
+static void enetc_pl_mac_validate(struct phylink_config *config,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ state->interface != PHY_INTERFACE_MODE_INTERNAL &&
+ state->interface != PHY_INTERFACE_MODE_SGMII &&
+ state->interface != PHY_INTERFACE_MODE_2500BASEX &&
+ state->interface != PHY_INTERFACE_MODE_USXGMII &&
+ !phy_interface_mode_is_rgmii(state->interface)) {
+ bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ return;
+ }
- /* Adjust link timer for SGMII */
- phy_write(pcs, ENETC_PCS_LINK_TIMER1, ENETC_PCS_LINK_TIMER1_VAL);
- phy_write(pcs, ENETC_PCS_LINK_TIMER2, ENETC_PCS_LINK_TIMER2_VAL);
+ phylink_set_port_modes(mask);
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Half);
+ phylink_set(mask, 1000baseT_Full);
+
+ if (state->interface == PHY_INTERFACE_MODE_INTERNAL ||
+ state->interface == PHY_INTERFACE_MODE_2500BASEX ||
+ state->interface == PHY_INTERFACE_MODE_USXGMII) {
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
+ }
- phy_write(pcs, MII_BMCR, BMCR_ANRESTART | BMCR_ANENABLE);
+ bitmap_and(supported, supported, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
+ bitmap_and(state->advertising, state->advertising, mask,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void enetc_configure_2500basex(struct phy_device *pcs)
+static void enetc_pl_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
{
- phy_write(pcs, ENETC_PCS_IF_MODE,
- ENETC_PCS_IF_MODE_SGMII_EN |
- ENETC_PCS_IF_MODE_SGMII_SPEED(ENETC_PCS_SPEED_2500));
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_ndev_priv *priv;
+
+ enetc_mac_config(&pf->si->hw, state->interface);
- phy_write(pcs, MII_BMCR, BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_RESET);
+ priv = netdev_priv(pf->si->ndev);
+ if (pf->pcs)
+ phylink_set_pcs(priv->phylink, &pf->pcs->pcs);
}
-static void enetc_configure_usxgmii(struct phy_device *pcs)
+static void enetc_pl_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
{
- /* Configure device ability for the USXGMII Replicator */
- phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_ADVERTISE,
- ADVERTISE_SGMII | ADVERTISE_LPACK |
- MDIO_USXGMII_FULL_DUPLEX);
-
- /* Restart PCS AN */
- phy_write_mmd(pcs, MDIO_MMD_VEND2, MII_BMCR,
- BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+ struct enetc_ndev_priv *priv;
+
+ priv = netdev_priv(pf->si->ndev);
+ if (priv->active_offloads & ENETC_F_QBV)
+ enetc_sched_speed_set(priv, speed);
+
+ enetc_mac_enable(&pf->si->hw, true);
}
-static int enetc_configure_serdes(struct enetc_ndev_priv *priv)
+static void enetc_pl_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct enetc_pf *pf = phylink_to_enetc_pf(config);
+
+ enetc_mac_enable(&pf->si->hw, false);
+}
+
+static const struct phylink_mac_ops enetc_mac_phylink_ops = {
+ .validate = enetc_pl_mac_validate,
+ .mac_config = enetc_pl_mac_config,
+ .mac_link_up = enetc_pl_mac_link_up,
+ .mac_link_down = enetc_pl_mac_link_down,
+};
+
+static int enetc_phylink_create(struct enetc_ndev_priv *priv)
{
- bool is_c45 = priv->if_mode == PHY_INTERFACE_MODE_USXGMII;
struct enetc_pf *pf = enetc_si_priv(priv->si);
+ struct device *dev = &pf->si->pdev->dev;
+ struct phylink *phylink;
int err;
- if (priv->if_mode != PHY_INTERFACE_MODE_SGMII &&
- priv->if_mode != PHY_INTERFACE_MODE_2500BASEX &&
- priv->if_mode != PHY_INTERFACE_MODE_USXGMII)
- return 0;
+ pf->phylink_config.dev = &priv->ndev->dev;
+ pf->phylink_config.type = PHYLINK_NETDEV;
- err = enetc_imdio_init(pf, is_c45);
- if (err)
+ phylink = phylink_create(&pf->phylink_config,
+ of_fwnode_handle(dev->of_node),
+ pf->if_mode, &enetc_mac_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
return err;
-
- switch (priv->if_mode) {
- case PHY_INTERFACE_MODE_SGMII:
- enetc_configure_sgmii(pf->pcs);
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- enetc_configure_2500basex(pf->pcs);
- break;
- case PHY_INTERFACE_MODE_USXGMII:
- enetc_configure_usxgmii(pf->pcs);
- break;
- default:
- dev_err(&pf->si->pdev->dev, "Unsupported link mode %s\n",
- phy_modes(priv->if_mode));
}
+ priv->phylink = phylink;
+
return 0;
}
-static void enetc_teardown_serdes(struct enetc_ndev_priv *priv)
+static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
{
- struct enetc_pf *pf = enetc_si_priv(priv->si);
-
- enetc_imdio_remove(pf);
+ if (priv->phylink)
+ phylink_destroy(priv->phylink);
}
static int enetc_pf_probe(struct pci_dev *pdev,
@@ -1004,10 +1035,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
pf->si = si;
pf->total_vfs = pci_sriov_get_totalvfs(pdev);
- err = enetc_of_get_phy(pf);
- if (err)
- dev_warn(&pdev->dev, "Fallback to PHY-less operation\n");
-
enetc_configure_port(pf);
enetc_get_si_caps(si);
@@ -1022,8 +1049,6 @@ static int enetc_pf_probe(struct pci_dev *pdev,
enetc_pf_netdev_setup(si, ndev, &enetc_ndev_ops);
priv = netdev_priv(ndev);
- priv->phy_node = pf->phy_node;
- priv->if_mode = pf->if_mode;
enetc_init_si_rings_params(priv);
@@ -1039,20 +1064,27 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_msix;
}
- err = enetc_configure_serdes(priv);
- if (err)
- dev_warn(&pdev->dev, "Attempted SerDes config but failed\n");
+ if (!of_get_phy_mode(pdev->dev.of_node, &pf->if_mode)) {
+ err = enetc_mdiobus_create(pf);
+ if (err)
+ goto err_mdiobus_create;
+
+ err = enetc_phylink_create(priv);
+ if (err)
+ goto err_phylink_create;
+ }
err = register_netdev(ndev);
if (err)
goto err_reg_netdev;
- netif_carrier_off(ndev);
-
return 0;
err_reg_netdev:
- enetc_teardown_serdes(priv);
+ enetc_phylink_destroy(priv);
+err_phylink_create:
+ enetc_mdiobus_destroy(pf);
+err_mdiobus_create:
enetc_free_msix(priv);
err_alloc_msix:
enetc_free_si_resources(priv);
@@ -1060,8 +1092,6 @@ err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
- enetc_mdio_remove(pf);
- enetc_of_put_phy(pf);
err_map_pf_space:
enetc_pci_remove(pdev);
@@ -1074,16 +1104,15 @@ static void enetc_pf_remove(struct pci_dev *pdev)
struct enetc_pf *pf = enetc_si_priv(si);
struct enetc_ndev_priv *priv;
+ priv = netdev_priv(si->ndev);
+ enetc_phylink_destroy(priv);
+ enetc_mdiobus_destroy(pf);
+
if (pf->num_vfs)
enetc_sriov_configure(pdev, 0);
- priv = netdev_priv(si->ndev);
unregister_netdev(si->ndev);
- enetc_teardown_serdes(priv);
- enetc_mdio_remove(pf);
- enetc_of_put_phy(pf);
-
enetc_free_msix(priv);
enetc_free_si_resources(priv);
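The probe-side half of the conversion follows the standard phylink wiring: a phylink_config embedded in the driver state, one phylink_mac_ops table, phylink_create() at probe and phylink_destroy() at remove. A minimal sketch of the create step (the example_ name is illustrative):

/* Minimal sketch of the phylink_create() step used above. phylink_create()
 * returns an ERR_PTR() on failure and must be balanced by phylink_destroy().
 */
static struct phylink *example_phylink_create(struct phylink_config *cfg,
                                              struct device_node *np,
                                              phy_interface_t mode,
                                              const struct phylink_mac_ops *ops)
{
        cfg->type = PHYLINK_NETDEV;     /* cfg->dev must already be assigned */
        return phylink_create(cfg, of_fwnode_handle(np), mode, ops);
}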
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.h b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
index 0d0ee91282a5..263946c51e37 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.h
@@ -2,6 +2,7 @@
/* Copyright 2017-2019 NXP */
#include "enetc.h"
+#include <linux/pcs-lynx.h>
#define ENETC_PF_NUM_RINGS 8
@@ -45,12 +46,15 @@ struct enetc_pf {
struct mii_bus *mdio; /* saved for cleanup */
struct mii_bus *imdio;
- struct phy_device *pcs;
+ struct lynx_pcs *pcs;
- struct device_node *phy_node;
phy_interface_t if_mode;
+ struct phylink_config phylink_config;
};
+#define phylink_to_enetc_pf(config) \
+ container_of((config), struct enetc_pf, phylink_config)
+
int enetc_msg_psi_init(struct enetc_pf *pf);
void enetc_msg_psi_free(struct enetc_pf *pf);
void enetc_msg_handle_rxmsg(struct enetc_pf *pf, int mbox_id, u16 *status);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 1c4a535890da..c81be32bcedf 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -15,17 +15,14 @@ static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
& ENETC_QBV_MAX_GCL_LEN_MASK;
}
-void enetc_sched_speed_set(struct net_device *ndev)
+void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
u32 old_speed = priv->speed;
- u32 speed, pspeed;
+ u32 pspeed;
- if (phydev->speed == old_speed)
+ if (speed == old_speed)
return;
- speed = phydev->speed;
switch (speed) {
case SPEED_1000:
pspeed = ENETC_PMR_PSPEED_1000M;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index f14576212a0e..7b5c82c7e4e5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -78,16 +78,11 @@ static int enetc_vf_set_mac_addr(struct net_device *ndev, void *addr)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct sockaddr *saddr = addr;
- int err;
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- err = enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
- if (err)
- return err;
-
- return 0;
+ return enetc_msg_vsi_set_primary_mac_addr(priv, saddr);
}
static int enetc_vf_set_features(struct net_device *ndev,
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index fb37816a74db..8f7eca1e7716 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1912,6 +1912,27 @@ out:
return ret;
}
+static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+
+ if (phy_dev) {
+ phy_reset_after_clk_enable(phy_dev);
+ } else if (fep->phy_node) {
+ /* If the PHY is not yet bound to the MAC, but there is an OF PHY node
+ * and a matching PHY device instance already, use the OF PHY node to
+ * obtain the PHY device instance and then use that instance when
+ * triggering the PHY reset.
+ */
+ phy_dev = of_phy_find_device(fep->phy_node);
+ phy_reset_after_clk_enable(phy_dev);
+ put_device(&phy_dev->mdio.dev);
+ }
+}
+
static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1938,7 +1959,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
if (ret)
goto failed_clk_ref;
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
} else {
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
@@ -1960,8 +1981,7 @@ failed_clk_ref:
mutex_unlock(&fep->ptp_clk_mutex);
}
failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
+ clk_disable_unprepare(fep->clk_enet_out);
return ret;
}
@@ -2984,16 +3004,16 @@ fec_enet_open(struct net_device *ndev)
/* Init MAC prior to mii bus probe */
fec_restart(ndev);
- /* Probe and connect to PHY when open the interface */
- ret = fec_enet_mii_probe(ndev);
- if (ret)
- goto err_enet_mii_probe;
-
/* Call phy_reset_after_clk_enable() again if it failed during
* phy_reset_after_clk_enable() before because the PHY wasn't probed.
*/
if (reset_again)
- phy_reset_after_clk_enable(ndev->phydev);
+ fec_enet_phy_reset_after_clk_enable(ndev);
+
+ /* Probe and connect to PHY when open the interface */
+ ret = fec_enet_mii_probe(ndev);
+ if (ret)
+ goto err_enet_mii_probe;
if (fep->quirks & FEC_QUIRK_ERR006687)
imx6q_cpuidle_fec_irqs_used();
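fec_enet_phy_reset_after_clk_enable() above leans on a reference-counting pattern worth spelling out: of_phy_find_device() takes a reference on the PHY's underlying mdio device, which must be released with put_device(). A sketch, with a NULL check added as a defensive assumption:

/* Sketch of the lookup-and-release pattern used above. of_phy_find_device()
 * grabs a reference that put_device() must drop; the NULL check is a
 * defensive assumption for the not-yet-probed case.
 */
static void example_poke_phy(struct device_node *phy_node)
{
        struct phy_device *phy_dev = of_phy_find_device(phy_node);

        if (!phy_dev)
                return;         /* PHY not probed yet */

        phy_reset_after_clk_enable(phy_dev);
        put_device(&phy_dev->mdio.dev); /* balance of_phy_find_device() */
}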
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7a3f066e611d..b3bad429e03b 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -74,7 +74,7 @@ struct mpc52xx_fec_priv {
static irqreturn_t mpc52xx_fec_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_rx_interrupt(int, void *);
static irqreturn_t mpc52xx_fec_tx_interrupt(int, void *);
-static void mpc52xx_fec_stop(struct net_device *dev);
+static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep);
static void mpc52xx_fec_start(struct net_device *dev);
static void mpc52xx_fec_reset(struct net_device *dev);
@@ -283,7 +283,7 @@ static int mpc52xx_fec_close(struct net_device *dev)
netif_stop_queue(dev);
- mpc52xx_fec_stop(dev);
+ mpc52xx_fec_stop(dev, true);
mpc52xx_fec_free_rx_buffers(dev, priv->rx_dmatsk);
@@ -693,7 +693,7 @@ static void mpc52xx_fec_start(struct net_device *dev)
*
* stop all activity on fec and empty dma buffers
*/
-static void mpc52xx_fec_stop(struct net_device *dev)
+static void mpc52xx_fec_stop(struct net_device *dev, bool may_sleep)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
@@ -706,7 +706,7 @@ static void mpc52xx_fec_stop(struct net_device *dev)
bcom_disable(priv->rx_dmatsk);
/* Wait for tx queue to drain, but only if we're in process context */
- if (!in_interrupt()) {
+ if (may_sleep) {
timeout = jiffies + msecs_to_jiffies(2000);
while (time_before(jiffies, timeout) &&
!bcom_queue_empty(priv->tx_dmatsk))
@@ -738,7 +738,7 @@ static void mpc52xx_fec_reset(struct net_device *dev)
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- mpc52xx_fec_stop(dev);
+ mpc52xx_fec_stop(dev, false);
out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status));
out_be32(&fec->reset_cntrl, FEC_RESET_CNTRL_RESET_FIFO);
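The mpc52xx change above is an instance of the kernel-wide in_interrupt() removal: rather than guessing the execution context, callers now state explicitly whether sleeping is allowed. A generic sketch with hypothetical example_ names:

/* Generic sketch of the explicit-context pattern. struct example_hw and
 * example_wait_for_drain() are hypothetical stand-ins.
 */
struct example_hw;
void example_wait_for_drain(struct example_hw *hw);     /* may sleep */

static void example_stop_hw(struct example_hw *hw, bool may_sleep)
{
        if (may_sleep)
                example_wait_for_drain(hw);     /* process context only */
        /* atomic callers (e.g. reset paths) pass false and skip the wait */
}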
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a0c1f4410306..2e344aada4c6 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -512,7 +512,7 @@ int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
-EFAULT : 0;
}
-/**
+/*
* fec_time_keep - call timecounter_read every second to avoid timer overrun
* because ENET just supports a 32-bit counter, which wraps in about 4s
*/
@@ -520,13 +520,12 @@ static void fec_time_keep(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
- u64 ns;
unsigned long flags;
mutex_lock(&fep->ptp_clk_mutex);
if (fep->ptp_clk_on) {
spin_lock_irqsave(&fep->tmreg_lock, flags);
- ns = timecounter_read(&fep->tc);
+ timecounter_read(&fep->tc);
spin_unlock_irqrestore(&fep->tmreg_lock, flags);
}
mutex_unlock(&fep->ptp_clk_mutex);
@@ -567,7 +566,8 @@ static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
/**
* fec_ptp_init
- * @ndev: The FEC network adapter
+ * @pdev: The FEC network adapter
+ * @irq_idx: the interrupt index
*
* This function performs the required steps for enabling ptp
* support. If ptp support has already been loaded it simply calls the
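The fec_time_keep() comment above describes a common PTP idiom: a 32-bit cycle counter wraps in roughly 4 s at the FEC clock rate, so a delayed work must read the timecounter at least once a second to keep the 64-bit accumulated time valid. A self-contained sketch (locking omitted, example_ names illustrative):

/* Sketch of the periodic timecounter keepalive. Reading folds elapsed
 * hardware cycles into the 64-bit count before the 32-bit counter wraps.
 */
struct example_priv {
        struct timecounter tc;
        struct delayed_work time_keep;
};

static void example_time_keep(struct work_struct *work)
{
        struct example_priv *p = container_of(to_delayed_work(work),
                                              struct example_priv, time_keep);

        timecounter_read(&p->tc);       /* accumulate elapsed cycles */
        schedule_delayed_work(&p->time_keep, HZ);
}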
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ef67e8599b39..ce0a121580f6 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2063,11 +2063,11 @@ static int fman_set_exception(struct fman *fman,
/**
* fman_register_intr
* @fman: A Pointer to FMan device
- * @mod: Calling module
+ * @module: Calling module
* @mod_id: Module id (if more than 1 exists, '0' if not)
* @intr_type: Interrupt type (error/normal) selection.
- * @f_isr: The interrupt service routine.
- * @h_src_arg: Argument to be passed to f_isr.
+ * @isr_cb: The interrupt service routine.
+ * @src_arg: Argument to be passed to isr_cb.
*
* Used to register an event handler to be processed by FMan
*
@@ -2091,7 +2091,7 @@ EXPORT_SYMBOL(fman_register_intr);
/**
* fman_unregister_intr
* @fman: A Pointer to FMan device
- * @mod: Calling module
+ * @module: Calling module
* @mod_id: Module id (if more than 1 exists, '0' if not)
* @intr_type: Interrupt type (error/normal) selection.
*
@@ -2342,8 +2342,8 @@ EXPORT_SYMBOL(fman_get_bmi_max_fifo_size);
/**
* fman_get_revision
- * @fman - Pointer to the FMan module
- * @rev_info - A structure of revision information parameters.
+ * @fman: Pointer to the FMan module
+ * @rev_info: A structure of revision information parameters.
*
* Returns the FM revision
*
@@ -2508,7 +2508,7 @@ EXPORT_SYMBOL(fman_get_rx_extra_headroom);
/**
* fman_bind
- * @dev: FMan OF device pointer
+ * @fm_dev: FMan OF device pointer
*
* Bind to a specific FMan device.
*
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 5ec94d243da0..7ad317e622bc 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -144,9 +144,9 @@ unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
/**
* fman_muram_free_mem
- * muram: FM-MURAM module pointer.
- * offset: offset of the memory region to be freed.
- * size: size of the memory to be freed.
+ * @muram: FM-MURAM module pointer.
+ * @offset: offset of the memory region to be freed.
+ * @size: size of the memory to be freed.
*
* Free previously allocated memory from the FM-MURAM partition.
*/
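All of the fman hunks in this patch are the same shape of fix: kernel-doc parameter lines gain the '@name:' marker that scripts/kernel-doc requires. For reference, a well-formed block looks like this (the function is illustrative):

/**
 * example_free_mem - free a region from an example partition
 * @part:   partition the region belongs to
 * @offset: offset of the region to free
 * @size:   size of the region, in bytes
 *
 * Without the leading '@' and trailing ':', scripts/kernel-doc reports these
 * parameters as undescribed, which is what the hunks above correct.
 */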
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 624b2eb6f01d..d9baac0dbc7d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1410,9 +1410,11 @@ err_port_cfg:
}
EXPORT_SYMBOL(fman_port_config);
-/**
+/*
* fman_port_use_kg_hash
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
+ * @enable: enable or disable
+ *
* Sets the HW KeyGen or the BMI as HW Parser next engine, enabling
* or bypassing the KeyGen hashing of Rx traffic
*/
@@ -1430,7 +1432,8 @@ EXPORT_SYMBOL(fman_port_use_kg_hash);
/**
* fman_port_init
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
+ *
* Initializes the FM PORT module by defining the software structure and
* configuring the hardware registers.
*
@@ -1524,8 +1527,8 @@ EXPORT_SYMBOL(fman_port_init);
/**
* fman_port_cfg_buf_prefix_content
- * @port A pointer to a FM Port module.
- * @buffer_prefix_content A structure of parameters describing
+ * @port: A pointer to a FM Port module.
+ * @buffer_prefix_content: A structure of parameters describing
* the structure of the buffer.
* Out parameter:
* Start margin - offset of data from
@@ -1570,7 +1573,7 @@ EXPORT_SYMBOL(fman_port_cfg_buf_prefix_content);
/**
* fman_port_disable
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
*
* Gracefully disable an FM port. The port will not start new tasks after all
* tasks associated with the port are terminated.
@@ -1651,7 +1654,7 @@ EXPORT_SYMBOL(fman_port_disable);
/**
* fman_port_enable
- * port: A pointer to a FM Port module.
+ * @port: A pointer to a FM Port module.
*
* A runtime routine provided to allow disable/enable of port.
*
@@ -1697,7 +1700,7 @@ EXPORT_SYMBOL(fman_port_enable);
/**
* fman_port_bind
- * dev: FMan Port OF device pointer
+ * @dev: FMan Port OF device pointer
*
* Bind to a specific FMan Port.
*
@@ -1713,7 +1716,7 @@ EXPORT_SYMBOL(fman_port_bind);
/**
* fman_port_get_qman_channel_id
- * port: Pointer to the FMan port devuce
+ * @port: Pointer to the FMan port device
*
* Get the QMan channel ID for the specific port
*
@@ -1727,7 +1730,7 @@ EXPORT_SYMBOL(fman_port_get_qman_channel_id);
/**
* fman_port_get_device
- * port: Pointer to the FMan port device
+ * @port: Pointer to the FMan port device
*
* Get the 'struct device' associated to the specified FMan port device
*
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 43427c5b9396..901749a7a318 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -359,8 +359,8 @@ EXPORT_SYMBOL(fman_set_mac_active_pause);
/**
* fman_get_pause_cfg
* @mac_dev: A pointer to the MAC device
- * @rx: Return value for RX setting
- * @tx: Return value for TX setting
+ * @rx_pause: Return value for RX setting
+ * @tx_pause: Return value for TX setting
*
* Determine the MAC RX/TX PAUSE frames settings based on PHY
* autonegotiation or values set by ethtool.
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index bf846b42bc74..78e008b81374 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -562,10 +562,13 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
BD_ENET_TX_TC);
CBDS_SC(bdp, BD_ENET_TX_READY);
- if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
- bdp++, curidx++;
- else
- bdp = fep->tx_bd_base, curidx = 0;
+ if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) {
+ bdp++;
+ curidx++;
+ } else {
+ bdp = fep->tx_bd_base;
+ curidx = 0;
+ }
len = skb_frag_size(frag);
CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len,
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index ebc37e256922..f5c80229ea96 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -27,6 +27,17 @@
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3
+/* Numbers of gve tx/rx stats in stats report. */
+#define GVE_TX_STATS_REPORT_NUM 5
+#define GVE_RX_STATS_REPORT_NUM 2
+
+/* Interval to schedule a stats report update, 20000ms. */
+#define GVE_STATS_REPORT_TIMER_PERIOD 20000
+
+/* Numbers of NIC tx/rx stats in stats report. */
+#define NIC_TX_STATS_REPORT_NUM 0
+#define NIC_RX_STATS_REPORT_NUM 4
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -71,6 +82,11 @@ struct gve_rx_ring {
u32 cnt; /* free-running total number of completed packets */
u32 fill_cnt; /* free-running total number of descs and buffs posted */
u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
+ u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
+ u64 rx_copied_pkt; /* free-running total number of copied packets */
+ u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
+ u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
+ u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
@@ -202,24 +218,63 @@ struct gve_priv {
dma_addr_t adminq_bus_addr;
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
-
+ u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
+ u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
+ /* free-running count of each AQ cmd type executed */
+ u32 adminq_describe_device_cnt;
+ u32 adminq_cfg_device_resources_cnt;
+ u32 adminq_register_page_list_cnt;
+ u32 adminq_unregister_page_list_cnt;
+ u32 adminq_create_tx_queue_cnt;
+ u32 adminq_create_rx_queue_cnt;
+ u32 adminq_destroy_tx_queue_cnt;
+ u32 adminq_destroy_rx_queue_cnt;
+ u32 adminq_dcfg_device_resources_cnt;
+ u32 adminq_set_driver_parameter_cnt;
+ u32 adminq_report_stats_cnt;
+ u32 adminq_report_link_speed_cnt;
+
+ /* Global stats */
+ u32 interface_up_cnt; /* count of times interface turned up since last reset */
+ u32 interface_down_cnt; /* count of times interface turned down since last reset */
+ u32 reset_cnt; /* count of resets */
+ u32 page_alloc_fail; /* count of page alloc fails */
+ u32 dma_mapping_error; /* count of dma mapping errors */
+ u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
struct workqueue_struct *gve_wq;
struct work_struct service_task;
+ struct work_struct stats_report_task;
unsigned long service_task_flags;
unsigned long state_flags;
+
+ struct gve_stats_report *stats_report;
+ u64 stats_report_len;
+ dma_addr_t stats_report_bus; /* dma address for the stats report */
+ unsigned long ethtool_flags;
+
+ unsigned long stats_report_timer_period;
+ struct timer_list stats_report_timer;
+
+ /* Gvnic device link speed from hypervisor. */
+ u64 link_speed;
};
-enum gve_service_task_flags {
- GVE_PRIV_FLAGS_DO_RESET = BIT(1),
- GVE_PRIV_FLAGS_RESET_IN_PROGRESS = BIT(2),
- GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = BIT(3),
+enum gve_service_task_flags_bit {
+ GVE_PRIV_FLAGS_DO_RESET = 1,
+ GVE_PRIV_FLAGS_RESET_IN_PROGRESS = 2,
+ GVE_PRIV_FLAGS_PROBE_IN_PROGRESS = 3,
+ GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
};
-enum gve_state_flags {
- GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = BIT(1),
- GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = BIT(2),
- GVE_PRIV_FLAGS_DEVICE_RINGS_OK = BIT(3),
- GVE_PRIV_FLAGS_NAPI_ENABLED = BIT(4),
+enum gve_state_flags_bit {
+ GVE_PRIV_FLAGS_ADMIN_QUEUE_OK = 1,
+ GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK = 2,
+ GVE_PRIV_FLAGS_DEVICE_RINGS_OK = 3,
+ GVE_PRIV_FLAGS_NAPI_ENABLED = 4,
+};
+
+enum gve_ethtool_flags_bit {
+ GVE_PRIV_FLAGS_REPORT_STATS = 0,
};
static inline bool gve_get_do_reset(struct gve_priv *priv)
@@ -269,6 +324,22 @@ static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}
+static inline bool gve_get_do_report_stats(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
+ &priv->service_task_flags);
+}
+
+static inline void gve_set_do_report_stats(struct gve_priv *priv)
+{
+ set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
+}
+
+static inline void gve_clear_do_report_stats(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
+}
+
static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
@@ -329,6 +400,16 @@ static inline void gve_clear_napi_enabled(struct gve_priv *priv)
clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}
+static inline bool gve_get_report_stats(struct gve_priv *priv)
+{
+ return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
+}
+
+static inline void gve_clear_report_stats(struct gve_priv *priv)
+{
+ clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
+}
+
/* Returns the address of the ntfy_blocks irq doorbell
*/
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
@@ -426,7 +507,8 @@ static inline bool gve_can_recycle_pages(struct net_device *dev)
}
/* buffers */
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+ struct page **page, dma_addr_t *dma,
enum dma_data_direction);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
@@ -450,6 +532,8 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
+/* report stats handling */
+void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
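The enum renames above from ...flags to ...flags_bit are a real fix, not cosmetics: set_bit(), test_bit() and clear_bit() take a bit index, while the old values were BIT(n) masks, so BIT(2) == 4 silently addressed bit 4. A minimal illustration using the new definitions:

/* set_bit() and friends take a bit *index*; BIT(n) is a mask. With the old
 * enum values, set_bit(BIT(2), flags) would have touched bit 4, not bit 2.
 */
static void example_request_reset(unsigned long *service_task_flags)
{
        set_bit(GVE_PRIV_FLAGS_DO_RESET, service_task_flags);   /* index 1 */

        if (test_bit(GVE_PRIV_FLAGS_DO_RESET, service_task_flags))
                clear_bit(GVE_PRIV_FLAGS_DO_RESET, service_task_flags);
}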
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index c3ba7baf0107..24ae6a28a806 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -23,6 +23,20 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
priv->adminq_prod_cnt = 0;
+ priv->adminq_cmd_fail = 0;
+ priv->adminq_timeouts = 0;
+ priv->adminq_describe_device_cnt = 0;
+ priv->adminq_cfg_device_resources_cnt = 0;
+ priv->adminq_register_page_list_cnt = 0;
+ priv->adminq_unregister_page_list_cnt = 0;
+ priv->adminq_create_tx_queue_cnt = 0;
+ priv->adminq_create_rx_queue_cnt = 0;
+ priv->adminq_destroy_tx_queue_cnt = 0;
+ priv->adminq_destroy_rx_queue_cnt = 0;
+ priv->adminq_dcfg_device_resources_cnt = 0;
+ priv->adminq_set_driver_parameter_cnt = 0;
+ priv->adminq_report_stats_cnt = 0;
+ priv->adminq_report_link_speed_cnt = 0;
/* Setup Admin queue with the device */
iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
@@ -81,17 +95,18 @@ static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
return false;
}
-static int gve_adminq_parse_err(struct device *dev, u32 status)
+static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
if (status != GVE_ADMINQ_COMMAND_PASSED &&
- status != GVE_ADMINQ_COMMAND_UNSET)
- dev_err(dev, "AQ command failed with status %d\n", status);
-
+ status != GVE_ADMINQ_COMMAND_UNSET) {
+ dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
+ priv->adminq_cmd_fail++;
+ }
switch (status) {
case GVE_ADMINQ_COMMAND_PASSED:
return 0;
case GVE_ADMINQ_COMMAND_UNSET:
- dev_err(dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
+ dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
return -EINVAL;
case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
@@ -116,36 +131,145 @@ static int gve_adminq_parse_err(struct device *dev, u32 status)
case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
return -ENOTSUPP;
default:
- dev_err(dev, "parse_aq_err: unknown status code %d\n", status);
+ dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
return -EINVAL;
}
}
+/* Flushes all AQ commands currently queued and waits for them to complete.
+ * If there are failures, it will return the first error.
+ */
+static int gve_adminq_kick_and_wait(struct gve_priv *priv)
+{
+ u32 tail, head;
+ int i;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ head = priv->adminq_prod_cnt;
+
+ gve_adminq_kick_cmd(priv, head);
+ if (!gve_adminq_wait_for_cmd(priv, head)) {
+ dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
+ priv->adminq_timeouts++;
+ return -ENOTRECOVERABLE;
+ }
+
+ for (i = tail; i < head; i++) {
+ union gve_adminq_command *cmd;
+ u32 status, err;
+
+ cmd = &priv->adminq[i & priv->adminq_mask];
+ status = be32_to_cpu(READ_ONCE(cmd->status));
+ err = gve_adminq_parse_err(priv, status);
+ if (err)
+ // Return the first error if we failed.
+ return err;
+ }
+
+ return 0;
+}
+
/* This function is not threadsafe - the caller is responsible for any
* necessary locks.
*/
-int gve_adminq_execute_cmd(struct gve_priv *priv,
- union gve_adminq_command *cmd_orig)
+static int gve_adminq_issue_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd_orig)
{
union gve_adminq_command *cmd;
- u32 status = 0;
- u32 prod_cnt;
+ u32 opcode;
+ u32 tail;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+
+ // Check if next command will overflow the buffer.
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ int err;
+
+ // Flush existing commands to make room.
+ err = gve_adminq_kick_and_wait(priv);
+ if (err)
+ return err;
+
+ // Retry.
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) {
+ // This should never happen. We just flushed the
+ // command queue so there should be enough space.
+ return -ENOMEM;
+ }
+ }
cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
priv->adminq_prod_cnt++;
- prod_cnt = priv->adminq_prod_cnt;
memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
-
- gve_adminq_kick_cmd(priv, prod_cnt);
- if (!gve_adminq_wait_for_cmd(priv, prod_cnt)) {
- dev_err(&priv->pdev->dev, "AQ command timed out, need to reset AQ\n");
- return -ENOTRECOVERABLE;
+ opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
+
+ switch (opcode) {
+ case GVE_ADMINQ_DESCRIBE_DEVICE:
+ priv->adminq_describe_device_cnt++;
+ break;
+ case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
+ priv->adminq_cfg_device_resources_cnt++;
+ break;
+ case GVE_ADMINQ_REGISTER_PAGE_LIST:
+ priv->adminq_register_page_list_cnt++;
+ break;
+ case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
+ priv->adminq_unregister_page_list_cnt++;
+ break;
+ case GVE_ADMINQ_CREATE_TX_QUEUE:
+ priv->adminq_create_tx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_CREATE_RX_QUEUE:
+ priv->adminq_create_rx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DESTROY_TX_QUEUE:
+ priv->adminq_destroy_tx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DESTROY_RX_QUEUE:
+ priv->adminq_destroy_rx_queue_cnt++;
+ break;
+ case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
+ priv->adminq_dcfg_device_resources_cnt++;
+ break;
+ case GVE_ADMINQ_SET_DRIVER_PARAMETER:
+ priv->adminq_set_driver_parameter_cnt++;
+ break;
+ case GVE_ADMINQ_REPORT_STATS:
+ priv->adminq_report_stats_cnt++;
+ break;
+ case GVE_ADMINQ_REPORT_LINK_SPEED:
+ priv->adminq_report_link_speed_cnt++;
+ break;
+ default:
+ dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
- memcpy(cmd_orig, cmd, sizeof(*cmd));
- status = be32_to_cpu(READ_ONCE(cmd->status));
- return gve_adminq_parse_err(&priv->pdev->dev, status);
+ return 0;
+}
+
+/* This function is not threadsafe - the caller is responsible for any
+ * necessary locks.
+ * The caller is also responsible for making sure there are no commands
+ * waiting to be executed.
+ */
+static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig)
+{
+ u32 tail, head;
+ int err;
+
+ tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
+ head = priv->adminq_prod_cnt;
+ if (tail != head)
+ // This is not a valid path
+ return -EINVAL;
+
+ err = gve_adminq_issue_cmd(priv, cmd_orig);
+ if (err)
+ return err;
+
+ return gve_adminq_kick_and_wait(priv);
}
/* The device specifies that the management vector can either be the first irq
@@ -190,29 +314,50 @@ int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
return gve_adminq_execute_cmd(priv, &cmd);
}
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_tx_ring *tx = &priv->tx[queue_index];
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
.queue_id = cpu_to_be32(queue_index),
.reserved = 0,
- .queue_resources_addr = cpu_to_be64(tx->q_resources_bus),
+ .queue_resources_addr =
+ cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.queue_page_list_id = cpu_to_be32(tx->tx_fifo.qpl->id),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_create_tx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
@@ -227,12 +372,31 @@ int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_page_list_id = cpu_to_be32(rx->data.qpl->id),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
}
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_create_rx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
+}
+
+static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
@@ -240,12 +404,31 @@ int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_destroy_tx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
+static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
+ int err;
memset(&cmd, 0, sizeof(cmd));
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
@@ -253,7 +436,25 @@ int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
.queue_id = cpu_to_be32(queue_index),
};
- return gve_adminq_execute_cmd(priv, &cmd);
+ err = gve_adminq_issue_cmd(priv, &cmd);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < num_queues; i++) {
+ err = gve_adminq_destroy_rx_queue(priv, i);
+ if (err)
+ return err;
+ }
+
+ return gve_adminq_kick_and_wait(priv);
}
int gve_adminq_describe_device(struct gve_priv *priv)
@@ -283,8 +484,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Tx desc count %d too low\n",
- priv->tx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -293,8 +493,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
< PAGE_SIZE ||
priv->rx_desc_cnt * sizeof(priv->rx->data.data_ring[0])
< PAGE_SIZE) {
- netif_err(priv, drv, priv->dev, "Rx desc count %d too low\n",
- priv->rx_desc_cnt);
+ dev_err(&priv->pdev->dev, "Rx desc count %d too low\n", priv->rx_desc_cnt);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -302,8 +501,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
be64_to_cpu(descriptor->max_registered_pages);
mtu = be16_to_cpu(descriptor->mtu);
if (mtu < ETH_MIN_MTU) {
- netif_err(priv, drv, priv->dev, "MTU %d below minimum MTU\n",
- mtu);
+ dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
err = -EINVAL;
goto free_device_descriptor;
}
@@ -311,12 +509,12 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->num_event_counters = be16_to_cpu(descriptor->counters);
ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
mac = descriptor->mac;
- netif_info(priv, drv, priv->dev, "MAC addr: %pM\n", mac);
+ dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
priv->rx_pages_per_qpl = be16_to_cpu(descriptor->rx_pages_per_qpl);
if (priv->rx_pages_per_qpl < priv->rx_desc_cnt) {
- netif_err(priv, drv, priv->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_pages_per_qpl);
+ dev_err(&priv->pdev->dev, "rx_pages_per_qpl cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
+ priv->rx_pages_per_qpl);
priv->rx_desc_cnt = priv->rx_pages_per_qpl;
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
@@ -385,3 +583,46 @@ int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
return gve_adminq_execute_cmd(priv, &cmd);
}
+
+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
+ dma_addr_t stats_report_addr, u64 interval)
+{
+ union gve_adminq_command cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
+ cmd.report_stats = (struct gve_adminq_report_stats) {
+ .stats_report_len = cpu_to_be64(stats_report_len),
+ .stats_report_addr = cpu_to_be64(stats_report_addr),
+ .interval = cpu_to_be64(interval),
+ };
+
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
+int gve_adminq_report_link_speed(struct gve_priv *priv)
+{
+ union gve_adminq_command gvnic_cmd;
+ dma_addr_t link_speed_region_bus;
+ __be64 *link_speed_region;
+ int err;
+
+ link_speed_region =
+ dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
+ &link_speed_region_bus, GFP_KERNEL);
+
+ if (!link_speed_region)
+ return -ENOMEM;
+
+ memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
+ gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
+ gvnic_cmd.report_link_speed.link_speed_address =
+ cpu_to_be64(link_speed_region_bus);
+
+ err = gve_adminq_execute_cmd(priv, &gvnic_cmd);
+
+ priv->link_speed = be64_to_cpu(*link_speed_region);
+ dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
+ link_speed_region_bus);
+ return err;
+}
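From a caller's point of view, the batching rework above means queue bring-up pays the doorbell-and-wait cost once per direction instead of once per queue. A hypothetical caller-side sketch using the new exported API:

/* Hypothetical caller-side sketch of the batched queue-creation API. Each
 * call issues all commands and then kicks/waits once.
 */
static int example_bring_up_queues(struct gve_priv *priv)
{
        int err;

        err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
        if (err)
                return err;

        return gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
}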
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 4dfa06edc0f8..281de8326bc5 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -21,6 +21,8 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_DESTROY_RX_QUEUE = 0x8,
GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES = 0x9,
GVE_ADMINQ_SET_DRIVER_PARAMETER = 0xB,
+ GVE_ADMINQ_REPORT_STATS = 0xC,
+ GVE_ADMINQ_REPORT_LINK_SPEED = 0xD
};
/* Admin queue status codes */
@@ -172,6 +174,51 @@ struct gve_adminq_set_driver_parameter {
static_assert(sizeof(struct gve_adminq_set_driver_parameter) == 16);
+struct gve_adminq_report_stats {
+ __be64 stats_report_len;
+ __be64 stats_report_addr;
+ __be64 interval;
+};
+
+static_assert(sizeof(struct gve_adminq_report_stats) == 24);
+
+struct gve_adminq_report_link_speed {
+ __be64 link_speed_address;
+};
+
+static_assert(sizeof(struct gve_adminq_report_link_speed) == 8);
+
+struct stats {
+ __be32 stat_name;
+ __be32 queue_id;
+ __be64 value;
+};
+
+static_assert(sizeof(struct stats) == 16);
+
+struct gve_stats_report {
+ __be64 written_count;
+ struct stats stats[0];
+};
+
+static_assert(sizeof(struct gve_stats_report) == 8);
+
+enum gve_stat_names {
+ // stats from gve
+ TX_WAKE_CNT = 1,
+ TX_STOP_CNT = 2,
+ TX_FRAMES_SENT = 3,
+ TX_BYTES_SENT = 4,
+ TX_LAST_COMPLETION_PROCESSED = 5,
+ RX_NEXT_EXPECTED_SEQUENCE = 6,
+ RX_BUFFERS_POSTED = 7,
+ // stats from NIC
+ RX_QUEUE_DROP_CNT = 65,
+ RX_NO_BUFFERS_POSTED = 66,
+ RX_DROPS_PACKET_OVER_MRU = 67,
+ RX_DROPS_INVALID_CHECKSUM = 68,
+};
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -187,6 +234,8 @@ union gve_adminq_command {
struct gve_adminq_register_page_list reg_page_list;
struct gve_adminq_unregister_page_list unreg_page_list;
struct gve_adminq_set_driver_parameter set_driver_param;
+ struct gve_adminq_report_stats report_stats;
+ struct gve_adminq_report_link_speed report_link_speed;
};
};
u8 reserved[64];
@@ -197,8 +246,6 @@ static_assert(sizeof(union gve_adminq_command) == 64);
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv);
void gve_adminq_free(struct device *dev, struct gve_priv *priv);
void gve_adminq_release(struct gve_priv *priv);
-int gve_adminq_execute_cmd(struct gve_priv *priv,
- union gve_adminq_command *cmd_orig);
int gve_adminq_describe_device(struct gve_priv *priv);
int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t counter_array_bus_addr,
@@ -206,12 +253,15 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
dma_addr_t db_array_bus_addr,
u32 num_ntfy_blks);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
-int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_id);
-int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_id);
+int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id);
int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu);
+int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
+ dma_addr_t stats_report_addr, u64 interval);
+int gve_adminq_report_link_speed(struct gve_priv *priv);
#endif /* _GVE_ADMINQ_H */
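The report-stats layout above is a header plus a flexible array of big-endian records, bounded by written_count. A hedged sketch of how a consumer might walk it (validation against the DMA region length is omitted):

/* Sketch of walking the flexible stats array; written_count bounds the valid
 * entries. Bounds checking against stats_report_len is omitted here.
 */
static void example_walk_stats(const struct gve_stats_report *report)
{
        u64 n = be64_to_cpu(report->written_count);
        u64 i;

        for (i = 0; i < n; i++) {
                u32 name = be32_to_cpu(report->stats[i].stat_name);
                u64 value = be64_to_cpu(report->stats[i].value);

                if (name == RX_QUEUE_DROP_CNT)
                        pr_debug("queue %u dropped %llu pkts\n",
                                 be32_to_cpu(report->stats[i].queue_id),
                                 value);
        }
}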
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index d8fa816f4473..7b44769bd87c 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -6,6 +6,7 @@
#include <linux/rtnetlink.h>
#include "gve.h"
+#include "gve_adminq.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -34,41 +35,84 @@ static u32 gve_get_msglevel(struct net_device *netdev)
static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
"rx_dropped", "tx_dropped", "tx_timeouts",
+ "rx_skb_alloc_fail", "rx_buf_alloc_fail", "rx_desc_err_dropped_pkt",
+ "interface_up_cnt", "interface_down_cnt", "reset_cnt",
+ "page_alloc_fail", "dma_mapping_error", "stats_report_trigger_cnt",
+};
+
+static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
+ "rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
+ "rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
+ "rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
+};
+
+static const char gve_gstrings_tx_stats[][ETH_GSTRING_LEN] = {
+ "tx_posted_desc[%u]", "tx_completed_desc[%u]", "tx_bytes[%u]",
+ "tx_wake[%u]", "tx_stop[%u]", "tx_event_counter[%u]",
+};
+
+static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
+ "adminq_prod_cnt", "adminq_cmd_fail", "adminq_timeouts",
+ "adminq_describe_device_cnt", "adminq_cfg_device_resources_cnt",
+ "adminq_register_page_list_cnt", "adminq_unregister_page_list_cnt",
+ "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
+ "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
+ "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
+};
+
+static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
+ "report-stats",
};
#define GVE_MAIN_STATS_LEN ARRAY_SIZE(gve_gstrings_main_stats)
-#define NUM_GVE_TX_CNTS 5
-#define NUM_GVE_RX_CNTS 2
+#define GVE_ADMINQ_STATS_LEN ARRAY_SIZE(gve_gstrings_adminq_stats)
+#define NUM_GVE_TX_CNTS ARRAY_SIZE(gve_gstrings_tx_stats)
+#define NUM_GVE_RX_CNTS ARRAY_SIZE(gve_gstrings_rx_stats)
+#define GVE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(gve_gstrings_priv_flags)
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
char *s = (char *)data;
- int i;
+ int i, j;
- if (stringset != ETH_SS_STATS)
- return;
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(s, *gve_gstrings_main_stats,
+ sizeof(gve_gstrings_main_stats));
+ s += sizeof(gve_gstrings_main_stats);
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
+ snprintf(s, ETH_GSTRING_LEN,
+ gve_gstrings_rx_stats[j], i);
+ s += ETH_GSTRING_LEN;
+ }
+ }
- memcpy(s, *gve_gstrings_main_stats,
- sizeof(gve_gstrings_main_stats));
- s += sizeof(gve_gstrings_main_stats);
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- snprintf(s, ETH_GSTRING_LEN, "rx_desc_cnt[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "rx_desc_fill_cnt[%u]", i);
- s += ETH_GSTRING_LEN;
- }
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- snprintf(s, ETH_GSTRING_LEN, "tx_req[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_done[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_wake[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_stop[%u]", i);
- s += ETH_GSTRING_LEN;
- snprintf(s, ETH_GSTRING_LEN, "tx_event_counter[%u]", i);
- s += ETH_GSTRING_LEN;
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
+ snprintf(s, ETH_GSTRING_LEN,
+ gve_gstrings_tx_stats[j], i);
+ s += ETH_GSTRING_LEN;
+ }
+ }
+
+ memcpy(s, *gve_gstrings_adminq_stats,
+ sizeof(gve_gstrings_adminq_stats));
+ s += sizeof(gve_gstrings_adminq_stats);
+ break;
+
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(s, *gve_gstrings_priv_flags,
+ sizeof(gve_gstrings_priv_flags));
+ s += sizeof(gve_gstrings_priv_flags);
+ break;
+
+ default:
+ break;
}
}
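
The per-queue string tables above carry printf-style placeholders, so one table entry is stamped out once per queue by snprintf(). A tiny standalone illustration of the trick (hypothetical buffer, not driver code):

	#include <stdio.h>

	#define ETH_GSTRING_LEN 32	/* same length ethtool string sets use */

	int main(void)
	{
		char s[ETH_GSTRING_LEN];
		unsigned int q = 3;

		/* "rx_posted_desc[%u]" with q == 3 yields "rx_posted_desc[3]" */
		snprintf(s, ETH_GSTRING_LEN, "rx_posted_desc[%u]", q);
		puts(s);
		return 0;
	}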
@@ -78,9 +122,11 @@ static int gve_get_sset_count(struct net_device *netdev, int sset)
switch (sset) {
case ETH_SS_STATS:
- return GVE_MAIN_STATS_LEN +
+ return GVE_MAIN_STATS_LEN + GVE_ADMINQ_STATS_LEN +
(priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS) +
(priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS);
+ case ETH_SS_PRIV_FLAGS:
+ return GVE_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -90,24 +136,56 @@ static void
gve_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
- struct gve_priv *priv = netdev_priv(netdev);
- u64 rx_pkts, rx_bytes, tx_pkts, tx_bytes;
+ u64 tmp_rx_pkts, tmp_rx_bytes, tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail,
+ tmp_rx_desc_err_dropped_pkt, tmp_tx_pkts, tmp_tx_bytes;
+ u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_pkts,
+ rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes;
+ int stats_idx, base_stats_idx, max_stats_idx;
+ struct stats *report_stats;
+ int *rx_qid_to_stats_idx;
+ int *tx_qid_to_stats_idx;
+ struct gve_priv *priv;
+ bool skip_nic_stats;
unsigned int start;
int ring;
- int i;
+ int i, j;
ASSERT_RTNL();
- for (rx_pkts = 0, rx_bytes = 0, ring = 0;
+ priv = netdev_priv(netdev);
+ report_stats = priv->stats_report->stats;
+ rx_qid_to_stats_idx = kmalloc_array(priv->rx_cfg.num_queues,
+ sizeof(int), GFP_KERNEL);
+ if (!rx_qid_to_stats_idx)
+ return;
+ tx_qid_to_stats_idx = kmalloc_array(priv->tx_cfg.num_queues,
+ sizeof(int), GFP_KERNEL);
+ if (!tx_qid_to_stats_idx) {
+ kfree(rx_qid_to_stats_idx);
+ return;
+ }
+ for (rx_pkts = 0, rx_bytes = 0, rx_skb_alloc_fail = 0,
+ rx_buf_alloc_fail = 0, rx_desc_err_dropped_pkt = 0, ring = 0;
ring < priv->rx_cfg.num_queues; ring++) {
if (priv->rx) {
do {
+ struct gve_rx_ring *rx = &priv->rx[ring];
+
start =
u64_stats_fetch_begin(&priv->rx[ring].statss);
- rx_pkts += priv->rx[ring].rpackets;
- rx_bytes += priv->rx[ring].rbytes;
+ tmp_rx_pkts = rx->rpackets;
+ tmp_rx_bytes = rx->rbytes;
+ tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+ tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+ tmp_rx_desc_err_dropped_pkt =
+ rx->rx_desc_err_dropped_pkt;
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
+ rx_pkts += tmp_rx_pkts;
+ rx_bytes += tmp_rx_bytes;
+ rx_skb_alloc_fail += tmp_rx_skb_alloc_fail;
+ rx_buf_alloc_fail += tmp_rx_buf_alloc_fail;
+ rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt;
}
}
for (tx_pkts = 0, tx_bytes = 0, ring = 0;
@@ -116,10 +194,12 @@ gve_get_ethtool_stats(struct net_device *netdev,
do {
start =
u64_stats_fetch_begin(&priv->tx[ring].statss);
- tx_pkts += priv->tx[ring].pkt_done;
- tx_bytes += priv->tx[ring].bytes_done;
+ tmp_tx_pkts = priv->tx[ring].pkt_done;
+ tmp_tx_bytes = priv->tx[ring].bytes_done;
} while (u64_stats_fetch_retry(&priv->tx[ring].statss,
start));
+ tx_pkts += tmp_tx_pkts;
+ tx_bytes += tmp_tx_bytes;
}
}
@@ -128,22 +208,102 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tx_pkts;
data[i++] = rx_bytes;
data[i++] = tx_bytes;
- /* Skip rx_dropped and tx_dropped */
- i += 2;
+ /* total rx dropped packets */
+ data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail +
+ rx_desc_err_dropped_pkt;
+ /* Skip tx_dropped */
+ i++;
+
data[i++] = priv->tx_timeo_cnt;
+ data[i++] = rx_skb_alloc_fail;
+ data[i++] = rx_buf_alloc_fail;
+ data[i++] = rx_desc_err_dropped_pkt;
+ data[i++] = priv->interface_up_cnt;
+ data[i++] = priv->interface_down_cnt;
+ data[i++] = priv->reset_cnt;
+ data[i++] = priv->page_alloc_fail;
+ data[i++] = priv->dma_mapping_error;
+ data[i++] = priv->stats_report_trigger_cnt;
i = GVE_MAIN_STATS_LEN;
+ /* For rx cross-reporting stats, start from nic rx stats in report */
+ base_stats_idx = GVE_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
+ max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ base_stats_idx;
+ /* Preprocess the stats report for rx, map queue id to start index */
+ skip_nic_stats = false;
+ for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ stats_idx += NIC_RX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+
+ if (stat_name == 0) { /* zeroed entry: NIC has not written this block */
+ /* no stats written by NIC yet */
+ skip_nic_stats = true;
+ break;
+ }
+ rx_qid_to_stats_idx[queue_id] = stats_idx;
+ }
/* walk RX rings */
if (priv->rx) {
for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
struct gve_rx_ring *rx = &priv->rx[ring];
- data[i++] = rx->cnt;
data[i++] = rx->fill_cnt;
+ data[i++] = rx->cnt;
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->rx[ring].statss);
+ tmp_rx_bytes = rx->rbytes;
+ tmp_rx_skb_alloc_fail = rx->rx_skb_alloc_fail;
+ tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail;
+ tmp_rx_desc_err_dropped_pkt =
+ rx->rx_desc_err_dropped_pkt;
+ } while (u64_stats_fetch_retry(&priv->rx[ring].statss,
+ start));
+ data[i++] = tmp_rx_bytes;
+ /* rx dropped packets */
+ data[i++] = tmp_rx_skb_alloc_fail +
+ tmp_rx_buf_alloc_fail +
+ tmp_rx_desc_err_dropped_pkt;
+ data[i++] = rx->rx_copybreak_pkt;
+ data[i++] = rx->rx_copied_pkt;
+ /* stats from NIC */
+ if (skip_nic_stats) {
+ /* skip NIC rx stats */
+ i += NIC_RX_STATS_REPORT_NUM;
+ continue;
+ }
+ for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[rx_qid_to_stats_idx[ring] + j].value);
+
+ data[i++] = value;
+ }
}
} else {
i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS;
}
+
+ /* For tx cross-reporting stats, start from nic tx stats in report */
+ base_stats_idx = max_stats_idx;
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM * priv->tx_cfg.num_queues +
+ max_stats_idx;
+ /* Preprocess the stats report for tx, map queue id to start index */
+ skip_nic_stats = false;
+ for (stats_idx = base_stats_idx; stats_idx < max_stats_idx;
+ stats_idx += NIC_TX_STATS_REPORT_NUM) {
+ u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name);
+ u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+
+ if (stat_name == 0) {
+ /* no stats written by NIC yet */
+ skip_nic_stats = true;
+ break;
+ }
+ tx_qid_to_stats_idx[queue_id] = stats_idx;
+ }
/* walk TX rings */
if (priv->tx) {
for (ring = 0; ring < priv->tx_cfg.num_queues; ring++) {
@@ -151,14 +311,51 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tx->req;
data[i++] = tx->done;
+ do {
+ start =
+ u64_stats_fetch_begin(&priv->tx[ring].statss);
+ tmp_tx_bytes = tx->bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[ring].statss,
+ start));
+ data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
tx));
+ /* stats from NIC */
+ if (skip_nic_stats) {
+ /* skip NIC tx stats */
+ i += NIC_TX_STATS_REPORT_NUM;
+ continue;
+ }
+ for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
+ u64 value =
+ be64_to_cpu(report_stats[tx_qid_to_stats_idx[ring] + j].value);
+ data[i++] = value;
+ }
}
} else {
i += priv->tx_cfg.num_queues * NUM_GVE_TX_CNTS;
}
+
+ kfree(rx_qid_to_stats_idx);
+ kfree(tx_qid_to_stats_idx);
+ /* AQ Stats */
+ data[i++] = priv->adminq_prod_cnt;
+ data[i++] = priv->adminq_cmd_fail;
+ data[i++] = priv->adminq_timeouts;
+ data[i++] = priv->adminq_describe_device_cnt;
+ data[i++] = priv->adminq_cfg_device_resources_cnt;
+ data[i++] = priv->adminq_register_page_list_cnt;
+ data[i++] = priv->adminq_unregister_page_list_cnt;
+ data[i++] = priv->adminq_create_tx_queue_cnt;
+ data[i++] = priv->adminq_create_rx_queue_cnt;
+ data[i++] = priv->adminq_destroy_tx_queue_cnt;
+ data[i++] = priv->adminq_destroy_rx_queue_cnt;
+ data[i++] = priv->adminq_dcfg_device_resources_cnt;
+ data[i++] = priv->adminq_set_driver_parameter_cnt;
+ data[i++] = priv->adminq_report_stats_cnt;
+ data[i++] = priv->adminq_report_link_speed_cnt;
}
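
The two preprocessing loops above rely on a fixed layout of the shared report: driver-written tx entries first, then driver-written rx entries, then the NIC-written rx and tx blocks. A sketch of the offset arithmetic, kept symbolic since the per-queue entry counts (GVE_*_STATS_REPORT_NUM, NIC_*_STATS_REPORT_NUM) are defined in a gve.h hunk not shown here:

	/* Sketch: first index of each NIC-written block in stats_report->stats[],
	 * for T tx queues and R rx queues, matching gve_get_ethtool_stats().
	 */
	static int nic_rx_base(int T, int R)
	{
		return GVE_TX_STATS_REPORT_NUM * T + GVE_RX_STATS_REPORT_NUM * R;
	}

	static int nic_tx_base(int T, int R)
	{
		return nic_rx_base(T, R) + NIC_RX_STATS_REPORT_NUM * R;
	}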
static void gve_get_channels(struct net_device *netdev,
@@ -230,6 +427,95 @@ static int gve_user_reset(struct net_device *netdev, u32 *flags)
return -EOPNOTSUPP;
}
+static int gve_get_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *etuna, void *value)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+
+ switch (etuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)value = priv->rx_copybreak;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int gve_set_tunable(struct net_device *netdev,
+ const struct ethtool_tunable *etuna,
+ const void *value)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 len;
+
+ switch (etuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ len = *(u32 *)value;
+ if (len > PAGE_SIZE / 2)
+ return -EINVAL;
+ priv->rx_copybreak = len;
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 gve_get_priv_flags(struct net_device *netdev)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u32 ret_flags = 0;
+
+ /* Only 1 flag exists currently: report-stats (BIT(0)), so set that flag. */
+ if (priv->ethtool_flags & BIT(0))
+ ret_flags |= BIT(0);
+ return ret_flags;
+}
+
+static int gve_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ u64 ori_flags, new_flags;
+
+ ori_flags = READ_ONCE(priv->ethtool_flags);
+ new_flags = ori_flags;
+
+ /* Only one priv flag exists: report-stats (BIT(0)) */
+ if (flags & BIT(0))
+ new_flags |= BIT(0);
+ else
+ new_flags &= ~(BIT(0));
+ priv->ethtool_flags = new_flags;
+ /* start report-stats timer when user turns report stats on. */
+ if (flags & BIT(0)) {
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+ }
+ /* Zero off gve stats when report-stats turned off and
+  * delete report stats timer.
+  */
+ if (!(flags & BIT(0)) && (ori_flags & BIT(0))) {
+ int tx_stats_num = GVE_TX_STATS_REPORT_NUM *
+ priv->tx_cfg.num_queues;
+ int rx_stats_num = GVE_RX_STATS_REPORT_NUM *
+ priv->rx_cfg.num_queues;
+
+ memset(priv->stats_report->stats, 0, (tx_stats_num + rx_stats_num) *
+ sizeof(struct stats));
+ del_timer_sync(&priv->stats_report_timer);
+ }
+ return 0;
+}
+
+static int gve_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = gve_adminq_report_link_speed(priv);
+
+ cmd->base.speed = priv->link_speed;
+ return err;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
@@ -242,4 +528,9 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_ringparam = gve_get_ringparam,
.reset = gve_user_reset,
+ .get_tunable = gve_get_tunable,
+ .set_tunable = gve_set_tunable,
+ .get_priv_flags = gve_get_priv_flags,
+ .set_priv_flags = gve_set_priv_flags,
+ .get_link_ksettings = gve_get_link_ksettings
};
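
With these hooks wired up, the new features are reachable from a stock ethtool binary: assuming an interface named eth0, "ethtool --show-priv-flags eth0" lists report-stats, "ethtool --set-priv-flags eth0 report-stats on" arms the report timer via gve_set_priv_flags(), and "ethtool --set-tunable eth0 rx-copybreak 256" adjusts the copybreak threshold that gve_set_tunable() caps at PAGE_SIZE / 2.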
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index e032563ceefd..48a433154ce0 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -78,6 +78,66 @@ static void gve_free_counter_array(struct gve_priv *priv)
priv->counter_array = NULL;
}
+/* NIC requests to report stats */
+static void gve_stats_report_task(struct work_struct *work)
+{
+ struct gve_priv *priv = container_of(work, struct gve_priv,
+ stats_report_task);
+ if (gve_get_do_report_stats(priv)) {
+ gve_handle_report_stats(priv);
+ gve_clear_do_report_stats(priv);
+ }
+}
+
+static void gve_stats_report_schedule(struct gve_priv *priv)
+{
+ if (!gve_get_probe_in_progress(priv) &&
+ !gve_get_reset_in_progress(priv)) {
+ gve_set_do_report_stats(priv);
+ queue_work(priv->gve_wq, &priv->stats_report_task);
+ }
+}
+
+static void gve_stats_report_timer(struct timer_list *t)
+{
+ struct gve_priv *priv = from_timer(priv, t, stats_report_timer);
+
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+ gve_stats_report_schedule(priv);
+}
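
The timer callback deliberately does no work itself: it re-arms and defers to the ordered workqueue, so the admin-queue traffic in gve_handle_report_stats() never runs in atomic timer context. A minimal sketch of the same deferral pattern, with hypothetical names:

	/* Sketch only: periodic work deferred from timer to process context. */
	static void my_timer_fn(struct timer_list *t)
	{
		struct my_priv *p = from_timer(p, t, timer);

		mod_timer(&p->timer,
			  round_jiffies(jiffies + msecs_to_jiffies(p->period)));
		queue_work(p->wq, &p->work);	/* heavy lifting happens here */
	}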
+
+static int gve_alloc_stats_report(struct gve_priv *priv)
+{
+ int tx_stats_num, rx_stats_num;
+
+ tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) *
+ priv->tx_cfg.num_queues;
+ rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) *
+ priv->rx_cfg.num_queues;
+ priv->stats_report_len = sizeof(struct gve_stats_report) +
+ (tx_stats_num + rx_stats_num) *
+ sizeof(struct stats);
+ priv->stats_report =
+ dma_alloc_coherent(&priv->pdev->dev, priv->stats_report_len,
+ &priv->stats_report_bus, GFP_KERNEL);
+ if (!priv->stats_report)
+ return -ENOMEM;
+ /* Set up timer for the report-stats task */
+ timer_setup(&priv->stats_report_timer, gve_stats_report_timer, 0);
+ priv->stats_report_timer_period = GVE_STATS_REPORT_TIMER_PERIOD;
+ return 0;
+}
+
+static void gve_free_stats_report(struct gve_priv *priv)
+{
+ del_timer_sync(&priv->stats_report_timer);
+ dma_free_coherent(&priv->pdev->dev, priv->stats_report_len,
+ priv->stats_report, priv->stats_report_bus);
+ priv->stats_report = NULL;
+}
+
static irqreturn_t gve_mgmnt_intr(int irq, void *arg)
{
struct gve_priv *priv = arg;
@@ -270,6 +330,9 @@ static int gve_setup_device_resources(struct gve_priv *priv)
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
+ err = gve_alloc_stats_report(priv);
+ if (err)
+ goto abort_with_ntfy_blocks;
err = gve_adminq_configure_device_resources(priv,
priv->counter_array_bus,
priv->num_event_counters,
@@ -279,10 +342,18 @@ static int gve_setup_device_resources(struct gve_priv *priv)
dev_err(&priv->pdev->dev,
"could not setup device_resources: err=%d\n", err);
err = -ENXIO;
- goto abort_with_ntfy_blocks;
+ goto abort_with_stats_report;
}
+ err = gve_adminq_report_stats(priv, priv->stats_report_len,
+ priv->stats_report_bus,
+ GVE_STATS_REPORT_TIMER_PERIOD);
+ if (err)
+ dev_err(&priv->pdev->dev,
+ "Failed to report stats: err=%d\n", err);
gve_set_device_resources_ok(priv);
return 0;
+abort_with_stats_report:
+ gve_free_stats_report(priv);
abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
@@ -298,6 +369,13 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
/* Tell device its resources are being freed */
if (gve_get_device_resources_ok(priv)) {
+ /* detach the stats report */
+ err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to detach stats report: err=%d\n", err);
+ gve_trigger_reset(priv);
+ }
err = gve_adminq_deconfigure_device_resources(priv);
if (err) {
dev_err(&priv->pdev->dev,
@@ -308,6 +386,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
}
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
+ gve_free_stats_report(priv);
gve_clear_device_resources_ok(priv);
}
@@ -371,36 +450,37 @@ static int gve_create_rings(struct gve_priv *priv)
int err;
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_adminq_create_tx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create tx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "created tx queue %d\n", i);
+ err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d tx queues\n",
+ priv->tx_cfg.num_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
+ */
+ return err;
}
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_adminq_create_rx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev, "failed to create rx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- /* Rx data ring has been prefilled with packet buffers at
- * queue allocation time.
- * Write the doorbell to provide descriptor slots and packet
- * buffers to the NIC.
+ netif_dbg(priv, drv, priv->dev, "created %d tx queues\n",
+ priv->tx_cfg.num_queues);
+
+ err = gve_adminq_create_rx_queues(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev, "failed to create %d rx queues\n",
+ priv->rx_cfg.num_queues);
+ /* This failure will trigger a reset - no need to clean
+ * up
*/
- gve_rx_write_doorbell(priv, &priv->rx[i]);
- netif_dbg(priv, drv, priv->dev, "created rx queue %d\n", i);
+ return err;
}
+ netif_dbg(priv, drv, priv->dev, "created %d rx queues\n",
+ priv->rx_cfg.num_queues);
+
+ /* Rx data ring has been prefilled with packet buffers at queue
+ * allocation time.
+ * Write the doorbell to provide descriptor slots and packet buffers
+ * to the NIC.
+ */
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ gve_rx_write_doorbell(priv, &priv->rx[i]);
return 0;
}
@@ -458,34 +538,23 @@ free_tx:
static int gve_destroy_rings(struct gve_priv *priv)
{
int err;
- int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
- err = gve_adminq_destroy_tx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy tx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed tx queue %d\n", i);
+ err = gve_adminq_destroy_tx_queues(priv, priv->tx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy tx queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
}
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- err = gve_adminq_destroy_rx_queue(priv, i);
- if (err) {
- netif_err(priv, drv, priv->dev,
- "failed to destroy rx queue %d\n",
- i);
- /* This failure will trigger a reset - no need to clean
- * up
- */
- return err;
- }
- netif_dbg(priv, drv, priv->dev, "destroyed rx queue %d\n", i);
+ netif_dbg(priv, drv, priv->dev, "destroyed tx queues\n");
+ err = gve_adminq_destroy_rx_queues(priv, priv->rx_cfg.num_queues);
+ if (err) {
+ netif_err(priv, drv, priv->dev,
+ "failed to destroy rx queues\n");
+ /* This failure will trigger a reset - no need to clean up */
+ return err;
}
+ netif_dbg(priv, drv, priv->dev, "destroyed rx queues\n");
return 0;
}
@@ -514,14 +583,18 @@ static void gve_free_rings(struct gve_priv *priv)
}
}
-int gve_alloc_page(struct device *dev, struct page **page, dma_addr_t *dma,
+int gve_alloc_page(struct gve_priv *priv, struct device *dev,
+ struct page **page, dma_addr_t *dma,
enum dma_data_direction dir)
{
*page = alloc_page(GFP_KERNEL);
- if (!*page)
+ if (!*page) {
+ priv->page_alloc_fail++;
return -ENOMEM;
+ }
*dma = dma_map_page(dev, *page, 0, PAGE_SIZE, dir);
if (dma_mapping_error(dev, *dma)) {
+ priv->dma_mapping_error++;
put_page(*page);
return -ENOMEM;
}
@@ -556,7 +629,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
return -ENOMEM;
for (i = 0; i < pages; i++) {
- err = gve_alloc_page(&priv->pdev->dev, &qpl->pages[i],
+ err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id));
/* caller handles clean up */
@@ -695,8 +768,14 @@ static int gve_open(struct net_device *dev)
goto reset;
gve_set_device_rings_ok(priv);
+ if (gve_get_report_stats(priv))
+ mod_timer(&priv->stats_report_timer,
+ round_jiffies(jiffies +
+ msecs_to_jiffies(priv->stats_report_timer_period)));
+
gve_turnup(priv);
- netif_carrier_on(dev);
+ queue_work(priv->gve_wq, &priv->service_task);
+ priv->interface_up_cnt++;
return 0;
free_rings:
@@ -735,9 +814,11 @@ static int gve_close(struct net_device *dev)
goto err;
gve_clear_device_rings_ok(priv);
}
+ del_timer_sync(&priv->stats_report_timer);
gve_free_rings(priv);
gve_free_qpls(priv);
+ priv->interface_down_cnt++;
return 0;
err:
@@ -817,6 +898,7 @@ static void gve_turndown(struct gve_priv *priv)
netif_tx_disable(priv->dev);
gve_clear_napi_enabled(priv);
+ gve_clear_report_stats(priv);
}
static void gve_turnup(struct gve_priv *priv)
@@ -867,6 +949,10 @@ static void gve_handle_status(struct gve_priv *priv, u32 status)
dev_info(&priv->pdev->dev, "Device requested reset.\n");
gve_set_do_reset(priv);
}
+ if (GVE_DEVICE_STATUS_REPORT_STATS_MASK & status) {
+ priv->stats_report_trigger_cnt++;
+ gve_set_do_report_stats(priv);
+ }
}
static void gve_handle_reset(struct gve_priv *priv)
@@ -885,16 +971,95 @@ static void gve_handle_reset(struct gve_priv *priv)
}
}
-/* Handle NIC status register changes and reset requests */
+void gve_handle_report_stats(struct gve_priv *priv)
+{
+ int idx, stats_idx = 0;
+ u64 tx_bytes; /* bytes_done is a 64-bit counter; int would truncate */
+ unsigned int start = 0;
+ struct stats *stats = priv->stats_report->stats;
+
+ if (!gve_get_report_stats(priv))
+ return;
+
+ be64_add_cpu(&priv->stats_report->written_count, 1);
+ /* tx stats */
+ if (priv->tx) {
+ for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
+ do {
+ start = u64_stats_fetch_begin(&priv->tx[idx].statss);
+ tx_bytes = priv->tx[idx].bytes_done;
+ } while (u64_stats_fetch_retry(&priv->tx[idx].statss, start));
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_WAKE_CNT),
+ .value = cpu_to_be64(priv->tx[idx].wake_queue),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_STOP_CNT),
+ .value = cpu_to_be64(priv->tx[idx].stop_queue),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_FRAMES_SENT),
+ .value = cpu_to_be64(priv->tx[idx].req),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_BYTES_SENT),
+ .value = cpu_to_be64(tx_bytes),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_LAST_COMPLETION_PROCESSED),
+ .value = cpu_to_be64(priv->tx[idx].done),
+ .queue_id = cpu_to_be32(idx),
+ };
+ }
+ }
+ /* rx stats */
+ if (priv->rx) {
+ for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(RX_NEXT_EXPECTED_SEQUENCE),
+ .value = cpu_to_be64(priv->rx[idx].desc.seqno),
+ .queue_id = cpu_to_be32(idx),
+ };
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(RX_BUFFERS_POSTED),
+ .value = cpu_to_be64(priv->rx[idx].fill_cnt),
+ .queue_id = cpu_to_be32(idx),
+ };
+ }
+ }
+}
+
+static void gve_handle_link_status(struct gve_priv *priv, bool link_status)
+{
+ if (!gve_get_napi_enabled(priv))
+ return;
+
+ if (link_status == netif_carrier_ok(priv->dev))
+ return;
+
+ if (link_status) {
+ netdev_info(priv->dev, "Device link is up.\n");
+ netif_carrier_on(priv->dev);
+ } else {
+ netdev_info(priv->dev, "Device link is down.\n");
+ netif_carrier_off(priv->dev);
+ }
+}
+
+/* Handle NIC status register changes, reset requests and report stats */
static void gve_service_task(struct work_struct *work)
{
struct gve_priv *priv = container_of(work, struct gve_priv,
service_task);
+ u32 status = ioread32be(&priv->reg_bar0->device_status);
- gve_handle_status(priv,
- ioread32be(&priv->reg_bar0->device_status));
+ gve_handle_status(priv, status);
gve_handle_reset(priv);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
}
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
@@ -924,7 +1089,7 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->dev->max_mtu = PAGE_SIZE;
err = gve_adminq_set_mtu(priv, priv->dev->mtu);
if (err) {
- netif_err(priv, drv, priv->dev, "Could not set mtu");
+ dev_err(&priv->pdev->dev, "Could not set mtu\n");
goto err;
}
}
@@ -964,10 +1129,10 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
priv->rx_cfg.num_queues);
}
- netif_info(priv, drv, priv->dev, "TX queues %d, RX queues %d\n",
- priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
- netif_info(priv, drv, priv->dev, "Max TX queues %d, Max RX queues %d\n",
- priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
+ dev_info(&priv->pdev->dev, "TX queues %d, RX queues %d\n",
+ priv->tx_cfg.num_queues, priv->rx_cfg.num_queues);
+ dev_info(&priv->pdev->dev, "Max TX queues %d, Max RX queues %d\n",
+ priv->tx_cfg.max_queues, priv->rx_cfg.max_queues);
setup_device:
err = gve_setup_device_resources(priv);
@@ -1047,6 +1212,10 @@ int gve_reset(struct gve_priv *priv, bool attempt_teardown)
/* Set it all back up */
err = gve_reset_recovery(priv, was_up);
gve_clear_reset_in_progress(priv);
+ priv->reset_cnt++;
+ priv->interface_up_cnt = 0;
+ priv->interface_down_cnt = 0;
+ priv->stats_report_trigger_cnt = 0;
return err;
}
@@ -1149,6 +1318,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->db_bar2 = db_bar;
priv->service_task_flags = 0x0;
priv->state_flags = 0x0;
+ priv->ethtool_flags = 0x0;
gve_set_probe_in_progress(priv);
priv->gve_wq = alloc_ordered_workqueue("gve", 0);
@@ -1158,6 +1328,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_netdev;
}
INIT_WORK(&priv->service_task, gve_service_task);
+ INIT_WORK(&priv->stats_report_task, gve_stats_report_task);
priv->tx_cfg.max_queues = max_tx_queues;
priv->rx_cfg.max_queues = max_rx_queues;
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index 84ab8893aadd..fb655463c357 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -23,5 +23,6 @@ struct gve_registers {
enum gve_device_status_flags {
GVE_DEVICE_STATUS_RESET_MASK = BIT(1),
GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2),
+ GVE_DEVICE_STATUS_REPORT_STATS_MASK = BIT(3),
};
#endif /* _GVE_REGISTER_H_ */
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 9f52e72ff641..008fa897a3e6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -225,7 +225,8 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
-static struct sk_buff *gve_rx_copy(struct net_device *dev,
+static struct sk_buff *gve_rx_copy(struct gve_rx_ring *rx,
+ struct net_device *dev,
struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
u16 len)
@@ -242,6 +243,11 @@ static struct sk_buff *gve_rx_copy(struct net_device *dev,
skb_copy_to_linear_data(skb, va, len);
skb->protocol = eth_type_trans(skb, dev);
+
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ u64_stats_update_end(&rx->statss);
+
return skb;
}
@@ -284,8 +290,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
u16 len;
/* drop this packet */
- if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR))
+ if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
page_info = &rx->data.page_info[idx];
@@ -300,11 +310,14 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
if (PAGE_SIZE == 4096) {
if (len <= priv->rx_copybreak) {
/* Just copy small packets */
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
goto have_skb;
}
if (unlikely(!gve_can_recycle_pages(dev))) {
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
goto have_skb;
}
pagecount = page_count(page_info->page);
@@ -314,8 +327,12 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
* stack.
*/
skb = gve_rx_add_frags(dev, napi, page_info, len);
- if (!skb)
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
/* Make sure the kernel stack can't release the page */
get_page(page_info->page);
/* "flip" to other packet buffer on this page */
@@ -324,21 +341,25 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
/* We have previously passed the other half of this
* page up the stack, but it has not yet been freed.
*/
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
} else {
WARN(pagecount < 1, "Pagecount should never be < 1");
return false;
}
} else {
- skb = gve_rx_copy(dev, napi, page_info, len);
+ skb = gve_rx_copy(rx, dev, napi, page_info, len);
}
have_skb:
/* We didn't manage to allocate an skb but we haven't had any
* reset worthy failures.
*/
- if (!skb)
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
return true;
+ }
if (likely(feat & NETIF_F_RXCSUM)) {
/* NIC passes up the partial sum */
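
Every counter bump added in this file follows the u64_stats seqcount discipline: writers bracket updates with u64_stats_update_begin/end, and readers, such as gve_get_ethtool_stats() earlier in this patch, retry with u64_stats_fetch_begin/retry until they see a consistent snapshot. A self-contained sketch of the pairing, with hypothetical structure names (the syncp would be initialized elsewhere with u64_stats_init()):

	#include <linux/u64_stats_sync.h>

	struct my_ring_stats {
		u64 packets;
		struct u64_stats_sync syncp;
	};

	/* writer side, e.g. the rx fast path */
	static void my_count_packet(struct my_ring_stats *s)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets++;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side, e.g. an ethtool handler */
	static u64 my_read_packets(struct my_ring_stats *s)
	{
		unsigned int start;
		u64 val;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			val = s->packets;
		} while (u64_stats_fetch_retry(&s->syncp, start));
		return val;
	}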
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 08339278c722..00fafc0f8512 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -270,7 +270,7 @@ static void hnae_fini_queue(struct hnae_queue *q)
hnae_fini_ring(&q->rx_ring);
}
-/**
+/*
* ae_chain - define ae chain head
*/
static RAW_NOTIFIER_HEAD(ae_chain);
@@ -438,7 +438,7 @@ EXPORT_SYMBOL(hnae_ae_register);
/**
* hnae_ae_unregister - unregisters a HNAE AE engine
- * @cdev: the device to unregister
+ * @hdev: the device to unregister
*/
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
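
The comment churn in this file and in the hns_dsaf_* files below is kernel-doc repair: every @name line must match an actual parameter of the function it documents, or W=1 builds and scripts/kernel-doc -none warn. For reference, the expected shape, shown on a hypothetical function:

	/**
	 * my_func - one-line summary of the function
	 * @dev: device the operation applies to
	 * @len: number of bytes to process
	 *
	 * Return: 0 on success, a negative errno otherwise.
	 */
	static int my_func(struct device *dev, size_t len);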
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index b43dec0560a8..b98244f75ab9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -13,8 +13,6 @@
#include "hns_dsaf_ppe.h"
#include "hns_dsaf_rcb.h"
-#define AE_NAME_PORT_ID_IDX 6
-
static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
{
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index 9a907947ba19..4a448138b4ec 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -374,11 +374,12 @@ static void hns_mac_param_get(struct mac_params *param,
}
/**
- *hns_mac_queue_config_bc_en - set broadcast rx&tx enable
- *@mac_cb: mac device
- *@queue: queue number
- *@en:enable
- *retuen 0 - success , negative --fail
+ * hns_mac_queue_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @port_num: queue number
+ * @vlan_id: vlan id
+ * @enable: enable
+ * return 0 - success , negative --fail
*/
static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
u32 port_num, u16 vlan_id, bool enable)
@@ -408,11 +409,11 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
}
/**
- *hns_mac_vm_config_bc_en - set broadcast rx&tx enable
- *@mac_cb: mac device
- *@vmid: vm id
- *@en:enable
- *retuen 0 - success , negative --fail
+ * hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @enable: enable
+ * return 0 - success , negative --fail
*/
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
{
@@ -542,8 +543,8 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb)
/**
* hns_mac_get_autoneg - get auto autonegotiation
* @mac_cb: mac control block
- * @enable: enable or not
- * retuen 0 - success , negative --fail
+ * @auto_neg: output pointer to autoneg result
+ * return 0 - success , negative --fail
*/
void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
{
@@ -560,7 +561,7 @@ void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
* @mac_cb: mac control block
* @rx_en: rx enable status
* @tx_en: tx enable status
- * retuen 0 - success , negative --fail
+ * return 0 - success , negative --fail
*/
void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
{
@@ -578,7 +579,7 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
* hns_mac_set_autoneg - set auto autonegotiation
* @mac_cb: mac control block
* @enable: enable or not
- * retuen 0 - success , negative --fail
+ * return 0 - success , negative --fail
*/
int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
{
@@ -623,7 +624,7 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
/**
* hns_mac_init_ex - mac init
* @mac_cb: mac control block
- * retuen 0 - success , negative --fail
+ * return 0 - success , negative --fail
*/
static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
{
@@ -800,7 +801,6 @@ static const struct {
/**
*hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
- *@np:device node
* return: 0 --success, negative --fail
*/
static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
@@ -951,7 +951,7 @@ static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
/**
* hns_mac_get_mode - get mac mode
* @phy_if: phy interface
- * retuen 0 - gmac, 1 - xgmac , negative --fail
+ * return 0 - gmac, 1 - xgmac , negative --fail
*/
static int hns_mac_get_mode(phy_interface_t phy_if)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index acfa86e5296f..87d3db4666df 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -207,7 +207,7 @@ static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
{
@@ -216,8 +216,8 @@ static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce
- * @dsaf_id: dsa fabric id
- * @hns_dsaf_reg_cnt_clr_ce: config value
+ * @dsaf_dev: dsa fabric id
+ * @reg_cnt_clr_ce: config value
*/
static void
hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
@@ -228,8 +228,8 @@ hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
/**
* hns_ppe_qid_cfg - config ppe qid
- * @dsaf_id: dsa fabric id
- * @pppe_qid_cfg: value array
+ * @dsaf_dev: dsa fabric id
+ * @qid_cfg: value array
*/
static void
hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
@@ -285,8 +285,8 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sw_port_type_cfg - cfg sw type
- * @dsaf_id: dsa fabric id
- * @psw_port_type: array
+ * @dsaf_dev: dsa fabric id
+ * @port_type: array
*/
static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
enum dsaf_sw_port_type port_type)
@@ -303,8 +303,8 @@ static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_stp_port_type_cfg - cfg stp type
- * @dsaf_id: dsa fabric id
- * @pstp_port_type: array
+ * @dsaf_dev: dsa fabric id
+ * @port_type: array
*/
static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
enum dsaf_stp_port_type port_type)
@@ -323,7 +323,7 @@ static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
(AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM)
/**
* hns_dsaf_sbm_cfg - config sbm
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
{
@@ -342,7 +342,7 @@ static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_cfg_mib_en - config sbm
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
{
@@ -387,7 +387,7 @@ static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_bp_wl_cfg - config sbm
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
{
@@ -556,7 +556,7 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_voq_bp_all_thrd_cfg - voq
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
{
@@ -599,7 +599,7 @@ static void hns_dsaf_tbl_tcam_match_cfg(
/**
* hns_dsaf_tbl_tcam_data_cfg - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
* @ptbl_tcam_data: addr
*/
static void hns_dsaf_tbl_tcam_data_cfg(
@@ -614,8 +614,8 @@ static void hns_dsaf_tbl_tcam_data_cfg(
/**
* dsaf_tbl_tcam_mcast_cfg - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_tcam_mcast: addr
+ * @dsaf_dev: dsa fabric id
+ * @mcast: addr
*/
static void hns_dsaf_tbl_tcam_mcast_cfg(
struct dsaf_device *dsaf_dev,
@@ -648,8 +648,8 @@ static void hns_dsaf_tbl_tcam_mcast_cfg(
/**
* hns_dsaf_tbl_tcam_ucast_cfg - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_tcam_ucast: addr
+ * @dsaf_dev: dsa fabric id
+ * @tbl_tcam_ucast: addr
*/
static void hns_dsaf_tbl_tcam_ucast_cfg(
struct dsaf_device *dsaf_dev,
@@ -674,8 +674,8 @@ static void hns_dsaf_tbl_tcam_ucast_cfg(
/**
* hns_dsaf_tbl_line_cfg - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_lin: addr
+ * @dsaf_dev: dsa fabric id
+ * @tbl_lin: addr
*/
static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
struct dsaf_tbl_line_cfg *tbl_lin)
@@ -695,7 +695,7 @@ static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_tbl_tcam_mcast_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
{
@@ -710,7 +710,7 @@ static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_line_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
{
@@ -725,7 +725,7 @@ static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_tcam_data_mcast_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_data_mcast_pul(
struct dsaf_device *dsaf_dev)
@@ -743,7 +743,7 @@ static void hns_dsaf_tbl_tcam_data_mcast_pul(
/**
* hns_dsaf_tbl_tcam_data_ucast_pul - tbl
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_data_ucast_pul(
struct dsaf_device *dsaf_dev)
@@ -768,8 +768,7 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
/**
* hns_dsaf_tbl_stat_en - tbl
- * @dsaf_id: dsa fabric id
- * @ptbl_stat_en: addr
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
{
@@ -785,7 +784,7 @@ static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_rocee_bp_en - rocee back press enable
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
{
@@ -852,9 +851,9 @@ static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_single_line_tbl_cfg - INT
- * @dsaf_id: dsa fabric id
- * @address:
- * @ptbl_line:
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_line: the line
*/
static void hns_dsaf_single_line_tbl_cfg(
struct dsaf_device *dsaf_dev,
@@ -876,9 +875,10 @@ static void hns_dsaf_single_line_tbl_cfg(
/**
* hns_dsaf_tcam_uc_cfg - INT
- * @dsaf_id: dsa fabric id
- * @address,
- * @ptbl_tcam_data,
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
*/
static void hns_dsaf_tcam_uc_cfg(
struct dsaf_device *dsaf_dev, u32 address,
@@ -904,7 +904,8 @@ static void hns_dsaf_tcam_uc_cfg(
* @dsaf_dev: dsa fabric device struct pointer
* @address: tcam index
* @ptbl_tcam_data: tcam data struct pointer
- * @ptbl_tcam_mcast: tcam mask struct pointer, it must be null for HNSv1
+ * @ptbl_tcam_mask: tcam mask struct pointer, it must be null for HNSv1
+ * @ptbl_tcam_mcast: tcam data struct pointer
*/
static void hns_dsaf_tcam_mc_cfg(
struct dsaf_device *dsaf_dev, u32 address,
@@ -933,8 +934,10 @@ static void hns_dsaf_tcam_mc_cfg(
/**
* hns_dsaf_tcam_uc_cfg_vague - INT
* @dsaf_dev: dsa fabric device struct pointer
- * @address,
- * @ptbl_tcam_data,
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_uc: the unicast data
*/
static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
u32 address,
@@ -960,10 +963,10 @@ static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_tcam_mc_cfg_vague - INT
* @dsaf_dev: dsa fabric device struct pointer
- * @address,
- * @ptbl_tcam_data,
- * @ptbl_tcam_mask
- * @ptbl_tcam_mcast
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_mc: the multicast data
*/
static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
u32 address,
@@ -988,8 +991,8 @@ static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
/**
* hns_dsaf_tcam_mc_invld - INT
- * @dsaf_id: dsa fabric id
- * @address
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
*/
static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
{
@@ -1024,10 +1027,10 @@ hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
/**
* hns_dsaf_tcam_uc_get - INT
- * @dsaf_id: dsa fabric id
- * @address
- * @ptbl_tcam_data
- * @ptbl_tcam_ucast
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
*/
static void hns_dsaf_tcam_uc_get(
struct dsaf_device *dsaf_dev, u32 address,
@@ -1077,10 +1080,10 @@ static void hns_dsaf_tcam_uc_get(
/**
* hns_dsaf_tcam_mc_get - INT
- * @dsaf_id: dsa fabric id
- * @address
- * @ptbl_tcam_data
- * @ptbl_tcam_ucast
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_mcast: tcam multicast data
*/
static void hns_dsaf_tcam_mc_get(
struct dsaf_device *dsaf_dev, u32 address,
@@ -1127,7 +1130,7 @@ static void hns_dsaf_tcam_mc_get(
/**
* hns_dsaf_tbl_line_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
{
@@ -1141,7 +1144,7 @@ static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_tcam_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
{
@@ -1156,7 +1159,9 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
- * @mac_cb: mac contrl block
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: mac id
+ * @tc_en: traffic class
*/
static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
int mac_id, int tc_en)
@@ -1209,8 +1214,7 @@ void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
/**
* hns_dsaf_tbl_tcam_init - INT
- * @dsaf_id: dsa fabric id
- * @dsaf_mode
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
{
@@ -1263,7 +1267,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_inode_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
{
@@ -1315,7 +1319,7 @@ static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_sbm_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
{
@@ -1369,7 +1373,7 @@ static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_tbl_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
{
@@ -1381,7 +1385,7 @@ static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_voq_init - INT
- * @dsaf_id: dsa fabric id
+ * @dsaf_dev: dsa fabric id
*/
static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
{
@@ -1435,7 +1439,7 @@ static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
/**
* hns_dsaf_init - init dsa fabric
* @dsaf_dev: dsa fabric device struct pointer
- * retuen 0 - success , negative --fail
+ * return 0 - success , negative --fail
*/
static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
{
@@ -2099,7 +2103,7 @@ static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
/**
* hns_dsaf_free_dev - free dev mem
- * @dev: struct device pointer
+ * @dsaf_dev: struct device pointer
*/
static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
{
@@ -2108,9 +2112,9 @@ static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
/**
* dsaf_pfc_unit_cnt - set pfc unit count
- * @dsaf_id: dsa fabric id
- * @pport_rate: value array
- * @pdsaf_pfc_unit_cnt: value array
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: id in use
+ * @rate: value array
*/
static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
enum dsaf_port_rate_mode rate)
@@ -2139,8 +2143,9 @@ static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
/**
* dsaf_port_work_rate_cfg - fifo
- * @dsaf_id: dsa fabric id
- * @xge_ge_work_mode
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: mac id
+ * @rate_mode: value array
*/
static void
hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
@@ -2253,7 +2258,8 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
/**
*hns_dsaf_get_regs - dump dsaf regs
- *@dsaf_dev: dsaf device
+ *@ddev: dsaf device
+ *@port: port
*@data:data for value of regs
*/
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
@@ -2690,6 +2696,7 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
/**
*hns_dsaf_get_sset_count - get dsaf string set count
+ *@dsaf_dev: dsaf device
*@stringset: type of values in data
*return dsaf string name count
*/
@@ -2711,6 +2718,7 @@ int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
*@stringset:srting set index
*@data:strings name value
*@port:port index
+ *@dsaf_dev: dsaf device
*/
void hns_dsaf_get_strings(int stringset, u8 *data, int port,
struct dsaf_device *dsaf_dev)
@@ -2943,7 +2951,7 @@ int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
/**
* dsaf_probe - probo dsaf dev
* @pdev: dasf platform device
- * retuen 0 - success , negative --fail
+ * return 0 - success , negative --fail
*/
static int hns_dsaf_probe(struct platform_device *pdev)
{
@@ -3038,8 +3046,8 @@ module_platform_driver(g_dsaf_driver);
/**
* hns_dsaf_roce_reset - reset dsaf and roce
* @dsaf_fwnode: Pointer to framework node for the dasf
- * @enable: false - request reset , true - drop reset
- * retuen 0 - success , negative -fail
+ * @dereset: false - request reset , true - drop reset
+ * return 0 - success , negative -fail
*/
int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index a769273b36f7..a9aca8c24e90 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -330,11 +330,12 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
* hns_dsaf_srst_chns - reset dsaf channels
* @dsaf_dev: dsaf device struct pointer
* @msk: xbar channels mask value:
+ * @dereset: false - request reset , true - drop reset
+ *
* bit0-5 for xge0-5
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
@@ -353,11 +354,12 @@ hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
* hns_dsaf_srst_chns - reset dsaf channels
* @dsaf_dev: dsaf device struct pointer
* @msk: xbar channels mask value:
+ * @dereset: false - request reset , true - drop reset
+ *
* bit0-5 for xge0-5
* bit6-11 for ppe0-5
* bit12-17 for roce0-5
* bit18-19 for com/dfx
- * @dereset: false - request reset , true - drop reset
*/
static void
hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
@@ -612,7 +614,8 @@ static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
/**
* hns_mac_config_sds_loopback - set loop back for serdes
* @mac_cb: mac control block
- * retuen 0 == success
+ * @en: enable or disable
+ * return 0 == success
*/
static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 2b34b553acf3..d0f8b1fff333 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -66,8 +66,8 @@ hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
/**
* hns_ppe_common_get_cfg - get ppe common config
* @dsaf_dev: dasf device
- * comm_index: common index
- * retuen 0 - success , negative --fail
+ * @comm_index: common index
+ * return 0 - success , negative --fail
*/
static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
{
@@ -143,7 +143,7 @@ static void hns_ppe_set_vlan_strip(struct hns_ppe_cb *ppe_cb, int en)
/**
* hns_ppe_checksum_hw - set ppe checksum caculate
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
* @value: value
*/
static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
@@ -179,7 +179,7 @@ static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
/**
* hns_ppe_set_port_mode - set port mode
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
* @mode: port mode
*/
static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
@@ -344,7 +344,7 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
/**
* ppe_uninit_hw - uninit ppe
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
*/
static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
{
@@ -384,7 +384,8 @@ void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
/**
* hns_ppe_reset - reinit ppe/rcb hw
* @dsaf_dev: dasf device
- * retuen void
+ * @ppe_common_index: the index
+ * return void
*/
void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
{
@@ -455,7 +456,7 @@ int hns_ppe_get_regs_count(void)
/**
* ppe_get_strings - get ppe srting
- * @ppe_device: ppe device
+ * @ppe_cb: ppe device
* @stringset: string set type
* @data: output string
*/
@@ -513,7 +514,7 @@ void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
/**
* hns_ppe_init - init ppe device
* @dsaf_dev: dasf device
- * retuen 0 - success , negative --fail
+ * return 0 - success , negative --fail
*/
int hns_ppe_init(struct dsaf_device *dsaf_dev)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 5453597ec629..b6c8910cf7ba 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -34,7 +34,7 @@
/**
*hns_rcb_wait_fbd_clean - clean fbd
*@qs: ring struct pointer array
- *@qnum: num of array
+ *@q_num: num of array
*@flag: tx or rx flag
*/
void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
@@ -191,7 +191,8 @@ void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
/**
*hns_rcb_ring_enable_hw - enable ring
- *@ring: rcb ring
+ *@q: rcb ring
+ *@val: value to write
*/
void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
{
@@ -844,7 +845,7 @@ void hns_rcb_update_stats(struct hnae_queue *queue)
/**
*hns_rcb_get_stats - get rcb statistic
- *@ring: rcb ring
+ *@queue: rcb ring
*@data:statistic value
*/
void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 0a3dbab2dfc9..7e3609ce112a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -130,7 +130,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
/**
*hns_xgmac_enable - enable xgmac port
- *@drv: mac driver
+ *@mac_drv: mac driver
*@mode: mode of mac port
*/
static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
@@ -242,7 +242,8 @@ static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
/**
*hns_xgmac_pausefrm_cfg - set pause param about xgmac
*@mac_drv: mac driver
- *@newval:enable of pad and crc
+ *@rx_en: enable receive
+ *@tx_en: enable transmit
*/
static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
{
@@ -490,7 +491,6 @@ static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
/**
*hns_xgmac_get_regs - dump xgmac regs
*@mac_drv: mac driver
- *@cmd:ethtool cmd
*@data:data for value of regs
*/
static void hns_xgmac_get_regs(void *mac_drv, void *data)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 22522f8a5299..858cb293152a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
@@ -557,10 +558,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
skb = *out_skb = napi_alloc_skb(&ring_data->napi,
HNS_RX_HEAD_SIZE);
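
net_prefetch() is the generic replacement for the open-coded pair removed above: it prefetches the first cache line of the packet headers and, on architectures whose cache lines are shorter than 128 bytes, the following one too. A sketch of what the helper does (illustrative, not a verbatim quote of the core networking header):

	static inline void net_prefetch(void *p)
	{
		prefetch(p);
	#if L1_CACHE_BYTES < 128
		prefetch((u8 *)p + L1_CACHE_BYTES);
	#endif
	}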
@@ -754,6 +752,8 @@ static void hns_update_rx_rate(struct hnae_ring *ring)
/**
* smooth_alg - smoothing algrithm for adjusting coalesce parameter
+ * @new_param: new value
+ * @old_param: old value
**/
static u32 smooth_alg(u32 new_param, u32 old_param)
{
@@ -1293,6 +1293,7 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+ irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
ret = request_irq(rd->ring->irq,
hns_irq_handle, 0, rd->ring->ring_name, rd);
if (ret) {
@@ -1300,7 +1301,6 @@ static int hns_nic_init_irq(struct hns_nic_priv *priv)
rd->ring->irq);
goto out_free_irq;
}
- disable_irq(rd->ring->irq);
cpu = hns_nic_init_affinity_mask(h->q_num, i,
rd->ring, &rd->mask);
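
Setting IRQ_NOAUTOEN before request_irq() closes a small window: the line now comes up disabled atomically, instead of being enabled by request_irq() and then disabled again a moment later, during which a stray interrupt could fire before NAPI is ready. The pattern, sketched with hypothetical names:

	/* Sketch: request an IRQ that stays masked until explicitly enabled. */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	ret = request_irq(irq, my_handler, 0, "my-ring", my_data);
	if (ret)
		return ret;
	/* ... later, once the ring and NAPI are set up ... */
	enable_irq(irq);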
@@ -1831,9 +1831,8 @@ static int hns_nic_uc_unsync(struct net_device *netdev,
}
/**
- * nic_set_multicast_list - set mutl mac address
- * @netdev: net device
- * @p: mac address
+ * hns_set_multicast_list - set multicast mac address
+ * @ndev: net device
*
* return void
*/
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 14e60c9e491d..7165da0ee9aa 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -462,7 +462,7 @@ static int __lb_clean_rings(struct hns_nic_priv *priv,
}
/**
- * nic_run_loopback_test - run loopback test
+ * __lb_run_test - run loopback test
* @ndev: net device
* @loop_mode: loopback mode
*/
@@ -971,7 +971,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
/**
- * nic_get_sset_count - get string set count witch returned by nic_get_strings.
+ * hns_get_sset_count - get string set count returned by nic_get_strings
* @netdev: net device
* @stringset: string set index, 0: self test string; 1: statistics string.
*
@@ -1027,7 +1027,7 @@ static int hns_phy_led_set(struct net_device *netdev, int value)
}
/**
- * nic_set_phys_id - set phy identify LED.
+ * hns_set_phys_id - set phy identify LED.
* @netdev: net device
* @state: LED state.
*
@@ -1125,7 +1125,7 @@ static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
}
/**
- * nic_get_regs_len - get total register len.
+ * hns_get_regs_len - get total register len.
* @net_dev: net device
*
* Return total register len.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 088550db2de7..912c51e327d6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -34,6 +34,13 @@
#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
+/* Device version */
+#define HNAE3_DEVICE_VERSION_V1 0x00020
+#define HNAE3_DEVICE_VERSION_V2 0x00021
+#define HNAE3_DEVICE_VERSION_V3 0x00030
+
+#define HNAE3_PCI_REVISION_BIT_SIZE 8
+
/* Device IDs */
#define HNAE3_DEV_ID_GE 0xA220
#define HNAE3_DEV_ID_25GE 0xA221
@@ -42,8 +49,9 @@
#define HNAE3_DEV_ID_50GE_RDMA 0xA224
#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225
#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226
-#define HNAE3_DEV_ID_100G_VF 0xA22E
-#define HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF 0xA22F
+#define HNAE3_DEV_ID_200G_RDMA 0xA228
+#define HNAE3_DEV_ID_VF 0xA22E
+#define HNAE3_DEV_ID_RDMA_DCB_PFC_VF 0xA22F
#define HNAE3_CLASS_NAME_SIZE 16
@@ -53,8 +61,6 @@
#define HNAE3_KNIC_CLIENT_INITED_B 0x3
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
-#define HNAE3_DEV_SUPPORT_FD_B 0x6
-#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,11 +71,67 @@
#define hnae3_dev_dcb_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+enum HNAE3_DEV_CAP_BITS {
+ HNAE3_DEV_SUPPORT_FD_B,
+ HNAE3_DEV_SUPPORT_GRO_B,
+ HNAE3_DEV_SUPPORT_FEC_B,
+ HNAE3_DEV_SUPPORT_UDP_GSO_B,
+ HNAE3_DEV_SUPPORT_QB_B,
+ HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B,
+ HNAE3_DEV_SUPPORT_PTP_B,
+ HNAE3_DEV_SUPPORT_INT_QL_B,
+ HNAE3_DEV_SUPPORT_SIMPLE_BD_B,
+ HNAE3_DEV_SUPPORT_TX_PUSH_B,
+ HNAE3_DEV_SUPPORT_PHY_IMP_B,
+ HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
+ HNAE3_DEV_SUPPORT_HW_PAD_B,
+ HNAE3_DEV_SUPPORT_STASH_B,
+};
+
#define hnae3_dev_fd_supported(hdev) \
- hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
#define hnae3_dev_gro_supported(hdev) \
- hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_fec_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_udp_gso_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_qb_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_QB_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_fd_forward_tc_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_ptp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PTP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_int_ql_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_simple_bd_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_SIMPLE_BD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tx_push_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_phy_imp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tqp_txrx_indep_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_hw_pad_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_HW_PAD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_stash_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -152,6 +214,7 @@ enum hnae3_hw_error_type {
HNAE3_PPU_POISON_ERROR,
HNAE3_CMDQ_ECC_ERROR,
HNAE3_IMP_RD_POISON_ERROR,
+ HNAE3_ROCEE_AXI_RESP_ERROR,
};
enum hnae3_reset_type {
@@ -207,6 +270,17 @@ struct hnae3_ring_chain_node {
#define HNAE3_IS_TX_RING(node) \
(((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
+/* device specification info from firmware */
+struct hnae3_dev_specs {
+ u32 mac_entry_num; /* number of mac-vlan table entries */
+ u32 mng_entry_num; /* number of manager table entries */
+ u32 max_tm_rate;
+ u16 rss_ind_tbl_size;
+ u16 rss_key_size;
+ u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
+ u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+};
+
struct hnae3_client_ops {
int (*init_instance)(struct hnae3_handle *handle);
void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
@@ -227,12 +301,16 @@ struct hnae3_client {
struct list_head node;
};
+#define HNAE3_DEV_CAPS_MAX_NUM 96
struct hnae3_ae_dev {
struct pci_dev *pdev;
const struct hnae3_ae_ops *ops;
struct list_head node;
u32 flag;
unsigned long hw_err_reset_req;
+ struct hnae3_dev_specs dev_specs;
+ u32 dev_version;
+ unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
void *priv;
};
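The scattered pdev->revision checks give way to two firmware-derived pieces of state: dev_version, which packs the reported hardware version above the PCI revision byte, and a caps bitmap sized generously for future bits. A minimal sketch of the idiom using the names introduced above (my_ae_dev is illustrative):

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_CAPS_MAX_NUM 96

struct my_ae_dev {
	u32 dev_version;	/* (hw version << 8) | pdev->revision */
	unsigned long caps[BITS_TO_LONGS(MY_CAPS_MAX_NUM)];
};

/* One test_bit() per feature query instead of PCI revision
 * comparisons scattered across the driver.
 */
static bool my_dev_fd_supported(struct my_ae_dev *ae_dev)
{
	return test_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
}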
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index fe7fb565da19..dc9a85745e62 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -15,11 +15,12 @@ static struct dentry *hns3_dbgfs_root;
static int hns3_dbg_queue_info(struct hnae3_handle *h,
const char *cmd_buf)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hns3_nic_priv *priv = h->priv;
struct hns3_enet_ring *ring;
u32 base_add_l, base_add_h;
u32 queue_num, queue_max;
- u32 value, i = 0;
+ u32 value, i;
int cnt;
if (!priv->ring) {
@@ -118,8 +119,25 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
value = readl_relaxed(ring->tqp->io_base +
HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
- dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n\n", i,
- value);
+ dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG);
+ dev_info(&h->pdev->dev, "TX/RX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+
+ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) {
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+ }
+
+ dev_info(&h->pdev->dev, "\n");
}
return 0;
@@ -244,6 +262,8 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "queue info <number>\n");
dev_info(&h->pdev->dev, "queue map\n");
dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
+ dev_info(&h->pdev->dev, "dev capability\n");
+ dev_info(&h->pdev->dev, "dev spec\n");
if (!hns3_is_phys_func(h->pdev))
return;
@@ -264,6 +284,7 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump intr\n");
memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
@@ -284,6 +305,52 @@ static void hns3_dbg_help(struct hnae3_handle *h)
dev_info(&h->pdev->dev, "%s", printf_buf);
}
+static void hns3_dbg_dev_caps(struct hnae3_handle *h)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ unsigned long *caps;
+
+ caps = ae_dev->caps;
+
+ dev_info(&h->pdev->dev, "support FD: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support GRO: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support FEC: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_FEC_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support UDP GSO: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support PTP: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support INT QL: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
+}
+
+static void hns3_dbg_dev_specs(struct hnae3_handle *h)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct hns3_nic_priv *priv = h->priv;
+
+ dev_info(priv->dev, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ dev_info(priv->dev, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ dev_info(priv->dev, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ dev_info(priv->dev, "RSS ind tbl size: %u\n",
+ dev_specs->rss_ind_tbl_size);
+ dev_info(priv->dev, "RSS key size: %u\n", dev_specs->rss_key_size);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+}
+
static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
@@ -359,6 +426,10 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
ret = hns3_dbg_queue_map(handle);
else if (strncmp(cmd_buf, "bd info", 7) == 0)
ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+ hns3_dbg_dev_caps(handle);
+ else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+ hns3_dbg_dev_specs(handle);
else if (handle->ae_algo->ops->dbg_run_cmd)
ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
else
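A hedged usage note: with these hooks in place, the dumps should be reachable through the driver's existing debugfs command file, e.g. by writing "dev capability" or "dev spec" to the per-device cmd node (the exact path depends on the debugfs layout). The strncmp chain that dispatches them could equally be table-driven; an illustrative alternative, not the patch's implementation:

#include <linux/kernel.h>
#include <linux/string.h>

struct my_dbg_cmd {
	const char *name;
	void (*func)(struct hnae3_handle *h);
};

static const struct my_dbg_cmd my_dbg_cmds[] = {
	{ "dev capability", hns3_dbg_dev_caps },
	{ "dev spec", hns3_dbg_dev_specs },
};

static int my_dbg_dispatch(struct hnae3_handle *h, const char *buf)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(my_dbg_cmds); i++) {
		if (!strncmp(buf, my_dbg_cmds[i].name,
			     strlen(my_dbg_cmds[i].name))) {
			my_dbg_cmds[i].func(h);
			return 0;
		}
	}

	return -EINVAL;
}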
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index a4f1d515e5e0..a362516a3185 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -81,8 +81,10 @@ static const struct pci_device_id hns3_pci_tbl[] = {
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF),
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
@@ -623,27 +625,15 @@ void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
ops->request_update_promisc_mode(handle);
}
-int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags)
-{
- struct hns3_nic_priv *priv = netdev_priv(netdev);
- struct hnae3_handle *h = priv->ae_handle;
-
- if (h->ae_algo->ops->set_promisc_mode) {
- return h->ae_algo->ops->set_promisc_mode(h,
- promisc_flags & HNAE3_UPE,
- promisc_flags & HNAE3_MPE);
- }
-
- return 0;
-}
-
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
bool last_state;
- if (h->pdev->revision >= 0x21 && h->ae_algo->ops->enable_vlan_filter) {
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
+ h->ae_algo->ops->enable_vlan_filter) {
last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
if (enable != last_state) {
netdev_info(netdev,
@@ -706,12 +696,19 @@ static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
/* normal or tunnel packet */
l4_offset = l4.hdr - skb->data;
- hdr_len = (l4.tcp->doff << 2) + l4_offset;
/* remove payload length from inner pseudo checksum when tso */
l4_paylen = skb->len - l4_offset;
- csum_replace_by_diff(&l4.tcp->check,
- (__force __wsum)htonl(l4_paylen));
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(l4_paylen));
+ } else {
+ hdr_len = (l4.tcp->doff << 2) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(l4_paylen));
+ }
/* find the txbd field values */
*paylen = skb->len - hdr_len;
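Both branches perform the same pseudo-header fixup: the device regenerates per-segment checksums, so the payload length folded into the stored pseudo checksum must be backed out; only the header length differs (a fixed sizeof(struct udphdr) for UDP GSO versus the doff-derived TCP header length). A sketch of the shared step, with an illustrative helper name:

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Back the L4 payload length out of the stored pseudo-header
 * checksum before hardware GSO (fixup_gso_pseudo_csum is not a
 * kernel API, just a sketch).
 */
static void fixup_gso_pseudo_csum(struct sk_buff *skb, __sum16 *check,
				  unsigned int l4_offset)
{
	u32 l4_paylen = skb->len - l4_offset;

	csum_replace_by_diff(check, (__force __wsum)htonl(l4_paylen));
}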
@@ -1194,21 +1191,23 @@ static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
return bd_num;
}
-static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size)
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ u8 max_non_tso_bd_num)
{
struct sk_buff *frag_skb;
unsigned int bd_num = 0;
/* If the total len is within the max bd limit */
if (likely(skb->len <= HNS3_MAX_BD_SIZE && !skb_has_frag_list(skb) &&
- skb_shinfo(skb)->nr_frags < HNS3_MAX_NON_TSO_BD_NUM))
+ skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
return skb_shinfo(skb)->nr_frags + 1U;
/* The below case will always be linearized, return
* HNS3_MAX_TSO_BD_NUM + 1U to make sure it is linearized.
*/
if (unlikely(skb->len > HNS3_MAX_TSO_SIZE ||
- (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)))
+ (!skb_is_gso(skb) && skb->len >
+ HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))))
return HNS3_MAX_TSO_BD_NUM + 1U;
bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
@@ -1233,31 +1232,34 @@ static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
}
-/* HW need every continuous 8 buffer data to be larger than MSS,
- * we simplify it by ensuring skb_headlen + the first continuous
- * 7 frags to to be larger than gso header len + mss, and the remaining
- * continuous 7 frags to be larger than MSS except the last 7 frags.
+/* HW needs every continuous max_non_tso_bd_num buffers of data to be
+ * larger than MSS; we simplify it by ensuring skb_headlen + the first
+ * continuous max_non_tso_bd_num - 1 frags are larger than gso header
+ * len + mss, and the remaining continuous max_non_tso_bd_num - 1 frags
+ * are larger than MSS, except for the last max_non_tso_bd_num - 1 frags.
*/
static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
- unsigned int bd_num)
+ unsigned int bd_num, u8 max_non_tso_bd_num)
{
unsigned int tot_len = 0;
int i;
- for (i = 0; i < HNS3_MAX_NON_TSO_BD_NUM - 1U; i++)
+ for (i = 0; i < max_non_tso_bd_num - 1U; i++)
tot_len += bd_size[i];
- /* ensure the first 8 frags is greater than mss + header */
- if (tot_len + bd_size[HNS3_MAX_NON_TSO_BD_NUM - 1U] <
+ /* ensure the first max_non_tso_bd_num frags are greater than
+ * mss + header
+ */
+ if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
return true;
- /* ensure every continuous 7 buffer is greater than mss
- * except the last one.
+ /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater
+ * than mss except the last one.
*/
- for (i = 0; i < bd_num - HNS3_MAX_NON_TSO_BD_NUM; i++) {
+ for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
tot_len -= bd_size[i];
- tot_len += bd_size[i + HNS3_MAX_NON_TSO_BD_NUM - 1U];
+ tot_len += bd_size[i + max_non_tso_bd_num - 1U];
if (tot_len < skb_shinfo(skb)->gso_size)
return true;
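The check is a sliding-window sum over the per-BD sizes: any window of max_non_tso_bd_num consecutive buffers that fails to exceed MSS forces linearization. A slightly simplified standalone restatement (it scans every window, where the driver skips the trailing ones):

/* Return true if any max_bd-wide window of buffer sizes sums to
 * less than mss, i.e. the skb would need linearizing (illustrative
 * restatement, not the driver function).
 */
static bool any_window_below_mss(const unsigned int *bd_size,
				 unsigned int bd_num, unsigned int max_bd,
				 unsigned int mss)
{
	unsigned int tot = 0;
	unsigned int i;

	if (bd_num < max_bd)
		return false;

	for (i = 0; i < max_bd; i++)
		tot += bd_size[i];

	if (tot < mss)
		return true;

	for (i = max_bd; i < bd_num; i++) {
		tot += bd_size[i] - bd_size[i - max_bd];
		if (tot < mss)
			return true;
	}

	return false;
}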
@@ -1268,7 +1270,7 @@ static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
{
- int i = 0;
+ int i;
for (i = 0; i < MAX_SKB_FRAGS; i++)
size[i] = skb_frag_size(&shinfo->frags[i]);
@@ -1279,14 +1281,16 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
unsigned int bd_num;
- bd_num = hns3_tx_bd_num(skb, bd_size);
- if (unlikely(bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
+ bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num);
+ if (unlikely(bd_num > max_non_tso_bd_num)) {
if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
- !hns3_skb_need_linearized(skb, bd_size, bd_num)) {
- trace_hns3_over_8bd(skb);
+ !hns3_skb_need_linearized(skb, bd_size, bd_num,
+ max_non_tso_bd_num)) {
+ trace_hns3_over_max_bd(skb);
goto out;
}
@@ -1296,8 +1300,8 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
bd_num = hns3_tx_bd_count(skb->len);
if ((skb_is_gso(skb) && bd_num > HNS3_MAX_TSO_BD_NUM) ||
(!skb_is_gso(skb) &&
- bd_num > HNS3_MAX_NON_TSO_BD_NUM)) {
- trace_hns3_over_8bd(skb);
+ bd_num > max_non_tso_bd_num)) {
+ trace_hns3_over_max_bd(skb);
return -ENOMEM;
}
@@ -1397,6 +1401,27 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
return bd_num;
}
+static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ bool doorbell)
+{
+ ring->pending_buf += num;
+
+ if (!doorbell) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_more++;
+ u64_stats_update_end(&ring->syncp);
+ return;
+ }
+
+ if (!ring->pending_buf)
+ return;
+
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ ring->pending_buf = 0;
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+}
+
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct hns3_nic_priv *priv = netdev_priv(netdev);
@@ -1405,11 +1430,14 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
int pre_ntu, next_to_use_head;
struct sk_buff *frag_skb;
int bd_num = 0;
+ bool doorbell;
int ret;
/* Hardware can only handle short frames above 32 bytes */
- if (skb_put_padto(skb, HNS3_MIN_TX_LEN))
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
return NETDEV_TX_OK;
+ }
/* Prefetch the data used later */
prefetch(skb->data);
@@ -1420,6 +1448,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_busy++;
u64_stats_update_end(&ring->syncp);
+ hns3_tx_doorbell(ring, 0, true);
return NETDEV_TX_BUSY;
} else if (ret == -ENOMEM) {
u64_stats_update_begin(&ring->syncp);
@@ -1460,11 +1489,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Complete translate all packets */
dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
- netdev_tx_sent_queue(dev_queue, skb->len);
-
- wmb(); /* Commit all data before submit */
-
- hnae3_queue_xmit(ring->tqp, bd_num);
+ doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+ netdev_xmit_more());
+ hns3_tx_doorbell(ring, bd_num, doorbell);
return NETDEV_TX_OK;
@@ -1473,6 +1500,7 @@ fill_err:
out_err_tx_ok:
dev_kfree_skb_any(skb);
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
return NETDEV_TX_OK;
}
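Taken together, hns3_tx_doorbell() implements xmit_more batching: BDs accumulate in pending_buf while the stack keeps feeding packets, and a single tail-register write flushes the burst; note the drop paths above must still ring the doorbell so earlier deferred BDs are not stranded. The skeleton of the pattern, with an illustrative ring layout:

#include <linux/io.h>
#include <linux/types.h>

struct my_tx_ring {
	void __iomem *tail_reg;	/* doorbell register, illustrative */
	int pending_buf;	/* BDs queued but not yet kicked */
};

/* One MMIO write per burst instead of one per skb (sketch of the
 * batching above; statistics and last_to_use handling omitted).
 */
static void my_tx_doorbell(struct my_tx_ring *ring, int num, bool kick)
{
	ring->pending_buf += num;

	if (!kick || !ring->pending_buf)
		return;

	writel(ring->pending_buf, ring->tail_reg);
	ring->pending_buf = 0;
}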
@@ -1853,13 +1881,13 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
tx_ring->next_to_clean, napi->state);
netdev_info(ndev,
- "tx_pkts: %llu, tx_bytes: %llu, io_err_cnt: %llu, sw_err_cnt: %llu\n",
+ "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
- tx_ring->stats.io_err_cnt, tx_ring->stats.sw_err_cnt);
+ tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
netdev_info(ndev,
- "seg_pkt_cnt: %llu, tx_err_cnt: %llu, restart_queue: %llu, tx_busy: %llu\n",
- tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_err_cnt,
+ "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
/* When mac received many pause frames continuous, it's unable to send
@@ -2034,9 +2062,10 @@ bool hns3_is_phys_func(struct pci_dev *pdev)
case HNAE3_DEV_ID_50GE_RDMA:
case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ case HNAE3_DEV_ID_200G_RDMA:
return true;
- case HNAE3_DEV_ID_100G_VF:
- case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
+ case HNAE3_DEV_ID_VF:
+ case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
return false;
default:
dev_warn(&pdev->dev, "un-recognized pci device-id %u",
@@ -2061,15 +2090,6 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
pci_disable_sriov(pdev);
}
-static void hns3_get_dev_capability(struct pci_dev *pdev,
- struct hnae3_ae_dev *ae_dev)
-{
- if (pdev->revision >= 0x21) {
- hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
- hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
- }
-}
-
/* hns3_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in hns3_pci_tbl
@@ -2091,7 +2111,6 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ae_dev->pdev = pdev;
ae_dev->flag = ent->driver_data;
- hns3_get_dev_capability(pdev, ae_dev);
pci_set_drvdata(pdev, ae_dev);
ret = hnae3_register_ae_dev(ae_dev);
@@ -2252,6 +2271,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
struct pci_dev *pdev = h->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2289,7 +2309,7 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
NETIF_F_FRAGLIST;
- if (pdev->revision >= 0x21) {
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
netdev->hw_features |= NETIF_F_GRO_HW;
netdev->features |= NETIF_F_GRO_HW;
@@ -2298,6 +2318,13 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->features |= NETIF_F_NTUPLE;
}
}
+
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -2316,17 +2343,19 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
cb->buf = page_address(p);
cb->length = hns3_page_size(ring);
cb->type = DESC_TYPE_PAGE;
+ page_ref_add(p, USHRT_MAX - 1);
+ cb->pagecnt_bias = USHRT_MAX;
return 0;
}
static void hns3_free_buffer(struct hns3_enet_ring *ring,
- struct hns3_desc_cb *cb)
+ struct hns3_desc_cb *cb, int budget)
{
if (cb->type == DESC_TYPE_SKB)
- dev_kfree_skb_any((struct sk_buff *)cb->priv);
- else if (!HNAE3_IS_TX_RING(ring))
- put_page((struct page *)cb->priv);
+ napi_consume_skb(cb->priv, budget);
+ else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
+ __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
memset(cb, 0, sizeof(*cb));
}
@@ -2358,7 +2387,8 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
ring->desc[i].addr = 0;
}
-static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
+ int budget)
{
struct hns3_desc_cb *cb = &ring->desc_cb[i];
@@ -2366,7 +2396,7 @@ static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
return;
hns3_buffer_detach(ring, i);
- hns3_free_buffer(ring, cb);
+ hns3_free_buffer(ring, cb, budget);
}
static void hns3_free_buffers(struct hns3_enet_ring *ring)
@@ -2374,7 +2404,7 @@ static void hns3_free_buffers(struct hns3_enet_ring *ring)
int i;
for (i = 0; i < ring->desc_num; i++)
- hns3_free_buffer_detach(ring, i);
+ hns3_free_buffer_detach(ring, i, 0);
}
/* free desc along with its attached buffer */
@@ -2419,7 +2449,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
return 0;
out_with_buf:
- hns3_free_buffer(ring, cb);
+ hns3_free_buffer(ring, cb, 0);
out:
return ret;
}
@@ -2451,7 +2481,7 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
out_buffer_fail:
for (j = i - 1; j >= 0; j--)
- hns3_free_buffer_detach(ring, j);
+ hns3_free_buffer_detach(ring, j, 0);
return ret;
}
@@ -2478,71 +2508,62 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
DMA_FROM_DEVICE);
}
-static void hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int head,
- int *bytes, int *pkts)
+static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+ int *bytes, int *pkts, int budget)
{
+ /* Pairs with the ring->last_to_use update in hns3_tx_doorbell();
+ * smp_store_release() is not used in hns3_tx_doorbell() because
+ * the doorbell operation already has the needed barrier.
+ */
+ int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;
struct hns3_desc_cb *desc_cb;
+ bool reclaimed = false;
+ struct hns3_desc *desc;
+
+ while (ltu != ntc) {
+ desc = &ring->desc[ntc];
+
+ if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
+ BIT(HNS3_TXD_VLD_B))
+ break;
- while (head != ntc) {
desc_cb = &ring->desc_cb[ntc];
(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
(*bytes) += desc_cb->length;
/* desc_cb will be cleaned, after hnae3_free_buffer_detach */
- hns3_free_buffer_detach(ring, ntc);
+ hns3_free_buffer_detach(ring, ntc, budget);
if (++ntc == ring->desc_num)
ntc = 0;
/* Issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ntc]);
+ reclaimed = true;
}
+ if (unlikely(!reclaimed))
+ return false;
+
/* This smp_store_release() pairs with smp_load_acquire() in
* ring_space called by hns3_nic_net_xmit.
*/
smp_store_release(&ring->next_to_clean, ntc);
+ return true;
}
-static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
-{
- int u = ring->next_to_use;
- int c = ring->next_to_clean;
-
- if (unlikely(h > ring->desc_num))
- return 0;
-
- return u > c ? (h > c && h <= u) : (h > c || h <= u);
-}
-
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
struct net_device *netdev = ring_to_netdev(ring);
struct hns3_nic_priv *priv = netdev_priv(netdev);
struct netdev_queue *dev_queue;
int bytes, pkts;
- int head;
-
- head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
-
- if (is_ring_empty(ring) || head == ring->next_to_clean)
- return; /* no data to poll */
-
- rmb(); /* Make sure head is ready before touch any data */
-
- if (unlikely(!is_valid_clean_head(ring, head))) {
- hns3_rl_err(netdev, "wrong head (%d, %d-%d)\n", head,
- ring->next_to_use, ring->next_to_clean);
-
- u64_stats_update_begin(&ring->syncp);
- ring->stats.io_err_cnt++;
- u64_stats_update_end(&ring->syncp);
- return;
- }
bytes = 0;
pkts = 0;
- hns3_nic_reclaim_desc(ring, head, &bytes, &pkts);
+
+ if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
+ return;
ring->tqp_vector->tx_group.total_bytes += bytes;
ring->tqp_vector->tx_group.total_packets += pkts;
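Reclaim now needs no head-register read and no is_valid_clean_head() sanity check: the cleaner walks from next_to_clean toward the acquire-loaded last_to_use and stops at the first BD whose VLD bit the hardware has not yet cleared. The index handshake, reduced to a sketch:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct my_ring_idx {
	int last_to_use;	/* producer index, written in xmit */
	int next_to_clean;	/* consumer index, written in poll */
};

/* Producer (xmit) side: the MMIO doorbell write already orders the
 * descriptor stores, so a plain WRITE_ONCE() publishes the index,
 * as the comment in hns3_nic_reclaim_desc() explains.
 */
static void my_publish_produced(struct my_ring_idx *r, int next_to_use)
{
	WRITE_ONCE(r->last_to_use, next_to_use);
}

/* Consumer (NAPI) side: acquire-load the producer index so the BD
 * reads that follow cannot be reordered before it.
 */
static int my_snapshot_produced(struct my_ring_idx *r)
{
	return smp_load_acquire(&r->last_to_use);
}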
@@ -2614,8 +2635,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
ring_ptr_move_fw(ring, next_to_use);
}
- wmb(); /* Make all data has been write before submit */
- writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}
static bool hns3_page_is_reusable(struct page *page)
@@ -2624,6 +2644,11 @@ static bool hns3_page_is_reusable(struct page *page)
!page_is_pfmemalloc(page);
}
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+ return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+}
+
static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
struct hns3_enet_ring *ring, int pull_len,
struct hns3_desc_cb *desc_cb)
@@ -2632,6 +2657,7 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
int size = le16_to_cpu(desc->rx.size);
u32 truesize = hns3_buf_size(ring);
+ desc_cb->pagecnt_bias--;
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
size - pull_len, truesize);
@@ -2639,20 +2665,27 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
* when page_offset rollback to zero, flag default unreuse
*/
if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
- (!desc_cb->page_offset && page_count(desc_cb->priv) > 1))
+ (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
return;
+ }
/* Move offset up to the next cache line */
desc_cb->page_offset += truesize;
if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
desc_cb->reuse_flag = 1;
- /* Bump ref count on page before it is given */
- get_page(desc_cb->priv);
- } else if (page_count(desc_cb->priv) == 1) {
+ } else if (hns3_can_reuse_page(desc_cb)) {
desc_cb->reuse_flag = 1;
desc_cb->page_offset = 0;
- get_page(desc_cb->priv);
+ } else if (desc_cb->pagecnt_bias) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+ return;
+ }
+
+ if (unlikely(!desc_cb->pagecnt_bias)) {
+ page_ref_add(desc_cb->priv, USHRT_MAX);
+ desc_cb->pagecnt_bias = USHRT_MAX;
}
}
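RX recycling moves from per-frame get_page()/put_page() atomics to reference biasing: take USHRT_MAX references up front, spend them through a local pagecnt_bias counter, and drain whatever remains in one call when the page finally leaves the ring. A condensed sketch of the scheme (struct and helper names are illustrative):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/page_ref.h>

struct my_rx_buf {
	struct page *page;
	u16 pagecnt_bias;	/* references still owned locally */
};

static int my_rx_buf_init(struct my_rx_buf *buf)
{
	buf->page = alloc_page(GFP_ATOMIC);
	if (!buf->page)
		return -ENOMEM;

	/* alloc_page() supplied one reference; top up to USHRT_MAX so
	 * each reuse is a local decrement instead of an atomic op.
	 */
	page_ref_add(buf->page, USHRT_MAX - 1);
	buf->pagecnt_bias = USHRT_MAX;
	return 0;
}

/* Reusable only when nobody else holds the page: the global count
 * minus our bias leaves exactly the one reference we kept.
 */
static bool my_rx_buf_reusable(struct my_rx_buf *buf)
{
	return page_count(buf->page) - buf->pagecnt_bias == 1;
}

static void my_rx_buf_free(struct my_rx_buf *buf)
{
	__page_frag_cache_drain(buf->page, buf->pagecnt_bias);
}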
@@ -2782,8 +2815,9 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
{
struct hnae3_handle *handle = ring->tqp->handle;
struct pci_dev *pdev = ring->tqp->handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
- if (pdev->revision == 0x20) {
+ if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
*vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
if (!(*vlan_tag & VLAN_VID_MASK))
*vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
@@ -2828,6 +2862,16 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
+static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
+{
+ ring->desc[ring->next_to_clean].rx.bd_base_info &=
+ cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->next_to_clean += 1;
+
+ if (unlikely(ring->next_to_clean == ring->desc_num))
+ ring->next_to_clean = 0;
+}
+
static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
unsigned char *va)
{
@@ -2860,9 +2904,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
if (likely(hns3_page_is_reusable(desc_cb->priv)))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
- put_page(desc_cb->priv);
+ __page_frag_cache_drain(desc_cb->priv,
+ desc_cb->pagecnt_bias);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
return 0;
}
u64_stats_update_begin(&ring->syncp);
@@ -2873,7 +2918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
return 0;
}
@@ -2928,7 +2973,7 @@ static int hns3_add_frag(struct hns3_enet_ring *ring)
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
trace_hns3_rx_desc(ring);
- ring_ptr_move_fw(ring, next_to_clean);
+ hns3_rx_ring_move_fw(ring);
ring->pending_buf++;
} while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
@@ -3070,35 +3115,32 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
prefetch(desc);
- length = le16_to_cpu(desc->rx.size);
- bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ if (!skb) {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
- /* Check valid BD */
- if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
- return -ENXIO;
+ /* Check valid BD */
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
+ return -ENXIO;
+
+ dma_rmb();
+ length = le16_to_cpu(desc->rx.size);
- if (!skb) {
ring->va = desc_cb->buf + desc_cb->page_offset;
dma_sync_single_for_cpu(ring_to_dev(ring),
desc_cb->dma + desc_cb->page_offset,
hns3_buf_size(ring),
DMA_FROM_DEVICE);
- }
- /* Prefetch first cache line of first page
- * Idea is to cache few bytes of the header of the packet. Our L1 Cache
- * line size is 64B so need to prefetch twice to make it 128B. But in
- * actual we can have greater size of caches with 128B Level 1 cache
- * lines. In such a case, single fetch would suffice to cache in the
- * relevant part of the header.
- */
- prefetch(ring->va);
-#if L1_CACHE_BYTES < 128
- prefetch(ring->va + L1_CACHE_BYTES);
-#endif
+ /* Prefetch the first cache line of the first page.
+ * The idea is to cache a few bytes of the packet header. Our L1
+ * cache line size is 64B, so we need to prefetch twice to cover
+ * 128B. But caches with 128B Level 1 cache lines also exist, in
+ * which case a single fetch suffices to cache the relevant part
+ * of the header.
+ */
+ net_prefetch(ring->va);
- if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
skb = ring->skb;
@@ -3138,19 +3180,11 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
int recv_pkts = 0;
- int recv_bds = 0;
- int err, num;
+ int err;
- num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
- num -= unused_count;
unused_count -= ring->pending_buf;
- if (num <= 0)
- goto out;
-
- rmb(); /* Make sure num taken effect before the other data is touched */
-
- while (recv_pkts < budget && recv_bds < num) {
+ while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns3_nic_alloc_rx_buffers(ring, unused_count);
@@ -3168,7 +3202,6 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
recv_pkts++;
}
- recv_bds += ring->pending_buf;
unused_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
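The RX poll loop likewise drops its FBDNUM register read: hns3_rx_ring_move_fw() clears the VLD bit as each BD is consumed, so the budget loop simply runs until it meets a descriptor the hardware has not marked valid again. Shape of the resulting loop, as a sketch (my_rx_desc_valid() and my_rx_one() are illustrative stubs):

#include <asm/barrier.h>

struct my_rx_ring;
static bool my_rx_desc_valid(struct my_rx_ring *ring);
static int my_rx_one(struct my_rx_ring *ring);

/* Budget-bounded RX poll driven purely by the descriptor VLD bit. */
static int my_clean_rx(struct my_rx_ring *ring, int budget)
{
	int done = 0;

	while (done < budget) {
		if (!my_rx_desc_valid(ring))	/* HW not done yet */
			break;

		dma_rmb();	/* read the BD only after the VLD check */

		if (my_rx_one(ring))	/* consume + clear VLD */
			break;

		done++;
	}

	return done;
}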
@@ -3337,7 +3370,7 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
hns3_for_each_ring(ring, tqp_vector->tx_group)
- hns3_clean_tx_ring(ring);
+ hns3_clean_tx_ring(ring, budget);
/* make sure rx ring budget not smaller than 1 */
if (tqp_vector->num_tqps > 1)
@@ -3496,7 +3529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
struct hnae3_ring_chain_node vector_ring_chain;
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
- int ret = 0;
+ int ret;
int i;
hns3_nic_set_cpumask(priv);
@@ -3673,12 +3706,10 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring = &priv->ring[q->tqp_index];
desc_num = priv->ae_handle->kinfo.num_tx_desc;
ring->queue_index = q->tqp_index;
- ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
} else {
ring = &priv->ring[q->tqp_index + queue_num];
desc_num = priv->ae_handle->kinfo.num_rx_desc;
ring->queue_index = q->tqp_index;
- ring->io_base = q->io_base;
}
hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
@@ -3692,6 +3723,7 @@ static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
ring->desc_num = desc_num;
ring->next_to_use = 0;
ring->next_to_clean = 0;
+ ring->last_to_use = 0;
}
static void hns3_queue_to_ring(struct hnae3_queue *tqp,
@@ -3771,6 +3803,7 @@ void hns3_fini_ring(struct hns3_enet_ring *ring)
ring->desc_cb = NULL;
ring->next_to_clean = 0;
ring->next_to_use = 0;
+ ring->last_to_use = 0;
ring->pending_buf = 0;
if (ring->skb) {
dev_kfree_skb_any(ring->skb);
@@ -3979,6 +4012,7 @@ static void hns3_info_show(struct hns3_nic_priv *priv)
static int hns3_client_init(struct hnae3_handle *handle)
{
struct pci_dev *pdev = handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
u16 alloc_tqps, max_rss_size;
struct hns3_nic_priv *priv;
struct net_device *netdev;
@@ -3995,6 +4029,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
priv->netdev = netdev;
priv->ae_handle = handle;
priv->tx_timeout_count = 0;
+ priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
@@ -4181,9 +4216,11 @@ static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
while (ring->next_to_clean != ring->next_to_use) {
ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
- hns3_free_buffer_detach(ring, ring->next_to_clean);
+ hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
ring_ptr_move_fw(ring, next_to_clean);
}
+
+ ring->pending_buf = 0;
}
static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
@@ -4286,6 +4323,7 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
hns3_clear_tx_ring(&priv->ring[i]);
priv->ring[i].next_to_clean = 0;
priv->ring[i].next_to_use = 0;
+ priv->ring[i].last_to_use = 0;
rx_ring = &priv->ring[i + h->kinfo.num_tqps];
hns3_init_ring_hw(rx_ring);
@@ -4582,6 +4620,8 @@ static const struct hns3_hw_error_info hns3_hw_err[] = {
.msg = "IMP CMDQ error" },
{ .type = HNAE3_IMP_RD_POISON_ERROR,
.msg = "IMP RD poison" },
+ { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
+ .msg = "ROCEE AXI RESP error" },
};
static void hns3_process_hw_error(struct hnae3_handle *handle,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 9922c5fd7f94..1c81dea0da1e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -42,12 +42,9 @@ enum hns3_nic_state {
#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
-#define HNS3_RING_PREFETCH_EN_REG 0x0007C
-#define HNS3_RING_CFG_VF_NUM_REG 0x00080
-#define HNS3_RING_ASID_REG 0x0008C
#define HNS3_RING_EN_REG 0x00090
-
-#define HNS3_TX_REG_OFFSET 0x40
+#define HNS3_RING_RX_EN_REG 0x00098
+#define HNS3_RING_TX_EN_REG 0x000D4
#define HNS3_RX_HEAD_SIZE 256
@@ -172,13 +169,12 @@ enum hns3_nic_state {
#define HNS3_VECTOR_INITED 1
#define HNS3_MAX_BD_SIZE 65535
-#define HNS3_MAX_NON_TSO_BD_NUM 8U
#define HNS3_MAX_TSO_BD_NUM 63U
#define HNS3_MAX_TSO_SIZE \
(HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
-#define HNS3_MAX_NON_TSO_SIZE \
- (HNS3_MAX_BD_SIZE * HNS3_MAX_NON_TSO_BD_NUM)
+#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
+ (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
@@ -292,6 +288,7 @@ struct hns3_desc_cb {
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
+ u16 pagecnt_bias;
};
enum hns3_pkt_l3type {
@@ -348,14 +345,13 @@ enum hns3_pkt_ol4type {
};
struct ring_stats {
- u64 io_err_cnt;
u64 sw_err_cnt;
u64 seg_pkt_cnt;
union {
struct {
u64 tx_pkts;
u64 tx_bytes;
- u64 tx_err_cnt;
+ u64 tx_more;
u64 restart_queue;
u64 tx_busy;
u64 tx_copy;
@@ -380,7 +376,6 @@ struct ring_stats {
};
struct hns3_enet_ring {
- u8 __iomem *io_base; /* base io address for the ring */
struct hns3_desc *desc; /* dma map address space */
struct hns3_desc_cb *desc_cb;
struct hns3_enet_ring *next;
@@ -402,8 +397,10 @@ struct hns3_enet_ring {
* next_to_use
*/
int next_to_clean;
-
- u32 pull_len; /* head length for current packet */
+ union {
+ int last_to_use; /* last idx used by xmit */
+ u32 pull_len; /* memcpy len for current rx packet */
+ };
u32 frag_num;
void *va; /* first buffer address for current packet */
@@ -479,6 +476,7 @@ struct hns3_nic_priv {
struct hns3_enet_ring *ring;
struct hns3_enet_tqp_vector *tqp_vector;
u16 vector_num;
+ u8 max_non_tso_bd_num;
u64 tx_timeout_count;
@@ -518,11 +516,6 @@ static inline int ring_space(struct hns3_enet_ring *ring)
(begin - end)) - 1;
}
-static inline int is_ring_empty(struct hns3_enet_ring *ring)
-{
- return ring->next_to_use == ring->next_to_clean;
-}
-
static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
{
return readl(base + reg);
@@ -548,9 +541,6 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
#define hns3_write_dev(a, reg, value) \
hns3_write_reg((a)->io_base, (reg), (value))
-#define hnae3_queue_xmit(tqp, buf_num) writel_relaxed(buf_num, \
- (tqp)->io_base + HNS3_RING_TX_RING_TAIL_REG)
-
#define ring_to_dev(ring) ((ring)->dev)
#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
@@ -588,7 +578,7 @@ void hns3_ethtool_set_ops(struct net_device *netdev);
int hns3_set_channels(struct net_device *netdev,
struct ethtool_channels *ch);
-void hns3_clean_tx_ring(struct hns3_enet_ring *ring);
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
int hns3_init_all_ring(struct hns3_nic_priv *priv);
int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
int hns3_nic_reset_all_ring(struct hnae3_handle *h);
@@ -607,7 +597,6 @@ void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
u32 rl_value);
void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
-int hns3_update_promisc_mode(struct net_device *netdev, u8 promisc_flags);
void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
#ifdef CONFIG_HNS3_DCB
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 2622e04e8eed..6b07b2771172 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -27,12 +27,11 @@ struct hns3_sfp_type {
static const struct hns3_stats hns3_txq_stats[] = {
/* Tx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", tx_pkts),
HNS3_TQP_STAT("bytes", tx_bytes),
- HNS3_TQP_STAT("errors", tx_err_cnt),
+ HNS3_TQP_STAT("more", tx_more),
HNS3_TQP_STAT("wake", restart_queue),
HNS3_TQP_STAT("busy", tx_busy),
HNS3_TQP_STAT("copy", tx_copy),
@@ -46,7 +45,6 @@ static const struct hns3_stats hns3_txq_stats[] = {
static const struct hns3_stats hns3_rxq_stats[] = {
/* Rx per-queue statistics */
- HNS3_TQP_STAT("io_err_cnt", io_err_cnt),
HNS3_TQP_STAT("dropped", sw_err_cnt),
HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
HNS3_TQP_STAT("packets", rx_pkts),
@@ -79,6 +77,7 @@ static const struct hns3_stats hns3_rxq_stats[] = {
static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
{
struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
bool vlan_filter_enable;
int ret;
@@ -98,7 +97,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
break;
}
- if (ret || h->pdev->revision >= 0x21)
+ if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
return ret;
if (en) {
@@ -149,6 +148,7 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
struct net_device *ndev = skb->dev;
struct hnae3_handle *handle;
+ struct hnae3_ae_dev *ae_dev;
unsigned char *packet;
struct ethhdr *ethh;
unsigned int i;
@@ -165,7 +165,8 @@ static void hns3_lp_setup_skb(struct sk_buff *skb)
* the purpose of mac or serdes selftest.
*/
handle = hns3_get_handle(ndev);
- if (handle->pdev->revision == 0x20)
+ ae_dev = pci_get_drvdata(handle->pdev);
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
eth_zero_addr(ethh->h_source);
ethh->h_proto = htons(ETH_P_ARP);
@@ -232,7 +233,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
for (i = start_ringid; i <= end_ringid; i++) {
struct hns3_enet_ring *ring = &priv->ring[i];
- hns3_clean_tx_ring(ring);
+ hns3_clean_tx_ring(ring, 0);
}
}
@@ -310,9 +311,6 @@ static void hns3_self_test(struct net_device *ndev,
struct hnae3_handle *h = priv->ae_handle;
int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
bool if_running = netif_running(ndev);
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- bool dis_vlan_filter;
-#endif
int test_index = 0;
u32 i;
@@ -349,9 +347,7 @@ static void hns3_self_test(struct net_device *ndev,
#if IS_ENABLED(CONFIG_VLAN_8021Q)
/* Disable the vlan filter, as selftest does not support it */
- dis_vlan_filter = (ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- h->ae_algo->ops->enable_vlan_filter;
- if (dis_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, false);
#endif
@@ -388,7 +384,7 @@ static void hns3_self_test(struct net_device *ndev,
h->ae_algo->ops->halt_autoneg(h, false);
#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (dis_vlan_filter)
+ if (h->ae_algo->ops->enable_vlan_filter)
h->ae_algo->ops->enable_vlan_filter(h, true);
#endif
@@ -763,6 +759,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
const struct ethtool_link_ksettings *cmd)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
int ret;
@@ -784,7 +781,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
return phy_ethtool_ksettings_set(netdev->phydev, cmd);
}
- if (handle->pdev->revision == 0x20)
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
ret = hns3_check_ksettings_param(netdev, cmd);
@@ -848,11 +845,12 @@ static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
if (!h->ae_algo->ops->set_rss)
return -EOPNOTSUPP;
- if ((h->pdev->revision == 0x20 &&
+ if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
netdev_err(netdev, "hash func not supported\n");
@@ -1073,9 +1071,6 @@ static int hns3_nway_reset(struct net_device *netdev)
if (phy)
return genphy_restart_aneg(phy);
- if (handle->pdev->revision == 0x20)
- return -EOPNOTSUPP;
-
return ops->restart_autoneg(handle);
}
@@ -1363,11 +1358,12 @@ static int hns3_get_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 fec_ability;
u8 fec_mode;
- if (handle->pdev->revision == 0x20)
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
return -EOPNOTSUPP;
if (!ops->get_fec)
@@ -1385,10 +1381,11 @@ static int hns3_set_fecparam(struct net_device *netdev,
struct ethtool_fecparam *fec)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u32 fec_mode;
- if (handle->pdev->revision == 0x20)
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
return -EOPNOTSUPP;
if (!ops->set_fec)
@@ -1406,11 +1403,13 @@ static int hns3_get_module_info(struct net_device *netdev,
#define HNS3_SFF_8636_V1_3 0x03
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
struct hns3_sfp_type sfp_type;
int ret;
- if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
return -EOPNOTSUPP;
memset(&sfp_type, 0, sizeof(sfp_type));
@@ -1454,9 +1453,11 @@ static int hns3_get_module_eeprom(struct net_device *netdev,
struct ethtool_eeprom *ee, u8 *data)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
- if (handle->pdev->revision == 0x20 || !ops->get_module_eeprom)
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
return -EOPNOTSUPP;
if (!ee->len)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index 7bddcca148a5..5153e5d41bbd 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -53,7 +53,7 @@ DECLARE_EVENT_CLASS(hns3_skb_template,
)
);
-DEFINE_EVENT(hns3_skb_template, hns3_over_8bd,
+DEFINE_EVENT(hns3_skb_template, hns3_over_max_bd,
TP_PROTO(struct sk_buff *skb),
TP_ARGS(skb));
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 1d6c328bd9fb..e6321dda0f3f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -261,7 +261,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
bool complete = false;
u32 timeout = 0;
int handle = 0;
- int retval = 0;
+ int retval;
int ntc;
spin_lock_bh(&hw->cmq.csq.lock);
@@ -330,9 +330,37 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return retval;
}
-static enum hclge_cmd_status hclge_cmd_query_firmware_version(
- struct hclge_hw *hw, u32 *version)
+static void hclge_set_default_capability(struct hclge_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclge_parse_capability(struct hclge_dev *hdev,
+ struct hclge_query_version_cmd *cmd)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ u32 caps;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+
+ if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
+ set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
+ set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
+ set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
+static enum hclge_cmd_status
+hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclge_query_version_cmd *resp;
struct hclge_desc desc;
int ret;
@@ -340,9 +368,20 @@ static enum hclge_cmd_status hclge_cmd_query_firmware_version(
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
resp = (struct hclge_query_version_cmd *)desc.data;
- ret = hclge_cmd_send(hw, &desc, 1);
- if (!ret)
- *version = le32_to_cpu(resp->firmware);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ hdev->fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= hdev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclge_set_default_capability(hdev);
+
+ hclge_parse_capability(hdev, resp);
return ret;
}
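One command now returns the firmware version, the hardware version and the capability words together; dev_version packs the hardware word above the PCI revision byte (hardware << HNAE3_PCI_REVISION_BIT_SIZE | pdev->revision), so the existing >= version comparisons keep working. The bit translation could also be table-driven; an illustrative alternative to the if-chain above, not the patch's code:

#include <linux/bitops.h>
#include <linux/kernel.h>

struct my_caps_map {
	u32 fw_bit;		/* bit in the firmware response word */
	int drv_bit;		/* bit in ae_dev->caps */
};

static const struct my_caps_map my_caps_table[] = {
	{ BIT(HCLGE_CAP_UDP_GSO_B), HNAE3_DEV_SUPPORT_UDP_GSO_B },
	{ BIT(HCLGE_CAP_PTP_B), HNAE3_DEV_SUPPORT_PTP_B },
	{ BIT(HCLGE_CAP_INT_QL_B), HNAE3_DEV_SUPPORT_INT_QL_B },
	{ BIT(HCLGE_CAP_TQP_TXRX_INDEP_B),
	  HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B },
};

static void my_parse_caps(unsigned long *dst, u32 fw_caps)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(my_caps_table); i++)
		if (fw_caps & my_caps_table[i].fw_bit)
			set_bit(my_caps_table[i].drv_bit, dst);
}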
@@ -402,7 +441,6 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev)
int hclge_cmd_init(struct hclge_dev *hdev)
{
- u32 version;
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -431,22 +469,23 @@ int hclge_cmd_init(struct hclge_dev *hdev)
goto err_cmd_init;
}
- ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
+ /* get version and device capabilities */
+ ret = hclge_cmd_query_version_and_capability(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
- "firmware version query failed %d\n", ret);
+ "failed to query version and capabilities, ret = %d\n",
+ ret);
goto err_cmd_init;
}
- hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
/* ask the firmware to enable some features, driver can work without
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 463f29151ef0..096e26a2e16b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -115,7 +115,8 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
- HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050,
+
+ HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
/* MAC command */
HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
@@ -362,9 +363,26 @@ struct hclge_rx_priv_buff_cmd {
u8 rsv[6];
};
+enum HCLGE_CAP_BITS {
+ HCLGE_CAP_UDP_GSO_B,
+ HCLGE_CAP_QB_B,
+ HCLGE_CAP_FD_FORWARD_TC_B,
+ HCLGE_CAP_PTP_B,
+ HCLGE_CAP_INT_QL_B,
+ HCLGE_CAP_SIMPLE_BD_B,
+ HCLGE_CAP_TX_PUSH_B,
+ HCLGE_CAP_PHY_IMP_B,
+ HCLGE_CAP_TQP_TXRX_INDEP_B,
+ HCLGE_CAP_HW_PAD_B,
+ HCLGE_CAP_STASH_B,
+};
+
+#define HCLGE_QUERY_CAP_LENGTH 3
struct hclge_query_version_cmd {
__le32 firmware;
- __le32 firmware_rsv[5];
+ __le32 hardware;
+ __le32 rsv;
+ __le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
};
#define HCLGE_RX_PRIV_EN_B 15
@@ -491,6 +509,8 @@ struct hclge_pf_res_cmd {
#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
#define HCLGE_CFG_SPEED_ABILITY_S 0
#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
+#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
#define HCLGE_CFG_UMV_TBL_SPACE_S 16
#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
@@ -1069,6 +1089,20 @@ struct hclge_sfp_info_bd0_cmd {
u8 data[HCLGE_SFP_INFO_BD0_LEN];
};
+#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4
+
+struct hclge_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1;
+ __le32 max_tm_rate;
+};
+
int hclge_cmd_init(struct hclge_dev *hdev);
static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
{
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index d6c3952aba04..3606240025a8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -2,7 +2,8 @@
// Copyright (c) 2016-2017 Hisilicon Limited.
#include "hclge_main.h"
+#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"
#define BW_PERCENT 100
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 26f6f068b01d..16df050e72cf 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -8,7 +8,7 @@
#include "hclge_tm.h"
#include "hnae3.h"
-static struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .reg_type = "bios common",
.dfx_msg = &hclge_dbg_bios_common_reg[0],
.reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
@@ -115,14 +115,14 @@ static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
}
static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
- struct hclge_dbg_reg_type_info *reg_info,
+ const struct hclge_dbg_reg_type_info *reg_info,
const char *cmd_buf)
{
#define IDX_OFFSET 1
const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
- struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
- struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
struct hclge_desc *desc_src;
struct hclge_desc *desc;
int entries_per_desc;
@@ -399,7 +399,7 @@ err_dcb_cmd_send:
static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
{
- struct hclge_dbg_reg_type_info *reg_info;
+ const struct hclge_dbg_reg_type_info *reg_info;
bool has_dump = false;
int i;
@@ -428,17 +428,13 @@ static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
}
}
-static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index,
- char *title_buf, char *true_buf,
- char *false_buf)
+static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
{
if (flag)
- dev_info(&hdev->pdev->dev, "%s(%d): %s weight: %u\n",
- title_buf, index, true_buf,
- hdev->tm_info.pg_info[0].tc_dwrr[index]);
+ dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
+ index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
else
- dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index,
- false_buf);
+ dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);
}
static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
@@ -469,8 +465,7 @@ static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
ets_weight->weight_offset);
for (i = 0; i < HNAE3_MAX_TC; i++)
- hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i,
- "tc", "no sp mode", "sp mode");
+ hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
}
static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
@@ -1170,6 +1165,14 @@ static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
hdev->serv_processed_cnt);
}
+static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
+ dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
+}
+
static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
{
struct hclge_desc *desc_src, *desc_tmp;
@@ -1494,6 +1497,7 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
#define DUMP_REG "dump reg"
#define DUMP_TM_MAP "dump tm map"
#define DUMP_LOOPBACK "dump loopback"
+#define DUMP_INTERRUPT "dump intr"
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -1541,6 +1545,9 @@ int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
hclge_dbg_dump_mac_list(hdev,
&cmd_buf[sizeof("dump mc mac list")],
false);
+ } else if (strncmp(cmd_buf, DUMP_INTERRUPT,
+ strlen(DUMP_INTERRUPT)) == 0) {
+ hclge_dbg_dump_interrupt(hdev);
} else {
dev_info(&hdev->pdev->dev, "unknown command\n");
return -EINVAL;
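With this hunk, "dump intr" joins the prefixes the debugfs dispatcher matches with strncmp() before falling through to the unknown-command error. Assuming the usual hns3 debugfs layout (a per-device directory holding a single cmd file; the path below is illustrative), the new dump is triggered with

	echo "dump intr" > /sys/kernel/debug/hns3/<pci-dev-name>/cmd

and hclge_dbg_dump_interrupt() prints the MSI accounting to the kernel log via dev_info().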
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 38b79321c4c4..a9066e6ff697 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -81,13 +81,13 @@ struct hclge_dbg_dfx_message {
#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
struct hclge_dbg_reg_type_info {
const char *reg_type;
- struct hclge_dbg_dfx_message *dfx_msg;
+ const struct hclge_dbg_dfx_message *dfx_msg;
struct hclge_dbg_reg_common_msg reg_msg;
};
#pragma pack()
-static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
{true, "BP_CPU_STATE"},
{true, "DFX_MSIX_INFO_NIC_0"},
@@ -103,7 +103,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
{true, "SSU_ETS_PORT_STATUS"},
{true, "SSU_ETS_TCG_STATUS"},
@@ -175,7 +175,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{true, "prt_id"},
{true, "PACKET_TC_CURR_BUFFER_CNT_0"},
{true, "PACKET_TC_CURR_BUFFER_CNT_1"},
@@ -282,7 +282,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{true, "OQ_INDEX"},
{true, "QUEUE_CNT"},
{false, "Reserved"},
@@ -291,7 +291,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{true, "prt_id"},
{true, "IGU_RX_ERR_PKT"},
{true, "IGU_RX_NO_SOF_PKT"},
@@ -356,7 +356,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "tc_queue_num"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -365,7 +365,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
{true, "BUF_WAIT_TIMEOUT_QID"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
{true, "FIFO_DFX_ST0"},
{true, "FIFO_DFX_ST1"},
@@ -381,7 +381,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{false, "Reserved"},
{true, "NCSI_EGU_TX_FIFO_STS"},
{true, "NCSI_PAUSE_STATUS"},
@@ -453,7 +453,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
{true, "NCSI_MAC_RX_PAUSE_FRAMES"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
{true, "LGE_IGU_AFIFO_DFX_0"},
{true, "LGE_IGU_AFIFO_DFX_1"},
@@ -483,7 +483,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
{true, "DROP_FROM_PRT_PKT_CNT"},
{true, "DROP_FROM_HOST_PKT_CNT"},
@@ -639,7 +639,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
{true, "FSM_DFX_ST0"},
{true, "FSM_DFX_ST1"},
@@ -711,7 +711,7 @@ static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
{false, "Reserved"},
};
-static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
{true, "q_num"},
{true, "RCB_CFG_RX_RING_TAIL"},
{true, "RCB_CFG_RX_RING_HEAD"},
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 50d5ef71756b..9ee55ee0487d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -729,7 +729,7 @@ static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
struct hclge_desc desc;
int ret;
- if (hdev->pdev->revision < 0x21)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0;
/* configure NCSI error interrupts */
@@ -808,7 +808,7 @@ static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
desc[1].data[1] =
cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[1].data[2] =
cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
} else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
@@ -1041,7 +1041,7 @@ static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
if (en) {
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
desc[0].data[0] =
cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
else
@@ -1507,6 +1507,8 @@ hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
reset_type = HNAE3_FUNC_RESET;
+ hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR);
+
ret = hclge_log_rocee_axi_error(hdev);
if (ret)
return HNAE3_GLOBAL_RESET;
@@ -1548,7 +1550,8 @@ int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
struct hclge_desc desc;
int ret;
- if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev))
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !hnae3_dev_roce_supported(hdev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
@@ -1574,8 +1577,7 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
struct hclge_dev *hdev = ae_dev->priv;
enum hnae3_reset_type reset_type;
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
- hdev->pdev->revision < 0x21)
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
return;
reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
@@ -1661,7 +1663,7 @@ pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
}
/* Handling Non-fatal Rocee RAS errors */
- if (hdev->pdev->revision >= 0x21 &&
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
hclge_handle_rocee_ras_error(ae_dev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index d553ed7ee64c..1f026408ad38 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -84,6 +84,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
/* required last entry */
{0, }
};
@@ -622,7 +623,7 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
- int i = 0;
+ int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
@@ -739,7 +740,7 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
if (stringset == ETH_SS_TEST) {
/* clear loopback bit flags at first */
handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
- if (hdev->pdev->revision >= 0x21 ||
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
@@ -965,6 +966,9 @@ static int hclge_parse_speed(int speed_cmd, int *speed)
case 5:
*speed = HCLGE_MAC_SPEED_100G;
break;
+ case 8:
+ *speed = HCLGE_MAC_SPEED_200G;
+ break;
default:
return -EINVAL;
}
@@ -1004,6 +1008,9 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
case HCLGE_MAC_SPEED_100G:
speed_bit = HCLGE_SUPPORT_100G_BIT;
break;
+ case HCLGE_MAC_SPEED_200G:
+ speed_bit = HCLGE_SUPPORT_200G_BIT;
+ break;
default:
return -EINVAL;
}
@@ -1014,7 +1021,7 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
@@ -1031,9 +1038,12 @@ static void hclge_convert_setting_sr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
@@ -1050,9 +1060,13 @@ static void hclge_convert_setting_lr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
@@ -1069,9 +1083,12 @@ static void hclge_convert_setting_cr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ mac->supported);
}
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
@@ -1091,6 +1108,9 @@ static void hclge_convert_setting_kr(struct hclge_mac *mac, u8 speed_ability)
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ mac->supported);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@ -1115,6 +1135,7 @@ static void hclge_convert_setting_fec(struct hclge_mac *mac)
BIT(HNAE3_FEC_AUTO);
break;
case HCLGE_MAC_SPEED_100G:
+ case HCLGE_MAC_SPEED_200G:
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
break;
@@ -1125,7 +1146,7 @@ static void hclge_convert_setting_fec(struct hclge_mac *mac)
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
@@ -1136,7 +1157,7 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
hclge_convert_setting_sr(mac, speed_ability);
hclge_convert_setting_lr(mac, speed_ability);
hclge_convert_setting_cr(mac, speed_ability);
- if (hdev->pdev->revision >= 0x21)
+ if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
@@ -1145,12 +1166,12 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
}
static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
struct hclge_mac *mac = &hdev->hw.mac;
hclge_convert_setting_kr(mac, speed_ability);
- if (hdev->pdev->revision >= 0x21)
+ if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
@@ -1158,7 +1179,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
}
static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
- u8 speed_ability)
+ u16 speed_ability)
{
unsigned long *supported = hdev->hw.mac.supported;
@@ -1188,7 +1209,7 @@ static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
}
-static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
{
u8 media_type = hdev->hw.mac.media_type;
@@ -1200,8 +1221,11 @@ static void hclge_parse_link_mode(struct hclge_dev *hdev, u8 speed_ability)
hclge_parse_backplane_link_mode(hdev, speed_ability);
}
-static u32 hclge_get_max_speed(u8 speed_ability)
+static u32 hclge_get_max_speed(u16 speed_ability)
{
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ return HCLGE_MAC_SPEED_200G;
+
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
return HCLGE_MAC_SPEED_100G;
@@ -1231,8 +1255,11 @@ static u32 hclge_get_max_speed(u8 speed_ability)
static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
{
+#define SPEED_ABILITY_EXT_SHIFT 8
+
struct hclge_cfg_param_cmd *req;
u64 mac_addr_tmp_high;
+ u16 speed_ability_ext;
u64 mac_addr_tmp;
unsigned int i;
@@ -1281,6 +1308,11 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_SPEED_ABILITY_M,
HCLGE_CFG_SPEED_ABILITY_S);
+ speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_EXT_M,
+ HCLGE_CFG_SPEED_ABILITY_EXT_S);
+ cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
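The extension widens the speed mask from 8 to 16 bits: the base field still supplies bits 0-7, while the extension field (bits 10-15 of the same config word, right-aligned by hnae3_get_field()) is shifted up by SPEED_ABILITY_EXT_SHIFT to populate bit 8 and above. A worked example with hypothetical register values:

	u16 speed_ability = 0xFF;	/* base field: all legacy speeds */
	u16 speed_ability_ext = 0x1;	/* extension field, right-aligned */

	speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
	/* speed_ability == 0x1FF: HCLGE_SUPPORT_200G_BIT (BIT(8)) is set */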
@@ -1324,6 +1356,78 @@ static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
return 0;
}
+static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
+static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+ struct hclge_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_dev_specs_0_cmd *req0;
+
+ req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+}
+
+static void hclge_check_dev_specs(struct hclge_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+ if (!dev_specs->max_tm_rate)
+ dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
+static int hclge_query_dev_specs(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* set default specifications, as devices earlier than version V3 do
+ * not support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclge_set_default_dev_specs(hdev);
+ return 0;
+ }
+
+ for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_dev_specs(hdev, desc);
+ hclge_check_dev_specs(hdev);
+
+ return 0;
+}
+
static int hclge_get_cap(struct hclge_dev *hdev)
{
int ret;
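After hclge_query_dev_specs() runs, the limits live in the shared ae_dev, filled either from the firmware response (V3 and later) or from the compile-time defaults, so later init stages read them from one place. For illustration (field names as in the hunk above):

	u16 rss_tbl = hdev->ae_dev->dev_specs.rss_ind_tbl_size;
	u32 max_rate = hdev->ae_dev->dev_specs.max_tm_rate;	/* Mbps */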
@@ -2422,6 +2526,10 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
HCLGE_CFG_SPEED_S, 5);
break;
+ case HCLGE_MAC_SPEED_200G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 8);
+ break;
default:
dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
return -EINVAL;
@@ -2856,7 +2964,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
if (!hdev->support_sfp_query)
return 0;
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
ret = hclge_get_sfp_info(hdev, mac);
else
ret = hclge_get_sfp_speed(hdev, &speed);
@@ -2868,7 +2976,7 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return ret;
}
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
if (mac->speed_type == QUERY_ACTIVE_SPEED) {
hclge_update_port_capability(mac);
return 0;
@@ -3211,7 +3319,7 @@ static int hclge_notify_roce_client(struct hclge_dev *hdev,
enum hnae3_reset_notify_type type)
{
struct hnae3_client *client = hdev->roce_client;
- int ret = 0;
+ int ret;
u16 i;
if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
@@ -3533,7 +3641,7 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev)
/* For revision 0x20, the reset interrupt source
* can only be cleared after hardware reset done
*/
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
clearval);
@@ -3944,6 +4052,9 @@ static void hclge_periodic_service_task(struct hclge_dev *hdev)
{
unsigned long delta = round_jiffies_relative(HZ);
+ if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ return;
+
/* Always handle the link updating to make sure link state is
* updated when it is triggered by mbx.
*/
@@ -4537,7 +4648,7 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
struct hclge_vport *vport = hdev->vport;
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
@@ -4737,13 +4848,14 @@ static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
bool en_bc_pmc = true;
- /* For revision 0x20, if broadcast promisc enabled, vlan filter is
- * always bypassed. So broadcast promisc should be disabled until
- * user enable promisc mode
+ /* For devices whose version is below V2, if broadcast promisc is
+ * enabled, the vlan filter is always bypassed. So broadcast promisc
+ * should stay disabled until the user enables promisc mode
*/
- if (handle->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
@@ -6758,7 +6870,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
* the same, the packets are looped back in the SSU. If SSU loopback
* is disabled, packets can reach MAC even if SMAC is the same as DMAC.
*/
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
@@ -8260,7 +8372,7 @@ static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
HCLGE_FILTER_FE_EGRESS, enable, 0);
hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
@@ -8620,7 +8732,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
int ret;
int i;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* for revision 0x21, vf vlan filter is per function */
for (i = 0; i < hdev->num_alloc_vport; i++) {
vport = &hdev->vport[i];
@@ -8975,7 +9087,7 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
u16 state;
int ret;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vfid);
@@ -9950,6 +10062,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_cmd_uninit;
+ ret = hclge_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
+ ret);
+ goto err_cmd_uninit;
+ }
+
ret = hclge_configure(hdev);
if (ret) {
dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
@@ -10147,7 +10266,7 @@ static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
u32 new_spoofchk = enable ? 1 : 0;
int ret;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
vport = hclge_get_vf_vport(hdev, vf);
@@ -10180,7 +10299,7 @@ static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
int ret;
int i;
- if (hdev->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return 0;
/* resume the vf spoof check state after reset */
@@ -10200,6 +10319,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
u32 new_trusted = enable ? 1 : 0;
bool en_bc_pmc;
int ret;
@@ -10213,7 +10333,7 @@ static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
/* Disable promisc mode for VF if it is not trusted any more. */
if (!enable && vport->vf_info.promisc_enable) {
- en_bc_pmc = hdev->pdev->revision != 0x20;
+ en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
ret = hclge_set_vport_promisc_mode(vport, false, false,
en_bc_pmc);
if (ret)
@@ -11090,7 +11210,7 @@ static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
{
struct hclge_vport *vport = &hdev->vport[0];
struct hnae3_handle *handle = &vport->nic;
- u8 tmp_flags = 0;
+ u8 tmp_flags;
int ret;
if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 9bbdd4557c27..64e6afdb61b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -199,6 +199,7 @@ enum HLCGE_PORT_TYPE {
#define HCLGE_SUPPORT_40G_BIT BIT(5)
#define HCLGE_SUPPORT_100M_BIT BIT(6)
#define HCLGE_SUPPORT_10M_BIT BIT(7)
+#define HCLGE_SUPPORT_200G_BIT BIT(8)
#define HCLGE_SUPPORT_GE \
(HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
@@ -238,7 +239,8 @@ enum HCLGE_MAC_SPEED {
HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
- HCLGE_MAC_SPEED_100G = 100000 /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */
};
enum HCLGE_MAC_DUPLEX {
@@ -266,7 +268,7 @@ struct hclge_mac {
u32 fec_mode; /* active fec mode */
u32 user_fec_mode;
u32 fec_ability;
- int link; /* store the link status of mac & phy (if phy exit) */
+ int link; /* store the link status of mac & phy (if phy exists) */
struct phy_device *phydev;
struct mii_bus *mdio_bus;
phy_interface_t phy_if;
@@ -349,7 +351,7 @@ struct hclge_cfg {
u8 mac_addr[ETH_ALEN];
u8 default_speed;
u32 numa_node_map;
- u8 speed_ability;
+ u16 speed_ability;
u16 umv_space;
};
@@ -749,7 +751,6 @@ struct hclge_dev {
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
u8 hw_tc_map;
- u8 tc_num_last_time;
enum hclge_fc_mode fc_mode_last_time;
u8 support_sfp_query;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 28db13253a5e..15f69fa86323 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -23,14 +23,11 @@ enum hclge_shaper_level {
#define HCLGE_SHAPER_BS_U_DEF 5
#define HCLGE_SHAPER_BS_S_DEF 20
-#define HCLGE_ETHER_MAX_RATE 100000
-
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
* @ir: Rate to be config, its unit is Mbps
* @shaper_level: the shaper level. eg: port, pg, priority, queueset
- * @ir_b: IR_B parameter of IR shaper
- * @ir_u: IR_U parameter of IR shaper
- * @ir_s: IR_S parameter of IR shaper
+ * @ir_para: parameters of IR shaper
+ * @max_tm_rate: max tm rate available to config
*
* the formula:
*
@@ -41,7 +38,8 @@ enum hclge_shaper_level {
* @return: 0: calculation successful, negative: fail
*/
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
- u8 *ir_b, u8 *ir_u, u8 *ir_s)
+ struct hclge_shaper_ir_para *ir_para,
+ u32 max_tm_rate)
{
#define DIVISOR_CLK (1000 * 8)
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
@@ -59,7 +57,7 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
/* Calc tick */
if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
- ir > HCLGE_ETHER_MAX_RATE)
+ ir > max_tm_rate)
return -EINVAL;
tick = tick_array[shaper_level];
@@ -74,9 +72,9 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
if (ir_calc == ir) {
- *ir_b = 126;
- *ir_u = 0;
- *ir_s = 0;
+ ir_para->ir_b = 126;
+ ir_para->ir_u = 0;
+ ir_para->ir_s = 0;
return 0;
} else if (ir_calc > ir) {
@@ -86,8 +84,8 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
}
- *ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
- DIVISOR_CLK;
+ ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+ (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
} else {
/* Increasing the numerator to select ir_u value */
u32 numerator;
@@ -99,15 +97,16 @@ static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
}
if (ir_calc == ir) {
- *ir_b = 126;
+ ir_para->ir_b = 126;
} else {
u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
- *ir_b = (ir * tick + (denominator >> 1)) / denominator;
+ ir_para->ir_b = (ir * tick + (denominator >> 1)) /
+ denominator;
}
}
- *ir_u = ir_u_calc;
- *ir_s = ir_s_calc;
+ ir_para->ir_u = ir_u_calc;
+ ir_para->ir_s = ir_s_calc;
return 0;
}
@@ -400,21 +399,22 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
struct hclge_port_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
struct hclge_desc desc;
- u8 ir_u, ir_b, ir_s;
u32 shapping_para;
int ret;
- ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
- HCLGE_SHAPER_LVL_PORT,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
- shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
@@ -515,21 +515,23 @@ int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
struct hclge_dev *hdev = vport->back;
struct hclge_desc desc;
- u8 ir_b, ir_u, ir_s;
u32 shaper_para;
int ret, i;
if (!max_tx_rate)
- max_tx_rate = HCLGE_ETHER_MAX_RATE;
+ max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
- &ir_b, &ir_u, &ir_s);
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
@@ -668,7 +670,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].pg_id = i;
hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
- hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
+ hdev->tm_info.pg_info[i].bw_limit =
+ hdev->ae_dev->dev_specs.max_tm_rate;
if (i != 0)
continue;
@@ -729,7 +732,8 @@ static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
u32 i;
@@ -741,10 +745,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
/* Pg to pri */
for (i = 0; i < hdev->tm_info.num_pg; i++) {
/* Calc shaper para */
- ret = hclge_shaper_para_calc(
- hdev->tm_info.pg_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PG,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_PG,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
@@ -757,7 +760,9 @@ static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pg_shapping_cfg(hdev,
@@ -861,16 +866,16 @@ static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
u32 i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
- ret = hclge_shaper_para_calc(
- hdev->tm_info.tc_info[i].bw_limit,
- HCLGE_SHAPER_LVL_PRI,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
@@ -882,7 +887,9 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
@@ -897,12 +904,13 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
struct hclge_dev *hdev = vport->back;
- u8 ir_u, ir_b, ir_s;
+ struct hclge_shaper_ir_para ir_para;
u32 shaper_para;
int ret;
ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
- &ir_b, &ir_u, &ir_s);
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
if (ret)
return ret;
@@ -914,7 +922,8 @@ static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
if (ret)
return ret;
- shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
HCLGE_SHAPER_BS_U_DEF,
HCLGE_SHAPER_BS_S_DEF);
ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
@@ -929,15 +938,15 @@ static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
struct hclge_dev *hdev = vport->back;
- u8 ir_u, ir_b, ir_s;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
u32 i;
int ret;
for (i = 0; i < kinfo->num_tc; i++) {
- ret = hclge_shaper_para_calc(
- hdev->tm_info.tc_info[i].bw_limit,
- HCLGE_SHAPER_LVL_QSET,
- &ir_b, &ir_u, &ir_s);
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_QSET,
+ &ir_para, max_tm_rate);
if (ret)
return ret;
}
@@ -1355,7 +1364,7 @@ static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
- int ret = 0;
+ int ret;
int i;
for (i = 0; i < hdev->tm_info.num_tc; i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 45bcb67f90fd..bb2a2d8e9259 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -19,6 +19,8 @@
#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
+#define HCLGE_ETHER_MAX_RATE 100000
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -139,6 +141,12 @@ struct hclge_port_shapping_cmd {
__le32 port_shapping_para;
};
+struct hclge_shaper_ir_para {
+ u8 ir_b; /* IR_B parameter of IR shaper */
+ u8 ir_u; /* IR_U parameter of IR shaper */
+ u8 ir_s; /* IR_S parameter of IR shaper */
+};
+
#define hclge_tm_set_field(dest, string, val) \
hnae3_set_field((dest), \
(HCLGE_TM_SHAP_##string##_MSK), \
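hclge_shaper_ir_para bundles the three coefficients that hclge_shaper_para_calc() previously returned through separate u8 pointers, trimming each call site to the rate, the level, the struct, and the rate cap. A usage sketch mirroring the rewritten call sites in hclge_tm.c (rate is a hypothetical input in Mbps):

	struct hclge_shaper_ir_para ir_para;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PORT, &ir_para,
				     max_tm_rate);
	if (!ret)
		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);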
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
index fec65239a3c8..66866c1cfb12 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -313,9 +313,34 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
return status;
}
-static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
- u32 *version)
+static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
+ struct hclgevf_query_version_cmd *cmd)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ u32 caps;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
+ set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
+ set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
+static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
struct hclgevf_query_version_cmd *resp;
struct hclgevf_desc desc;
int status;
@@ -323,9 +348,20 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
resp = (struct hclgevf_query_version_cmd *)desc.data;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
- status = hclgevf_cmd_send(hw, &desc, 1);
- if (!status)
- *version = le32_to_cpu(resp->firmware);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ return status;
+
+ hdev->fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= hdev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclgevf_set_default_capability(hdev);
+
+ hclgevf_parse_capability(hdev, resp);
return status;
}
@@ -364,7 +400,6 @@ err_csq:
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
- u32 version;
int ret;
spin_lock_bh(&hdev->hw.cmq.csq.lock);
@@ -395,23 +430,22 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
goto err_cmd_init;
}
- /* get firmware version */
- ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
+ /* get version and device capabilities */
+ ret = hclgevf_cmd_query_version_and_capability(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
- "failed(%d) to query firmware version\n", ret);
+ "failed to query version and capabilities, ret = %d\n", ret);
goto err_cmd_init;
}
- hdev->fw_version = version;
dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE3_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
HNAE3_FW_VERSION_BYTE3_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE2_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
HNAE3_FW_VERSION_BYTE2_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE1_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
HNAE3_FW_VERSION_BYTE1_SHIFT),
- hnae3_get_field(version, HNAE3_FW_VERSION_BYTE0_MASK,
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
return 0;
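The VF now folds the firmware-reported hardware word and the PCI revision into a single monotonically comparable dev_version, which is what lets the feature gates below drop their raw pdev->revision checks. A worked illustration, assuming HNAE3_PCI_REVISION_BIT_SIZE is 8 as in hnae3.h:

	/* hardware word 0x0 on a device with PCI revision 0x21:
	 *   dev_version = (0x0 << 8) | 0x21 = 0x21
	 * which satisfies dev_version >= HNAE3_DEVICE_VERSION_V2, matching
	 * the devices the old pdev->revision >= 0x21 tests accepted.
	 */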
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
index 40d6e602ab51..9460c128c095 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -91,6 +91,8 @@ enum hclgevf_opcode_type {
/* Generic command */
HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
+
/* TQP command */
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
@@ -141,9 +143,26 @@ struct hclgevf_ctrl_vector_chain {
u8 resv;
};
+enum HCLGEVF_CAP_BITS {
+ HCLGEVF_CAP_UDP_GSO_B,
+ HCLGEVF_CAP_QB_B,
+ HCLGEVF_CAP_FD_FORWARD_TC_B,
+ HCLGEVF_CAP_PTP_B,
+ HCLGEVF_CAP_INT_QL_B,
+ HCLGEVF_CAP_SIMPLE_BD_B,
+ HCLGEVF_CAP_TX_PUSH_B,
+ HCLGEVF_CAP_PHY_IMP_B,
+ HCLGEVF_CAP_TQP_TXRX_INDEP_B,
+ HCLGEVF_CAP_HW_PAD_B,
+ HCLGEVF_CAP_STASH_B,
+};
+
+#define HCLGEVF_QUERY_CAP_LENGTH 3
struct hclgevf_query_version_cmd {
__le32 firmware;
- __le32 firmware_rsv[5];
+ __le32 hardware;
+ __le32 rsv;
+ __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
};
#define HCLGEVF_MSIX_OFT_ROCEE_S 0
@@ -253,6 +272,19 @@ struct hclgevf_cfg_tx_queue_pointer_cmd {
#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
+#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
+
+struct hclgevf_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1[5];
+};
+
static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index e972138a14ad..50c84c5e65d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -19,8 +19,9 @@ static struct hnae3_ae_algo ae_algovf;
static struct workqueue_struct *hclgevf_wq;
static const struct pci_device_id ae_algovf_pci_tbl[] = {
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
- {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
/* required last entry */
{0, }
};
@@ -171,7 +172,7 @@ static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
struct hnae3_knic_private_info *kinfo = &handle->kinfo;
u8 *buff = data;
- int i = 0;
+ int i;
for (i = 0; i < kinfo->num_tqps; i++) {
struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
@@ -745,7 +746,7 @@ static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int i, ret;
- if (handle->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* Get hash algorithm */
if (hfunc) {
switch (rss_cfg->hash_algo) {
@@ -791,7 +792,7 @@ static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret, i;
- if (handle->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
/* Set the RSS Hash Key if specified by the user */
if (key) {
switch (hfunc) {
@@ -863,7 +864,7 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
u8 tuple_sets;
int ret;
- if (handle->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
if (nfc->data &
@@ -941,7 +942,7 @@ static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
u8 tuple_sets;
- if (handle->pdev->revision == 0x20)
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
return -EOPNOTSUPP;
nfc->data = 0;
@@ -1154,10 +1155,9 @@ static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
bool en_mc_pmc)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
- struct pci_dev *pdev = hdev->pdev;
bool en_bc_pmc;
- en_bc_pmc = pdev->revision != 0x20;
+ en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
en_bc_pmc);
@@ -1702,6 +1702,26 @@ static int hclgevf_notify_client(struct hclgevf_dev *hdev,
return ret;
}
+static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ struct hnae3_handle *handle = &hdev->roce;
+ int ret;
+
+ if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+}
+
static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_WAIT_US 20000
@@ -1788,10 +1808,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
{
#define HCLGEVF_RESET_SYNC_TIME 100
- struct hclge_vf_to_pf_msg send_msg;
- int ret = 0;
-
if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
if (ret) {
@@ -1806,10 +1826,10 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
/* inform hardware that preparatory work is done */
msleep(HCLGEVF_RESET_SYNC_TIME);
hclgevf_reset_handshake(hdev, true);
- dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n",
- hdev->reset_type, ret);
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
+ hdev->reset_type);
- return ret;
+ return 0;
}
static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
@@ -1865,6 +1885,11 @@ static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
hdev->rst_stats.rst_cnt++;
+ /* perform reset of the stack & ae device for a client */
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
rtnl_lock();
/* bring down the nic to stop any ongoing TX/RX */
ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
@@ -1880,6 +1905,9 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
int ret;
hdev->rst_stats.hw_rst_done_cnt++;
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
rtnl_lock();
/* now, re-initialize the nic client and ae device */
@@ -1890,6 +1918,18 @@ static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
return ret;
}
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ /* propagate the RoCE notify error unless the reset has already
+ * failed HCLGEVF_RESET_MAX_FAIL_CNT - 1 times
+ */
+ if (ret &&
+ hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
+ return ret;
+
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ return ret;
+
hdev->last_reset_time = jiffies;
hdev->rst_stats.rst_done_cnt++;
hdev->rst_stats.rst_fail_cnt = 0;
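Taken together with the prepare-side hunk above, a full VF reset cycle now brackets the NIC work with RoCE client notifications in this order:

	/* prepare:  RoCE HNAE3_DOWN_CLIENT, then NIC HNAE3_DOWN_CLIENT
	 * rebuild:  RoCE HNAE3_UNINIT_CLIENT, NIC re-init,
	 *           RoCE HNAE3_INIT_CLIENT, then RoCE HNAE3_UP_CLIENT
	 */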
@@ -2186,6 +2226,9 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
unsigned long delta = round_jiffies_relative(HZ);
struct hnae3_handle *handle = &hdev->nic;
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ return;
+
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
delta = jiffies - hdev->last_serv_processed;
@@ -2284,7 +2327,7 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
* register, so we should just write 0 to the bit we are
* handling, and keep other bits as cmdq_stat_reg.
*/
- if (hdev->pdev->revision >= 0x21)
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
*clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
else
*clearval = cmdq_stat_reg &
@@ -2427,7 +2470,7 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
tuple_sets = &rss_cfg->rss_tuple_sets;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
HCLGEVF_RSS_KEY_SIZE);
@@ -2452,7 +2495,7 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
int ret;
- if (hdev->pdev->revision >= 0x21) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
rss_cfg->rss_hash_key);
if (ret)
@@ -2551,13 +2594,7 @@ static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
static int hclgevf_client_start(struct hnae3_handle *handle)
{
- int ret;
-
- ret = hclgevf_set_alive(handle, true);
- if (ret)
- return ret;
-
- return 0;
+ return hclgevf_set_alive(handle, true);
}
static void hclgevf_client_stop(struct hnae3_handle *handle)
@@ -2760,6 +2797,7 @@ static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
if (ret)
return ret;
+ set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
hnae3_set_client_init_flag(client, ae_dev, 1);
return 0;
@@ -2820,6 +2858,7 @@ static void hclgevf_uninit_client_instance(struct hnae3_client *client,
/* un-init roce, if it exists */
if (hdev->roce_client) {
+ clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
hdev->roce_client = NULL;
hdev->roce.client = NULL;
@@ -2942,6 +2981,76 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
return 0;
}
+static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num =
+ HCLGEVF_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+}
+
+static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
+ struct hclgevf_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclgevf_dev_specs_0_cmd *req0;
+
+ req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+}
+
+static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+}
+
+static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* set default specifications, as devices earlier than version V3 do
+ * not support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclgevf_set_default_dev_specs(hdev);
+ return 0;
+ }
+
+ for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc[i],
+ HCLGEVF_OPC_QUERY_DEV_SPECS, true);
+ desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
+ }
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
+ true);
+
+ ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclgevf_parse_dev_specs(hdev, desc);
+ hclgevf_check_dev_specs(hdev);
+
+ return 0;
+}
+
static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
{
struct pci_dev *pdev = hdev->pdev;
@@ -3050,6 +3159,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
goto err_cmd_init;
+ ret = hclgevf_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to query dev specifications, ret = %d\n", ret);
+ goto err_cmd_init;
+ }
+
ret = hclgevf_init_msi(hdev);
if (ret) {
dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
@@ -3345,6 +3461,13 @@ static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
}
+static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+}
+
static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3530,6 +3653,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_link_mode = hclgevf_get_link_mode,
.set_promisc_mode = hclgevf_set_promisc_mode,
.request_update_promisc_mode = hclgevf_request_update_promisc_mode,
+ .get_cmdq_stat = hclgevf_get_cmdq_stat,
};
static struct hnae3_ae_algo ae_algovf = {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index c1fac8920ae3..c5bcc3894fd5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -139,6 +139,7 @@ enum hclgevf_states {
HCLGEVF_STATE_IRQ_INITED,
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
+ HCLGEVF_STATE_ROCE_REGISTERED,
/* task states */
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 7df5d7d211d4..883d0d7c6858 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -210,7 +210,7 @@ static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
- * @value: register value
+ * @data: register value
*
* Return 0 on success, negative on failure
*/
@@ -273,7 +273,6 @@ static int hns_mdio_write(struct mii_bus *bus,
* @bus: mdio bus
* @phy_id: phy id
* @regnum: register num
- * @value: register value
*
* Return phy register value
*/
diff --git a/drivers/net/ethernet/huawei/hinic/Makefile b/drivers/net/ethernet/huawei/hinic/Makefile
index 67b59d0ba769..2f89119c9b69 100644
--- a/drivers/net/ethernet/huawei/hinic/Makefile
+++ b/drivers/net/ethernet/huawei/hinic/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_HINIC) += hinic.o
hinic-y := hinic_main.o hinic_tx.o hinic_rx.o hinic_port.o hinic_hw_dev.o \
hinic_hw_io.o hinic_hw_qp.o hinic_hw_cmdq.o hinic_hw_wq.o \
hinic_hw_mgmt.o hinic_hw_api_cmd.o hinic_hw_eqs.o hinic_hw_if.o \
- hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o hinic_sriov.o
+ hinic_common.o hinic_ethtool.o hinic_devlink.o hinic_hw_mbox.o \
+ hinic_sriov.o hinic_debugfs.o
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
new file mode 100644
index 000000000000..19eb839177ec
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hinic_debugfs.h"
+
+static struct dentry *hinic_dbgfs_root;
+
+enum sq_dbg_info {
+ GLB_SQ_ID,
+ SQ_PI,
+ SQ_CI,
+ SQ_FI,
+ SQ_MSIX_ENTRY,
+};
+
+static char *sq_fields[] = {"glb_sq_id", "sq_pi", "sq_ci", "sq_fi", "sq_msix_entry"};
+
+static u64 hinic_dbg_get_sq_info(struct hinic_dev *nic_dev, struct hinic_sq *sq, int idx)
+{
+ struct hinic_wq *wq = sq->wq;
+
+ switch (idx) {
+ case GLB_SQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + sq->qid;
+ case SQ_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case SQ_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case SQ_FI:
+ return be16_to_cpu(*(__be16 *)(sq->hw_ci_addr)) & wq->mask;
+ case SQ_MSIX_ENTRY:
+ return sq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum rq_dbg_info {
+ GLB_RQ_ID,
+ RQ_HW_PI,
+ RQ_SW_CI,
+ RQ_SW_PI,
+ RQ_MSIX_ENTRY,
+};
+
+static char *rq_fields[] = {"glb_rq_id", "rq_hw_pi", "rq_sw_ci", "rq_sw_pi", "rq_msix_entry"};
+
+static u64 hinic_dbg_get_rq_info(struct hinic_dev *nic_dev, struct hinic_rq *rq, int idx)
+{
+ struct hinic_wq *wq = rq->wq;
+
+ switch (idx) {
+ case GLB_RQ_ID:
+ return nic_dev->hwdev->func_to_io.global_qpn + rq->qid;
+ case RQ_HW_PI:
+ return be16_to_cpu(*(__be16 *)(rq->pi_virt_addr)) & wq->mask;
+ case RQ_SW_CI:
+ return atomic_read(&wq->cons_idx) & wq->mask;
+ case RQ_SW_PI:
+ return atomic_read(&wq->prod_idx) & wq->mask;
+ case RQ_MSIX_ENTRY:
+ return rq->msix_entry;
+ }
+
+ return 0;
+}
+
+enum func_tbl_info {
+ VALID,
+ RX_MODE,
+ MTU,
+ RQ_DEPTH,
+ QUEUE_NUM,
+};
+
+static char *func_table_fields[] = {"valid", "rx_mode", "mtu", "rq_depth", "cfg_q_num"};
+
+static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx)
+{
+ struct tag_sml_funcfg_tbl *funcfg_table_elem;
+ struct hinic_cmd_lt_rd *read_data;
+ u16 out_size = sizeof(*read_data);
+	int ret = ~0;
+	int err;
+
+ read_data = kzalloc(sizeof(*read_data), GFP_KERNEL);
+ if (!read_data)
+ return ~0;
+
+ read_data->node = TBL_ID_FUNC_CFG_SM_NODE;
+ read_data->inst = TBL_ID_FUNC_CFG_SM_INST;
+ read_data->entry_size = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+ read_data->lt_index = HINIC_HWIF_FUNC_IDX(nic_dev->hwdev->hwif);
+ read_data->len = HINIC_FUNCTION_CONFIGURE_TABLE_SIZE;
+
+ err = hinic_port_msg_cmd(nic_dev->hwdev, HINIC_PORT_CMD_RD_LINE_TBL, read_data,
+ sizeof(*read_data), read_data, &out_size);
+ if (err || out_size != sizeof(*read_data) || read_data->status) {
+ netif_err(nic_dev, drv, nic_dev->netdev,
+ "Failed to get func table, err: %d, status: 0x%x, out size: 0x%x\n",
+ err, read_data->status, out_size);
+ kfree(read_data);
+ return ~0;
+ }
+
+ funcfg_table_elem = (struct tag_sml_funcfg_tbl *)read_data->data;
+
+	switch (idx) {
+	case VALID:
+		ret = funcfg_table_elem->dw0.bs.valid;
+		break;
+	case RX_MODE:
+		ret = funcfg_table_elem->dw0.bs.nic_rx_mode;
+		break;
+	case MTU:
+		ret = funcfg_table_elem->dw1.bs.mtu;
+		break;
+	case RQ_DEPTH:
+		ret = funcfg_table_elem->dw13.bs.cfg_rq_depth;
+		break;
+	case QUEUE_NUM:
+		ret = funcfg_table_elem->dw13.bs.cfg_q_num;
+		break;
+	}
+
+	/* read_data must be freed on every path, not only on failure */
+	kfree(read_data);
+
+	return ret;
+}
+
+static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ struct hinic_debug_priv *dbg;
+ char ret_buf[20];
+ int *desc;
+ u64 out;
+ int ret;
+
+ desc = filp->private_data;
+ dbg = container_of(desc, struct hinic_debug_priv, field_id[*desc]);
+
+ switch (dbg->type) {
+ case HINIC_DBG_SQ_INFO:
+ out = hinic_dbg_get_sq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_RQ_INFO:
+ out = hinic_dbg_get_rq_info(dbg->dev, dbg->object, *desc);
+ break;
+
+ case HINIC_DBG_FUNC_TABLE:
+ out = hinic_dbg_get_func_table(dbg->dev, *desc);
+ break;
+
+ default:
+ netif_warn(dbg->dev, drv, dbg->dev->netdev, "Invalid hinic debug cmd: %d\n",
+ dbg->type);
+ return -EINVAL;
+ }
+
+ ret = snprintf(ret_buf, sizeof(ret_buf), "0x%llx\n", out);
+
+ return simple_read_from_buffer(buffer, count, ppos, ret_buf, ret);
+}
+
+static const struct file_operations hinic_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hinic_dbg_cmd_read,
+};
+
+static int create_dbg_files(struct hinic_dev *dev, enum hinic_dbg_type type, void *data,
+ struct dentry *root, struct hinic_debug_priv **dbg, char **field,
+ int nfile)
+{
+ struct hinic_debug_priv *tmp;
+ int i;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ tmp->dev = dev;
+ tmp->object = data;
+ tmp->type = type;
+ tmp->root = root;
+
+ for (i = 0; i < nfile; i++) {
+ tmp->field_id[i] = i;
+ debugfs_create_file(field[i], 0400, root, &tmp->field_id[i], &hinic_dbg_cmd_fops);
+ }
+
+ *dbg = tmp;
+
+ return 0;
+}
+
+static void rem_dbg_files(struct hinic_debug_priv *dbg)
+{
+ if (dbg->type != HINIC_DBG_FUNC_TABLE)
+ debugfs_remove_recursive(dbg->root);
+
+ kfree(dbg);
+}
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id)
+{
+ struct hinic_sq *sq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ sq = dev->txqs[sq_id].sq;
+
+ sprintf(sub_dir, "0x%x", sq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->sq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_SQ_INFO, sq, root, &sq->dbg, sq_fields,
+ ARRAY_SIZE(sq_fields));
+}
+
+void hinic_sq_debug_rem(struct hinic_sq *sq)
+{
+ if (sq->dbg)
+ rem_dbg_files(sq->dbg);
+}
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id)
+{
+ struct hinic_rq *rq;
+ struct dentry *root;
+ char sub_dir[16];
+
+ rq = dev->rxqs[rq_id].rq;
+
+ sprintf(sub_dir, "0x%x", rq_id);
+
+ root = debugfs_create_dir(sub_dir, dev->rq_dbgfs);
+
+ return create_dbg_files(dev, HINIC_DBG_RQ_INFO, rq, root, &rq->dbg, rq_fields,
+ ARRAY_SIZE(rq_fields));
+}
+
+void hinic_rq_debug_rem(struct hinic_rq *rq)
+{
+ if (rq->dbg)
+ rem_dbg_files(rq->dbg);
+}
+
+int hinic_func_table_debug_add(struct hinic_dev *dev)
+{
+ if (HINIC_IS_VF(dev->hwdev->hwif))
+ return 0;
+
+ return create_dbg_files(dev, HINIC_DBG_FUNC_TABLE, dev, dev->func_tbl_dbgfs, &dev->dbg,
+ func_table_fields, ARRAY_SIZE(func_table_fields));
+}
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev)
+{
+ if (!HINIC_IS_VF(dev->hwdev->hwif) && dev->dbg)
+ rem_dbg_files(dev->dbg);
+}
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->sq_dbgfs = debugfs_create_dir("SQs", nic_dev->dbgfs_root);
+}
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->sq_dbgfs);
+}
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->rq_dbgfs = debugfs_create_dir("RQs", nic_dev->dbgfs_root);
+}
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->rq_dbgfs);
+}
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ nic_dev->func_tbl_dbgfs = debugfs_create_dir("func_table", nic_dev->dbgfs_root);
+}
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev)
+{
+ if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
+ debugfs_remove_recursive(nic_dev->func_tbl_dbgfs);
+}
+
+void hinic_dbg_init(struct hinic_dev *nic_dev)
+{
+ nic_dev->dbgfs_root = debugfs_create_dir(pci_name(nic_dev->hwdev->hwif->pdev),
+ hinic_dbgfs_root);
+}
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev)
+{
+ debugfs_remove_recursive(nic_dev->dbgfs_root);
+ nic_dev->dbgfs_root = NULL;
+}
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hinic_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+}
+
+void hinic_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hinic_dbgfs_root);
+ hinic_dbgfs_root = NULL;
+}
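
The read path above hinges on one trick: create_dbg_files() stores the value i in field_id[i] and hands debugfs a pointer to that slot, so hinic_dbg_cmd_read() can use *desc both as the field index and, via container_of(), to recover the owning hinic_debug_priv. A minimal user-space sketch of the same pattern (demo_priv and main() are illustrative stand-ins, not driver code):

#include <stddef.h>
#include <stdio.h>

/* same shape as the kernel's container_of(); with glibc/GCC, offsetof
 * is __builtin_offsetof, which accepts the variable array index below
 */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_priv {
	int type;
	int field_id[64];
};

int main(void)
{
	struct demo_priv p = { .type = 7 };
	struct demo_priv *owner;
	int i, *desc;

	for (i = 0; i < 64; i++)
		p.field_id[i] = i;	/* slot i holds i, as in create_dbg_files() */

	desc = &p.field_id[2];		/* what filp->private_data points at */
	owner = container_of(desc, struct demo_priv, field_id[*desc]);

	printf("type=%d field=%d\n", owner->type, *desc);	/* type=7 field=2 */
	return 0;
}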
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
new file mode 100644
index 000000000000..e9e00cfa1329
--- /dev/null
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Huawei HiNIC PCI Express Linux driver
+ * Copyright(c) 2017 Huawei Technologies Co., Ltd
+ */
+
+#ifndef HINIC_DEBUGFS_H
+#define HINIC_DEBUGFS_H
+
+#include "hinic_dev.h"
+
+#define TBL_ID_FUNC_CFG_SM_NODE 11
+#define TBL_ID_FUNC_CFG_SM_INST 1
+
+#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
+#define HINIC_FUNCTION_CONFIGURE_TABLE 1
+
+struct hinic_cmd_lt_rd {
+ u8 status;
+ u8 version;
+ u8 rsvd0[6];
+
+ unsigned char node;
+ unsigned char inst;
+ unsigned char entry_size;
+ unsigned char rsvd;
+ unsigned int lt_index;
+ unsigned int offset;
+ unsigned int len;
+ unsigned char data[100];
+};
+
+struct tag_sml_funcfg_tbl {
+ union {
+ struct {
+ u32 rsvd0 :8;
+ u32 nic_rx_mode :5;
+ u32 rsvd1 :18;
+ u32 valid :1;
+ } bs;
+
+ u32 value;
+ } dw0;
+
+ union {
+ struct {
+ u32 vlan_id :12;
+ u32 vlan_mode :3;
+ u32 fast_recycled_mode :1;
+ u32 mtu :16;
+ } bs;
+
+ u32 value;
+ } dw1;
+
+ u32 dw2;
+ u32 dw3;
+ u32 dw4;
+ u32 dw5;
+ u32 dw6;
+ u32 dw7;
+ u32 dw8;
+ u32 dw9;
+ u32 dw10;
+ u32 dw11;
+ u32 dw12;
+
+ union {
+ struct {
+ u32 rsvd2 :15;
+ u32 cfg_q_num :9;
+ u32 cfg_rq_depth :6;
+ u32 vhd_type :2;
+ } bs;
+
+ u32 value;
+ } dw13;
+
+ u32 dw14;
+ u32 dw15;
+};
+
+int hinic_sq_debug_add(struct hinic_dev *dev, u16 sq_id);
+
+void hinic_sq_debug_rem(struct hinic_sq *sq);
+
+int hinic_rq_debug_add(struct hinic_dev *dev, u16 rq_id);
+
+void hinic_rq_debug_rem(struct hinic_rq *rq);
+
+int hinic_func_table_debug_add(struct hinic_dev *dev);
+
+void hinic_func_table_debug_rem(struct hinic_dev *dev);
+
+void hinic_sq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_sq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_rq_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_init(struct hinic_dev *nic_dev);
+
+void hinic_func_tbl_dbgfs_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_init(struct hinic_dev *nic_dev);
+
+void hinic_dbg_uninit(struct hinic_dev *nic_dev);
+
+void hinic_dbg_register_debugfs(const char *debugfs_dir_name);
+
+void hinic_dbg_unregister_debugfs(void);
+
+#endif
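
struct tag_sml_funcfg_tbl simply overlays the 64-byte function-configure entry returned in hinic_cmd_lt_rd.data: named bitfields for the dwords the debugfs files expose, bare u32 placeholders for the rest (16 dwords in total, matching HINIC_FUNCTION_CONFIGURE_TABLE_SIZE). Decoding is a plain copy, along the lines of this fragment, where raw_entry is a hypothetical pointer to the returned payload and the bitfield layout assumes the little-endian ordering the driver relies on:

	struct tag_sml_funcfg_tbl tbl;

	memcpy(&tbl, raw_entry, sizeof(tbl));	/* 64-byte LT read payload */
	pr_info("valid=%u mtu=%u q_num=%u rq_depth=%u\n",
		tbl.dw0.bs.valid, tbl.dw1.bs.mtu,
		tbl.dw13.bs.cfg_q_num, tbl.dw13.bs.cfg_rq_depth);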
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
index 0a1e20edf7cf..fb3e89141a0d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_dev.h
@@ -58,6 +58,20 @@ struct hinic_intr_coal_info {
u8 resend_timer_cfg;
};
+enum hinic_dbg_type {
+ HINIC_DBG_SQ_INFO,
+ HINIC_DBG_RQ_INFO,
+ HINIC_DBG_FUNC_TABLE,
+};
+
+struct hinic_debug_priv {
+ struct hinic_dev *dev;
+ void *object;
+ enum hinic_dbg_type type;
+ struct dentry *root;
+ int field_id[64];
+};
+
struct hinic_dev {
struct net_device *netdev;
struct hinic_hwdev *hwdev;
@@ -97,6 +111,12 @@ struct hinic_dev {
int lb_test_rx_idx;
int lb_pkt_len;
u8 *lb_test_rx_buf;
+
+ struct dentry *dbgfs_root;
+ struct dentry *sq_dbgfs;
+ struct dentry *rq_dbgfs;
+ struct dentry *func_tbl_dbgfs;
+ struct hinic_debug_priv *dbg;
struct devlink *devlink;
bool cable_unplugged;
bool module_unrecognized;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 16bda7381ba0..2630d667f393 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -281,18 +281,14 @@ static int hinic_firmware_update(struct hinic_devlink_priv *priv,
}
static int hinic_devlink_flash_update(struct devlink *devlink,
- const char *file_name,
- const char *component,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct hinic_devlink_priv *priv = devlink_priv(devlink);
const struct firmware *fw;
int err;
- if (component)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&fw, file_name,
+ err = request_firmware_direct(&fw, params->file_name,
&priv->hwdev->hwif->pdev->dev);
if (err)
return err;
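
This is the tree-wide devlink flash API change: the core now packs the request into struct devlink_flash_update_params and rejects component-specific updates itself, so the per-driver "if (component)" boilerplate goes away. Under the new signature the whole callback reduces to roughly this sketch (mirroring the hunk above; error handling trimmed, and hinic_firmware_update() is the driver's own helper seen earlier in this file):

static int flash_update(struct devlink *devlink,
			struct devlink_flash_update_params *params,
			struct netlink_ext_ack *extack)
{
	struct hinic_devlink_priv *priv = devlink_priv(devlink);
	const struct firmware *fw;
	int err;

	/* component filtering now happens once, in the devlink core */
	err = request_firmware_direct(&fw, params->file_name,
				      &priv->hwdev->hwif->pdev->dev);
	if (err)
		return err;

	err = hinic_firmware_update(priv, fw, extack);
	release_firmware(fw);
	return err;
}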
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
index 29e88e25a4a4..4e4029d5c8e1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c
@@ -373,7 +373,7 @@ static int wait_for_api_cmd_completion(struct hinic_api_cmd_chain *chain)
* @chain: chain for the command
* @dest: destination node on the card that will receive the command
* @cmd: command data
- * @size: the command size
+ * @cmd_size: the command size
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index e0eb294779ec..5a6bbee819cd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -784,7 +784,7 @@ static void free_cmdq(struct hinic_cmdq *cmdq)
* init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
* @hwdev: the NIC HW device
* @cmdqs: cmdqs to write the ctxts for
- * &db_area: db_area for all the cmdqs
+ * @db_area: db_area for all the cmdqs
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 0c737765d113..0c74f6674634 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -437,6 +437,8 @@ static int get_base_qpn(struct hinic_hwdev *hwdev, u16 *base_qpn)
/**
* hinic_hwdev_ifup - Preparing the HW for passing IO
* @hwdev: the NIC HW device
+ * @sq_depth: the send queue depth
+ * @rq_depth: the receive queue depth
*
* Return 0 - Success, negative - Failure
**/
@@ -465,6 +467,7 @@ int hinic_hwdev_ifup(struct hinic_hwdev *hwdev, u16 sq_depth, u16 rq_depth)
func_to_io->hwdev = hwdev;
func_to_io->sq_depth = sq_depth;
func_to_io->rq_depth = rq_depth;
+ func_to_io->global_qpn = base_qpn;
err = hinic_io_init(func_to_io, hwif, nic_cap->max_qps, num_ceqs,
ceq_msix_entries);
@@ -581,6 +584,7 @@ void hinic_hwdev_cb_unregister(struct hinic_hwdev *hwdev,
/**
* nic_mgmt_msg_handler - nic mgmt event handler
* @handle: private data for the handler
+ * @cmd: message command
* @buf_in: input buffer
* @in_size: input size
* @buf_out: output buffer
@@ -908,6 +912,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
/**
* hinic_init_hwdev - Initialize the NIC HW
* @pdev: the NIC pci device
+ * @devlink: the pointer to the hinic devlink
*
* Return initialized NIC HW device
*
@@ -1120,7 +1125,7 @@ int hinic_hwdev_msix_cnt_set(struct hinic_hwdev *hwdev, u16 msix_index)
* @msix_index: msix_index
* @pending_limit: the maximum pending interrupt events (unit 8)
* @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
+ * @lli_timer_cfg: replenishing period for low latency credit (unit 8 us)
* @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
* @resend_timer: maximum wait for resending msix (unit coalesc period)
*
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 701eb81e09a7..416492e48274 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -96,6 +96,8 @@ enum hinic_port_cmd {
HINIC_PORT_CMD_RSS_TEMP_MGR = 49,
+ HINIC_PORT_CMD_RD_LINE_TBL = 57,
+
HINIC_PORT_CMD_RSS_CFG = 66,
HINIC_PORT_CMD_FWCTXT_INIT = 69,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
index ca8cb68a8d20..19942fef99d9 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c
@@ -106,7 +106,7 @@ enum eq_arm_state {
* @aeqs: pointer to Async eqs of the chip
* @event: aeq event to register callback for it
* @handle: private data will be used by the callback
- * @hw_handler: callback function
+ * @hwe_handler: callback function
**/
void hinic_aeq_register_hw_cb(struct hinic_aeqs *aeqs,
enum hinic_aeq_type event, void *handle,
@@ -188,6 +188,7 @@ static u8 eq_cons_idx_checksum_set(u32 val)
/**
* eq_update_ci - update the HW cons idx of event queue
* @eq: the event queue to update the cons idx for
+ * @arm_state: the arm bit value of eq's interrupt
**/
static void eq_update_ci(struct hinic_eq *eq, u32 arm_state)
{
@@ -368,11 +369,11 @@ static void eq_irq_work(struct work_struct *work)
/**
* ceq_tasklet - the tasklet of the EQ that received the event
- * @ceq_data: the eq
+ * @t: the tasklet struct pointer
**/
-static void ceq_tasklet(unsigned long ceq_data)
+static void ceq_tasklet(struct tasklet_struct *t)
{
- struct hinic_eq *ceq = (struct hinic_eq *)ceq_data;
+ struct hinic_eq *ceq = from_tasklet(ceq, t, ceq_tasklet);
eq_irq_handler(ceq);
}
@@ -782,8 +783,7 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
INIT_WORK(&aeq_work->work, eq_irq_work);
} else if (type == HINIC_CEQ) {
- tasklet_init(&eq->ceq_tasklet, ceq_tasklet,
- (unsigned long)eq);
+ tasklet_setup(&eq->ceq_tasklet, ceq_tasklet);
}
/* set the attributes of the msix entry */
@@ -794,12 +794,15 @@ static int init_eq(struct hinic_eq *eq, struct hinic_hwif *hwif,
HINIC_EQ_MSIX_LLI_CREDIT_LIMIT_DEFAULT,
HINIC_EQ_MSIX_RESEND_TIMER_DEFAULT);
- if (type == HINIC_AEQ)
- err = request_irq(entry.vector, aeq_interrupt, 0,
- "hinic_aeq", eq);
- else if (type == HINIC_CEQ)
- err = request_irq(entry.vector, ceq_interrupt, 0,
- "hinic_ceq", eq);
+ if (type == HINIC_AEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_aeq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, aeq_interrupt, 0, eq->irq_name, eq);
+ } else if (type == HINIC_CEQ) {
+ snprintf(eq->irq_name, sizeof(eq->irq_name), "hinic_ceq%d@pci:%s", eq->q_id,
+ pci_name(pdev));
+ err = request_irq(entry.vector, ceq_interrupt, 0, eq->irq_name, eq);
+ }
if (err) {
dev_err(&pdev->dev, "Failed to request irq for the EQ\n");
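
The tasklet changes here (and in the ehea and ibmvnic hunks further down) are all the same API conversion: tasklet_setup() replaces tasklet_init(), the callback takes the tasklet_struct itself instead of an unsigned long cookie, and from_tasklet(), a typed container_of() wrapper, recovers the enclosing object. The generic recipe, as a sketch for a struct that embeds its tasklet:

struct demo_eq {
	int q_id;
	struct tasklet_struct task;	/* must be embedded, not a pointer */
};

static void demo_tasklet(struct tasklet_struct *t)
{
	struct demo_eq *eq = from_tasklet(eq, t, task);

	pr_debug("servicing eq %d\n", eq->q_id);
}

static void demo_init(struct demo_eq *eq)
{
	tasklet_setup(&eq->task, demo_tasklet);	/* no data cookie to cast */
}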
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
index 43065fc70869..2f3222174fc7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h
@@ -186,6 +186,7 @@ struct hinic_eq {
int num_elem_in_pg;
struct msix_entry msix_entry;
+ char irq_name[64];
dma_addr_t *dma_addr;
void **virt_addr;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index bc8925c0c982..efbaed389440 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -230,6 +230,7 @@ static int wait_hwif_ready(struct hinic_hwif *hwif)
* @hwif: the HW interface of a pci function device
* @attr0: the first attribute that was read from the hw
* @attr1: the second attribute that was read from the hw
+ * @attr2: the third attribute that was read from the hw
**/
static void set_hwif_attr(struct hinic_hwif *hwif, u32 attr0, u32 attr1,
u32 attr2)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
index 3e3fa742e476..4ef4008e65bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.c
@@ -305,6 +305,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
func_to_io->sq_db[q_id] = db_base;
+ qp->sq.qid = q_id;
err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
sq_msix_entry,
CI_ADDR(func_to_io->ci_addr_base, q_id),
@@ -314,6 +315,7 @@ static int init_qp(struct hinic_func_to_io *func_to_io,
goto err_sq_init;
}
+ qp->rq.qid = q_id;
err = hinic_init_rq(&qp->rq, hwif, &func_to_io->rq_wq[q_id],
rq_msix_entry);
if (err) {
@@ -361,8 +363,8 @@ static void destroy_qp(struct hinic_func_to_io *func_to_io,
* @func_to_io: func to io channel that holds the IO components
* @base_qpn: base qp number
 * @num_qps: number of queue pairs to create
- * @sq_msix_entry: msix entries for sq
- * @rq_msix_entry: msix entries for rq
+ * @sq_msix_entries: msix entries for sq
+ * @rq_msix_entries: msix entries for rq
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
index ee6d60762d84..52159a90278a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_io.h
@@ -59,6 +59,7 @@ struct hinic_nic_cfg {
struct hinic_func_to_io {
struct hinic_hwif *hwif;
struct hinic_hwdev *hwdev;
+ u16 global_qpn;
struct hinic_ceqs ceqs;
struct hinic_wqs wqs;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index 2ebae6cb5db5..819fa13034c0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -238,6 +238,7 @@ static int send_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
* @out_size: response length
* @direction: the direction of the original message
* @resp_msg_id: msg id to response for
+ * @timeout: time-out period of waiting for response
*
* Return 0 - Success, negative - Failure
**/
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index ca3e2d060284..0dfa51ad5855 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -81,6 +81,8 @@ struct hinic_sq {
struct hinic_wq *wq;
+ u16 qid;
+
u32 irq;
u16 msix_entry;
@@ -90,6 +92,7 @@ struct hinic_sq {
void __iomem *db_base;
struct sk_buff **saved_skb;
+ struct hinic_debug_priv *dbg;
};
struct hinic_rq {
@@ -97,6 +100,8 @@ struct hinic_rq {
struct hinic_wq *wq;
+ u16 qid;
+
struct cpumask affinity_mask;
u32 irq;
u16 msix_entry;
@@ -110,6 +115,7 @@ struct hinic_rq {
u16 *pi_virt_addr;
dma_addr_t pi_dma_addr;
+ struct hinic_debug_priv *dbg;
};
struct hinic_qp {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 28581bd8ce07..350225bbe0be 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/err.h>
+#include "hinic_debugfs.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_devlink.h"
@@ -153,6 +154,8 @@ static int create_txqs(struct hinic_dev *nic_dev)
if (!nic_dev->txqs)
return -ENOMEM;
+ hinic_sq_dbgfs_init(nic_dev);
+
for (i = 0; i < num_txqs; i++) {
struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
@@ -162,13 +165,27 @@ static int create_txqs(struct hinic_dev *nic_dev)
"Failed to init Txq\n");
goto err_init_txq;
}
+
+ err = hinic_sq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add SQ%d debug\n", i);
+ goto err_add_sq_dbg;
+ }
+
}
return 0;
+err_add_sq_dbg:
+ hinic_clean_txq(&nic_dev->txqs[i]);
err_init_txq:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
+ hinic_sq_debug_rem(nic_dev->txqs[j].sq);
hinic_clean_txq(&nic_dev->txqs[j]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
return err;
@@ -204,8 +221,12 @@ static void free_txqs(struct hinic_dev *nic_dev)
if (!nic_dev->txqs)
return;
- for (i = 0; i < num_txqs; i++)
+ for (i = 0; i < num_txqs; i++) {
+ hinic_sq_debug_rem(nic_dev->txqs[i].sq);
hinic_clean_txq(&nic_dev->txqs[i]);
+ }
+
+ hinic_sq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->txqs);
nic_dev->txqs = NULL;
@@ -231,6 +252,8 @@ static int create_rxqs(struct hinic_dev *nic_dev)
if (!nic_dev->rxqs)
return -ENOMEM;
+ hinic_rq_dbgfs_init(nic_dev);
+
for (i = 0; i < num_rxqs; i++) {
struct hinic_rq *rq = hinic_hwdev_get_rq(nic_dev->hwdev, i);
@@ -240,13 +263,26 @@ static int create_rxqs(struct hinic_dev *nic_dev)
"Failed to init rxq\n");
goto err_init_rxq;
}
+
+ err = hinic_rq_debug_add(nic_dev, i);
+ if (err) {
+ netif_err(nic_dev, drv, netdev,
+ "Failed to add RQ%d debug\n", i);
+ goto err_add_rq_dbg;
+ }
}
return 0;
+err_add_rq_dbg:
+ hinic_clean_rxq(&nic_dev->rxqs[i]);
err_init_rxq:
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[j].rq);
hinic_clean_rxq(&nic_dev->rxqs[j]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
return err;
@@ -264,8 +300,12 @@ static void free_rxqs(struct hinic_dev *nic_dev)
if (!nic_dev->rxqs)
return;
- for (i = 0; i < num_rxqs; i++)
+ for (i = 0; i < num_rxqs; i++) {
+ hinic_rq_debug_rem(nic_dev->rxqs[i].rq);
hinic_clean_rxq(&nic_dev->rxqs[i]);
+ }
+
+ hinic_rq_dbgfs_uninit(nic_dev);
devm_kfree(&netdev->dev, nic_dev->rxqs);
nic_dev->rxqs = NULL;
@@ -913,11 +953,16 @@ static void netdev_features_init(struct net_device *netdev)
netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_UDP_TUNNEL_CSUM;
netdev->vlan_features = netdev->hw_features;
netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_UDP_TUNNEL;
}
static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
@@ -945,7 +990,7 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @handle: nic device for the handler
* @buf_in: input buffer
* @in_size: input size
- * @buf_in: output buffer
+ * @buf_out: output buffer
* @out_size: returned output size
*
* Return 0 - Success, negative - Failure
@@ -1284,6 +1329,16 @@ static int nic_dev_init(struct pci_dev *pdev)
goto err_init_intr;
}
+ hinic_dbg_init(nic_dev);
+
+ hinic_func_tbl_dbgfs_init(nic_dev);
+
+ err = hinic_func_table_debug_add(nic_dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add func_table debug\n");
+ goto err_add_func_table_dbg;
+ }
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -1293,6 +1348,10 @@ static int nic_dev_init(struct pci_dev *pdev)
return 0;
err_reg_netdev:
+ hinic_func_table_debug_rem(nic_dev);
+err_add_func_table_dbg:
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+ hinic_dbg_uninit(nic_dev);
hinic_free_intr_coalesce(nic_dev);
err_init_intr:
err_set_pfc:
@@ -1415,6 +1474,12 @@ static void hinic_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ hinic_func_table_debug_rem(nic_dev);
+
+ hinic_func_tbl_dbgfs_uninit(nic_dev);
+
+ hinic_dbg_uninit(nic_dev);
+
hinic_free_intr_coalesce(nic_dev);
hinic_port_del_mac(nic_dev, netdev->dev_addr, 0);
@@ -1469,4 +1534,17 @@ static struct pci_driver hinic_driver = {
.sriov_configure = hinic_pci_sriov_configure,
};
-module_pci_driver(hinic_driver);
+static int __init hinic_module_init(void)
+{
+ hinic_dbg_register_debugfs(HINIC_DRV_NAME);
+ return pci_register_driver(&hinic_driver);
+}
+
+static void __exit hinic_module_exit(void)
+{
+ pci_unregister_driver(&hinic_driver);
+ hinic_dbg_unregister_debugfs();
+}
+
+module_init(hinic_module_init);
+module_exit(hinic_module_exit);
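
Dropping module_pci_driver() for explicit init/exit hooks is what lets the driver create the shared debugfs root before the first probe runs and remove it after the last device goes away. One caveat in the version above: if pci_register_driver() fails, the debugfs directory is left behind. A sketch with that cleanup added (the extra unregister call is an assumption for illustration, not part of the patch):

static int __init hinic_module_init(void)
{
	int err;

	hinic_dbg_register_debugfs(HINIC_DRV_NAME);

	err = pci_register_driver(&hinic_driver);
	if (err)
		hinic_dbg_unregister_debugfs();	/* don't leak the dir */

	return err;
}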
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index d0072f5e7efc..070a7cc6392e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -595,7 +595,7 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
rxq_stats_init(rxq);
rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
- "hinic_rxq%d", qp->q_id);
+ "%s_rxq%d", netdev->name, qp->q_id);
if (!rxq->irq_name)
return -ENOMEM;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c1f81e9144a1..8da7d46363b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -357,6 +357,7 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
enum hinic_l4_offload_type l4_offload;
u32 offset, l4_len, network_hdr_len;
enum hinic_l3_offload_type l3_type;
+ u32 tunnel_type = NOT_TUNNEL;
union hinic_l3 ip;
union hinic_l4 l4;
u8 l4_proto;
@@ -367,27 +368,55 @@ static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
if (skb->encapsulation) {
u32 l4_tunnel_len;
+ tunnel_type = TUNNEL_UDP_NO_CSUM;
ip.hdr = skb_network_header(skb);
- if (ip.v4->version == 4)
+ if (ip.v4->version == 4) {
l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
- else if (ip.v4->version == 6)
+ l4_proto = ip.v4->protocol;
+ } else if (ip.v4->version == 6) {
+ unsigned char *exthdr;
+ __be16 frag_off;
l3_type = IPV6_PKT;
- else
+ tunnel_type = TUNNEL_UDP_CSUM;
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+ l4.hdr = skb_transport_header(skb);
+ if (l4.hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto, &frag_off);
+ } else {
l3_type = L3TYPE_UNKNOWN;
+ l4_proto = IPPROTO_RAW;
+ }
hinic_task_set_outter_l3(task, l3_type,
skb_network_header_len(skb));
- l4_tunnel_len = skb_inner_network_offset(skb) -
- skb_transport_offset(skb);
-
- hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
- l4_tunnel_len);
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ l4_tunnel_len = skb_inner_network_offset(skb) -
+ skb_transport_offset(skb);
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ network_hdr_len = skb_inner_network_header_len(skb);
+ break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ tunnel_type = NOT_TUNNEL;
+ l4_tunnel_len = 0;
+
+ ip.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+ network_hdr_len = skb_network_header_len(skb);
+ break;
+ default:
+ /* Unsupported tunnel packet, disable csum offload */
+ skb_checksum_help(skb);
+ return 0;
+ }
- ip.hdr = skb_inner_network_header(skb);
- l4.hdr = skb_inner_transport_header(skb);
- network_hdr_len = skb_inner_network_header_len(skb);
+ hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
} else {
ip.hdr = skb_network_header(skb);
l4.hdr = skb_transport_header(skb);
@@ -853,14 +882,14 @@ int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
goto err_alloc_free_sges;
}
- irqname_len = snprintf(NULL, 0, "hinic_txq%d", qp->q_id) + 1;
+ irqname_len = snprintf(NULL, 0, "%s_txq%d", netdev->name, qp->q_id) + 1;
txq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
if (!txq->irq_name) {
err = -ENOMEM;
goto err_alloc_irqname;
}
- sprintf(txq->irq_name, "hinic_txq%d", qp->q_id);
+ sprintf(txq->irq_name, "%s_txq%d", netdev->name, qp->q_id);
err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
CI_UPDATE_NO_COALESC);
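
The reworked encapsulation branch in offload_csum() keys everything off the outer L4 protocol: UDP tunnels keep hardware offload (upgraded to TUNNEL_UDP_CSUM for IPv6 outers), IPIP and IPV6-in-IP carry no tunnel header, and anything else falls back to skb_checksum_help(). For an IPv6 outer header the protocol may sit behind extension headers, which is what the ipv6_skip_exthdr() call handles; that step in isolation, as a sketch that assumes the outer header is IPv6 and the skb's header offsets are already set:

	unsigned char *exthdr = skb_network_header(skb) + sizeof(struct ipv6hdr);
	u8 l4_proto = ipv6_hdr(skb)->nexthdr;
	__be16 frag_off;

	/* if extension headers are present, the transport header sits
	 * past them; walk them to recover the real L4 protocol
	 */
	if (skb_transport_header(skb) != exthdr)
		ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto, &frag_off);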
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3153d62cc73e..c2e740475786 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1212,9 +1212,9 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
}
}
-static void ehea_neq_tasklet(unsigned long data)
+static void ehea_neq_tasklet(struct tasklet_struct *t)
{
- struct ehea_adapter *adapter = (struct ehea_adapter *)data;
+ struct ehea_adapter *adapter = from_tasklet(adapter, t, neq_tasklet);
struct ehea_eqe *eqe;
u64 event_mask;
@@ -3417,8 +3417,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
goto out_free_ad;
}
- tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
- (unsigned long)adapter);
+ tasklet_setup(&adapter->neq_tasklet, ehea_neq_tasklet);
ret = ehea_create_device_sysfs(dev);
if (ret)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c5c732601e35..7ef3369953b6 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1349,6 +1349,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
int lrg_pkt = ibmveth_rxq_large_packet(adapter);
+ __sum16 iph_check = 0;
skb = ibmveth_rxq_get_buffer(adapter);
@@ -1385,16 +1386,26 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
- if (csum_good) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- ibmveth_rx_csum_helper(skb, adapter);
+ /* PHYP without PLSO support places a -1 in the ip
+ * checksum for large send frames.
+ */
+ if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph_check = iph->check;
}
- if (length > netdev->mtu + ETH_HLEN) {
+ if ((length > netdev->mtu + ETH_HLEN) ||
+ lrg_pkt || iph_check == 0xffff) {
ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
adapter->rx_large_packets++;
}
+ if (csum_good) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ ibmveth_rx_csum_helper(skb, adapter);
+ }
+
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
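
The reordering above matters because ibmveth_rx_csum_helper() may rewrite iph->check, so the large-send test has to read it first. A frame is treated as an unsegmented large-send aggregate when it exceeds the MTU, when firmware set the large-packet bit, or when PHYP (lacking large-send support) stored -1 in the IP checksum, a value a conforming sender never emits. The marker test alone, as a sketch:

static bool demo_is_large_send_marker(const struct sk_buff *skb)
{
	const struct iphdr *iph;

	if (skb->protocol != cpu_to_be16(ETH_P_IP))
		return false;

	iph = (const struct iphdr *)skb->data;
	/* 0xffff is the hypervisor's marker, not a real checksum */
	return iph->check == 0xffff;
}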
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b702a43a5d0..1f7fe6b3dd5a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -97,15 +97,14 @@ static int pending_scrq(struct ibmvnic_adapter *,
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
-static void send_map_query(struct ibmvnic_adapter *adapter);
+static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
static int send_login(struct ibmvnic_adapter *adapter);
-static void send_cap_queries(struct ibmvnic_adapter *adapter);
+static void send_query_cap(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
-static int ibmvnic_init(struct ibmvnic_adapter *);
-static int ibmvnic_reset_init(struct ibmvnic_adapter *);
+static int ibmvnic_reset_init(struct ibmvnic_adapter *, bool reset);
static void release_crq_queue(struct ibmvnic_adapter *);
static int __ibmvnic_set_mac(struct net_device *, u8 *);
static int init_crq_queue(struct ibmvnic_adapter *adapter);
@@ -297,8 +296,7 @@ static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
adapter->rx_pool[i].active = 0;
}
@@ -306,6 +304,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
struct ibmvnic_rx_pool *pool)
{
int count = pool->size - atomic_read(&pool->available);
+ u64 handle = adapter->rx_scrq[pool->index]->handle;
struct device *dev = &adapter->vdev->dev;
int buffers_added = 0;
unsigned long lpar_rc;
@@ -314,7 +313,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
unsigned int offset;
dma_addr_t dma_addr;
unsigned char *dst;
- u64 *handle_array;
int shift = 0;
int index;
int i;
@@ -322,10 +320,6 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
if (!pool->active)
return;
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->
- off_rxadd_subcrqs));
-
for (i = 0; i < count; ++i) {
skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
if (!skb) {
@@ -369,8 +363,7 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
#endif
sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
- lpar_rc = send_subcrq(adapter, handle_array[pool->index],
- &sub_crq);
+ lpar_rc = send_subcrq(adapter, handle, &sub_crq);
if (lpar_rc != H_SUCCESS)
goto failure;
@@ -407,8 +400,7 @@ static void replenish_pools(struct ibmvnic_adapter *adapter)
int i;
adapter->replenish_task_cycles++;
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++) {
+ for (i = 0; i < adapter->num_active_rx_pools; i++) {
if (adapter->rx_pool[i].active)
replenish_rx_pool(adapter, &adapter->rx_pool[i]);
}
@@ -475,25 +467,23 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
static int reset_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
+ u64 buff_size;
int rx_scrqs;
int i, j, rc;
- u64 *size_array;
if (!adapter->rx_pool)
return -1;
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
-
- rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ buff_size = adapter->cur_rx_buf_sz;
+ rx_scrqs = adapter->num_active_rx_pools;
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
- if (rx_pool->buff_size != be64_to_cpu(size_array[i])) {
+ if (rx_pool->buff_size != buff_size) {
free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rc = alloc_long_term_buff(adapter,
&rx_pool->long_term_buff,
rx_pool->size *
@@ -561,13 +551,11 @@ static int init_rx_pools(struct net_device *netdev)
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
int rxadd_subcrqs;
- u64 *size_array;
+ u64 buff_size;
int i, j;
- rxadd_subcrqs =
- be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ buff_size = adapter->cur_rx_buf_sz;
adapter->rx_pool = kcalloc(rxadd_subcrqs,
sizeof(struct ibmvnic_rx_pool),
@@ -585,11 +573,11 @@ static int init_rx_pools(struct net_device *netdev)
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
i, adapter->req_rx_add_entries_per_subcrq,
- be64_to_cpu(size_array[i]));
+ buff_size);
rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
rx_pool->index = i;
- rx_pool->buff_size = be64_to_cpu(size_array[i]);
+ rx_pool->buff_size = buff_size;
rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
@@ -655,7 +643,7 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter)
if (!adapter->tx_pool)
return -1;
- tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_scrqs = adapter->num_active_tx_pools;
for (i = 0; i < tx_scrqs; i++) {
rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
if (rc)
@@ -744,7 +732,7 @@ static int init_tx_pools(struct net_device *netdev)
int tx_subcrqs;
int i, rc;
- tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ tx_subcrqs = adapter->num_active_tx_scrqs;
adapter->tx_pool = kcalloc(tx_subcrqs,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
@@ -894,7 +882,7 @@ static int ibmvnic_login(struct net_device *netdev)
"Received partial success, retrying...\n");
adapter->init_done_rc = 0;
reinit_completion(&adapter->init_done);
- send_cap_queries(adapter);
+ send_query_cap(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
netdev_warn(netdev,
@@ -980,7 +968,7 @@ static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
return -1;
}
- if (adapter->init_done_rc == 1) {
+ if (adapter->init_done_rc == PARTIALSUCCESS) {
/* Partial success, delay and re-send */
mdelay(1000);
resend = true;
@@ -1125,7 +1113,7 @@ static int init_resources(struct ibmvnic_adapter *adapter)
if (rc)
return rc;
- send_map_query(adapter);
+ send_query_map(adapter);
rc = init_rx_pools(netdev);
if (rc)
@@ -1530,9 +1518,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned int offset;
int num_entries = 1;
unsigned char *dst;
- u64 *handle_array;
int index = 0;
u8 proto = 0;
+ u64 handle;
netdev_tx_t ret = NETDEV_TX_OK;
if (test_bit(0, &adapter->resetting)) {
@@ -1559,8 +1547,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
- handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
- be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ handle = tx_scrq->handle;
index = tx_pool->free_map[tx_pool->consumer_index];
@@ -1672,14 +1659,14 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_OK;
goto tx_err_out;
}
- lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq_indirect(adapter, handle,
(u64)tx_buff->indir_dma,
(u64)num_entries);
dma_unmap_single(dev, tx_buff->indir_dma,
sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
} else {
tx_buff->num_entries = num_entries;
- lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ lpar_rc = send_subcrq(adapter, handle,
&tx_crq);
}
if (lpar_rc != H_SUCCESS) {
@@ -1874,7 +1861,7 @@ static int do_change_param_reset(struct ibmvnic_adapter *adapter,
return rc;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc)
return IBMVNIC_INIT_FAILED;
@@ -1992,7 +1979,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- rc = ibmvnic_reset_init(adapter);
+ rc = ibmvnic_reset_init(adapter, true);
if (rc) {
rc = IBMVNIC_INIT_FAILED;
goto out;
@@ -2108,7 +2095,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
return rc;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc)
return rc;
@@ -3312,7 +3299,7 @@ tx_failed:
return -1;
}
-static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
+static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
@@ -3583,8 +3570,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
if (rc) {
if (rc == H_CLOSED) {
dev_warn(dev, "CRQ Queue closed\n");
- if (test_bit(0, &adapter->resetting))
- ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+			/* do not reset, report the failure, wait for passive init from server */
}
dev_warn(dev, "Send error (rc=%d)\n", rc);
@@ -3595,14 +3581,31 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
+ struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
+ int retries = 100;
+ int rc;
memset(&crq, 0, sizeof(crq));
crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
crq.generic.cmd = IBMVNIC_CRQ_INIT;
netdev_dbg(adapter->netdev, "Sending CRQ init\n");
- return ibmvnic_send_crq(adapter, &crq);
+ do {
+ rc = ibmvnic_send_crq(adapter, &crq);
+ if (rc != H_CLOSED)
+ break;
+ retries--;
+ msleep(50);
+
+ } while (retries > 0);
+
+ if (rc) {
+ dev_err(dev, "Failed to send init request, rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
}
static int send_version_xchg(struct ibmvnic_adapter *adapter)
@@ -3822,7 +3825,7 @@ static int send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
return ibmvnic_send_crq(adapter, &crq);
}
-static void send_map_query(struct ibmvnic_adapter *adapter)
+static void send_query_map(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
@@ -3833,7 +3836,7 @@ static void send_map_query(struct ibmvnic_adapter *adapter)
}
/* Send a series of CRQs requesting various capabilities of the VNIC server */
-static void send_cap_queries(struct ibmvnic_adapter *adapter)
+static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
@@ -3950,6 +3953,113 @@ static void send_cap_queries(struct ibmvnic_adapter *adapter)
ibmvnic_send_crq(adapter, &crq);
}
+static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
+{
+ int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
+ struct device *dev = &adapter->vdev->dev;
+ union ibmvnic_crq crq;
+
+ adapter->ip_offload_tok =
+ dma_map_single(dev,
+ &adapter->ip_offload_buf,
+ buf_sz,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "Couldn't map offload buffer\n");
+ return;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
+ crq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
+ crq.query_ip_offload.len = cpu_to_be32(buf_sz);
+ crq.query_ip_offload.ioba =
+ cpu_to_be32(adapter->ip_offload_tok);
+
+ ibmvnic_send_crq(adapter, &crq);
+}
+
+static void send_control_ip_offload(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_control_ip_offload_buffer *ctrl_buf = &adapter->ip_offload_ctrl;
+ struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+ struct device *dev = &adapter->vdev->dev;
+ netdev_features_t old_hw_features = 0;
+ union ibmvnic_crq crq;
+
+ adapter->ip_offload_ctrl_tok =
+ dma_map_single(dev,
+ ctrl_buf,
+ sizeof(adapter->ip_offload_ctrl),
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
+ dev_err(dev, "Couldn't map ip offload control buffer\n");
+ return;
+ }
+
+ ctrl_buf->len = cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
+ ctrl_buf->version = cpu_to_be32(INITIAL_VERSION_IOB);
+ ctrl_buf->ipv4_chksum = buf->ipv4_chksum;
+ ctrl_buf->ipv6_chksum = buf->ipv6_chksum;
+ ctrl_buf->tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
+ ctrl_buf->udp_ipv4_chksum = buf->udp_ipv4_chksum;
+ ctrl_buf->tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
+ ctrl_buf->udp_ipv6_chksum = buf->udp_ipv6_chksum;
+ ctrl_buf->large_tx_ipv4 = buf->large_tx_ipv4;
+ ctrl_buf->large_tx_ipv6 = buf->large_tx_ipv6;
+
+ /* large_rx disabled for now, additional features needed */
+ ctrl_buf->large_rx_ipv4 = 0;
+ ctrl_buf->large_rx_ipv6 = 0;
+
+ if (adapter->state != VNIC_PROBING) {
+ old_hw_features = adapter->netdev->hw_features;
+ adapter->netdev->hw_features = 0;
+ }
+
+ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
+ adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
+
+ if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
+ adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ adapter->netdev->hw_features |= NETIF_F_RXCSUM;
+
+ if (buf->large_tx_ipv4)
+ adapter->netdev->hw_features |= NETIF_F_TSO;
+ if (buf->large_tx_ipv6)
+ adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+ if (adapter->state == VNIC_PROBING) {
+ adapter->netdev->features |= adapter->netdev->hw_features;
+ } else if (old_hw_features != adapter->netdev->hw_features) {
+ netdev_features_t tmp = 0;
+
+ /* disable features no longer supported */
+ adapter->netdev->features &= adapter->netdev->hw_features;
+ /* turn on features now supported if previously enabled */
+ tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+ adapter->netdev->hw_features;
+ adapter->netdev->features |=
+ tmp & adapter->netdev->wanted_features;
+ }
+
+ memset(&crq, 0, sizeof(crq));
+ crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
+ crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
+ crq.control_ip_offload.len =
+ cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
+ crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
+ ibmvnic_send_crq(adapter, &crq);
+}
+
static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
@@ -4019,8 +4129,6 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
- netdev_features_t old_hw_features = 0;
- union ibmvnic_crq crq;
int i;
dma_unmap_single(dev, adapter->ip_offload_tok,
@@ -4070,74 +4178,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
buf->off_ipv6_ext_headers);
- adapter->ip_offload_ctrl_tok =
- dma_map_single(dev, &adapter->ip_offload_ctrl,
- sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
-
- if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
- dev_err(dev, "Couldn't map ip offload control buffer\n");
- return;
- }
-
- adapter->ip_offload_ctrl.len =
- cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
- adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
- adapter->ip_offload_ctrl.ipv4_chksum = buf->ipv4_chksum;
- adapter->ip_offload_ctrl.ipv6_chksum = buf->ipv6_chksum;
- adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
- adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
- adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
- adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
- adapter->ip_offload_ctrl.large_tx_ipv4 = buf->large_tx_ipv4;
- adapter->ip_offload_ctrl.large_tx_ipv6 = buf->large_tx_ipv6;
-
- /* large_rx disabled for now, additional features needed */
- adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
- adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
-
- if (adapter->state != VNIC_PROBING) {
- old_hw_features = adapter->netdev->hw_features;
- adapter->netdev->hw_features = 0;
- }
-
- adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
-
- if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
- adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
-
- if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
- adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
-
- if ((adapter->netdev->features &
- (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
- adapter->netdev->hw_features |= NETIF_F_RXCSUM;
-
- if (buf->large_tx_ipv4)
- adapter->netdev->hw_features |= NETIF_F_TSO;
- if (buf->large_tx_ipv6)
- adapter->netdev->hw_features |= NETIF_F_TSO6;
-
- if (adapter->state == VNIC_PROBING) {
- adapter->netdev->features |= adapter->netdev->hw_features;
- } else if (old_hw_features != adapter->netdev->hw_features) {
- netdev_features_t tmp = 0;
-
- /* disable features no longer supported */
- adapter->netdev->features &= adapter->netdev->hw_features;
- /* turn on features now supported if previously enabled */
- tmp = (old_hw_features ^ adapter->netdev->hw_features) &
- adapter->netdev->hw_features;
- adapter->netdev->features |=
- tmp & adapter->netdev->wanted_features;
- }
-
- memset(&crq, 0, sizeof(crq));
- crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
- crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
- crq.control_ip_offload.len =
- cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
- crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
- ibmvnic_send_crq(adapter, &crq);
+ send_control_ip_offload(adapter);
}
static const char *ibmvnic_fw_err_cause(u16 cause)
@@ -4263,7 +4304,7 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
be64_to_cpu(crq->request_capability_rsp.number);
}
- ibmvnic_send_req_caps(adapter, 1);
+ send_request_cap(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -4273,30 +4314,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
/* Done receiving requested capabilities, query IP offload support */
if (atomic_read(&adapter->running_cap_crqs) == 0) {
- union ibmvnic_crq newcrq;
- int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
- struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
- &adapter->ip_offload_buf;
-
adapter->wait_capability = false;
- adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
- buf_sz,
- DMA_FROM_DEVICE);
-
- if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
- if (!firmware_has_feature(FW_FEATURE_CMO))
- dev_err(dev, "Couldn't map offload buffer\n");
- return;
- }
-
- memset(&newcrq, 0, sizeof(newcrq));
- newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
- newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
- newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
- newcrq.query_ip_offload.ioba =
- cpu_to_be32(adapter->ip_offload_tok);
-
- ibmvnic_send_crq(adapter, &newcrq);
+ send_query_ip_offload(adapter);
}
}
@@ -4307,6 +4326,11 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
struct net_device *netdev = adapter->netdev;
struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
struct ibmvnic_login_buffer *login = adapter->login_buf;
+ u64 *tx_handle_array;
+ u64 *rx_handle_array;
+ int num_tx_pools;
+ int num_rx_pools;
+ u64 *size_array;
int i;
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
@@ -4341,6 +4365,30 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
ibmvnic_remove(adapter->vdev);
return -EIO;
}
+ size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
+ /* variable buffer sizes are not supported, so just read the
+ * first entry.
+ */
+ adapter->cur_rx_buf_sz = be64_to_cpu(size_array[0]);
+
+ num_tx_pools = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
+ num_rx_pools = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+
+ tx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_txsubm_subcrqs));
+ rx_handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
+ be32_to_cpu(adapter->login_rsp_buf->off_rxadd_subcrqs));
+
+ for (i = 0; i < num_tx_pools; i++)
+ adapter->tx_scrq[i]->handle = tx_handle_array[i];
+
+ for (i = 0; i < num_rx_pools; i++)
+ adapter->rx_scrq[i]->handle = rx_handle_array[i];
+
+ adapter->num_active_tx_scrqs = num_tx_pools;
+ adapter->num_active_rx_scrqs = num_rx_pools;
+ release_login_rsp_buffer(adapter);
release_login_buffer(adapter);
complete(&adapter->init_done);
@@ -4550,7 +4598,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_crqs) == 0) {
adapter->wait_capability = false;
- ibmvnic_send_req_caps(adapter, 0);
+ send_request_cap(adapter, 0);
}
}
@@ -4605,7 +4653,7 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_1GBPS:
adapter->speed = SPEED_1000;
break;
- case IBMVNIC_10GBP:
+ case IBMVNIC_10GBPS:
adapter->speed = SPEED_10000;
break;
case IBMVNIC_25GBPS:
@@ -4620,6 +4668,9 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
case IBMVNIC_100GBPS:
adapter->speed = SPEED_100000;
break;
+ case IBMVNIC_200GBPS:
+ adapter->speed = SPEED_200000;
+ break;
default:
if (netif_carrier_ok(netdev))
netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
@@ -4715,7 +4766,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
be16_to_cpu(crq->version_exchange_rsp.version);
dev_info(dev, "Partner protocol version is %d\n",
ibmvnic_version);
- send_cap_queries(adapter);
+ send_query_cap(adapter);
break;
case QUERY_CAPABILITY_RSP:
handle_query_cap_rsp(crq, adapter);
@@ -4812,9 +4863,9 @@ static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
return IRQ_HANDLED;
}
-static void ibmvnic_tasklet(void *data)
+static void ibmvnic_tasklet(struct tasklet_struct *t)
{
- struct ibmvnic_adapter *adapter = data;
+ struct ibmvnic_adapter *adapter = from_tasklet(adapter, t, tasklet);
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
@@ -4949,8 +5000,7 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
retrc = 0;
- tasklet_init(&adapter->tasklet, (void *)ibmvnic_tasklet,
- (unsigned long)adapter);
+	tasklet_setup(&adapter->tasklet, ibmvnic_tasklet);
netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
snprintf(crq->name, sizeof(crq->name), "ibmvnic-%x",
@@ -4986,7 +5036,7 @@ map_failed:
return retrc;
}
-static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
+static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter, bool reset)
{
struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
@@ -4995,12 +5045,19 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- old_num_rx_queues = adapter->req_rx_queues;
- old_num_tx_queues = adapter->req_tx_queues;
+ if (reset) {
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+ reinit_completion(&adapter->init_done);
+ }
- reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
+ rc = ibmvnic_send_crq_init(adapter);
+ if (rc) {
+ dev_err(dev, "Send crq init failed with error %d\n", rc);
+ return rc;
+ }
+
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Initialization sequence timed out\n");
return -1;
@@ -5017,7 +5074,8 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return -1;
}
- if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
+ if (reset &&
+ test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
adapter->reset_reason != VNIC_RESET_MOBILITY) {
if (adapter->req_rx_queues != old_num_rx_queues ||
adapter->req_tx_queues != old_num_tx_queues) {
@@ -5045,48 +5103,6 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
return rc;
}
-static int ibmvnic_init(struct ibmvnic_adapter *adapter)
-{
- struct device *dev = &adapter->vdev->dev;
- unsigned long timeout = msecs_to_jiffies(30000);
- int rc;
-
- adapter->from_passive_init = false;
-
- adapter->init_done_rc = 0;
- ibmvnic_send_crq_init(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
- dev_err(dev, "Initialization sequence timed out\n");
- return -1;
- }
-
- if (adapter->init_done_rc) {
- release_crq_queue(adapter);
- return adapter->init_done_rc;
- }
-
- if (adapter->from_passive_init) {
- adapter->state = VNIC_OPEN;
- adapter->from_passive_init = false;
- return -1;
- }
-
- rc = init_sub_crqs(adapter);
- if (rc) {
- dev_err(dev, "Initialization of sub crqs failed\n");
- release_crq_queue(adapter);
- return rc;
- }
-
- rc = init_sub_crq_irqs(adapter);
- if (rc) {
- dev_err(dev, "Failed to initialize sub crq irqs\n");
- release_crq_queue(adapter);
- }
-
- return rc;
-}
-
static struct device_attribute dev_attr_failover;
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -5149,7 +5165,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
goto ibmvnic_init_fail;
}
- rc = ibmvnic_init(adapter);
+ rc = ibmvnic_reset_init(adapter, false);
if (rc && rc != EAGAIN)
goto ibmvnic_init_fail;
} while (rc == EAGAIN);
@@ -5299,8 +5315,7 @@ static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
ret += 4 * PAGE_SIZE; /* the scrq message queue */
- for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
- i++)
+ for (i = 0; i < adapter->num_active_rx_pools; i++)
ret += adapter->rx_pool[i].size *
IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
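
Most of the ibmvnic churn above is one refactor: handle_login_rsp() now parses the offset-based arrays in the login response once, caching each sub-CRQ handle in its queue and the single supported RX buffer size in cur_rx_buf_sz, so the transmit and reset paths stop dereferencing login_rsp_buf, which may already have been released. The parsing idiom, as a sketch of the TX side only:

	struct ibmvnic_login_rsp_buffer *rsp = adapter->login_rsp_buf;
	u64 *tx_handles = (u64 *)((u8 *)rsp +
				  be32_to_cpu(rsp->off_txsubm_subcrqs));
	int i, n = be32_to_cpu(rsp->num_txsubm_subcrqs);

	for (i = 0; i < n; i++)
		adapter->tx_scrq[i]->handle = tx_handles[i];	/* survives buffer release */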
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index f8416e1d4cf0..217dcc7ded70 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -373,7 +373,7 @@ struct ibmvnic_phys_parms {
#define IBMVNIC_10MBPS 0x40000000
#define IBMVNIC_100MBPS 0x20000000
#define IBMVNIC_1GBPS 0x10000000
-#define IBMVNIC_10GBP 0x08000000
+#define IBMVNIC_10GBPS 0x08000000
#define IBMVNIC_40GBPS 0x04000000
#define IBMVNIC_100GBPS 0x02000000
#define IBMVNIC_25GBPS 0x01000000
@@ -875,6 +875,7 @@ struct ibmvnic_sub_crq_queue {
struct ibmvnic_adapter *adapter;
atomic_t used;
char name[32];
+ u64 handle;
};
struct ibmvnic_long_term_buff {
@@ -1075,6 +1076,7 @@ struct ibmvnic_adapter {
u32 num_active_rx_napi;
u32 num_active_tx_scrqs;
u32 num_active_tx_pools;
+ u32 cur_rx_buf_sz;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 36da059388dc..8cc651d37a7f 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -384,7 +384,7 @@ enum cb_status {
cb_ok = 0x2000,
};
-/**
+/*
* cb_command - Command Block flags
* @cb_tx_nc: 0: controller does CRC (normal), 1: CRC from skb memory
*/
@@ -1531,7 +1531,7 @@ static int e100_hw_init(struct nic *nic)
e100_hw_reset(nic);
netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
- if (!in_interrupt() && (err = e100_self_test(nic)))
+ if ((err = e100_self_test(nic)))
return err;
if ((err = e100_phy_init(nic)))
@@ -2155,7 +2155,7 @@ static int e100_rx_alloc_list(struct nic *nic)
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
- if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_ATOMIC)))
+ if (!(nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL)))
return -ENOMEM;
for (rx = nic->rxs, i = 0; i < count; rx++, i++) {
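The flag change above follows the standard rule: e100_rx_alloc_list() runs in process context on the open/up path, where sleeping is allowed, so GFP_KERNEL is correct; GFP_ATOMIC should be reserved for contexts that must not sleep. Illustrative contrast:

    /* process context, may sleep and reclaim: */
    nic->rxs = kcalloc(count, sizeof(struct rx), GFP_KERNEL);

    /* IRQ/softirq context, must not sleep: */
    skb = alloc_skb(len, GFP_ATOMIC);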
@@ -2593,7 +2593,7 @@ static void e100_diag_test(struct net_device *netdev,
{
struct ethtool_cmd cmd;
struct nic *nic = netdev_priv(netdev);
- int i, err;
+ int i;
memset(data, 0, E100_TEST_LEN * sizeof(u64));
data[0] = !mii_link_ok(&nic->mii);
@@ -2601,7 +2601,7 @@ static void e100_diag_test(struct net_device *netdev,
if (test->flags & ETH_TEST_FL_OFFLINE) {
/* save speed, duplex & autoneg settings */
- err = mii_ethtool_gset(&nic->mii, &cmd);
+ mii_ethtool_gset(&nic->mii, &cmd);
if (netif_running(netdev))
e100_down(nic);
@@ -2610,7 +2610,7 @@ static void e100_diag_test(struct net_device *netdev,
data[4] = e100_loopback_test(nic, lb_phy);
/* restore speed, duplex & autoneg settings */
- err = mii_ethtool_sset(&nic->mii, &cmd);
+ mii_ethtool_sset(&nic->mii, &cmd);
if (netif_running(netdev))
e100_up(nic);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 4e7a0810eaeb..4c0c9433bd60 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -129,7 +129,6 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
*/
static void e1000_phy_init_script(struct e1000_hw *hw)
{
- u32 ret_val;
u16 phy_saved_data;
if (hw->phy_init_script) {
@@ -138,7 +137,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
/* Save off the current value of register 0x2F5B to be restored
* at the end of this routine.
*/
- ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+ e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
/* Disabled the PHY transmitter */
e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
@@ -377,7 +376,6 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
{
u32 ctrl;
u32 ctrl_ext;
- u32 icr;
u32 manc;
u32 led_ctrl;
s32 ret_val;
@@ -502,7 +500,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
/* Clear any pending interrupt events. */
- icr = er32(ICR);
+ er32(ICR);
/* If MWI was previously enabled, reenable it. */
if (hw->mac_type == e1000_82542_rev2_0) {
@@ -1897,7 +1895,6 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
/**
* e1000_config_mac_to_phy - sync phy and mac settings
* @hw: Struct containing variables accessed by shared code
- * @mii_reg: data to write to the MII control register
*
* Sets MAC speed and duplex settings to reflect those in the PHY
* The contents of the PHY register containing the needed information need to
@@ -2370,16 +2367,13 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
*/
s32 e1000_check_for_link(struct e1000_hw *hw)
{
- u32 rxcw = 0;
- u32 ctrl;
u32 status;
u32 rctl;
u32 icr;
- u32 signal = 0;
s32 ret_val;
u16 phy_data;
- ctrl = er32(CTRL);
+ er32(CTRL);
status = er32(STATUS);
/* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
@@ -2388,12 +2382,9 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
*/
if ((hw->media_type == e1000_media_type_fiber) ||
(hw->media_type == e1000_media_type_internal_serdes)) {
- rxcw = er32(RXCW);
+ er32(RXCW);
if (hw->media_type == e1000_media_type_fiber) {
- signal =
- (hw->mac_type >
- e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
if (status & E1000_STATUS_LU)
hw->get_link_status = false;
}
@@ -2922,7 +2913,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
*
* @hw: Struct containing variables accessed by shared code
* @reg_addr: address of the PHY register to write
- * @data: data to write to the PHY
+ * @phy_data: data to write to the PHY
*
* Writes a value to a PHY register
*/
@@ -4410,17 +4401,9 @@ void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
static void e1000_clear_vfta(struct e1000_hw *hw)
{
u32 offset;
- u32 vfta_value = 0;
- u32 vfta_offset = 0;
- u32 vfta_bit_in_reg = 0;
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
- /* If the offset we want to clear is the same offset of the
- * manageability VLAN ID, then clear all bits except that of the
- * manageability unit
- */
- vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
- E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+ E1000_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
E1000_WRITE_FLUSH();
}
}
@@ -4675,78 +4658,76 @@ s32 e1000_led_off(struct e1000_hw *hw)
*/
static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
{
- volatile u32 temp;
-
- temp = er32(CRCERRS);
- temp = er32(SYMERRS);
- temp = er32(MPC);
- temp = er32(SCC);
- temp = er32(ECOL);
- temp = er32(MCC);
- temp = er32(LATECOL);
- temp = er32(COLC);
- temp = er32(DC);
- temp = er32(SEC);
- temp = er32(RLEC);
- temp = er32(XONRXC);
- temp = er32(XONTXC);
- temp = er32(XOFFRXC);
- temp = er32(XOFFTXC);
- temp = er32(FCRUC);
-
- temp = er32(PRC64);
- temp = er32(PRC127);
- temp = er32(PRC255);
- temp = er32(PRC511);
- temp = er32(PRC1023);
- temp = er32(PRC1522);
-
- temp = er32(GPRC);
- temp = er32(BPRC);
- temp = er32(MPRC);
- temp = er32(GPTC);
- temp = er32(GORCL);
- temp = er32(GORCH);
- temp = er32(GOTCL);
- temp = er32(GOTCH);
- temp = er32(RNBC);
- temp = er32(RUC);
- temp = er32(RFC);
- temp = er32(ROC);
- temp = er32(RJC);
- temp = er32(TORL);
- temp = er32(TORH);
- temp = er32(TOTL);
- temp = er32(TOTH);
- temp = er32(TPR);
- temp = er32(TPT);
-
- temp = er32(PTC64);
- temp = er32(PTC127);
- temp = er32(PTC255);
- temp = er32(PTC511);
- temp = er32(PTC1023);
- temp = er32(PTC1522);
-
- temp = er32(MPTC);
- temp = er32(BPTC);
+ er32(CRCERRS);
+ er32(SYMERRS);
+ er32(MPC);
+ er32(SCC);
+ er32(ECOL);
+ er32(MCC);
+ er32(LATECOL);
+ er32(COLC);
+ er32(DC);
+ er32(SEC);
+ er32(RLEC);
+ er32(XONRXC);
+ er32(XONTXC);
+ er32(XOFFRXC);
+ er32(XOFFTXC);
+ er32(FCRUC);
+
+ er32(PRC64);
+ er32(PRC127);
+ er32(PRC255);
+ er32(PRC511);
+ er32(PRC1023);
+ er32(PRC1522);
+
+ er32(GPRC);
+ er32(BPRC);
+ er32(MPRC);
+ er32(GPTC);
+ er32(GORCL);
+ er32(GORCH);
+ er32(GOTCL);
+ er32(GOTCH);
+ er32(RNBC);
+ er32(RUC);
+ er32(RFC);
+ er32(ROC);
+ er32(RJC);
+ er32(TORL);
+ er32(TORH);
+ er32(TOTL);
+ er32(TOTH);
+ er32(TPR);
+ er32(TPT);
+
+ er32(PTC64);
+ er32(PTC127);
+ er32(PTC255);
+ er32(PTC511);
+ er32(PTC1023);
+ er32(PTC1522);
+
+ er32(MPTC);
+ er32(BPTC);
if (hw->mac_type < e1000_82543)
return;
- temp = er32(ALGNERRC);
- temp = er32(RXERRC);
- temp = er32(TNCRS);
- temp = er32(CEXTERR);
- temp = er32(TSCTC);
- temp = er32(TSCTFC);
+ er32(ALGNERRC);
+ er32(RXERRC);
+ er32(TNCRS);
+ er32(CEXTERR);
+ er32(TSCTC);
+ er32(TSCTFC);
if (hw->mac_type <= e1000_82544)
return;
- temp = er32(MGTPRC);
- temp = er32(MGTPDC);
- temp = er32(MGTPTC);
+ er32(MGTPRC);
+ er32(MGTPDC);
+ er32(MGTPTC);
}
/**
@@ -4778,8 +4759,6 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
/**
* e1000_update_adaptive - update adaptive IFS
* @hw: Struct containing variables accessed by shared code
- * @tx_packets: Number of transmits since last callback
- * @total_collisions: Number of collisions since last callback
*
* Called during the callback/watchdog routine to update IFS value based on
* the ratio of transmits to collisions.
@@ -5064,8 +5043,6 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
/**
* e1000_check_downshift - Check if Downshift occurred
* @hw: Struct containing variables accessed by shared code
- * @downshift: output parameter : 0 - No Downshift occurred.
- * 1 - Downshift occurred.
*
* returns: - E1000_ERR_XXX
* E1000_SUCCESS
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1e6ec081fd9d..5e28cf4fa2cd 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -199,8 +199,10 @@ module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
/**
- * e1000_get_hw_dev - return device
- * used by hardware layer to print debugging information
+ * e1000_get_hw_dev - helper function for getting netdev
+ * @hw: pointer to HW struct
+ *
+ * return device used by hardware layer to print debugging information
*
**/
struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
@@ -354,7 +356,7 @@ static void e1000_release_manageability(struct e1000_adapter *adapter)
/**
* e1000_configure - configure the hardware for RX and TX
- * @adapter = private board structure
+ * @adapter: private board structure
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
@@ -534,7 +536,6 @@ void e1000_down(struct e1000_adapter *adapter)
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
@@ -3489,8 +3490,9 @@ exit:
/**
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: number of the Tx queue that hung (unused)
**/
-static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -3787,7 +3789,8 @@ static irqreturn_t e1000_intr(int irq, void *data)
/**
* e1000_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: napi struct containing references to driver info
+ * @budget: budget given to driver for receive packets
**/
static int e1000_clean(struct napi_struct *napi, int budget)
{
@@ -3818,6 +3821,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
/**
* e1000_clean_tx_irq - Reclaim resources after transmit completes
* @adapter: board private structure
+ * @tx_ring: ring to clean
**/
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
@@ -3933,7 +3937,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
* @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
u32 csum, struct sk_buff *skb)
@@ -3970,6 +3974,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
/**
* e1000_consume_page - helper function for jumbo Rx path
+ * @bi: software descriptor shadow data
+ * @skb: skb being modified
+ * @length: length of data being added
**/
static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
u16 length)
@@ -4003,6 +4010,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
/**
* e1000_tbi_adjust_stats
* @hw: Struct containing variables accessed by shared code
+ * @stats: pointer to stats struct
* @frame_len: The length of the frame in question
* @mac_addr: The Ethernet destination address of the frame in question
*
@@ -4548,6 +4556,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/**
* e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
* @adapter: address of board private structure
+ * @rx_ring: pointer to ring struct
+ * @cleaned_count: number of new Rx buffers to try to allocate
**/
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@@ -4662,7 +4672,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/**
* e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
- * @adapter:
+ * @adapter: address of board private structure
**/
static void e1000_smartspeed(struct e1000_adapter *adapter)
{
@@ -4718,10 +4728,10 @@ static void e1000_smartspeed(struct e1000_adapter *adapter)
}
/**
- * e1000_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * e1000_ioctl - handle ioctl calls
+ * @netdev: pointer to our netdev
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl data
**/
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -4737,9 +4747,9 @@ static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* e1000_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to our netdev
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl data
**/
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd)
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 4b103cca8a39..be9c695dde12 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1072,7 +1072,6 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
/**
* e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
* @hw: pointer to the HW structure
- * @duplex: current duplex setting
*
* Configure the KMRN interface by applying last minute quirks for
* 10/100 operation.
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index a8fc9208382c..03215b0aee4b 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -895,6 +895,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
mask |= BIT(18);
break;
default:
@@ -1560,6 +1561,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
fext_nvm11 = er32(FEXTNVM11);
fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
ew32(FEXTNVM11, fext_nvm11);
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index b1447221669e..69a2329ea463 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -102,6 +102,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F
#define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C
#define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D
+#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
+#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
+#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
+#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
#define E1000_REVISION_4 4
@@ -127,6 +131,7 @@ enum e1000_mac_type {
e1000_pch_cnp,
e1000_pch_tgp,
e1000_pch_adp,
+ e1000_pch_mtp,
};
enum e1000_media_type {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b2f2fcfdf732..9aa6fad8ed47 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -320,6 +320,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if (e1000_phy_is_accessible_pchlan(hw))
break;
@@ -464,6 +465,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
/* In case the PHY needs to be in mdio slow mode,
* set slow mode and try to get the PHY id again.
*/
@@ -708,6 +710,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
case e1000_pchlan:
/* check management mode */
mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -743,7 +746,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
/**
* __e1000_access_emi_reg_locked - Read/write EMI register
* @hw: pointer to the HW structure
- * @addr: EMI address to program
+ * @address: EMI address to program
* @data: pointer to value to read/write from/to the EMI address
* @read: boolean flag to indicate read or write
*
@@ -1648,6 +1651,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
rc = e1000_init_phy_params_pchlan(hw);
break;
default:
@@ -2102,6 +2106,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
break;
default:
@@ -2266,7 +2271,7 @@ release:
/**
* e1000_configure_k1_ich8lan - Configure K1 power state
* @hw: pointer to the HW structure
- * @enable: K1 state to configure
+ * @k1_enable: K1 state to configure
*
* Configure the K1 power state based on the provided parameter.
* Assumes semaphore already acquired.
@@ -2405,8 +2410,10 @@ static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
}
/**
- * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be
- * done after every PHY reset.
+ * e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
**/
static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
@@ -2694,8 +2701,10 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
}
/**
- * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be
- * done after every PHY reset.
+ * e1000_lv_phy_workarounds_ich8lan - apply ich8 specific workarounds
+ * @hw: pointer to the HW structure
+ *
+ * A series of PHY workarounds to be done after every PHY reset.
**/
static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
{
@@ -3141,6 +3150,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
bank1_offset = nvm->flash_bank_size;
act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4086,6 +4096,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
word = NVM_COMPAT;
valid_csum_mask = NVM_COMPAT_VALID_CSUM;
break;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 664e8ccc88d2..b30f00891c03 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -501,6 +501,7 @@ rx_ring_summary:
/**
* e1000_desc_unused - calculate if we have unused descriptors
+ * @ring: pointer to ring struct to perform calculation on
**/
static int e1000_desc_unused(struct e1000_ring *ring)
{
@@ -577,6 +578,7 @@ static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
/**
* e1000_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
+ * @netdev: pointer to netdev struct
* @staterr: descriptor extended error and status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
* @skb: pointer to sk_buff to be indicated to stack
@@ -601,8 +603,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter,
* e1000_rx_checksum - Receive Checksum Offload
* @adapter: board private structure
* @status_err: receive descriptor status and error fields
- * @csum: receive descriptor csum field
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
struct sk_buff *skb)
@@ -673,6 +674,8 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
/**
* e1000_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: Rx descriptor ring
+ * @cleaned_count: number to reallocate
+ * @gfp: flags for allocation
**/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
@@ -741,6 +744,8 @@ map_skb:
/**
* e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
* @rx_ring: Rx descriptor ring
+ * @cleaned_count: number to reallocate
+ * @gfp: flags for allocation
**/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
int cleaned_count, gfp_t gfp)
@@ -844,6 +849,7 @@ no_buffers:
* e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
* @rx_ring: Rx descriptor ring
* @cleaned_count: number of buffers to allocate this pass
+ * @gfp: flags for allocation
**/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
@@ -933,6 +939,8 @@ static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
/**
* e1000_clean_rx_irq - Send received data up the network stack
* @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -1327,6 +1335,8 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
/**
* e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
* @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -1517,9 +1527,6 @@ next_desc:
return cleaned;
}
-/**
- * e1000_consume_page - helper function
- **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
u16 length)
{
@@ -1531,7 +1538,9 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
/**
* e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
- * @adapter: board private structure
+ * @rx_ring: Rx descriptor ring
+ * @work_done: output parameter for indicating completed work
+ * @work_to_do: how many packets we can clean
*
* the return value indicates whether actual cleaning was done, there
* is no guarantee that everything was cleaned
@@ -1994,6 +2003,7 @@ static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
/**
* e1000_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
*
* e1000_configure_msix sets up the hardware to properly
* generate MSI-X interrupts.
@@ -2072,6 +2082,7 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
/**
* e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
*
* Attempt to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -2127,6 +2138,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
/**
* e1000_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
*
* e1000_request_msix allocates MSI-X vectors and requests interrupts from the
* kernel.
@@ -2180,6 +2192,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
/**
* e1000_request_irq - initialize interrupts
+ * @adapter: board private structure
*
* Attempts to configure interrupts using the best available
* capabilities of the hardware and kernel.
@@ -2240,6 +2253,7 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
/**
* e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
**/
static void e1000_irq_disable(struct e1000_adapter *adapter)
{
@@ -2262,6 +2276,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
/**
* e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
**/
static void e1000_irq_enable(struct e1000_adapter *adapter)
{
@@ -2332,6 +2347,8 @@ void e1000e_release_hw_control(struct e1000_adapter *adapter)
/**
* e1000_alloc_ring_dma - allocate memory for a ring structure
+ * @adapter: board private structure
+ * @ring: ring struct for which to allocate dma
**/
static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
struct e1000_ring *ring)
@@ -2507,7 +2524,6 @@ void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
/**
* e1000_update_itr - update the dynamic ITR value based on statistics
- * @adapter: pointer to adapter
* @itr_setting: current adapter->itr
* @packets: the number of packets during this measurement interval
* @bytes: the number of bytes during this measurement interval
@@ -3049,12 +3065,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
}
}
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+ (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+
/**
* e1000_setup_rctl - configure the receive control registers
* @adapter: Board private structure
**/
-#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
- (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
static void e1000_setup_rctl(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -3570,6 +3587,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 24MHz frequency */
incperiod = INCPERIOD_24MHZ;
@@ -3605,6 +3623,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
/**
* e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
* @adapter: board private structure
+ * @config: timestamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -3808,6 +3827,7 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
/**
* e1000_power_down_phy - Power down the PHY
+ * @adapter: board private structure
*
* Power down the PHY so no link is implied when interface is down.
* The PHY cannot be powered down if management or WoL is active.
@@ -3820,6 +3840,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
/**
* e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ * @adapter: board private structure
*
* We want to clear all pending descriptors from the TX ring.
* zeroing happens when the HW reads the regs. We assign the ring itself as
@@ -3854,6 +3875,7 @@ static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
/**
* e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ * @adapter: board private structure
*
* Mark all descriptors in the RX ring as consumed and disable the rx ring
*/
@@ -3886,6 +3908,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
/**
* e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ * @adapter: board private structure
*
* In i219, the descriptor rings must be emptied before resetting the HW
* or before changing the device state to D3 during runtime (runtime PM).
@@ -3968,6 +3991,7 @@ static void e1000e_systim_reset(struct e1000_adapter *adapter)
/**
* e1000e_reset - bring the hardware into a known good state
+ * @adapter: board private structure
*
* This function boots the hardware and enables some settings that
* require a configuration cycle of the hardware - those cannot be
@@ -4081,6 +4105,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
fc->refresh_time = 0xFFFF;
fc->pause_time = 0xFFFF;
@@ -4847,7 +4872,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
/**
* e1000_update_phy_info - timer call-back to update PHY info
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to the timer_list containing the adapter's private info
*
* Need to wait a few seconds after link up to get diagnostic information from
* the phy
@@ -5187,7 +5212,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
/**
* e1000_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to the timer_list containing the adapter's private info
**/
static void e1000_watchdog(struct timer_list *t)
{
@@ -5972,8 +5997,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/**
* e1000_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: index of the hung queue (unused)
**/
-static void e1000_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void e1000_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6174,7 +6200,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
/**
* e1000e_hwtstamp_ioctl - control hardware time stamping
* @netdev: network interface device structure
- * @ifreq: interface request
+ * @ifr: interface request
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -7853,6 +7879,10 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index e11c877595fb..bdd9dc163f15 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2311,6 +2311,7 @@ s32 e1000e_determine_phy_address(struct e1000_hw *hw)
/**
* e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
* @page: page to access
+ * @reg: register to check
*
* Returns the phy address for the page requested.
**/
@@ -2728,6 +2729,7 @@ void e1000_power_down_phy_copper(struct e1000_hw *hw)
* @offset: register offset to be read
* @data: pointer to the read data
* @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
*
* Acquires semaphore, if necessary, then reads the PHY register at offset
* and stores the retrieved information in data. Release any acquired
@@ -2836,6 +2838,7 @@ s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
* @offset: register offset to write to
* @data: data to write at register offset
* @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
*
* Acquires semaphore, if necessary, then writes the data to PHY register
* at the offset. Release any acquired semaphores before exiting.
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 34b988d70488..f3f671311855 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -144,7 +144,7 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
/**
* e1000e_phc_getsynctime - Reads the current system/device cross timestamp
* @ptp: ptp clock structure
- * @cts: structure containing timestamp
+ * @xtstamp: structure containing timestamp
*
* Read device and system (ART) clock simultaneously and return the scaled
* clock values in ns.
@@ -297,6 +297,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
case e1000_pch_cnp:
case e1000_pch_tgp:
case e1000_pch_adp:
+ case e1000_pch_mtp:
if ((hw->mac.type < e1000_pch_lpt) ||
(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
adapter->ptp_clock_info.max_adj = 24000000 - 1;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index d88dd41a9442..99b8252eb969 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -310,10 +310,7 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
rx_buffer->page_offset;
/* prefetch first cache line of first page */
- prefetch(page_addr);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)((u8 *)page_addr + L1_CACHE_BYTES));
-#endif
+ net_prefetch(page_addr);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi,
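net_prefetch() folds the open-coded two-cache-line prefetch into one helper; its definition in include/linux/netdevice.h is, as best I recall, equivalent to:

    static inline void net_prefetch(void *p)
    {
            prefetch(p);
    #if L1_CACHE_BYTES < 128
            /* pull in a second line on CPUs with small cache lines */
            prefetch((u8 *)p + L1_CACHE_BYTES);
    #endif
    }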
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 140212bfe08b..9e3103fae723 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -221,8 +221,6 @@ static bool fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
- WARN_ON(in_interrupt());
-
/* put off any impending NetWatchDogTimeout */
netif_trans_update(netdev);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index a7e212d1caa2..537300e762f0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -35,6 +35,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/udp_tunnel.h>
#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
@@ -90,7 +91,7 @@
#define I40E_OEM_RELEASE_MASK 0x0000ffff
#define I40E_RX_DESC(R, i) \
- (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
+ (&(((union i40e_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -133,7 +134,6 @@ enum i40e_state_t {
__I40E_PORT_SUSPENDED,
__I40E_VF_DISABLE,
__I40E_MACVLAN_SYNC_PENDING,
- __I40E_UDP_FILTER_SYNC_PENDING,
__I40E_TEMP_LINK_POLLING,
__I40E_CLIENT_SERVICE_REQUESTED,
__I40E_CLIENT_L2_CHANGE,
@@ -478,8 +478,8 @@ struct i40e_pf {
struct list_head l3_flex_pit_list;
struct list_head l4_flex_pit_list;
- struct i40e_udp_port_config udp_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
- u16 pending_udp_bitmap;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
struct hlist_head cloud_filter_list;
u16 num_cloud_filters;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index c897a2863e4f..593912b17609 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -541,6 +541,12 @@ static void i40e_set_hw_flags(struct i40e_hw *hw)
(aq->api_maj_ver == 1 &&
aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+
fallthrough;
default:
break;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index edec3df78971..ee394aacef4d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -85,8 +85,8 @@ struct i40e_adminq_info {
/**
* i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
**/
static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index c0c8efe42fce..1e960c3c7ef0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -24,6 +24,8 @@
#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+/* API version 1.10 for X722 devices adds ability to request FEC encoding */
+#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
struct i40e_aq_desc {
__le16 flags;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index befd3018183f..a2dba32383f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -278,8 +278,6 @@ void i40e_client_update_msix_info(struct i40e_pf *pf)
/**
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
- * @client: pointer to a client struct in the client list.
- * @existing: if there was already an existing instance
*
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 6ab52cbd697a..adc9e4fa4789 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -3766,9 +3766,7 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
- * @buff: buffer for result
* @persist: True if start of LLDP should be persistent across power cycles
- * @buff_size: buffer size
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
@@ -5395,6 +5393,7 @@ static void i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
@@ -5439,6 +5438,7 @@ enum i40e_status_code i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: flag to indicate if phy page should be updated
* @set_mdio: use MDIO I/F number specified by mdio_num
* @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index d3ad2e3aa838..d7c13ca9be7d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -604,10 +604,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ " d[%03x] = 0x%016llx 0x%016llx\n",
i, rxd->read.pkt_addr,
- rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.hdr_addr);
}
}
} else if (cnt == 3) {
@@ -625,10 +624,9 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+ "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr,
- rxd->read.rsvd1, rxd->read.rsvd2);
+ rxd->read.pkt_addr, rxd->read.hdr_addr);
}
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx/xdp <vsi_seid> <ring_id> [<desc_n>]\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 825c104ecba1..26ba1f3eb2d8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -891,6 +891,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ethtool_link_ksettings_add_link_mode(ks, advertising,
10000baseT_Full);
+ i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks);
break;
case I40E_PHY_TYPE_SGMII:
ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
@@ -1481,12 +1482,16 @@ static int i40e_set_fec_param(struct net_device *netdev,
struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw;
u8 fec_cfg = 0;
- int err = 0;
if (hw->device_id != I40E_DEV_ID_25G_SFP28 &&
- hw->device_id != I40E_DEV_ID_25G_B) {
- err = -EPERM;
- goto done;
+ hw->device_id != I40E_DEV_ID_25G_B &&
+ hw->device_id != I40E_DEV_ID_KX_X722)
+ return -EPERM;
+
+ if (hw->mac.type == I40E_MAC_X722 &&
+ !(hw->flags & I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE)) {
+ netdev_err(netdev, "Setting FEC encoding not supported by firmware. Please update the NVM image.\n");
+ return -EOPNOTSUPP;
}
switch (fecparam->fec) {
@@ -1508,14 +1513,10 @@ static int i40e_set_fec_param(struct net_device *netdev,
default:
dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d",
fecparam->fec);
- err = -EINVAL;
- goto done;
+ return -EINVAL;
}
- err = i40e_set_fec_cfg(netdev, fec_cfg);
-
-done:
- return err;
+ return i40e_set_fec_cfg(netdev, fec_cfg);
}
static int i40e_nway_reset(struct net_device *netdev)
@@ -1967,7 +1968,7 @@ static int i40e_set_ringparam(struct net_device *netdev,
(new_rx_count == vsi->rx_rings[0]->count))
return 0;
- /* If there is a AF_XDP UMEM attached to any of Rx rings,
+ /* If there is an AF_XDP page pool attached to any of the Rx rings,
* disallow changing the number of descriptors -- regardless
* if the netdev is running or not.
*/
@@ -4951,8 +4952,7 @@ flags_complete:
}
}
- if (((changed_flags & I40E_FLAG_RS_FEC) ||
- (changed_flags & I40E_FLAG_BASE_R_FEC)) &&
+ if (changed_flags & I40E_FLAG_RS_FEC &&
pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
pf->hw.device_id != I40E_DEV_ID_25G_B) {
dev_warn(&pf->pdev->dev,
@@ -4960,6 +4960,15 @@ flags_complete:
return -EOPNOTSUPP;
}
+ if (changed_flags & I40E_FLAG_BASE_R_FEC &&
+ pf->hw.device_id != I40E_DEV_ID_25G_SFP28 &&
+ pf->hw.device_id != I40E_DEV_ID_25G_B &&
+ pf->hw.device_id != I40E_DEV_ID_KX_X722) {
+ dev_warn(&pf->pdev->dev,
+ "Device does not support changing FEC configuration\n");
+ return -EOPNOTSUPP;
+ }
+
/* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were
* changed in the code above.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e433fdbf2c3..4f8a2154b93f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -287,6 +287,7 @@ void i40e_service_event_schedule(struct i40e_pf *pf)
/**
* i40e_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number timing out
*
* If any port has noticed a Tx timeout, it is likely that the whole
* device is munged, not just the one netdev port, so go for the full
@@ -1609,6 +1610,8 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
* i40e_config_rss_aq - Prepare for RSS using AQ commands
* @vsi: vsi structure
* @seed: RSS hash seed
+ * @lut: pointer to lookup table of lut_size
+ * @lut_size: size of the lookup table
**/
static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
@@ -3122,12 +3125,12 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
- * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * i40e_xsk_pool - Retrieve the AF_XDP buffer pool if XDP and ZC is enabled
* @ring: The Tx or Rx ring
*
- * Returns the UMEM or NULL.
+ * Returns the AF_XDP buffer pool or NULL.
**/
-static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+static struct xsk_buff_pool *i40e_xsk_pool(struct i40e_ring *ring)
{
bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
int qid = ring->queue_index;
@@ -3138,7 +3141,7 @@ static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+ return xsk_get_pool_from_qid(ring->vsi->netdev, qid);
}
/**
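The remaining hunks in this file apply the same AF_XDP buffer-pool rename; the mapping, collected from the changes themselves:

    /* struct xdp_umem *xsk_umem      -> struct xsk_buff_pool *xsk_pool
     * xdp_get_umem_from_qid()        -> xsk_get_pool_from_qid()
     * xsk_umem_get_rx_frame_size()   -> xsk_pool_get_rx_frame_size()
     * xsk_buff_set_rxq_info()        -> xsk_pool_set_rxq_info()
     * XDP_SETUP_XSK_UMEM             -> XDP_SETUP_XSK_POOL
     * i40e_xsk_umem_setup()          -> i40e_xsk_pool_setup()
     */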
@@ -3157,7 +3160,7 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
u32 qtx_ctl = 0;
if (ring_is_xdp(ring))
- ring->xsk_umem = i40e_xsk_umem(ring);
+ ring->xsk_pool = i40e_xsk_pool(ring);
/* some ATR related tx ring init */
if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
@@ -3280,12 +3283,13 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
kfree(ring->rx_bi);
- ring->xsk_umem = i40e_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
ret = i40e_alloc_rx_bi_zc(ring);
if (ret)
return ret;
- ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -3320,8 +3324,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- /* use 32 byte descriptors */
- rx_ctx.dsize = 1;
+ /* use 16 byte descriptors */
+ rx_ctx.dsize = 0;
/* descriptor type is always zero
* rx_ctx.dtype = 0;
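The dsize change pairs with the I40E_RX_DESC() cast earlier in this series: dsize selects the Rx descriptor layout, so the ring context and the union the driver indexes through must agree. A sketch of the relationship (the 16-byte aliasing of union i40e_rx_desc is assumed from the macro change in i40e.h):

    /* rx_ctx.dsize = 1  ->  union i40e_32byte_rx_desc (old layout)
     * rx_ctx.dsize = 0  ->  union i40e_16byte_rx_desc (new layout)
     */
    rx_ctx.dsize = 0;                /* program 16-byte descriptors */
    rxd = I40E_RX_DESC(ring, i);     /* macro now yields the 16-byte union */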
@@ -3368,8 +3372,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ if (ring->xsk_pool) {
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
ok = i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring));
} else {
ok = !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
@@ -3380,7 +3384,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
*/
dev_info(&vsi->back->pdev->dev,
"Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n",
- ring->xsk_umem ? "UMEM enabled " : "",
+ ring->xsk_pool ? "AF_XDP ZC enabled " : "",
ring->queue_index, pf_q);
}
@@ -5814,7 +5818,6 @@ static int i40e_vsi_reconfig_rss(struct i40e_vsi *vsi, u16 rss_size)
/**
* i40e_channel_setup_queue_map - Setup a channel queue map
* @pf: ptr to PF device
- * @vsi: the VSI being setup
* @ctxt: VSI context structure
* @ch: ptr to channel structure
*
@@ -6057,8 +6060,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
/**
* i40e_setup_channel - setup new channel using uplink element
* @pf: ptr to PF device
- * @type: type of channel to be created (VMDq2/VF)
- * @uplink_seid: underlying HW switching element (VEB) ID
+ * @vsi: pointer to the VSI to set up the channel within
* @ch: ptr to channel structure
*
* Setup new channel (VSI) based on specified type (VMDq2/VF)
@@ -6623,6 +6625,25 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, req_fec, fec, an, fc);
+ } else if (pf->hw.device_id == I40E_DEV_ID_KX_X722) {
+ req_fec = "None";
+ fec = "None";
+ an = "False";
+
+ if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
+ an = "True";
+
+ if (pf->hw.phy.link_info.fec_info &
+ I40E_AQ_CONFIG_FEC_KR_ENA)
+ fec = "CL74 FC-FEC/BASE-R";
+
+ if (pf->hw.phy.link_info.req_fec_info &
+ I40E_AQ_REQUEST_FEC_KR)
+ req_fec = "CL74 FC-FEC/BASE-R";
+
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ speed, req_fec, fec, an, fc);
} else {
netdev_info(vsi->netdev,
"NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
@@ -6689,7 +6710,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- WARN_ON(in_interrupt());
while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state))
usleep_range(1000, 2000);
i40e_down(vsi);
@@ -7779,7 +7799,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
/**
* i40e_parse_cls_flower - Parse tc flower filters provided by kernel
* @vsi: Pointer to VSI
- * @cls_flower: Pointer to struct flow_cls_offload
+ * @f: Pointer to struct flow_cls_offload
* @filter: Pointer to cloud filter structure
*
**/
@@ -8160,8 +8180,8 @@ static int i40e_delete_clsflower(struct i40e_vsi *vsi,
/**
* i40e_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @np: net device to configure
+ * @cls_flower: offload data
**/
static int i40e_setup_tc_cls_flower(struct i40e_netdev_priv *np,
struct flow_cls_offload *cls_flower)
@@ -8462,9 +8482,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
{
u32 val;
- WARN_ON(in_interrupt());
-
-
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -9585,6 +9602,7 @@ end_reconstitute:
/**
* i40e_get_capabilities - get info about the HW
* @pf: the PF struct
+ * @list_type: AQ capability to be queried
**/
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type)
@@ -10383,106 +10401,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
i40e_flush(hw);
}
-static const char *i40e_tunnel_name(u8 type)
-{
- switch (type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- return "vxlan";
- case UDP_TUNNEL_TYPE_GENEVE:
- return "geneve";
- default:
- return "unknown";
- }
-}
-
-/**
- * i40e_sync_udp_filters - Trigger a sync event for existing UDP filters
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters(struct i40e_pf *pf)
-{
- int i;
-
- /* loop through and set pending bit for all active UDP filters */
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->udp_ports[i].port)
- pf->pending_udp_bitmap |= BIT_ULL(i);
- }
-
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-}
-
-/**
- * i40e_sync_udp_filters_subtask - Sync the VSI filter list with HW
- * @pf: board private structure
- **/
-static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u8 filter_index, type;
- u16 port;
- int i;
-
- if (!test_and_clear_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state))
- return;
-
- /* acquire RTNL to maintain state of flags and port requests */
- rtnl_lock();
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- if (pf->pending_udp_bitmap & BIT_ULL(i)) {
- struct i40e_udp_port_config *udp_port;
- i40e_status ret = 0;
-
- udp_port = &pf->udp_ports[i];
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
-
- port = READ_ONCE(udp_port->port);
- type = READ_ONCE(udp_port->type);
- filter_index = READ_ONCE(udp_port->filter_index);
-
- /* release RTNL while we wait on AQ command */
- rtnl_unlock();
-
- if (port)
- ret = i40e_aq_add_udp_tunnel(hw, port,
- type,
- &filter_index,
- NULL);
- else if (filter_index != I40E_UDP_PORT_INDEX_UNUSED)
- ret = i40e_aq_del_udp_tunnel(hw, filter_index,
- NULL);
-
- /* reacquire RTNL so we can update filter_index */
- rtnl_lock();
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "%s %s port %d, index %d failed, err %s aq_err %s\n",
- i40e_tunnel_name(type),
- port ? "add" : "delete",
- port,
- filter_index,
- i40e_stat_str(&pf->hw, ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- if (port) {
- /* failed to add, just reset port,
- * drop pending bit for any deletion
- */
- udp_port->port = 0;
- pf->pending_udp_bitmap &= ~BIT_ULL(i);
- }
- } else if (port) {
- /* record filter index on success */
- udp_port->filter_index = filter_index;
- }
- }
- }
-
- rtnl_unlock();
-}
-
/**
* i40e_service_task - Run the driver's async subtasks
* @work: pointer to work_struct containing our data
@@ -10522,7 +10440,6 @@ static void i40e_service_task(struct work_struct *work)
pf->vsi[pf->lan_vsi]);
}
i40e_sync_filters_subtask(pf);
- i40e_sync_udp_filters_subtask(pf);
} else {
i40e_reset_subtask(pf);
}
@@ -10546,7 +10463,7 @@ static void i40e_service_task(struct work_struct *work)
/**
* i40e_service_timer - timer callback
- * @data: pointer to PF struct
+ * @t: timer list pointer
**/
static void i40e_service_timer(struct timer_list *t)
{
@@ -11185,11 +11102,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
- * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
{
struct i40e_q_vector *q_vector;
@@ -11222,7 +11138,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int err, v_idx, num_q_vectors, current_cpu;
+ int err, v_idx, num_q_vectors;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -11232,15 +11148,10 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
- current_cpu = cpumask_first(cpu_online_mask);
-
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
- current_cpu = cpumask_next(current_cpu, cpu_online_mask);
- if (unlikely(current_cpu >= nr_cpu_ids))
- current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
@@ -12228,131 +12139,48 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-/**
- * i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
- * @pf: board private structure
- * @port: The UDP port to look up
- *
- * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
- **/
-static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, u16 port)
-{
- u8 i;
-
- for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
- /* Do not report ports with pending deletions as
- * being available.
- */
- if (!port && (pf->pending_udp_bitmap & BIT_ULL(i)))
- continue;
- if (pf->udp_ports[i].port == port)
- return i;
- }
-
- return i;
-}
-
-/**
- * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_add(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_set_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 next_idx;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "port %d already offloaded\n", port);
- return;
- }
-
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ u8 type, filter_index;
+ i40e_status ret;
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
- port);
- return;
- }
+ type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? I40E_AQC_TUNNEL_TYPE_VXLAN :
+ I40E_AQC_TUNNEL_TYPE_NGE;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (!(pf->hw_features & I40E_HW_GENEVE_OFFLOAD_CAPABLE))
- return;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
- break;
- default:
- return;
+ ret = i40e_aq_add_udp_tunnel(hw, ntohs(ti->port), type, &filter_index,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "add UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].port = port;
- pf->udp_ports[next_idx].filter_index = I40E_UDP_PORT_INDEX_UNUSED;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, filter_index);
+ return 0;
}
-/**
- * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- **/
-static void i40e_udp_tunnel_del(struct net_device *netdev,
- struct udp_tunnel_info *ti)
+static int i40e_udp_tunnel_unset_port(struct net_device *netdev,
+ unsigned int table, unsigned int idx,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
- goto not_found;
+ struct i40e_hw *hw = &np->vsi->back->hw;
+ i40e_status ret;
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
- goto not_found;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
- goto not_found;
- break;
- default:
- goto not_found;
+ ret = i40e_aq_del_udp_tunnel(hw, ti->hw_priv, NULL);
+ if (ret) {
+ netdev_info(netdev, "delete UDP port failed, err %s aq_err %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return -EIO;
}
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].port = 0;
-
- /* Toggle pending bit instead of setting it. This way if we are
- * deleting a port that has yet to be added we just clear the pending
- * bit and don't have to worry about it.
- */
- pf->pending_udp_bitmap ^= BIT_ULL(idx);
- set_bit(__I40E_UDP_FILTER_SYNC_PENDING, pf->state);
-
- return;
-not_found:
- netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
- port);
+ return 0;
}
static int i40e_get_phys_port_id(struct net_device *netdev,
@@ -12379,6 +12207,7 @@ static int i40e_get_phys_port_id(struct net_device *netdev,
* @addr: the MAC address entry being added
* @vid: VLAN ID
* @flags: instructions from stack about fdb operation
+ * @extack: netlink extended ack, unused currently
*/
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -12644,7 +12473,7 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi,
*/
if (need_reset && prog)
for (i = 0; i < vsi->num_queue_pairs; i++)
- if (vsi->xdp_rings[i]->xsk_umem)
+ if (vsi->xdp_rings[i]->xsk_pool)
(void)i40e_xsk_wakeup(vsi->netdev, i,
XDP_WAKEUP_RX);
@@ -12923,8 +12752,8 @@ static int i40e_xdp(struct net_device *dev,
switch (xdp->command) {
case XDP_SETUP_PROG:
return i40e_xdp_setup(vsi, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return i40e_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -12957,8 +12786,8 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
.ndo_set_vf_trust = i40e_ndo_set_vf_trust,
- .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
- .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -13022,6 +12851,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
if (!(pf->hw_features & I40E_HW_OUTER_UDP_CSUM_CAPABLE))
netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->udp_tunnel_nic_info = &pf->udp_tunnel_nic;
+
netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
netdev->hw_enc_features |= hw_enc_features;
@@ -14422,7 +14253,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
i40e_ptp_init(pf);
/* repopulate tunnel port filters */
- i40e_sync_udp_filters(pf);
+ udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
return ret;
}
@@ -15151,6 +14982,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_switch_setup;
+ pf->udp_tunnel_nic.set_port = i40e_udp_tunnel_set_port;
+ pf->udp_tunnel_nic.unset_port = i40e_udp_tunnel_unset_port;
+ pf->udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->udp_tunnel_nic.shared = &pf->udp_tunnel_shared;
+ pf->udp_tunnel_nic.tables[0].n_entries = I40E_MAX_PF_UDP_OFFLOAD_PORTS;
+ pf->udp_tunnel_nic.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN |
+ UDP_TUNNEL_TYPE_GENEVE;
+
/* The number of VSIs reported by the FW is the minimum guaranteed
* to us; HW supports far more and we share the remaining pool with
* the other PFs. We allocate space for more than the guarantee with
@@ -15160,6 +14999,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
else
pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
pf->vsi = kcalloc(pf->num_alloc_vsi, sizeof(struct i40e_vsi *),
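The i40e changes above are a conversion to the udp_tunnel_nic infrastructure: instead of tracking offloaded ports in a driver-private table and syncing them from a service task, the driver describes its port table once and implements two callbacks. A hedged sketch of the pattern follows; the foo_* names and the 16-entry table size are illustrative placeholders, not i40e's.

#include <net/udp_tunnel.h>

static int foo_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
				   unsigned int idx, struct udp_tunnel_info *ti)
{
	u8 hw_filter_idx;

	/* program ntohs(ti->port) into the HW filter table here ... */
	hw_filter_idx = idx;	/* whatever handle the HW/FW returns */

	/* remember the HW handle; it comes back as ti->hw_priv on delete */
	udp_tunnel_nic_set_port_priv(dev, table, idx, hw_filter_idx);
	return 0;
}

static int foo_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
				     unsigned int idx, struct udp_tunnel_info *ti)
{
	/* remove the HW filter identified by ti->hw_priv here ... */
	return 0;
}

static const struct udp_tunnel_nic_info foo_udp_tunnels = {
	.set_port	= foo_udp_tunnel_set_port,
	.unset_port	= foo_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{
			.n_entries	= 16,
			.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
					  UDP_TUNNEL_TYPE_GENEVE,
		},
	},
};

/* in probe: netdev->udp_tunnel_nic_info = &foo_udp_tunnels; */

The core layer then owns deduplication, reference counting and replay (udp_tunnel_nic_reset_ntf() above re-adds every port after a PF reset), which is what lets the driver delete its udp_ports[] table, pending bitmap and __I40E_UDP_FILTER_SYNC_PENDING logic.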
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index ff7b19c6bc73..7a879614ca55 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -259,7 +259,6 @@ static u32 i40e_ptp_get_rx_events(struct i40e_pf *pf)
/**
* i40e_ptp_rx_hang - Detect error case when Rx timestamp registers are hung
* @pf: The PF private data structure
- * @vsi: The VSI with the rings relevant to 1588
*
* This watchdog task is scheduled to detect error case where hardware has
* dropped an Rx packet that was timestamped when the ring is full. The
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 424f02077e2e..b5b12299931f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -22,7 +22,7 @@
#include <linux/tracepoint.h>
-/**
+/*
* i40e_trace() macro enables shared code to refer to trace points
* like:
*
@@ -112,7 +112,7 @@ DECLARE_EVENT_CLASS(
i40e_rx_template,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
@@ -148,7 +148,7 @@ DEFINE_EVENT(
DEFINE_EVENT(
i40e_rx_template, i40e_clean_rx_irq_rx,
TP_PROTO(struct i40e_ring *ring,
- union i40e_32byte_rx_desc *desc,
+ union i40e_16byte_rx_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 3e5c566ceb01..d43ce13a93c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -533,11 +533,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring, u64 qword0_raw,
{
struct i40e_pf *pf = rx_ring->vsi->back;
struct pci_dev *pdev = pf->pdev;
- struct i40e_32b_rx_wb_qw0 *qw0;
+ struct i40e_16b_rx_wb_qw0 *qw0;
u32 fcnt_prog, fcnt_avail;
u32 error;
- qw0 = (struct i40e_32b_rx_wb_qw0 *)&qword0_raw;
+ qw0 = (struct i40e_16b_rx_wb_qw0 *)&qword0_raw;
error = (qword1 & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
@@ -636,7 +636,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
unsigned long bi_size;
u16 i;
- if (ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
i40e_xsk_clean_tx_ring(tx_ring);
} else {
/* ring already cleared, nothing to do */
@@ -1335,7 +1335,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
rx_ring->skb = NULL;
}
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
i40e_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -1369,7 +1369,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
}
skip_free:
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
i40e_clear_rx_bi_zc(rx_ring);
else
i40e_clear_rx_bi(rx_ring);
@@ -1418,7 +1418,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1755,7 +1755,6 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
- * @rx_ptype: the packet type decoded by hardware
*
* This function checks the ring, descriptor, and packet information in
* order to populate the hash, checksum, VLAN, protocol, and
@@ -1953,7 +1952,7 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer;
rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
- prefetchw(rx_buffer->page);
+ prefetch_page_address(rx_buffer->page);
/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
@@ -1992,10 +1991,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2078,10 +2075,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
+
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -2300,6 +2295,19 @@ void i40e_finalize_xdp_rx(struct i40e_ring *rx_ring, unsigned int xdp_res)
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+}
+
+/**
* i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -2579,7 +2587,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
i40e_clean_xdp_tx_irq(vsi, ring) :
i40e_clean_tx_irq(vsi, ring, budget);
@@ -2607,7 +2615,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
i40e_clean_rx_irq_zc(ring, budget_per_ring) :
i40e_clean_rx_irq(ring, budget_per_ring);
@@ -3503,7 +3511,7 @@ dma_error:
/**
* i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
- * @xdp: data to transmit
+ * @xdpf: data to transmit
* @xdp_ring: XDP Tx ring
**/
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
@@ -3698,7 +3706,9 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/**
* i40e_xdp_xmit - Implements ndo_xdp_xmit
* @dev: netdev
- * @xdp: XDP buffer
+ * @n: number of frames
+ * @frames: array of XDP buffer pointers
+ * @flags: XDP extra info
*
* Returns number of frames successfully sent. Frames that fail are
* free'ed via XDP return API.
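Several hunks above (and in iavf below) replace the same open-coded prefetch pair with net_prefetch(). For reference, the helper in <linux/netdevice.h> should be roughly:

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}

so the conversion is behavior-preserving: one cache line is prefetched on machines with 128-byte lines, two on smaller ones.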
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4036893d6825..2feed920ef8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -110,7 +110,7 @@ enum i40e_dyn_idx_t {
*/
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
-#define i40e_rx_desc i40e_32byte_rx_desc
+#define i40e_rx_desc i40e_16byte_rx_desc
#define I40E_RX_DMA_ATTR \
(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
@@ -388,7 +388,7 @@ struct i40e_ring {
struct i40e_channel *ch;
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;
static inline bool ring_uses_build_skb(struct i40e_ring *ring)
@@ -482,7 +482,6 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
- * @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
index 667c4dc4b39f..19da3b22160f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx_common.h
@@ -21,9 +21,9 @@ void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val);
#define I40E_XDP_TX BIT(1)
#define I40E_XDP_REDIR BIT(2)
-/**
+/*
* build_ctob - Builds the Tx descriptor (cmd, offset and type) qword
- **/
+ */
static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
u32 td_tag)
{
@@ -37,7 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
/**
* i40e_update_tx_stats - Update the egress statistics for the Tx ring
* @tx_ring: Tx ring to update
- * @total_packet: total packets sent
+ * @total_packets: total packets sent
* @total_bytes: total bytes sent
**/
static inline void i40e_update_tx_stats(struct i40e_ring *tx_ring,
@@ -99,19 +99,6 @@ static inline bool i40e_rx_is_programming_status(u64 qword1)
return qword1 & I40E_RXD_QW1_LENGTH_SPH_MASK;
}
-/**
- * i40e_inc_ntc: Advance the next_to_clean index
- * @rx_ring: Rx ring
- **/
-static inline void i40e_inc_ntc(struct i40e_ring *rx_ring)
-{
- u32 ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(I40E_RX_DESC(rx_ring, ntc));
-}
-
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring);
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring);
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 52410d609ba1..c0bdc666f557 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -595,6 +595,7 @@ struct i40e_hw {
#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
+#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
u64 flags;
/* Used in set switch config AQ command */
@@ -628,7 +629,7 @@ union i40e_16byte_rx_desc {
__le64 hdr_addr; /* Header buffer address */
} read;
struct {
- struct {
+ struct i40e_16b_rx_wb_qw0 {
struct {
union {
__le16 mirroring_status;
@@ -647,6 +648,9 @@ union i40e_16byte_rx_desc {
__le64 status_error_len;
} qword1;
} wb; /* writeback */
+ struct {
+ u64 qword[2];
+ } raw;
};
union i40e_32byte_rx_desc {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 47bfb2e95e2d..c96e2f2d4cba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2244,7 +2244,8 @@ error_param:
}
/**
- * i40e_validate_queue_map
+ * i40e_validate_queue_map - check queue map is valid
+ * @vf: the VF structure pointer
* @vsi_id: vsi id
* @queuemap: Tx or Rx queue map
*
@@ -3160,8 +3161,8 @@ err:
/**
* i40e_validate_cloud_filter
- * @mask: mask for TC filter
- * @data: data for TC filter
+ * @vf: pointer to VF structure
+ * @tc_filter: pointer to filter requested
*
* This function validates cloud filter programmed as TC filter for ADq
**/
@@ -3294,7 +3295,7 @@ err:
/**
* i40e_find_vsi_from_seid - searches for the vsi with the given seid
* @vf: pointer to the VF info
- * @seid - seid of the vsi it is searching for
+ * @seid: seid of the vsi it is searching for
**/
static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
{
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 8ce57b507a21..6acede0acdca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -29,14 +29,16 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
}
/**
- * i40e_xsk_umem_enable - Enable/associate a UMEM to a certain ring/qid
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
* @vsi: Current VSI
- * @umem: UMEM
- * @qid: Rx ring to associate UMEM to
+ * @pool: buffer pool
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
+static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
@@ -53,7 +55,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
if (err)
return err;
@@ -80,21 +82,22 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
}
/**
- * i40e_xsk_umem_disable - Disassociate a UMEM from a certain ring/qid
+ * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
+ * certain ring/qid
* @vsi: Current VSI
- * @qid: Rx ring to associate UMEM to
+ * @qid: Rx ring to associate buffer pool with
*
* Returns 0 on success, <0 on failure
**/
-static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
+static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
struct net_device *netdev = vsi->netdev;
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
int err;
- umem = xdp_get_umem_from_qid(netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -106,7 +109,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
clear_bit(qid, vsi->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, I40E_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -118,20 +121,21 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
}
/**
- * i40e_xsk_umem_setup - Enable/disassociate a UMEM to/from a ring/qid
+ * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
+ * a ring/qid
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, or NULL to disable
- * @qid: Rx ring to (dis)associate UMEM (from)to
+ * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
+ * @qid: Rx ring to (dis)associate the buffer pool to/from
*
- * This function enables or disables a UMEM to a certain ring.
+ * This function enables or disables a buffer pool for a certain ring.
*
* Returns 0 on success, <0 on failure
**/
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? i40e_xsk_umem_enable(vsi, umem, qid) :
- i40e_xsk_umem_disable(vsi, qid);
+ return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
+ i40e_xsk_pool_disable(vsi, qid);
}
/**
@@ -191,7 +195,7 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
rx_desc = I40E_RX_DESC(rx_ring, ntu);
bi = i40e_rx_bi(rx_ring, ntu);
do {
- xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!xdp) {
ok = false;
goto no_buffers;
@@ -254,6 +258,18 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
}
/**
+ * i40e_inc_ntc - Advance the next_to_clean index
+ * @rx_ring: Rx ring
+ **/
+static void i40e_inc_ntc(struct i40e_ring *rx_ring)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+}
+
+/**
* i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
* @rx_ring: Rx ring
* @budget: NAPI budget
@@ -265,8 +281,8 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
unsigned int xdp_res, xdp_xmit = 0;
- bool failure = false;
struct sk_buff *skb;
+ bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
union i40e_rx_desc *rx_desc;
@@ -274,13 +290,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
unsigned int size;
u64 qword;
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- !i40e_alloc_rx_buffers_zc(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
-
rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
@@ -310,7 +319,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
(*bi)->data_end = (*bi)->data + size;
- xsk_buff_dma_sync_for_cpu(*bi);
+ xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
if (xdp_res) {
@@ -355,14 +364,17 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->q_vector->napi, skb);
}
+ if (cleaned_count >= I40E_RX_BUFFER_WRITE)
+ failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);
+
i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -385,11 +397,11 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
dma_addr_t dma;
while (budget-- > 0) {
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
@@ -416,7 +428,7 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
I40E_TXD_QW1_CMD_SHIFT);
i40e_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
}
@@ -448,7 +460,7 @@ static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
**/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
u32 i, completed_frames, xsk_frames = 0;
u32 head_idx = i40e_get_head(tx_ring);
struct i40e_tx_buffer *tx_bi;
@@ -488,13 +500,13 @@ skip:
tx_ring->next_to_clean -= tx_ring->count;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
i40e_arm_wb(tx_ring, vsi, completed_frames);
out_xmit:
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_ring->xsk_pool);
return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}
@@ -526,7 +538,7 @@ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
if (queue_id >= vsi->num_queue_pairs)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -565,7 +577,7 @@ void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *bp = tx_ring->xsk_pool;
struct i40e_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -585,14 +597,15 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(bp, xsk_frames);
}
/**
- * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have AF_XDP UMEM attached
+ * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
+ * buffer pool attached
* @vsi: vsi
*
- * Returns true if any of the Rx rings has an AF_XDP UMEM attached
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
@@ -600,7 +613,7 @@ bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
int i;
for (i = 0; i < vsi->num_queue_pairs; i++) {
- if (xdp_get_umem_from_qid(netdev, i))
+ if (xsk_get_pool_from_qid(netdev, i))
return true;
}
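The xsk_umem to xsk_buff_pool rename is mechanical but touches every AF_XDP call site. A condensed sketch of the renamed Tx zero-copy loop, using only APIs visible in this diff; foo_post_tx_desc() is a hypothetical stand-in for building and posting the HW descriptor, and tail/RS-bit handling is omitted.

static void foo_xmit_zc(struct xsk_buff_pool *pool, unsigned int budget)
{
	struct xdp_desc desc;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (!xsk_tx_peek_desc(pool, &desc))	/* was xsk_umem_consume_tx() */
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		foo_post_tx_desc(dma, desc.len);	/* hypothetical HW step */
	}

	xsk_tx_release(pool);	/* was xsk_umem_consume_tx_done() */
}

On the completion side, xsk_umem_complete_tx() becomes xsk_tx_completed() and xsk_umem_uses_need_wakeup() becomes xsk_uses_need_wakeup(), as the hunks above show.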
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
index c524c142127f..7adfd8539247 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
@@ -5,12 +5,12 @@
#define _I40E_XSK_H_
struct i40e_vsi;
-struct xdp_umem;
+struct xsk_buff_pool;
struct zero_copy_allocator;
int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair);
int i40e_queue_pair_enable(struct i40e_vsi *vsi, int queue_pair);
-int i40e_xsk_umem_setup(struct i40e_vsi *vsi, struct xdp_umem *umem,
+int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 cleaned_count);
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_adminq.h b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
index baf2fe26f302..1f60518eb0e5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_adminq.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.h
@@ -85,8 +85,8 @@ struct iavf_adminq_info {
/**
* iavf_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
+ * @aq_ret: AdminQ handler error code can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
**/
static inline int iavf_aq_rc_to_posix(int aq_ret, int aq_rc)
{
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cf539db79af9..95543dfd4fe7 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -147,6 +147,7 @@ void iavf_schedule_reset(struct iavf_adapter *adapter)
/**
* iavf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number that is timing out
**/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
@@ -2572,8 +2573,8 @@ static int iavf_validate_ch_config(struct iavf_adapter *adapter,
}
/**
- * iavf_del_all_cloud_filters - delete all cloud filters
- * on the traffic classes
+ * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
+ * @adapter: board private structure
**/
static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
{
@@ -2592,7 +2593,7 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
- * @type_date: tc offload data
+ * @type_data: tc offload data
*
* This function processes the config information provided by the
* user to configure traffic classes/queue channels and packages the
@@ -2690,7 +2691,7 @@ exit:
/**
* iavf_parse_cls_flower - Parse tc flower filters provided by kernel
* @adapter: board private structure
- * @cls_flower: pointer to struct flow_cls_offload
+ * @f: pointer to struct flow_cls_offload
* @filter: pointer to cloud filter structure
*/
static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
@@ -3064,8 +3065,8 @@ static int iavf_delete_clsflower(struct iavf_adapter *adapter,
/**
* iavf_setup_tc_cls_flower - flower classifier offloads
- * @netdev: net device to configure
- * @type_data: offload data
+ * @adapter: board private structure
+ * @cls_flower: pointer to flow_cls_offload struct with flow info
*/
static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
struct flow_cls_offload *cls_flower)
@@ -3112,7 +3113,7 @@ static LIST_HEAD(iavf_block_cb_list);
* iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
* @type: type of offload
- * @type_date: tc offload data
+ * @type_data: tc offload data
*
* This function is the callback to ndo_setup_tc in the
* netdev_ops.
@@ -3768,8 +3769,7 @@ err_dma:
/**
* iavf_suspend - Power management suspend routine
- * @pdev: PCI device information struct
- * @state: unused
+ * @dev_d: device info pointer
*
* Called when the system (VM) is entering sleep/suspend.
**/
@@ -3799,7 +3799,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
/**
* iavf_resume - Power management resume routine
- * @pdev: PCI device information struct
+ * @dev_d: device info pointer
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 1058e68a02b4..82fda6f5abf0 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -22,7 +22,7 @@
#include <linux/tracepoint.h>
-/**
+/*
* iavf_trace() macro enables shared code to refer to trace points
* like:
*
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index ca041b39ffda..256fa07d54d5 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1309,10 +1309,7 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
@@ -1376,10 +1373,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
return NULL;
/* prefetch first cache line of first page */
va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
+
/* build an skb around the page buffer */
skb = build_skb(va - IAVF_SKB_PAD, truesize);
if (unlikely(!skb))
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index dd3348f9da9d..e5b9ba42dd00 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -454,7 +454,6 @@ bool __iavf_chk_linearize(struct sk_buff *skb);
/**
* iavf_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
- * @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
@@ -514,6 +513,7 @@ static inline bool iavf_chk_linearize(struct sk_buff *skb, int count)
return count != IAVF_MAX_BUFFER_TXD;
}
/**
+ * txring_txq - helper to convert from a ring to a queue
* @ring: Tx ring to find the netdev equivalent of
**/
static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index fe140ff38f74..a0723831c4e4 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -284,6 +284,10 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+ bool devlink_port_registered;
+
u16 max_frame;
u16 rx_buf_len;
@@ -321,9 +325,9 @@ struct ice_vsi {
struct ice_ring **xdp_rings; /* XDP ring array */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
- struct xdp_umem **xsk_umems;
- u16 num_xsk_umems_used;
- u16 num_xsk_umems;
+ struct xsk_buff_pool **xsk_pools;
+ u16 num_xsk_pools_used;
+ u16 num_xsk_pools;
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -375,9 +379,6 @@ enum ice_pf_flags {
struct ice_pf {
struct pci_dev *pdev;
- /* devlink port data */
- struct devlink_port devlink_port;
-
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
@@ -507,25 +508,25 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
}
/**
- * ice_xsk_umem - get XDP UMEM bound to a ring
- * @ring - ring to use
+ * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: ring to use
*
- * Returns a pointer to xdp_umem structure if there is an UMEM present,
+ * Returns a pointer to the xsk_buff_pool structure if a buffer pool is present,
* NULL otherwise.
*/
-static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
{
- struct xdp_umem **umems = ring->vsi->xsk_umems;
+ struct xsk_buff_pool **pools = ring->vsi->xsk_pools;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
- if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
+ if (qid >= ring->vsi->num_xsk_pools || !pools || !pools[qid] ||
!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
- return umems[qid];
+ return pools[qid];
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index ba9375218fef..b06fbe99d8e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1422,7 +1422,7 @@ struct ice_aqc_nvm_comp_tbl {
u8 cvs[]; /* Component Version String */
} __packed;
-/**
+/*
* Send to PF command (indirect 0x0801) ID is only used by PF
*
* Send to VF command (indirect 0x0802) ID is only used by PF
@@ -1826,8 +1826,8 @@ struct ice_aqc_event_lan_overflow {
* @opcode: AQ command opcode
* @datalen: length in bytes of indirect/external data buffer
* @retval: return value from firmware
- * @cookie_h: opaque data high-half
- * @cookie_l: opaque data low-half
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 87008476d8fe..fe4320e2d1f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -308,12 +308,12 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
- ring->xsk_umem = ice_xsk_umem(ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ice_xsk_pool(ring);
+ if (ring->xsk_pool) {
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
ring->rx_buf_len =
- xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
/* For AF_XDP ZC, we disallow packets to span on
* multiple buffers, thus letting us skip that
* handling in the fast-path.
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
NULL);
if (err)
return err;
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
ring->q_index);
@@ -417,9 +417,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring->xsk_umem) {
- if (!xsk_buff_can_alloc(ring->xsk_umem, num_bufs)) {
- dev_warn(dev, "UMEM does not provide enough addresses to fill %d buffers on Rx ring %d\n",
+ if (ring->xsk_pool) {
+ if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {
+ dev_warn(dev, "XSK buffer pool does not provide enough addresses to fill %d buffers on Rx ring %d\n",
num_bufs, ring->q_index);
dev_warn(dev, "Change Rx ring/fill queue size to avoid performance issues\n");
@@ -428,7 +428,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
err = ice_alloc_rx_bufs_zc(ring, num_bufs);
if (err)
- dev_info(dev, "Failed to allocate some buffers on UMEM enabled Rx ring %d (pf_q %d)\n",
+ dev_info(dev, "Failed to allocate some buffers on XSK buffer pool enabled Rx ring %d (pf_q %d)\n",
ring->q_index, pf_q);
return 0;
}
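ice_setup_rx_ctx() now pulls the Rx buffer length straight from the pool's frame size and registers MEM_TYPE_XSK_BUFF_POOL on the ring. The essential shape of that branch, with error paths trimmed (a sketch, not the full function):

	ring->xsk_pool = ice_xsk_pool(ring);	/* NULL unless a pool is bound to this queue */
	if (ring->xsk_pool) {
		/* one full frame per Rx buffer, so packets never span buffers */
		ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);

		err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
						 MEM_TYPE_XSK_BUFF_POOL, NULL);
		if (err)
			return err;

		/* let the pool DMA-sync against this ring's rxq info */
		xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
	}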
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 111d6bfe4222..511da59bd6f2 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -6,18 +6,14 @@
#include "ice_devlink.h"
#include "ice_fw_update.h"
-static int ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
+static void ice_info_get_dsn(struct ice_pf *pf, char *buf, size_t len)
{
u8 dsn[8];
/* Copy the DSN into an array in Big Endian format */
put_unaligned_be64(pci_get_dsn(pf->pdev), dsn);
- snprintf(buf, len, "%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
- dsn[0], dsn[1], dsn[2], dsn[3],
- dsn[4], dsn[5], dsn[6], dsn[7]);
-
- return 0;
+ snprintf(buf, len, "%8phD", dsn);
}
static int ice_info_pba(struct ice_pf *pf, char *buf, size_t len)
@@ -106,6 +102,13 @@ static int ice_info_ddp_pkg_version(struct ice_pf *pf, char *buf, size_t len)
return 0;
}
+static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, char *buf, size_t len)
+{
+ snprintf(buf, len, "0x%08x", pf->hw.active_track_id);
+
+ return 0;
+}
+
static int ice_info_netlist_ver(struct ice_pf *pf, char *buf, size_t len)
{
struct ice_netlist_ver_info *netlist = &pf->hw.netlist_ver;
@@ -150,6 +153,7 @@ static const struct ice_devlink_version {
running(DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID, ice_info_eetrack),
running("fw.app.name", ice_info_ddp_pkg_name),
running(DEVLINK_INFO_VERSION_GENERIC_FW_APP, ice_info_ddp_pkg_version),
+ running("fw.app.bundle_id", ice_info_ddp_pkg_bundle_id),
running("fw.netlist", ice_info_netlist_ver),
running("fw.netlist.build", ice_info_netlist_build),
};
@@ -180,11 +184,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
return err;
}
- err = ice_info_get_dsn(pf, buf, sizeof(buf));
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain serial number");
- return err;
- }
+ ice_info_get_dsn(pf, buf, sizeof(buf));
err = devlink_info_serial_number_put(req, buf);
if (err) {
@@ -233,8 +233,7 @@ static int ice_devlink_info_get(struct devlink *devlink,
/**
* ice_devlink_flash_update - Update firmware stored in flash on the device
* @devlink: pointer to devlink associated with device to update
- * @path: the path of the firmware file to use via request_firmware
- * @component: name of the component to update, or NULL
+ * @params: flash update parameters
* @extack: netlink extended ACK structure
*
* Perform a device flash update. The bulk of the update logic is contained
@@ -243,38 +242,52 @@ static int ice_devlink_info_get(struct devlink *devlink,
* Returns: zero on success, or an error code on failure.
*/
static int
-ice_devlink_flash_update(struct devlink *devlink, const char *path,
- const char *component, struct netlink_ext_ack *extack)
+ice_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
const struct firmware *fw;
+ u8 preservation;
int err;
- /* individual component update is not yet supported */
- if (component)
+ if (!params->overwrite_mask) {
+ /* preserve all settings and identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_ALL;
+ } else if (params->overwrite_mask == DEVLINK_FLASH_OVERWRITE_SETTINGS) {
+ /* overwrite settings, but preserve the vital device identifiers */
+ preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
+ } else if (params->overwrite_mask == (DEVLINK_FLASH_OVERWRITE_SETTINGS |
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS)) {
+ /* overwrite both settings and identifiers, preserve nothing */
+ preservation = ICE_AQC_NVM_NO_PRESERVATION;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
return -EOPNOTSUPP;
+ }
if (!hw->dev_caps.common_cap.nvm_unified_update) {
NL_SET_ERR_MSG_MOD(extack, "Current firmware does not support unified update");
return -EOPNOTSUPP;
}
- err = ice_check_for_pending_update(pf, component, extack);
+ err = ice_check_for_pending_update(pf, NULL, extack);
if (err)
return err;
- err = request_firmware(&fw, path, dev);
+ err = request_firmware(&fw, params->file_name, dev);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Unable to read file from disk");
return err;
}
+ dev_dbg(dev, "Beginning flash update with file '%s'\n", params->file_name);
+
devlink_flash_update_begin_notify(devlink);
- devlink_flash_update_status_notify(devlink, "Preparing to flash",
- component, 0, 0);
- err = ice_flash_pldm_image(pf, fw, extack);
+ devlink_flash_update_status_notify(devlink, "Preparing to flash", NULL, 0, 0);
+ err = ice_flash_pldm_image(pf, fw, preservation, extack);
devlink_flash_update_end_notify(devlink);
release_firmware(fw);
@@ -283,6 +296,7 @@ ice_devlink_flash_update(struct devlink *devlink, const char *path,
}
static const struct devlink_ops ice_devlink_ops = {
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
@@ -352,55 +366,66 @@ void ice_devlink_unregister(struct ice_pf *pf)
}
/**
- * ice_devlink_create_port - Create a devlink port for this PF
- * @pf: the PF to create a port for
+ * ice_devlink_create_port - Create a devlink port for this VSI
+ * @vsi: the VSI to create a port for
*
- * Create and register a devlink_port for this PF. Note that although each
- * physical function is connected to a separate devlink instance, the port
- * will still be numbered according to the physical function ID.
+ * Create and register a devlink_port for this VSI.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_pf *pf)
+int ice_devlink_create_port(struct ice_vsi *vsi)
{
- struct devlink *devlink = priv_to_devlink(pf);
- struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct device *dev = ice_pf_to_dev(pf);
struct devlink_port_attrs attrs = {};
+ struct ice_port_info *pi;
+ struct devlink *devlink;
+ struct device *dev;
+ struct ice_pf *pf;
int err;
- if (!vsi) {
- dev_err(dev, "%s: unable to find main VSI\n", __func__);
- return -EIO;
- }
+ /* Currently we only create devlink_port instances for PF VSIs */
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ pf = vsi->back;
+ devlink = priv_to_devlink(pf);
+ dev = ice_pf_to_dev(pf);
+ pi = pf->hw.port_info;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.pf_id;
- devlink_port_attrs_set(&pf->devlink_port, &attrs);
- err = devlink_port_register(devlink, &pf->devlink_port, pf->hw.pf_id);
+ attrs.phys.port_number = pi->lport;
+ devlink_port_attrs_set(&vsi->devlink_port, &attrs);
+ err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
if (err) {
dev_err(dev, "devlink_port_register failed: %d\n", err);
return err;
}
+ vsi->devlink_port_registered = true;
+
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
+ * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
+ * @vsi: the VSI to cleanup
*
- * Unregisters the devlink_port structure associated with this PF.
+ * Unregisters the devlink_port structure associated with this VSI.
*/
-void ice_devlink_destroy_port(struct ice_pf *pf)
+void ice_devlink_destroy_port(struct ice_vsi *vsi)
{
- devlink_port_type_clear(&pf->devlink_port);
- devlink_port_unregister(&pf->devlink_port);
+ if (!vsi->devlink_port_registered)
+ return;
+
+ devlink_port_type_clear(&vsi->devlink_port);
+ devlink_port_unregister(&vsi->devlink_port);
+
+ vsi->devlink_port_registered = false;
}
/**
* ice_devlink_nvm_snapshot - Capture a snapshot of the Shadow RAM contents
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -413,6 +438,7 @@ void ice_devlink_destroy_port(struct ice_pf *pf)
* error code on failure.
*/
static int ice_devlink_nvm_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
@@ -456,6 +482,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
/**
* ice_devlink_devcaps_snapshot - Capture snapshot of device capabilities
* @devlink: the devlink instance
+ * @ops: the devlink region being snapshotted
* @extack: extended ACK response structure
* @data: on exit points to snapshot data buffer
*
@@ -468,6 +495,7 @@ static int ice_devlink_nvm_snapshot(struct devlink *devlink,
*/
static int
ice_devlink_devcaps_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
struct netlink_ext_ack *extack, u8 **data)
{
struct ice_pf *pf = devlink_priv(devlink);
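The new .flash_update signature passes a devlink_flash_update_params struct instead of path/component, and the driver opts into the overwrite mask via supported_flash_update_params. The accepted mask values map onto NVM preservation levels; the same mapping as the if/else chain above, restated as a switch purely to make the three accepted values explicit:

	switch (params->overwrite_mask) {
	case 0:
		/* preserve all settings and identifiers */
		preservation = ICE_AQC_NVM_PRESERVE_ALL;
		break;
	case DEVLINK_FLASH_OVERWRITE_SETTINGS:
		/* overwrite settings, keep vital device identifiers */
		preservation = ICE_AQC_NVM_PRESERVE_SELECTED;
		break;
	case DEVLINK_FLASH_OVERWRITE_SETTINGS | DEVLINK_FLASH_OVERWRITE_IDENTIFIERS:
		/* overwrite everything */
		preservation = ICE_AQC_NVM_NO_PRESERVATION;
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack, "Requested overwrite mask is not supported");
		return -EOPNOTSUPP;
	}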
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index 6e806a08dc23..e07e74426bde 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -8,8 +8,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev);
int ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_pf *pf);
-void ice_devlink_destroy_port(struct ice_pf *pf);
+int ice_devlink_create_port(struct ice_vsi *vsi);
+void ice_devlink_destroy_port(struct ice_vsi *vsi);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index d7430ce6af26..2d27f66ac853 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1268,8 +1268,7 @@ ice_fdir_write_all_fltr(struct ice_pf *pf, struct ice_fdir_fltr *input,
bool is_tun = tun == ICE_FD_HW_SEG_TUN;
int err;
- if (is_tun && !ice_get_open_tunnel_port(&pf->hw, TNL_ALL,
- &port_num))
+ if (is_tun && !ice_get_open_tunnel_port(&pf->hw, &port_num))
continue;
err = ice_fdir_write_fltr(pf, input, add, is_tun);
if (err)
@@ -1647,8 +1646,7 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd)
}
/* return error if not an update and no available filters */
- fltrs_needed = ice_get_open_tunnel_port(hw, TNL_ALL, &tunnel_port) ?
- 2 : 1;
+ fltrs_needed = ice_get_open_tunnel_port(hw, &tunnel_port) ? 2 : 1;
if (!ice_fdir_find_fltr_by_idx(hw, fsp->location) &&
ice_fdir_num_avail_fltr(hw, pf->vsi[vsi->idx]) < fltrs_needed) {
dev_err(dev, "Failed to add filter. The maximum number of flow director filters has been reached.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 6834df14332f..59c0c6a0f8c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -556,7 +556,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
memcpy(pkt, ice_fdir_pkt[idx].pkt, ice_fdir_pkt[idx].pkt_len);
loc = pkt;
} else {
- if (!ice_get_open_tunnel_port(hw, TNL_ALL, &tnl_port))
+ if (!ice_get_open_tunnel_port(hw, &tnl_port))
return ICE_ERR_DOES_NOT_EXIST;
if (!ice_fdir_pkt[idx].tun_pkt)
return ICE_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index b17ae3e20157..9095b4d274ad 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -489,8 +489,6 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
if ((label_name[len] - '0') == hw->pf_id) {
hw->tnl.tbl[hw->tnl.count].type = tnls[i].type;
hw->tnl.tbl[hw->tnl.count].valid = false;
- hw->tnl.tbl[hw->tnl.count].in_use = false;
- hw->tnl.tbl[hw->tnl.count].marked = false;
hw->tnl.tbl[hw->tnl.count].boost_addr = val;
hw->tnl.tbl[hw->tnl.count].port = 0;
hw->tnl.count++;
@@ -505,8 +503,11 @@ static void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
for (i = 0; i < hw->tnl.count; i++) {
ice_find_boost_entry(ice_seg, hw->tnl.tbl[i].boost_addr,
&hw->tnl.tbl[i].boost_entry);
- if (hw->tnl.tbl[i].boost_entry)
+ if (hw->tnl.tbl[i].boost_entry) {
hw->tnl.tbl[i].valid = true;
+ if (hw->tnl.tbl[i].type < __TNL_TYPE_CNT)
+ hw->tnl.valid_count[hw->tnl.tbl[i].type]++;
+ }
}
}
@@ -1626,104 +1627,59 @@ static struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld)
}
/**
- * ice_tunnel_port_in_use_hlpr - helper function to determine tunnel usage
+ * ice_get_open_tunnel_port - retrieve an open tunnel port
* @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
+ * @port: returns open port
*/
-static bool ice_tunnel_port_in_use_hlpr(struct ice_hw *hw, u16 port, u16 *index)
+bool
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port)
{
+ bool res = false;
u16 i;
+ mutex_lock(&hw->tnl_lock);
+
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].in_use && hw->tnl.tbl[i].port == port) {
- if (index)
- *index = i;
- return true;
+ if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].port) {
+ *port = hw->tnl.tbl[i].port;
+ res = true;
+ break;
}
- return false;
-}
-
-/**
- * ice_tunnel_port_in_use
- * @hw: pointer to the HW structure
- * @port: port to search for
- * @index: optionally returns index
- *
- * Returns whether a port is already in use as a tunnel, and optionally its
- * index
- */
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index)
-{
- bool res;
-
- mutex_lock(&hw->tnl_lock);
- res = ice_tunnel_port_in_use_hlpr(hw, port, index);
mutex_unlock(&hw->tnl_lock);
return res;
}
/**
- * ice_find_free_tunnel_entry
+ * ice_tunnel_idx_to_entry - convert linear index to the sparse one
* @hw: pointer to the HW structure
- * @type: tunnel type
- * @index: optionally returns index
+ * @type: type of tunnel
+ * @idx: linear index
*
- * Returns whether there is a free tunnel entry, and optionally its index
+ * Stack assumes we have 2 linear tables with indexes [0, valid_count),
+ * but really the port table may be sparse, and types are mixed, so convert
+ * the stack index into the device index.
*/
-static bool
-ice_find_free_tunnel_entry(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *index)
+static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
+ u16 idx)
{
u16 i;
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && !hw->tnl.tbl[i].in_use &&
- hw->tnl.tbl[i].type == type) {
- if (index)
- *index = i;
- return true;
- }
+ if (hw->tnl.tbl[i].valid &&
+ hw->tnl.tbl[i].type == type &&
+ idx-- == 0)
+ return i;
- return false;
-}
-
-/**
- * ice_get_open_tunnel_port - retrieve an open tunnel port
- * @hw: pointer to the HW structure
- * @type: tunnel type (TNL_ALL will return any open port)
- * @port: returns open port
- */
-bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port)
-{
- bool res = false;
- u16 i;
-
- mutex_lock(&hw->tnl_lock);
-
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (type == TNL_ALL || hw->tnl.tbl[i].type == type)) {
- *port = hw->tnl.tbl[i].port;
- res = true;
- break;
- }
-
- mutex_unlock(&hw->tnl_lock);
-
- return res;
+ WARN_ON_ONCE(1);
+ return 0;
}
/**
* ice_create_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
* @type: type of tunnel
* @port: port of tunnel to create
*
@@ -1731,27 +1687,16 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
+static enum ice_status
+ice_create_tunnel(struct ice_hw *hw, u16 index,
+ enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 index;
mutex_lock(&hw->tnl_lock);
- if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
- hw->tnl.tbl[index].ref++;
- status = 0;
- goto ice_create_tunnel_end;
- }
-
- if (!ice_find_free_tunnel_entry(hw, type, &index)) {
- status = ICE_ERR_OUT_OF_RANGE;
- goto ice_create_tunnel_end;
- }
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1790,11 +1735,8 @@ ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
memcpy(sect_tx->tcam, sect_rx->tcam, sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
- if (!status) {
+ if (!status)
hw->tnl.tbl[index].port = port;
- hw->tnl.tbl[index].in_use = true;
- hw->tnl.tbl[index].ref = 1;
- }
ice_create_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1808,46 +1750,31 @@ ice_create_tunnel_end:
/**
* ice_destroy_tunnel
* @hw: pointer to the HW structure
+ * @index: device table entry
+ * @type: type of tunnel
- * @port: port of tunnel to destroy (ignored if the all parameter is true)
+ * @port: port of tunnel to destroy
- * @all: flag that states to destroy all tunnels
*
* Destroys a tunnel or all tunnels by creating an update package buffer
* targeting the specific updates requested and then performing an update
* package.
*/
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+static enum ice_status
+ice_destroy_tunnel(struct ice_hw *hw, u16 index, enum ice_tunnel_type type,
+ u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
- u16 count = 0;
- u16 index;
- u16 size;
- u16 i;
mutex_lock(&hw->tnl_lock);
- if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
- if (hw->tnl.tbl[index].ref > 1) {
- hw->tnl.tbl[index].ref--;
- status = 0;
- goto ice_destroy_tunnel_end;
- }
-
- /* determine count */
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port))
- count++;
-
- if (!count) {
- status = ICE_ERR_PARAM;
+ if (WARN_ON(!hw->tnl.tbl[index].valid ||
+ hw->tnl.tbl[index].type != type ||
+ hw->tnl.tbl[index].port != port)) {
+ status = ICE_ERR_OUT_OF_RANGE;
goto ice_destroy_tunnel_end;
}
- /* size of section - there is at least one entry */
- size = struct_size(sect_rx, tcam, count);
-
bld = ice_pkg_buf_alloc(hw);
if (!bld) {
status = ICE_ERR_NO_MEMORY;
@@ -1859,13 +1786,13 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
goto ice_destroy_tunnel_err;
sect_rx = ice_pkg_buf_alloc_section(bld, ICE_SID_RXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_rx, tcam, 1));
if (!sect_rx)
goto ice_destroy_tunnel_err;
sect_rx->count = cpu_to_le16(1);
sect_tx = ice_pkg_buf_alloc_section(bld, ICE_SID_TXPARSER_BOOST_TCAM,
- size);
+ struct_size(sect_tx, tcam, 1));
if (!sect_tx)
goto ice_destroy_tunnel_err;
sect_tx->count = cpu_to_le16(1);
@@ -1873,26 +1800,14 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
/* copy original boost entry to update package buffer, one copy to Rx
* section, another copy to the Tx section
*/
- for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].valid && hw->tnl.tbl[i].in_use &&
- (all || hw->tnl.tbl[i].port == port)) {
- memcpy(sect_rx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_rx->tcam));
- memcpy(sect_tx->tcam + i, hw->tnl.tbl[i].boost_entry,
- sizeof(*sect_tx->tcam));
- hw->tnl.tbl[i].marked = true;
- }
+ memcpy(sect_rx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_rx->tcam));
+ memcpy(sect_tx->tcam, hw->tnl.tbl[index].boost_entry,
+ sizeof(*sect_tx->tcam));
status = ice_update_pkg(hw, ice_pkg_buf(bld), 1);
if (!status)
- for (i = 0; i < hw->tnl.count &&
- i < ICE_TUNNEL_MAX_ENTRIES; i++)
- if (hw->tnl.tbl[i].marked) {
- hw->tnl.tbl[i].ref = 0;
- hw->tnl.tbl[i].port = 0;
- hw->tnl.tbl[i].in_use = false;
- hw->tnl.tbl[i].marked = false;
- }
+ hw->tnl.tbl[index].port = 0;
ice_destroy_tunnel_err:
ice_pkg_buf_free(hw, bld);
@@ -1903,6 +1818,52 @@ ice_destroy_tunnel_end:
return status;
}
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+ u16 index;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+ index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+
+ status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error adding UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ udp_tunnel_nic_set_port_priv(netdev, table, idx, index);
+ return 0;
+}
+
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ enum ice_tunnel_type tnl_type;
+ enum ice_status status;
+
+ tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
+
+ status = ice_destroy_tunnel(&pf->hw, ti->hw_priv, tnl_type,
+ ntohs(ti->port));
+ if (status) {
+ netdev_err(netdev, "Error removing UDP tunnel - %s\n",
+ ice_stat_str(status));
+ return -EIO;
+ }
+
+ return 0;
+}
+
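The pair of callbacks above is the driver's entire contact surface with the core udp_tunnel_nic infrastructure: set_port stores the chosen device table entry as a one-byte cookie via udp_tunnel_nic_set_port_priv(), and the core hands that cookie back as ti->hw_priv when the port is removed. For orientation, a minimal sketch of how a driver advertises such callbacks (the foo_* names and the one-table/four-entry sizing are hypothetical; the real sizing for ice happens in ice_probe() further down in this patch):

#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

static int foo_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
				   unsigned int entry, struct udp_tunnel_info *ti)
{
	/* program ntohs(ti->port) into hardware slot 'entry' here */
	return 0;
}

static int foo_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
				     unsigned int entry, struct udp_tunnel_info *ti)
{
	/* clear hardware slot 'entry' here */
	return 0;
}

static const struct udp_tunnel_nic_info foo_udp_tunnels = {
	.set_port	= foo_udp_tunnel_set_port,
	.unset_port	= foo_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

A driver assigns netdev->udp_tunnel_nic_info = &foo_udp_tunnels in probe; the core then invokes set_port/unset_port as tunnel sockets come and go and replays the table after resets, which is exactly the bookkeeping the deleted ref/in_use/marked fields used to do by hand.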
/* PTG Management */
/**
@@ -4915,7 +4876,7 @@ ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
if (last_profile) {
/* If there are no profiles left for this VSIG,
- * then simply remove the the VSIG.
+ * then simply remove the VSIG.
*/
status = ice_rem_vsig(hw, blk, vsig, &chg);
if (status)
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 568ea519af51..20deddb807c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -19,12 +19,11 @@
#define ICE_PKG_CNT 4
bool
-ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
- u16 *port);
-enum ice_status
-ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
-bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
+ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
+int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
+int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
+ unsigned int idx, struct udp_tunnel_info *ti);
enum ice_status
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id, u8 ptypes[],
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index c1c99a267a98..24063c1351b2 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -298,6 +298,7 @@ struct ice_pkg_enum {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ __TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
};
@@ -311,11 +312,8 @@ struct ice_tunnel_entry {
enum ice_tunnel_type type;
u16 boost_addr;
u16 port;
- u16 ref;
struct ice_boost_tcam_entry *boost_entry;
u8 valid;
- u8 in_use;
- u8 marked;
};
#define ICE_TUNNEL_MAX_ENTRIES 16
@@ -323,6 +321,7 @@ struct ice_tunnel_entry {
struct ice_tunnel_table {
struct ice_tunnel_entry tbl[ICE_TUNNEL_MAX_ENTRIES];
u16 count;
+ u16 valid_count[__TNL_TYPE_CNT];
};
struct ice_pkg_es {
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index fe677621dd51..eadc85aee389 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -99,6 +99,54 @@ static const u32 ice_ptypes_ipv6_il[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+/* Packet types for packets with an Outer/First/Single IPv4 header - no L4 */
+static const u32 ice_ipv4_ofos_no_l4[] = {
+ 0x10C00000, 0x04000800, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv4 header - no L4 */
+static const u32 ice_ipv4_il_no_l4[] = {
+ 0x60000000, 0x18043008, 0x80000002, 0x6010c021,
+ 0x00000008, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Outer/First/Single IPv6 header - no L4 */
+static const u32 ice_ipv6_ofos_no_l4[] = {
+ 0x00000000, 0x00000000, 0x43000000, 0x10002000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
+/* Packet types for packets with an Innermost/Last IPv6 header - no L4 */
+static const u32 ice_ipv6_il_no_l4[] = {
+ 0x00000000, 0x02180430, 0x0000010c, 0x086010c0,
+ 0x00000430, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+};
+
/* UDP Packet types for non-tunneled packets or tunneled
* packets with inner UDP.
*/
@@ -250,11 +298,23 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
hdrs = prof->segs[i].hdrs;
- if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
+ if ((hdrs & ICE_FLOW_SEG_HDR_IPV4) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv4_ofos_no_l4 :
+ (const unsigned long *)ice_ipv4_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
+ } else if (hdrs & ICE_FLOW_SEG_HDR_IPV4) {
src = !i ? (const unsigned long *)ice_ptypes_ipv4_ofos :
(const unsigned long *)ice_ptypes_ipv4_il;
bitmap_and(params->ptypes, params->ptypes, src,
ICE_FLOW_PTYPE_MAX);
+ } else if ((hdrs & ICE_FLOW_SEG_HDR_IPV6) &&
+ !(hdrs & ICE_FLOW_SEG_HDRS_L4_MASK)) {
+ src = !i ? (const unsigned long *)ice_ipv6_ofos_no_l4 :
+ (const unsigned long *)ice_ipv6_il_no_l4;
+ bitmap_and(params->ptypes, params->ptypes, src,
+ ICE_FLOW_PTYPE_MAX);
} else if (hdrs & ICE_FLOW_SEG_HDR_IPV6) {
src = !i ? (const unsigned long *)ice_ptypes_ipv6_ofos :
(const unsigned long *)ice_ptypes_ipv6_il;
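To spell out the pattern in this hunk: each header-flag combination selects one of the static ptype bitmaps defined above, and the candidate set is narrowed by intersection for every segment. A condensed sketch of the idea (the seeding from the profile and the surrounding loop are elided; bitmap_fill here stands in for that seeding):

#include <linux/bitmap.h>

DECLARE_BITMAP(ptypes, ICE_FLOW_PTYPE_MAX);

bitmap_fill(ptypes, ICE_FLOW_PTYPE_MAX);	/* real code seeds from the profile */
/* outer/first segment requesting IPv4 with no L4 header */
bitmap_and(ptypes, ptypes, (const unsigned long *)ice_ipv4_ofos_no_l4,
	   ICE_FLOW_PTYPE_MAX);

The new no-L4 tables evidently exist so that a segment asking for a bare IPv4/IPv6 header does not keep the L4-carrying ptypes that the generic ice_ptypes_ipv4_ofos/_il tables would match.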
@@ -385,7 +445,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* ice_flow_xtract_raws - Create extract sequence entries for raw bytes
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
- * @seg: index of packet segment whose raw fields are to be be extracted
+ * @seg: index of packet segment whose raw fields are to be extracted
*/
static enum ice_status
ice_flow_xtract_raws(struct ice_hw *hw, struct ice_flow_prof_params *params,
@@ -999,7 +1059,7 @@ enum ice_status ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk,
*
* This helper function stores information of a field being matched, including
* the type of the field and the locations of the value to match, the mask, and
- * and the upper-bound value in the start of the input buffer for a flow entry.
+ * the upper-bound value in the start of the input buffer for a flow entry.
* This function should only be used for fixed-size data structures.
*
* This function also opportunistically determines the protocol headers to be
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index 3913da2116d2..829f90b1e998 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -194,8 +194,8 @@ struct ice_flow_entry {
u16 entry_sz;
};
-#define ICE_FLOW_ENTRY_HNDL(e) ((u64)e)
-#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(h))
+#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e)
+#define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h))
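The intermediate (uintptr_t) cast is what lets these macros round-trip a pointer through an opaque 64-bit handle without "cast to/from integer of different size" warnings on 32-bit builds; in use (entry being a hypothetical local):

u64 hndl = ICE_FLOW_ENTRY_HNDL(entry);			/* pointer -> handle */
struct ice_flow_entry *e = ICE_FLOW_ENTRY_PTR(hndl);	/* handle -> pointer */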
struct ice_flow_prof {
struct list_head l_entry;
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 8968fdd4816b..8f81b95e679c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -43,6 +43,8 @@ ice_send_package_data(struct pldmfw *context, const u8 *data, u16 length)
enum ice_status status;
u8 *package_data;
+ dev_dbg(dev, "Sending PLDM record package data to firmware\n");
+
package_data = kmemdup(data, length, GFP_KERNEL);
if (!package_data)
return -ENOMEM;
@@ -229,6 +231,8 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
comp_tbl->cvs_len = component->version_len;
memcpy(comp_tbl->cvs, component->version_string, component->version_len);
+ dev_dbg(dev, "Sending component table to firmware:\n");
+
status = ice_nvm_pass_component_tbl(hw, (u8 *)comp_tbl, length,
transfer_flag, &comp_response,
&comp_response_code, NULL);
@@ -279,11 +283,14 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
memset(&event, 0, sizeof(event));
+ dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
+ block_size, module, offset);
+
status = ice_aq_update_nvm(hw, module, offset, block_size, block,
last_cmd, 0, NULL);
if (status) {
- dev_err(dev, "Failed to program flash module 0x%02x at offset %u, err %s aq_err %s\n",
- module, offset, ice_stat_str(status),
+ dev_err(dev, "Failed to flash module 0x%02x with block of size %u at offset %u, err %s aq_err %s\n",
+ module, block_size, offset, ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
NL_SET_ERR_MSG_MOD(extack, "Failed to program flash module");
return -EIO;
@@ -297,8 +304,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
if (err) {
- dev_err(dev, "Timed out waiting for firmware write completion for module 0x%02x, err %d\n",
- module, err);
+ dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
+ module, block_size, offset, err);
NL_SET_ERR_MSG_MOD(extack, "Timed out waiting for firmware");
return -EIO;
}
@@ -324,8 +331,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
}
if (completion_retval) {
- dev_err(dev, "Firmware failed to program flash module 0x%02x at offset %u, completion err %s\n",
- module, offset,
+ dev_err(dev, "Firmware failed to flash module 0x%02x with block of size %u at offset %u, err %s\n",
+ module, block_size, offset,
ice_aq_str((enum ice_aq_err)completion_retval));
NL_SET_ERR_MSG_MOD(extack, "Firmware failed to program flash module");
return -EIO;
@@ -356,12 +363,15 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
const u8 *image, u32 length,
struct netlink_ext_ack *extack)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct devlink *devlink;
u32 offset = 0;
bool last_cmd;
u8 *block;
int err;
+ dev_dbg(dev, "Beginning write of flash component '%s', module 0x%02x\n", component, module);
+
devlink = priv_to_devlink(pf);
devlink_flash_update_status_notify(devlink, "Flashing",
@@ -394,6 +404,8 @@ ice_write_nvm_module(struct ice_pf *pf, u16 module, const char *component,
component, offset, length);
} while (!last_cmd);
+ dev_dbg(dev, "Completed write of flash component '%s', module 0x%02x\n", component, module);
+
if (err)
devlink_flash_update_status_notify(devlink, "Flashing failed",
component, length, length);
@@ -431,6 +443,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
enum ice_status status;
int err;
+ dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
+
memset(&event, 0, sizeof(event));
devlink = priv_to_devlink(pf);
@@ -476,6 +490,8 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
+ dev_dbg(dev, "Completed erase of flash component '%s', module 0x%02x\n", component, module);
+
out_notify_devlink:
if (err)
devlink_flash_update_status_notify(devlink, "Erasing failed",
@@ -614,14 +630,9 @@ static int ice_finalize_update(struct pldmfw *context)
struct ice_fwu_priv *priv = container_of(context, struct ice_fwu_priv, context);
struct netlink_ext_ack *extack = priv->extack;
struct ice_pf *pf = priv->pf;
- int err;
/* Finally, notify firmware to activate the written NVM banks */
- err = ice_switch_flash_banks(pf, priv->activate_flags, extack);
- if (err)
- return err;
-
- return 0;
+ return ice_switch_flash_banks(pf, priv->activate_flags, extack);
}
static const struct pldmfw_ops ice_fwu_ops = {
@@ -636,6 +647,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* ice_flash_pldm_image - Write a PLDM-formatted firmware image to the device
* @pf: private device driver structure
* @fw: firmware object pointing to the relevant firmware file
+ * @preservation: preservation level to request from firmware
* @extack: netlink extended ACK structure
*
* Parse the data for a given firmware file, verifying that it is a valid PLDM
@@ -649,7 +661,7 @@ static const struct pldmfw_ops ice_fwu_ops = {
* Returns: zero on success or a negative error code on failure.
*/
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack)
+ u8 preservation, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
@@ -657,13 +669,24 @@ int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
enum ice_status status;
int err;
+ switch (preservation) {
+ case ICE_AQC_NVM_PRESERVE_ALL:
+ case ICE_AQC_NVM_PRESERVE_SELECTED:
+ case ICE_AQC_NVM_NO_PRESERVATION:
+ case ICE_AQC_NVM_FACTORY_DEFAULT:
+ break;
+ default:
+ WARN(1, "Unexpected preservation level request %u", preservation);
+ return -EINVAL;
+ }
+
memset(&priv, 0, sizeof(priv));
priv.context.ops = &ice_fwu_ops;
priv.context.dev = dev;
priv.extack = extack;
priv.pf = pf;
- priv.activate_flags = ICE_AQC_NVM_PRESERVE_ALL;
+ priv.activate_flags = preservation;
status = ice_acquire_nvm(hw, ICE_RES_WRITE);
if (status) {
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 79472cc618b4..c6390f6851ff 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -5,7 +5,7 @@
#define _ICE_FW_UPDATE_H_
int ice_flash_pldm_image(struct ice_pf *pf, const struct firmware *fw,
- struct netlink_ext_ack *extack);
+ u8 preservation, struct netlink_ext_ack *extack);
int ice_check_for_pending_update(struct ice_pf *pf, const char *component,
struct netlink_ext_ack *extack);
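With the new parameter, the preservation level becomes the caller's decision rather than a hard-coded ICE_AQC_NVM_PRESERVE_ALL. A hypothetical call site (presumably the devlink flash handler) would now read:

/* sketch only; error handling and the surrounding devlink plumbing elided */
err = ice_flash_pldm_image(pf, fw, ICE_AQC_NVM_PRESERVE_ALL, extack);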
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index ebbb8f54871c..3df67486d42d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,6 +7,7 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
+#include "ice_devlink.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
@@ -1755,7 +1756,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
return ret;
for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);
+ vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -2616,8 +2617,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
* PF that is running the work queue items currently. This is done to
* avoid check_flush_dependency() warning on this wq
*/
- if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
+ if (vsi->netdev && !ice_is_reset_in_progress(pf->state)) {
unregister_netdev(vsi->netdev);
+ ice_devlink_destroy_port(vsi);
+ }
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 54a7f55eb8c1..2dea4d0e9415 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -486,7 +486,6 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
struct ice_hw *hw = &pf->hw;
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- WARN_ON(in_interrupt());
ice_prepare_for_reset(pf);
@@ -1057,7 +1056,9 @@ struct ice_aq_task {
int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
struct ice_rq_event_info *event)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_aq_task *task;
+ unsigned long start;
long ret;
int err;
@@ -1074,6 +1075,8 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
hlist_add_head(&task->entry, &pf->aq_wait_list);
spin_unlock_bh(&pf->aq_wait_lock);
+ start = jiffies;
+
ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
timeout);
switch (task->state) {
@@ -1092,6 +1095,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
break;
}
+ dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
+ jiffies_to_msecs(jiffies - start),
+ jiffies_to_msecs(timeout),
+ opcode);
+
spin_lock_bh(&pf->aq_wait_lock);
hlist_del(&task->entry);
spin_unlock_bh(&pf->aq_wait_lock);
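The new timing message relies on the usual wrap-safe jiffies idiom: the unsigned subtraction jiffies - start yields the correct delta even if the counter wrapped between the two samples, and jiffies_to_msecs() converts it for the log. The same idiom in isolation:

unsigned long start = jiffies;
/* ... block until the event arrives or the timeout expires ... */
unsigned long delta = jiffies - start;		/* wrap-safe by construction */
unsigned int waited_ms = jiffies_to_msecs(delta);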
@@ -2273,7 +2281,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
return 0;
@@ -2417,7 +2425,7 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset reset bits
+ * rings; in case of rebuild being triggered not from reset, bits
* in pf->state won't be set, so additionally check first q_vector
* against NULL
*/
@@ -2517,13 +2525,13 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (if_running)
ret = ice_up(vsi);
- if (!ret && prog && vsi->xsk_umems) {
+ if (!ret && prog && vsi->xsk_pools) {
int i;
ice_for_each_rxq(vsi, i) {
struct ice_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_umem)
+ if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
}
}
@@ -2549,8 +2557,8 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);
- case XDP_SETUP_XSK_UMEM:
- return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ice_xsk_pool_setup(vsi, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -2873,6 +2881,7 @@ static void ice_set_ops(struct net_device *netdev)
}
netdev->netdev_ops = &ice_netdev_ops;
+ netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic;
ice_set_ethtool_ops(netdev);
}
@@ -2953,7 +2962,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
u8 mac_addr[ETH_ALEN];
int err;
- err = ice_devlink_create_port(pf);
+ err = ice_devlink_create_port(vsi);
if (err)
return err;
@@ -2994,7 +3003,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (err)
goto err_free_netdev;
- devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
netif_carrier_off(vsi->netdev);
@@ -3007,7 +3016,7 @@ err_free_netdev:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
err_destroy_devlink_port:
- ice_devlink_destroy_port(pf);
+ ice_devlink_destroy_port(vsi);
return err;
}
@@ -3971,7 +3980,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct device *dev = &pdev->dev;
struct ice_pf *pf;
struct ice_hw *hw;
- int err;
+ int i, err;
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
@@ -4066,11 +4075,37 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_devlink_init_regions(pf);
+ pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port;
+ pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port;
+ pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared;
+ i = 0;
+ if (pf->hw.tnl.valid_count[TNL_VXLAN]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_VXLAN];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_VXLAN;
+ i++;
+ }
+ if (pf->hw.tnl.valid_count[TNL_GENEVE]) {
+ pf->hw.udp_tunnel_nic.tables[i].n_entries =
+ pf->hw.tnl.valid_count[TNL_GENEVE];
+ pf->hw.udp_tunnel_nic.tables[i].tunnel_types =
+ UDP_TUNNEL_TYPE_GENEVE;
+ i++;
+ }
+
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
goto err_init_pf_unroll;
}
+ if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) {
+ dev_warn(&pf->pdev->dev,
+ "limiting the VSI count due to UDP tunnel limitation %d > %d\n",
+ pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES);
+ pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES;
+ }
pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi),
GFP_KERNEL);
@@ -4216,7 +4251,6 @@ probe_done:
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
- ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
set_bit(__ICE_DOWN, pf->state);
devm_kfree(dev, pf->first_sw);
@@ -4331,7 +4365,6 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
- ice_devlink_destroy_port(pf);
ice_vsi_release_all(pf);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
@@ -6569,70 +6602,6 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
- * ice_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_add(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- enum ice_tunnel_type tnl_type;
- u16 port = ntohs(ti->port);
- enum ice_status status;
-
- switch (ti->type) {
- case UDP_TUNNEL_TYPE_VXLAN:
- tnl_type = TNL_VXLAN;
- break;
- case UDP_TUNNEL_TYPE_GENEVE:
- tnl_type = TNL_GENEVE;
- break;
- default:
- netdev_err(netdev, "Unknown tunnel type\n");
- return;
- }
-
- status = ice_create_tunnel(&pf->hw, tnl_type, port);
- if (status == ICE_ERR_OUT_OF_RANGE)
- netdev_info(netdev, "Max tunneled UDP ports reached, port %d not added\n",
- port);
- else if (status)
- netdev_err(netdev, "Error adding UDP tunnel - %s\n",
- ice_stat_str(status));
-}
-
-/**
- * ice_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
- * @netdev: This physical port's netdev
- * @ti: Tunnel endpoint information
- */
-static void
-ice_udp_tunnel_del(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
- struct ice_pf *pf = vsi->back;
- u16 port = ntohs(ti->port);
- enum ice_status status;
- bool retval;
-
- retval = ice_tunnel_port_in_use(&pf->hw, port, NULL);
- if (!retval) {
- netdev_info(netdev, "port %d not found in UDP tunnels list\n",
- port);
- return;
- }
-
- status = ice_destroy_tunnel(&pf->hw, port, false);
- if (status)
- netdev_err(netdev, "error deleting port %d from UDP tunnels list\n",
- port);
-}
-
-/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -6824,6 +6793,6 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_bpf = ice_xdp,
.ndo_xdp_xmit = ice_xdp_xmit,
.ndo_xsk_wakeup = ice_xsk_wakeup,
- .ndo_udp_tunnel_add = ice_udp_tunnel_add,
- .ndo_udp_tunnel_del = ice_udp_tunnel_del,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 9d0d6b0025cf..eae75260fe20 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -145,7 +145,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
{
u16 i;
- if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_umem) {
+ if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
ice_xsk_clean_xdp_ring(tx_ring);
goto tx_skip_free;
}
@@ -375,7 +375,7 @@ void ice_clean_rx_ring(struct ice_ring *rx_ring)
if (!rx_ring->rx_buf)
return;
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ice_xsk_clean_rx_ring(rx_ring);
goto rx_skip_free;
}
@@ -919,10 +919,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
@@ -964,10 +961,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch((void *)(xdp->data + L1_CACHE_BYTES));
-#endif /* L1_CACHE_BYTES */
+ net_prefetch(xdp->data);
/* allocate a skb to store the frags */
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
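Both prefetch hunks in this file replace the open-coded pair of prefetches with net_prefetch(). As I understand its definition in <linux/netdevice.h> at the time of this series, the helper expands to exactly the pattern being deleted:

static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}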
@@ -1616,7 +1610,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ice_clean_tx_irq_zc(ring, budget) :
ice_clean_tx_irq(ring, budget);
@@ -1646,7 +1640,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_umem ?
+ cleaned = ring->xsk_pool ?
ice_clean_rx_irq_zc(ring, budget_per_ring) :
ice_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 51b4df7a59d2..ff1a1cbd078e 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -43,7 +43,7 @@
/**
* ice_compute_pad - compute the padding
- * rx_buf_len: buffer length
+ * @rx_buf_len: buffer length
*
* Figure out the size of half page based on given buffer length and
* then subtract the skb_shared_info followed by subtraction of the
@@ -295,7 +295,7 @@ struct ice_ring {
struct rcu_head rcu; /* to avoid race on free */
struct bpf_prog *xdp_prog;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
/* CL3 - 3rd cacheline starts here */
struct xdp_rxq_info xdp_rxq;
/* CLX - the below items are only accessed infrequently and should be
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4cdccfadf274..2226a291a394 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -676,6 +676,9 @@ struct ice_hw {
struct mutex tnl_lock;
struct ice_tunnel_table tnl;
+ struct udp_tunnel_nic_shared udp_tunnel_shared;
+ struct udp_tunnel_nic_info udp_tunnel_nic;
+
/* HW block tables */
struct ice_blk_info blk[ICE_BLK_COUNT];
struct mutex fl_profs_locks[ICE_BLK_COUNT]; /* lock fltr profiles */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 71497776ac62..ec7f6c64132e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -871,7 +871,7 @@ static int ice_get_max_valid_res_idx(struct ice_res_tracker *res)
* If there are not enough resources available, return an error. This should
* always be caught by ice_set_per_vf_res().
*
- * Return 0 on success, and -EINVAL when there are not enough MSIX vectors in
+ * Return 0 on success, and -EINVAL when there are not enough MSIX vectors
* in the PF's space available for SR-IOV.
*/
static int ice_sriov_set_msix_res(struct ice_pf *pf, u16 num_msix_needed)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 20ac5fca68c6..797886524054 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -236,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
+ xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
}
err = ice_setup_rx_ctx(rx_ring);
@@ -260,21 +260,21 @@ free_buf:
}
/**
- * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
- * @vsi: VSI to allocate the UMEM on
+ * ice_xsk_alloc_pools - allocate a buffer pool for an XDP socket
+ * @vsi: VSI to allocate the buffer pool on
*
* Returns 0 on success, negative on error
*/
-static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
+static int ice_xsk_alloc_pools(struct ice_vsi *vsi)
{
- if (vsi->xsk_umems)
+ if (vsi->xsk_pools)
return 0;
- vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
+ vsi->xsk_pools = kcalloc(vsi->num_xsk_pools, sizeof(*vsi->xsk_pools),
GFP_KERNEL);
- if (!vsi->xsk_umems) {
- vsi->num_xsk_umems = 0;
+ if (!vsi->xsk_pools) {
+ vsi->num_xsk_pools = 0;
return -ENOMEM;
}
@@ -282,73 +282,73 @@ static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
}
/**
- * ice_xsk_remove_umem - Remove an UMEM for a certain ring/qid
+ * ice_xsk_remove_pool - Remove a buffer pool for a certain ring/qid
* @vsi: VSI from which the buffer pool will be removed
- * @qid: Ring/qid associated with the UMEM
+ * @qid: Ring/qid associated with the buffer pool
*/
-static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
+static void ice_xsk_remove_pool(struct ice_vsi *vsi, u16 qid)
{
- vsi->xsk_umems[qid] = NULL;
- vsi->num_xsk_umems_used--;
+ vsi->xsk_pools[qid] = NULL;
+ vsi->num_xsk_pools_used--;
- if (vsi->num_xsk_umems_used == 0) {
- kfree(vsi->xsk_umems);
- vsi->xsk_umems = NULL;
- vsi->num_xsk_umems = 0;
+ if (vsi->num_xsk_pools_used == 0) {
+ kfree(vsi->xsk_pools);
+ vsi->xsk_pools = NULL;
+ vsi->num_xsk_pools = 0;
}
}
/**
- * ice_xsk_umem_disable - disable a UMEM region
+ * ice_xsk_pool_disable - disable a buffer pool region
* @vsi: Current VSI
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
+static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
{
- if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
- !vsi->xsk_umems[qid])
+ if (!vsi->xsk_pools || qid >= vsi->num_xsk_pools ||
+ !vsi->xsk_pools[qid])
return -EINVAL;
- xsk_buff_dma_unmap(vsi->xsk_umems[qid], ICE_RX_DMA_ATTR);
- ice_xsk_remove_umem(vsi, qid);
+ xsk_pool_dma_unmap(vsi->xsk_pools[qid], ICE_RX_DMA_ATTR);
+ ice_xsk_remove_pool(vsi, qid);
return 0;
}
/**
- * ice_xsk_umem_enable - enable a UMEM region
+ * ice_xsk_pool_enable - enable a buffer pool region
* @vsi: Current VSI
- * @umem: pointer to a requested UMEM region
+ * @pool: pointer to a requested buffer pool region
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
static int
-ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
int err;
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
- if (!vsi->num_xsk_umems)
- vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
- if (qid >= vsi->num_xsk_umems)
+ if (!vsi->num_xsk_pools)
+ vsi->num_xsk_pools = min_t(u16, vsi->num_rxq, vsi->num_txq);
+ if (qid >= vsi->num_xsk_pools)
return -EINVAL;
- err = ice_xsk_alloc_umems(vsi);
+ err = ice_xsk_alloc_pools(vsi);
if (err)
return err;
- if (vsi->xsk_umems && vsi->xsk_umems[qid])
+ if (vsi->xsk_pools && vsi->xsk_pools[qid])
return -EBUSY;
- vsi->xsk_umems[qid] = umem;
- vsi->num_xsk_umems_used++;
+ vsi->xsk_pools[qid] = pool;
+ vsi->num_xsk_pools_used++;
- err = xsk_buff_dma_map(vsi->xsk_umems[qid], ice_pf_to_dev(vsi->back),
+ err = xsk_pool_dma_map(vsi->xsk_pools[qid], ice_pf_to_dev(vsi->back),
ICE_RX_DMA_ATTR);
if (err)
return err;
@@ -357,17 +357,17 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
}
/**
- * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
+ * ice_xsk_pool_setup - enable/disable a buffer pool region depending on its state
* @vsi: Current VSI
- * @umem: UMEM to enable/associate to a ring, NULL to disable
+ * @pool: buffer pool to enable/associate to a ring, NULL to disable
* @qid: queue ID
*
* Returns 0 on success, negative on failure
*/
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
{
- bool if_running, umem_present = !!umem;
- int ret = 0, umem_failure = 0;
+ bool if_running, pool_present = !!pool;
+ int ret = 0, pool_failure = 0;
if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);
@@ -375,26 +375,26 @@ int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
ret = ice_qp_dis(vsi, qid);
if (ret) {
netdev_err(vsi->netdev, "ice_qp_dis error = %d\n", ret);
- goto xsk_umem_if_up;
+ goto xsk_pool_if_up;
}
}
- umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
- ice_xsk_umem_disable(vsi, qid);
+ pool_failure = pool_present ? ice_xsk_pool_enable(vsi, pool, qid) :
+ ice_xsk_pool_disable(vsi, qid);
-xsk_umem_if_up:
+xsk_pool_if_up:
if (if_running) {
ret = ice_qp_ena(vsi, qid);
- if (!ret && umem_present)
+ if (!ret && pool_present)
napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
else if (ret)
netdev_err(vsi->netdev, "ice_qp_ena error = %d\n", ret);
}
- if (umem_failure) {
- netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d\n",
- umem_present ? "en" : "dis", umem_failure);
- return umem_failure;
+ if (pool_failure) {
+ netdev_err(vsi->netdev, "Could not %sable buffer pool, error = %d\n",
+ pool_present ? "en" : "dis", pool_failure);
+ return pool_failure;
}
return ret;
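Nothing changes for userspace in this rename: binding an AF_XDP socket to a queue still lands here through XDP_SETUP_XSK_POOL, now carrying an xsk_buff_pool instead of a raw umem. A hypothetical trigger using the xsk.h API bundled with libbpf in that era (names and the NULL default config are illustrative):

struct xsk_socket *xsk;
int ret;

/* umem, rxq and txq set up beforehand; binds queue 0 of "eth0" */
ret = xsk_socket__create(&xsk, "eth0", 0, umem, &rxq, &txq, NULL);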
@@ -425,7 +425,7 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
rx_buf = &rx_ring->rx_buf[ntu];
do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!rx_buf->xdp) {
ret = true;
break;
@@ -595,7 +595,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp);
+ xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
if (xdp_res) {
@@ -645,11 +645,11 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
ice_finalize_xdp_rx(rx_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -682,11 +682,11 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
+ dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
desc.len);
tx_buf->bytecount = desc.len;
@@ -703,7 +703,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(xdp_ring->xsk_pool);
}
return budget > 0 && work_done;
@@ -777,10 +777,10 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
xdp_ring->next_to_clean = ntc;
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
+ xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);
@@ -814,7 +814,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
if (queue_id >= vsi->num_txq)
return -ENXIO;
- if (!vsi->xdp_rings[queue_id]->xsk_umem)
+ if (!vsi->xdp_rings[queue_id]->xsk_pool)
return -ENXIO;
ring = vsi->xdp_rings[queue_id];
@@ -833,20 +833,20 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
}
/**
- * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
+ * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP buffer pool attached
* @vsi: VSI to be checked
*
+ * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
+ * Returns true if any of the Rx rings has an AF_XDP buff pool attached
*/
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
int i;
- if (!vsi->xsk_umems)
+ if (!vsi->xsk_pools)
return false;
- for (i = 0; i < vsi->num_xsk_umems; i++) {
- if (vsi->xsk_umems[i])
+ for (i = 0; i < vsi->num_xsk_pools; i++) {
+ if (vsi->xsk_pools[i])
return true;
}
@@ -854,7 +854,7 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
}
/**
- * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
+ * ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
@@ -872,7 +872,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
}
/**
- * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
+ * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
@@ -896,5 +896,5 @@ void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
+ xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index fc1a06b4df36..fad783690134 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -9,7 +9,8 @@
struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
-int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
+int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
+ u16 qid);
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
@@ -19,8 +20,8 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
#else
static inline int
-ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
- struct xdp_umem __always_unused *umem,
+ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
+ struct xsk_buff_pool __always_unused *pool,
u16 __always_unused qid)
{
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a32391e82762..50863fd87d53 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -2554,7 +2554,7 @@ out:
/**
* __igb_access_emi_reg - Read/write EMI register
* @hw: pointer to the HW structure
- * @addr: EMI address to program
+ * @address: EMI address to program
* @data: pointer to value to read/write from/to the EMI address
* @read: boolean flag to indicate read or write
**/
@@ -2590,7 +2590,7 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
* igb_set_eee_i350 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
- * @adv100m: boolean flag enabling 100M EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE based on setting in dev_spec structure.
*
@@ -2646,7 +2646,7 @@ out:
* igb_set_eee_i354 - Enable/disable EEE support
* @hw: pointer to the HW structure
* @adv1G: boolean flag enabling 1G EEE advertisement
- * @adv100m: boolean flag enabling 100M EEE advertisement
+ * @adv100M: boolean flag enabling 100M EEE advertisement
*
* Enable/disable EEE legacy mode based on setting in dev_spec structure.
*
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index c393cb2c0f16..9265901455cd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -357,13 +357,14 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
/**
* igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
- * @words: number of words to read
+ * @offset: offset to read from
+ * @words: number of words to read (unused)
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
- u16 words __always_unused, u16 *data)
+ u16 __always_unused words, u16 *data)
{
s32 ret_val = 0;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 3254737c07a3..fd8eb2f9ab9d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -166,6 +166,7 @@ static s32 igb_find_vlvf_slot(struct e1000_hw *hw, u32 vlan, bool vlvf_bypass)
* @vlan: VLAN id to add or remove
* @vind: VMDq output index that maps queue to VLAN id
* @vlan_on: if true add filter, if false remove
+ * @vlvf_bypass: skip VLVF if no match is found
*
* Sets or clears a bit in the VLAN filter table array based on VLAN id
* and if we are adding or removing the filter
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 46debd991bfe..33cceb77e960 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -9,6 +9,7 @@
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
+ * @unlock: skip locking or not
*
* returns SUCCESS if it successfully read message from buffer
**/
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2f015b60a995..0286d2fceee4 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -19,6 +19,8 @@
#include <linux/pci.h>
#include <linux/mdio.h>
+#include <net/xdp.h>
+
struct igb_adapter;
#define E1000_PCS_CFG_IGN_SD 1
@@ -79,6 +81,12 @@ struct igb_adapter;
#define IGB_I210_RX_LATENCY_100 2213
#define IGB_I210_RX_LATENCY_1000 448
+/* XDP */
+#define IGB_XDP_PASS 0
+#define IGB_XDP_CONSUMED BIT(0)
+#define IGB_XDP_TX BIT(1)
+#define IGB_XDP_REDIR BIT(2)
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -132,17 +140,62 @@ struct vf_mac_filter {
/* Supported Rx Buffer Sizes */
#define IGB_RXBUFFER_256 256
+#define IGB_RXBUFFER_1536 1536
#define IGB_RXBUFFER_2048 2048
#define IGB_RXBUFFER_3072 3072
#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
#define IGB_TS_HDR_LEN 16
-#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+/* Attempt to maximize the headroom available for incoming frames. We
+ * use a 2K buffer for receives and need 1536/1534 to store the data for
+ * the frame. This leaves us with 512 bytes of room. From that we need
+ * to deduct the space needed for the shared info and the padding needed
+ * to IP align the frame.
+ *
+ * Note: For cache line sizes 256 or larger this value is going to end
+ * up negative. In these cases we should fall back to the 3K
+ * buffers.
+ */
#if (PAGE_SIZE < 8192)
-#define IGB_MAX_FRAME_BUILD_SKB \
- (SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048) - IGB_SKB_PAD - IGB_TS_HDR_LEN)
+#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_1536 - NET_IP_ALIGN)
+#define IGB_2K_TOO_SMALL_WITH_PADDING \
+((NET_SKB_PAD + IGB_TS_HDR_LEN + IGB_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IGB_RXBUFFER_2048))
+
+static inline int igb_compute_pad(int rx_buf_len)
+{
+ int page_size, pad_size;
+
+ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
+ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
+
+ return pad_size;
+}
+
+static inline int igb_skb_pad(void)
+{
+ int rx_buf_len;
+
+ /* If a 2K buffer cannot handle a standard Ethernet frame then
+ * optimize padding for a 3K buffer instead of a 1.5K buffer.
+ *
+ * For a 3K buffer we need to add enough padding to allow for
+ * tailroom due to NET_IP_ALIGN possibly shifting us out of
+ * cache-line alignment.
+ */
+ if (IGB_2K_TOO_SMALL_WITH_PADDING)
+ rx_buf_len = IGB_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
+ else
+ rx_buf_len = IGB_RXBUFFER_1536;
+
+ /* if needed make room for NET_IP_ALIGN */
+ rx_buf_len -= NET_IP_ALIGN;
+
+ return igb_compute_pad(rx_buf_len);
+}
+
+#define IGB_SKB_PAD igb_skb_pad()
#else
-#define IGB_MAX_FRAME_BUILD_SKB (IGB_RXBUFFER_2048 - IGB_TS_HDR_LEN)
+#define IGB_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif
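To make the padding math concrete, here is the small-page arithmetic, assuming NET_IP_ALIGN == 2 and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) == 320 (both are configuration-dependent assumptions, not guarantees):

/* rx_buf_len = IGB_RXBUFFER_1536 - NET_IP_ALIGN        = 1534
 * page_size  = ALIGN(1534, PAGE_SIZE / 2)              = 2048
 * pad_size   = SKB_WITH_OVERHEAD(2048) - rx_buf_len
 *            = (2048 - 320) - 1534                     = 194
 *
 * i.e. under these assumptions roughly 194 bytes of headroom precede
 * each received frame in the 2K-buffer layout.
 */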
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -194,13 +247,22 @@ enum igb_tx_flags {
#define IGB_SFF_ADDRESSING_MODE 0x4
#define IGB_SFF_8472_UNSUP 0x00
+enum igb_tx_buf_type {
+ IGB_TYPE_SKB = 0,
+ IGB_TYPE_XDP,
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer
*/
struct igb_tx_buffer {
union e1000_adv_tx_desc *next_to_watch;
unsigned long time_stamp;
- struct sk_buff *skb;
+ enum igb_tx_buf_type type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
@@ -248,6 +310,7 @@ struct igb_ring_container {
struct igb_ring {
struct igb_q_vector *q_vector; /* backlink to q_vector */
struct net_device *netdev; /* back pointer to net_device */
+ struct bpf_prog *xdp_prog;
struct device *dev; /* device pointer for dma mapping */
union { /* array of buffer info structs */
struct igb_tx_buffer *tx_buffer_info;
@@ -288,6 +351,7 @@ struct igb_ring {
struct u64_stats_sync rx_syncp;
};
};
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_internodealigned_in_smp;
struct igb_q_vector {
@@ -339,7 +403,7 @@ static inline unsigned int igb_rx_bufsz(struct igb_ring *ring)
return IGB_RXBUFFER_3072;
if (ring_uses_build_skb(ring))
- return IGB_MAX_FRAME_BUILD_SKB + IGB_TS_HDR_LEN;
+ return IGB_MAX_FRAME_BUILD_SKB;
#endif
return IGB_RXBUFFER_2048;
}
@@ -467,6 +531,7 @@ struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct net_device *netdev;
+ struct bpf_prog *xdp_prog;
unsigned long state;
unsigned int flags;
@@ -643,6 +708,9 @@ enum igb_boards {
extern char igb_driver_name[];
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+ struct igb_ring *ring,
+ struct xdp_frame *xdpf);
int igb_open(struct net_device *netdev);
int igb_close(struct net_device *netdev);
int igb_up(struct igb_adapter *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 6e8231c1ddf0..28baf203459a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -961,6 +961,10 @@ static int igb_set_ringparam(struct net_device *netdev,
memcpy(&temp_ring[i], adapter->rx_ring[i],
sizeof(struct igb_ring));
+ /* Clear copied XDP RX-queue info */
+ memset(&temp_ring[i].xdp_rxq, 0,
+ sizeof(temp_ring[i].xdp_rxq));
+
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d9c3a6b169f9..5fc2c381da55 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -30,6 +30,8 @@
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
@@ -549,8 +551,7 @@ exit:
/**
* igb_get_i2c_data - Reads the I2C SDA data bit
- * @hw: pointer to hardware structure
- * @i2cctl: Current value of I2CCTL register
+ * @data: opaque pointer to adapter struct
*
* Returns the I2C data bit value
**/
@@ -2220,7 +2221,6 @@ void igb_down(struct igb_adapter *adapter)
void igb_reinit_locked(struct igb_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
usleep_range(1000, 2000);
igb_down(adapter);
@@ -2824,6 +2824,147 @@ static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static int igb_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
+{
+ int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ struct igb_adapter *adapter = netdev_priv(dev);
+ bool running = netif_running(dev);
+ struct bpf_prog *old_prog;
+ bool need_reset;
+
+ /* verify igb ring attributes are sufficient for XDP */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ if (frame_size > igb_rx_bufsz(ring))
+ return -EINVAL;
+ }
+
+ old_prog = xchg(&adapter->xdp_prog, prog);
+ need_reset = (!!prog != !!old_prog);
+
+ /* device is up and bpf is added/removed, must setup the RX queues */
+ if (need_reset && running) {
+ igb_close(dev);
+ } else {
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ (void)xchg(&adapter->rx_ring[i]->xdp_prog,
+ adapter->xdp_prog);
+ }
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ /* bpf is just replaced, RXQ and MTU are already setup */
+ if (!need_reset)
+ return 0;
+
+ if (running)
+ igb_open(dev);
+
+ return 0;
+}
+
+static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return igb_xdp_setup(dev, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
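The new .ndo_bpf hook is what lets userspace attach a program to igb at all. A minimal, hypothetical XDP program that could be loaded through it (the drop-everything behaviour is chosen purely for illustration):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
	return XDP_DROP;	/* discard every frame on this ring */
}

char _license[] SEC("license") = "GPL";

Something like "ip link set dev eth0 xdp obj drop.o sec xdp" (device and object names are placeholders) would then exercise the igb_xdp_setup() path above.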
+static void igb_xdp_ring_update_tail(struct igb_ring *ring)
+{
+ /* Force memory writes to complete before letting h/w know there
+ * are new descriptors to fetch.
+ */
+ wmb();
+ writel(ring->next_to_use, ring->tail);
+}
+
+static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
+{
+ unsigned int r_idx = smp_processor_id();
+
+ if (r_idx >= adapter->num_tx_queues)
+ r_idx = r_idx % adapter->num_tx_queues;
+
+ return adapter->tx_ring[r_idx];
+}
+
+static int igb_xdp_xmit_back(struct igb_adapter *adapter, struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct igb_ring *tx_ring;
+ struct netdev_queue *nq;
+ u32 ret;
+
+ if (unlikely(!xdpf))
+ return IGB_XDP_CONSUMED;
+
+ /* During program transitions it's possible adapter->xdp_prog is assigned
+ * but ring has not been configured yet. In this case simply abort xmit.
+ */
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ if (unlikely(!tx_ring))
+ return -ENXIO;
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+ ret = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+ __netif_tx_unlock(nq);
+
+ return ret;
+}
+
+static int igb_xdp_xmit(struct net_device *dev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct igb_adapter *adapter = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct igb_ring *tx_ring;
+ struct netdev_queue *nq;
+ int drops = 0;
+ int i;
+
+ if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ /* During program transitions it's possible adapter->xdp_prog is assigned
+ * but ring has not been configured yet. In this case simply abort xmit.
+ */
+ tx_ring = adapter->xdp_prog ? igb_xdp_tx_queue_mapping(adapter) : NULL;
+ if (unlikely(!tx_ring))
+ return -ENXIO;
+
+ nq = txring_txq(tx_ring);
+ __netif_tx_lock(nq, cpu);
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
+ if (err != IGB_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+
+ __netif_tx_unlock(nq);
+
+ if (unlikely(flags & XDP_XMIT_FLUSH))
+ igb_xdp_ring_update_tail(tx_ring);
+
+ return n - drops;
+}
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2848,6 +2989,8 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_fdb_add = igb_ndo_fdb_add,
.ndo_features_check = igb_features_check,
.ndo_setup_tc = igb_setup_tc,
+ .ndo_bpf = igb_xdp,
+ .ndo_xdp_xmit = igb_xdp_xmit,
};
/**
@@ -3388,7 +3531,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Width x1" : "unknown"), netdev->dev_addr);
}
- if ((hw->mac.type >= e1000_i210 ||
+ if ((hw->mac.type == e1000_82576 &&
+ rd32(E1000_EECD) & E1000_EECD_PRES) ||
+ (hw->mac.type >= e1000_i210 ||
igb_get_flash_presence_i210(hw))) {
ret_val = igb_read_part_string(hw, part_str,
E1000_PBANUM_LENGTH);
@@ -3868,6 +4013,7 @@ static int igb_sw_init(struct igb_adapter *adapter)
/**
* igb_open - Called when a network interface is made active
* @netdev: network interface device structure
+ * @resuming: indicates whether we are in a resume call
*
* Returns 0 on success, negative value on failure
*
@@ -3985,6 +4131,7 @@ int igb_open(struct net_device *netdev)
/**
* igb_close - Disables a network interface
* @netdev: network interface device structure
+ * @suspending: indicates we are in a suspend call
*
* Returns 0, this is not allowed to fail
*
@@ -4178,6 +4325,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
**/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
+ struct igb_adapter *adapter = netdev_priv(rx_ring->netdev);
struct device *dev = rx_ring->dev;
int size;
@@ -4200,6 +4348,13 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
+ rx_ring->xdp_prog = adapter->xdp_prog;
+
+ /* XDP RX-queue info */
+ if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
+ rx_ring->queue_index) < 0)
+ goto err;
+
return 0;
err:
@@ -4504,6 +4659,10 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
int reg_idx = ring->reg_idx;
u32 rxdctl = 0;
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+ WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED, NULL));
+
/* disable the queue */
wr32(E1000_RXDCTL(reg_idx), 0);
@@ -4708,6 +4867,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
{
igb_clean_rx_ring(rx_ring);
+ rx_ring->xdp_prog = NULL;
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL;
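Taken together, the three xdp_rxq hunks in this file follow the standard xdp_rxq_info lifecycle. Condensed into one place (a simplified sketch of the calls this patch adds, not a literal excerpt):

/* ring setup (igb_setup_rx_resources) */
err = xdp_rxq_info_reg(&ring->xdp_rxq, netdev, ring->queue_index);

/* ring configure (igb_configure_rx_ring): (re)declare the memory model */
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, MEM_TYPE_PAGE_SHARED, NULL);

/* ring teardown (igb_free_rx_resources) */
xdp_rxq_info_unreg(&ring->xdp_rxq);

Unregistering the memory model before re-registering it matters because igb_configure_rx_ring() can run again on an already-registered ring, for example across a reset.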
@@ -5219,7 +5380,7 @@ static void igb_check_lvmmc(struct igb_adapter *adapter)
/**
* igb_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: pointer to timer_list containing our private info pointer
**/
static void igb_watchdog(struct timer_list *t)
{
@@ -6077,6 +6238,80 @@ dma_error:
return -1;
}
+int igb_xmit_xdp_ring(struct igb_adapter *adapter,
+ struct igb_ring *tx_ring,
+ struct xdp_frame *xdpf)
+{
+ union e1000_adv_tx_desc *tx_desc;
+ u32 len, cmd_type, olinfo_status;
+ struct igb_tx_buffer *tx_buffer;
+ dma_addr_t dma;
+ u16 i;
+
+ len = xdpf->len;
+
+ if (unlikely(!igb_desc_unused(tx_ring)))
+ return IGB_XDP_CONSUMED;
+
+ dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_ring->dev, dma))
+ return IGB_XDP_CONSUMED;
+
+ /* record the location of the first descriptor for this packet */
+ tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ tx_buffer->bytecount = len;
+ tx_buffer->gso_segs = 1;
+ tx_buffer->protocol = 0;
+
+ i = tx_ring->next_to_use;
+ tx_desc = IGB_TX_DESC(tx_ring, i);
+
+ dma_unmap_len_set(tx_buffer, len, len);
+ dma_unmap_addr_set(tx_buffer, dma, dma);
+ tx_buffer->type = IGB_TYPE_XDP;
+ tx_buffer->xdpf = xdpf;
+
+ tx_desc->read.buffer_addr = cpu_to_le64(dma);
+
+ /* put descriptor type bits */
+ cmd_type = E1000_ADVTXD_DTYP_DATA |
+ E1000_ADVTXD_DCMD_DEXT |
+ E1000_ADVTXD_DCMD_IFCS;
+ cmd_type |= len | IGB_TXD_DCMD;
+ tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+
+	olinfo_status = len << E1000_ADVTXD_PAYLEN_SHIFT;
+ /* 82575 requires a unique index per ring */
+ if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+ olinfo_status |= tx_ring->reg_idx << 4;
+
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+
+ netdev_tx_sent_queue(txring_txq(tx_ring), tx_buffer->bytecount);
+
+ /* set the timestamp */
+ tx_buffer->time_stamp = jiffies;
+
+ /* Avoid any potential race with xdp_xmit and cleanup */
+ smp_wmb();
+
+ /* set next_to_watch value indicating a packet is present */
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+
+ tx_buffer->next_to_watch = tx_desc;
+ tx_ring->next_to_use = i;
+
+ /* Make sure there is space in the ring for the next send. */
+ igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
+ writel(i, tx_ring->tail);
+
+ return IGB_XDP_TX;
+}
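The igb_desc_unused() test above is the usual lock-free ring occupancy check between the cleaning (consumer) and transmitting (producer) indices. Restated generically, with an assumed helper name mirroring the driver's macro:

	/* Free slots between consumer and producer; one slot is kept in
	 * reserve so a full ring is never confused with an empty one.
	 */
	static inline u16 example_desc_unused(u16 next_to_clean,
					      u16 next_to_use, u16 count)
	{
		return ((next_to_clean > next_to_use) ? 0 : count) +
		       next_to_clean - next_to_use - 1;
	}

With count = 256, next_to_use = 10 and next_to_clean = 5, this yields 256 + 5 - 10 - 1 = 250 free descriptors.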
+
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
@@ -6105,6 +6340,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
/* record the location of the first descriptor for this packet */
first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+ first->type = IGB_TYPE_SKB;
first->skb = skb;
first->bytecount = skb->len;
first->gso_segs = 1;
@@ -6192,8 +6428,9 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
/**
* igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: number of the Tx queue that hung (unused)
**/
-static void igb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void igb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6256,6 +6493,19 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
struct igb_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ if (adapter->xdp_prog) {
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *ring = adapter->rx_ring[i];
+
+ if (max_frame > igb_rx_bufsz(ring)) {
+ netdev_warn(adapter->netdev, "Requested MTU size is not supported with XDP\n");
+ return -EINVAL;
+ }
+ }
+ }
+
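Worked numbers for the guard above, assuming the common 4K-page setup where igb_rx_bufsz() resolves to a 2048-byte half-page buffer: MTU 1500 gives max_frame = 1500 + 14 + 4 + 4 = 1522 and passes, while MTU 3000 gives 3022 and is rejected as long as an XDP program is attached. A standalone restatement:

	#include <stdio.h>

	int main(void)
	{
		const int eth_hlen = 14, eth_fcs_len = 4, vlan_hlen = 4;
		const int rx_bufsz = 2048;	/* assumed igb_rx_bufsz() */
		int mtu;

		for (mtu = 1500; mtu <= 3000; mtu += 1500) {
			int max_frame = mtu + eth_hlen + eth_fcs_len +
					vlan_hlen;

			printf("mtu %d -> max_frame %d: %s\n",
			       mtu, max_frame,
			       max_frame > rx_bufsz ? "rejected" : "ok");
		}
		return 0;	/* 1522 ok, 3022 rejected */
	}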
/* adjust max frame to be at least the size of a standard frame */
if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
@@ -7809,7 +8059,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- napi_consume_skb(tx_buffer->skb, napi_budget);
+ if (tx_buffer->type == IGB_TYPE_SKB)
+ napi_consume_skb(tx_buffer->skb, napi_budget);
+ else
+ xdp_return_frame(tx_buffer->xdpf);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -7993,8 +8246,8 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
+ if (unlikely(pagecnt_bias == 1)) {
+ page_ref_add(page, USHRT_MAX - 1);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
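Context for the hunk above: the driver pre-charges each Rx page with a large batch of page references and pays them out through a local pagecnt_bias, keeping atomics off the hot path. The change refills when bias reaches 1 (the driver's own last reference) rather than 0, and tops up by USHRT_MAX - 1 so the reference still held is preserved. A toy model of the bookkeeping, with assumed field names:

	#include <limits.h>

	struct toy_rx_page {
		unsigned int refs;		/* stands in for page_ref_count() */
		unsigned short pagecnt_bias;	/* references the driver still owns */
	};

	static void toy_page_alloc(struct toy_rx_page *p)
	{
		p->refs = 1;			/* reference from the allocator */
		p->refs += USHRT_MAX - 1;	/* batch add, as in igb_alloc_mapped_page() */
		p->pagecnt_bias = USHRT_MAX;
	}

	static void toy_give_buffer_to_stack(struct toy_rx_page *p)
	{
		p->pagecnt_bias--;	/* one reference now travels with the skb */
	}

	static void toy_maybe_refill(struct toy_rx_page *p)
	{
		if (p->pagecnt_bias == 1) {	/* only the driver's last ref left */
			p->refs += USHRT_MAX - 1;
			p->pagecnt_bias = USHRT_MAX;
		}
	}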
@@ -8033,23 +8286,21 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring,
static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union e1000_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
+ unsigned int size = xdp->data_end - xdp->data;
unsigned int headlen;
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
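net_prefetch(), adopted here and throughout the remaining hunks, is the helper this series converts drivers to; it wraps exactly the open-coded pattern being deleted. For reference, its definition as introduced in include/linux/netdevice.h (quoted; verify against the tree you are reading):

	static inline void net_prefetch(void *p)
	{
		prefetch(p);
	#if L1_CACHE_BYTES < 128
		prefetch((u8 *)p + L1_CACHE_BYTES);
	#endif
	}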
@@ -8057,24 +8308,24 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
return NULL;
if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
- igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
- va += IGB_TS_HDR_LEN;
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb);
+ xdp->data += IGB_TS_HDR_LEN;
size -= IGB_TS_HDR_LEN;
}
/* Determine available headroom for copy */
headlen = size;
if (headlen > IGB_RX_HDR_LEN)
- headlen = eth_get_headlen(skb->dev, va, IGB_RX_HDR_LEN);
+ headlen = eth_get_headlen(skb->dev, xdp->data, IGB_RX_HDR_LEN);
/* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+ memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen, sizeof(long)));
/* update all of the pointers */
size -= headlen;
if (size) {
skb_add_rx_frag(skb, 0, rx_buffer->page,
- (va + headlen) - page_address(rx_buffer->page),
+ (xdp->data + headlen) - page_address(rx_buffer->page),
size, truesize);
#if (PAGE_SIZE < 8192)
rx_buffer->page_offset ^= truesize;
@@ -8090,32 +8341,29 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
- union e1000_adv_rx_desc *rx_desc,
- unsigned int size)
+ struct xdp_buff *xdp,
+ union e1000_adv_rx_desc *rx_desc)
{
- void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IGB_SKB_PAD + size);
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
- skb = build_skb(va - IGB_SKB_PAD, truesize);
+ skb = build_skb(xdp->data_hard_start, truesize);
if (unlikely(!skb))
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, IGB_SKB_PAD);
- __skb_put(skb, size);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ __skb_put(skb, xdp->data_end - xdp->data);
/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
@@ -8133,6 +8381,79 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
return skb;
}
+static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring,
+ struct xdp_buff *xdp)
+{
+ int err, result = IGB_XDP_PASS;
+ struct bpf_prog *xdp_prog;
+ u32 act;
+
+ rcu_read_lock();
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+
+ if (!xdp_prog)
+ goto xdp_out;
+
+ prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ result = igb_xdp_xmit_back(adapter, xdp);
+ break;
+ case XDP_REDIRECT:
+ err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
+ if (!err)
+ result = IGB_XDP_REDIR;
+ else
+ result = IGB_XDP_CONSUMED;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ result = IGB_XDP_CONSUMED;
+ break;
+ }
+xdp_out:
+ rcu_read_unlock();
+ return ERR_PTR(-result);
+}
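igb_run_xdp() smuggles the XDP verdict through the skb pointer so the Rx loop needs no extra out-parameter. Assuming the IGB_XDP_* values mirror the other Intel drivers (PASS is 0, the rest are bit flags; this patch defines them in igb.h, not shown here), the encoding round-trips like this:

	void *ret = ERR_PTR(-IGB_XDP_TX);	/* driver side        */
	unsigned int verdict = -PTR_ERR(ret);	/* caller: IGB_XDP_TX */

	/* IGB_XDP_PASS == 0 means ERR_PTR(0) == NULL: IS_ERR() is
	 * false and the caller falls through to normal skb
	 * construction.
	 */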
+
+static unsigned int igb_rx_frame_truesize(struct igb_ring *rx_ring,
+ unsigned int size)
+{
+ unsigned int truesize;
+
+#if (PAGE_SIZE < 8192)
+ truesize = igb_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
+#else
+ truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IGB_SKB_PAD + size) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
+ SKB_DATA_ALIGN(size);
+#endif
+ return truesize;
+}
+
+static void igb_rx_buffer_flip(struct igb_ring *rx_ring,
+ struct igb_rx_buffer *rx_buffer,
+ unsigned int size)
+{
+ unsigned int truesize = igb_rx_frame_truesize(rx_ring, size);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+}
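On 4K-page systems the truesize above is the fixed 2048-byte half page, so the flip merely toggles between the two buffer slots of one page; on larger pages the offset advances by the frame's aligned footprint instead. The 4K sequence, spelled out:

	unsigned int page_offset = 0;
	const unsigned int truesize = 2048;	/* half of a 4K page */

	page_offset ^= truesize;	/* 2048: hand out the second half */
	page_offset ^= truesize;	/* 0: back to the first half      */

Each flip pairs with one pagecnt_bias decrement, so the page keeps being recycled for as long as igb_can_reuse_rx_page() allows.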
+
static inline void igb_rx_checksum(struct igb_ring *ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -8187,7 +8508,6 @@ static inline void igb_rx_hash(struct igb_ring *ring,
* igb_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
@@ -8229,6 +8549,10 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ /* XDP packets use error pointer so abort at this point */
+ if (IS_ERR(skb))
+ return true;
+
if (unlikely((igb_test_staterr(rx_desc,
E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
struct net_device *netdev = rx_ring->netdev;
@@ -8287,6 +8611,11 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
+}
+
static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
const unsigned int size)
{
@@ -8330,10 +8659,20 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
+ struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *rx_ring = q_vector->rx.ring;
struct sk_buff *skb = rx_ring->skb;
unsigned int total_bytes = 0, total_packets = 0;
u16 cleaned_count = igb_desc_unused(rx_ring);
+ unsigned int xdp_xmit = 0;
+ struct xdp_buff xdp;
+
+ xdp.rxq = &rx_ring->xdp_rxq;
+
+	/* Frame size depends on rx_ring setup when PAGE_SIZE=4K */
+#if (PAGE_SIZE < 8192)
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, 0);
+#endif
while (likely(total_packets < budget)) {
union e1000_adv_rx_desc *rx_desc;
@@ -8360,13 +8699,38 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rx_buffer = igb_get_rx_buffer(rx_ring, size);
/* retrieve a buffer from the ring */
- if (skb)
+ if (!skb) {
+ xdp.data = page_address(rx_buffer->page) +
+ rx_buffer->page_offset;
+ xdp.data_meta = xdp.data;
+ xdp.data_hard_start = xdp.data -
+ igb_rx_offset(rx_ring);
+ xdp.data_end = xdp.data + size;
+#if (PAGE_SIZE > 4096)
+			/* At larger PAGE_SIZE, frame_sz depends on the packet length */
+ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size);
+#endif
+ skb = igb_run_xdp(adapter, rx_ring, &xdp);
+ }
+
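For orientation, the xdp_buff initialized just above has this geometry (headroom is igb_rx_offset(): IGB_SKB_PAD when the ring uses build_skb, else 0):

	/*  data_hard_start     data == data_meta            data_end
	 *        |<- headroom ->|<-------- packet --------->|
	 *
	 * frame_sz additionally accounts for the tailroom that
	 * build_skb() later hands to skb_shared_info.
	 */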
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & (IGB_XDP_TX | IGB_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
+ igb_rx_buffer_flip(rx_ring, rx_buffer, size);
+ } else {
+ rx_buffer->pagecnt_bias++;
+ }
+ total_packets++;
+ total_bytes += size;
+ } else if (skb)
igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
else if (ring_uses_build_skb(rx_ring))
- skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
+ skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc);
else
skb = igb_construct_skb(rx_ring, rx_buffer,
- rx_desc, size);
+ &xdp, rx_desc);
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -8406,6 +8770,15 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
/* place incomplete frames back on ring for completion */
rx_ring->skb = skb;
+ if (xdp_xmit & IGB_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IGB_XDP_TX) {
+ struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
+
+ igb_xdp_ring_update_tail(tx_ring);
+ }
+
u64_stats_update_begin(&rx_ring->rx_syncp);
rx_ring->rx_stats.packets += total_packets;
rx_ring->rx_stats.bytes += total_bytes;
@@ -8419,11 +8792,6 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
return total_packets;
}
-static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
-}
-
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
struct igb_rx_buffer *bi)
{
@@ -8460,14 +8828,16 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
bi->dma = dma;
bi->page = page;
bi->page_offset = igb_rx_offset(rx_ring);
- bi->pagecnt_bias = 1;
+ page_ref_add(page, USHRT_MAX - 1);
+ bi->pagecnt_bias = USHRT_MAX;
return true;
}
/**
- * igb_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * igb_alloc_rx_buffers - Replace used receive buffers
+ * @rx_ring: rx descriptor ring to allocate new receive buffers
+ * @cleaned_count: count of buffers to allocate
**/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
@@ -8536,9 +8906,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
/**
* igb_mii_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to netdev struct
+ * @ifr: interface structure
+ * @cmd: ioctl command to execute
**/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
@@ -8566,9 +8936,9 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* igb_ioctl -
- * @netdev:
- * @ifreq:
- * @cmd:
+ * @netdev: pointer to netdev struct
+ * @ifr: interface structure
+ * @cmd: ioctl command to execute
**/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 490368d3d03c..7cc5428c3b3d 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -957,8 +957,8 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
/**
* igb_ptp_get_ts_config - get hardware time stamping config
- * @netdev:
- * @ifreq:
+ * @netdev: netdev struct
+ * @ifr: interface struct
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
@@ -1141,8 +1141,8 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
/**
* igb_ptp_set_ts_config - set hardware time stamping config
- * @netdev:
- * @ifreq:
+ * @netdev: netdev struct
+ * @ifr: interface struct
*
**/
int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 19269f5d52bc..ee9f8c1dca83 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -61,7 +61,7 @@ static const struct igbvf_info *igbvf_info_tbl[] = {
/**
* igbvf_desc_unused - calculate if we have unused descriptors
- * @rx_ring: address of receive ring structure
+ * @ring: address of receive ring structure
**/
static int igbvf_desc_unused(struct igbvf_ring *ring)
{
@@ -74,6 +74,7 @@ static int igbvf_desc_unused(struct igbvf_ring *ring)
/**
* igbvf_receive_skb - helper function to handle Rx indications
* @adapter: board private structure
+ * @netdev: pointer to netdev struct
+ * @skb: skb to indicate to stack
* @status: descriptor status field as written by hardware
* @vlan: descriptor vlan field as written by hardware (no le/be conversion)
- * @skb: pointer to sk_buff to be indicated to stack
@@ -233,6 +235,8 @@ no_buffers:
/**
* igbvf_clean_rx_irq - Send received data up the network stack; legacy
* @adapter: board private structure
+ * @work_done: output parameter used to indicate completed work
+ * @work_to_do: input parameter setting limit of work
*
* the return value indicates whether actual cleaning was done; there
* is no guarantee that everything was cleaned
@@ -406,6 +410,7 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
/**
* igbvf_setup_tx_resources - allocate Tx resources (Descriptors)
* @adapter: board private structure
+ * @tx_ring: ring being initialized
*
* Return 0 on success, negative on failure
**/
@@ -444,6 +449,7 @@ err:
/**
* igbvf_setup_rx_resources - allocate Rx resources (Descriptors)
* @adapter: board private structure
+ * @rx_ring: ring being initialized
*
* Returns 0 on success, negative on failure
**/
@@ -540,7 +546,7 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
/**
* igbvf_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
+ * @rx_ring: ring structure pointer to free buffers from
**/
static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
{
@@ -760,7 +766,7 @@ static void igbvf_set_itr(struct igbvf_adapter *adapter)
/**
* igbvf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @tx_ring: ring structure to clean descriptors from
*
* returns true if ring is completely cleaned
**/
@@ -1891,7 +1897,7 @@ static bool igbvf_has_link(struct igbvf_adapter *adapter)
/**
* igbvf_watchdog - Timer Call-back
- * @data: pointer to adapter cast into an unsigned long
+ * @t: timer list pointer containing private struct
**/
static void igbvf_watchdog(struct timer_list *t)
{
@@ -2372,8 +2378,9 @@ static netdev_tx_t igbvf_xmit_frame(struct sk_buff *skb,
/**
* igbvf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue timing out (unused)
**/
-static void igbvf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void igbvf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 2d566f3c827b..35baae900c1f 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -215,6 +215,8 @@ struct igc_adapter {
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
+ struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */
+ ktime_t ptp_reset_start; /* Reset time in clock mono */
};
void igc_up(struct igc_adapter *adapter);
@@ -548,6 +550,7 @@ void igc_ptp_rx_pktstamp(struct igc_q_vector *q_vector, void *va,
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
void igc_ptp_tx_hang(struct igc_adapter *adapter);
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts);
#define igc_rx_pg_size(_ring) (PAGE_SIZE << igc_rx_pg_order(_ring))
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index cc5a6cf531c7..fd37d2c203af 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -215,6 +215,11 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_K2:
case IGC_DEV_ID_I225_LMVP:
case IGC_DEV_ID_I225_IT:
+ case IGC_DEV_ID_I226_LM:
+ case IGC_DEV_ID_I226_V:
+ case IGC_DEV_ID_I226_IT:
+ case IGC_DEV_ID_I221_V:
+ case IGC_DEV_ID_I226_BLANK_NVM:
case IGC_DEV_ID_I225_BLANK_NVM:
mac->type = igc_i225;
break;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index f1f464967f87..32f5fd684139 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -324,22 +324,10 @@
/* Advanced Receive Descriptor bit definitions */
#define IGC_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
-#define IGC_RXDEXT_STATERR_CE 0x01000000
-#define IGC_RXDEXT_STATERR_SE 0x02000000
-#define IGC_RXDEXT_STATERR_SEQ 0x04000000
-#define IGC_RXDEXT_STATERR_CXE 0x10000000
-#define IGC_RXDEXT_STATERR_TCPE 0x20000000
+#define IGC_RXDEXT_STATERR_L4E 0x20000000
#define IGC_RXDEXT_STATERR_IPE 0x40000000
#define IGC_RXDEXT_STATERR_RXE 0x80000000
-/* Same mask, but for extended and packet split descriptors */
-#define IGC_RXDEXT_ERR_FRAME_ERR_MASK ( \
- IGC_RXDEXT_STATERR_CE | \
- IGC_RXDEXT_STATERR_SE | \
- IGC_RXDEXT_STATERR_SEQ | \
- IGC_RXDEXT_STATERR_CXE | \
- IGC_RXDEXT_STATERR_RXE)
-
#define IGC_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define IGC_MRQC_RSS_FIELD_IPV4 0x00020000
#define IGC_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000
@@ -409,7 +397,7 @@
#define IGC_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
/* Time Sync Transmit Control bit definitions */
-#define IGC_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */
+#define IGC_TSYNCTXCTL_TXTT_0 0x00000001 /* Tx timestamp reg 0 valid */
#define IGC_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */
#define IGC_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */
#define IGC_TSYNCTXCTL_SYNC_COMP_ERR 0x20000000 /* sync err */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 44410c2265d6..61d331ce38cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -321,6 +321,9 @@ static void igc_ethtool_get_regs(struct net_device *netdev,
for (i = 0; i < 8; i++)
regs_buff[205 + i] = rd32(IGC_ETQF(i));
+
+ regs_buff[213] = adapter->stats.tlpic;
+ regs_buff[214] = adapter->stats.rlpic;
}
static void igc_ethtool_get_wol(struct net_device *netdev,
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index b9fe51b91c47..55dae7c4703f 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -24,6 +24,11 @@
#define IGC_DEV_ID_I225_K2 0x3101
#define IGC_DEV_ID_I225_LMVP 0x5502
#define IGC_DEV_ID_I225_IT 0x0D9F
+#define IGC_DEV_ID_I226_LM 0x125B
+#define IGC_DEV_ID_I226_V 0x125C
+#define IGC_DEV_ID_I226_IT 0x125D
+#define IGC_DEV_ID_I221_V 0x125E
+#define IGC_DEV_ID_I226_BLANK_NVM 0x125F
#define IGC_DEV_ID_I225_BLANK_NVM 0x15FD
/* Function pointers for the MAC. */
@@ -125,9 +130,6 @@ struct igc_nvm_info {
struct igc_nvm_operations ops;
enum igc_nvm_type type;
- u32 flash_bank_size;
- u32 flash_base_addr;
-
u16 word_size;
u16 delay_usec;
u16 address_bits;
@@ -153,7 +155,6 @@ struct igc_phy_info {
u8 mdix;
bool is_mdix;
- bool reset_disable;
bool speed_downgraded;
bool autoneg_wait_to_complete;
};
@@ -239,6 +240,8 @@ struct igc_hw_stats {
u64 prc511;
u64 prc1023;
u64 prc1522;
+ u64 tlpic;
+ u64 rlpic;
u64 gprc;
u64 bprc;
u64 mprc;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 9593aa4eea36..9112dff075cf 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -47,6 +47,11 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_IT), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I221_V), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_BLANK_NVM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_BLANK_NVM), board_base },
/* required last entry */
{0, }
@@ -1428,7 +1433,7 @@ static void igc_rx_checksum(struct igc_ring *ring,
/* TCP/UDP checksum error bit is set */
if (igc_test_staterr(rx_desc,
- IGC_RXDEXT_STATERR_TCPE |
+ IGC_RXDEXT_STATERR_L4E |
IGC_RXDEXT_STATERR_IPE)) {
/* work around errata with sctp packets where the TCPE aka
* L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
@@ -1550,10 +1555,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* build an skb around the page buffer */
skb = build_skb(va - IGC_SKB_PAD, truesize);
@@ -1589,10 +1591,7 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(va);
-#if L1_CACHE_BYTES < 128
- prefetch(va + L1_CACHE_BYTES);
-#endif
+ net_prefetch(va);
/* allocate a skb to store the frags */
skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGC_RX_HDR_LEN);
@@ -1743,8 +1742,7 @@ static bool igc_cleanup_headers(struct igc_ring *rx_ring,
union igc_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
- if (unlikely((igc_test_staterr(rx_desc,
- IGC_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+ if (unlikely(igc_test_staterr(rx_desc, IGC_RXDEXT_STATERR_RXE))) {
struct net_device *netdev = rx_ring->netdev;
if (!(netdev->features & NETIF_F_RXALL)) {
@@ -3685,6 +3683,8 @@ void igc_update_stats(struct igc_adapter *adapter)
adapter->stats.prc511 += rd32(IGC_PRC511);
adapter->stats.prc1023 += rd32(IGC_PRC1023);
adapter->stats.prc1522 += rd32(IGC_PRC1522);
+ adapter->stats.tlpic += rd32(IGC_TLPIC);
+ adapter->stats.rlpic += rd32(IGC_RLPIC);
mpc = rd32(IGC_MPC);
adapter->stats.mpc += mpc;
@@ -3778,6 +3778,8 @@ void igc_down(struct igc_adapter *adapter)
set_bit(__IGC_DOWN, &adapter->state);
+ igc_ptp_suspend(adapter);
+
/* disable receives in the hardware */
rctl = rd32(IGC_RCTL);
wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN);
@@ -3831,7 +3833,6 @@ void igc_down(struct igc_adapter *adapter)
void igc_reinit_locked(struct igc_adapter *adapter)
{
- WARN_ON(in_interrupt());
while (test_and_set_bit(__IGC_RESETTING, &adapter->state))
usleep_range(1000, 2000);
igc_down(adapter);
@@ -4659,7 +4660,7 @@ int igc_close(struct net_device *netdev)
/**
* igc_ioctl - Access the hwtstamp interface
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
* @cmd: ioctl command
**/
static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
@@ -4700,14 +4701,35 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue,
return 0;
}
-static bool validate_schedule(const struct tc_taprio_qopt_offload *qopt)
+static bool is_base_time_past(ktime_t base_time, const struct timespec64 *now)
+{
+ struct timespec64 b;
+
+ b = ktime_to_timespec64(base_time);
+
+ return timespec64_compare(now, &b) > 0;
+}
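timespec64_compare(now, &b) > 0 holds only when now is strictly later, so a base_time equal to the current PTP clock still counts as "future" and the schedule is rejected below. A userspace sketch of the same ordering rule:

	#include <stdbool.h>
	#include <stdint.h>

	struct ts64 { int64_t tv_sec; long tv_nsec; };

	/* same contract as timespec64_compare(): <0, 0 or >0 */
	static int ts_cmp(const struct ts64 *a, const struct ts64 *b)
	{
		if (a->tv_sec != b->tv_sec)
			return a->tv_sec < b->tv_sec ? -1 : 1;
		return (a->tv_nsec < b->tv_nsec) ? -1 :
		       (a->tv_nsec > b->tv_nsec);
	}

	static bool base_time_past(struct ts64 base, struct ts64 now)
	{
		return ts_cmp(&now, &base) > 0;
	}

	/* base 99s / now 100s  -> true  (schedule accepted)
	 * base 100s / now 100s -> false (treated as future, rejected)
	 * base 101s / now 100s -> false (rejected)
	 */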
+
+static bool validate_schedule(struct igc_adapter *adapter,
+ const struct tc_taprio_qopt_offload *qopt)
{
int queue_uses[IGC_MAX_TX_QUEUES] = { };
+ struct timespec64 now;
size_t n;
if (qopt->cycle_time_extension)
return false;
+ igc_ptp_read(adapter, &now);
+
+	/* If we program the controller's BASET registers with a time
+	 * in the future, it will hold all packets until that time,
+	 * causing a lot of Tx hangs. To avoid that, we reject
+	 * schedules that would start in the future.
+ */
+ if (!is_base_time_past(qopt->base_time, &now))
+ return false;
+
for (n = 0; n < qopt->num_entries; n++) {
const struct tc_taprio_sched_entry *e;
int i;
@@ -4762,7 +4784,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (adapter->base_time)
return -EALREADY;
- if (!validate_schedule(qopt))
+ if (!validate_schedule(adapter, qopt))
return -EINVAL;
adapter->cycle_time = qopt->cycle_time;
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 6a9b5102aa55..ac0b9c85da7c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -8,6 +8,7 @@
#include <linux/pci.h>
#include <linux/ptp_classify.h>
#include <linux/clocksource.h>
+#include <linux/ktime.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -16,17 +17,12 @@
#define IGC_PTP_TX_TIMEOUT (HZ * 15)
/* SYSTIM read access for I225 */
-static void igc_ptp_read_i225(struct igc_adapter *adapter,
- struct timespec64 *ts)
+void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts)
{
struct igc_hw *hw = &adapter->hw;
u32 sec, nsec;
- /* The timestamp latches on lowest register read. For I210/I211, the
- * lowest register is SYSTIMR. Since we only need to provide nanosecond
- * resolution, we can ignore it.
- */
- rd32(IGC_SYSTIMR);
+ /* The timestamp is latched when SYSTIML is read. */
nsec = rd32(IGC_SYSTIML);
sec = rd32(IGC_SYSTIMH);
@@ -39,9 +35,6 @@ static void igc_ptp_write_i225(struct igc_adapter *adapter,
{
struct igc_hw *hw = &adapter->hw;
- /* Writing the SYSTIMR register is not necessary as it only
- * provides sub-nanosecond resolution.
- */
wr32(IGC_SYSTIML, ts->tv_nsec);
wr32(IGC_SYSTIMH, ts->tv_sec);
}
@@ -81,7 +74,7 @@ static int igc_ptp_adjtime_i225(struct ptp_clock_info *ptp, s64 delta)
spin_lock_irqsave(&igc->tmreg_lock, flags);
- igc_ptp_read_i225(igc, &now);
+ igc_ptp_read(igc, &now);
now = timespec64_add(now, then);
igc_ptp_write_i225(igc, (const struct timespec64 *)&now);
@@ -102,10 +95,9 @@ static int igc_ptp_gettimex64_i225(struct ptp_clock_info *ptp,
spin_lock_irqsave(&igc->tmreg_lock, flags);
ptp_read_system_prets(sts);
- rd32(IGC_SYSTIMR);
- ptp_read_system_postts(sts);
ts->tv_nsec = rd32(IGC_SYSTIML);
ts->tv_sec = rd32(IGC_SYSTIMH);
+ ptp_read_system_postts(sts);
spin_unlock_irqrestore(&igc->tmreg_lock, flags);
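The reorder above matters because on i225 the device time latches on the SYSTIML read itself; there is no SYSTIMR pre-read to latch it early. The pre/post system-clock samples must therefore bracket both register reads for PTP_SYS_OFFSET_EXTENDED users, who typically pair the device time with the midpoint of the window (illustrative consumer-side math):

	#include <stdint.h>

	/* pre_ns/post_ns bracket the device read; dev_ns is the
	 * latched device time
	 */
	static int64_t ptp_offset_estimate(uint64_t pre_ns, uint64_t dev_ns,
					   uint64_t post_ns)
	{
		uint64_t sys_mid = pre_ns + (post_ns - pre_ns) / 2;

		return (int64_t)(dev_ns - sys_mid);	/* device - system */
	}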
@@ -422,24 +414,17 @@ static void igc_ptp_tx_work(struct work_struct *work)
if (!test_bit(__IGC_PTP_TX_IN_PROGRESS, &adapter->state))
return;
- if (time_is_before_jiffies(adapter->ptp_tx_start +
- IGC_PTP_TX_TIMEOUT)) {
- igc_ptp_tx_timeout(adapter);
+ tsynctxctl = rd32(IGC_TSYNCTXCTL);
+ if (WARN_ON_ONCE(!(tsynctxctl & IGC_TSYNCTXCTL_TXTT_0)))
return;
- }
- tsynctxctl = rd32(IGC_TSYNCTXCTL);
- if (tsynctxctl & IGC_TSYNCTXCTL_VALID)
- igc_ptp_tx_hwtstamp(adapter);
- else
- /* reschedule to check later */
- schedule_work(&adapter->ptp_tx_work);
+ igc_ptp_tx_hwtstamp(adapter);
}
/**
* igc_ptp_set_ts_config - set hardware time stamping config
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
*
**/
int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
@@ -466,7 +451,7 @@ int igc_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
/**
* igc_ptp_get_ts_config - get hardware time stamping config
* @netdev: network interface device structure
- * @ifreq: interface request data
+ * @ifr: interface request data
*
* Get the hwtstamp_config settings to return to the user. Rather than attempt
* to deconstruct the settings from the registers, just return a shadow copy
@@ -515,6 +500,9 @@ void igc_ptp_init(struct igc_adapter *adapter)
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ adapter->prev_ptp_time = ktime_to_timespec64(ktime_get_real());
+ adapter->ptp_reset_start = ktime_get();
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -526,6 +514,24 @@ void igc_ptp_init(struct igc_adapter *adapter)
}
}
+static void igc_ptp_time_save(struct igc_adapter *adapter)
+{
+ igc_ptp_read(adapter, &adapter->prev_ptp_time);
+ adapter->ptp_reset_start = ktime_get();
+}
+
+static void igc_ptp_time_restore(struct igc_adapter *adapter)
+{
+ struct timespec64 ts = adapter->prev_ptp_time;
+ ktime_t delta;
+
+ delta = ktime_sub(ktime_get(), adapter->ptp_reset_start);
+
+ timespec64_add_ns(&ts, ktime_to_ns(delta));
+
+ igc_ptp_write_i225(adapter, &ts);
+}
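Together, igc_ptp_time_save() and igc_ptp_time_restore() keep the PTP clock monotonic across a reset by replaying elapsed CLOCK_MONOTONIC time on top of the last-known device time rather than reseeding from CLOCK_REALTIME. With illustrative numbers: SYSTIM read 1000.000000005 s at save time and 2.5 s elapse before the restore, so the clock resumes at 1002.500000005 s:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t prev_sec = 1000, prev_nsec = 5; /* assumed snapshot */
		int64_t delta_ns = 2500000000LL;        /* monotonic delta  */

		int64_t total = prev_sec * 1000000000LL + prev_nsec + delta_ns;

		printf("restored: %lld.%09lld s\n",
		       (long long)(total / 1000000000LL),
		       (long long)(total % 1000000000LL));
		return 0;	/* restored: 1002.500000005 s */
	}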
+
/**
* igc_ptp_suspend - Disable PTP work items and prepare for suspend
* @adapter: Board private structure
@@ -542,6 +548,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter)
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state);
+
+ igc_ptp_time_save(adapter);
}
/**
@@ -591,9 +599,7 @@ void igc_ptp_reset(struct igc_adapter *adapter)
/* Re-initialize the timer. */
if (hw->mac.type == igc_i225) {
- struct timespec64 ts64 = ktime_to_timespec64(ktime_get_real());
-
- igc_ptp_write_i225(adapter, &ts64);
+ igc_ptp_time_restore(adapter);
} else {
timecounter_init(&adapter->tc, &adapter->cc,
ktime_to_ns(ktime_get_real()));
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index cbaa933ef30d..a430871d1c27 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -98,7 +98,6 @@ bool
ixgb_adapter_stop(struct ixgb_hw *hw)
{
u32 ctrl_reg;
- u32 icr_reg;
ENTER();
@@ -142,7 +141,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
IXGB_WRITE_REG(hw, IMC, 0xffffffff);
/* Clear any pending interrupt events. */
- icr_reg = IXGB_READ_REG(hw, ICR);
+ IXGB_READ_REG(hw, ICR);
return ctrl_reg & IXGB_CTRL0_RST;
}
@@ -274,7 +273,6 @@ bool
ixgb_init_hw(struct ixgb_hw *hw)
{
u32 i;
- u32 ctrl_reg;
bool status;
ENTER();
@@ -286,7 +284,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
*/
pr_debug("Issuing a global reset to MAC\n");
- ctrl_reg = ixgb_mac_reset(hw);
+ ixgb_mac_reset(hw);
pr_debug("Issuing an EE reset to MAC\n");
#ifdef HP_ZX1
@@ -949,8 +947,6 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
static void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
- volatile u32 temp_reg;
-
ENTER();
/* if we are stopped or resetting exit gracefully */
@@ -959,66 +955,66 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
return;
}
- temp_reg = IXGB_READ_REG(hw, TPRL);
- temp_reg = IXGB_READ_REG(hw, TPRH);
- temp_reg = IXGB_READ_REG(hw, GPRCL);
- temp_reg = IXGB_READ_REG(hw, GPRCH);
- temp_reg = IXGB_READ_REG(hw, BPRCL);
- temp_reg = IXGB_READ_REG(hw, BPRCH);
- temp_reg = IXGB_READ_REG(hw, MPRCL);
- temp_reg = IXGB_READ_REG(hw, MPRCH);
- temp_reg = IXGB_READ_REG(hw, UPRCL);
- temp_reg = IXGB_READ_REG(hw, UPRCH);
- temp_reg = IXGB_READ_REG(hw, VPRCL);
- temp_reg = IXGB_READ_REG(hw, VPRCH);
- temp_reg = IXGB_READ_REG(hw, JPRCL);
- temp_reg = IXGB_READ_REG(hw, JPRCH);
- temp_reg = IXGB_READ_REG(hw, GORCL);
- temp_reg = IXGB_READ_REG(hw, GORCH);
- temp_reg = IXGB_READ_REG(hw, TORL);
- temp_reg = IXGB_READ_REG(hw, TORH);
- temp_reg = IXGB_READ_REG(hw, RNBC);
- temp_reg = IXGB_READ_REG(hw, RUC);
- temp_reg = IXGB_READ_REG(hw, ROC);
- temp_reg = IXGB_READ_REG(hw, RLEC);
- temp_reg = IXGB_READ_REG(hw, CRCERRS);
- temp_reg = IXGB_READ_REG(hw, ICBC);
- temp_reg = IXGB_READ_REG(hw, ECBC);
- temp_reg = IXGB_READ_REG(hw, MPC);
- temp_reg = IXGB_READ_REG(hw, TPTL);
- temp_reg = IXGB_READ_REG(hw, TPTH);
- temp_reg = IXGB_READ_REG(hw, GPTCL);
- temp_reg = IXGB_READ_REG(hw, GPTCH);
- temp_reg = IXGB_READ_REG(hw, BPTCL);
- temp_reg = IXGB_READ_REG(hw, BPTCH);
- temp_reg = IXGB_READ_REG(hw, MPTCL);
- temp_reg = IXGB_READ_REG(hw, MPTCH);
- temp_reg = IXGB_READ_REG(hw, UPTCL);
- temp_reg = IXGB_READ_REG(hw, UPTCH);
- temp_reg = IXGB_READ_REG(hw, VPTCL);
- temp_reg = IXGB_READ_REG(hw, VPTCH);
- temp_reg = IXGB_READ_REG(hw, JPTCL);
- temp_reg = IXGB_READ_REG(hw, JPTCH);
- temp_reg = IXGB_READ_REG(hw, GOTCL);
- temp_reg = IXGB_READ_REG(hw, GOTCH);
- temp_reg = IXGB_READ_REG(hw, TOTL);
- temp_reg = IXGB_READ_REG(hw, TOTH);
- temp_reg = IXGB_READ_REG(hw, DC);
- temp_reg = IXGB_READ_REG(hw, PLT64C);
- temp_reg = IXGB_READ_REG(hw, TSCTC);
- temp_reg = IXGB_READ_REG(hw, TSCTFC);
- temp_reg = IXGB_READ_REG(hw, IBIC);
- temp_reg = IXGB_READ_REG(hw, RFC);
- temp_reg = IXGB_READ_REG(hw, LFC);
- temp_reg = IXGB_READ_REG(hw, PFRC);
- temp_reg = IXGB_READ_REG(hw, PFTC);
- temp_reg = IXGB_READ_REG(hw, MCFRC);
- temp_reg = IXGB_READ_REG(hw, MCFTC);
- temp_reg = IXGB_READ_REG(hw, XONRXC);
- temp_reg = IXGB_READ_REG(hw, XONTXC);
- temp_reg = IXGB_READ_REG(hw, XOFFRXC);
- temp_reg = IXGB_READ_REG(hw, XOFFTXC);
- temp_reg = IXGB_READ_REG(hw, RJC);
+ IXGB_READ_REG(hw, TPRL);
+ IXGB_READ_REG(hw, TPRH);
+ IXGB_READ_REG(hw, GPRCL);
+ IXGB_READ_REG(hw, GPRCH);
+ IXGB_READ_REG(hw, BPRCL);
+ IXGB_READ_REG(hw, BPRCH);
+ IXGB_READ_REG(hw, MPRCL);
+ IXGB_READ_REG(hw, MPRCH);
+ IXGB_READ_REG(hw, UPRCL);
+ IXGB_READ_REG(hw, UPRCH);
+ IXGB_READ_REG(hw, VPRCL);
+ IXGB_READ_REG(hw, VPRCH);
+ IXGB_READ_REG(hw, JPRCL);
+ IXGB_READ_REG(hw, JPRCH);
+ IXGB_READ_REG(hw, GORCL);
+ IXGB_READ_REG(hw, GORCH);
+ IXGB_READ_REG(hw, TORL);
+ IXGB_READ_REG(hw, TORH);
+ IXGB_READ_REG(hw, RNBC);
+ IXGB_READ_REG(hw, RUC);
+ IXGB_READ_REG(hw, ROC);
+ IXGB_READ_REG(hw, RLEC);
+ IXGB_READ_REG(hw, CRCERRS);
+ IXGB_READ_REG(hw, ICBC);
+ IXGB_READ_REG(hw, ECBC);
+ IXGB_READ_REG(hw, MPC);
+ IXGB_READ_REG(hw, TPTL);
+ IXGB_READ_REG(hw, TPTH);
+ IXGB_READ_REG(hw, GPTCL);
+ IXGB_READ_REG(hw, GPTCH);
+ IXGB_READ_REG(hw, BPTCL);
+ IXGB_READ_REG(hw, BPTCH);
+ IXGB_READ_REG(hw, MPTCL);
+ IXGB_READ_REG(hw, MPTCH);
+ IXGB_READ_REG(hw, UPTCL);
+ IXGB_READ_REG(hw, UPTCH);
+ IXGB_READ_REG(hw, VPTCL);
+ IXGB_READ_REG(hw, VPTCH);
+ IXGB_READ_REG(hw, JPTCL);
+ IXGB_READ_REG(hw, JPTCH);
+ IXGB_READ_REG(hw, GOTCL);
+ IXGB_READ_REG(hw, GOTCH);
+ IXGB_READ_REG(hw, TOTL);
+ IXGB_READ_REG(hw, TOTH);
+ IXGB_READ_REG(hw, DC);
+ IXGB_READ_REG(hw, PLT64C);
+ IXGB_READ_REG(hw, TSCTC);
+ IXGB_READ_REG(hw, TSCTFC);
+ IXGB_READ_REG(hw, IBIC);
+ IXGB_READ_REG(hw, RFC);
+ IXGB_READ_REG(hw, LFC);
+ IXGB_READ_REG(hw, PFRC);
+ IXGB_READ_REG(hw, PFTC);
+ IXGB_READ_REG(hw, MCFRC);
+ IXGB_READ_REG(hw, MCFTC);
+ IXGB_READ_REG(hw, XONRXC);
+ IXGB_READ_REG(hw, XONTXC);
+ IXGB_READ_REG(hw, XOFFRXC);
+ IXGB_READ_REG(hw, XOFFTXC);
+ IXGB_READ_REG(hw, RJC);
}
/******************************************************************************
@@ -1161,18 +1157,13 @@ static void
ixgb_optics_reset(struct ixgb_hw *hw)
{
if (hw->phy_type == ixgb_phy_type_txn17401) {
- u16 mdio_reg;
-
ixgb_write_phy_reg(hw,
MDIO_CTRL1,
IXGB_PHY_ADDRESS,
MDIO_MMD_PMAPMD,
MDIO_CTRL1_RESET);
- mdio_reg = ixgb_read_phy_reg(hw,
- MDIO_CTRL1,
- IXGB_PHY_ADDRESS,
- MDIO_MMD_PMAPMD);
+ ixgb_read_phy_reg(hw, MDIO_CTRL1, IXGB_PHY_ADDRESS, MDIO_MMD_PMAPMD);
}
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 048351cf0e4a..1588376d4c67 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1109,7 +1109,7 @@ alloc_failed:
/**
* ixgb_watchdog - Timer Call-back
- * @data: pointer to netdev cast into an unsigned long
+ * @t: pointer to timer_list containing our private info pointer
**/
static void
@@ -1531,10 +1531,11 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/**
* ixgb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue hanging (unused)
**/
static void
-ixgb_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+ixgb_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -1746,7 +1747,8 @@ ixgb_intr(int irq, void *data)
/**
* ixgb_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: napi struct pointer
+ * @budget: max number of receives to clean
**/
static int
@@ -1865,7 +1867,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
* ixgb_rx_checksum - Receive Checksum Offload for 82597.
* @adapter: board private structure
* @rx_desc: receive descriptor
- * @sk_buff: socket buffer with received data
+ * @skb: socket buffer with received data
**/
static void
@@ -1923,6 +1925,8 @@ static void ixgb_check_copybreak(struct napi_struct *napi,
/**
* ixgb_clean_rx_irq - Send received data up the network stack,
* @adapter: board private structure
+ * @work_done: output pointer to number of packets cleaned
+ * @work_to_do: how much work we can complete
**/
static bool
@@ -2042,6 +2046,7 @@ rxdesc_done:
/**
* ixgb_alloc_rx_buffers - Replace used receive buffers
* @adapter: address of board private structure
+ * @cleaned_count: how many buffers to allocate
**/
static void
@@ -2211,7 +2216,7 @@ static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev,
/**
* ixgb_io_slot_reset - called after the pci bus has been reset.
- * @pdev pointer to pci device with error
+ * @pdev: pointer to pci device with error
*
* This callback is called after the PCI bus has been reset.
* Basically, this tries to restart the card from scratch.
@@ -2259,7 +2264,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
/**
* ixgb_io_resume - called when it's OK to resume normal operations
- * @pdev pointer to pci device with error
+ * @pdev: pointer to pci device with error
*
* The error recovery driver tells us that it's OK to resume
* normal operation. Implementation resembles the second-half
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 1e8a809233a0..de0fc6ecf491 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -350,7 +350,7 @@ struct ixgbe_ring {
struct ixgbe_rx_queue_stats rx_stats;
};
struct xdp_rxq_info xdp_rxq;
- struct xdp_umem *xsk_umem;
+ struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 71ec908266a6..a280aa34ca1d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -531,6 +531,16 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
return err;
}
+static void ixgbe_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw_stats *hwstats = &adapter->stats;
+
+ stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
+ stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
+}
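Callbacks like this only fill in what the hardware counts: the ethtool core pre-initializes every field to ETHTOOL_STAT_NOT_SET and omits untouched fields from the netlink reply. A driver with only an Rx pause counter could therefore implement the hook as (sketch; the counter-read helper is hypothetical):

	static void example_get_pause_stats(struct net_device *netdev,
					    struct ethtool_pause_stats *stats)
	{
		/* tx_pause_frames left untouched: it stays
		 * ETHTOOL_STAT_NOT_SET and is not reported
		 */
		stats->rx_pause_frames = example_read_rx_pause(netdev);
	}

Recent ethtool binaries display these counters via the pause statistics request, e.g. ethtool --include-statistics -a <dev>.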
+
static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -3546,6 +3556,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_eeprom = ixgbe_set_eeprom,
.get_ringparam = ixgbe_get_ringparam,
.set_ringparam = ixgbe_set_ringparam,
+ .get_pause_stats = ixgbe_get_pause_stats,
.get_pauseparam = ixgbe_get_pauseparam,
.set_pauseparam = ixgbe_set_pauseparam,
.get_msglevel = ixgbe_get_msglevel,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2e35c5706cf1..df389a11d3af 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1029,10 +1029,10 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
WRITE_ONCE(adapter->rx_ring[ring->queue_index], NULL);
adapter->q_vector[v_idx] = NULL;
- napi_hash_del(&q_vector->napi);
- netif_napi_del(&q_vector->napi);
+ __netif_napi_del(&q_vector->napi);
/*
+ * after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
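Background for this hunk: elsewhere in this series napi_hash_del() was folded into the NAPI teardown helpers, and __netif_napi_del() unhashes and unlists the instance without paying a synchronize_net() per vector. The surrounding function (not shown in the hunk) already frees the vector with kfree_rcu(), so one RCU grace period still separates the unlink from the free:

	__netif_napi_del(&q_vector->napi);	/* unhash/unlist, no sync */
	/* ixgbe_get_stats64() readers racing with the unlink finish
	 * within one grace period; kfree_rcu() waits that long
	 */
	kfree_rcu(q_vector, rcu);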
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 86ca8b9ea1b8..45ae33e15303 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2095,10 +2095,8 @@ static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -2161,10 +2159,7 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
* likely have a consumer accessing first few bytes of meta
* data, and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -3156,7 +3151,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
#endif
ixgbe_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_umem ?
+ bool wd = ring->xsk_pool ?
ixgbe_clean_xdp_tx_irq(q_vector, ring, budget) :
ixgbe_clean_tx_irq(q_vector, ring, budget);
@@ -3176,7 +3171,7 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx) {
- int cleaned = ring->xsk_umem ?
+ int cleaned = ring->xsk_pool ?
ixgbe_clean_rx_irq_zc(q_vector, ring,
per_ring_budget) :
ixgbe_clean_rx_irq(q_vector, ring,
@@ -3471,9 +3466,9 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
u32 txdctl = IXGBE_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
- ring->xsk_umem = NULL;
+ ring->xsk_pool = NULL;
if (ring_is_xdp(ring))
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
/* disable queue to avoid issues while updating state */
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0);
@@ -3713,8 +3708,8 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
/* configure the packet buffer length */
- if (rx_ring->xsk_umem) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(rx_ring->xsk_umem);
+ if (rx_ring->xsk_pool) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(rx_ring->xsk_pool);
/* If the MAC support setting RXDCTL.RLPML, the
* SRRCTL[n].BSIZEPKT is set to PAGE_SIZE and
@@ -4059,12 +4054,12 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
u8 reg_idx = ring->reg_idx;
xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
- ring->xsk_umem = ixgbe_xsk_umem(adapter, ring);
- if (ring->xsk_umem) {
+ ring->xsk_pool = ixgbe_xsk_pool(adapter, ring);
+ if (ring->xsk_pool) {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL,
NULL));
- xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);
+ xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);
} else {
WARN_ON(xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
MEM_TYPE_PAGE_SHARED, NULL));
@@ -4119,8 +4114,8 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
#endif
}
- if (ring->xsk_umem && hw->mac.type != ixgbe_mac_82599EB) {
- u32 xsk_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
+ if (ring->xsk_pool && hw->mac.type != ixgbe_mac_82599EB) {
+ u32 xsk_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
IXGBE_RXDCTL_RLPML_EN);
@@ -4142,7 +4137,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
ixgbe_rx_desc_queue_enable(adapter, ring);
- if (ring->xsk_umem)
+ if (ring->xsk_pool)
ixgbe_alloc_rx_buffers_zc(ring, ixgbe_desc_unused(ring));
else
ixgbe_alloc_rx_buffers(ring, ixgbe_desc_unused(ring));
@@ -5292,7 +5287,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
u16 i = rx_ring->next_to_clean;
struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i];
- if (rx_ring->xsk_umem) {
+ if (rx_ring->xsk_pool) {
ixgbe_xsk_clean_rx_ring(rx_ring);
goto skip_free;
}
@@ -5682,7 +5677,6 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter)
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
- WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
netif_trans_update(adapter->netdev);
@@ -5989,7 +5983,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
u16 i = tx_ring->next_to_clean;
struct ixgbe_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
- if (tx_ring->xsk_umem) {
+ if (tx_ring->xsk_pool) {
ixgbe_xsk_clean_tx_ring(tx_ring);
goto out;
}
@@ -6185,8 +6179,9 @@ static void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter)
/**
* ixgbe_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: queue number that timed out
**/
-static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbe_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -10161,7 +10156,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
*/
if (need_reset && prog)
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->xdp_ring[i]->xsk_umem)
+ if (adapter->xdp_ring[i]->xsk_pool)
(void)ixgbe_xsk_wakeup(adapter->netdev, i,
XDP_WAKEUP_RX);
@@ -10175,8 +10170,8 @@ static int ixgbe_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return ixgbe_xdp_setup(dev, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return ixgbe_xsk_umem_setup(adapter, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return ixgbe_xsk_pool_setup(adapter, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 7980d7265e10..f77fa3e4fdd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -771,7 +771,7 @@ mii_bus_write_done:
/**
* ixgbe_mii_bus_read - Read a clause 22/45 register
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
@@ -786,7 +786,7 @@ static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum)
/**
* ixgbe_mii_bus_write - Write a clause 22/45 register
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
* @val: value to write
@@ -803,7 +803,7 @@ static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum,
/**
* ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
**/
@@ -820,7 +820,7 @@ static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr,
/**
* ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a
- * @hw: pointer to hardware structure
+ * @bus: pointer to mii_bus structure which points to our driver private
* @addr: address
* @regnum: register number
* @val: value to write
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 7887ae4aaf4f..2aeec78029bc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -28,9 +28,10 @@ void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring);
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring);
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring);
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid);
void ixgbe_zca_free(struct zero_copy_allocator *alloc, unsigned long handle);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ec7121f352e2..3771857cf887 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -8,8 +8,8 @@
#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
-struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *ring)
+struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
+ struct ixgbe_ring *ring)
{
bool xdp_on = READ_ONCE(adapter->xdp_prog);
int qid = ring->ring_idx;
@@ -17,11 +17,11 @@ struct xdp_umem *ixgbe_xsk_umem(struct ixgbe_adapter *adapter,
if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
return NULL;
- return xdp_get_umem_from_qid(adapter->netdev, qid);
+ return xsk_get_pool_from_qid(adapter->netdev, qid);
}
-static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
- struct xdp_umem *umem,
+static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
struct net_device *netdev = adapter->netdev;
@@ -35,7 +35,7 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
qid >= netdev->real_num_tx_queues)
return -EINVAL;
- err = xsk_buff_dma_map(umem, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
+ err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
if (err)
return err;
@@ -59,13 +59,13 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter,
return 0;
}
-static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
+static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
bool if_running;
- umem = xdp_get_umem_from_qid(adapter->netdev, qid);
- if (!umem)
+ pool = xsk_get_pool_from_qid(adapter->netdev, qid);
+ if (!pool)
return -EINVAL;
if_running = netif_running(adapter->netdev) &&
@@ -75,7 +75,7 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
ixgbe_txrx_ring_disable(adapter, qid);
clear_bit(qid, adapter->af_xdp_zc_qps);
- xsk_buff_dma_unmap(umem, IXGBE_RX_DMA_ATTR);
+ xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
if (if_running)
ixgbe_txrx_ring_enable(adapter, qid);
@@ -83,11 +83,12 @@ static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid)
return 0;
}
-int ixgbe_xsk_umem_setup(struct ixgbe_adapter *adapter, struct xdp_umem *umem,
+int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
+ struct xsk_buff_pool *pool,
u16 qid)
{
- return umem ? ixgbe_xsk_umem_enable(adapter, umem, qid) :
- ixgbe_xsk_umem_disable(adapter, qid);
+ return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
+ ixgbe_xsk_pool_disable(adapter, qid);
}
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
@@ -149,7 +150,7 @@ bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
i -= rx_ring->count;
do {
- bi->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
+ bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
if (!bi->xdp) {
ok = false;
break;
@@ -286,7 +287,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp);
+ xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (xdp_res) {
@@ -344,11 +345,11 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
q_vector->rx.total_packets += total_rx_packets;
q_vector->rx.total_bytes += total_rx_bytes;
- if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) {
+ if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_umem);
+ xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
return (int)total_rx_packets;
}
@@ -373,6 +374,7 @@ void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
+ struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc = NULL;
struct ixgbe_tx_buffer *tx_bi;
bool work_done = true;
@@ -387,12 +389,11 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
+ if (!xsk_tx_peek_desc(pool, &desc))
break;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_umem, desc.addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_umem, dma,
- desc.len);
+ dma = xsk_buff_raw_get_dma(pool, desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
tx_bi->bytecount = desc.len;
@@ -418,7 +419,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
if (tx_desc) {
ixgbe_xdp_ring_update_tail(xdp_ring);
- xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
+ xsk_tx_release(pool);
}
return !!budget && work_done;
@@ -439,7 +440,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
unsigned int total_packets = 0, total_bytes = 0;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
union ixgbe_adv_tx_desc *tx_desc;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -484,10 +485,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
q_vector->tx.total_packets += total_packets;
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
- if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
- xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
+ if (xsk_uses_need_wakeup(pool))
+ xsk_set_tx_need_wakeup(pool);
return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
@@ -511,7 +512,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
return -ENETDOWN;
- if (!ring->xsk_umem)
+ if (!ring->xsk_pool)
return -ENXIO;
if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
@@ -526,7 +527,7 @@ int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
- struct xdp_umem *umem = tx_ring->xsk_umem;
+ struct xsk_buff_pool *pool = tx_ring->xsk_pool;
struct ixgbe_tx_buffer *tx_bi;
u32 xsk_frames = 0;
@@ -546,5 +547,5 @@ void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
}
if (xsk_frames)
- xsk_umem_complete_tx(umem, xsk_frames);
+ xsk_tx_completed(pool, xsk_frames);
}
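
[Editor's note] The ixgbe hunks above track the AF_XDP rework that moves the driver-facing API from the UMEM to the xsk_buff_pool. As a rough composite of the TX-side calls renamed here (the ring type and the hardware-posting step are hypothetical placeholders, not ixgbe code):

#include <net/xdp_sock_drv.h>

struct demo_ring {			/* hypothetical driver ring */
	struct xsk_buff_pool *xsk_pool;
};

/* Minimal sketch of the zero-copy TX pattern using the pool-centric
 * helpers adopted above: peek a descriptor, resolve and sync its DMA
 * address through the pool, post it to hardware, then release the
 * consumer ring in one batch.
 */
static bool demo_xmit_zc(struct demo_ring *ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = ring->xsk_pool;
	struct xdp_desc desc;
	dma_addr_t dma;
	bool sent = false;

	while (budget--) {
		if (!xsk_tx_peek_desc(pool, &desc))
			break;			/* nothing left to send */

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		/* ...fill and post a hardware TX descriptor here... */
		sent = true;
	}

	if (sent)
		xsk_tx_release(pool);		/* batch-complete the peeks */

	return sent;
}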
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a428113e6d54..82fce27f682b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -246,8 +246,9 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
/**
* ixgbevf_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
+ * @txqueue: transmit queue hanging (unused)
**/
-static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
+static void ixgbevf_tx_timeout(struct net_device *netdev, unsigned int __always_unused txqueue)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -866,10 +867,8 @@ struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb;
/* prefetch first cache line of first page */
- prefetch(xdp->data);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data);
+
/* Note, we get here by enabling legacy-rx via:
*
* ethtool --set-priv-flags <dev> legacy-rx on
@@ -947,10 +946,7 @@ static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
* have a consumer accessing first few bytes of meta data,
* and then actual data.
*/
- prefetch(xdp->data_meta);
-#if L1_CACHE_BYTES < 128
- prefetch(xdp->data_meta + L1_CACHE_BYTES);
-#endif
+ net_prefetch(xdp->data_meta);
/* build an skb around the page buffer */
skb = build_skb(xdp->data_hard_start, truesize);
@@ -2526,8 +2522,6 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
{
- WARN_ON(in_interrupt());
-
while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
msleep(1);
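
[Editor's note] The ixgbevf hunks fold the open-coded prefetch pair into net_prefetch(); the WARN_ON(in_interrupt()) removal is the usual cleanup from the in_interrupt() deprecation series. For reference, net_prefetch() amounts to the following helper, shown here as a sketch of its <linux/netdevice.h> definition at the time:

#include <linux/types.h>
#include <linux/cache.h>
#include <linux/prefetch.h>

/* Prefetch the start of a packet; on CPUs with cache lines shorter
 * than 128 bytes, also pull in the second line so the typical header
 * area is covered by a single call site.
 */
static inline void net_prefetch(void *p)
{
	prefetch(p);
#if L1_CACHE_BYTES < 128
	prefetch((u8 *)p + L1_CACHE_BYTES);
#endif
}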
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index ddc757680089..e9efe074edc1 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1187,9 +1187,9 @@ jme_shutdown_nic(struct jme_adapter *jme)
}
static void
-jme_pcc_tasklet(unsigned long arg)
+jme_pcc_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, pcc_task);
struct net_device *netdev = jme->dev;
if (unlikely(test_bit(JME_FLAG_SHUTDOWN, &jme->flags))) {
@@ -1265,10 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapter *jme)
jwrite32f(jme, JME_APMC, apmc);
}
-static void
-jme_link_change_tasklet(unsigned long arg)
+static void jme_link_change_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
struct net_device *netdev = jme->dev;
int rc;
@@ -1345,9 +1344,9 @@ out:
}
static void
-jme_rx_clean_tasklet(unsigned long arg)
+jme_rx_clean_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, rxclean_task);
struct dynpcc_info *dpi = &(jme->dpi);
jme_process_receive(jme, jme->rx_ring_size);
@@ -1380,9 +1379,9 @@ jme_poll(JME_NAPI_HOLDER(holder), JME_NAPI_WEIGHT(budget))
}
static void
-jme_rx_empty_tasklet(unsigned long arg)
+jme_rx_empty_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, rxempty_task);
if (unlikely(atomic_read(&jme->link_changing) != 1))
return;
@@ -1392,7 +1391,7 @@ jme_rx_empty_tasklet(unsigned long arg)
netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
- jme_rx_clean_tasklet(arg);
+ jme_rx_clean_tasklet(&jme->rxclean_task);
while (atomic_read(&jme->rx_empty) > 0) {
atomic_dec(&jme->rx_empty);
@@ -1416,10 +1415,9 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
}
-static void
-jme_tx_clean_tasklet(unsigned long arg)
+static void jme_tx_clean_tasklet(struct tasklet_struct *t)
{
- struct jme_adapter *jme = (struct jme_adapter *)arg;
+ struct jme_adapter *jme = from_tasklet(jme, t, txclean_task);
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi, *ttxbi;
@@ -1834,14 +1832,10 @@ jme_open(struct net_device *netdev)
jme_clear_pm_disable_wol(jme);
JME_NAPI_ENABLE(jme);
- tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->txclean_task, jme_tx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxclean_task, jme_rx_clean_tasklet,
- (unsigned long) jme);
- tasklet_init(&jme->rxempty_task, jme_rx_empty_tasklet,
- (unsigned long) jme);
+ tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
+ tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
+ tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
+ tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
rc = jme_request_irq(jme);
if (rc)
@@ -3040,9 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
atomic_set(&jme->tx_cleaning, 1);
atomic_set(&jme->rx_empty, 1);
- tasklet_init(&jme->pcc_task,
- jme_pcc_tasklet,
- (unsigned long) jme);
+ tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
jme->dpi.cur = PCC_P1;
jme->reg_ghc = 0;
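
[Editor's note] The jme.c conversion uses the then-new tasklet_setup()/from_tasklet() pair: the callback now receives the tasklet_struct itself and recovers its container, instead of an opaque unsigned long. A minimal sketch of the pattern (adapter layout hypothetical):

#include <linux/interrupt.h>

struct demo_adapter {			/* hypothetical container */
	struct tasklet_struct rxclean_task;
	int pending;
};

/* from_tasklet() is container_of() specialized for tasklets: it maps
 * the callback's tasklet_struct pointer back to the embedding object,
 * using the field name as the offset.
 */
static void demo_rx_clean_tasklet(struct tasklet_struct *t)
{
	struct demo_adapter *ap = from_tasklet(ap, t, rxclean_task);

	ap->pending = 0;		/* ...do the actual RX cleaning... */
}

static void demo_open(struct demo_adapter *ap)
{
	/* No data argument anymore; the tasklet itself is the handle. */
	tasklet_setup(&ap->rxclean_task, demo_rx_clean_tasklet);
}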
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 03e034918d14..af441d699a57 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1113,7 +1113,7 @@ out:
return rc;
probe_err_register:
- kfree(lp->td_ring);
+ kfree(KSEG0ADDR(lp->td_ring));
probe_err_td_ring:
iounmap(lp->tx_dma_regs);
probe_err_dma_tx:
@@ -1133,6 +1133,7 @@ static int korina_remove(struct platform_device *pdev)
iounmap(lp->eth_regs);
iounmap(lp->rx_dma_regs);
iounmap(lp->tx_dma_regs);
+ kfree(KSEG0ADDR(lp->td_ring));
unregister_netdev(bif->dev);
free_netdev(bif->dev);
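
[Editor's note] Context for the korina fix: the probe path allocates td_ring with kmalloc() but then stores the uncached KSEG1 alias of that buffer, so the saved pointer is no longer the one kmalloc() returned, and kfree() must translate back to the cached KSEG0 alias. A hedged sketch of the aliasing involved (MIPS-only; the allocation side is reconstructed from the driver, not shown in this hunk):

#include <linux/slab.h>
#include <asm/addrspace.h>	/* MIPS KSEG0ADDR()/KSEG1ADDR() */

/* KSEG0 is the cached mapping of low physical memory, KSEG1 the
 * uncached alias of the same RAM. Field names as in korina.c.
 */
static int demo_ring_alloc(struct korina_private *lp, size_t ring_size)
{
	void *ring = kmalloc(ring_size, GFP_KERNEL);	/* KSEG0, cached */

	if (!ring)
		return -ENOMEM;

	/* The driver keeps only the uncached alias... */
	lp->td_ring = (struct dma_desc *)KSEG1ADDR(ring);
	return 0;
}

static void demo_ring_free(struct korina_private *lp)
{
	/* ...so freeing must translate back, as the hunks above now do. */
	kfree((void *)KSEG0ADDR(lp->td_ring));
}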
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index ef4f35ba077d..41815b609569 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -92,6 +92,12 @@ config MVPP2
This driver supports the network interface units in the
Marvell ARMADA 375, 7K and 8K SoCs.
+config MVPP2_PTP
+ bool "Marvell Armada 8K Enable PTP support"
+ depends on NETWORK_PHY_TIMESTAMPING
+ depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
+ (PTP_1588_CLOCK && MVPP2 = m)
+
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
depends on HAS_IOMEM
@@ -172,5 +178,6 @@ config SKY2_DEBUG
source "drivers/net/ethernet/marvell/octeontx2/Kconfig"
+source "drivers/net/ethernet/marvell/prestera/Kconfig"
endif # NET_VENDOR_MARVELL
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 89dea7284d5b..9f88fe822555 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-y += octeontx2/
+obj-y += prestera/
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 5bf0409f5d42..54b0bf574c05 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -330,7 +330,6 @@
#define MVNETA_SKB_HEADROOM ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8)
#define MVNETA_SKB_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info) + \
MVNETA_SKB_HEADROOM))
-#define MVNETA_SKB_SIZE(len) (SKB_DATA_ALIGN(len) + MVNETA_SKB_PAD)
#define MVNETA_MAX_RX_BUF_SIZE (PAGE_SIZE - MVNETA_SKB_PAD)
#define IS_TSO_HEADER(txq, addr) \
@@ -752,13 +751,12 @@ static void mvneta_txq_inc_put(struct mvneta_tx_queue *txq)
static void mvneta_mib_counters_clear(struct mvneta_port *pp)
{
int i;
- u32 dummy;
/* Perform dummy reads from MIB counters */
for (i = 0; i < MVNETA_MIB_LATE_COLLISION; i += 4)
- dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
- dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
- dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
+ mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i));
+ mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT);
+ mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT);
}
/* Get System Network Statistics */
@@ -1833,7 +1831,7 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
struct mvneta_tx_queue *txq, int num,
- struct netdev_queue *nq)
+ struct netdev_queue *nq, bool napi)
{
unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
@@ -1856,7 +1854,10 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
dev_kfree_skb_any(buf->skb);
} else if (buf->type == MVNETA_TYPE_XDP_TX ||
buf->type == MVNETA_TYPE_XDP_NDO) {
- xdp_return_frame(buf->xdpf);
+ if (napi && buf->type == MVNETA_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(buf->xdpf);
+ else
+ xdp_return_frame(buf->xdpf);
}
}
@@ -1874,7 +1875,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, true);
txq->count -= tx_done;
@@ -2227,8 +2228,7 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, int *size,
- struct page *page,
- struct mvneta_stats *stats)
+ struct page *page)
{
unsigned char *data = page_address(page);
int data_len = -MVNETA_MH_SIZE, len;
@@ -2236,19 +2236,22 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
enum dma_data_direction dma_dir;
struct skb_shared_info *sinfo;
- if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
+ if (*size > MVNETA_MAX_RX_BUF_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE;
data_len += len;
} else {
- len = rx_desc->data_size;
+ len = *size;
data_len += len - ETH_FCS_LEN;
}
+ *size = *size - len;
dma_dir = page_pool_get_dma_dir(rxq->page_pool);
dma_sync_single_for_cpu(dev->dev.parent,
rx_desc->buf_phys_addr,
len, dma_dir);
+ rx_desc->buf_phys_addr = 0;
+
/* Prefetch header */
prefetch(data);
@@ -2259,9 +2262,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0;
-
- *size = rx_desc->data_size - len;
- rx_desc->buf_phys_addr = 0;
}
static void
@@ -2307,11 +2307,8 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
{
struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
int i, num_frags = sinfo->nr_frags;
- skb_frag_t frags[MAX_SKB_FRAGS];
struct sk_buff *skb;
- memcpy(frags, sinfo->frags, sizeof(skb_frag_t) * num_frags);
-
skb = build_skb(xdp->data_hard_start, PAGE_SIZE);
if (!skb)
return ERR_PTR(-ENOMEM);
@@ -2323,12 +2320,12 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
mvneta_rx_csum(pp, desc_status, skb);
for (i = 0; i < num_frags; i++) {
- struct page *page = skb_frag_page(&frags[i]);
+ skb_frag_t *frag = &sinfo->frags[i];
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, skb_frag_off(&frags[i]),
- skb_frag_size(&frags[i]), PAGE_SIZE);
- page_pool_release_page(rxq->page_pool, page);
+ skb_frag_page(frag), skb_frag_off(frag),
+ skb_frag_size(frag), PAGE_SIZE);
+ page_pool_release_page(rxq->page_pool, skb_frag_page(frag));
}
return skb;
@@ -2378,10 +2375,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
size = rx_desc->data_size;
frame_sz = size - ETH_FCS_LEN;
- desc_status = rx_desc->status;
+ desc_status = rx_status;
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
- &size, page, &ps);
+ &size, page);
} else {
if (unlikely(!xdp_buf.data_hard_start)) {
rx_desc->buf_phys_addr = 0;
@@ -2865,7 +2862,7 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done, nq);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq, false);
/* reset txq */
txq->count = 0;
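
[Editor's note] The new napi flag on mvneta_txq_bufs_free() exists because xdp_return_frame_rx_napi() recycles through the lockless per-CPU path and is only safe from the owning NAPI poll context; the forced-cleanup caller (mvneta_txq_done_force) must use the generic variant. The pattern, as a sketch:

#include <net/xdp.h>

/* Return an XDP frame on the TX completion path. The _rx_napi variant
 * may recycle directly into the page_pool's lockless cache, so it is
 * only valid when called from the NAPI context that owns the ring;
 * any other caller must take the slower, context-safe path.
 */
static void demo_return_xdp_frame(struct xdp_frame *xdpf, bool in_napi)
{
	if (in_napi)
		xdp_return_frame_rx_napi(xdpf);
	else
		xdp_return_frame(xdpf);
}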
diff --git a/drivers/net/ethernet/marvell/mvpp2/Makefile b/drivers/net/ethernet/marvell/mvpp2/Makefile
index 51f65a202c6e..9bd8e7964b40 100644
--- a/drivers/net/ethernet/marvell/mvpp2/Makefile
+++ b/drivers/net/ethernet/marvell/mvpp2/Makefile
@@ -4,4 +4,5 @@
#
obj-$(CONFIG_MVPP2) := mvpp2.o
-mvpp2-objs := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
+mvpp2-y := mvpp2_main.o mvpp2_prs.o mvpp2_cls.o mvpp2_debugfs.o
+mvpp2-$(CONFIG_MVPP2_PTP) += mvpp2_tai.o
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index 32753cc771bf..834775843067 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <net/flow_offload.h>
@@ -461,8 +462,12 @@
#define MVPP22_CTRL4_DP_CLK_SEL BIT(5)
#define MVPP22_CTRL4_SYNC_BYPASS_DIS BIT(6)
#define MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE BIT(7)
+#define MVPP22_GMAC_INT_SUM_STAT 0xa0
+#define MVPP22_GMAC_INT_SUM_STAT_INTERNAL BIT(1)
+#define MVPP22_GMAC_INT_SUM_STAT_PTP BIT(2)
#define MVPP22_GMAC_INT_SUM_MASK 0xa4
#define MVPP22_GMAC_INT_SUM_MASK_LINK_STAT BIT(1)
+#define MVPP22_GMAC_INT_SUM_MASK_PTP BIT(2)
/* Per-port XGMAC registers. PPv2.2 only, only for GOP port 0,
* relative to port->base.
@@ -488,9 +493,13 @@
#define MVPP22_XLG_CTRL3_MACMODESELECT_MASK (7 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_GMAC (0 << 13)
#define MVPP22_XLG_CTRL3_MACMODESELECT_10G (1 << 13)
+#define MVPP22_XLG_EXT_INT_STAT 0x158
+#define MVPP22_XLG_EXT_INT_STAT_XLG BIT(1)
+#define MVPP22_XLG_EXT_INT_STAT_PTP BIT(7)
#define MVPP22_XLG_EXT_INT_MASK 0x15c
#define MVPP22_XLG_EXT_INT_MASK_XLG BIT(1)
#define MVPP22_XLG_EXT_INT_MASK_GIG BIT(2)
+#define MVPP22_XLG_EXT_INT_MASK_PTP BIT(7)
#define MVPP22_XLG_CTRL4_REG 0x184
#define MVPP22_XLG_CTRL4_FWD_FC BIT(5)
#define MVPP22_XLG_CTRL4_FWD_PFC BIT(6)
@@ -501,6 +510,70 @@
#define MVPP22_SMI_MISC_CFG_REG 0x1204
#define MVPP22_SMI_POLLING_EN BIT(10)
+/* TAI registers, PPv2.2 only, relative to priv->iface_base */
+#define MVPP22_TAI_INT_CAUSE 0x1400
+#define MVPP22_TAI_INT_MASK 0x1404
+#define MVPP22_TAI_CR0 0x1408
+#define MVPP22_TAI_CR1 0x140c
+#define MVPP22_TAI_TCFCR0 0x1410
+#define MVPP22_TAI_TCFCR1 0x1414
+#define MVPP22_TAI_TCFCR2 0x1418
+#define MVPP22_TAI_FATWR 0x141c
+#define MVPP22_TAI_TOD_STEP_NANO_CR 0x1420
+#define MVPP22_TAI_TOD_STEP_FRAC_HIGH 0x1424
+#define MVPP22_TAI_TOD_STEP_FRAC_LOW 0x1428
+#define MVPP22_TAI_TAPDC_HIGH 0x142c
+#define MVPP22_TAI_TAPDC_LOW 0x1430
+#define MVPP22_TAI_TGTOD_SEC_HIGH 0x1434
+#define MVPP22_TAI_TGTOD_SEC_MED 0x1438
+#define MVPP22_TAI_TGTOD_SEC_LOW 0x143c
+#define MVPP22_TAI_TGTOD_NANO_HIGH 0x1440
+#define MVPP22_TAI_TGTOD_NANO_LOW 0x1444
+#define MVPP22_TAI_TGTOD_FRAC_HIGH 0x1448
+#define MVPP22_TAI_TGTOD_FRAC_LOW 0x144c
+#define MVPP22_TAI_TLV_SEC_HIGH 0x1450
+#define MVPP22_TAI_TLV_SEC_MED 0x1454
+#define MVPP22_TAI_TLV_SEC_LOW 0x1458
+#define MVPP22_TAI_TLV_NANO_HIGH 0x145c
+#define MVPP22_TAI_TLV_NANO_LOW 0x1460
+#define MVPP22_TAI_TLV_FRAC_HIGH 0x1464
+#define MVPP22_TAI_TLV_FRAC_LOW 0x1468
+#define MVPP22_TAI_TCV0_SEC_HIGH 0x146c
+#define MVPP22_TAI_TCV0_SEC_MED 0x1470
+#define MVPP22_TAI_TCV0_SEC_LOW 0x1474
+#define MVPP22_TAI_TCV0_NANO_HIGH 0x1478
+#define MVPP22_TAI_TCV0_NANO_LOW 0x147c
+#define MVPP22_TAI_TCV0_FRAC_HIGH 0x1480
+#define MVPP22_TAI_TCV0_FRAC_LOW 0x1484
+#define MVPP22_TAI_TCV1_SEC_HIGH 0x1488
+#define MVPP22_TAI_TCV1_SEC_MED 0x148c
+#define MVPP22_TAI_TCV1_SEC_LOW 0x1490
+#define MVPP22_TAI_TCV1_NANO_HIGH 0x1494
+#define MVPP22_TAI_TCV1_NANO_LOW 0x1498
+#define MVPP22_TAI_TCV1_FRAC_HIGH 0x149c
+#define MVPP22_TAI_TCV1_FRAC_LOW 0x14a0
+#define MVPP22_TAI_TCSR 0x14a4
+#define MVPP22_TAI_TUC_LSB 0x14a8
+#define MVPP22_TAI_GFM_SEC_HIGH 0x14ac
+#define MVPP22_TAI_GFM_SEC_MED 0x14b0
+#define MVPP22_TAI_GFM_SEC_LOW 0x14b4
+#define MVPP22_TAI_GFM_NANO_HIGH 0x14b8
+#define MVPP22_TAI_GFM_NANO_LOW 0x14bc
+#define MVPP22_TAI_GFM_FRAC_HIGH 0x14c0
+#define MVPP22_TAI_GFM_FRAC_LOW 0x14c4
+#define MVPP22_TAI_PCLK_DA_HIGH 0x14c8
+#define MVPP22_TAI_PCLK_DA_LOW 0x14cc
+#define MVPP22_TAI_CTCR 0x14d0
+#define MVPP22_TAI_PCLK_CCC_HIGH 0x14d4
+#define MVPP22_TAI_PCLK_CCC_LOW 0x14d8
+#define MVPP22_TAI_DTC_HIGH 0x14dc
+#define MVPP22_TAI_DTC_LOW 0x14e0
+#define MVPP22_TAI_CCC_HIGH 0x14e4
+#define MVPP22_TAI_CCC_LOW 0x14e8
+#define MVPP22_TAI_ICICE 0x14f4
+#define MVPP22_TAI_ICICC_LOW 0x14f8
+#define MVPP22_TAI_TUC_MSB 0x14fc
+
#define MVPP22_GMAC_BASE(port) (0x7000 + (port) * 0x1000 + 0xe00)
#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff
@@ -527,6 +600,46 @@
#define MVPP22_XPCS_CFG0_PCS_MODE(n) ((n) << 3)
#define MVPP22_XPCS_CFG0_ACTIVE_LANE(n) ((n) << 5)
+/* PTP registers. PPv2.2 only */
+#define MVPP22_PTP_BASE(port) (0x7800 + (port * 0x1000))
+#define MVPP22_PTP_INT_CAUSE 0x00
+#define MVPP22_PTP_INT_CAUSE_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_CAUSE_QUEUE0 BIT(5)
+#define MVPP22_PTP_INT_MASK 0x04
+#define MVPP22_PTP_INT_MASK_QUEUE1 BIT(6)
+#define MVPP22_PTP_INT_MASK_QUEUE0 BIT(5)
+#define MVPP22_PTP_GCR 0x08
+#define MVPP22_PTP_GCR_RX_RESET BIT(13)
+#define MVPP22_PTP_GCR_TX_RESET BIT(1)
+#define MVPP22_PTP_GCR_TSU_ENABLE BIT(0)
+#define MVPP22_PTP_TX_Q0_R0 0x0c
+#define MVPP22_PTP_TX_Q0_R1 0x10
+#define MVPP22_PTP_TX_Q0_R2 0x14
+#define MVPP22_PTP_TX_Q1_R0 0x18
+#define MVPP22_PTP_TX_Q1_R1 0x1c
+#define MVPP22_PTP_TX_Q1_R2 0x20
+#define MVPP22_PTP_TPCR 0x24
+#define MVPP22_PTP_V1PCR 0x28
+#define MVPP22_PTP_V2PCR 0x2c
+#define MVPP22_PTP_Y1731PCR 0x30
+#define MVPP22_PTP_NTPTSPCR 0x34
+#define MVPP22_PTP_NTPRXPCR 0x38
+#define MVPP22_PTP_NTPTXPCR 0x3c
+#define MVPP22_PTP_WAMPPCR 0x40
+#define MVPP22_PTP_NAPCR 0x44
+#define MVPP22_PTP_FAPCR 0x48
+#define MVPP22_PTP_CAPCR 0x50
+#define MVPP22_PTP_ATAPCR 0x54
+#define MVPP22_PTP_ACTAPCR 0x58
+#define MVPP22_PTP_CATAPCR 0x5c
+#define MVPP22_PTP_CACTAPCR 0x60
+#define MVPP22_PTP_AITAPCR 0x64
+#define MVPP22_PTP_CAITAPCR 0x68
+#define MVPP22_PTP_CITAPCR 0x6c
+#define MVPP22_PTP_NTP_OFF_HIGH 0x70
+#define MVPP22_PTP_NTP_OFF_LOW 0x74
+#define MVPP22_PTP_TX_PIPE_STATUS_DELAY 0x78
+
/* System controller registers. Accessed through a regmap. */
#define GENCONF_SOFT_RESET1 0x1108
#define GENCONF_SOFT_RESET1_GOP BIT(6)
@@ -692,6 +805,43 @@ enum mvpp2_prs_l3_cast {
MVPP2_PRS_L3_BROAD_CAST
};
+/* PTP descriptor constants. The low bits of the descriptor are stored
+ * separately from the high bits.
+ */
+#define MVPP22_PTP_DESC_MASK_LOW 0xfff
+
+/* PTPAction */
+enum mvpp22_ptp_action {
+ MVPP22_PTP_ACTION_NONE = 0,
+ MVPP22_PTP_ACTION_FORWARD = 1,
+ MVPP22_PTP_ACTION_CAPTURE = 3,
+ /* The following have not been verified */
+ MVPP22_PTP_ACTION_ADDTIME = 4,
+ MVPP22_PTP_ACTION_ADDCORRECTEDTIME = 5,
+ MVPP22_PTP_ACTION_CAPTUREADDTIME = 6,
+ MVPP22_PTP_ACTION_CAPTUREADDCORRECTEDTIME = 7,
+ MVPP22_PTP_ACTION_ADDINGRESSTIME = 8,
+ MVPP22_PTP_ACTION_CAPTUREADDINGRESSTIME = 9,
+ MVPP22_PTP_ACTION_CAPTUREINGRESSTIME = 10,
+};
+
+/* PTPPacketFormat */
+enum mvpp22_ptp_packet_format {
+ MVPP22_PTP_PKT_FMT_PTPV2 = 0,
+ MVPP22_PTP_PKT_FMT_PTPV1 = 1,
+ MVPP22_PTP_PKT_FMT_Y1731 = 2,
+ MVPP22_PTP_PKT_FMT_NTPTS = 3,
+ MVPP22_PTP_PKT_FMT_NTPRX = 4,
+ MVPP22_PTP_PKT_FMT_NTPTX = 5,
+ MVPP22_PTP_PKT_FMT_TWAMP = 6,
+};
+
+#define MVPP22_PTP_ACTION(x) (((x) & 15) << 0)
+#define MVPP22_PTP_PACKETFORMAT(x) (((x) & 7) << 4)
+#define MVPP22_PTP_MACTIMESTAMPINGEN BIT(11)
+#define MVPP22_PTP_TIMESTAMPENTRYID(x) (((x) & 31) << 12)
+#define MVPP22_PTP_TIMESTAMPQUEUESELECT BIT(18)
+
/* BM constants */
#define MVPP2_BM_JUMBO_BUF_NUM 512
#define MVPP2_BM_LONG_BUF_NUM 1024
@@ -759,6 +909,8 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40)
+struct mvpp2_tai;
+
/* Definitions */
struct mvpp2_dbgfs_entries;
@@ -794,6 +946,7 @@ struct mvpp2 {
/* List of pointers to port structures */
int port_count;
struct mvpp2_port *port_list[MVPP2_MAX_PORTS];
+ struct mvpp2_tai *tai;
/* Number of Tx threads used */
unsigned int nthreads;
@@ -907,6 +1060,11 @@ struct mvpp2_ethtool_fs {
struct ethtool_rxnfc rxnfc;
};
+struct mvpp2_hwtstamp_queue {
+ struct sk_buff *skb[32];
+ u8 next;
+};
+
struct mvpp2_port {
u8 id;
@@ -915,7 +1073,7 @@ struct mvpp2_port {
*/
int gop_id;
- int link_irq;
+ int port_irq;
struct mvpp2 *priv;
@@ -967,6 +1125,7 @@ struct mvpp2_port {
phy_interface_t phy_interface;
struct phylink *phylink;
struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
struct phy *comphy;
struct mvpp2_bm_pool *pool_long;
@@ -989,6 +1148,11 @@ struct mvpp2_port {
* them from 0
*/
int rss_ctx[MVPP22_N_RSS_TABLES];
+
+ bool hwtstamp;
+ bool rx_hwtstamp;
+ enum hwtstamp_tx_types tx_hwtstamp_type;
+ struct mvpp2_hwtstamp_queue tx_hwtstamp_queue[2];
};
/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
@@ -1057,7 +1221,8 @@ struct mvpp22_tx_desc {
u8 packet_offset;
u8 phys_txq;
__le16 data_size;
- __le64 reserved1;
+ __le32 ptp_descriptor;
+ __le32 reserved2;
__le64 buf_dma_addr_ptp;
__le64 buf_cookie_misc;
};
@@ -1068,7 +1233,7 @@ struct mvpp22_rx_desc {
__le16 reserved1;
__le16 data_size;
__le32 reserved2;
- __le32 reserved3;
+ __le32 timestamp;
__le64 buf_dma_addr_key_hash;
__le64 buf_cookie_misc;
};
@@ -1248,4 +1413,36 @@ void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+#ifdef CONFIG_MVPP2_PTP
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv);
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp);
+void mvpp22_tai_start(struct mvpp2_tai *tai);
+void mvpp22_tai_stop(struct mvpp2_tai *tai);
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai);
+#else
+static inline int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ return 0;
+}
+static inline void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+}
+static inline void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+}
+static inline void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+}
+static inline int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return -1;
+}
+#endif
+
+static inline bool mvpp22_rx_hwtstamping(struct mvpp2_port *port)
+{
+ return IS_ENABLED(CONFIG_MVPP2_PTP) && port->rx_hwtstamp;
+}
#endif
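
[Editor's note] mvpp2_hwtstamp_queue above is a 32-slot ring pairing in-flight skbs with hardware timestamp completions: the TX path records the skb at the next slot and encodes that slot index into the descriptor's TIMESTAMPENTRYID field, and the PTP interrupt later matches the index it reads back from the hardware queue. A sketch of the enqueue half, mirroring the code added to mvpp2_main.c further below:

#include <linux/skbuff.h>

/* Stash an skb awaiting a TX timestamp; returns the slot index to be
 * encoded as MVPP22_PTP_TIMESTAMPENTRYID() in the TX descriptor.
 */
static unsigned int demo_hwtstamp_enqueue(struct mvpp2_hwtstamp_queue *q,
					  struct sk_buff *skb)
{
	unsigned int i = q->next;

	q->next = (i + 1) & 31;		/* wrap within the 32 slots */
	if (q->skb[i])			/* slot never completed: drop it */
		dev_kfree_skb_any(q->skb[i]);
	q->skb[i] = skb_get(skb);	/* hold a ref until the IRQ fires */

	return i;
}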
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 6e140d1b8967..f6616c8933ca 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
+#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
@@ -57,13 +58,7 @@ static struct {
/* The prototype is added here to be used in start_dev when using ACPI. This
* will be removed once phylink is used for all modes (dt+ACPI).
*/
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state);
-static void mvpp2_mac_link_up(struct phylink_config *config,
- struct phy_device *phy,
- unsigned int mode, phy_interface_t interface,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
+static void mvpp2_acpi_start(struct mvpp2_port *port);
/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE 0
@@ -1385,6 +1380,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
u32 val;
+ mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
+ MVPP22_GMAC_INT_SUM_MASK_PTP,
+ MVPP22_GMAC_INT_SUM_MASK_PTP);
+
if (port->phylink ||
phy_interface_mode_is_rgmii(port->phy_interface) ||
phy_interface_mode_is_8023z(port->phy_interface) ||
@@ -1398,6 +1397,10 @@ static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
val = readl(port->base + MVPP22_XLG_INT_MASK);
val |= MVPP22_XLG_INT_MASK_LINK;
writel(val, port->base + MVPP22_XLG_INT_MASK);
+
+ mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
+ MVPP22_XLG_EXT_INT_MASK_PTP,
+ MVPP22_XLG_EXT_INT_MASK_PTP);
}
mvpp22_gop_unmask_irq(port);
@@ -1485,8 +1488,8 @@ static void mvpp2_port_loopback_set(struct mvpp2_port *port,
else
val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
- if (phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+ if (phy_interface_mode_is_8023z(state->interface) ||
+ state->interface == PHY_INTERFACE_MODE_SGMII)
val |= MVPP2_GMAC_PCS_LB_EN_MASK;
else
val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
@@ -2980,44 +2983,67 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* Per-port interrupt for link status changes */
-static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
+static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
{
- struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
- struct net_device *dev = port->dev;
- bool event = false, link = false;
- u32 val;
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct mvpp2_hwtstamp_queue *queue;
+ struct sk_buff *skb;
+ void __iomem *ptp_q;
+ unsigned int id;
+ u32 r0, r1, r2;
- mvpp22_gop_mask_irq(port);
+ ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ if (nq)
+ ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
- if (mvpp2_port_supports_xlg(port) &&
- mvpp2_is_xlg(port->phy_interface)) {
- val = readl(port->base + MVPP22_XLG_INT_STAT);
- if (val & MVPP22_XLG_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP22_XLG_STATUS);
- if (val & MVPP22_XLG_STATUS_LINK_UP)
- link = true;
- }
- } else if (phy_interface_mode_is_rgmii(port->phy_interface) ||
- phy_interface_mode_is_8023z(port->phy_interface) ||
- port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
- val = readl(port->base + MVPP22_GMAC_INT_STAT);
- if (val & MVPP22_GMAC_INT_STAT_LINK) {
- event = true;
- val = readl(port->base + MVPP2_GMAC_STATUS0);
- if (val & MVPP2_GMAC_STATUS0_LINK_UP)
- link = true;
+ queue = &port->tx_hwtstamp_queue[nq];
+
+ while (1) {
+ r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
+ if (!r0)
+ break;
+
+ r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
+ r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
+
+ id = (r0 >> 1) & 31;
+
+ skb = queue->skb[id];
+ queue->skb[id] = NULL;
+ if (skb) {
+ u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
+
+ mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
}
}
+}
+
+static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
+{
+ void __iomem *ptp;
+ u32 val;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+ val = readl(ptp + MVPP22_PTP_INT_CAUSE);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
+ mvpp2_isr_handle_ptp_queue(port, 0);
+ if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
+ mvpp2_isr_handle_ptp_queue(port, 1);
+}
+
+static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
+{
+ struct net_device *dev = port->dev;
if (port->phylink) {
phylink_mac_change(port->phylink, link);
- goto handled;
+ return;
}
- if (!netif_running(dev) || !event)
- goto handled;
+ if (!netif_running(dev))
+ return;
if (link) {
mvpp2_interrupts_enable(port);
@@ -3034,8 +3060,65 @@ static irqreturn_t mvpp2_link_status_isr(int irq, void *dev_id)
mvpp2_interrupts_disable(port);
}
+}
+
+static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ val = readl(port->base + MVPP22_XLG_INT_STAT);
+ if (val & MVPP22_XLG_INT_STAT_LINK) {
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ link = (val & MVPP22_XLG_STATUS_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+}
+
+static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
+{
+ bool link;
+ u32 val;
+
+ if (phy_interface_mode_is_rgmii(port->phy_interface) ||
+ phy_interface_mode_is_8023z(port->phy_interface) ||
+ port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
+ val = readl(port->base + MVPP22_GMAC_INT_STAT);
+ if (val & MVPP22_GMAC_INT_STAT_LINK) {
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+ link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
+ mvpp2_isr_handle_link(port, link);
+ }
+ }
+}
+
+/* Per-port interrupt for link status changes */
+static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
+{
+ struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+ u32 val;
+
+ mvpp22_gop_mask_irq(port);
+
+ if (mvpp2_port_supports_xlg(port) &&
+ mvpp2_is_xlg(port->phy_interface)) {
+ /* Check the external status register */
+ val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
+ if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
+ mvpp2_isr_handle_xlg(port);
+ if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ } else {
+ /* If it's not the XLG, we must be using the GMAC.
+ * Check the summary status.
+ */
+ val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
+ mvpp2_isr_handle_gmac_internal(port);
+ if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
+ mvpp2_isr_handle_ptp(port);
+ }
-handled:
mvpp22_gop_unmask_irq(port);
return IRQ_HANDLED;
}
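
[Editor's note] One detail worth spelling out from the new PTP queue handler above: the 32-bit timestamp is reassembled from three 16-bit queue registers, and per the (r0 >> 1) & 31 expression, r0's low bits carry the entry id (bit 0 apparently acting as a valid flag, given the !r0 exit). The shift expression implies this layout; this is a reading of the code, not a datasheet quote:

#include <linux/types.h>

/* ts[31:19] = r2[12:0], ts[18:3] = r1[15:0], ts[2:0] = r0[15:13];
 * r0[5:1] holds the timestamp queue entry id.
 */
static inline u32 demo_ptp_queue_ts(u32 r0, u32 r1, u32 r2)
{
	return r2 << 19 | r1 << 3 | r0 >> 13;
}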
@@ -3427,7 +3510,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
unsigned int frag_size;
dma_addr_t dma_addr;
phys_addr_t phys_addr;
- u32 rx_status;
+ u32 rx_status, timestamp;
int pool, rx_bytes, err, ret;
void *data;
@@ -3505,6 +3588,15 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
goto err_drop_frame;
}
+ /* If we have RX hardware timestamping enabled, grab the
+ * timestamp from the queue and convert.
+ */
+ if (mvpp22_rx_hwtstamping(port)) {
+ timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
+ mvpp22_tai_tstamp(port->priv->tai, timestamp,
+ skb_hwtstamps(skb));
+ }
+
err = mvpp2_rx_refill(port, bm_pool, pp, pool);
if (err) {
netdev_err(port->dev, "failed to refill BM pools\n");
@@ -3579,6 +3671,94 @@ tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
mvpp2_txq_desc_put(txq);
}
+static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *desc)
+{
+ /* We only need to clear the low bits */
+ if (port->priv->hw_version != MVPP21)
+ desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+}
+
+static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
+ struct mvpp2_tx_desc *tx_desc,
+ struct sk_buff *skb)
+{
+ struct mvpp2_hwtstamp_queue *queue;
+ unsigned int mtype, type, i;
+ struct ptp_header *hdr;
+ u64 ptpdesc;
+
+ if (port->priv->hw_version == MVPP21 ||
+ port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
+ return false;
+
+ type = ptp_classify_raw(skb);
+ if (!type)
+ return false;
+
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return false;
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
+ MVPP22_PTP_ACTION_CAPTURE;
+ queue = &port->tx_hwtstamp_queue[0];
+
+ switch (type & PTP_CLASS_VMASK) {
+ case PTP_CLASS_V1:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
+ break;
+
+ case PTP_CLASS_V2:
+ ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
+ mtype = hdr->tsmt & 15;
+ /* Direct PTP Sync messages to queue 1 */
+ if (mtype == 0) {
+ ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
+ queue = &port->tx_hwtstamp_queue[1];
+ }
+ break;
+ }
+
+ /* Take a reference on the skb and insert into our queue */
+ i = queue->next;
+ queue->next = (i + 1) & 31;
+ if (queue->skb[i])
+ dev_kfree_skb_any(queue->skb[i]);
+ queue->skb[i] = skb_get(skb);
+
+ ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
+
+ /*
+ * 3:0 - PTPAction
+ * 6:4 - PTPPacketFormat
+ * 7 - PTP_CF_WraparoundCheckEn
+ * 9:8 - IngressTimestampSeconds[1:0]
+ * 10 - Reserved
+ * 11 - MACTimestampingEn
+ * 17:12 - PTP_TimestampQueueEntryID[5:0]
+ * 18 - PTPTimestampQueueSelect
+ * 19 - UDPChecksumUpdateEn
+ * 27:20 - TimestampOffset
+ * PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
+ * NTPTs, Y.1731 - L3 to timestamp entry
+ * 35:28 - UDP Checksum Offset
+ *
+ * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
+ */
+ tx_desc->pp22.ptp_descriptor &=
+ cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.ptp_descriptor |=
+ cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
+ tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
+ tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
+
+ return true;
+}
+
/* Handle tx fragmentation processing */
static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
struct mvpp2_tx_queue *aggr_txq,
@@ -3595,6 +3775,7 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
void *addr = skb_frag_address(frag);
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
@@ -3644,6 +3825,7 @@ static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
dma_addr_t addr;
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
@@ -3668,6 +3850,7 @@ static inline int mvpp2_tso_put_data(struct sk_buff *skb,
struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
dma_addr_t buf_dma_addr;
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, sz);
@@ -3784,6 +3967,9 @@ static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
+ mvpp2_txdesc_clear_ptp(port, tx_desc);
mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
@@ -4007,17 +4193,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
if (port->phylink) {
phylink_start(port->phylink);
} else {
- /* Phylink isn't used as of now for ACPI, so the MAC has to be
- * configured manually when the interface is started. This will
- * be removed as soon as the phylink ACPI support lands in.
- */
- struct phylink_link_state state = {
- .interface = port->phy_interface,
- };
- mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
- mvpp2_mac_link_up(&port->phylink_config, NULL,
- MLO_AN_INBAND, port->phy_interface,
- SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+ mvpp2_acpi_start(port);
}
netif_tx_start_all_queues(port->dev);
@@ -4227,12 +4403,13 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
}
- if (priv->hw_version == MVPP22 && port->link_irq) {
- err = request_irq(port->link_irq, mvpp2_link_status_isr, 0,
+ if (priv->hw_version == MVPP22 && port->port_irq) {
+ err = request_irq(port->port_irq, mvpp2_port_isr, 0,
dev->name, port);
if (err) {
- netdev_err(port->dev, "cannot request link IRQ %d\n",
- port->link_irq);
+ netdev_err(port->dev,
+ "cannot request port link/ptp IRQ %d\n",
+ port->port_irq);
goto err_free_irq;
}
@@ -4243,7 +4420,7 @@ static int mvpp2_open(struct net_device *dev)
valid = true;
} else {
- port->link_irq = 0;
+ port->port_irq = 0;
}
if (!valid) {
@@ -4287,8 +4464,8 @@ static int mvpp2_stop(struct net_device *dev)
if (port->phylink)
phylink_disconnect_phy(port->phylink);
- if (port->link_irq)
- free_irq(port->link_irq, port);
+ if (port->port_irq)
+ free_irq(port->port_irq, port);
mvpp2_irqs_deinit(port);
if (!port->has_tx_irqs) {
@@ -4548,10 +4725,124 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
stats->tx_dropped = dev->stats.tx_dropped;
}
+static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ void __iomem *ptp;
+ u32 gcr, int_mask;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags)
+ return -EINVAL;
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
+
+ int_mask = gcr = 0;
+ if (config.tx_type != HWTSTAMP_TX_OFF) {
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
+ int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0;
+ }
+
+ /* It seems we must also release the TX reset when enabling the TSU */
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE)
+ gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET;
+
+ if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
+ mvpp22_tai_start(port->priv->tai);
+
+ if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ port->rx_hwtstamp = true;
+ } else {
+ port->rx_hwtstamp = false;
+ mvpp2_modify(ptp + MVPP22_PTP_GCR,
+ MVPP22_PTP_GCR_RX_RESET |
+ MVPP22_PTP_GCR_TX_RESET |
+ MVPP22_PTP_GCR_TSU_ENABLE, gcr);
+ }
+
+ mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
+ MVPP22_PTP_INT_MASK_QUEUE1 |
+ MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
+
+ if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
+ mvpp22_tai_stop(port->priv->tai);
+
+ port->tx_hwtstamp_type = config.tx_type;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+
+ memset(&config, 0, sizeof(config));
+
+ config.tx_type = port->tx_hwtstamp_type;
+ config.rx_filter = port->rx_hwtstamp ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (!port->hwtstamp)
+ return -EOPNOTSUPP;
+
+ info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mvpp2_port *port = netdev_priv(dev);
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_set_ts_config(port, ifr);
+ break;
+
+ case SIOCGHWTSTAMP:
+ if (port->hwtstamp)
+ return mvpp2_get_ts_config(port, ifr);
+ break;
+ }
+
if (!port->phylink)
return -ENOTSUPP;
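
[Editor's note] The ioctl plumbing above implements the standard SIOCSHWTSTAMP/SIOCGHWTSTAMP interface. For orientation, a userspace consumer would enable the feature roughly like this (a sketch; the interface name is a placeholder, and note the driver upgrades any non-none rx_filter to HWTSTAMP_FILTER_ALL):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Enable TX and RX hardware timestamping on "eth0" (hypothetical name)
 * through the SIOCSHWTSTAMP path the driver now handles.
 */
static int enable_hwtstamp(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);	/* 0 on success */
}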
@@ -5021,6 +5312,7 @@ static const struct ethtool_ops mvpp2_eth_tool_ops = {
ETHTOOL_COALESCE_MAX_FRAMES,
.nway_reset = mvpp2_ethtool_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_ts_info = mvpp2_ethtool_get_ts_info,
.set_coalesce = mvpp2_ethtool_set_coalesce,
.get_coalesce = mvpp2_ethtool_get_coalesce,
.get_drvinfo = mvpp2_ethtool_get_drvinfo,
@@ -5392,6 +5684,155 @@ static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
return container_of(config, struct mvpp2_port, phylink_config);
}
+static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct mvpp2_port, phylink_pcs);
+}
+
+static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ state->speed = SPEED_10000;
+ state->duplex = 1;
+ state->an_complete = 1;
+
+ val = readl(port->base + MVPP22_XLG_STATUS);
+ state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
+
+ state->pause = 0;
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_TX;
+ if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
+ state->pause |= MLO_PAUSE_RX;
+}
+
+static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
+ unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
+ .pcs_get_state = mvpp2_xlg_pcs_get_state,
+ .pcs_config = mvpp2_xlg_pcs_config,
+};
+
+static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val;
+
+ val = readl(port->base + MVPP2_GMAC_STATUS0);
+
+ state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
+ state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
+ state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
+
+ switch (port->phy_interface) {
+ case PHY_INTERFACE_MODE_1000BASEX:
+ state->speed = SPEED_1000;
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ state->speed = SPEED_2500;
+ break;
+ default:
+ if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
+ state->speed = SPEED_1000;
+ else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
+ state->speed = SPEED_100;
+ else
+ state->speed = SPEED_10;
+ }
+
+ state->pause = 0;
+ if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
+ state->pause |= MLO_PAUSE_RX;
+ if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
+ state->pause |= MLO_PAUSE_TX;
+}
+
+static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 mask, val, an, old_an, changed;
+
+ mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
+ MVPP2_GMAC_IN_BAND_AUTONEG |
+ MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_FLOW_CTRL_AUTONEG |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+
+ if (phylink_autoneg_inband(mode)) {
+ mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
+ MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+ val = MVPP2_GMAC_IN_BAND_AUTONEG;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII) {
+ /* SGMII mode receives the speed and duplex from PHY */
+ val |= MVPP2_GMAC_AN_SPEED_EN |
+ MVPP2_GMAC_AN_DUPLEX_EN;
+ } else {
+ /* 802.3z mode has fixed speed and duplex */
+ val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
+ MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+			/* The FLOW_CTRL_AUTONEG bit selects whether the
+			 * hardware automatically or the bits in
+			 * MVPP22_GMAC_CTRL_4_REG manually control the GMAC
+			 * pause modes.
+			 */
+ if (permit_pause_to_mac)
+ val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
+
+ /* Configure advertisement bits */
+ mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
+ if (phylink_test(advertising, Pause))
+ val |= MVPP2_GMAC_FC_ADV_EN;
+ if (phylink_test(advertising, Asym_Pause))
+ val |= MVPP2_GMAC_FC_ADV_ASM_EN;
+ }
+ } else {
+ val = 0;
+ }
+
+ old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ an = (an & ~mask) | val;
+ changed = an ^ old_an;
+ if (changed)
+ writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ /* We are only interested in the advertisement bits changing */
+ return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
+}
+
+static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
+{
+ struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
+ u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+ writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
+ port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
+ .pcs_get_state = mvpp2_gmac_pcs_get_state,
+ .pcs_config = mvpp2_gmac_pcs_config,
+ .pcs_an_restart = mvpp2_gmac_pcs_an_restart,
+};
+
static void mvpp2_phylink_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
@@ -5480,89 +5921,6 @@ empty_set:
bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void mvpp22_xlg_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- state->speed = SPEED_10000;
- state->duplex = 1;
- state->an_complete = 1;
-
- val = readl(port->base + MVPP22_XLG_STATUS);
- state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
-
- state->pause = 0;
- val = readl(port->base + MVPP22_XLG_CTRL0_REG);
- if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_TX;
- if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
- state->pause |= MLO_PAUSE_RX;
-}
-
-static void mvpp2_gmac_pcs_get_state(struct mvpp2_port *port,
- struct phylink_link_state *state)
-{
- u32 val;
-
- val = readl(port->base + MVPP2_GMAC_STATUS0);
-
- state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
- state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
- state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
-
- switch (port->phy_interface) {
- case PHY_INTERFACE_MODE_1000BASEX:
- state->speed = SPEED_1000;
- break;
- case PHY_INTERFACE_MODE_2500BASEX:
- state->speed = SPEED_2500;
- break;
- default:
- if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
- state->speed = SPEED_1000;
- else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
- state->speed = SPEED_100;
- else
- state->speed = SPEED_10;
- }
-
- state->pause = 0;
- if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
- state->pause |= MLO_PAUSE_RX;
- if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
- state->pause |= MLO_PAUSE_TX;
-}
-
-static void mvpp2_phylink_mac_pcs_get_state(struct phylink_config *config,
- struct phylink_link_state *state)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
-
- if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
- u32 mode = readl(port->base + MVPP22_XLG_CTRL3_REG);
- mode &= MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
-
- if (mode == MVPP22_XLG_CTRL3_MACMODESELECT_10G) {
- mvpp22_xlg_pcs_get_state(port, state);
- return;
- }
- }
-
- mvpp2_gmac_pcs_get_state(port, state);
-}
-
-static void mvpp2_mac_an_restart(struct phylink_config *config)
-{
- struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
- writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
- port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-}
-
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
@@ -5586,23 +5944,16 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
const struct phylink_link_state *state)
{
- u32 old_an, an;
u32 old_ctrl0, ctrl0;
u32 old_ctrl2, ctrl2;
u32 old_ctrl4, ctrl4;
- old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
- an &= ~(MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_FC_ADV_EN |
- MVPP2_GMAC_FC_ADV_ASM_EN | MVPP2_GMAC_FLOW_CTRL_AUTONEG |
- MVPP2_GMAC_AN_DUPLEX_EN | MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS);
ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
- ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PORT_RESET_MASK |
- MVPP2_GMAC_PCS_ENABLE_MASK);
+ ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
/* Configure port type */
if (phy_interface_mode_is_8023z(state->interface)) {
@@ -5624,12 +5975,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
}
- /* Configure advertisement bits */
- if (phylink_test(state->advertising, Pause))
- an |= MVPP2_GMAC_FC_ADV_EN;
- if (phylink_test(state->advertising, Asym_Pause))
- an |= MVPP2_GMAC_FC_ADV_ASM_EN;
-
/* Configure negotiation style */
if (!phylink_autoneg_inband(mode)) {
/* Phy or fixed speed - no in-band AN, nothing to do, leave the
@@ -5638,14 +5983,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
/* SGMII in-band mode receives the speed and duplex from
* the PHY. Flow control information is not received. */
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_AN_SPEED_EN |
- MVPP2_GMAC_AN_DUPLEX_EN;
} else if (phy_interface_mode_is_8023z(state->interface)) {
/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
* they negotiate duplex: they are always operating with a fixed
@@ -5653,42 +5990,6 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
* speed and full duplex here.
*/
ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
- an &= ~(MVPP2_GMAC_FORCE_LINK_DOWN |
- MVPP2_GMAC_FORCE_LINK_PASS |
- MVPP2_GMAC_CONFIG_MII_SPEED |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX);
- an |= MVPP2_GMAC_IN_BAND_AUTONEG |
- MVPP2_GMAC_CONFIG_GMII_SPEED |
- MVPP2_GMAC_CONFIG_FULL_DUPLEX;
-
- if (state->pause & MLO_PAUSE_AN && state->an_enabled)
- an |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
- }
-
-/* Some fields of the auto-negotiation register require the port to be down when
- * their value is updated.
- */
-#define MVPP2_GMAC_AN_PORT_DOWN_MASK \
- (MVPP2_GMAC_IN_BAND_AUTONEG | \
- MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS | \
- MVPP2_GMAC_CONFIG_MII_SPEED | MVPP2_GMAC_CONFIG_GMII_SPEED | \
- MVPP2_GMAC_AN_SPEED_EN | MVPP2_GMAC_CONFIG_FULL_DUPLEX | \
- MVPP2_GMAC_AN_DUPLEX_EN)
-
- if ((old_ctrl0 ^ ctrl0) & MVPP2_GMAC_PORT_TYPE_MASK ||
- (old_ctrl2 ^ ctrl2) & MVPP2_GMAC_INBAND_AN_MASK ||
- (old_an ^ an) & MVPP2_GMAC_AN_PORT_DOWN_MASK) {
- /* Force link down */
- old_an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
- old_an |= MVPP2_GMAC_FORCE_LINK_DOWN;
- writel(old_an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- /* Set the GMAC in a reset state - do this in a way that
- * ensures we clear it below.
- */
- old_ctrl2 |= MVPP2_GMAC_PORT_RESET_MASK;
- writel(old_ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
}
if (old_ctrl0 != ctrl0)
@@ -5697,41 +5998,85 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
if (old_ctrl4 != ctrl4)
writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
- if (old_an != an)
- writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
-
- if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
- while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
- MVPP2_GMAC_PORT_RESET_MASK)
- continue;
- }
}
-static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
- const struct phylink_link_state *state)
+static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
{
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
- bool change_interface = port->phy_interface != state->interface;
/* Check for invalid configuration */
- if (mvpp2_is_xlg(state->interface) && port->gop_id != 0) {
+ if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
- return;
+ return -EINVAL;
+ }
+
+ if (port->phy_interface != interface ||
+ phylink_autoneg_inband(mode)) {
+ /* Force the link down when changing the interface or if in
+ * in-band mode to ensure we do not change the configuration
+ * while the hardware is indicating link is up. We force both
+ * XLG and GMAC down to ensure that they're both in a known
+ * state.
+ */
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN,
+ MVPP2_GMAC_FORCE_LINK_DOWN);
+
+ if (mvpp2_port_supports_xlg(port))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
}
/* Make sure the port is disabled when reconfiguring the mode */
mvpp2_port_disable(port);
- if (port->priv->hw_version == MVPP22 && change_interface) {
- mvpp22_gop_mask_irq(port);
+ if (port->phy_interface != interface) {
+ /* Place GMAC into reset */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK,
+ MVPP2_GMAC_PORT_RESET_MASK);
- port->phy_interface = state->interface;
+ if (port->priv->hw_version == MVPP22) {
+ mvpp22_gop_mask_irq(port);
- /* Reconfigure the serdes lanes */
- phy_power_off(port->comphy);
- mvpp22_mode_reconfigure(port);
+ phy_power_off(port->comphy);
+ }
}
+ /* Select the appropriate PCS operations depending on the
+	 * configured interface mode. We only ever switch to a mode that
+	 * has already passed the validate() checks.
+ */
+ if (mvpp2_is_xlg(interface))
+ port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
+ else
+ port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
+
+ return 0;
+}
+
+static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+ int ret;
+
+ ret = mvpp2__mac_prepare(config, mode, interface);
+ if (ret == 0)
+ phylink_set_pcs(port->phylink, &port->phylink_pcs);
+
+ return ret;
+}
+
+static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
/* mac (re)configuration */
if (mvpp2_is_xlg(state->interface))
mvpp2_xlg_config(port, mode, state);
@@ -5742,11 +6087,51 @@ static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
mvpp2_port_loopback_set(port, state);
+}
+
+static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mvpp2_port *port = mvpp2_phylink_to_port(config);
+
+ if (port->priv->hw_version == MVPP22 &&
+ port->phy_interface != interface) {
+ port->phy_interface = interface;
- if (port->priv->hw_version == MVPP22 && change_interface)
+ /* Reconfigure the serdes lanes */
+ mvpp22_mode_reconfigure(port);
+
+ /* Unmask interrupts */
mvpp22_gop_unmask_irq(port);
+ }
+
+ if (!mvpp2_is_xlg(interface)) {
+ /* Release GMAC reset and wait */
+ mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
+ MVPP2_GMAC_PORT_RESET_MASK, 0);
+
+ while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+ MVPP2_GMAC_PORT_RESET_MASK)
+ continue;
+ }
mvpp2_port_enable(port);
+
+ /* Allow the link to come up if in in-band mode, otherwise the
+ * link is forced via mac_link_down()/mac_link_up()
+ */
+ if (phylink_autoneg_inband(mode)) {
+ if (mvpp2_is_xlg(interface))
+ mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
+ MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
+ MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
+ else
+ mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
+ MVPP2_GMAC_FORCE_LINK_PASS |
+ MVPP2_GMAC_FORCE_LINK_DOWN, 0);
+ }
+
+ return 0;
}
static void mvpp2_mac_link_up(struct phylink_config *config,
@@ -5843,13 +6228,36 @@ static void mvpp2_mac_link_down(struct phylink_config *config,
static const struct phylink_mac_ops mvpp2_phylink_ops = {
.validate = mvpp2_phylink_validate,
- .mac_pcs_get_state = mvpp2_phylink_mac_pcs_get_state,
- .mac_an_restart = mvpp2_mac_an_restart,
+ .mac_prepare = mvpp2_mac_prepare,
.mac_config = mvpp2_mac_config,
+ .mac_finish = mvpp2_mac_finish,
.mac_link_up = mvpp2_mac_link_up,
.mac_link_down = mvpp2_mac_link_down,
};
+/* Work-around for ACPI */
+static void mvpp2_acpi_start(struct mvpp2_port *port)
+{
+ /* Phylink isn't used as of now for ACPI, so the MAC has to be
+ * configured manually when the interface is started. This will
+	 * be removed as soon as the phylink ACPI support lands.
+ */
+ struct phylink_link_state state = {
+ .interface = port->phy_interface,
+ };
+ mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
+ port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
+ port->phy_interface,
+ state.advertising, false);
+ mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
+ port->phy_interface);
+ mvpp2_mac_link_up(&port->phylink_config, NULL,
+ MLO_AN_INBAND, port->phy_interface,
+ SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
+}
+
/* Ports initialization */
static int mvpp2_port_probe(struct platform_device *pdev,
struct fwnode_handle *port_fwnode,
@@ -5937,16 +6345,16 @@ static int mvpp2_port_probe(struct platform_device *pdev,
goto err_free_netdev;
if (port_node)
- port->link_irq = of_irq_get_byname(port_node, "link");
+ port->port_irq = of_irq_get_byname(port_node, "link");
else
- port->link_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
- if (port->link_irq == -EPROBE_DEFER) {
+ port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
+ if (port->port_irq == -EPROBE_DEFER) {
err = -EPROBE_DEFER;
goto err_deinit_qvecs;
}
- if (port->link_irq <= 0)
+ if (port->port_irq <= 0)
/* the link irq is optional */
- port->link_irq = 0;
+ port->port_irq = 0;
if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
port->flags |= MVPP2_F_LOOPBACK;
@@ -5983,6 +6391,12 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->stats_base = port->priv->iface_base +
MVPP22_MIB_COUNTERS_OFFSET +
port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
+
+ /* We may want a property to describe whether we should use
+ * MAC hardware timestamping.
+ */
+ if (priv->tai)
+ port->hwtstamp = true;
}
/* Alloc per-cpu and ethtool stats */
@@ -6110,8 +6524,8 @@ err_free_txq_pcpu:
err_free_stats:
free_percpu(port->stats);
err_free_irq:
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
err_deinit_qvecs:
mvpp2_queue_vectors_deinit(port);
err_free_netdev:
@@ -6132,8 +6546,8 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
for (i = 0; i < port->ntxqs; i++)
free_percpu(port->txqs[i]->pcpu);
mvpp2_queue_vectors_deinit(port);
- if (port->link_irq)
- irq_dispose_mapping(port->link_irq);
+ if (port->port_irq)
+ irq_dispose_mapping(port->port_irq);
free_netdev(port->dev);
}
@@ -6545,6 +6959,10 @@ static int mvpp2_probe(struct platform_device *pdev)
goto err_axi_clk;
}
+ err = mvpp22_tai_probe(&pdev->dev, priv);
+ if (err < 0)
+ goto err_axi_clk;
+
/* Initialize ports */
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
err = mvpp2_port_probe(pdev, port_fwnode, priv);
@@ -6663,11 +7081,13 @@ static const struct of_device_id mvpp2_match[] = {
};
MODULE_DEVICE_TABLE(of, mvpp2_match);
+#ifdef CONFIG_ACPI
static const struct acpi_device_id mvpp2_acpi_match[] = {
{ "MRVL0110", MVPP22 },
{ },
};
MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
+#endif
static struct platform_driver mvpp2_driver = {
.probe = mvpp2_probe,
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
new file mode 100644
index 000000000000..95862aff49f1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_tai.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Marvell PP2.2 TAI support
+ *
+ * Note:
+ * Do NOT use the event capture support.
+ * Do NOT even set the MPP muxes to allow PTP_EVENT_REQ to be used.
+ * It will disrupt the operation of this driver, and there is nothing
+ * that this driver can do to prevent that. Even using PTP_EVENT_REQ
+ * as an output will be seen as a trigger input, which can't be masked.
+ * Whenever a trigger input is seen, the action in the TCFCR0_TCF
+ * field will be performed - whether it is a set, increment, decrement,
+ * read, or frequency update.
+ *
+ * Other notes (useful, not specified in the documentation):
+ * - PTP_PULSE_OUT (PTP_EVENT_REQ MPP)
+ * It looks like the hardware can't generate a pulse at nsec=0. (The
+ * output doesn't trigger if the nsec field is zero.)
+ * Note: when configured as an output via the register at 0xfX441120,
+ * the input is still very much alive, and will trigger the current TCF
+ * function.
+ * - PTP_CLK_OUT (PTP_TRIG_GEN MPP)
+ * This generates a "PPS" signal determined by the CCC registers. It
+ * seems this is not aligned to the TOD counter in any way (it may be
+ * initially, but if you specify a non-round second interval, it won't,
+ * and you can't easily get it back.)
+ * - PTP_PCLK_OUT
+ * This generates a 50% duty cycle clock based on the TOD counter, and
+ * it seems it can be set to any period with 1ns resolution. It is probably
+ * limited by the TOD step size. Its period is defined by the PCLK_CCC
+ * registers. Again, its alignment to the second is questionable.
+ *
+ * Consequently, we support none of these.
+ */
+#include <linux/io.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/slab.h>
+
+#include "mvpp2.h"
+
+#define CR0_SW_NRESET BIT(0)
+
+#define TCFCR0_PHASE_UPDATE_ENABLE BIT(8)
+#define TCFCR0_TCF_MASK (7 << 2)
+#define TCFCR0_TCF_UPDATE (0 << 2)
+#define TCFCR0_TCF_FREQUPDATE (1 << 2)
+#define TCFCR0_TCF_INCREMENT (2 << 2)
+#define TCFCR0_TCF_DECREMENT (3 << 2)
+#define TCFCR0_TCF_CAPTURE (4 << 2)
+#define TCFCR0_TCF_NOP (7 << 2)
+#define TCFCR0_TCF_TRIGGER BIT(0)
+
+#define TCSR_CAPTURE_1_VALID BIT(1)
+#define TCSR_CAPTURE_0_VALID BIT(0)
+
+struct mvpp2_tai {
+ struct ptp_clock_info caps;
+ struct ptp_clock *ptp_clock;
+ void __iomem *base;
+ spinlock_t lock;
+ u64 period; // nanosecond period in 32.32 fixed point
+ /* This timestamp is updated every two seconds */
+ struct timespec64 stamp;
+};
+
+static void mvpp2_tai_modify(void __iomem *reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = readl_relaxed(reg) & ~mask;
+ val |= set & mask;
+ writel(val, reg);
+}
+
+static void mvpp2_tai_write(u32 val, void __iomem *reg)
+{
+ writel_relaxed(val & 0xffff, reg);
+}
+
+static u32 mvpp2_tai_read(void __iomem *reg)
+{
+ return readl_relaxed(reg) & 0xffff;
+}
+
+static struct mvpp2_tai *ptp_to_tai(struct ptp_clock_info *ptp)
+{
+ return container_of(ptp, struct mvpp2_tai, caps);
+}
+
+static void mvpp22_tai_read_ts(struct timespec64 *ts, void __iomem *base)
+{
+ ts->tv_sec = (u64)mvpp2_tai_read(base + 0) << 32 |
+ mvpp2_tai_read(base + 4) << 16 |
+ mvpp2_tai_read(base + 8);
+
+ ts->tv_nsec = mvpp2_tai_read(base + 12) << 16 |
+ mvpp2_tai_read(base + 16);
+
+ /* Read and discard fractional part */
+ readl_relaxed(base + 20);
+ readl_relaxed(base + 24);
+}
+
+static void mvpp2_tai_write_tlv(const struct timespec64 *ts, u32 frac,
+ void __iomem *base)
+{
+ mvpp2_tai_write(ts->tv_sec >> 32, base + MVPP22_TAI_TLV_SEC_HIGH);
+ mvpp2_tai_write(ts->tv_sec >> 16, base + MVPP22_TAI_TLV_SEC_MED);
+ mvpp2_tai_write(ts->tv_sec, base + MVPP22_TAI_TLV_SEC_LOW);
+ mvpp2_tai_write(ts->tv_nsec >> 16, base + MVPP22_TAI_TLV_NANO_HIGH);
+ mvpp2_tai_write(ts->tv_nsec, base + MVPP22_TAI_TLV_NANO_LOW);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+}
+
+static void mvpp2_tai_op(u32 op, void __iomem *base)
+{
+ /* Trigger the operation. Note that an external unmaskable
+ * event on PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ op | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+}
+
+/* The adjustment has a range of +0.5ns to -0.5ns in 2^32 steps, so has units
+ * of 2^-32 ns.
+ *
+ * units(s) = 1 / (2^32 * 10^9)
+ * fractional = abs_scaled_ppm / (2^16 * 10^6)
+ *
+ * What we want to achieve:
+ * freq_adjusted = freq_nominal * (1 + fractional)
+ * freq_delta = freq_adjusted - freq_nominal => positive = faster
+ * freq_delta = freq_nominal * (1 + fractional) - freq_nominal
+ * So: freq_delta = freq_nominal * fractional
+ *
+ * However, we are dealing with periods, so:
+ * period_adjusted = period_nominal / (1 + fractional)
+ * period_delta = period_nominal - period_adjusted => positive = faster
+ * period_delta = period_nominal * fractional / (1 + fractional)
+ *
+ * Hence:
+ * period_delta = period_nominal * abs_scaled_ppm /
+ * (2^16 * 10^6 + abs_scaled_ppm)
+ *
+ * To avoid overflow, we reduce both sides of the divide operation by a factor
+ * of 16.
+ */
+static u64 mvpp22_calc_frac_ppm(struct mvpp2_tai *tai, long abs_scaled_ppm)
+{
+ u64 val = tai->period * abs_scaled_ppm >> 4;
+
+ return div_u64(val, (1000000 << 12) + (abs_scaled_ppm >> 4));
+}
+
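As a quick sanity check of the period-delta arithmetic above, here is a minimal user-space sketch (hypothetical, not driver code) that reproduces the reduced-by-16 formula; with the nominal 3 ns period and scaled_ppm = 1 << 16 (one ppm) it prints 12884, i.e. roughly 3e-6 ns in 2^-32 ns units:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone version of the calculation: period is the 32.32
 * fixed-point nanosecond period, abs_scaled_ppm the absolute 16.16
 * fixed-point ppm value. The ULL suffix keeps the shifted constant
 * in 64-bit arithmetic.
 */
static uint64_t frac_ppm(uint64_t period, long abs_scaled_ppm)
{
	uint64_t val = period * abs_scaled_ppm >> 4;

	return val / ((1000000ULL << 12) + (abs_scaled_ppm >> 4));
}

int main(void)
{
	printf("%llu\n", (unsigned long long)frac_ppm(3ULL << 32, 1L << 16));
	return 0;
}
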
+static s32 mvpp22_calc_max_adj(struct mvpp2_tai *tai)
+{
+ return 1000000;
+}
+
+static int mvpp22_tai_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ bool neg_adj;
+ s32 frac;
+ u64 val;
+
+ neg_adj = scaled_ppm < 0;
+ if (neg_adj)
+ scaled_ppm = -scaled_ppm;
+
+ val = mvpp22_calc_frac_ppm(tai, scaled_ppm);
+
+ /* Convert to a signed 32-bit adjustment */
+ if (neg_adj) {
+ /* -S32_MIN warns, -val < S32_MIN fails, so go for the easy
+ * solution.
+ */
+ if (val > 0x80000000)
+ return -ERANGE;
+
+ frac = -val;
+ } else {
+ if (val > S32_MAX)
+ return -ERANGE;
+
+ frac = val;
+ }
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TLV_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TLV_FRAC_LOW);
+ mvpp2_tai_op(TCFCR0_TCF_FREQUPDATE, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ struct timespec64 ts;
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcf;
+
+ /* We can't deal with S64_MIN */
+ if (delta == S64_MIN)
+ return -ERANGE;
+
+ if (delta < 0) {
+ delta = -delta;
+ tcf = TCFCR0_TCF_DECREMENT;
+ } else {
+ tcf = TCFCR0_TCF_INCREMENT;
+ }
+
+ ts = ns_to_timespec64(delta);
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(&ts, 0, base);
+ mvpp2_tai_op(tcf, base);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static int mvpp22_tai_gettimex64(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+ u32 tcsr;
+ int ret;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ /* XXX: the only way to read the PTP time is for the CPU to trigger
+ * an event. However, there is no way to distinguish between the CPU
+ * triggered event, and an external event on PTP_EVENT_REQ. So this
+ * is incompatible with external use of PTP_EVENT_REQ.
+ */
+ ptp_read_system_prets(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_CAPTURE | TCFCR0_TCF_TRIGGER);
+ ptp_read_system_postts(sts);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+
+ tcsr = readl(base + MVPP22_TAI_TCSR);
+ if (tcsr & TCSR_CAPTURE_1_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV1_SEC_HIGH);
+ ret = 0;
+ } else if (tcsr & TCSR_CAPTURE_0_VALID) {
+ mvpp22_tai_read_ts(ts, base + MVPP22_TAI_TCV0_SEC_HIGH);
+ ret = 0;
+ } else {
+ /* We don't seem to have a reading... */
+ ret = -EBUSY;
+ }
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return ret;
+}
+
+static int mvpp22_tai_settime64(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+ unsigned long flags;
+ void __iomem *base;
+
+ base = tai->base;
+ spin_lock_irqsave(&tai->lock, flags);
+ mvpp2_tai_write_tlv(ts, 0, base);
+
+ /* Trigger an update to load the value from the TLV registers
+ * into the TOD counter. Note that an external unmaskable event on
+ * PTP_EVENT_REQ will also trigger this action.
+ */
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0,
+ TCFCR0_PHASE_UPDATE_ENABLE |
+ TCFCR0_TCF_MASK | TCFCR0_TCF_TRIGGER,
+ TCFCR0_TCF_UPDATE | TCFCR0_TCF_TRIGGER);
+ mvpp2_tai_modify(base + MVPP22_TAI_TCFCR0, TCFCR0_TCF_MASK,
+ TCFCR0_TCF_NOP);
+ spin_unlock_irqrestore(&tai->lock, flags);
+
+ return 0;
+}
+
+static long mvpp22_tai_aux_work(struct ptp_clock_info *ptp)
+{
+ struct mvpp2_tai *tai = ptp_to_tai(ptp);
+
+ mvpp22_tai_gettimex64(ptp, &tai->stamp, NULL);
+
+ return msecs_to_jiffies(2000);
+}
+
+static void mvpp22_tai_set_step(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+ u32 nano, frac;
+
+ nano = upper_32_bits(tai->period);
+ frac = lower_32_bits(tai->period);
+
+ /* As the fractional nanosecond is a signed offset, if the MSB (sign)
+ * bit is set, we have to increment the whole nanoseconds.
+ */
+ if (frac >= 0x80000000)
+ nano += 1;
+
+ mvpp2_tai_write(nano, base + MVPP22_TAI_TOD_STEP_NANO_CR);
+ mvpp2_tai_write(frac >> 16, base + MVPP22_TAI_TOD_STEP_FRAC_HIGH);
+ mvpp2_tai_write(frac, base + MVPP22_TAI_TOD_STEP_FRAC_LOW);
+}
+
+static void mvpp22_tai_init(struct mvpp2_tai *tai)
+{
+ void __iomem *base = tai->base;
+
+ mvpp22_tai_set_step(tai);
+
+ /* Release the TAI reset */
+ mvpp2_tai_modify(base + MVPP22_TAI_CR0, CR0_SW_NRESET, CR0_SW_NRESET);
+}
+
+int mvpp22_tai_ptp_clock_index(struct mvpp2_tai *tai)
+{
+ return ptp_clock_index(tai->ptp_clock);
+}
+
+void mvpp22_tai_tstamp(struct mvpp2_tai *tai, u32 tstamp,
+ struct skb_shared_hwtstamps *hwtstamp)
+{
+ struct timespec64 ts;
+ int delta;
+
+ /* The tstamp consists of 2 bits of seconds and 30 bits of nanoseconds.
+ * We use our stored timestamp (tai->stamp) to form a full timestamp,
+ * and we must read the seconds exactly once.
+ */
+ ts.tv_sec = READ_ONCE(tai->stamp.tv_sec);
+ ts.tv_nsec = tstamp & 0x3fffffff;
+
+ /* Calculate the delta in seconds between our stored timestamp and
+ * the value read from the queue. Allow timestamps one second in the
+ * past, otherwise consider them to be in the future.
+ */
+ delta = ((tstamp >> 30) - (ts.tv_sec & 3)) & 3;
+ if (delta == 3)
+ delta -= 4;
+ ts.tv_sec += delta;
+
+ memset(hwtstamp, 0, sizeof(*hwtstamp));
+ hwtstamp->hwtstamp = timespec64_to_ktime(ts);
+}
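The 2-bit seconds reconciliation above can be exercised on its own; this hypothetical user-space sketch applies the same wrap rule to a stored seconds value:

#include <stdio.h>

/* ts2 holds the two seconds bits taken from the queued timestamp,
 * full the stored seconds. Deltas of +1/+2 are treated as the
 * future, 3 as one second in the past.
 */
static long long expand_sec(long long full, unsigned int ts2)
{
	int delta = (ts2 - (full & 3)) & 3;

	if (delta == 3)
		delta -= 4;
	return full + delta;
}

int main(void)
{
	printf("%lld\n", expand_sec(1000, 3));	/* 999: one second behind */
	printf("%lld\n", expand_sec(1000, 1));	/* 1001: one second ahead */
	return 0;
}
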
+
+void mvpp22_tai_start(struct mvpp2_tai *tai)
+{
+ long delay;
+
+ delay = mvpp22_tai_aux_work(&tai->caps);
+
+ ptp_schedule_worker(tai->ptp_clock, delay);
+}
+
+void mvpp22_tai_stop(struct mvpp2_tai *tai)
+{
+ ptp_cancel_worker_sync(tai->ptp_clock);
+}
+
+static void mvpp22_tai_remove(void *priv)
+{
+ struct mvpp2_tai *tai = priv;
+
+ if (!IS_ERR(tai->ptp_clock))
+ ptp_clock_unregister(tai->ptp_clock);
+}
+
+int mvpp22_tai_probe(struct device *dev, struct mvpp2 *priv)
+{
+ struct mvpp2_tai *tai;
+ int ret;
+
+ tai = devm_kzalloc(dev, sizeof(*tai), GFP_KERNEL);
+ if (!tai)
+ return -ENOMEM;
+
+ spin_lock_init(&tai->lock);
+
+ tai->base = priv->iface_base;
+
+ /* The step size consists of three registers - a 16-bit nanosecond step
+ * size, and a 32-bit fractional nanosecond step size split over two
+ * registers. The fractional nanosecond step size has units of 2^-32ns.
+ *
+ * To calculate this, we calculate:
+ * (10^9 + freq / 2) / (freq * 2^-32)
+ * which gives us the nanosecond step to the nearest integer in 16.32
+ * fixed point format, and the fractional part of the step size with
+ * the MSB inverted. With rounding of the fractional nanosecond, and
+ * simplification, this becomes:
+ * (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ *
+ * So:
+ * div = (10^9 << 32 + freq << 31 + (freq + 1) >> 1) / freq
+ * nano = upper_32_bits(div);
+ * frac = lower_32_bits(div) ^ 0x80000000;
+ * will give the values for the registers.
+ *
+ * This all seems perfect, but alas it is not when considering the
+ * whole story. The system is clocked from 25MHz, which is multiplied
+ * by a PLL to 1GHz, and then divided by three, giving 333333333Hz
+ * (recurring). This gives exactly 3ns, but using 333333333Hz with
+ * the above gives an error of 13*2^-32ns.
+ *
+ * Consequently, we use the period rather than calculating from the
+ * frequency.
+ */
+ tai->period = 3ULL << 32;
+
+ mvpp22_tai_init(tai);
+
+ tai->caps.owner = THIS_MODULE;
+ strscpy(tai->caps.name, "Marvell PP2.2", sizeof(tai->caps.name));
+ tai->caps.max_adj = mvpp22_calc_max_adj(tai);
+ tai->caps.adjfine = mvpp22_tai_adjfine;
+ tai->caps.adjtime = mvpp22_tai_adjtime;
+ tai->caps.gettimex64 = mvpp22_tai_gettimex64;
+ tai->caps.settime64 = mvpp22_tai_settime64;
+ tai->caps.do_aux_work = mvpp22_tai_aux_work;
+
+ ret = devm_add_action(dev, mvpp22_tai_remove, tai);
+ if (ret)
+ return ret;
+
+ tai->ptp_clock = ptp_clock_register(&tai->caps, dev);
+ if (IS_ERR(tai->ptp_clock))
+ return PTR_ERR(tai->ptp_clock);
+
+ priv->tai = tai;
+
+ return 0;
+}
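To make the 13*2^-32 ns error quoted in the comment above concrete, this stand-alone arithmetic check (user-space, hypothetical) evaluates the frequency-based formula at 333333333 Hz and prints nano=3 frac=0x0000000d, i.e. a residual fractional step of 13*2^-32 ns; hard-coding the 3 ns period avoids that residue entirely:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq = 333333333ULL;
	uint64_t div = ((1000000000ULL << 32) + (freq << 31) +
			((freq + 1) >> 1)) / freq;

	/* nano is the integer step; frac is the register value with
	 * the MSB (sign) inverted, as described in the comment.
	 */
	printf("nano=%u frac=0x%08x\n",
	       (uint32_t)(div >> 32), (uint32_t)div ^ 0x80000000u);
	return 0;
}
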
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 1b25948c662b..2f7a861d0c7b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -3,9 +3,10 @@
# Makefile for Marvell's OcteonTX2 RVU Admin Function driver
#
+ccflags-y += -I$(src)
obj-$(CONFIG_OCTEONTX2_MBOX) += octeontx2_mbox.o
obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
-octeontx2_mbox-y := mbox.o
+octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index a4e65da8d95b..8f17e26dca53 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -468,6 +468,35 @@ static void cgx_lmac_pause_frm_config(struct cgx *cgx, int lmac_id, bool enable)
}
}
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
+{
+ struct cgx *cgx = cgxd;
+ u64 cfg;
+
+ if (!cgx)
+ return;
+
+ if (enable) {
+ /* Enable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg |= CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg |= CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ } else {
+ /* Disable inbound PTP timestamping */
+ cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
+ cfg &= ~CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL, cfg);
+
+ cfg = cgx_read(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL);
+ cfg &= ~CGX_SMUX_RX_FRM_CTL_PTP_MODE;
+ cgx_write(cgx, lmac_id, CGXX_SMUX_RX_FRM_CTL, cfg);
+ }
+}
+
/* CGX Firmware interface low level support */
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 394f96591feb..27ca3291682b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -58,8 +58,10 @@
#define CGXX_SMUX_RX_FRM_CTL 0x20020
#define CGX_SMUX_RX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_SMUX_RX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_GMP_GMI_RXX_FRM_CTL 0x38028
#define CGX_GMP_GMI_RXX_FRM_CTL_CTL_BCK BIT_ULL(3)
+#define CGX_GMP_GMI_RXX_FRM_CTL_PTP_MODE BIT_ULL(12)
#define CGXX_SMUX_TX_CTL 0x20178
#define CGXX_SMUX_TX_PAUSE_PKT_TIME 0x20110
#define CGXX_SMUX_TX_PAUSE_PKT_INTERVAL 0x20120
@@ -139,4 +141,6 @@ int cgx_lmac_get_pause_frm(void *cgxd, int lmac_id,
u8 *tx_pause, u8 *rx_pause);
int cgx_lmac_set_pause_frm(void *cgxd, int lmac_id,
u8 tx_pause, u8 rx_pause);
+void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable);
+
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index 2718fe201c14..bbabb8e64201 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -14,6 +14,7 @@
#include "rvu_reg.h"
#include "mbox.h"
+#include "rvu_trace.h"
static const u16 msgs_offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
@@ -207,6 +208,9 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
*/
tx_hdr->num_msgs = mdev->num_msgs;
rx_hdr->num_msgs = 0;
+
+ trace_otx2_msg_send(mbox->pdev, tx_hdr->num_msgs, tx_hdr->msg_size);
+
spin_unlock(&mdev->mbox_lock);
/* The interrupt should be fired after num_msgs is written
@@ -303,10 +307,15 @@ int otx2_mbox_check_rsp_msgs(struct otx2_mbox *mbox, int devid)
struct mbox_msghdr *preq = mdev->mbase + ireq;
struct mbox_msghdr *prsp = mdev->mbase + irsp;
- if (preq->id != prsp->id)
+ if (preq->id != prsp->id) {
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
+ }
if (prsp->rc) {
rc = prsp->rc;
+ trace_otx2_msg_check(mbox->pdev, preq->id,
+ prsp->id, prsp->rc);
goto exit;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index ab433789d2c3..263a21129416 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -128,6 +128,7 @@ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \
M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \
M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
+M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -144,6 +145,8 @@ M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \
M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \
M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \
M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
+M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
@@ -214,6 +217,8 @@ M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \
nix_lso_format_cfg, \
nix_lso_format_cfg_rsp) \
M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_ENABLE, 0x8013, nix_lf_ptp_tx_enable, msg_req, msg_rsp) \
+M(NIX_LF_PTP_TX_DISABLE, 0x8014, nix_lf_ptp_tx_disable, msg_req, msg_rsp) \
M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
@@ -621,6 +626,7 @@ struct nix_rss_flowkey_cfg {
#define NIX_FLOW_KEY_TYPE_INNR_UDP BIT(15)
#define NIX_FLOW_KEY_TYPE_INNR_SCTP BIT(16)
#define NIX_FLOW_KEY_TYPE_INNR_ETH_DMAC BIT(17)
+#define NIX_FLOW_KEY_TYPE_VLAN BIT(20)
u32 flowkey_cfg; /* Flowkey types selected */
u8 group; /* RSS context or group */
};
@@ -859,4 +865,20 @@ struct npc_get_kex_cfg_rsp {
u8 mkex_pfl_name[MKEX_NAME_LEN];
};
+enum ptp_op {
+ PTP_OP_ADJFINE = 0,
+ PTP_OP_GET_CLOCK = 1,
+};
+
+struct ptp_req {
+ struct mbox_msghdr hdr;
+ u8 op;
+ s64 scaled_ppm;
+};
+
+struct ptp_rsp {
+ struct mbox_msghdr hdr;
+ u64 clk;
+};
+
#endif /* MBOX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3803af9231c6..91a9d00e4fb5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -49,6 +49,7 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_EDSA_VLAN,
NPC_LT_LB_EXDSA,
NPC_LT_LB_EXDSA_VLAN,
+ NPC_LT_LB_FDSA,
NPC_LT_LB_CUSTOM0 = 0xE,
NPC_LT_LB_CUSTOM1 = 0xF,
};
@@ -77,21 +78,21 @@ enum npc_kpu_ld_ltype {
NPC_LT_LD_ICMP,
NPC_LT_LD_SCTP,
NPC_LT_LD_ICMP6,
+ NPC_LT_LD_CUSTOM0,
+ NPC_LT_LD_CUSTOM1,
NPC_LT_LD_IGMP = 8,
- NPC_LT_LD_ESP,
NPC_LT_LD_AH,
NPC_LT_LD_GRE,
NPC_LT_LD_NVGRE,
NPC_LT_LD_NSH,
NPC_LT_LD_TU_MPLS_IN_NSH,
NPC_LT_LD_TU_MPLS_IN_IP,
- NPC_LT_LD_CUSTOM0 = 0xE,
- NPC_LT_LD_CUSTOM1 = 0xF,
};
enum npc_kpu_le_ltype {
NPC_LT_LE_VXLAN = 1,
NPC_LT_LE_GENEVE,
+ NPC_LT_LE_ESP,
NPC_LT_LE_GTPU = 4,
NPC_LT_LE_VXLANGPE,
NPC_LT_LE_GTPC,
@@ -173,8 +174,8 @@ struct npc_kpu_profile_action {
struct npc_kpu_profile {
int cam_entries;
int action_entries;
- struct npc_kpu_profile_cam *cam;
- struct npc_kpu_profile_action *action;
+ const struct npc_kpu_profile_cam *cam;
+ const struct npc_kpu_profile_action *action;
};
/* NPC KPU register formats */
@@ -296,6 +297,9 @@ struct nix_rx_action {
#endif
};
+/* NPC_AF_INTFX_KEX_CFG field masks */
+#define NPC_PARSE_NIBBLE GENMASK_ULL(30, 0)
+
/* NIX Receive Vtag Action Structure */
#define VTAG0_VALID_BIT BIT_ULL(15)
#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12)
@@ -320,4 +324,37 @@ struct npc_mcam_kex {
u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL];
} __packed;
+struct npc_lt_def {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+};
+
+struct npc_lt_def_ipsec {
+ u8 ltype_mask;
+ u8 ltype_match;
+ u8 lid;
+ u8 spi_offset;
+ u8 spi_nz;
+};
+
+struct npc_lt_def_cfg {
+ struct npc_lt_def rx_ol2;
+ struct npc_lt_def rx_oip4;
+ struct npc_lt_def rx_iip4;
+ struct npc_lt_def rx_oip6;
+ struct npc_lt_def rx_iip6;
+ struct npc_lt_def rx_otcp;
+ struct npc_lt_def rx_itcp;
+ struct npc_lt_def rx_oudp;
+ struct npc_lt_def rx_iudp;
+ struct npc_lt_def rx_osctp;
+ struct npc_lt_def rx_isctp;
+ struct npc_lt_def_ipsec rx_ipsec[2];
+ struct npc_lt_def pck_ol2;
+ struct npc_lt_def pck_oip4;
+ struct npc_lt_def pck_oip6;
+ struct npc_lt_def pck_iip4;
+};
+
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index aa2727e6211a..77bb4ed32600 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -63,6 +63,7 @@
#define NPC_UDP_PORT_VXLANGPE 4790
#define NPC_UDP_PORT_GENEVE 6081
#define NPC_UDP_PORT_MPLS 6635
+#define NPC_UDP_PORT_ESP 4500
#define NPC_VXLANGPE_NP_IP 0x1
#define NPC_VXLANGPE_NP_IP6 0x2
@@ -139,6 +140,13 @@
#define NPC_DSA_EXTEND 0x1000
#define NPC_DSA_EDSA 0x8000
+#define NPC_DSA_FDSA 0xc000
+
+#define NPC_KEXOF_DMAC 8
+#define MKEX_SIGN 0x19bbfdbd15f /* strtoull of "mkexprof" with base:36 */
+#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
+ (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
+ ((flags_ena) << 6) | ((key_ofs) & 0x3F))
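Both constants above can be checked in isolation. In this hypothetical user-space snippet, strtoull("mkexprof", NULL, 36) reproduces the 0x19bbfdbd15f signature, and the DMAC descriptor used in the default profile below, KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, 8), packs to (5 << 16) | (1 << 7) | 8 = 0x50088:

#include <stdio.h>
#include <stdlib.h>

#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
	(((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
	 ((flags_ena) << 6) | ((key_ofs) & 0x3F))

int main(void)
{
	/* "mkexprof" parsed as a base-36 number */
	printf("0x%llx\n", strtoull("mkexprof", NULL, 36));
	/* 6 bytes (bytesm1 = 5) from header offset 0 into key offset 8 */
	printf("0x%x\n", KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, 8));
	return 0;
}
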
enum npc_kpu_parser_state {
NPC_S_NA = 0,
@@ -166,6 +174,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU3_DSA,
NPC_S_KPU4_MPLS,
NPC_S_KPU4_NSH,
+ NPC_S_KPU4_FDSA,
NPC_S_KPU5_IP,
NPC_S_KPU5_IP6,
NPC_S_KPU5_ARP,
@@ -189,7 +198,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU8_IGMP,
NPC_S_KPU8_ICMP6,
NPC_S_KPU8_GRE,
- NPC_S_KPU8_ESP,
NPC_S_KPU8_AH,
NPC_S_KPU9_TU_MPLS_IN_GRE,
NPC_S_KPU9_TU_MPLS_IN_NSH,
@@ -201,6 +209,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU9_GENEVE,
NPC_S_KPU9_GTPC,
NPC_S_KPU9_GTPU,
+ NPC_S_KPU9_ESP,
NPC_S_KPU10_TU_MPLS_IN_VXLANGPE,
NPC_S_KPU10_TU_MPLS_PL,
NPC_S_KPU10_TU_MPLS,
@@ -271,6 +280,7 @@ enum npc_kpu_lb_lflag {
NPC_F_LB_L_EDSA_VLAN,
NPC_F_LB_L_EXDSA,
NPC_F_LB_L_EXDSA_VLAN,
+ NPC_F_LB_L_FDSA,
};
enum npc_kpu_lc_uflag {
@@ -418,7 +428,7 @@ enum NPC_ERRLEV_E {
NPC_ERRLEV_ENUM_LAST = 16,
};
-static struct npc_kpu_profile_action ikpu_action_entries[] = {
+static const struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
@@ -979,7 +989,7 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 16, 20, 0, 0,
+ 12, 14, 20, 0, 0,
NPC_S_KPU1_EXDSA, 0, 0,
NPC_LID_LA, NPC_LT_NA,
0,
@@ -997,7 +1007,7 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu1_cam_entries[] = {
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1351,10 +1361,19 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_EXDSA, 0xff,
+ 0x0000,
+ 0x0000,
NPC_DSA_EXTEND,
NPC_DSA_EXTEND,
0x0000,
0x0000,
+ },
+ {
+ NPC_S_KPU1_EXDSA, 0xff,
+ NPC_DSA_FDSA,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
0x0000,
0x0000,
},
@@ -1666,7 +1685,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu2_cam_entries[] = {
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -2794,7 +2813,7 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu3_cam_entries[] = {
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -3913,7 +3932,7 @@ static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu4_cam_entries[] = {
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -3996,6 +4015,69 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_ARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_RARP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_PTP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ NPC_ETYPE_FCOE,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU4_FDSA, 0xff,
+ 0x0000,
+ NPC_DSA_FDSA,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -4006,7 +4088,7 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu5_cam_entries[] = {
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -4576,7 +4658,7 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu6_cam_entries[] = {
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -4921,7 +5003,7 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu7_cam_entries[] = {
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -5140,7 +5222,7 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu8_cam_entries[] = {
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -5341,15 +5423,24 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
{
NPC_S_KPU8_UDP, 0xff,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
0x0000,
0x0000,
0x0000,
0x0000,
+ },
+ {
+ NPC_S_KPU8_UDP, 0xff,
+ 0x0000,
+ 0x0000,
+ NPC_UDP_PORT_ESP,
+ 0xffff,
0x0000,
0x0000,
},
{
- NPC_S_KPU8_SCTP, 0xff,
+ NPC_S_KPU8_UDP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5358,7 +5449,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ICMP, 0xff,
+ NPC_S_KPU8_SCTP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5367,7 +5458,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_IGMP, 0xff,
+ NPC_S_KPU8_ICMP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5376,7 +5467,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ICMP6, 0xff,
+ NPC_S_KPU8_IGMP, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5385,7 +5476,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU8_ESP, 0xff,
+ NPC_S_KPU8_ICMP6, 0xff,
0x0000,
0x0000,
0x0000,
@@ -5872,7 +5963,7 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu9_cam_entries[] = {
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -6324,6 +6415,15 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_MPLS_S,
},
{
+ NPC_S_KPU9_ESP, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -6334,7 +6434,7 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu10_cam_entries[] = {
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -6499,7 +6599,7 @@ static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu11_cam_entries[] = {
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -6808,7 +6908,7 @@ static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu12_cam_entries[] = {
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -7063,7 +7163,7 @@ static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu13_cam_entries[] = {
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -7075,7 +7175,7 @@ static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu14_cam_entries[] = {
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -7087,7 +7187,7 @@ static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu15_cam_entries[] = {
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -7288,7 +7388,7 @@ static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
},
};
-static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
+static const struct npc_kpu_profile_cam kpu16_cam_entries[] = {
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -7345,7 +7445,7 @@ static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu1_action_entries[] = {
+static const struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -7673,6 +7773,14 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 4, 8, 16, 2, 0,
+ NPC_S_KPU4_FDSA, 12, 1,
+ NPC_LID_LA, NPC_LT_LA_ETHER,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LA, NPC_EC_EDSA_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
@@ -7962,7 +8070,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu2_action_entries[] = {
+static const struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -8965,7 +9073,7 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu3_action_entries[] = {
+static const struct npc_kpu_profile_action kpu3_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -9960,7 +10068,7 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu4_action_entries[] = {
+static const struct npc_kpu_profile_action kpu4_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -10034,6 +10142,62 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_ARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 0, 0,
+ NPC_S_KPU5_RARP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 0, 0,
+ NPC_S_KPU5_PTP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU5_FCOE, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LB, NPC_LT_LB_FDSA,
+ NPC_F_LB_U_UNK_ETYPE | NPC_F_LB_L_FDSA,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K4,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10043,7 +10207,7 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu5_action_entries[] = {
+static const struct npc_kpu_profile_action kpu5_action_entries[] = {
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -10102,8 +10266,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 20, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 20, 1,
NPC_LID_LC, NPC_LT_LC_IP,
0,
0, 0, 0, 0,
@@ -10206,8 +10370,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 0, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 0, 1,
NPC_LID_LC, NPC_LT_LC_IP_OPT,
0,
0, 0xf, 0, 2,
@@ -10414,8 +10578,8 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU8_ESP, 40, 1,
+ 0, 0, 0, 3, 0,
+ NPC_S_KPU9_ESP, 40, 1,
NPC_LID_LC, NPC_LT_LC_IP6_EXT,
0,
0, 0, 0, 0,
@@ -10550,7 +10714,7 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu6_action_entries[] = {
+static const struct npc_kpu_profile_action kpu6_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -10561,80 +10725,80 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 12, 0, 1, 0,
- NPC_S_KPU8_TCP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 1, 0,
- NPC_S_KPU8_UDP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_SCTP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ICMP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ICMP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_AH, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_GRE, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 5, 0,
- NPC_S_KPU12_TU_IP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -10689,8 +10853,8 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10793,8 +10957,8 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 1, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10857,7 +11021,7 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu7_action_entries[] = {
+static const struct npc_kpu_profile_action kpu7_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -10908,8 +11072,8 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 1, 0,
+ NPC_S_KPU9_ESP, 8, 0,
NPC_LID_LC, NPC_LT_NA,
0,
1, 0xff, 0, 3,
@@ -10956,80 +11120,80 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 12, 0, 0, 0,
- NPC_S_KPU8_TCP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 8, 10, 0, 0,
- NPC_S_KPU8_UDP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_SCTP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ICMP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ICMP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_ESP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_AH, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 0,
- NPC_S_KPU8_GRE, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 4, 0,
- NPC_S_KPU12_TU_IP6, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU9_TU_MPLS_IN_IP, 8, 0,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
NPC_LID_LC, NPC_LT_NA,
0,
0, 0, 0, 0,
@@ -11052,7 +11216,7 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu8_action_entries[] = {
+static const struct npc_kpu_profile_action kpu8_action_entries[] = {
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -11231,6 +11395,22 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 0,
+ NPC_S_KPU9_ESP, 8, 1,
+ NPC_LID_LD, NPC_LT_LD_UDP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 7, 0,
NPC_S_KPU16_UDP_DATA, 8, 1,
NPC_LID_LD, NPC_LT_LD_UDP,
@@ -11273,14 +11453,6 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
- NPC_LID_LD, NPC_LT_LD_ESP,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
NPC_LID_LD, NPC_LT_LD_AH,
0,
0, 0, 0, 0,
@@ -11703,7 +11875,7 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu9_action_entries[] = {
+static const struct npc_kpu_profile_action kpu9_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -12105,6 +12277,14 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LE, NPC_LT_LE_ESP,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LE, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -12114,7 +12294,7 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu10_action_entries[] = {
+static const struct npc_kpu_profile_action kpu10_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -12261,7 +12441,7 @@ static struct npc_kpu_profile_action kpu10_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu11_action_entries[] = {
+static const struct npc_kpu_profile_action kpu11_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -12536,7 +12716,7 @@ static struct npc_kpu_profile_action kpu11_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu12_action_entries[] = {
+static const struct npc_kpu_profile_action kpu12_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -12763,7 +12943,7 @@ static struct npc_kpu_profile_action kpu12_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu13_action_entries[] = {
+static const struct npc_kpu_profile_action kpu13_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12774,7 +12954,7 @@ static struct npc_kpu_profile_action kpu13_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu14_action_entries[] = {
+static const struct npc_kpu_profile_action kpu14_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -12785,7 +12965,7 @@ static struct npc_kpu_profile_action kpu14_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu15_action_entries[] = {
+static const struct npc_kpu_profile_action kpu15_action_entries[] = {
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -12964,7 +13144,7 @@ static struct npc_kpu_profile_action kpu15_action_entries[] = {
},
};
-static struct npc_kpu_profile_action kpu16_action_entries[] = {
+static const struct npc_kpu_profile_action kpu16_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13015,7 +13195,7 @@ static struct npc_kpu_profile_action kpu16_action_entries[] = {
},
};
-static struct npc_kpu_profile npc_kpu_profiles[] = {
+static const struct npc_kpu_profile npc_kpu_profiles[] = {
{
ARRAY_SIZE(kpu1_cam_entries),
ARRAY_SIZE(kpu1_action_entries),
@@ -13114,4 +13294,163 @@ static struct npc_kpu_profile npc_kpu_profiles[] = {
},
};
+static const struct npc_lt_def_cfg npc_lt_defaults = {
+ .rx_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oip6 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP6,
+ .ltype_mask = 0x0E,
+ },
+ .rx_iip6 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP6,
+ .ltype_mask = 0x0F,
+ },
+ .rx_otcp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_itcp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_TCP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_oudp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_iudp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_UDP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_osctp = {
+ .lid = NPC_LID_LD,
+ .ltype_match = NPC_LT_LD_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_isctp = {
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_SCTP,
+ .ltype_mask = 0x0F,
+ },
+ .rx_ipsec = {
+ {
+ .lid = NPC_LID_LE,
+ .ltype_match = NPC_LT_LE_ESP,
+ .ltype_mask = 0x0F,
+ },
+ {
+ .spi_offset = 8,
+ .lid = NPC_LID_LH,
+ .ltype_match = NPC_LT_LH_TU_ESP,
+ .ltype_mask = 0x0F,
+ },
+ },
+ .pck_ol2 = {
+ .lid = NPC_LID_LA,
+ .ltype_match = NPC_LT_LA_ETHER,
+ .ltype_mask = 0x0F,
+ },
+ .pck_oip4 = {
+ .lid = NPC_LID_LC,
+ .ltype_match = NPC_LT_LC_IP,
+ .ltype_mask = 0x0E,
+ },
+ .pck_iip4 = {
+ .lid = NPC_LID_LG,
+ .ltype_match = NPC_LT_LG_TU_IP,
+ .ltype_mask = 0x0F,
+ },
+};
+
+static const struct npc_mcam_kex npc_mkex_default = {
+ .mkex_sign = MKEX_SIGN,
+ .name = "default",
+ .kpu_version = NPC_KPU_PROFILE_VER,
+ .keyx_cfg = {
+ /* nibble: LA..LE (ltype only) + Channel */
+ [NIX_INTF_RX] = ((u64)NPC_MCAM_KEY_X2 << 32) | 0x49247,
+ [NIX_INTF_TX] = ((u64)NPC_MCAM_KEY_X2 << 32) | ((1ULL << 19) - 1),
+ },
+ .intf_lid_lt_ld = {
+ /* Default RX MCAM KEX profile */
+ [NIX_INTF_RX] = {
+ [NPC_LID_LA] = {
+ /* Layer A: Ethernet: */
+ [NPC_LT_LA_ETHER] = {
+ /* DMAC: 6 bytes, KW1[47:0] */
+ KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_KEXOF_DMAC),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4),
+ },
+ },
+ [NPC_LID_LB] = {
+ /* Layer B: Single VLAN (CTAG) */
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ [NPC_LT_LB_CTAG] = {
+ KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4),
+ },
+ /* Layer B: Stacked VLAN (STAG|QinQ) */
+ [NPC_LT_LB_STAG_QINQ] = {
+ /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
+ KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4),
+ },
+ [NPC_LT_LB_FDSA] = {
+ /* SWITCH PORT: 1 byte, KW0[63:48] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0x6),
+ /* Ethertype: 2 bytes, KW0[47:32] */
+ KEX_LD_CFG(0x01, 0x4, 0x1, 0x0, 0x4),
+ },
+ },
+ [NPC_LID_LC] = {
+ /* Layer C: IPv4 */
+ [NPC_LT_LC_IP] = {
+ /* SIP+DIP: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10),
+ /* TOS: 1 byte, KW1[63:56] */
+ KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf),
+ },
+ /* Layer C: IPv6 */
+ [NPC_LT_LC_IP6] = {
+ /* Everything up to SADDR: 8 bytes, KW2[63:0] */
+ KEX_LD_CFG(0x07, 0x0, 0x1, 0x0, 0x10),
+ },
+ },
+ [NPC_LID_LD] = {
+ /* Layer D:UDP */
+ [NPC_LT_LD_UDP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ /* Layer D:TCP */
+ [NPC_LT_LD_TCP] = {
+ /* SPORT: 2 bytes, KW3[15:0] */
+ KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18),
+ /* DPORT: 2 bytes, KW3[31:16] */
+ KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a),
+ },
+ },
+ },
+ },
+};
+
#endif /* NPC_PROFILE_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
new file mode 100644
index 000000000000..f69f4f35ae48
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "ptp.h"
+#include "mbox.h"
+#include "rvu.h"
+
+#define DRV_NAME "Marvell PTP Driver"
+
+#define PCI_DEVID_OCTEONTX2_PTP 0xA00C
+#define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100
+#define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200
+#define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300
+#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400
+#define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500
+#define PCI_DEVID_OCTEONTX2_RST 0xA085
+
+#define PCI_PTP_BAR_NO 0
+#define PCI_RST_BAR_NO 0
+
+#define PTP_CLOCK_CFG 0xF00ULL
+#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_LO 0xF08ULL
+#define PTP_CLOCK_HI 0xF10ULL
+#define PTP_CLOCK_COMP 0xF18ULL
+
+#define RST_BOOT 0x1600ULL
+#define RST_MUL_BITS GENMASK_ULL(38, 33)
+#define CLOCK_BASE_RATE 50000000ULL
+
+static u64 get_clock_rate(void)
+{
+ u64 cfg, ret = CLOCK_BASE_RATE * 16;
+ struct pci_dev *pdev;
+ void __iomem *base;
+
+ /* To get the input clock frequency at which the PTP co-processor
+ * block is running, the base frequency (50 MHz) needs to be
+ * multiplied by the multiplier bits present in the RST_BOOT register
+ * of the RESET block. Hence the code below reads the multiplier bits
+ * from the RESET PCI device present in the system.
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RST, NULL);
+ if (!pdev)
+ goto error;
+
+ base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
+ if (!base)
+ goto error_put_pdev;
+
+ cfg = readq(base + RST_BOOT);
+ ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
+
+ iounmap(base);
+
+error_put_pdev:
+ pci_dev_put(pdev);
+
+error:
+ return ret;
+}
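As an illustration of the comment above, a hypothetical RST_BOOT value with the multiplier field (bits 38:33) set to 16 decodes to 50 MHz * 16 = 800 MHz, matching the fallback used when the RESET device is absent:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rst_boot = 16ULL << 33;	/* sample register value */
	uint64_t mul = (rst_boot >> 33) & 0x3f;	/* RST_MUL_BITS, bits 38:33 */

	printf("%llu Hz\n", (unsigned long long)(50000000ULL * mul));
	return 0;
}
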
+
+struct ptp *ptp_get(void)
+{
+ struct pci_dev *pdev;
+ struct ptp *ptp;
+
+ /* If the PTP PCI device is found on the system and the ptp
+ * driver is bound to it, then the PTP PCI device is returned
+ * to the caller (the rvu driver).
+ */
+ pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_PTP, NULL);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ ptp = pci_get_drvdata(pdev);
+ if (!ptp)
+ ptp = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(ptp))
+ pci_dev_put(pdev);
+
+ return ptp;
+}
+
+void ptp_put(struct ptp *ptp)
+{
+ if (!ptp)
+ return;
+
+ pci_dev_put(ptp->pdev);
+}
+
+static int ptp_adjfine(struct ptp *ptp, long scaled_ppm)
+{
+ bool neg_adj = false;
+ u64 comp;
+ u64 adj;
+ s64 ppb;
+
+ if (scaled_ppm < 0) {
+ neg_adj = true;
+ scaled_ppm = -scaled_ppm;
+ }
+
+ /* The hardware adds the clock compensation value to the PTP clock
+ * on every coprocessor clock cycle. The typical convention is that
+ * it represents the number of nanoseconds between each cycle. In this
+ * convention the compensation value is a 64-bit fixed-point
+ * representation where the upper 32 bits are the number of nanoseconds
+ * and the lower 32 bits are fractions of a nanosecond.
+ * The scaled_ppm represents the ratio in "parts per million" by which
+ * the compensation value should be corrected.
+ * To calculate the new compensation value we use 64-bit fixed-point
+ * arithmetic on the following formula
+ * comp = tbase + tbase * scaled_ppm / (1M * 2^16)
+ * where tbase is the basic compensation value calculated
+ * initially in the probe function.
+ */
+ comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* convert scaled_ppm to ppb */
+ ppb = 1 + scaled_ppm;
+ ppb *= 125;
+ ppb >>= 13;
+ adj = comp * ppb;
+ adj = div_u64(adj, 1000000000ull);
+ comp = neg_adj ? comp - adj : comp + adj;
+
+ writeq(comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ return 0;
+}
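A hypothetical user-space walk-through of the same arithmetic, assuming an 800 MHz coprocessor clock: comp = 10^9 * 2^32 / 800e6 = 0x140000000 (1.25 ns per cycle), +10 ppm converts to ppb = 10000, and the compensation grows by comp * 10000 / 10^9 = 53687:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clock_rate = 800000000ULL;	/* assumed: 50 MHz x 16 */
	long scaled_ppm = 10L << 16;		/* +10 ppm in 16.16 */
	uint64_t comp = (1000000000ULL << 32) / clock_rate;
	int64_t ppb = ((1 + scaled_ppm) * 125) >> 13;	/* 16.16 ppm -> ppb */
	uint64_t adj = comp * (uint64_t)ppb / 1000000000ULL;

	printf("comp=0x%llx adj=%llu\n",
	       (unsigned long long)(comp + adj), (unsigned long long)adj);
	return 0;
}
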
+
+static int ptp_get_clock(struct ptp *ptp, u64 *clk)
+{
+ /* Return the current PTP clock */
+ *clk = readq(ptp->reg_base + PTP_CLOCK_HI);
+
+ return 0;
+}
+
+static int ptp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct ptp *ptp;
+ u64 clock_comp;
+ u64 clock_cfg;
+ int err;
+
+ ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
+ if (!ptp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp->pdev = pdev;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ goto error_free;
+
+ err = pcim_iomap_regions(pdev, 1 << PCI_PTP_BAR_NO, pci_name(pdev));
+ if (err)
+ goto error_free;
+
+ ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
+
+ ptp->clock_rate = get_clock_rate();
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+
+ pci_set_drvdata(pdev, ptp);
+
+ return 0;
+
+error_free:
+ devm_kfree(dev, ptp);
+
+error:
+ /* For `ptp_get()` we need to differentiate between the case
+ * when the core has not tried to probe this device and the case when
+ * the probe failed. In the latter case we pretend that the
+ * initialization was successful and keep the error in
+ * `dev->driver_data`.
+ */
+ pci_set_drvdata(pdev, ERR_PTR(err));
+ return 0;
+}
+
+static void ptp_remove(struct pci_dev *pdev)
+{
+ struct ptp *ptp = pci_get_drvdata(pdev);
+ u64 clock_cfg;
+
+ if (IS_ERR_OR_NULL(ptp))
+ return;
+
+ /* Disable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+ clock_cfg &= ~PTP_CLOCK_CFG_PTP_EN;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+}
+
+static const struct pci_device_id ptp_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_98xx_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_96XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP,
+ PCI_VENDOR_ID_CAVIUM,
+ PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) },
+ { 0, }
+};
+
+struct pci_driver ptp_driver = {
+ .name = DRV_NAME,
+ .id_table = ptp_id_table,
+ .probe = ptp_probe,
+ .remove = ptp_remove,
+};
+
+int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
+ struct ptp_rsp *rsp)
+{
+ int err = 0;
+
+ /* This function is the PTP mailbox handler, invoked by AF
+ * consumers/netdev drivers via the mailbox mechanism. It is used
+ * by the netdev driver to get the PTP clock and to set frequency
+ * adjustments. Since the mailbox can be called without knowing
+ * whether the driver is bound to the ptp device, the validation
+ * below is needed as a first step.
+ */
+ if (!rvu->ptp)
+ return -ENODEV;
+
+ switch (req->op) {
+ case PTP_OP_ADJFINE:
+ err = ptp_adjfine(rvu->ptp, req->scaled_ppm);
+ break;
+ case PTP_OP_GET_CLOCK:
+ err = ptp_get_clock(rvu->ptp, &rsp->clk);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
new file mode 100644
index 000000000000..878bc395d28f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell PTP driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#ifndef PTP_H
+#define PTP_H
+
+#include <linux/timecounter.h>
+#include <linux/time64.h>
+#include <linux/spinlock.h>
+
+struct ptp {
+ struct pci_dev *pdev;
+ void __iomem *reg_base;
+ u32 clock_rate;
+};
+
+struct ptp *ptp_get(void);
+void ptp_put(struct ptp *ptp);
+
+extern struct pci_driver ptp_driver;
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 557e4292c846..e1f918960730 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -18,6 +18,9 @@
#include "cgx.h"
#include "rvu.h"
#include "rvu_reg.h"
+#include "ptp.h"
+
+#include "rvu_trace.h"
#define DRV_NAME "octeontx2-af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
@@ -1548,6 +1551,7 @@ static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
if (rsp && err) \
rsp->hdr.rc = err; \
\
+ trace_otx2_msg_process(mbox->pdev, _id, err); \
return rsp ? err : -ENOMEM; \
}
MBOX_MESSAGES
@@ -1880,6 +1884,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
/* Clear interrupts */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);
/* Sync with mbox memory region */
rmb();
@@ -1897,6 +1903,8 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0));
rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr);
+ if (intr)
+ trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);
rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr);
@@ -2565,13 +2573,21 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
+ rvu->ptp = ptp_get();
+ if (IS_ERR(rvu->ptp)) {
+ err = PTR_ERR(rvu->ptp);
+ if (err == -EPROBE_DEFER)
+ goto err_release_regions;
+ rvu->ptp = NULL;
+ }
+
/* Map Admin function CSRs */
rvu->afreg_base = pcim_iomap(pdev, PCI_AF_REG_BAR_NUM, 0);
rvu->pfreg_base = pcim_iomap(pdev, PCI_PF_REG_BAR_NUM, 0);
if (!rvu->afreg_base || !rvu->pfreg_base) {
dev_err(dev, "Unable to map admin function CSRs, aborting\n");
err = -ENOMEM;
- goto err_release_regions;
+ goto err_put_ptp;
}
/* Store module params in rvu structure */
@@ -2586,7 +2602,7 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = rvu_setup_hw_resources(rvu);
if (err)
- goto err_release_regions;
+ goto err_put_ptp;
/* Init mailbox btw AF and PFs */
err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF,
@@ -2626,6 +2642,8 @@ err_hwsetup:
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+err_put_ptp:
+ ptp_put(rvu->ptp);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -2651,6 +2669,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
+ ptp_put(rvu->ptp);
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
@@ -2676,9 +2695,19 @@ static int __init rvu_init_module(void)
if (err < 0)
return err;
+ err = pci_register_driver(&ptp_driver);
+ if (err < 0)
+ goto ptp_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
- pci_unregister_driver(&cgx_driver);
+ goto rvu_err;
+
+ return 0;
+rvu_err:
+ pci_unregister_driver(&ptp_driver);
+ptp_err:
+ pci_unregister_driver(&cgx_driver);
return err;
}
@@ -2686,6 +2715,7 @@ static int __init rvu_init_module(void)
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index b89dde2c8b08..90eed3160915 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -289,6 +289,22 @@ struct rvu_fwdata {
u64 reserved[FWDATA_RESERVED_MEM];
};
+struct ptp;
+
+/* KPU profile adapter structure gathering all KPU configuration data and
+ * abstracting away the source it came from.
+ */
+struct npc_kpu_profile_adapter {
+ const char *name;
+ u64 version;
+ const struct npc_lt_def_cfg *lt_def;
+ const struct npc_kpu_profile_action *ikpu; /* array[pkinds] */
+ const struct npc_kpu_profile *kpu; /* array[kpus] */
+ const struct npc_mcam_kex *mkex;
+ size_t pkinds;
+ size_t kpus;
+};
+
struct rvu {
void __iomem *afreg_base;
void __iomem *pfreg_base;
@@ -337,6 +353,11 @@ struct rvu {
/* Firmware data */
struct rvu_fwdata *fwdata;
+ /* NPC KPU data */
+ struct npc_kpu_profile_adapter kpu;
+
+ struct ptp *ptp;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -470,6 +491,7 @@ int rvu_npc_init(struct rvu *rvu);
void rvu_npc_freemem(struct rvu *rvu);
int rvu_npc_get_pkind(struct rvu *rvu, u16 pf);
void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf);
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool en);
void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc,
int nixlf, u64 chan, u8 *mac_addr);
void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index f3c82e489897..fa9152ff5e2a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -15,6 +15,7 @@
#include "rvu.h"
#include "cgx.h"
#include "rvu_reg.h"
+#include "rvu_trace.h"
struct cgx_evq_entry {
struct list_head evq_node;
@@ -34,6 +35,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(rvu->pdev, _id, sizeof(*req)); \
return req; \
}
@@ -509,6 +511,45 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ int pf = rvu_get_pf(pcifunc);
+ u8 cgx_id, lmac_id;
+ void *cgxd;
+
+ /* This msg is expected only from PFs that are mapped to CGX LMACs;
+ * if it is received from any other PF/VF there is nothing to do.
+ */
+ if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
+ !is_pf_cgxmapped(rvu, pf))
+ return -ENODEV;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+
+ cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ /* If PTP is enabled, inform NPC that packets parsed for this
+ * PF will have their data shifted by 8 bytes; if PTP is
+ * disabled, no shift is required.
+ */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
+ return -EINVAL;
+
+ return 0;
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_cgx_ptp_rx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en)
{
int pf = rvu_get_pf(pcifunc);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0fc70824fd6b..21a89dd76d3c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2508,6 +2508,14 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->ltype_match = NPC_LT_LE_GTPU;
field->ltype_mask = 0xF;
break;
+ case NIX_FLOW_KEY_TYPE_VLAN:
+ field->lid = NPC_LID_LB;
+ field->hdr_offset = 2; /* Skip TPID (2-bytes) */
+ field->bytesm1 = 1; /* 2 Bytes (Actually 12 bits) */
+ field->ltype_match = NPC_LT_LB_CTAG;
+ field->ltype_mask = 0xF;
+ field->fn_mask = 1; /* Mask out the first nibble */
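+ /* The 2 bytes after the TPID hold PCP(3)/DEI(1)/VID(12);
+ * masking the first nibble hashes on the 12-bit VID only.
+ */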
+ break;
}
field->ena = 1;
@@ -3103,6 +3111,7 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block)
int rvu_nix_init(struct rvu *rvu)
{
+ const struct npc_lt_def_cfg *ltdefs;
struct rvu_hwinfo *hw = rvu->hw;
struct rvu_block *block;
int blkaddr, err;
@@ -3133,6 +3142,7 @@ int rvu_nix_init(struct rvu *rvu)
rvu_write64(rvu, blkaddr, NIX_AF_SQM_DBG_CTL_STATUS, cfg);
}
+ ltdefs = rvu->kpu.lt_def;
/* Calibrate X2P bus to check if CGX/LBK links are fine */
err = nix_calibrate_x2p(rvu, blkaddr);
if (err)
@@ -3180,28 +3190,38 @@ int rvu_nix_init(struct rvu *rvu)
* and validate length and checksums.
*/
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (ltdefs->rx_ol2.lid << 8) | (ltdefs->rx_ol2.ltype_match << 4) |
+ ltdefs->rx_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (ltdefs->rx_oip4.lid << 8) | (ltdefs->rx_oip4.ltype_match << 4) |
+ ltdefs->rx_oip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (ltdefs->rx_iip4.lid << 8) | (ltdefs->rx_iip4.ltype_match << 4) |
+ ltdefs->rx_iip4.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F);
+ (ltdefs->rx_oip6.lid << 8) | (ltdefs->rx_oip6.ltype_match << 4) |
+ ltdefs->rx_oip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP6 << 4) | 0x0F);
+ (ltdefs->rx_iip6.lid << 8) | (ltdefs->rx_iip6.ltype_match << 4) |
+ ltdefs->rx_iip6.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F);
+ (ltdefs->rx_otcp.lid << 8) | (ltdefs->rx_otcp.ltype_match << 4) |
+ ltdefs->rx_otcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_TCP << 4) | 0x0F);
+ (ltdefs->rx_itcp.lid << 8) | (ltdefs->rx_itcp.ltype_match << 4) |
+ ltdefs->rx_itcp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F);
+ (ltdefs->rx_oudp.lid << 8) | (ltdefs->rx_oudp.ltype_match << 4) |
+ ltdefs->rx_oudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_UDP << 4) | 0x0F);
+ (ltdefs->rx_iudp.lid << 8) | (ltdefs->rx_iudp.ltype_match << 4) |
+ ltdefs->rx_iudp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP,
- (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F);
+ (ltdefs->rx_osctp.lid << 8) | (ltdefs->rx_osctp.ltype_match << 4) |
+ ltdefs->rx_osctp.ltype_mask);
rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP,
- (NPC_LID_LH << 8) | (NPC_LT_LH_TU_SCTP << 4) |
- 0x0F);
+ (ltdefs->rx_isctp.lid << 8) | (ltdefs->rx_isctp.ltype_match << 4) |
+ ltdefs->rx_isctp.ltype_mask);
err = nix_rx_flowkey_alg_cfg(rvu, blkaddr);
if (err)
@@ -3318,6 +3338,49 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
nix_ctx_free(rvu, pfvf);
}
+#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
+
+static int rvu_nix_lf_ptp_tx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int blkaddr;
+ int nixlf;
+ u64 cfg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (blkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ block = &hw->block[blkaddr];
+ nixlf = rvu_get_lf(rvu, block, pcifunc, 0);
+ if (nixlf < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf));
+
+ if (enable)
+ cfg |= NIX_AF_LFX_TX_CFG_PTP_EN;
+ else
+ cfg &= ~NIX_AF_LFX_TX_CFG_PTP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_enable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, true);
+}
+
+int rvu_mbox_handler_nix_lf_ptp_tx_disable(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_nix_lf_ptp_tx_cfg(rvu, req->hdr.pcifunc, false);
+}
+
int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
struct nix_lso_format_cfg *req,
struct nix_lso_format_cfg_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index fbaf9bcd83f2..511b01dd03ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -27,6 +27,9 @@
#define NIXLF_PROMISC_ENTRY 2
#define NPC_PARSE_RESULT_DMAC_OFFSET 8
+#define NPC_HW_TSTAMP_OFFSET 8
+
+static const char def_pfl_name[] = "default";
static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc);
@@ -61,6 +64,36 @@ int rvu_npc_get_pkind(struct rvu *rvu, u16 pf)
return -1;
}
+#define NPC_AF_ACTION0_PTR_ADVANCE GENMASK_ULL(27, 20)
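+/* e.g. FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE, 8) places the value 8
+ * into bits 27:20 of ACTION0, i.e. (8ULL << 20).
+ */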
+
+int npc_config_ts_kpuaction(struct rvu *rvu, int pf, u16 pcifunc, bool enable)
+{
+ int pkind, blkaddr;
+ u64 val;
+
+ pkind = rvu_npc_get_pkind(rvu, pf);
+ if (pkind < 0) {
+ dev_err(rvu->dev, "%s: pkind not mapped\n", __func__);
+ return -EINVAL;
+ }
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ val &= ~NPC_AF_ACTION0_PTR_ADVANCE;
+ /* If timestamp is enabled then configure NPC to shift 8 bytes */
+ if (enable)
+ val |= FIELD_PREP(NPC_AF_ACTION0_PTR_ADVANCE,
+ NPC_HW_TSTAMP_OFFSET);
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+
+ return 0;
+}
+
static int npc_get_nixlf_mcam_index(struct npc_mcam *mcam,
u16 pcifunc, int nixlf, int type)
{
@@ -417,7 +450,7 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc,
entry.kw_mask[0] = 0xFFFULL;
if (allmulti) {
- kwi = NPC_PARSE_RESULT_DMAC_OFFSET / sizeof(u64);
+ kwi = NPC_KEXOF_DMAC / sizeof(u64);
entry.kw[kwi] = BIT_ULL(40); /* LSB bit of 1st byte in DMAC */
entry.kw_mask[kwi] = BIT_ULL(40);
}
@@ -699,88 +732,8 @@ void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf)
rvu_write64(rvu, blkaddr, \
NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg)
-#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \
- (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \
- ((flags_ena) << 6) | ((key_ofs) & 0x3F))
-
-static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr)
-{
- struct npc_mcam *mcam = &rvu->hw->mcam;
- int lid, ltype;
- int lid_count;
- u64 cfg;
-
- cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST);
- lid_count = (cfg >> 4) & 0xF;
-
- /* First clear any existing config i.e
- * disable LDATA and FLAGS extraction.
- */
- for (lid = 0; lid < lid_count; lid++) {
- for (ltype = 0; ltype < 16; ltype++) {
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL);
- SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL);
-
- SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL);
- SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL);
- }
- }
-
- if (mcam->keysize != NPC_MCAM_KEY_X2)
- return;
-
- /* Default MCAM KEX profile */
- /* Layer A: Ethernet: */
-
- /* DMAC: 6 bytes, KW1[47:0] */
- cfg = KEX_LD_CFG(0x05, 0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg);
-
- /* Ethertype: 2 bytes, KW0[47:32] */
- cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg);
-
- /* Layer B: Single VLAN (CTAG) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg);
-
- /* Layer B: Stacked VLAN (STAG|QinQ) */
- /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */
- cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 0, cfg);
-
- /* Layer C: IPv4 */
- /* SIP+DIP: 8 bytes, KW2[63:0] */
- cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg);
- /* TOS: 1 byte, KW1[63:56] */
- cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg);
-
- /* Layer D:UDP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg);
-
- /* Layer D:TCP */
- /* SPORT: 2 bytes, KW3[15:0] */
- cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg);
- /* DPORT: 2 bytes, KW3[31:16] */
- cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a);
- SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg);
-}
-
static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
- struct npc_mcam_kex *mkex)
+ const struct npc_mcam_kex *mkex)
{
int lid, lt, ld, fl;
@@ -820,34 +773,31 @@ static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr,
}
}
-/* strtoull of "mkexprof" with base:36 */
-#define MKEX_SIGN 0x19bbfdbd15f
#define MKEX_END_SIGN 0xdeadbeef
-static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
+static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr,
+ const char *mkex_profile)
{
- const char *mkex_profile = rvu->mkex_pfl_name;
struct device *dev = &rvu->pdev->dev;
- void __iomem *mkex_prfl_addr = NULL;
struct npc_mcam_kex *mcam_kex;
- u64 prfl_addr;
- u64 prfl_sz;
+ void *mkex_prfl_addr = NULL;
+ u64 prfl_addr, prfl_sz;
/* If user not selected mkex profile */
- if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN))
- goto load_default;
+ if (!strncmp(mkex_profile, def_pfl_name, MKEX_NAME_LEN))
+ goto program_mkex;
if (!rvu->fwdata)
- goto load_default;
+ goto program_mkex;
prfl_addr = rvu->fwdata->mcam_addr;
prfl_sz = rvu->fwdata->mcam_sz;
if (!prfl_addr || !prfl_sz)
- goto load_default;
+ goto program_mkex;
- mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz);
+ mkex_prfl_addr = memremap(prfl_addr, prfl_sz, MEMREMAP_WC);
if (!mkex_prfl_addr)
- goto load_default;
+ goto program_mkex;
mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr;
@@ -859,35 +809,27 @@ static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr)
* parse nibble enable configuration has to be
* identical for both Rx and Tx interfaces.
*/
- if (is_rvu_96xx_B0(rvu) &&
- mcam_kex->keyx_cfg[NIX_INTF_RX] !=
- mcam_kex->keyx_cfg[NIX_INTF_TX])
- goto load_default;
-
- /* Program selected mkex profile */
- npc_program_mkex_profile(rvu, blkaddr, mcam_kex);
-
- goto unmap;
+ if (!is_rvu_96xx_B0(rvu) ||
+ mcam_kex->keyx_cfg[NIX_INTF_RX] == mcam_kex->keyx_cfg[NIX_INTF_TX])
+ rvu->kpu.mkex = mcam_kex;
+ goto program_mkex;
}
mcam_kex++;
prfl_sz -= sizeof(struct npc_mcam_kex);
}
- dev_warn(dev, "Failed to load requested profile: %s\n",
- rvu->mkex_pfl_name);
+ dev_warn(dev, "Failed to load requested profile: %s\n", mkex_profile);
-load_default:
- dev_info(rvu->dev, "Using default mkex profile\n");
- /* Config packet data and flags extraction into PARSE result */
- npc_config_ldata_extract(rvu, blkaddr);
-
-unmap:
+program_mkex:
+ dev_info(rvu->dev, "Using %s mkex profile\n", rvu->kpu.mkex->name);
+ /* Program selected mkex profile */
+ npc_program_mkex_profile(rvu, blkaddr, rvu->kpu.mkex);
if (mkex_prfl_addr)
- iounmap(mkex_prfl_addr);
+ memunmap(mkex_prfl_addr);
}
static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
- struct npc_kpu_profile_action *kpuaction,
+ const struct npc_kpu_profile_action *kpuaction,
int kpu, int entry, bool pkind)
{
struct npc_kpu_action0 action0 = {0};
@@ -929,7 +871,7 @@ static void npc_config_kpuaction(struct rvu *rvu, int blkaddr,
}
static void npc_config_kpucam(struct rvu *rvu, int blkaddr,
- struct npc_kpu_profile_cam *kpucam,
+ const struct npc_kpu_profile_cam *kpucam,
int kpu, int entry)
{
struct npc_kpu_cam cam0 = {0};
@@ -957,7 +899,7 @@ static inline u64 enable_mask(int count)
}
static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
- struct npc_kpu_profile *profile)
+ const struct npc_kpu_profile *profile)
{
int entry, num_entries, max_entries;
@@ -995,6 +937,27 @@ static void npc_program_kpu_profile(struct rvu *rvu, int blkaddr, int kpu,
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(kpu), 0x01);
}
+static int npc_prepare_default_kpu(struct npc_kpu_profile_adapter *profile)
+{
+ profile->name = def_pfl_name;
+ profile->version = NPC_KPU_PROFILE_VER;
+ profile->ikpu = ikpu_action_entries;
+ profile->pkinds = ARRAY_SIZE(ikpu_action_entries);
+ profile->kpu = npc_kpu_profiles;
+ profile->kpus = ARRAY_SIZE(npc_kpu_profiles);
+ profile->lt_def = &npc_lt_defaults;
+ profile->mkex = &npc_mkex_default;
+
+ return 0;
+}
+
+static void npc_load_kpu_profile(struct rvu *rvu)
+{
+ struct npc_kpu_profile_adapter *profile = &rvu->kpu;
+
+ npc_prepare_default_kpu(profile);
+}
+
static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
{
struct rvu_hwinfo *hw = rvu->hw;
@@ -1013,25 +976,26 @@ static void npc_parser_profile_init(struct rvu *rvu, int blkaddr)
rvu_write64(rvu, blkaddr, NPC_AF_KPUX_CFG(idx), 0x00);
}
+ /* Load and customize KPU profile. */
+ npc_load_kpu_profile(rvu);
+
/* First program IKPU profile i.e PKIND configs.
* Check HW max count to avoid configuring junk or
* writing to unsupported CSR addresses.
*/
pkind = &hw->pkind;
- num_pkinds = ARRAY_SIZE(ikpu_action_entries);
+ num_pkinds = rvu->kpu.pkinds;
num_pkinds = min_t(int, pkind->rsrc.max, num_pkinds);
for (idx = 0; idx < num_pkinds; idx++)
- npc_config_kpuaction(rvu, blkaddr,
- &ikpu_action_entries[idx], 0, idx, true);
+ npc_config_kpuaction(rvu, blkaddr, &rvu->kpu.ikpu[idx], 0, idx, true);
/* Program KPU CAM and Action profiles */
- num_kpus = ARRAY_SIZE(npc_kpu_profiles);
+ num_kpus = rvu->kpu.kpus;
num_kpus = min_t(int, hw->npc_kpus, num_kpus);
for (idx = 0; idx < num_kpus; idx++)
- npc_program_kpu_profile(rvu, blkaddr,
- idx, &npc_kpu_profiles[idx]);
+ npc_program_kpu_profile(rvu, blkaddr, idx, &rvu->kpu.kpu[idx]);
}
static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr)
@@ -1156,11 +1120,11 @@ free_mem:
int rvu_npc_init(struct rvu *rvu)
{
+ struct npc_kpu_profile_adapter *kpu = &rvu->kpu;
struct npc_pkind *pkind = &rvu->hw->pkind;
struct npc_mcam *mcam = &rvu->hw->mcam;
- u64 keyz = NPC_MCAM_KEY_X2;
+ u64 cfg, nibble_ena, rx_kex, tx_kex;
int blkaddr, entry, bank, err;
- u64 cfg, nibble_ena;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
if (blkaddr < 0) {
@@ -1194,13 +1158,16 @@ int rvu_npc_init(struct rvu *rvu)
/* Config Outer L2, IPv4's NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OL2,
- (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F);
+ (kpu->lt_def->pck_ol2.lid << 8) | (kpu->lt_def->pck_ol2.ltype_match << 4) |
+ kpu->lt_def->pck_ol2.ltype_mask);
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4,
- (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_oip4.lid << 8) | (kpu->lt_def->pck_oip4.ltype_match << 4) |
+ kpu->lt_def->pck_oip4.ltype_mask);
/* Config Inner IPV4 NPC layer info */
rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4,
- (NPC_LID_LG << 8) | (NPC_LT_LG_TU_IP << 4) | 0x0F);
+ (kpu->lt_def->pck_iip4.lid << 8) | (kpu->lt_def->pck_iip4.ltype_match << 4) |
+ kpu->lt_def->pck_iip4.ltype_mask);
/* Enable below for Rx pkts.
* - Outer IPv4 header checksum validation.
@@ -1216,23 +1183,25 @@ int rvu_npc_init(struct rvu *rvu)
/* Set RX and TX side MCAM search key size.
* LA..LD (ltype only) + Channel
*/
- nibble_ena = 0x49247;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX),
- ((keyz & 0x3) << 32) | nibble_ena);
+ rx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_RX];
+ tx_kex = npc_mkex_default.keyx_cfg[NIX_INTF_TX];
+ nibble_ena = FIELD_GET(NPC_PARSE_NIBBLE, rx_kex);
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), rx_kex);
/* Due to an errata (35786) in A0 pass silicon, parse nibble enable
* configuration has to be identical for both Rx and Tx interfaces.
*/
- if (!is_rvu_96xx_B0(rvu))
- nibble_ena = (1ULL << 19) - 1;
- rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX),
- ((keyz & 0x3) << 32) | nibble_ena);
+ if (is_rvu_96xx_B0(rvu)) {
+ tx_kex &= ~NPC_PARSE_NIBBLE;
+ tx_kex |= FIELD_PREP(NPC_PARSE_NIBBLE, nibble_ena);
+ }
+ rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), tx_kex);
err = npc_mcam_rsrcs_init(rvu, blkaddr);
if (err)
return err;
/* Configure MKEX profile */
- npc_load_mkex_profile(rvu, blkaddr);
+ npc_load_mkex_profile(rvu, blkaddr, rvu->mkex_pfl_name);
/* Set TX miss action to UCAST_DEFAULT i.e
* transmit the packet on NIX LF SQ's default channel.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
new file mode 100644
index 000000000000..56f90cf9c4c0
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#define CREATE_TRACE_POINTS
+#include "rvu_trace.h"
+
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_alloc);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_interrupt);
+EXPORT_TRACEPOINT_SYMBOL(otx2_msg_process);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
new file mode 100644
index 000000000000..e6609068e81b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Admin Function driver tracepoints
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rvu
+
+#if !defined(__RVU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __RVU_TRACE_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/pci.h>
+
+TRACE_EVENT(otx2_msg_alloc,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size),
+ TP_ARGS(pdev, id, size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(u64, size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->size = size;
+ ),
+ TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev),
+ __entry->id, __entry->size)
+);
+
+TRACE_EVENT(otx2_msg_send,
+ TP_PROTO(const struct pci_dev *pdev, u16 num_msgs, u64 msg_size),
+ TP_ARGS(pdev, num_msgs, msg_size),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, num_msgs)
+ __field(u64, msg_size)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->num_msgs = num_msgs;
+ __entry->msg_size = msg_size;
+ ),
+ TP_printk("[%s] sent %d msg(s) of size:%lld\n", __get_str(dev),
+ __entry->num_msgs, __entry->msg_size)
+);
+
+TRACE_EVENT(otx2_msg_check,
+ TP_PROTO(const struct pci_dev *pdev, u16 reqid, u16 rspid, int rc),
+ TP_ARGS(pdev, reqid, rspid, rc),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, reqid)
+ __field(u16, rspid)
+ __field(int, rc)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->reqid = reqid;
+ __entry->rspid = rspid;
+ __entry->rc = rc;
+ ),
+ TP_printk("[%s] req->id:0x%x rsp->id:0x%x resp_code:%d\n",
+ __get_str(dev), __entry->reqid,
+ __entry->rspid, __entry->rc)
+);
+
+TRACE_EVENT(otx2_msg_interrupt,
+ TP_PROTO(const struct pci_dev *pdev, const char *msg, u64 intr),
+ TP_ARGS(pdev, msg, intr),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __string(str, msg)
+ __field(u64, intr)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __assign_str(str, msg)
+ __entry->intr = intr;
+ ),
+ TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
+ __get_str(str), __entry->intr)
+);
+
+TRACE_EVENT(otx2_msg_process,
+ TP_PROTO(const struct pci_dev *pdev, u16 id, int err),
+ TP_ARGS(pdev, id, err),
+ TP_STRUCT__entry(__string(dev, pci_name(pdev))
+ __field(u16, id)
+ __field(int, err)
+ ),
+ TP_fast_assign(__assign_str(dev, pci_name(pdev))
+ __entry->id = id;
+ __entry->err = err;
+ ),
+ TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev),
+ __entry->id, __entry->err)
+);
+
+#endif /* __RVU_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE rvu_trace
+
+#include <trace/define_trace.h>
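
Each TRACE_EVENT above expands into a trace_otx2_msg_*() emitter that callers invoke directly; the otx2_msg_send and otx2_msg_check call sites live in the shared mbox code, outside this hunk. A minimal sketch of an emitter call (function name is a placeholder):

#include "rvu_trace.h"

/* Sketch: record that a batch of mailbox messages was kicked off */
static void example_notify_send(struct pci_dev *pdev, u16 num_msgs,
				u64 msg_size)
{
	trace_otx2_msg_send(pdev, num_msgs, msg_size);
}

Once the module is loaded, the events appear under /sys/kernel/debug/tracing/events/rvu/ and can be enabled individually.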
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 778df331c8ac..b2c6385707c9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -6,7 +6,8 @@
obj-$(CONFIG_OCTEONTX2_PF) += octeontx2_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += octeontx2_nicvf.o
-octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o
+octeontx2_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
+ otx2_ptp.o
octeontx2_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 93c4cf7fedbf..d2581090f9a4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -355,7 +355,7 @@ int otx2_rss_init(struct otx2_nic *pfvf)
rss->flowkey_cfg = rss->enable ? rss->flowkey_cfg :
NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6 |
NIX_FLOW_KEY_TYPE_TCP | NIX_FLOW_KEY_TYPE_UDP |
- NIX_FLOW_KEY_TYPE_SCTP;
+ NIX_FLOW_KEY_TYPE_SCTP | NIX_FLOW_KEY_TYPE_VLAN;
ret = otx2_set_flowkey_cfg(pfvf);
if (ret)
@@ -365,6 +365,95 @@ int otx2_rss_init(struct otx2_nic *pfvf)
return 0;
}
+/* Setup UDP segmentation algorithm in HW */
+static void otx2_setup_udp_segmentation(struct nix_lso_format_cfg *lso, bool v4)
+{
+ struct nix_lso_format *field;
+
+ field = (struct nix_lso_format *)&lso->fields[0];
+ lso->field_mask = GENMASK(18, 0);
+
+ /* IP's Length field */
+ field->layer = NIX_TXLAYER_OL3;
+ /* In IPv4 the length field is at byte offset 2; in IPv6 it is at 4 */
+ field->offset = v4 ? 2 : 4;
+ field->sizem1 = 1; /* i.e 2 bytes */
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+ field++;
+
+ /* No ID field in IPv6 header */
+ if (v4) {
+ /* Increment IPID */
+ field->layer = NIX_TXLAYER_OL3;
+ field->offset = 4;
+ field->sizem1 = 1; /* i.e 2 bytes */
+ field->alg = NIX_LSOALG_ADD_SEGNUM;
+ field++;
+ }
+
+ /* Update length in UDP header */
+ field->layer = NIX_TXLAYER_OL4;
+ field->offset = 4;
+ field->sizem1 = 1;
+ field->alg = NIX_LSOALG_ADD_PAYLEN;
+}
+
+/* Setup segmentation algorithms in HW and retrieve algorithm index */
+void otx2_setup_segmentation(struct otx2_nic *pfvf)
+{
+ struct nix_lso_format_cfg_rsp *rsp;
+ struct nix_lso_format_cfg *lso;
+ struct otx2_hw *hw = &pfvf->hw;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ /* UDPv4 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, true);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv4_idx = rsp->lso_format_idx;
+
+ /* UDPv6 segmentation */
+ lso = otx2_mbox_alloc_msg_nix_lso_format_cfg(&pfvf->mbox);
+ if (!lso)
+ goto fail;
+
+ /* Setup UDP/IP header fields that HW should update per segment */
+ otx2_setup_udp_segmentation(lso, false);
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err)
+ goto fail;
+
+ rsp = (struct nix_lso_format_cfg_rsp *)
+ otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &lso->hdr);
+ if (IS_ERR(rsp))
+ goto fail;
+
+ hw->lso_udpv6_idx = rsp->lso_format_idx;
+ mutex_unlock(&pfvf->mbox.lock);
+ return;
+fail:
+ mutex_unlock(&pfvf->mbox.lock);
+ netdev_info(pfvf->netdev,
+ "Failed to get LSO index for UDP GSO offload, disabling\n");
+ pfvf->netdev->hw_features &= ~NETIF_F_GSO_UDP_L4;
+}
+
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx)
{
/* Configure CQE interrupt coalescing parameters
@@ -671,6 +760,13 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (!sq->sg)
return -ENOMEM;
+ if (pfvf->ptp) {
+ err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
+ sizeof(*sq->timestamps));
+ if (err)
+ return err;
+ }
+
sq->head = 0;
sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1;
sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 2fa29889522e..d6253f2a414d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -13,10 +13,14 @@
#include <linux/pci.h>
#include <linux/iommu.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <mbox.h>
#include "otx2_reg.h"
#include "otx2_txrx.h"
+#include <rvu_trace.h>
/* PCI device IDs */
#define PCI_DEVID_OCTEONTX2_RVU_PF 0xA063
@@ -174,9 +178,11 @@ struct otx2_hw {
u16 rq_skid;
u8 cq_time_wait;
- /* For TSO segmentation */
+ /* Segmentation */
u8 lso_tsov4_idx;
u8 lso_tsov6_idx;
+ u8 lso_udpv4_idx;
+ u8 lso_udpv6_idx;
u8 hw_tso;
/* MSI-X */
@@ -209,6 +215,17 @@ struct refill_work {
struct otx2_nic *pf;
};
+struct otx2_ptp {
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ struct otx2_nic *nic;
+
+ struct cyclecounter cycle_counter;
+ struct timecounter time_counter;
+};
+
+#define OTX2_HW_TIMESTAMP_LEN 8
+
struct otx2_nic {
void __iomem *reg_base;
struct net_device *netdev;
@@ -216,6 +233,8 @@ struct otx2_nic {
u16 max_frs;
u16 rbsize; /* Receive buffer size */
+#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
+#define OTX2_FLAG_TX_TSTAMP_ENABLED BIT_ULL(1)
#define OTX2_FLAG_INTF_DOWN BIT_ULL(2)
#define OTX2_FLAG_RX_PAUSE_ENABLED BIT_ULL(9)
#define OTX2_FLAG_TX_PAUSE_ENABLED BIT_ULL(10)
@@ -251,6 +270,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
+
+ struct otx2_ptp *ptp;
+ struct hwtstamp_config tstamp;
};
static inline bool is_otx2_lbkvf(struct pci_dev *pdev)
@@ -502,6 +524,7 @@ static struct _req_type __maybe_unused \
return NULL; \
req->hdr.sig = OTX2_MBOX_REQ_SIG; \
req->hdr.id = _id; \
+ trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req)); \
return req; \
}
@@ -561,6 +584,7 @@ void otx2_tx_timeout(struct net_device *netdev, unsigned int txq);
void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
+void otx2_setup_segmentation(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index d59f5a9c7273..662fb80dbb9d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -13,8 +13,10 @@
#include <linux/stddef.h>
#include <linux/etherdevice.h>
#include <linux/log2.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
+#include "otx2_ptp.h"
#define DRV_NAME "octeontx2-nicpf"
#define DRV_VF_NAME "octeontx2-nicvf"
@@ -426,6 +428,8 @@ static int otx2_get_rss_hash_opts(struct otx2_nic *pfvf,
/* Minimum is IPv4 and IPv6, SIP/DIP */
nfc->data = RXH_IP_SRC | RXH_IP_DST;
+ if (rss->flowkey_cfg & NIX_FLOW_KEY_TYPE_VLAN)
+ nfc->data |= RXH_VLAN;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
@@ -475,6 +479,11 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf,
if (!(nfc->data & RXH_IP_SRC) || !(nfc->data & RXH_IP_DST))
return -EINVAL;
+ if (nfc->data & RXH_VLAN)
+ rss_cfg |= NIX_FLOW_KEY_TYPE_VLAN;
+ else
+ rss_cfg &= ~NIX_FLOW_KEY_TYPE_VLAN;
+
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
@@ -663,6 +672,31 @@ static u32 otx2_get_link(struct net_device *netdev)
return pfvf->linfo.link_up;
}
+static int otx2_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+
+ if (!pfvf->ptp)
+ return ethtool_op_get_ts_info(netdev, info);
+
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ info->phc_index = otx2_ptp_clock_index(pfvf);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
static const struct ethtool_ops otx2_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES,
@@ -687,6 +721,7 @@ static const struct ethtool_ops otx2_ethtool_ops = {
.set_msglevel = otx2_set_msglevel,
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 2fb45670aca4..66f1a212f1f4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -21,6 +21,8 @@
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"
+#include "otx2_ptp.h"
+#include <rvu_trace.h>
#define DRV_NAME "octeontx2-nicpf"
#define DRV_STRING "Marvell OcteonTX2 NIC Physical Function Driver"
@@ -41,6 +43,9 @@ enum {
TYPE_PFVF,
};
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable);
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
+
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
bool if_up = netif_running(netdev);
@@ -554,6 +559,8 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
+
return IRQ_HANDLED;
}
@@ -937,6 +944,9 @@ static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
mbox = &pf->mbox;
+
+ trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+
otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
return IRQ_HANDLED;
@@ -1282,7 +1292,8 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
/* Get the size of receive buffers to allocate */
- pf->rbsize = RCV_FRAG_LEN(pf->netdev->mtu + OTX2_ETH_HLEN);
+ pf->rbsize = RCV_FRAG_LEN(OTX2_HW_TIMESTAMP_LEN + pf->netdev->mtu +
+ OTX2_ETH_HLEN);
mutex_lock(&mbox->lock);
/* NPA init */
@@ -1497,6 +1508,9 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_disable_napi;
+ /* Set up segmentation algorithms; on failure the offload capability is cleared */
+ otx2_setup_segmentation(pf);
+
/* Initialize RSS */
err = otx2_rss_init(pf);
if (err)
@@ -1548,6 +1562,16 @@ int otx2_open(struct net_device *netdev)
otx2_set_cints_affinity(pf);
+ /* When reinitializing, re-enable timestamping if it was enabled before */
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ otx2_config_hw_tx_tstamp(pf, true);
+ }
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED) {
+ pf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ otx2_config_hw_rx_tstamp(pf, true);
+ }
+
pf->flags &= ~OTX2_FLAG_INTF_DOWN;
/* 'intf_down' may be checked on any cpu */
smp_wmb();
@@ -1742,6 +1766,143 @@ static void otx2_reset_task(struct work_struct *work)
rtnl_unlock();
}
+static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_cgx_ptp_rx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_RX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_RX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
+{
+ struct msg_req *req;
+ int err;
+
+ if (pfvf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED && enable)
+ return 0;
+
+ mutex_lock(&pfvf->mbox.lock);
+ if (enable)
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_enable(&pfvf->mbox);
+ else
+ req = otx2_mbox_alloc_msg_nix_lf_ptp_tx_disable(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+
+ mutex_unlock(&pfvf->mbox.lock);
+ if (enable)
+ pfvf->flags |= OTX2_FLAG_TX_TSTAMP_ENABLED;
+ else
+ pfvf->flags &= ~OTX2_FLAG_TX_TSTAMP_ENABLED;
+ return 0;
+}
+
+static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config config;
+
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* reserved for future extensions */
+ if (config.flags)
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ otx2_config_hw_tx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_TX_ON:
+ otx2_config_hw_tx_tstamp(pfvf, true);
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ otx2_config_hw_rx_tstamp(pfvf, false);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ otx2_config_hw_rx_tstamp(pfvf, true);
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ memcpy(&pfvf->tstamp, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+{
+ struct otx2_nic *pfvf = netdev_priv(netdev);
+ struct hwtstamp_config *cfg = &pfvf->tstamp;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return otx2_config_hwtstamp(netdev, req);
+ case SIOCGHWTSTAMP:
+ return copy_to_user(req->ifr_data, cfg,
+ sizeof(*cfg)) ? -EFAULT : 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
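
For reference, user space reaches otx2_config_hwtstamp() through the standard SIOCSHWTSTAMP ioctl. A minimal sketch (socket setup omitted, "eth0" is a placeholder):

#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Enable HW TX timestamping and timestamping of all RX packets */
int enable_hwtstamp(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}

Note that for any PTP filter the driver rewrites rx_filter to HWTSTAMP_FILTER_ALL, as in the switch statement above.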
static const struct net_device_ops otx2_netdev_ops = {
.ndo_open = otx2_open,
.ndo_stop = otx2_stop,
@@ -1752,6 +1913,7 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_features = otx2_set_features,
.ndo_tx_timeout = otx2_tx_timeout,
.ndo_get_stats64 = otx2_get_stats64,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *pf)
@@ -1924,6 +2086,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
+ /* Don't check for errors; proceed without PTP if init fails */
+ otx2_ptp_init(pf);
+
/* NPA's pool is a stack to which SW frees buffer pointers via Aura.
* HW allocates buffer pointer from stack and uses it for DMA'ing
* ingress packet. In some scenarios HW can free back allocated buffer
@@ -1939,7 +2104,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4);
netdev->features |= netdev->hw_features;
netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL;
@@ -1956,7 +2122,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
- goto err_detach_rsrc;
+ goto err_ptp_destroy;
}
err = otx2_wq_init(pf);
@@ -1976,6 +2142,8 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
+err_ptp_destroy:
+ otx2_ptp_destroy(pf);
err_detach_rsrc:
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
@@ -2117,6 +2285,11 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ if (pf->flags & OTX2_FLAG_TX_TSTAMP_ENABLED)
+ otx2_config_hw_tx_tstamp(pf, false);
+ if (pf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED)
+ otx2_config_hw_rx_tstamp(pf, false);
+
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
@@ -2126,6 +2299,7 @@ static void otx2_remove(struct pci_dev *pdev)
if (pf->otx2_wq)
destroy_workqueue(pf->otx2_wq);
+ otx2_ptp_destroy(pf);
otx2_detach_resources(&pf->mbox);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
new file mode 100644
index 000000000000..7bcf5246350f
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 PTP support for ethernet driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include "otx2_common.h"
+#include "otx2_ptp.h"
+
+static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct ptp_req *req;
+ int err;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_ADJFINE;
+ req->scaled_ppm = scaled_ppm;
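+ /* scaled_ppm is the requested frequency offset in parts per
+ * million with a 16-bit binary fraction; the AF applies it
+ * via its ptp_adjfine() handler (see rvu_mbox_handler_ptp_op).
+ */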
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static u64 ptp_cc_read(const struct cyclecounter *cc)
+{
+ struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
+static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_adjtime(&ptp->time_counter, delta);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_gettime(struct ptp_clock_info *ptp_info,
+ struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ mutex_lock(&pfvf->mbox.lock);
+ nsec = timecounter_read(&ptp->time_counter);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ *ts = ns_to_timespec64(nsec);
+
+ return 0;
+}
+
+static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
+ const struct timespec64 *ts)
+{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ struct otx2_nic *pfvf = ptp->nic;
+ u64 nsec;
+
+ nsec = timespec64_to_ns(ts);
+
+ mutex_lock(&pfvf->mbox.lock);
+ timecounter_init(&ptp->time_counter, &ptp->cycle_counter, nsec);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return 0;
+}
+
+static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
+ struct ptp_clock_request *rq, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+int otx2_ptp_init(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp_ptr;
+ struct cyclecounter *cc;
+ struct ptp_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ /* check if PTP block is available */
+ req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->op = PTP_OP_GET_CLOCK;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+ }
+ mutex_unlock(&pfvf->mbox.lock);
+
+ ptp_ptr = kzalloc(sizeof(*ptp_ptr), GFP_KERNEL);
+ if (!ptp_ptr) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ ptp_ptr->nic = pfvf;
+
+ cc = &ptp_ptr->cycle_counter;
+ cc->read = ptp_cc_read;
+ cc->mask = CYCLECOUNTER_MASK(64);
+ cc->mult = 1;
+ cc->shift = 0;
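+ /* mult = 1, shift = 0: cyc2ns is the identity, since the AF
+ * already programs the PTP block to count in nanoseconds
+ * (see the PTP_CLOCK_COMP setup in the AF ptp driver).
+ */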
+
+ timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
+ ktime_to_ns(ktime_get_real()));
+
+ ptp_ptr->ptp_info = (struct ptp_clock_info) {
+ .owner = THIS_MODULE,
+ .name = "OcteonTX2 PTP",
+ .max_adj = 1000000000ull,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfine = otx2_ptp_adjfine,
+ .adjtime = otx2_ptp_adjtime,
+ .gettime64 = otx2_ptp_gettime,
+ .settime64 = otx2_ptp_settime,
+ .enable = otx2_ptp_enable,
+ };
+
+ ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
+ if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
+ err = ptp_ptr->ptp_clock ?
+ PTR_ERR(ptp_ptr->ptp_clock) : -ENODEV;
+ kfree(ptp_ptr);
+ goto error;
+ }
+
+ pfvf->ptp = ptp_ptr;
+
+error:
+ return err;
+}
+
+void otx2_ptp_destroy(struct otx2_nic *pfvf)
+{
+ struct otx2_ptp *ptp = pfvf->ptp;
+
+ if (!ptp)
+ return;
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+ pfvf->ptp = NULL;
+}
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ return ptp_clock_index(pfvf->ptp->ptp_clock);
+}
+
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
+{
+ if (!pfvf->ptp)
+ return -ENODEV;
+
+ *tsns = timecounter_cyc2time(&pfvf->ptp->time_counter, tstamp);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
new file mode 100644
index 000000000000..706d63a43ae1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 PTP support for ethernet driver */
+
+#ifndef OTX2_PTP_H
+#define OTX2_PTP_H
+
+int otx2_ptp_init(struct otx2_nic *pfvf);
+void otx2_ptp_destroy(struct otx2_nic *pfvf);
+
+int otx2_ptp_clock_index(struct otx2_nic *pfvf);
+int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns);
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e46834e043be..d5d7a2f37493 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -16,6 +16,7 @@
#include "otx2_common.h"
#include "otx2_struct.h"
#include "otx2_txrx.h"
+#include "otx2_ptp.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
@@ -81,8 +82,11 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
int budget, int *tx_pkts, int *tx_bytes)
{
struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct skb_shared_hwtstamps ts;
struct sk_buff *skb = NULL;
+ u64 timestamp, tsns;
struct sg_list *sg;
+ int err;
if (unlikely(snd_comp->status) && netif_msg_tx_err(pfvf))
net_err_ratelimited("%s: TX%d: Error in send CQ status:%x\n",
@@ -94,6 +98,18 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
+ timestamp = ((u64 *)sq->timestamps->base)[snd_comp->sqe_id];
+ if (timestamp != 1) {
+ err = otx2_ptp_tstamp2time(pfvf, timestamp, &tsns);
+ if (!err) {
+ memset(&ts, 0, sizeof(ts));
+ ts.hwtstamp = ns_to_ktime(tsns);
+ skb_tstamp_tx(skb, &ts);
+ }
+ }
+ }
+
*tx_bytes += skb->len;
(*tx_pkts)++;
otx2_dma_unmap_skb_frags(pfvf, sg);
@@ -101,16 +117,47 @@ static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
sg->skb = (u64)NULL;
}
+static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
+ struct sk_buff *skb, void *data)
+{
+ u64 tsns;
+ int err;
+
+ if (!(pfvf->flags & OTX2_FLAG_RX_TSTAMP_ENABLED))
+ return;
+
+ /* The first 8 bytes hold the timestamp */
+ err = otx2_ptp_tstamp2time(pfvf, be64_to_cpu(*(__be64 *)data), &tsns);
+ if (err)
+ return;
+
+ skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
+}
+
static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len)
+ u64 iova, int len, struct nix_rx_parse_s *parse)
{
struct page *page;
+ int off = 0;
void *va;
va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
+
+ if (likely(!skb_shinfo(skb)->nr_frags)) {
+ /* Check if data starts at some nonzero offset
+ * from the start of the buffer. For now the
+ * only possible offset is 8 bytes, in the case
+ * where the packet is prepended by a timestamp.
+ */
+ if (parse->laptr) {
+ otx2_set_rxtstamp(pfvf, skb, va);
+ off = OTX2_HW_TIMESTAMP_LEN;
+ }
+ }
+
page = virt_to_page(va);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page), len, pfvf->rbsize);
+ va - page_address(page) + off, len - off, pfvf->rbsize);
otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
pfvf->rbsize, DMA_FROM_DEVICE);
@@ -239,7 +286,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (unlikely(!skb))
return;
- otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size);
+ otx2_skb_add_frag(pfvf, skb, cqe->sg.seg_addr, cqe->sg.seg_size, parse);
cq->pool_ptrs++;
otx2_set_rxhash(pfvf, cqe, skb);
@@ -477,15 +524,55 @@ static void otx2_sqe_add_ext(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
*/
ip_hdr(skb)->tot_len =
htons(ext->lso_sb - skb_network_offset(skb));
- } else {
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
ext->lso_format = pfvf->hw.lso_tsov6_idx;
+
ipv6_hdr(skb)->payload_len =
htons(ext->lso_sb - skb_network_offset(skb));
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ __be16 l3_proto = vlan_get_protocol(skb);
+ struct udphdr *udph = udp_hdr(skb);
+ u16 iplen;
+
+ ext->lso_sb = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+
+ /* HW adds the payload size to the length fields in the IP
+ * and UDP headers while segmenting, hence adjust the
+ * lengths to just the header sizes.
+ */
+ iplen = htons(ext->lso_sb - skb_network_offset(skb));
+ if (l3_proto == htons(ETH_P_IP)) {
+ ip_hdr(skb)->tot_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv4_idx;
+ } else {
+ ipv6_hdr(skb)->payload_len = iplen;
+ ext->lso_format = pfvf->hw.lso_udpv6_idx;
+ }
+
+ udph->len = htons(sizeof(struct udphdr));
}
+ } else if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ ext->tstmp = 1;
}
+
*offset += sizeof(*ext);
}
+static void otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset,
+ int alg, u64 iova)
+{
+ struct nix_sqe_mem_s *mem;
+
+ mem = (struct nix_sqe_mem_s *)(sq->sqe_base + *offset);
+ mem->subdc = NIX_SUBDC_MEM;
+ mem->alg = alg;
+ mem->wmem = 1; /* wait for the memory operation */
+ mem->addr = iova;
+
+ *offset += sizeof(*mem);
+}
+
/* Add SQE header subdescriptor structure */
static void otx2_sqe_add_hdr(struct otx2_nic *pfvf, struct otx2_snd_queue *sq,
struct nix_sqe_hdr_s *sqe_hdr,
@@ -737,6 +824,21 @@ static int otx2_get_sqe_count(struct otx2_nic *pfvf, struct sk_buff *skb)
return skb_shinfo(skb)->gso_segs;
}
+static void otx2_set_txtstamp(struct otx2_nic *pfvf, struct sk_buff *skb,
+ struct otx2_snd_queue *sq, int *offset)
+{
+ u64 iova;
+
+ if (!skb_shinfo(skb)->gso_size &&
+ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ iova = sq->timestamps->iova + (sq->head * sizeof(u64));
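+ /* One u64 slot per SQE (see otx2_sq_init): HW writes the TX
+ * timestamp there and otx2_snd_pkt_handler() converts it via
+ * the timecounter.
+ */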
+ otx2_sqe_add_mem(sq, offset, NIX_SENDMEMALG_E_SETTSTMP, iova);
+ } else {
+ skb_tx_timestamp(skb);
+ }
+}
+
bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
struct sk_buff *skb, u16 qidx)
{
@@ -790,6 +892,8 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
return false;
}
+ otx2_set_txtstamp(pfvf, skb, sq, &offset);
+
sqe_hdr->sizem1 = (offset / 16) - 1;
netdev_tx_sent_queue(txq, skb->len);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index da97f2d4416f..73af15685657 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -91,6 +91,7 @@ struct otx2_snd_queue {
struct qmem *sqe;
struct qmem *tso_hdrs;
struct sg_list *sg;
+ struct qmem *timestamps;
struct queue_stats stats;
u16 sqb_count;
u64 *sqb_ptrs;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 2f90f1721441..67fabf265fe6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -187,6 +187,8 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
+ trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.num_msgs = hdr->num_msgs;
@@ -553,7 +555,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_L4;
netdev->features = netdev->hw_features;
netdev->gso_max_segs = OTX2_MAX_GSO_SEGS;
diff --git a/drivers/net/ethernet/marvell/prestera/Kconfig b/drivers/net/ethernet/marvell/prestera/Kconfig
new file mode 100644
index 000000000000..b1fcc44f566a
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Kconfig
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Marvell Prestera drivers configuration
+#
+
+config PRESTERA
+ tristate "Marvell Prestera Switch ASICs support"
+ depends on NET_SWITCHDEV && VLAN_8021Q
+ select NET_DEVLINK
+ help
+ This driver supports the Marvell Prestera Switch ASIC family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera.
+
+config PRESTERA_PCI
+ tristate "PCI interface driver for Marvell Prestera Switch ASICs family"
+ depends on PCI && HAS_IOMEM && PRESTERA
+ default PRESTERA
+ help
+ This is the implementation of PCI interface support for the
+ Marvell Prestera Switch ASIC family.
+
+ To compile this driver as a module, choose M here: the
+ module will be called prestera_pci.
diff --git a/drivers/net/ethernet/marvell/prestera/Makefile b/drivers/net/ethernet/marvell/prestera/Makefile
new file mode 100644
index 000000000000..93129e32ebc5
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PRESTERA) += prestera.o
+prestera-objs := prestera_main.o prestera_hw.o prestera_dsa.o \
+ prestera_rxtx.o prestera_devlink.o prestera_ethtool.o \
+ prestera_switchdev.o
+
+obj-$(CONFIG_PRESTERA_PCI) += prestera_pci.o
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
new file mode 100644
index 000000000000..55aa4bf8a27c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -0,0 +1,206 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_H_
+#define _PRESTERA_H_
+
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
+#include <uapi/linux/if_ether.h>
+
+#define PRESTERA_DRV_NAME "prestera"
+
+#define PRESTERA_DEFAULT_VID 1
+
+struct prestera_fw_rev {
+ u16 maj;
+ u16 min;
+ u16 sub;
+};
+
+struct prestera_port_stats {
+ u64 good_octets_received;
+ u64 bad_octets_received;
+ u64 mac_trans_error;
+ u64 broadcast_frames_received;
+ u64 multicast_frames_received;
+ u64 frames_64_octets;
+ u64 frames_65_to_127_octets;
+ u64 frames_128_to_255_octets;
+ u64 frames_256_to_511_octets;
+ u64 frames_512_to_1023_octets;
+ u64 frames_1024_to_max_octets;
+ u64 excessive_collision;
+ u64 multicast_frames_sent;
+ u64 broadcast_frames_sent;
+ u64 fc_sent;
+ u64 fc_received;
+ u64 buffer_overrun;
+ u64 undersize;
+ u64 fragments;
+ u64 oversize;
+ u64 jabber;
+ u64 rx_error_frame_received;
+ u64 bad_crc;
+ u64 collisions;
+ u64 late_collision;
+ u64 unicast_frames_received;
+ u64 unicast_frames_sent;
+ u64 sent_multiple;
+ u64 sent_deferred;
+ u64 good_octets_sent;
+};
+
+struct prestera_port_caps {
+ u64 supp_link_modes;
+ u8 supp_fec;
+ u8 type;
+ u8 transceiver;
+};
+
+struct prestera_port {
+ struct net_device *dev;
+ struct prestera_switch *sw;
+ struct devlink_port dl_port;
+ u32 id;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+ u16 pvid;
+ bool autoneg;
+ u64 adver_link_modes;
+ u8 adver_fec;
+ struct prestera_port_caps caps;
+ struct list_head list;
+ struct list_head vlans_list;
+ struct {
+ struct prestera_port_stats stats;
+ struct delayed_work caching_dw;
+ } cached_hw_stats;
+};
+
+struct prestera_device {
+ struct device *dev;
+ u8 __iomem *ctl_regs;
+ u8 __iomem *pp_regs;
+ struct prestera_fw_rev fw_rev;
+ void *priv;
+
+ /* called by device driver to handle received packets */
+ void (*recv_pkt)(struct prestera_device *dev);
+
+ /* called by device driver to pass event up to the higher layer */
+ int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size);
+
+ /* called by higher layer to send request to the firmware */
+ int (*send_req)(struct prestera_device *dev, void *in_msg,
+ size_t in_size, void *out_msg, size_t out_size,
+ unsigned int wait);
+};
+
+enum prestera_event_type {
+ PRESTERA_EVENT_TYPE_UNSPEC,
+
+ PRESTERA_EVENT_TYPE_PORT,
+ PRESTERA_EVENT_TYPE_FDB,
+ PRESTERA_EVENT_TYPE_RXTX,
+
+ PRESTERA_EVENT_TYPE_MAX
+};
+
+enum prestera_rxtx_event_id {
+ PRESTERA_RXTX_EVENT_UNSPEC,
+ PRESTERA_RXTX_EVENT_RCV_PKT,
+};
+
+enum prestera_port_event_id {
+ PRESTERA_PORT_EVENT_UNSPEC,
+ PRESTERA_PORT_EVENT_STATE_CHANGED,
+};
+
+struct prestera_port_event {
+ u32 port_id;
+ union {
+ u32 oper_state;
+ } data;
+};
+
+enum prestera_fdb_event_id {
+ PRESTERA_FDB_EVENT_UNSPEC,
+ PRESTERA_FDB_EVENT_LEARNED,
+ PRESTERA_FDB_EVENT_AGED,
+};
+
+struct prestera_fdb_event {
+ u32 port_id;
+ u32 vid;
+ union {
+ u8 mac[ETH_ALEN];
+ } data;
+};
+
+struct prestera_event {
+ u16 id;
+ union {
+ struct prestera_port_event port_evt;
+ struct prestera_fdb_event fdb_evt;
+ };
+};
+
+struct prestera_switchdev;
+struct prestera_rxtx;
+
+struct prestera_switch {
+ struct prestera_device *dev;
+ struct prestera_switchdev *swdev;
+ struct prestera_rxtx *rxtx;
+ struct list_head event_handlers;
+ struct notifier_block netdev_nb;
+ char base_mac[ETH_ALEN];
+ struct list_head port_list;
+ rwlock_t port_list_lock;
+ u32 port_count;
+ u32 mtu_min;
+ u32 mtu_max;
+ u8 id;
+};
+
+struct prestera_rxtx_params {
+ bool use_sdma;
+ u32 map_addr;
+};
+
+#define prestera_dev(sw) ((sw)->dev->dev)
+
+static inline void prestera_write(const struct prestera_switch *sw,
+ unsigned int reg, u32 val)
+{
+ writel(val, sw->dev->pp_regs + reg);
+}
+
+static inline u32 prestera_read(const struct prestera_switch *sw,
+ unsigned int reg)
+{
+ return readl(sw->dev->pp_regs + reg);
+}
+
+int prestera_device_register(struct prestera_device *dev);
+void prestera_device_unregister(struct prestera_device *dev);
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id);
+
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec);
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
+
+bool prestera_netdev_check(const struct net_device *dev);
+
+#endif /* _PRESTERA_H_ */
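
The struct prestera_device contract above is the whole bus abstraction:
a lower-layer driver fills in the register windows and send_req, then
the core drives it and installs recv_msg/recv_pkt for the reverse
direction. A minimal sketch of the expected registration flow (an
illustrative bus driver, not the actual prestera_pci code):

    static int example_send_req(struct prestera_device *dev, void *in,
                                size_t in_size, void *out, size_t out_size,
                                unsigned int wait)
    {
        return -EOPNOTSUPP;    /* a real bus driver talks to the FW mailbox */
    }

    static int example_bus_probe(struct device *dev)
    {
        struct prestera_device *pr_dev;

        pr_dev = devm_kzalloc(dev, sizeof(*pr_dev), GFP_KERNEL);
        if (!pr_dev)
            return -ENOMEM;

        pr_dev->dev = dev;
        pr_dev->send_req = example_send_req;
        /* ctl_regs/pp_regs would be ioremapped BARs on real hardware */

        return prestera_device_register(pr_dev);
    }
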
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
new file mode 100644
index 000000000000..94c185a0e2b8
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <net/devlink.h>
+
+#include "prestera_devlink.h"
+
+static int prestera_dl_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_switch *sw = devlink_priv(dl);
+ char buf[16];
+ int err;
+
+ err = devlink_info_driver_name_put(req, PRESTERA_DRV_NAME);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
+}
+
+static const struct devlink_ops prestera_dl_ops = {
+ .info_get = prestera_dl_info_get,
+};
+
+struct prestera_switch *prestera_devlink_alloc(void)
+{
+ struct devlink *dl;
+
+ dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch));
+ if (!dl)
+ return NULL;
+
+ return devlink_priv(dl);
+}
+
+void prestera_devlink_free(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_free(dl);
+}
+
+int prestera_devlink_register(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+ int err;
+
+ err = devlink_register(dl, sw->dev->dev);
+ if (err)
+ dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
+
+ return err;
+}
+
+void prestera_devlink_unregister(struct prestera_switch *sw)
+{
+ struct devlink *dl = priv_to_devlink(sw);
+
+ devlink_unregister(dl);
+}
+
+int prestera_devlink_port_register(struct prestera_port *port)
+{
+ struct prestera_switch *sw = port->sw;
+ struct devlink *dl = priv_to_devlink(sw);
+ struct devlink_port_attrs attrs = {};
+ int err;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port->fp_id;
+ attrs.switch_id.id_len = sizeof(sw->id);
+ memcpy(attrs.switch_id.id, &sw->id, attrs.switch_id.id_len);
+
+ devlink_port_attrs_set(&port->dl_port, &attrs);
+
+ err = devlink_port_register(dl, &port->dl_port, port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "devlink_port_register failed: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void prestera_devlink_port_unregister(struct prestera_port *port)
+{
+ devlink_port_unregister(&port->dl_port);
+}
+
+void prestera_devlink_port_set(struct prestera_port *port)
+{
+ devlink_port_type_eth_set(&port->dl_port, port->dev);
+}
+
+void prestera_devlink_port_clear(struct prestera_port *port)
+{
+ devlink_port_type_clear(&port->dl_port);
+}
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ return &port->dl_port;
+}
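
Taken together these helpers imply a fixed bring-up order for the core.
A sketch of the intended sequence (error unwinding elided):

    struct prestera_switch *sw;

    sw = prestera_devlink_alloc();          /* devlink instance + priv */
    /* ... firmware/switch init fills in sw ... */
    prestera_devlink_register(sw);          /* expose instance to user space */

    /* per port, once its netdev exists: */
    prestera_devlink_port_register(port);   /* physical flavour, fp_id index */
    prestera_devlink_port_set(port);        /* bind devlink port to netdev */
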
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
new file mode 100644
index 000000000000..51bee9f75415
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_DEVLINK_H_
+#define _PRESTERA_DEVLINK_H_
+
+#include "prestera.h"
+
+struct prestera_switch *prestera_devlink_alloc(void);
+void prestera_devlink_free(struct prestera_switch *sw);
+
+int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_unregister(struct prestera_switch *sw);
+
+int prestera_devlink_port_register(struct prestera_port *port);
+void prestera_devlink_port_unregister(struct prestera_port *port);
+
+void prestera_devlink_port_set(struct prestera_port *port);
+void prestera_devlink_port_clear(struct prestera_port *port);
+
+struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
+
+#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.c b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
new file mode 100644
index 000000000000..a5e01c7a307b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+#include "prestera_dsa.h"
+
+#define PRESTERA_DSA_W0_CMD GENMASK(31, 30)
+#define PRESTERA_DSA_W0_IS_TAGGED BIT(29)
+#define PRESTERA_DSA_W0_DEV_NUM GENMASK(28, 24)
+#define PRESTERA_DSA_W0_PORT_NUM GENMASK(23, 19)
+#define PRESTERA_DSA_W0_VPT GENMASK(15, 13)
+#define PRESTERA_DSA_W0_EXT_BIT BIT(12)
+#define PRESTERA_DSA_W0_VID GENMASK(11, 0)
+
+#define PRESTERA_DSA_W1_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W1_CFI_BIT BIT(30)
+#define PRESTERA_DSA_W1_PORT_NUM GENMASK(11, 10)
+
+#define PRESTERA_DSA_W2_EXT_BIT BIT(31)
+#define PRESTERA_DSA_W2_PORT_NUM BIT(20)
+
+#define PRESTERA_DSA_W3_VID GENMASK(30, 27)
+#define PRESTERA_DSA_W3_DST_EPORT GENMASK(23, 7)
+#define PRESTERA_DSA_W3_DEV_NUM GENMASK(6, 0)
+
+#define PRESTERA_DSA_VID GENMASK(15, 12)
+#define PRESTERA_DSA_DEV_NUM GENMASK(11, 5)
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ enum prestera_dsa_cmd cmd;
+ u32 words[4];
+ u32 field;
+
+ words[0] = ntohl(dsa_words[0]);
+ words[1] = ntohl(dsa_words[1]);
+ words[2] = ntohl(dsa_words[2]);
+ words[3] = ntohl(dsa_words[3]);
+
+ /* set the common parameters */
+ cmd = (enum prestera_dsa_cmd)FIELD_GET(PRESTERA_DSA_W0_CMD, words[0]);
+
+ /* only the "To CPU" command is supported */
+ if (unlikely(cmd != PRESTERA_DSA_CMD_TO_CPU))
+ return -EINVAL;
+
+ if (FIELD_GET(PRESTERA_DSA_W0_EXT_BIT, words[0]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W1_EXT_BIT, words[1]) == 0)
+ return -EINVAL;
+ if (FIELD_GET(PRESTERA_DSA_W2_EXT_BIT, words[2]) == 0)
+ return -EINVAL;
+
+ field = FIELD_GET(PRESTERA_DSA_W3_VID, words[3]);
+
+ dsa->vlan.is_tagged = FIELD_GET(PRESTERA_DSA_W0_IS_TAGGED, words[0]);
+ dsa->vlan.cfi_bit = FIELD_GET(PRESTERA_DSA_W1_CFI_BIT, words[1]);
+ dsa->vlan.vpt = FIELD_GET(PRESTERA_DSA_W0_VPT, words[0]);
+ dsa->vlan.vid = FIELD_GET(PRESTERA_DSA_W0_VID, words[0]);
+ dsa->vlan.vid &= ~PRESTERA_DSA_VID;
+ dsa->vlan.vid |= FIELD_PREP(PRESTERA_DSA_VID, field);
+
+ field = FIELD_GET(PRESTERA_DSA_W3_DEV_NUM, words[3]);
+
+ dsa->hw_dev_num = FIELD_GET(PRESTERA_DSA_W0_DEV_NUM, words[0]);
+ dsa->hw_dev_num |= FIELD_PREP(PRESTERA_DSA_DEV_NUM, field);
+
+ dsa->port_num = (FIELD_GET(PRESTERA_DSA_W0_PORT_NUM, words[0]) << 0) |
+ (FIELD_GET(PRESTERA_DSA_W1_PORT_NUM, words[1]) << 5) |
+ (FIELD_GET(PRESTERA_DSA_W2_PORT_NUM, words[2]) << 7);
+
+ return 0;
+}
+
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf)
+{
+ __be32 *dsa_words = (__be32 *)dsa_buf;
+ u32 dev_num = dsa->hw_dev_num;
+ u32 words[4] = { 0 };
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_CMD, PRESTERA_DSA_CMD_FROM_CPU);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_DEV_NUM, dev_num);
+ dev_num = FIELD_GET(PRESTERA_DSA_DEV_NUM, dev_num);
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DEV_NUM, dev_num);
+
+ words[3] |= FIELD_PREP(PRESTERA_DSA_W3_DST_EPORT, dsa->port_num);
+
+ words[0] |= FIELD_PREP(PRESTERA_DSA_W0_EXT_BIT, 1);
+ words[1] |= FIELD_PREP(PRESTERA_DSA_W1_EXT_BIT, 1);
+ words[2] |= FIELD_PREP(PRESTERA_DSA_W2_EXT_BIT, 1);
+
+ dsa_words[0] = htonl(words[0]);
+ dsa_words[1] = htonl(words[1]);
+ dsa_words[2] = htonl(words[2]);
+ dsa_words[3] = htonl(words[3]);
+
+ return 0;
+}
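
prestera_dsa_build() emits a 16-byte, four-word extended FROM_CPU tag. A
short usage sketch, steering a frame to a given front-panel port (the
placement comment reflects the usual Marvell DSA convention and is an
assumption, not code from this driver):

    u8 tag[PRESTERA_DSA_HLEN];
    struct prestera_dsa dsa = {
        .hw_dev_num = port->dev_id,     /* target device in the stack */
        .port_num   = port->hw_id,      /* target front-panel port */
    };

    prestera_dsa_build(&dsa, tag);
    /* the RX/TX layer inserts these 16 bytes into the Ethernet header,
     * after the source MAC, before handing the frame to the firmware
     */
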
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_dsa.h b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
new file mode 100644
index 000000000000..67018629bdd2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_dsa.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_DSA_H_
+#define __PRESTERA_DSA_H_
+
+#include <linux/types.h>
+
+#define PRESTERA_DSA_HLEN 16
+
+enum prestera_dsa_cmd {
+ /* DSA command is "To CPU" */
+ PRESTERA_DSA_CMD_TO_CPU = 0,
+
+ /* DSA command is "From CPU" */
+ PRESTERA_DSA_CMD_FROM_CPU,
+};
+
+struct prestera_dsa_vlan {
+ u16 vid;
+ u8 vpt;
+ u8 cfi_bit;
+ bool is_tagged;
+};
+
+struct prestera_dsa {
+ struct prestera_dsa_vlan vlan;
+ u32 hw_dev_num;
+ u32 port_num;
+};
+
+int prestera_dsa_parse(struct prestera_dsa *dsa, const u8 *dsa_buf);
+int prestera_dsa_build(const struct prestera_dsa *dsa, u8 *dsa_buf);
+
+#endif /* __PRESTERA_DSA_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
new file mode 100644
index 000000000000..93a5e2baf808
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+
+#include "prestera_ethtool.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_STATS_CNT \
+ (sizeof(struct prestera_port_stats) / sizeof(u64))
+#define PRESTERA_STATS_IDX(name) \
+ (offsetof(struct prestera_port_stats, name) / sizeof(u64))
+#define PRESTERA_STATS_FIELD(name) \
+ [PRESTERA_STATS_IDX(name)] = __stringify(name)
+
+static const char driver_kind[] = "prestera";
+
+static const struct prestera_link_mode {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u32 speed;
+ u64 pr_mask;
+ u8 duplex;
+ u8 port_type;
+} port_link_modes[PRESTERA_LINK_MODE_MAX] = {
+ [PRESTERA_LINK_MODE_10baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Half_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ .speed = 10,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ .speed = 100,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Half] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Half,
+ .duplex = PRESTERA_PORT_DUPLEX_HALF,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseT_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseT_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_1000baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_1000baseKX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ .speed = 1000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_1000baseKX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_2500baseX_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+ .speed = 2500,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_2500baseX_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ },
+ [PRESTERA_LINK_MODE_10GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_10GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_10GbaseLR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ .speed = 10000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_10GbaseLR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_20GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
+ .speed = 20000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseCR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseCR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_25GbaseKR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseKR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_25GbaseSR_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ .speed = 25000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_25GbaseSR_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_40GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_40GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_40GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ .speed = 40000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_50GbaseCR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ },
+ [PRESTERA_LINK_MODE_50GbaseKR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_50GbaseSR2_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ .speed = 50000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseKR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_TP,
+ },
+ [PRESTERA_LINK_MODE_100GbaseSR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_FIBRE,
+ },
+ [PRESTERA_LINK_MODE_100GbaseCR4_Full] = {
+ .eth_mode = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ .speed = 100000,
+ .pr_mask = 1 << PRESTERA_LINK_MODE_100GbaseCR4_Full,
+ .duplex = PRESTERA_PORT_DUPLEX_FULL,
+ .port_type = PRESTERA_PORT_TYPE_DA,
+ }
+};
+
+static const struct prestera_fec {
+ u32 eth_fec;
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 pr_fec;
+} port_fec_caps[PRESTERA_PORT_FEC_MAX] = {
+ [PRESTERA_PORT_FEC_OFF] = {
+ .eth_fec = ETHTOOL_FEC_OFF,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_OFF,
+ },
+ [PRESTERA_PORT_FEC_BASER] = {
+ .eth_fec = ETHTOOL_FEC_BASER,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_BASER,
+ },
+ [PRESTERA_PORT_FEC_RS] = {
+ .eth_fec = ETHTOOL_FEC_RS,
+ .eth_mode = ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ .pr_fec = 1 << PRESTERA_PORT_FEC_RS,
+ }
+};
+
+static const struct prestera_port_type {
+ enum ethtool_link_mode_bit_indices eth_mode;
+ u8 eth_type;
+} port_types[PRESTERA_PORT_TYPE_MAX] = {
+ [PRESTERA_PORT_TYPE_NONE] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_NONE,
+ },
+ [PRESTERA_PORT_TYPE_TP] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_AUI] = {
+ .eth_mode = ETHTOOL_LINK_MODE_AUI_BIT,
+ .eth_type = PORT_AUI,
+ },
+ [PRESTERA_PORT_TYPE_MII] = {
+ .eth_mode = ETHTOOL_LINK_MODE_MII_BIT,
+ .eth_type = PORT_MII,
+ },
+ [PRESTERA_PORT_TYPE_FIBRE] = {
+ .eth_mode = ETHTOOL_LINK_MODE_FIBRE_BIT,
+ .eth_type = PORT_FIBRE,
+ },
+ [PRESTERA_PORT_TYPE_BNC] = {
+ .eth_mode = ETHTOOL_LINK_MODE_BNC_BIT,
+ .eth_type = PORT_BNC,
+ },
+ [PRESTERA_PORT_TYPE_DA] = {
+ .eth_mode = ETHTOOL_LINK_MODE_TP_BIT,
+ .eth_type = PORT_TP,
+ },
+ [PRESTERA_PORT_TYPE_OTHER] = {
+ .eth_mode = __ETHTOOL_LINK_MODE_MASK_NBITS,
+ .eth_type = PORT_OTHER,
+ }
+};
+
+static const char prestera_cnt_name[PRESTERA_STATS_CNT][ETH_GSTRING_LEN] = {
+ PRESTERA_STATS_FIELD(good_octets_received),
+ PRESTERA_STATS_FIELD(bad_octets_received),
+ PRESTERA_STATS_FIELD(mac_trans_error),
+ PRESTERA_STATS_FIELD(broadcast_frames_received),
+ PRESTERA_STATS_FIELD(multicast_frames_received),
+ PRESTERA_STATS_FIELD(frames_64_octets),
+ PRESTERA_STATS_FIELD(frames_65_to_127_octets),
+ PRESTERA_STATS_FIELD(frames_128_to_255_octets),
+ PRESTERA_STATS_FIELD(frames_256_to_511_octets),
+ PRESTERA_STATS_FIELD(frames_512_to_1023_octets),
+ PRESTERA_STATS_FIELD(frames_1024_to_max_octets),
+ PRESTERA_STATS_FIELD(excessive_collision),
+ PRESTERA_STATS_FIELD(multicast_frames_sent),
+ PRESTERA_STATS_FIELD(broadcast_frames_sent),
+ PRESTERA_STATS_FIELD(fc_sent),
+ PRESTERA_STATS_FIELD(fc_received),
+ PRESTERA_STATS_FIELD(buffer_overrun),
+ PRESTERA_STATS_FIELD(undersize),
+ PRESTERA_STATS_FIELD(fragments),
+ PRESTERA_STATS_FIELD(oversize),
+ PRESTERA_STATS_FIELD(jabber),
+ PRESTERA_STATS_FIELD(rx_error_frame_received),
+ PRESTERA_STATS_FIELD(bad_crc),
+ PRESTERA_STATS_FIELD(collisions),
+ PRESTERA_STATS_FIELD(late_collision),
+ PRESTERA_STATS_FIELD(unicast_frames_received),
+ PRESTERA_STATS_FIELD(unicast_frames_sent),
+ PRESTERA_STATS_FIELD(sent_multiple),
+ PRESTERA_STATS_FIELD(sent_deferred),
+ PRESTERA_STATS_FIELD(good_octets_sent),
+};
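
The PRESTERA_STATS_* macros keep this string table and struct
prestera_port_stats in lockstep: each name lands at the same u64 slot
its field occupies in the struct, which is what lets get_strings and
get_ethtool_stats below both be flat memcpy()s. For example:

    /*
     * PRESTERA_STATS_IDX(bad_octets_received)
     *   = offsetof(struct prestera_port_stats, bad_octets_received) / 8
     *   = 8 / 8 = 1
     * so prestera_cnt_name[1] is "bad_octets_received" and describes
     * data[1] reported by prestera_ethtool_get_stats().
     */
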
+
+static void prestera_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_switch *sw = port->sw;
+
+ strlcpy(drvinfo->driver, driver_kind, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, dev_name(prestera_dev(sw)),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ sw->dev->fw_rev.maj,
+ sw->dev->fw_rev.min,
+ sw->dev->fw_rev.sub);
+}
+
+static u8 prestera_port_type_get(struct prestera_port *port)
+{
+ if (port->caps.type < PRESTERA_PORT_TYPE_MAX)
+ return port_types[port->caps.type].eth_type;
+
+ return PORT_OTHER;
+}
+
+static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 type, mode;
+ int err;
+
+ for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) {
+ if (port_types[type].eth_type == ecmd->base.port &&
+ test_bit(port_types[type].eth_mode,
+ ecmd->link_modes.supported)) {
+ break;
+ }
+ }
+
+ if (type == port->caps.type)
+ return 0;
+ if (ecmd->base.autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+ if (type == PRESTERA_PORT_TYPE_MAX)
+ return -EOPNOTSUPP;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) &&
+ type == port_link_modes[mode].port_type) {
+ new_mode = mode;
+ }
+ }
+
+ if (new_mode < PRESTERA_LINK_MODE_MAX)
+ err = prestera_hw_port_link_mode_set(port, new_mode);
+ else
+ err = -EINVAL;
+
+ if (err)
+ return err;
+
+ port->caps.type = type;
+ port->autoneg = false;
+
+ return 0;
+}
+
+static void prestera_modes_to_eth(unsigned long *eth_modes, u64 link_modes,
+ u8 fec, u8 type)
+{
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask & link_modes) == 0)
+ continue;
+
+ if (type != PRESTERA_PORT_TYPE_NONE &&
+ port_link_modes[mode].port_type != type)
+ continue;
+
+ __set_bit(port_link_modes[mode].eth_mode, eth_modes);
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & fec) == 0)
+ continue;
+
+ __set_bit(port_fec_caps[mode].eth_mode, eth_modes);
+ }
+}
+
+static void prestera_modes_from_eth(const unsigned long *eth_modes,
+ u64 *link_modes, u8 *fec, u8 type)
+{
+ u64 adver_modes = 0;
+ u32 fec_modes = 0;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (!test_bit(port_link_modes[mode].eth_mode, eth_modes))
+ continue;
+
+ if (port_link_modes[mode].port_type != type)
+ continue;
+
+ adver_modes |= port_link_modes[mode].pr_mask;
+ }
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if (!test_bit(port_fec_caps[mode].eth_mode, eth_modes))
+ continue;
+
+ fec_modes |= port_fec_caps[mode].pr_fec;
+ }
+
+ *link_modes = adver_modes;
+ *fec = fec_modes;
+}
+
+static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 mode;
+ u8 ptype;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if ((port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes) == 0)
+ continue;
+
+ ptype = port_link_modes[mode].port_type;
+ __set_bit(port_types[ptype].eth_mode,
+ ecmd->link_modes.supported);
+ }
+}
+
+static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ bool asym_pause;
+ bool pause;
+ u64 bitmap;
+ int err;
+
+ err = prestera_hw_port_remote_cap_get(port, &bitmap);
+ if (!err) {
+ prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
+ bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+
+ if (!bitmap_empty(ecmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Autoneg);
+ }
+ }
+
+ err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause);
+ if (err)
+ return;
+
+ if (pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Pause);
+ if (asym_pause)
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Asym_Pause);
+}
+
+static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_speed_get(port, &speed);
+ ecmd->base.speed = err ? SPEED_UNKNOWN : speed;
+}
+
+static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u8 duplex;
+ int err;
+
+ err = prestera_hw_port_duplex_get(port, &duplex);
+ if (err) {
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ return;
+ }
+
+ ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+}
+
+static int
+prestera_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ ethtool_link_ksettings_zero_link_mode(ecmd, supported);
+ ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
+ ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising);
+
+ ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, Autoneg);
+
+ if (netif_running(dev) &&
+ (port->autoneg ||
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER))
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ Autoneg);
+ }
+
+ prestera_modes_to_eth(ecmd->link_modes.supported,
+ port->caps.supp_link_modes,
+ port->caps.supp_fec,
+ port->caps.type);
+
+ prestera_port_supp_types_get(ecmd, port);
+
+ if (netif_carrier_ok(dev)) {
+ prestera_port_speed_get(ecmd, port);
+ prestera_port_duplex_get(ecmd, port);
+ } else {
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ ecmd->base.port = prestera_port_type_get(port);
+
+ if (port->autoneg) {
+ if (netif_running(dev))
+ prestera_modes_to_eth(ecmd->link_modes.advertising,
+ port->adver_link_modes,
+ port->adver_fec,
+ port->caps.type);
+
+ if (netif_carrier_ok(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_port_remote_cap_get(ecmd, port);
+ }
+
+ if (port->caps.type == PRESTERA_PORT_TYPE_TP &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
+ prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix,
+ &ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_mdix_set(port,
+ ecmd->base.eth_tp_mdix_ctrl);
+
+ return 0;
+}
+
+static int prestera_port_link_mode_set(struct prestera_port *port,
+ u32 speed, u8 duplex, u8 type)
+{
+ u32 new_mode = PRESTERA_LINK_MODE_MAX;
+ u32 mode;
+
+ for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
+ if (speed != port_link_modes[mode].speed)
+ continue;
+
+ if (duplex != port_link_modes[mode].duplex)
+ continue;
+
+ if (!(port_link_modes[mode].pr_mask &
+ port->caps.supp_link_modes))
+ continue;
+
+ if (type != port_link_modes[mode].port_type)
+ continue;
+
+ new_mode = mode;
+ break;
+ }
+
+ if (new_mode == PRESTERA_LINK_MODE_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_link_mode_set(port, new_mode);
+}
+
+static int
+prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
+{
+ u32 curr_mode;
+ u8 duplex;
+ u32 speed;
+ int err;
+
+ err = prestera_hw_port_link_mode_get(port, &curr_mode);
+ if (err)
+ return err;
+ if (curr_mode >= PRESTERA_LINK_MODE_MAX)
+ return -EINVAL;
+
+ if (ecmd->base.duplex != DUPLEX_UNKNOWN)
+ duplex = ecmd->base.duplex == DUPLEX_FULL ?
+ PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF;
+ else
+ duplex = port_link_modes[curr_mode].duplex;
+
+ if (ecmd->base.speed != SPEED_UNKNOWN)
+ speed = ecmd->base.speed;
+ else
+ speed = port_link_modes[curr_mode].speed;
+
+ return prestera_port_link_mode_set(port, speed, duplex,
+ port->caps.type);
+}
+
+static int
+prestera_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u64 adver_modes;
+ u8 adver_fec;
+ int err;
+
+ err = prestera_port_type_set(ecmd, port);
+ if (err)
+ return err;
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER) {
+ err = prestera_port_mdix_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes,
+ &adver_fec, port->caps.type);
+
+ err = prestera_port_autoneg_set(port,
+ ecmd->base.autoneg == AUTONEG_ENABLE,
+ adver_modes, adver_fec);
+ if (err)
+ return err;
+
+ if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+ err = prestera_port_speed_duplex_set(ecmd, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_ethtool_get_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 active;
+ u32 mode;
+ int err;
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fecparam->fec = 0;
+
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].pr_fec & port->caps.supp_fec) == 0)
+ continue;
+
+ fecparam->fec |= port_fec_caps[mode].eth_fec;
+ }
+
+ if (active < PRESTERA_PORT_FEC_MAX)
+ fecparam->active_fec = port_fec_caps[active].eth_fec;
+ else
+ fecparam->active_fec = ETHTOOL_FEC_AUTO;
+
+ return 0;
+}
+
+static int prestera_ethtool_set_fecparam(struct net_device *dev,
+ struct ethtool_fecparam *fecparam)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ u8 fec, active;
+ u32 mode;
+ int err;
+
+ if (port->autoneg) {
+ netdev_err(dev, "FEC set is not allowed while autoneg is on\n");
+ return -EINVAL;
+ }
+
+ err = prestera_hw_port_fec_get(port, &active);
+ if (err)
+ return err;
+
+ fec = PRESTERA_PORT_FEC_MAX;
+ for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
+ if ((port_fec_caps[mode].eth_fec & fecparam->fec) &&
+ (port_fec_caps[mode].pr_fec & port->caps.supp_fec)) {
+ fec = mode;
+ break;
+ }
+ }
+
+ if (fec == active)
+ return 0;
+
+ if (fec == PRESTERA_PORT_FEC_MAX)
+ return -EOPNOTSUPP;
+
+ return prestera_hw_port_fec_set(port, fec);
+}
+
+static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return PRESTERA_STATS_CNT;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void prestera_ethtool_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, prestera_cnt_name, sizeof(prestera_cnt_name));
+}
+
+static void prestera_ethtool_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats;
+
+ port_stats = &port->cached_hw_stats.stats;
+
+ memcpy(data, port_stats, sizeof(*port_stats));
+}
+
+static int prestera_ethtool_nway_reset(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ if (netif_running(dev) &&
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP)
+ return prestera_hw_port_autoneg_restart(port);
+
+ return -EINVAL;
+}
+
+const struct ethtool_ops prestera_ethtool_ops = {
+ .get_drvinfo = prestera_ethtool_get_drvinfo,
+ .get_link_ksettings = prestera_ethtool_get_link_ksettings,
+ .set_link_ksettings = prestera_ethtool_set_link_ksettings,
+ .get_fecparam = prestera_ethtool_get_fecparam,
+ .set_fecparam = prestera_ethtool_set_fecparam,
+ .get_sset_count = prestera_ethtool_get_sset_count,
+ .get_strings = prestera_ethtool_get_strings,
+ .get_ethtool_stats = prestera_ethtool_get_stats,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = prestera_ethtool_nway_reset
+};
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
new file mode 100644
index 000000000000..523ef1f592ce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef __PRESTERA_ETHTOOL_H_
+#define __PRESTERA_ETHTOOL_H_
+
+#include <linux/ethtool.h>
+
+extern const struct ethtool_ops prestera_ethtool_ops;
+
+#endif /* __PRESTERA_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
new file mode 100644
index 000000000000..0424718d5998
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -0,0 +1,1253 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/list.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+
+#define PRESTERA_SWITCH_INIT_TIMEOUT_MS (30 * 1000)
+
+#define PRESTERA_MIN_MTU 64
+
+enum prestera_cmd_type_t {
+ PRESTERA_CMD_TYPE_SWITCH_INIT = 0x1,
+ PRESTERA_CMD_TYPE_SWITCH_ATTR_SET = 0x2,
+
+ PRESTERA_CMD_TYPE_PORT_ATTR_SET = 0x100,
+ PRESTERA_CMD_TYPE_PORT_ATTR_GET = 0x101,
+ PRESTERA_CMD_TYPE_PORT_INFO_GET = 0x110,
+
+ PRESTERA_CMD_TYPE_VLAN_CREATE = 0x200,
+ PRESTERA_CMD_TYPE_VLAN_DELETE = 0x201,
+ PRESTERA_CMD_TYPE_VLAN_PORT_SET = 0x202,
+ PRESTERA_CMD_TYPE_VLAN_PVID_SET = 0x203,
+
+ PRESTERA_CMD_TYPE_FDB_ADD = 0x300,
+ PRESTERA_CMD_TYPE_FDB_DELETE = 0x301,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT = 0x310,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN = 0x311,
+ PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN = 0x312,
+
+ PRESTERA_CMD_TYPE_BRIDGE_CREATE = 0x400,
+ PRESTERA_CMD_TYPE_BRIDGE_DELETE = 0x401,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD = 0x402,
+ PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE = 0x403,
+
+ PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
+ PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
+
+ PRESTERA_CMD_TYPE_STP_PORT_SET = 0x1000,
+
+ PRESTERA_CMD_TYPE_ACK = 0x10000,
+ PRESTERA_CMD_TYPE_MAX
+};
+
+enum {
+ PRESTERA_CMD_PORT_ATTR_ADMIN_STATE = 1,
+ PRESTERA_CMD_PORT_ATTR_MTU = 3,
+ PRESTERA_CMD_PORT_ATTR_MAC = 4,
+ PRESTERA_CMD_PORT_ATTR_SPEED = 5,
+ PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE = 6,
+ PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
+ PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
+ PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10,
+ PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11,
+ PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12,
+ PRESTERA_CMD_PORT_ATTR_TYPE = 13,
+ PRESTERA_CMD_PORT_ATTR_FEC = 14,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG = 15,
+ PRESTERA_CMD_PORT_ATTR_DUPLEX = 16,
+ PRESTERA_CMD_PORT_ATTR_STATS = 17,
+ PRESTERA_CMD_PORT_ATTR_MDIX = 18,
+ PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19,
+};
+
+enum {
+ PRESTERA_CMD_SWITCH_ATTR_MAC = 1,
+ PRESTERA_CMD_SWITCH_ATTR_AGEING = 2,
+};
+
+enum {
+ PRESTERA_CMD_ACK_OK,
+ PRESTERA_CMD_ACK_FAILED,
+
+ PRESTERA_CMD_ACK_MAX
+};
+
+enum {
+ PRESTERA_PORT_TP_NA,
+ PRESTERA_PORT_TP_MDI,
+ PRESTERA_PORT_TP_MDIX,
+ PRESTERA_PORT_TP_AUTO,
+};
+
+enum {
+ PRESTERA_PORT_GOOD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_BAD_OCTETS_RCV_CNT,
+ PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT,
+ PRESTERA_PORT_BRDC_PKTS_RCV_CNT,
+ PRESTERA_PORT_MC_PKTS_RCV_CNT,
+ PRESTERA_PORT_PKTS_64L_CNT,
+ PRESTERA_PORT_PKTS_65TO127L_CNT,
+ PRESTERA_PORT_PKTS_128TO255L_CNT,
+ PRESTERA_PORT_PKTS_256TO511L_CNT,
+ PRESTERA_PORT_PKTS_512TO1023L_CNT,
+ PRESTERA_PORT_PKTS_1024TOMAXL_CNT,
+ PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT,
+ PRESTERA_PORT_MC_PKTS_SENT_CNT,
+ PRESTERA_PORT_BRDC_PKTS_SENT_CNT,
+ PRESTERA_PORT_FC_SENT_CNT,
+ PRESTERA_PORT_GOOD_FC_RCV_CNT,
+ PRESTERA_PORT_DROP_EVENTS_CNT,
+ PRESTERA_PORT_UNDERSIZE_PKTS_CNT,
+ PRESTERA_PORT_FRAGMENTS_PKTS_CNT,
+ PRESTERA_PORT_OVERSIZE_PKTS_CNT,
+ PRESTERA_PORT_JABBER_PKTS_CNT,
+ PRESTERA_PORT_MAC_RCV_ERROR_CNT,
+ PRESTERA_PORT_BAD_CRC_CNT,
+ PRESTERA_PORT_COLLISIONS_CNT,
+ PRESTERA_PORT_LATE_COLLISIONS_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT,
+ PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT,
+ PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT,
+ PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT,
+ PRESTERA_PORT_GOOD_OCTETS_SENT_CNT,
+
+ PRESTERA_PORT_CNT_MAX
+};
+
+enum {
+ PRESTERA_FC_NONE,
+ PRESTERA_FC_SYMMETRIC,
+ PRESTERA_FC_ASYMMETRIC,
+ PRESTERA_FC_SYMM_ASYMM,
+};
+
+struct prestera_fw_event_handler {
+ struct list_head list;
+ struct rcu_head rcu;
+ enum prestera_event_type type;
+ prestera_event_cb_t func;
+ void *arg;
+};
+
+struct prestera_msg_cmd {
+ u32 type;
+};
+
+struct prestera_msg_ret {
+ struct prestera_msg_cmd cmd;
+ u32 status;
+};
+
+struct prestera_msg_common_req {
+ struct prestera_msg_cmd cmd;
+};
+
+struct prestera_msg_common_resp {
+ struct prestera_msg_ret ret;
+};
+
+union prestera_msg_switch_param {
+ u8 mac[ETH_ALEN];
+ u32 ageing_timeout_ms;
+};
+
+struct prestera_msg_switch_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ union prestera_msg_switch_param param;
+};
+
+struct prestera_msg_switch_init_resp {
+ struct prestera_msg_ret ret;
+ u32 port_count;
+ u32 mtu_max;
+ u8 switch_id;
+};
+
+struct prestera_msg_port_autoneg_param {
+ u64 link_mode;
+ u8 enable;
+ u8 fec;
+};
+
+struct prestera_msg_port_cap_param {
+ u64 link_mode;
+ u8 type;
+ u8 fec;
+ u8 transceiver;
+};
+
+struct prestera_msg_port_mdix_param {
+ u8 status;
+ u8 admin_mode;
+};
+
+union prestera_msg_port_param {
+ u8 admin_state;
+ u8 oper_state;
+ u32 mtu;
+ u8 mac[ETH_ALEN];
+ u8 accept_frm_type;
+ u32 speed;
+ u8 learning;
+ u8 flood;
+ u32 link_mode;
+ u8 type;
+ u8 duplex;
+ u8 fec;
+ u8 fc;
+ struct prestera_msg_port_mdix_param mdix;
+ struct prestera_msg_port_autoneg_param autoneg;
+ struct prestera_msg_port_cap_param cap;
+};
+
+struct prestera_msg_port_attr_req {
+ struct prestera_msg_cmd cmd;
+ u32 attr;
+ u32 port;
+ u32 dev;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_attr_resp {
+ struct prestera_msg_ret ret;
+ union prestera_msg_port_param param;
+};
+
+struct prestera_msg_port_stats_resp {
+ struct prestera_msg_ret ret;
+ u64 stats[PRESTERA_PORT_CNT_MAX];
+};
+
+struct prestera_msg_port_info_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+};
+
+struct prestera_msg_port_info_resp {
+ struct prestera_msg_ret ret;
+ u32 hw_id;
+ u32 dev_id;
+ u16 fp_id;
+};
+
+struct prestera_msg_vlan_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 is_member;
+ u8 is_tagged;
+};
+
+struct prestera_msg_fdb_req {
+ struct prestera_msg_cmd cmd;
+ u8 dest_type;
+ u32 port;
+ u32 dev;
+ u8 mac[ETH_ALEN];
+ u16 vid;
+ u8 dynamic;
+ u32 flush_mode;
+};
+
+struct prestera_msg_bridge_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 bridge;
+};
+
+struct prestera_msg_bridge_resp {
+ struct prestera_msg_ret ret;
+ u16 bridge;
+};
+
+struct prestera_msg_stp_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+ u16 vid;
+ u8 state;
+};
+
+struct prestera_msg_rxtx_req {
+ struct prestera_msg_cmd cmd;
+ u8 use_sdma;
+};
+
+struct prestera_msg_rxtx_resp {
+ struct prestera_msg_ret ret;
+ u32 map_addr;
+};
+
+struct prestera_msg_rxtx_port_req {
+ struct prestera_msg_cmd cmd;
+ u32 port;
+ u32 dev;
+};
+
+struct prestera_msg_event {
+ u16 type;
+ u16 id;
+};
+
+union prestera_msg_event_port_param {
+ u32 oper_state;
+};
+
+struct prestera_msg_event_port {
+ struct prestera_msg_event id;
+ u32 port_id;
+ union prestera_msg_event_port_param param;
+};
+
+union prestera_msg_event_fdb_param {
+ u8 mac[ETH_ALEN];
+};
+
+struct prestera_msg_event_fdb {
+ struct prestera_msg_event id;
+ u8 dest_type;
+ u32 port_id;
+ u32 vid;
+ union prestera_msg_event_fdb_param param;
+};
+
+static int __prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ struct prestera_device *dev = sw->dev;
+ int err;
+
+ cmd->type = type;
+
+ err = dev->send_req(dev, cmd, clen, ret, rlen, waitms);
+ if (err)
+ return err;
+
+ if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK)
+ return -EBADE;
+ if (ret->status != PRESTERA_CMD_ACK_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_cmd_ret(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, 0);
+}
+
+static int prestera_cmd_ret_wait(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen,
+ struct prestera_msg_ret *ret, size_t rlen,
+ int waitms)
+{
+ return __prestera_cmd_ret(sw, type, cmd, clen, ret, rlen, waitms);
+}
+
+static int prestera_cmd(struct prestera_switch *sw,
+ enum prestera_cmd_type_t type,
+ struct prestera_msg_cmd *cmd, size_t clen)
+{
+ struct prestera_msg_common_resp resp;
+
+ return prestera_cmd_ret(sw, type, cmd, clen, &resp.ret, sizeof(resp));
+}
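
Every firmware call in this file follows the framing that
__prestera_cmd_ret() assumes, so the cmd/ret headers must stay the first
member of every message struct:

    /*
     * req:  [ prestera_msg_cmd.type | payload ... ]
     * resp: [ prestera_msg_cmd.type == PRESTERA_CMD_TYPE_ACK
     *         | status (PRESTERA_CMD_ACK_*) | payload ... ]
     *
     * Anything other than an ACK_OK response is treated as failure
     * (-EBADE for a bad type, -EINVAL for a bad status).
     */
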
+
+static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_port *hw_evt = msg;
+
+ if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED)
+ return -EINVAL;
+
+ evt->port_evt.data.oper_state = hw_evt->param.oper_state;
+ evt->port_evt.port_id = hw_evt->port_id;
+
+ return 0;
+}
+
+static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
+{
+ struct prestera_msg_event_fdb *hw_evt = msg;
+
+ evt->fdb_evt.port_id = hw_evt->port_id;
+ evt->fdb_evt.vid = hw_evt->vid;
+
+ ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
+
+ return 0;
+}
+
+static struct prestera_fw_evt_parser {
+ int (*func)(void *msg, struct prestera_event *evt);
+} fw_event_parsers[PRESTERA_EVENT_TYPE_MAX] = {
+ [PRESTERA_EVENT_TYPE_PORT] = { .func = prestera_fw_parse_port_evt },
+ [PRESTERA_EVENT_TYPE_FDB] = { .func = prestera_fw_parse_fdb_evt },
+};
+
+static struct prestera_fw_event_handler *
+__find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type)
+{
+ struct prestera_fw_event_handler *eh;
+
+ list_for_each_entry_rcu(eh, &sw->event_handlers, list) {
+ if (eh->type == type)
+ return eh;
+ }
+
+ return NULL;
+}
+
+static int prestera_find_event_handler(const struct prestera_switch *sw,
+ enum prestera_event_type type,
+ struct prestera_fw_event_handler *eh)
+{
+ struct prestera_fw_event_handler *tmp;
+ int err = 0;
+
+ rcu_read_lock();
+ tmp = __find_event_handler(sw, type);
+ if (tmp)
+ *eh = *tmp;
+ else
+ err = -ENOENT;
+ rcu_read_unlock();
+
+ return err;
+}
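
Why the copy-out by value above, rather than returning the list entry:

    /* The matching handler is copied inside the RCU read-side section,
     * so the caller can invoke eh.func() after rcu_read_unlock(); a
     * concurrent unregistration then at worst delivers one final event
     * through the copied func/arg pair.
     */
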
+
+static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_msg_event *msg = buf;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event evt;
+ int err;
+
+ if (msg->type >= PRESTERA_EVENT_TYPE_MAX)
+ return -EINVAL;
+ if (!fw_event_parsers[msg->type].func)
+ return -ENOENT;
+
+ err = prestera_find_event_handler(sw, msg->type, &eh);
+ if (err)
+ return err;
+
+ evt.id = msg->id;
+
+ err = fw_event_parsers[msg->type].func(buf, &evt);
+ if (err)
+ return err;
+
+ eh.func(sw, &evt, eh.arg);
+
+ return 0;
+}
+
+static void prestera_pkt_recv(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+ struct prestera_fw_event_handler eh;
+ struct prestera_event ev;
+ int err;
+
+ ev.id = PRESTERA_RXTX_EVENT_RCV_PKT;
+
+ err = prestera_find_event_handler(sw, PRESTERA_EVENT_TYPE_RXTX, &eh);
+ if (err)
+ return;
+
+ eh.func(sw, &ev, eh.arg);
+}
+
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id)
+{
+ struct prestera_msg_port_info_req req = {
+ .port = port->id,
+ };
+ struct prestera_msg_port_info_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_INFO_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *dev_id = resp.dev_id;
+ *hw_id = resp.hw_id;
+ *fp_id = resp.fp_id;
+
+ return 0;
+}
+
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_MAC,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_msg_switch_init_resp resp;
+ struct prestera_msg_common_req req;
+ int err;
+
+ INIT_LIST_HEAD(&sw->event_handlers);
+
+ err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp),
+ PRESTERA_SWITCH_INIT_TIMEOUT_MS);
+ if (err)
+ return err;
+
+ sw->dev->recv_msg = prestera_evt_recv;
+ sw->dev->recv_pkt = prestera_pkt_recv;
+ sw->port_count = resp.port_count;
+ sw->mtu_min = PRESTERA_MIN_MTU;
+ sw->mtu_max = resp.mtu_max;
+ sw->id = resp.switch_id;
+
+ return 0;
+}
+
+void prestera_hw_switch_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->event_handlers));
+}
+
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
+{
+ struct prestera_msg_switch_attr_req req = {
+ .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING,
+ .param = {
+ .ageing_timeout_ms = ageing_ms,
+ },
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_SWITCH_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .admin_state = admin_state,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MTU,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .mtu = mtu,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MAC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ ether_addr_copy(req.param.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .accept_frm_type = type,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ caps->supp_link_modes = resp.param.cap.link_mode;
+ caps->transceiver = resp.param.cap.transceiver;
+ caps->supp_fec = resp.param.cap.fec;
+ caps->type = resp.param.cap.type;
+
+ return err;
+}
+
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *link_mode_bitmap = resp.param.cap.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ switch (resp.param.fc) {
+ case PRESTERA_FC_SYMMETRIC:
+ *pause = true;
+ *asym_pause = false;
+ break;
+ case PRESTERA_FC_ASYMMETRIC:
+ *pause = false;
+ *asym_pause = true;
+ break;
+ case PRESTERA_FC_SYMM_ASYMM:
+ *pause = true;
+ *asym_pause = true;
+ break;
+ default:
+ *pause = false;
+ *asym_pause = false;
+ }
+
+ return 0;
+}
+
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_TYPE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *type = resp.param.type;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *fec = resp.param.fec;
+
+ return 0;
+}
+
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FEC,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .fec = fec,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+static u8 prestera_hw_mdix_to_eth(u8 mode)
+{
+ switch (mode) {
+ case PRESTERA_PORT_TP_MDI:
+ return ETH_TP_MDI;
+ case PRESTERA_PORT_TP_MDIX:
+ return ETH_TP_MDI_X;
+ case PRESTERA_PORT_TP_AUTO:
+ return ETH_TP_MDI_AUTO;
+ default:
+ return ETH_TP_MDI_INVALID;
+ }
+}
+
+static u8 prestera_hw_mdix_from_eth(u8 mode)
+{
+ switch (mode) {
+ case ETH_TP_MDI:
+ return PRESTERA_PORT_TP_MDI;
+ case ETH_TP_MDI_X:
+ return PRESTERA_PORT_TP_MDIX;
+ case ETH_TP_MDI_AUTO:
+ return PRESTERA_PORT_TP_AUTO;
+ default:
+ return PRESTERA_PORT_TP_NA;
+ }
+}
+
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *status = prestera_hw_mdix_to_eth(resp.param.mdix.status);
+ *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode);
+
+ return 0;
+}
+
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .link_mode = mode,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *mode = resp.param.link_mode;
+
+ return 0;
+}
+
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_SPEED,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *speed = resp.param.speed;
+
+ return 0;
+}
+
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .autoneg = {
+ .link_mode = link_modes,
+ .enable = autoneg,
+ .fec = fec,
+ }
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_autoneg_restart(struct prestera_port *port)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_attr_resp resp;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *duplex = resp.param.duplex;
+
+ return 0;
+}
+
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *st)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_STATS,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+ struct prestera_msg_port_stats_resp resp;
+ u64 *hw = resp.stats;
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT];
+ st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT];
+ st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT];
+ st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT];
+ st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT];
+ st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT];
+ st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT];
+ st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT];
+ st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT];
+ st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT];
+ st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT];
+ st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT];
+ st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT];
+ st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT];
+ st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT];
+ st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT];
+ st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT];
+ st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT];
+ st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT];
+ st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT];
+ st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT];
+ st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT];
+ st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT];
+ st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT];
+ st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT];
+ st->unicast_frames_received = hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT];
+ st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT];
+ st->sent_multiple = hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT];
+ st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT];
+ st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT];
+
+ return 0;
+}
+
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_LEARNING,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .learning = enable,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .param = {
+ .flood = flood,
+ }
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .vid = vid,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .is_member = is_member,
+ .is_tagged = !untagged,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
+{
+ struct prestera_msg_vlan_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
+{
+ struct prestera_msg_stp_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .state = state,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_STP_PORT_SET,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .dynamic = dynamic,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ };
+
+ ether_addr_copy(req.mac, mac);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode)
+{
+ struct prestera_msg_fdb_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ .vid = vid,
+ .flush_mode = mode,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
+{
+ struct prestera_msg_bridge_resp resp;
+ struct prestera_msg_bridge_req req;
+ int err;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_BRIDGE_CREATE,
+ &req.cmd, sizeof(req),
+ &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ *bridge_id = resp.bridge;
+
+ return 0;
+}
+
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ };
+
+ return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
+{
+ struct prestera_msg_bridge_req req = {
+ .bridge = bridge_id,
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params)
+{
+ struct prestera_msg_rxtx_resp resp;
+ struct prestera_msg_rxtx_req req;
+ int err;
+
+ req.use_sdma = params->use_sdma;
+
+ err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_RXTX_INIT,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ params->map_addr = resp.map_addr;
+
+ return 0;
+}
+
+int prestera_hw_rxtx_port_init(struct prestera_port *port)
+{
+ struct prestera_msg_rxtx_port_req req = {
+ .port = port->hw_id,
+ .dev = port->dev_id,
+ };
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT,
+ &req.cmd, sizeof(req));
+}
+
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (eh)
+ return -EEXIST;
+
+ eh = kmalloc(sizeof(*eh), GFP_KERNEL);
+ if (!eh)
+ return -ENOMEM;
+
+ eh->type = type;
+ eh->func = fn;
+ eh->arg = arg;
+
+ INIT_LIST_HEAD(&eh->list);
+
+ list_add_rcu(&eh->list, &sw->event_handlers);
+
+ return 0;
+}
+
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn)
+{
+ struct prestera_fw_event_handler *eh;
+
+ eh = __find_event_handler(sw, type);
+ if (!eh)
+ return;
+
+ list_del_rcu(&eh->list);
+ kfree_rcu(eh, rcu);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
new file mode 100644
index 000000000000..b2b5ac95b4e3
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_HW_H_
+#define _PRESTERA_HW_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+enum prestera_accept_frm_type {
+ PRESTERA_ACCEPT_FRAME_TYPE_TAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_UNTAGGED,
+ PRESTERA_ACCEPT_FRAME_TYPE_ALL,
+};
+
+enum prestera_fdb_flush_mode {
+ PRESTERA_FDB_FLUSH_MODE_DYNAMIC = BIT(0),
+ PRESTERA_FDB_FLUSH_MODE_STATIC = BIT(1),
+ PRESTERA_FDB_FLUSH_MODE_ALL = PRESTERA_FDB_FLUSH_MODE_DYNAMIC
+ | PRESTERA_FDB_FLUSH_MODE_STATIC,
+};
+
+enum {
+ PRESTERA_LINK_MODE_10baseT_Half,
+ PRESTERA_LINK_MODE_10baseT_Full,
+ PRESTERA_LINK_MODE_100baseT_Half,
+ PRESTERA_LINK_MODE_100baseT_Full,
+ PRESTERA_LINK_MODE_1000baseT_Half,
+ PRESTERA_LINK_MODE_1000baseT_Full,
+ PRESTERA_LINK_MODE_1000baseX_Full,
+ PRESTERA_LINK_MODE_1000baseKX_Full,
+ PRESTERA_LINK_MODE_2500baseX_Full,
+ PRESTERA_LINK_MODE_10GbaseKR_Full,
+ PRESTERA_LINK_MODE_10GbaseSR_Full,
+ PRESTERA_LINK_MODE_10GbaseLR_Full,
+ PRESTERA_LINK_MODE_20GbaseKR2_Full,
+ PRESTERA_LINK_MODE_25GbaseCR_Full,
+ PRESTERA_LINK_MODE_25GbaseKR_Full,
+ PRESTERA_LINK_MODE_25GbaseSR_Full,
+ PRESTERA_LINK_MODE_40GbaseKR4_Full,
+ PRESTERA_LINK_MODE_40GbaseCR4_Full,
+ PRESTERA_LINK_MODE_40GbaseSR4_Full,
+ PRESTERA_LINK_MODE_50GbaseCR2_Full,
+ PRESTERA_LINK_MODE_50GbaseKR2_Full,
+ PRESTERA_LINK_MODE_50GbaseSR2_Full,
+ PRESTERA_LINK_MODE_100GbaseKR4_Full,
+ PRESTERA_LINK_MODE_100GbaseSR4_Full,
+ PRESTERA_LINK_MODE_100GbaseCR4_Full,
+
+ PRESTERA_LINK_MODE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TYPE_NONE,
+ PRESTERA_PORT_TYPE_TP,
+ PRESTERA_PORT_TYPE_AUI,
+ PRESTERA_PORT_TYPE_MII,
+ PRESTERA_PORT_TYPE_FIBRE,
+ PRESTERA_PORT_TYPE_BNC,
+ PRESTERA_PORT_TYPE_DA,
+ PRESTERA_PORT_TYPE_OTHER,
+
+ PRESTERA_PORT_TYPE_MAX
+};
+
+enum {
+ PRESTERA_PORT_TCVR_COPPER,
+ PRESTERA_PORT_TCVR_SFP,
+
+ PRESTERA_PORT_TCVR_MAX
+};
+
+enum {
+ PRESTERA_PORT_FEC_OFF,
+ PRESTERA_PORT_FEC_BASER,
+ PRESTERA_PORT_FEC_RS,
+
+ PRESTERA_PORT_FEC_MAX
+};
+
+enum {
+ PRESTERA_PORT_DUPLEX_HALF,
+ PRESTERA_PORT_DUPLEX_FULL,
+};
+
+enum {
+ PRESTERA_STP_DISABLED,
+ PRESTERA_STP_BLOCK_LISTEN,
+ PRESTERA_STP_LEARN,
+ PRESTERA_STP_FORWARD,
+};
+
+struct prestera_switch;
+struct prestera_port;
+struct prestera_port_stats;
+struct prestera_port_caps;
+enum prestera_event_type;
+struct prestera_event;
+
+typedef void (*prestera_event_cb_t)
+ (struct prestera_switch *sw, struct prestera_event *evt, void *arg);
+
+struct prestera_rxtx_params;
+
+/* Switch API */
+int prestera_hw_switch_init(struct prestera_switch *sw);
+void prestera_hw_switch_fini(struct prestera_switch *sw);
+int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms);
+int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac);
+
+/* Port API */
+int prestera_hw_port_info_get(const struct prestera_port *port,
+ u32 *dev_id, u32 *hw_id, u16 *fp_id);
+int prestera_hw_port_state_set(const struct prestera_port *port,
+ bool admin_state);
+int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu);
+int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu);
+int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac);
+int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac);
+int prestera_hw_port_cap_get(const struct prestera_port *port,
+ struct prestera_port_caps *caps);
+int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
+ u64 *link_mode_bitmap);
+int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
+ bool *pause, bool *asym_pause);
+int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type);
+int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec);
+int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec);
+int prestera_hw_port_autoneg_set(const struct prestera_port *port,
+ bool autoneg, u64 link_modes, u8 fec);
+int prestera_hw_port_autoneg_restart(struct prestera_port *port);
+int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex);
+int prestera_hw_port_stats_get(const struct prestera_port *port,
+ struct prestera_port_stats *stats);
+int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode);
+int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode);
+int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
+ u8 *admin_mode);
+int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
+int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
+int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
+int prestera_hw_port_flood_set(struct prestera_port *port, bool flood);
+int prestera_hw_port_accept_frm_type(struct prestera_port *port,
+ enum prestera_accept_frm_type type);
+/* Vlan API */
+int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid);
+int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
+ bool is_member, bool untagged);
+int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid);
+int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state);
+
+/* FDB API */
+int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
+ u16 vid, bool dynamic);
+int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
+ u16 vid);
+int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode);
+int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode);
+int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
+ u32 mode);
+
+/* Bridge API */
+int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id);
+int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id);
+int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id);
+int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id);
+
+/* Event handlers */
+int prestera_hw_event_handler_register(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn,
+ void *arg);
+void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
+ enum prestera_event_type type,
+ prestera_event_cb_t fn);
+
+/* RX/TX */
+int prestera_hw_rxtx_init(struct prestera_switch *sw,
+ struct prestera_rxtx_params *params);
+int prestera_hw_rxtx_port_init(struct prestera_port *port);
+
+#endif /* _PRESTERA_HW_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
new file mode 100644
index 000000000000..0f20e0788cce
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -0,0 +1,667 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/etherdevice.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdev_features.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+#include "prestera_devlink.h"
+#include "prestera_ethtool.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_MTU_DEFAULT 1536
+
+#define PRESTERA_STATS_DELAY_MS 1000
+
+#define PRESTERA_MAC_ADDR_NUM_MAX 255
+
+static struct workqueue_struct *prestera_wq;
+
+int prestera_port_pvid_set(struct prestera_port *port, u16 vid)
+{
+ enum prestera_accept_frm_type frm_type;
+ int err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_TAGGED;
+
+ if (vid) {
+ err = prestera_hw_vlan_port_vid_set(port, vid);
+ if (err)
+ return err;
+
+ frm_type = PRESTERA_ACCEPT_FRAME_TYPE_ALL;
+ }
+
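+ /* roll back the HW PVID if the accepted-frame-type update fails */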
+ err = prestera_hw_port_accept_frm_type(port, frm_type);
+ if (err) {
+ if (frm_type == PRESTERA_ACCEPT_FRAME_TYPE_ALL)
+ prestera_hw_vlan_port_vid_set(port, port->pvid);
+ return err;
+ }
+
+ port->pvid = vid;
+ return 0;
+}
+
+struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
+ u32 dev_id, u32 hw_id)
+{
+ struct prestera_port *port = NULL, *tmp;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->dev_id == dev_id && tmp->hw_id == hw_id) {
+ port = tmp;
+ break;
+ }
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port = NULL, *tmp;
+
+ read_lock(&sw->port_list_lock);
+ list_for_each_entry(tmp, &sw->port_list, list) {
+ if (tmp->id == id) {
+ port = tmp;
+ break;
+ }
+ }
+ read_unlock(&sw->port_list_lock);
+
+ return port;
+}
+
+static int prestera_port_open(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_state_set(port, true);
+ if (err)
+ return err;
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int prestera_port_close(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ netif_stop_queue(dev);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ return prestera_rxtx_xmit(netdev_priv(dev), skb);
+}
+
+static int prestera_is_valid_mac_addr(struct prestera_port *port, u8 *addr)
+{
+ if (!is_valid_ether_addr(addr))
+ return -EADDRNOTAVAIL;
+
+ /* the firmware requires that the first 5 bytes of the port MAC
+ * address match the base MAC address
+ */
+ if (memcmp(port->sw->base_mac, addr, ETH_ALEN - 1))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prestera_port_set_mac_address(struct net_device *dev, void *p)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct sockaddr *addr = p;
+ int err;
+
+ err = prestera_is_valid_mac_addr(port, addr->sa_data);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_mac_set(port, addr->sa_data);
+ if (err)
+ return err;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static int prestera_port_change_mtu(struct net_device *dev, int mtu)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err;
+
+ err = prestera_hw_port_mtu_set(port, mtu);
+ if (err)
+ return err;
+
+ dev->mtu = mtu;
+
+ return 0;
+}
+
+static void prestera_port_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_stats *port_stats = &port->cached_hw_stats.stats;
+
+ stats->rx_packets = port_stats->broadcast_frames_received +
+ port_stats->multicast_frames_received +
+ port_stats->unicast_frames_received;
+
+ stats->tx_packets = port_stats->broadcast_frames_sent +
+ port_stats->multicast_frames_sent +
+ port_stats->unicast_frames_sent;
+
+ stats->rx_bytes = port_stats->good_octets_received;
+
+ stats->tx_bytes = port_stats->good_octets_sent;
+
+ stats->rx_errors = port_stats->rx_error_frame_received;
+ stats->tx_errors = port_stats->mac_trans_error;
+
+ stats->rx_dropped = port_stats->buffer_overrun;
+ stats->tx_dropped = 0;
+
+ stats->multicast = port_stats->multicast_frames_received;
+ stats->collisions = port_stats->excessive_collision;
+
+ stats->rx_crc_errors = port_stats->bad_crc;
+}
+
+static void prestera_port_get_hw_stats(struct prestera_port *port)
+{
+ prestera_hw_port_stats_get(port, &port->cached_hw_stats.stats);
+}
+
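+/* Refresh the cached HW counters; the work re-arms itself and is
+ * started/stopped by the port link-state event handler.
+ */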
+static void prestera_port_stats_update(struct work_struct *work)
+{
+ struct prestera_port *port =
+ container_of(work, struct prestera_port,
+ cached_hw_stats.caching_dw.work);
+
+ prestera_port_get_hw_stats(port);
+
+ queue_delayed_work(prestera_wq, &port->cached_hw_stats.caching_dw,
+ msecs_to_jiffies(PRESTERA_STATS_DELAY_MS));
+}
+
+static const struct net_device_ops prestera_netdev_ops = {
+ .ndo_open = prestera_port_open,
+ .ndo_stop = prestera_port_close,
+ .ndo_start_xmit = prestera_port_xmit,
+ .ndo_change_mtu = prestera_port_change_mtu,
+ .ndo_get_stats64 = prestera_port_get_stats64,
+ .ndo_set_mac_address = prestera_port_set_mac_address,
+ .ndo_get_devlink_port = prestera_devlink_get_port,
+};
+
+int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
+ u64 adver_link_modes, u8 adver_fec)
+{
+ bool refresh = false;
+ u64 link_modes;
+ int err;
+ u8 fec;
+
+ if (port->caps.type != PRESTERA_PORT_TYPE_TP)
+ return enable ? -EINVAL : 0;
+
+ if (!enable)
+ goto set_autoneg;
+
+ link_modes = port->caps.supp_link_modes & adver_link_modes;
+ fec = port->caps.supp_fec & adver_fec;
+
+ if (!link_modes && !fec)
+ return -EOPNOTSUPP;
+
+ if (link_modes && port->adver_link_modes != link_modes) {
+ port->adver_link_modes = link_modes;
+ refresh = true;
+ }
+
+ if (fec && port->adver_fec != fec) {
+ port->adver_fec = fec;
+ refresh = true;
+ }
+
+set_autoneg:
+ if (port->autoneg == enable && !refresh)
+ return 0;
+
+ err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes,
+ port->adver_fec);
+ if (err)
+ return err;
+
+ port->autoneg = enable;
+
+ return 0;
+}
+
+static void prestera_port_list_add(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_add(&port->list, &port->sw->port_list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static void prestera_port_list_del(struct prestera_port *port)
+{
+ write_lock(&port->sw->port_list_lock);
+ list_del(&port->list);
+ write_unlock(&port->sw->port_list_lock);
+}
+
+static int prestera_port_create(struct prestera_switch *sw, u32 id)
+{
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ dev = alloc_etherdev(sizeof(*port));
+ if (!dev)
+ return -ENOMEM;
+
+ port = netdev_priv(dev);
+
+ INIT_LIST_HEAD(&port->vlans_list);
+ port->pvid = PRESTERA_DEFAULT_VID;
+ port->dev = dev;
+ port->id = id;
+ port->sw = sw;
+
+ err = prestera_hw_port_info_get(port, &port->dev_id, &port->hw_id,
+ &port->fp_id);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) info\n", id);
+ goto err_port_info_get;
+ }
+
+ err = prestera_devlink_port_register(port);
+ if (err)
+ goto err_dl_port_register;
+
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->netdev_ops = &prestera_netdev_ops;
+ dev->ethtool_ops = &prestera_ethtool_ops;
+
+ netif_carrier_off(dev);
+
+ dev->mtu = min_t(unsigned int, sw->mtu_max, PRESTERA_MTU_DEFAULT);
+ dev->min_mtu = sw->mtu_min;
+ dev->max_mtu = sw->mtu_max;
+
+ err = prestera_hw_port_mtu_set(port, dev->mtu);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mtu(%d)\n",
+ id, dev->mtu);
+ goto err_port_init;
+ }
+
+ if (port->fp_id >= PRESTERA_MAC_ADDR_NUM_MAX) {
+ err = -EINVAL;
+ goto err_port_init;
+ }
+
+ /* the firmware requires that the first 5 bytes of the port MAC
+ * address match the base MAC address; the last byte is the
+ * port's unique FP id
+ */
+ memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
+ dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+
+ err = prestera_hw_port_mac_set(port, dev->dev_addr);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mac addr\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_hw_port_cap_get(port, &port->caps);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to get port(%u) caps\n", id);
+ goto err_port_init;
+ }
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ prestera_port_autoneg_set(port, true, port->caps.supp_link_modes,
+ port->caps.supp_fec);
+
+ err = prestera_hw_port_state_set(port, false);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id);
+ goto err_port_init;
+ }
+
+ err = prestera_rxtx_port_init(port);
+ if (err)
+ goto err_port_init;
+
+ INIT_DELAYED_WORK(&port->cached_hw_stats.caching_dw,
+ &prestera_port_stats_update);
+
+ prestera_port_list_add(port);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_register_netdev;
+
+ prestera_devlink_port_set(port);
+
+ return 0;
+
+err_register_netdev:
+ prestera_port_list_del(port);
+err_port_init:
+ prestera_devlink_port_unregister(port);
+err_dl_port_register:
+err_port_info_get:
+ free_netdev(dev);
+ return err;
+}
+
+static void prestera_port_destroy(struct prestera_port *port)
+{
+ struct net_device *dev = port->dev;
+
+ cancel_delayed_work_sync(&port->cached_hw_stats.caching_dw);
+ prestera_devlink_port_clear(port);
+ unregister_netdev(dev);
+ prestera_port_list_del(port);
+ prestera_devlink_port_unregister(port);
+ free_netdev(dev);
+}
+
+static void prestera_destroy_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+}
+
+static int prestera_create_ports(struct prestera_switch *sw)
+{
+ struct prestera_port *port, *tmp;
+ u32 port_idx;
+ int err;
+
+ for (port_idx = 0; port_idx < sw->port_count; port_idx++) {
+ err = prestera_port_create(sw, port_idx);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ list_for_each_entry_safe(port, tmp, &sw->port_list, list)
+ prestera_port_destroy(port);
+
+ return err;
+}
+
+static void prestera_port_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct delayed_work *caching_dw;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->port_evt.port_id);
+ if (!port || !port->dev)
+ return;
+
+ caching_dw = &port->cached_hw_stats.caching_dw;
+
+ if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) {
+ if (evt->port_evt.data.oper_state) {
+ netif_carrier_on(port->dev);
+ if (!delayed_work_pending(caching_dw))
+ queue_delayed_work(prestera_wq, caching_dw, 0);
+ } else {
+ netif_carrier_off(port->dev);
+ if (delayed_work_pending(caching_dw))
+ cancel_delayed_work(caching_dw);
+ }
+ }
+}
+
+static int prestera_event_handlers_register(struct prestera_switch *sw)
+{
+ return prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event,
+ NULL);
+}
+
+static void prestera_event_handlers_unregister(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_PORT,
+ prestera_port_handle_event);
+}
+
+static int prestera_switch_set_base_mac_addr(struct prestera_switch *sw)
+{
+ struct device_node *base_mac_np;
+ struct device_node *np;
+ const char *base_mac;
+
+ np = of_find_compatible_node(NULL, NULL, "marvell,prestera");
+ base_mac_np = of_parse_phandle(np, "base-mac-provider", 0);
+ of_node_put(np);
+
+ base_mac = of_get_mac_address(base_mac_np);
+ of_node_put(base_mac_np);
+ if (!IS_ERR(base_mac))
+ ether_addr_copy(sw->base_mac, base_mac);
+
+ if (!is_valid_ether_addr(sw->base_mac)) {
+ eth_random_addr(sw->base_mac);
+ dev_info(prestera_dev(sw), "using random base mac address\n");
+ }
+
+ return prestera_hw_switch_mac_set(sw, sw->base_mac);
+}
+
+bool prestera_netdev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &prestera_netdev_ops;
+}
+
+static int prestera_lower_dev_walk(struct net_device *dev,
+ struct netdev_nested_priv *priv)
+{
+ struct prestera_port **pport = (struct prestera_port **)priv->data;
+
+ if (prestera_netdev_check(dev)) {
+ *pport = netdev_priv(dev);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev)
+{
+ struct prestera_port *port = NULL;
+ struct netdev_nested_priv priv = {
+ .data = (void *)&port,
+ };
+
+ if (prestera_netdev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_walk_all_lower_dev(dev, prestera_lower_dev_walk, &priv);
+
+ return port;
+}
+
+static int prestera_netdev_port_event(struct net_device *dev,
+ unsigned long event, void *ptr)
+{
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ case NETDEV_CHANGEUPPER:
+ return prestera_bridge_port_event(dev, event, ptr);
+ default:
+ return 0;
+ }
+}
+
+static int prestera_netdev_event_handler(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ int err = 0;
+
+ if (prestera_netdev_check(dev))
+ err = prestera_netdev_port_event(dev, event, ptr);
+
+ return notifier_from_errno(err);
+}
+
+static int prestera_netdev_event_handler_register(struct prestera_switch *sw)
+{
+ sw->netdev_nb.notifier_call = prestera_netdev_event_handler;
+
+ return register_netdevice_notifier(&sw->netdev_nb);
+}
+
+static void prestera_netdev_event_handler_unregister(struct prestera_switch *sw)
+{
+ unregister_netdevice_notifier(&sw->netdev_nb);
+}
+
+static int prestera_switch_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_switch_init(sw);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to init Switch device\n");
+ return err;
+ }
+
+ rwlock_init(&sw->port_list_lock);
+ INIT_LIST_HEAD(&sw->port_list);
+
+ err = prestera_switch_set_base_mac_addr(sw);
+ if (err)
+ return err;
+
+ err = prestera_netdev_event_handler_register(sw);
+ if (err)
+ return err;
+
+ err = prestera_switchdev_init(sw);
+ if (err)
+ goto err_swdev_register;
+
+ err = prestera_rxtx_switch_init(sw);
+ if (err)
+ goto err_rxtx_register;
+
+ err = prestera_event_handlers_register(sw);
+ if (err)
+ goto err_handlers_register;
+
+ err = prestera_devlink_register(sw);
+ if (err)
+ goto err_dl_register;
+
+ err = prestera_create_ports(sw);
+ if (err)
+ goto err_ports_create;
+
+ return 0;
+
+err_ports_create:
+ prestera_devlink_unregister(sw);
+err_dl_register:
+ prestera_event_handlers_unregister(sw);
+err_handlers_register:
+ prestera_rxtx_switch_fini(sw);
+err_rxtx_register:
+ prestera_switchdev_fini(sw);
+err_swdev_register:
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+
+ return err;
+}
+
+static void prestera_switch_fini(struct prestera_switch *sw)
+{
+ prestera_destroy_ports(sw);
+ prestera_devlink_unregister(sw);
+ prestera_event_handlers_unregister(sw);
+ prestera_rxtx_switch_fini(sw);
+ prestera_switchdev_fini(sw);
+ prestera_netdev_event_handler_unregister(sw);
+ prestera_hw_switch_fini(sw);
+}
+
+int prestera_device_register(struct prestera_device *dev)
+{
+ struct prestera_switch *sw;
+ int err;
+
+ sw = prestera_devlink_alloc();
+ if (!sw)
+ return -ENOMEM;
+
+ dev->priv = sw;
+ sw->dev = dev;
+
+ err = prestera_switch_init(sw);
+ if (err) {
+ prestera_devlink_free(sw);
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(prestera_device_register);
+
+void prestera_device_unregister(struct prestera_device *dev)
+{
+ struct prestera_switch *sw = dev->priv;
+
+ prestera_switch_fini(sw);
+ prestera_devlink_free(sw);
+}
+EXPORT_SYMBOL(prestera_device_unregister);
+
+static int __init prestera_module_init(void)
+{
+ prestera_wq = alloc_workqueue("prestera", 0, 0);
+ if (!prestera_wq)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void __exit prestera_module_exit(void)
+{
+ destroy_workqueue(prestera_wq);
+}
+
+module_init(prestera_module_init);
+module_exit(prestera_module_exit);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch driver");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
new file mode 100644
index 000000000000..1b97adae542e
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -0,0 +1,769 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "prestera.h"
+
+#define PRESTERA_MSG_MAX_SIZE 1500
+
+#define PRESTERA_SUPP_FW_MAJ_VER 2
+#define PRESTERA_SUPP_FW_MIN_VER 0
+
+#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
+
+#define PRESTERA_FW_HDR_MAGIC 0x351D9D06
+#define PRESTERA_FW_DL_TIMEOUT_MS 50000
+#define PRESTERA_FW_BLK_SZ 1024
+
+#define PRESTERA_FW_VER_MAJ_MUL 1000000
+#define PRESTERA_FW_VER_MIN_MUL 1000
+
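+/* the FW version is packed into a single u32 as
+ * maj * 1000000 + min * 1000 + patch; the macros below unpack it
+ */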
+#define PRESTERA_FW_VER_MAJ(v) ((v) / PRESTERA_FW_VER_MAJ_MUL)
+
+#define PRESTERA_FW_VER_MIN(v) \
+ (((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL)) / \
+ PRESTERA_FW_VER_MIN_MUL)
+
+#define PRESTERA_FW_VER_PATCH(v) \
+ ((v) - (PRESTERA_FW_VER_MAJ(v) * PRESTERA_FW_VER_MAJ_MUL) - \
+ (PRESTERA_FW_VER_MIN(v) * PRESTERA_FW_VER_MIN_MUL))
+
+enum prestera_pci_bar_t {
+ PRESTERA_PCI_BAR_FW = 2,
+ PRESTERA_PCI_BAR_PP = 4,
+};
+
+struct prestera_fw_header {
+ __be32 magic_number;
+ __be32 version_value;
+ u8 reserved[8];
+};
+
+struct prestera_ldr_regs {
+ u32 ldr_ready;
+ u32 pad1;
+
+ u32 ldr_img_size;
+ u32 ldr_ctl_flags;
+
+ u32 ldr_buf_offs;
+ u32 ldr_buf_size;
+
+ u32 ldr_buf_rd;
+ u32 pad2;
+ u32 ldr_buf_wr;
+
+ u32 ldr_status;
+};
+
+#define PRESTERA_LDR_REG_OFFSET(f) offsetof(struct prestera_ldr_regs, f)
+
+#define PRESTERA_LDR_READY_MAGIC 0xf00dfeed
+
+#define PRESTERA_LDR_STATUS_IMG_DL BIT(0)
+#define PRESTERA_LDR_STATUS_START_FW BIT(1)
+#define PRESTERA_LDR_STATUS_INVALID_IMG BIT(2)
+#define PRESTERA_LDR_STATUS_NOMEM BIT(3)
+
+#define PRESTERA_LDR_REG_BASE(fw) ((fw)->ldr_regs)
+#define PRESTERA_LDR_REG_ADDR(fw, reg) (PRESTERA_LDR_REG_BASE(fw) + (reg))
+
+/* fw loader registers */
+#define PRESTERA_LDR_READY_REG PRESTERA_LDR_REG_OFFSET(ldr_ready)
+#define PRESTERA_LDR_IMG_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_img_size)
+#define PRESTERA_LDR_CTL_REG PRESTERA_LDR_REG_OFFSET(ldr_ctl_flags)
+#define PRESTERA_LDR_BUF_SIZE_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_size)
+#define PRESTERA_LDR_BUF_OFFS_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_offs)
+#define PRESTERA_LDR_BUF_RD_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_rd)
+#define PRESTERA_LDR_BUF_WR_REG PRESTERA_LDR_REG_OFFSET(ldr_buf_wr)
+#define PRESTERA_LDR_STATUS_REG PRESTERA_LDR_REG_OFFSET(ldr_status)
+
+#define PRESTERA_LDR_CTL_DL_START BIT(0)
+
+#define PRESTERA_EVT_QNUM_MAX 4
+
+struct prestera_fw_evtq_regs {
+ u32 rd_idx;
+ u32 pad1;
+ u32 wr_idx;
+ u32 pad2;
+ u32 offs;
+ u32 len;
+};
+
+struct prestera_fw_regs {
+ u32 fw_ready;
+ u32 pad;
+ u32 cmd_offs;
+ u32 cmd_len;
+ u32 evt_offs;
+ u32 evt_qnum;
+
+ u32 cmd_req_ctl;
+ u32 cmd_req_len;
+ u32 cmd_rcv_ctl;
+ u32 cmd_rcv_len;
+
+ u32 fw_status;
+ u32 rx_status;
+
+ struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
+};
+
+#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f)
+
+#define PRESTERA_FW_READY_MAGIC 0xcafebabe
+
+/* fw registers */
+#define PRESTERA_FW_READY_REG PRESTERA_FW_REG_OFFSET(fw_ready)
+
+#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs)
+#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len)
+#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs)
+#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum)
+
+#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl)
+#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len)
+
+#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl)
+#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len)
+#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status)
+#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status)
+
+/* PRESTERA_CMD_REQ_CTL_REG flags */
+#define PRESTERA_CMD_F_REQ_SENT BIT(0)
+#define PRESTERA_CMD_F_REPL_RCVD BIT(1)
+
+/* PRESTERA_CMD_RCV_CTL_REG flags */
+#define PRESTERA_CMD_F_REPL_SENT BIT(0)
+
+#define PRESTERA_EVTQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(evtq_list) + \
+ (q) * sizeof(struct prestera_fw_evtq_regs) + \
+ offsetof(struct prestera_fw_evtq_regs, f))
+
+#define PRESTERA_EVTQ_RD_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, rd_idx)
+#define PRESTERA_EVTQ_WR_IDX_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, wr_idx)
+#define PRESTERA_EVTQ_OFFS_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, offs)
+#define PRESTERA_EVTQ_LEN_REG(q) PRESTERA_EVTQ_REG_OFFSET(q, len)
+
+#define PRESTERA_FW_REG_BASE(fw) ((fw)->dev.ctl_regs)
+#define PRESTERA_FW_REG_ADDR(fw, reg) PRESTERA_FW_REG_BASE((fw)) + (reg)
+
+#define PRESTERA_FW_CMD_DEFAULT_WAIT_MS 30000
+#define PRESTERA_FW_READY_WAIT_MS 20000
+
+struct prestera_fw_evtq {
+ u8 __iomem *addr;
+ size_t len;
+};
+
+struct prestera_fw {
+ struct workqueue_struct *wq;
+ struct prestera_device dev;
+ u8 __iomem *ldr_regs;
+ u8 __iomem *ldr_ring_buf;
+ u32 ldr_buf_len;
+ u32 ldr_wr_idx;
+ struct mutex cmd_mtx; /* serialize access to dev->send_req */
+ size_t cmd_mbox_len;
+ u8 __iomem *cmd_mbox;
+ struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX];
+ u8 evt_qnum;
+ struct work_struct evt_work;
+ u8 __iomem *evt_buf;
+ u8 *evt_msg;
+};
+
+static int prestera_fw_load(struct prestera_fw *fw);
+
+static void prestera_fw_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_FW_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_fw_evtq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].len;
+}
+
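+/* Event queues are byte rings shared with the FW: the FW advances the
+ * write index, the driver advances the read index. The queue length
+ * must be a power of two, as read indices are wrapped by masking with
+ * (len - 1) below.
+ */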
+static u32 prestera_fw_evtq_avail(struct prestera_fw *fw, u8 qid)
+{
+ u32 wr_idx = prestera_fw_read(fw, PRESTERA_EVTQ_WR_IDX_REG(qid));
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ return CIRC_CNT(wr_idx, rd_idx, prestera_fw_evtq_len(fw, qid));
+}
+
+static void prestera_fw_evtq_rd_set(struct prestera_fw *fw,
+ u8 qid, u32 idx)
+{
+ u32 rd_idx = idx & (prestera_fw_evtq_len(fw, qid) - 1);
+
+ prestera_fw_write(fw, PRESTERA_EVTQ_RD_IDX_REG(qid), rd_idx);
+}
+
+static u8 __iomem *prestera_fw_evtq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->evt_queue[qid].addr;
+}
+
+static u32 prestera_fw_evtq_read32(struct prestera_fw *fw, u8 qid)
+{
+ u32 rd_idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u32 val;
+
+ val = readl(prestera_fw_evtq_buf(fw, qid) + rd_idx);
+ prestera_fw_evtq_rd_set(fw, qid, rd_idx + 4);
+ return val;
+}
+
+static ssize_t prestera_fw_evtq_read_buf(struct prestera_fw *fw,
+ u8 qid, void *buf, size_t len)
+{
+ u32 idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+ u8 __iomem *evtq_addr = prestera_fw_evtq_buf(fw, qid);
+ u32 *buf32 = buf;
+ int i;
+
+ for (i = 0; i < len / 4; buf32++, i++) {
+ *buf32 = readl_relaxed(evtq_addr + idx);
+ idx = (idx + 4) & (prestera_fw_evtq_len(fw, qid) - 1);
+ }
+
+ prestera_fw_evtq_rd_set(fw, qid, idx);
+
+ return i;
+}
+
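+/* pick the first queue that has at least a 32-bit length word pending */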
+static u8 prestera_fw_evtq_pick(struct prestera_fw *fw)
+{
+ int qid;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ if (prestera_fw_evtq_avail(fw, qid) >= 4)
+ return qid;
+ }
+
+ return PRESTERA_EVT_QNUM_MAX;
+}
+
+static void prestera_fw_evt_work_fn(struct work_struct *work)
+{
+ struct prestera_fw *fw;
+ void *msg;
+ u8 qid;
+
+ fw = container_of(work, struct prestera_fw, evt_work);
+ msg = fw->evt_msg;
+
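+ /* Each event record is a 32-bit length word followed by the
+ * message payload; drain the queues until none has a full
+ * record pending.
+ */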
+ while ((qid = prestera_fw_evtq_pick(fw)) < PRESTERA_EVT_QNUM_MAX) {
+ u32 idx;
+ u32 len;
+
+ len = prestera_fw_evtq_read32(fw, qid);
+ idx = prestera_fw_read(fw, PRESTERA_EVTQ_RD_IDX_REG(qid));
+
+ WARN_ON(prestera_fw_evtq_avail(fw, qid) < len);
+
+ if (WARN_ON(len > PRESTERA_MSG_MAX_SIZE)) {
+ prestera_fw_evtq_rd_set(fw, qid, idx + len);
+ continue;
+ }
+
+ prestera_fw_evtq_read_buf(fw, qid, msg, len);
+
+ if (fw->dev.recv_msg)
+ fw->dev.recv_msg(&fw->dev, msg, len);
+ }
+}
+
+static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
+ unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_FW_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
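+/* Command path: the request is copied into the shared mailbox and the
+ * REQ_SENT flag is raised; the driver then polls the receive control
+ * register until the FW signals REPL_SENT. The reply payload follows
+ * the request in the mailbox and is acknowledged with REPL_RCVD.
+ */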
+static int prestera_fw_cmd_send(struct prestera_fw *fw,
+ void *in_msg, size_t in_size,
+ void *out_msg, size_t out_size,
+ unsigned int waitms)
+{
+ u32 ret_size;
+ int err;
+
+ if (!waitms)
+ waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS;
+
+ if (ALIGN(in_size, 4) > fw->cmd_mbox_len)
+ return -EMSGSIZE;
+
+ /* wait for the previous reply exchange with FW to complete */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30);
+ if (err) {
+ dev_err(fw->dev.dev, "timed out waiting for FW to finish the previous reply\n");
+ return err;
+ }
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size);
+ memcpy_toio(fw->cmd_mbox, in_msg, in_size);
+
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT);
+
+ /* wait for reply from FW */
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG,
+ PRESTERA_CMD_F_REPL_SENT, waitms);
+ if (err) {
+ dev_err(fw->dev.dev, "reply from FW is timed out\n");
+ goto cmd_exit;
+ }
+
+ ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG);
+ if (ret_size > out_size) {
+ dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n",
+ ret_size, out_size);
+ err = -EMSGSIZE;
+ goto cmd_exit;
+ }
+
+ memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size);
+
+cmd_exit:
+ prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD);
+ return err;
+}
+
+static int prestera_fw_send_req(struct prestera_device *dev,
+ void *in_msg, size_t in_size, void *out_msg,
+ size_t out_size, unsigned int waitms)
+{
+ struct prestera_fw *fw;
+ int ret;
+
+ fw = container_of(dev, struct prestera_fw, dev);
+
+ mutex_lock(&fw->cmd_mtx);
+ ret = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms);
+ mutex_unlock(&fw->cmd_mtx);
+
+ return ret;
+}
+
+static int prestera_fw_init(struct prestera_fw *fw)
+{
+ u8 __iomem *base;
+ int err;
+ u8 qid;
+
+ fw->dev.send_req = prestera_fw_send_req;
+ fw->ldr_regs = fw->dev.ctl_regs;
+
+ err = prestera_fw_load(fw);
+ if (err)
+ return err;
+
+ err = prestera_fw_wait_reg32(fw, PRESTERA_FW_READY_REG,
+ PRESTERA_FW_READY_MAGIC,
+ PRESTERA_FW_READY_WAIT_MS);
+ if (err) {
+ dev_err(fw->dev.dev, "FW failed to start\n");
+ return err;
+ }
+
+ base = fw->dev.ctl_regs;
+
+ fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG);
+ fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG);
+ mutex_init(&fw->cmd_mtx);
+
+ fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG);
+ fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG);
+ fw->evt_msg = kmalloc(PRESTERA_MSG_MAX_SIZE, GFP_KERNEL);
+ if (!fw->evt_msg)
+ return -ENOMEM;
+
+ for (qid = 0; qid < fw->evt_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_EVTQ_OFFS_REG(qid));
+ struct prestera_fw_evtq *evtq = &fw->evt_queue[qid];
+
+ evtq->len = prestera_fw_read(fw, PRESTERA_EVTQ_LEN_REG(qid));
+ evtq->addr = fw->evt_buf + offs;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_uninit(struct prestera_fw *fw)
+{
+ kfree(fw->evt_msg);
+}
+
+static irqreturn_t prestera_pci_irq_handler(int irq, void *dev_id)
+{
+ struct prestera_fw *fw = dev_id;
+
+ if (prestera_fw_read(fw, PRESTERA_RX_STATUS_REG)) {
+ prestera_fw_write(fw, PRESTERA_RX_STATUS_REG, 0);
+
+ if (fw->dev.recv_pkt)
+ fw->dev.recv_pkt(&fw->dev);
+ }
+
+ queue_work(fw->wq, &fw->evt_work);
+
+ return IRQ_HANDLED;
+}
+
+static void prestera_ldr_write(struct prestera_fw *fw, u32 reg, u32 val)
+{
+ writel(val, PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static u32 prestera_ldr_read(struct prestera_fw *fw, u32 reg)
+{
+ return readl(PRESTERA_LDR_REG_ADDR(fw, reg));
+}
+
+static int prestera_ldr_wait_reg32(struct prestera_fw *fw,
+ u32 reg, u32 cmp, unsigned int waitms)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, reg);
+ u32 val;
+
+ return readl_poll_timeout(addr, val, cmp == val,
+ 10 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
+}
+
+static u32 prestera_ldr_wait_buf(struct prestera_fw *fw, size_t len)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_BUF_RD_REG);
+ u32 buf_len = fw->ldr_buf_len;
+ u32 wr_idx = fw->ldr_wr_idx;
+ u32 rd_idx;
+
+ return readl_poll_timeout(addr, rd_idx,
+ CIRC_SPACE(wr_idx, rd_idx, buf_len) >= len,
+ 1 * USEC_PER_MSEC, 100 * USEC_PER_MSEC);
+}
+
+static int prestera_ldr_wait_dl_finish(struct prestera_fw *fw)
+{
+ u8 __iomem *addr = PRESTERA_LDR_REG_ADDR(fw, PRESTERA_LDR_STATUS_REG);
+ unsigned long mask = ~(PRESTERA_LDR_STATUS_IMG_DL);
+ u32 val;
+ int err;
+
+ err = readl_poll_timeout(addr, val, val & mask, 10 * USEC_PER_MSEC,
+ PRESTERA_FW_DL_TIMEOUT_MS * USEC_PER_MSEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Timeout to load FW img [state=%d]",
+ prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG));
+ return err;
+ }
+
+ return 0;
+}
+
+static void prestera_ldr_wr_idx_move(struct prestera_fw *fw, unsigned int n)
+{
+ fw->ldr_wr_idx = (fw->ldr_wr_idx + (n)) & (fw->ldr_buf_len - 1);
+}
+
+static void prestera_ldr_wr_idx_commit(struct prestera_fw *fw)
+{
+ prestera_ldr_write(fw, PRESTERA_LDR_BUF_WR_REG, fw->ldr_wr_idx);
+}
+
+static u8 __iomem *prestera_ldr_wr_ptr(struct prestera_fw *fw)
+{
+ return fw->ldr_ring_buf + fw->ldr_wr_idx;
+}
+
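+/* Copy one chunk of the FW image into the loader ring buffer word by
+ * word, then publish the new write index via the BUF_WR register.
+ */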
+static int prestera_ldr_send(struct prestera_fw *fw, const u8 *buf, size_t len)
+{
+ int err;
+ int i;
+
+ err = prestera_ldr_wait_buf(fw, len);
+ if (err) {
+ dev_err(fw->dev.dev, "failed wait for sending firmware\n");
+ return err;
+ }
+
+ for (i = 0; i < len; i += 4) {
+ writel_relaxed(*(u32 *)(buf + i), prestera_ldr_wr_ptr(fw));
+ prestera_ldr_wr_idx_move(fw, 4);
+ }
+
+ prestera_ldr_wr_idx_commit(fw);
+ return 0;
+}
+
+static int prestera_ldr_fw_send(struct prestera_fw *fw,
+ const char *img, u32 fw_size)
+{
+ u32 status;
+ u32 pos;
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_STATUS_REG,
+ PRESTERA_LDR_STATUS_IMG_DL,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "Loader is not ready to load image\n");
+ return err;
+ }
+
+ for (pos = 0; pos < fw_size; pos += PRESTERA_FW_BLK_SZ) {
+ if (pos + PRESTERA_FW_BLK_SZ > fw_size)
+ break;
+
+ err = prestera_ldr_send(fw, img + pos, PRESTERA_FW_BLK_SZ);
+ if (err)
+ return err;
+ }
+
+ if (pos < fw_size) {
+ err = prestera_ldr_send(fw, img + pos, fw_size - pos);
+ if (err)
+ return err;
+ }
+
+ err = prestera_ldr_wait_dl_finish(fw);
+ if (err)
+ return err;
+
+ status = prestera_ldr_read(fw, PRESTERA_LDR_STATUS_REG);
+
+ switch (status) {
+ case PRESTERA_LDR_STATUS_INVALID_IMG:
+ dev_err(fw->dev.dev, "FW img has bad CRC\n");
+ return -EINVAL;
+ case PRESTERA_LDR_STATUS_NOMEM:
+ dev_err(fw->dev.dev, "Loader has no enough mem\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prestera_fw_rev_parse(const struct prestera_fw_header *hdr,
+ struct prestera_fw_rev *rev)
+{
+ u32 version = be32_to_cpu(hdr->version_value);
+
+ rev->maj = PRESTERA_FW_VER_MAJ(version);
+ rev->min = PRESTERA_FW_VER_MIN(version);
+ rev->sub = PRESTERA_FW_VER_PATCH(version);
+}
+
+static int prestera_fw_rev_check(struct prestera_fw *fw)
+{
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u16 maj_supp = PRESTERA_SUPP_FW_MAJ_VER;
+ u16 min_supp = PRESTERA_SUPP_FW_MIN_VER;
+
+ if (rev->maj == maj_supp && rev->min >= min_supp)
+ return 0;
+
+ dev_err(fw->dev.dev, "Driver supports FW version only '%u.%u.x'",
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ return -EINVAL;
+}
+
+static int prestera_fw_hdr_parse(struct prestera_fw *fw,
+ const struct firmware *img)
+{
+ struct prestera_fw_header *hdr = (struct prestera_fw_header *)img->data;
+ struct prestera_fw_rev *rev = &fw->dev.fw_rev;
+ u32 magic;
+
+ magic = be32_to_cpu(hdr->magic_number);
+ if (magic != PRESTERA_FW_HDR_MAGIC) {
+ dev_err(fw->dev.dev, "FW img hdr magic is invalid");
+ return -EINVAL;
+ }
+
+ prestera_fw_rev_parse(hdr, rev);
+
+ dev_info(fw->dev.dev, "FW version '%u.%u.%u'\n",
+ rev->maj, rev->min, rev->sub);
+
+ return prestera_fw_rev_check(fw);
+}
+
+static int prestera_fw_load(struct prestera_fw *fw)
+{
+ size_t hlen = sizeof(struct prestera_fw_header);
+ const struct firmware *f;
+ char fw_path[128];
+ int err;
+
+ err = prestera_ldr_wait_reg32(fw, PRESTERA_LDR_READY_REG,
+ PRESTERA_LDR_READY_MAGIC,
+ 5 * MSEC_PER_SEC);
+ if (err) {
+ dev_err(fw->dev.dev, "waiting for FW loader is timed out");
+ return err;
+ }
+
+ fw->ldr_ring_buf = fw->ldr_regs +
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_OFFS_REG);
+
+ fw->ldr_buf_len =
+ prestera_ldr_read(fw, PRESTERA_LDR_BUF_SIZE_REG);
+
+ fw->ldr_wr_idx = 0;
+
+ snprintf(fw_path, sizeof(fw_path), PRESTERA_FW_PATH_FMT,
+ PRESTERA_SUPP_FW_MAJ_VER, PRESTERA_SUPP_FW_MIN_VER);
+
+ err = request_firmware_direct(&f, fw_path, fw->dev.dev);
+ if (err) {
+ dev_err(fw->dev.dev, "failed to request firmware file\n");
+ return err;
+ }
+
+ err = prestera_fw_hdr_parse(fw, f);
+ if (err) {
+ dev_err(fw->dev.dev, "FW image header is invalid\n");
+ goto out_release;
+ }
+
+ prestera_ldr_write(fw, PRESTERA_LDR_IMG_SIZE_REG, f->size - hlen);
+ prestera_ldr_write(fw, PRESTERA_LDR_CTL_REG, PRESTERA_LDR_CTL_DL_START);
+
+ dev_info(fw->dev.dev, "Loading %s ...", fw_path);
+
+ err = prestera_ldr_fw_send(fw, f->data + hlen, f->size - hlen);
+
+out_release:
+ release_firmware(f);
+ return err;
+}
+
+static int prestera_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ const char *driver_name = pdev->driver->name;
+ struct prestera_fw *fw;
+ int err;
+
+ err = pcim_enable_device(pdev);
+ if (err)
+ return err;
+
+ err = pcim_iomap_regions(pdev, BIT(PRESTERA_PCI_BAR_FW) |
+ BIT(PRESTERA_PCI_BAR_PP),
+ pci_name(pdev));
+ if (err)
+ return err;
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(30));
+ if (err) {
+ dev_err(&pdev->dev, "failed to set DMA mask\n");
+ goto err_dma_mask;
+ }
+
+ pci_set_master(pdev);
+
+ fw = devm_kzalloc(&pdev->dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw) {
+ err = -ENOMEM;
+ goto err_pci_dev_alloc;
+ }
+
+ fw->dev.ctl_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_FW];
+ fw->dev.pp_regs = pcim_iomap_table(pdev)[PRESTERA_PCI_BAR_PP];
+ fw->dev.dev = &pdev->dev;
+
+ pci_set_drvdata(pdev, fw);
+
+ err = prestera_fw_init(fw);
+ if (err)
+ goto err_prestera_fw_init;
+
+ dev_info(fw->dev.dev, "Prestera FW is ready\n");
+
+ fw->wq = alloc_workqueue("prestera_fw_wq", WQ_HIGHPRI, 1);
+ if (!fw->wq) {
+ err = -ENOMEM;
+ goto err_wq_alloc;
+ }
+
+ INIT_WORK(&fw->evt_work, prestera_fw_evt_work_fn);
+
+ err = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (err < 0) {
+ dev_err(&pdev->dev, "MSI IRQ init failed\n");
+ goto err_irq_alloc;
+ }
+
+ err = request_irq(pci_irq_vector(pdev, 0), prestera_pci_irq_handler,
+ 0, driver_name, fw);
+ if (err) {
+ dev_err(&pdev->dev, "fail to request IRQ\n");
+ goto err_request_irq;
+ }
+
+ err = prestera_device_register(&fw->dev);
+ if (err)
+ goto err_prestera_dev_register;
+
+ return 0;
+
+err_prestera_dev_register:
+ free_irq(pci_irq_vector(pdev, 0), fw);
+err_request_irq:
+ pci_free_irq_vectors(pdev);
+err_irq_alloc:
+ destroy_workqueue(fw->wq);
+err_wq_alloc:
+ prestera_fw_uninit(fw);
+err_prestera_fw_init:
+err_pci_dev_alloc:
+err_dma_mask:
+ return err;
+}
+
+static void prestera_pci_remove(struct pci_dev *pdev)
+{
+ struct prestera_fw *fw = pci_get_drvdata(pdev);
+
+ prestera_device_unregister(&fw->dev);
+ free_irq(pci_irq_vector(pdev, 0), fw);
+ pci_free_irq_vectors(pdev);
+ destroy_workqueue(fw->wq);
+ prestera_fw_uninit(fw);
+}
+
+static const struct pci_device_id prestera_pci_devices[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0xC804) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, prestera_pci_devices);
+
+static struct pci_driver prestera_pci_driver = {
+ .name = "Prestera DX",
+ .id_table = prestera_pci_devices,
+ .probe = prestera_pci_probe,
+ .remove = prestera_pci_remove,
+};
+module_pci_driver(prestera_pci_driver);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Marvell Prestera switch PCI interface");
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
new file mode 100644
index 000000000000..2a13c318048c
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/bitfield.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "prestera_dsa.h"
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_rxtx.h"
+
+#define PRESTERA_SDMA_WAIT_MUL 10
+
+struct prestera_sdma_desc {
+ __le32 word1;
+ __le32 word2;
+ __le32 buff;
+ __le32 next;
+} __packed __aligned(16);
+
+#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544
+
+#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
+ ((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))
+
+#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
+ (PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1
+
+#define PRESTERA_SDMA_RX_QUEUE_NUM 8
+
+#define PRESTERA_SDMA_RX_DESC_PER_Q 1000
+
+#define PRESTERA_SDMA_TX_DESC_PER_Q 1000
+#define PRESTERA_SDMA_TX_MAX_BURST 64
+
+#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
+ ((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)
+
+#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0
+#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U
+
+#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
+ (PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)
+
+#define PRESTERA_SDMA_TX_DESC_LAST BIT(20)
+#define PRESTERA_SDMA_TX_DESC_FIRST BIT(21)
+#define PRESTERA_SDMA_TX_DESC_CALC_CRC BIT(12)
+
+#define PRESTERA_SDMA_TX_DESC_SINGLE \
+ (PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)
+
+#define PRESTERA_SDMA_TX_DESC_INIT \
+ (PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)
+
+#define PRESTERA_SDMA_RX_INTR_MASK_REG 0x2814
+#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG 0x2680
+#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n) (0x260C + (n) * 16)
+
+#define PRESTERA_SDMA_TX_QUEUE_DESC_REG 0x26C0
+#define PRESTERA_SDMA_TX_QUEUE_START_REG 0x2868
+
+struct prestera_sdma_buf {
+ struct prestera_sdma_desc *desc;
+ dma_addr_t desc_dma;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ bool is_used;
+};
+
+struct prestera_rx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_rx;
+};
+
+struct prestera_tx_ring {
+ struct prestera_sdma_buf *bufs;
+ int next_tx;
+ int max_burst;
+ int burst;
+};
+
+struct prestera_sdma {
+ struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
+ struct prestera_tx_ring tx_ring;
+ struct prestera_switch *sw;
+ struct dma_pool *desc_pool;
+ struct work_struct tx_work;
+ struct napi_struct rx_napi;
+ struct net_device napi_dev;
+ u32 map_addr;
+ u64 dma_mask;
+ /* protect SDMA from concurrent access by multiple CPUs */
+ spinlock_t tx_lock;
+};
+
+struct prestera_rxtx {
+ struct prestera_sdma sdma;
+};
+
+static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct prestera_sdma_desc *desc;
+ dma_addr_t dma;
+
+ desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
+ if (!desc)
+ return -ENOMEM;
+
+ buf->buf_dma = DMA_MAPPING_ERROR;
+ buf->desc_dma = dma;
+ buf->desc = desc;
+ buf->skb = NULL;
+
+ return 0;
+}
+
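+/* translate a host DMA address into the device (SDMA) address space
+ * by applying the firmware-provided base offset
+ */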
+static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
+{
+ return sdma->map_addr + pa;
+}
+
+static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
+ desc->word2 = cpu_to_le32(word);
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+
+ /* make sure the buffer is set before resetting the descriptor */
+ wmb();
+
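+ /* reset the descriptor and return its ownership to the DMA engine */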
+ desc->word1 = cpu_to_le32(0xA0000000);
+}
+
+static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dev = sdma->sw->dev->dev;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+
+ skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma))
+ goto err_dma_map;
+
+ if (buf->skb)
+ dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+
+err_dma_map:
+ kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
+static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ dma_addr_t buf_dma = buf->buf_dma;
+ struct sk_buff *skb = buf->skb;
+ u32 len = skb->len;
+ int err;
+
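+ /* refill the ring slot with a fresh skb; on failure keep the old
+ * mapping and hand a copy of the packet up the stack instead
+ */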
+ err = prestera_sdma_rx_skb_alloc(sdma, buf);
+ if (err) {
+ buf->buf_dma = buf_dma;
+ buf->skb = skb;
+
+ skb = alloc_skb(skb->len, GFP_ATOMIC);
+ if (skb) {
+ skb_put(skb, len);
+ skb_copy_from_linear_data(buf->skb, skb->data, len);
+ }
+ }
+
+ prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);
+
+ return skb;
+}
+
+static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ const struct prestera_port *port;
+ struct prestera_dsa dsa;
+ u32 hw_port, dev_id;
+ int err;
+
+ skb_pull(skb, ETH_HLEN);
+
+ /* the EtherType field is part of the DSA header */
+ err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
+ if (err)
+ return err;
+
+ dev_id = dsa.hw_dev_num;
+ hw_port = dsa.port_num;
+
+ port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
+ if (unlikely(!port)) {
+ dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port (%u, %u)\n",
+ dev_id, hw_port);
+ return -ENOENT;
+ }
+
+ if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
+ return -EINVAL;
+
+ /* remove DSA tag and update checksum */
+ skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);
+
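+ /* move the MAC addresses over the stripped DSA tag */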
+ memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
+ ETH_ALEN * 2);
+
+ skb_push(skb, ETH_HLEN);
+
+ skb->protocol = eth_type_trans(skb, port->dev);
+
+ if (dsa.vlan.is_tagged) {
+ u16 tci = dsa.vlan.vid & VLAN_VID_MASK;
+
+ tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
+ if (dsa.vlan.cfi_bit)
+ tci |= VLAN_CFI_MASK;
+
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
+ }
+
+ return 0;
+}
+
+static int prestera_sdma_next_rx_buf_idx(int buf_idx)
+{
+ return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
+}
+
+static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ unsigned int rxq_done_map = 0;
+ struct prestera_sdma *sdma;
+ struct list_head rx_list;
+ unsigned int qmask;
+ int pkts_done = 0;
+ int q;
+
+ qmask = GENMASK(qnum - 1, 0);
+
+ INIT_LIST_HEAD(&rx_list);
+
+ sdma = container_of(napi, struct prestera_sdma, rx_napi);
+
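+ /* service the rx queues round-robin until the budget is spent or
+ * every queue is drained
+ */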
+ while (pkts_done < budget && rxq_done_map != qmask) {
+ for (q = 0; q < qnum && pkts_done < budget; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+ struct prestera_sdma_desc *desc;
+ struct prestera_sdma_buf *buf;
+ int buf_idx = ring->next_rx;
+ struct sk_buff *skb;
+
+ buf = &ring->bufs[buf_idx];
+ desc = buf->desc;
+
+ if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
+ rxq_done_map &= ~BIT(q);
+ } else {
+ rxq_done_map |= BIT(q);
+ continue;
+ }
+
+ pkts_done++;
+
+ __skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));
+
+ skb = prestera_sdma_rx_skb_get(sdma, buf);
+ if (!skb)
+ goto rx_next_buf;
+
+ if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
+ goto rx_next_buf;
+
+ list_add_tail(&skb->list, &rx_list);
+rx_next_buf:
+ ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
+ }
+ }
+
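+ /* unmask rx interrupts only once polling is complete */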
+ if (pkts_done < budget && napi_complete_done(napi, pkts_done))
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
+ GENMASK(9, 2));
+
+ netif_receive_skb_list(&rx_list);
+
+ return pkts_done;
+}
+
+static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
+{
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int q, b;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ if (!ring->bufs)
+ break;
+
+ for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc_dma)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ if (buf->buf_dma != DMA_MAPPING_ERROR)
+ dma_unmap_single(sdma->sw->dev->dev,
+ buf->buf_dma, buf->skb->len,
+ DMA_FROM_DEVICE);
+ kfree_skb(buf->skb);
+ }
+ }
+}
+
+static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
+{
+ int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
+ int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
+ int err;
+ int q;
+
+ /* disable all rx queues */
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(15, 8));
+
+ for (q = 0; q < qnum; q++) {
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_rx_ring *ring = &sdma->rx_ring[q];
+
+ ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!ring->bufs)
+ return -ENOMEM;
+
+ ring->next_rx = 0;
+
+ tail = &ring->bufs[bnum - 1];
+ head = &ring->bufs[0];
+ next = head;
+ prev = next;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ err = prestera_sdma_rx_skb_alloc(sdma, next);
+ if (err)
+ return err;
+
+ prestera_sdma_rx_desc_init(sdma, next->desc,
+ next->buf_dma);
+
+ prestera_sdma_rx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
+ prestera_sdma_map(sdma, head->desc_dma));
+ }
+
+ /* make sure all rx descs are filled before enabling all rx queues */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
+ GENMASK(7, 0));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc)
+{
+ desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
+ desc->word2 = 0;
+}
+
+static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t next)
+{
+ desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
+}
+
+static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
+ struct prestera_sdma_desc *desc,
+ dma_addr_t buf, size_t len)
+{
+ u32 word = le32_to_cpu(desc->word2);
+
+ u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));
+
+ desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
+ desc->word2 = cpu_to_le32(word);
+}
+
+static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
+{
+ u32 word = le32_to_cpu(desc->word1);
+
+ word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;
+
+ /* make sure everything is written before enabling xmit */
+ wmb();
+
+ desc->word1 = cpu_to_le32(word);
+}
+
+static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ dma_addr_t dma;
+
+ dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma))
+ return -ENOMEM;
+
+ buf->buf_dma = dma;
+ buf->skb = skb;
+
+ return 0;
+}
+
+static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
+ struct prestera_sdma_buf *buf)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+
+ dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
+}
+
+static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
+{
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma *sdma;
+ int b;
+
+ sdma = container_of(work, struct prestera_sdma, tx_work);
+
+ tx_ring = &sdma->tx_ring;
+
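+ /* release buffers whose descriptors the hardware has already sent */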
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &tx_ring->bufs[b];
+
+ if (!buf->is_used)
+ continue;
+
+ if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
+ continue;
+
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+ dev_consume_skb_any(buf->skb);
+ buf->skb = NULL;
+
+ /* make sure everything is cleaned up */
+ wmb();
+
+ buf->is_used = false;
+ }
+}
+
+static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
+{
+ struct prestera_sdma_buf *head, *tail, *next, *prev;
+ struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int err;
+
+ INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
+ spin_lock_init(&sdma->tx_lock);
+
+ tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
+ if (!tx_ring->bufs)
+ return -ENOMEM;
+
+ tail = &tx_ring->bufs[bnum - 1];
+ head = &tx_ring->bufs[0];
+ next = head;
+ prev = next;
+
+ tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
+ tx_ring->burst = tx_ring->max_burst;
+ tx_ring->next_tx = 0;
+
+ do {
+ err = prestera_sdma_buf_init(sdma, next);
+ if (err)
+ return err;
+
+ next->is_used = false;
+
+ prestera_sdma_tx_desc_init(sdma, next->desc);
+
+ prestera_sdma_tx_desc_set_next(sdma, prev->desc,
+ next->desc_dma);
+
+ prev = next;
+ next++;
+ } while (prev != tail);
+
+ /* join tail with head to make a circular list */
+ prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);
+
+ /* make sure descriptors are written */
+ wmb();
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
+ prestera_sdma_map(sdma, head->desc_dma));
+
+ return 0;
+}
+
+static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
+{
+ struct prestera_tx_ring *ring = &sdma->tx_ring;
+ int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
+ int b;
+
+ cancel_work_sync(&sdma->tx_work);
+
+ if (!ring->bufs)
+ return;
+
+ for (b = 0; b < bnum; b++) {
+ struct prestera_sdma_buf *buf = &ring->bufs[b];
+
+ if (buf->desc)
+ dma_pool_free(sdma->desc_pool, buf->desc,
+ buf->desc_dma);
+
+ if (!buf->skb)
+ continue;
+
+ dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
+ buf->skb->len, DMA_TO_DEVICE);
+
+ dev_consume_skb_any(buf->skb);
+ }
+}
+
+static void prestera_rxtx_handle_event(struct prestera_switch *sw,
+ struct prestera_event *evt,
+ void *arg)
+{
+ struct prestera_sdma *sdma = arg;
+
+ if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
+ return;
+
+ prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
+ napi_schedule(&sdma->rx_napi);
+}
+
+static int prestera_sdma_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+ struct device *dev = sw->dev->dev;
+ struct prestera_rxtx_params p;
+ int err;
+
+ p.use_sdma = true;
+
+ err = prestera_hw_rxtx_init(sw, &p);
+ if (err) {
+ dev_err(dev, "failed to init rxtx by hw\n");
+ return err;
+ }
+
+ sdma->dma_mask = dma_get_mask(dev);
+ sdma->map_addr = p.map_addr;
+ sdma->sw = sw;
+
+ sdma->desc_pool = dma_pool_create("desc_pool", dev,
+ sizeof(struct prestera_sdma_desc),
+ 16, 0);
+ if (!sdma->desc_pool)
+ return -ENOMEM;
+
+ err = prestera_sdma_rx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init rx ring\n");
+ goto err_rx_init;
+ }
+
+ err = prestera_sdma_tx_init(sdma);
+ if (err) {
+ dev_err(dev, "failed to init tx ring\n");
+ goto err_tx_init;
+ }
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event,
+ sdma);
+ if (err)
+ goto err_evt_register;
+
+ init_dummy_netdev(&sdma->napi_dev);
+
+ netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll, 64);
+ napi_enable(&sdma->rx_napi);
+
+ return 0;
+
+err_evt_register:
+err_tx_init:
+ prestera_sdma_tx_fini(sdma);
+err_rx_init:
+ prestera_sdma_rx_fini(sdma);
+
+ dma_pool_destroy(sdma->desc_pool);
+ return err;
+}
+
+static void prestera_sdma_switch_fini(struct prestera_switch *sw)
+{
+ struct prestera_sdma *sdma = &sw->rxtx->sdma;
+
+ napi_disable(&sdma->rx_napi);
+ netif_napi_del(&sdma->rx_napi);
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
+ prestera_sdma_tx_fini(sdma);
+ prestera_sdma_rx_fini(sdma);
+ dma_pool_destroy(sdma->desc_pool);
+}
+
+static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
+{
+ return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
+}
+
+static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
+ struct prestera_tx_ring *tx_ring)
+{
+ int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;
+
+ do {
+ if (prestera_sdma_is_ready(sdma))
+ return 0;
+
+ udelay(1);
+ } while (--tx_wait_num);
+
+ return -EBUSY;
+}
+
+static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
+{
+ prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
+ schedule_work(&sdma->tx_work);
+}
+
+static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
+ struct sk_buff *skb)
+{
+ struct device *dma_dev = sdma->sw->dev->dev;
+ struct net_device *dev = skb->dev;
+ struct prestera_tx_ring *tx_ring;
+ struct prestera_sdma_buf *buf;
+ int err;
+
+ spin_lock(&sdma->tx_lock);
+
+ tx_ring = &sdma->tx_ring;
+
+ buf = &tx_ring->bufs[tx_ring->next_tx];
+ if (buf->is_used) {
+ schedule_work(&sdma->tx_work);
+ goto drop_skb;
+ }
+
+ if (unlikely(eth_skb_pad(skb)))
+ goto drop_skb_nofree;
+
+ err = prestera_sdma_tx_buf_map(sdma, buf, skb);
+ if (err)
+ goto drop_skb;
+
+ prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);
+
+ dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
+ DMA_TO_DEVICE);
+
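+ /* poll for engine readiness only once per max_burst frames to limit
+ * status register reads on the hot path
+ */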
+ if (tx_ring->burst) {
+ tx_ring->burst--;
+ } else {
+ tx_ring->burst = tx_ring->max_burst;
+
+ err = prestera_sdma_tx_wait(sdma, tx_ring);
+ if (err)
+ goto drop_skb_unmap;
+ }
+
+ tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
+ prestera_sdma_tx_desc_xmit(buf->desc);
+ buf->is_used = true;
+
+ prestera_sdma_tx_start(sdma);
+
+ goto tx_done;
+
+drop_skb_unmap:
+ prestera_sdma_tx_buf_unmap(sdma, buf);
+drop_skb:
+ dev_consume_skb_any(skb);
+drop_skb_nofree:
+ dev->stats.tx_dropped++;
+tx_done:
+ spin_unlock(&sdma->tx_lock);
+ return NETDEV_TX_OK;
+}
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw)
+{
+ struct prestera_rxtx *rxtx;
+
+ rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
+ if (!rxtx)
+ return -ENOMEM;
+
+ sw->rxtx = rxtx;
+
+ return prestera_sdma_switch_init(sw);
+}
+
+void prestera_rxtx_switch_fini(struct prestera_switch *sw)
+{
+ prestera_sdma_switch_fini(sw);
+ kfree(sw->rxtx);
+}
+
+int prestera_rxtx_port_init(struct prestera_port *port)
+{
+ int err;
+
+ err = prestera_hw_rxtx_port_init(port);
+ if (err)
+ return err;
+
+ port->dev->needed_headroom = PRESTERA_DSA_HLEN;
+
+ return 0;
+}
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
+{
+ struct prestera_dsa dsa;
+
+ dsa.hw_dev_num = port->dev_id;
+ dsa.port_num = port->hw_id;
+
+ if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
+ return NET_XMIT_DROP;
+
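+ /* make room for the DSA tag between the MAC addresses and EtherType */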
+ skb_push(skb, PRESTERA_DSA_HLEN);
+ memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);
+
+ if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
+ return NET_XMIT_DROP;
+
+ return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
new file mode 100644
index 000000000000..882a1225c323
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_RXTX_H_
+#define _PRESTERA_RXTX_H_
+
+#include <linux/netdevice.h>
+
+struct prestera_switch;
+struct prestera_port;
+
+int prestera_rxtx_switch_init(struct prestera_switch *sw);
+void prestera_rxtx_switch_fini(struct prestera_switch *sw);
+
+int prestera_rxtx_port_init(struct prestera_port *port);
+
+netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb);
+
+#endif /* _PRESTERA_RXTX_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
new file mode 100644
index 000000000000..7d83e1f91ef1
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c
@@ -0,0 +1,1277 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */
+
+#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/switchdev.h>
+
+#include "prestera.h"
+#include "prestera_hw.h"
+#include "prestera_switchdev.h"
+
+#define PRESTERA_VID_ALL (0xffff)
+
+#define PRESTERA_DEFAULT_AGEING_TIME_MS 300000
+#define PRESTERA_MAX_AGEING_TIME_MS 1000000000
+#define PRESTERA_MIN_AGEING_TIME_MS 32000
+
+struct prestera_fdb_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct prestera_switchdev {
+ struct prestera_switch *sw;
+ struct list_head bridge_list;
+ bool bridge_8021q_exists;
+ struct notifier_block swdev_nb_blk;
+ struct notifier_block swdev_nb;
+};
+
+struct prestera_bridge {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_switchdev *swdev;
+ struct list_head port_list;
+ bool vlan_enabled;
+ u16 bridge_id;
+};
+
+struct prestera_bridge_port {
+ struct list_head head;
+ struct net_device *dev;
+ struct prestera_bridge *bridge;
+ struct list_head vlan_list;
+ refcount_t ref_count;
+ unsigned long flags;
+ u8 stp_state;
+};
+
+struct prestera_bridge_vlan {
+ struct list_head head;
+ struct list_head port_vlan_list;
+ u16 vid;
+};
+
+struct prestera_port_vlan {
+ struct list_head br_vlan_head;
+ struct list_head port_head;
+ struct prestera_port *port;
+ struct prestera_bridge_port *br_port;
+ u16 vid;
+};
+
+static struct workqueue_struct *swdev_wq;
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port);
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state);
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_create(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ br_vlan = kzalloc(sizeof(*br_vlan), GFP_KERNEL);
+ if (!br_vlan)
+ return NULL;
+
+ INIT_LIST_HEAD(&br_vlan->port_vlan_list);
+ br_vlan->vid = vid;
+ list_add(&br_vlan->head, &br_port->vlan_list);
+
+ return br_vlan;
+}
+
+static void prestera_bridge_vlan_destroy(struct prestera_bridge_vlan *br_vlan)
+{
+ list_del(&br_vlan->head);
+ WARN_ON(!list_empty(&br_vlan->port_vlan_list));
+ kfree(br_vlan);
+}
+
+static struct prestera_bridge_vlan *
+prestera_bridge_vlan_by_vid(struct prestera_bridge_port *br_port, u16 vid)
+{
+ struct prestera_bridge_vlan *br_vlan;
+
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid)
+ return br_vlan;
+ }
+
+ return NULL;
+}
+
+static int prestera_bridge_vlan_port_count(struct prestera_bridge *bridge,
+ u16 vid)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int count = 0;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ if (br_vlan->vid == vid) {
+ count += 1;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+
+static void prestera_bridge_vlan_put(struct prestera_bridge_vlan *br_vlan)
+{
+ if (list_empty(&br_vlan->port_vlan_list))
+ prestera_bridge_vlan_destroy(br_vlan);
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_by_vid(struct prestera_port *port, u16 vid)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &port->vlans_list, port_head) {
+ if (port_vlan->vid == vid)
+ return port_vlan;
+ }
+
+ return NULL;
+}
+
+static struct prestera_port_vlan *
+prestera_port_vlan_create(struct prestera_port *port, u16 vid, bool untagged)
+{
+ struct prestera_port_vlan *port_vlan;
+ int err;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan)
+ return ERR_PTR(-EEXIST);
+
+ err = prestera_hw_vlan_port_set(port, vid, true, untagged);
+ if (err)
+ return ERR_PTR(err);
+
+ port_vlan = kzalloc(sizeof(*port_vlan), GFP_KERNEL);
+ if (!port_vlan) {
+ err = -ENOMEM;
+ goto err_port_vlan_alloc;
+ }
+
+ port_vlan->port = port;
+ port_vlan->vid = vid;
+
+ list_add(&port_vlan->port_head, &port->vlans_list);
+
+ return port_vlan;
+
+err_port_vlan_alloc:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ return ERR_PTR(err);
+}
+
+static void
+prestera_port_vlan_bridge_leave(struct prestera_port_vlan *port_vlan)
+{
+ u32 fdb_flush_mode = PRESTERA_FDB_FLUSH_MODE_DYNAMIC;
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ struct prestera_bridge_port *br_port;
+ bool last_port, last_vlan;
+ u16 vid = port_vlan->vid;
+ int port_count;
+
+ br_port = port_vlan->br_port;
+ port_count = prestera_bridge_vlan_port_count(br_port->bridge, vid);
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+
+ last_vlan = list_is_singular(&br_port->vlan_list);
+ last_port = port_count == 1;
+
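+ /* flush the whole port if this was its last vlan, the whole vlan if
+ * this was its last port, or just this port/vlan pair otherwise
+ */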
+ if (last_vlan)
+ prestera_hw_fdb_flush_port(port, fdb_flush_mode);
+ else if (last_port)
+ prestera_hw_fdb_flush_vlan(port->sw, vid, fdb_flush_mode);
+ else
+ prestera_hw_fdb_flush_port_vlan(port, vid, fdb_flush_mode);
+
+ list_del(&port_vlan->br_vlan_head);
+ prestera_bridge_vlan_put(br_vlan);
+ prestera_bridge_port_put(br_port);
+ port_vlan->br_port = NULL;
+}
+
+static void prestera_port_vlan_destroy(struct prestera_port_vlan *port_vlan)
+{
+ struct prestera_port *port = port_vlan->port;
+ u16 vid = port_vlan->vid;
+
+ if (port_vlan->br_port)
+ prestera_port_vlan_bridge_leave(port_vlan);
+
+ prestera_hw_vlan_port_set(port, vid, false, false);
+ list_del(&port_vlan->port_head);
+ kfree(port_vlan);
+}
+
+static struct prestera_bridge *
+prestera_bridge_create(struct prestera_switchdev *swdev, struct net_device *dev)
+{
+ bool vlan_enabled = br_vlan_enabled(dev);
+ struct prestera_bridge *bridge;
+ u16 bridge_id;
+ int err;
+
+ if (vlan_enabled && swdev->bridge_8021q_exists) {
+ netdev_err(dev, "Only one VLAN-aware bridge is supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ if (vlan_enabled) {
+ swdev->bridge_8021q_exists = true;
+ } else {
+ err = prestera_hw_bridge_create(swdev->sw, &bridge_id);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ bridge->bridge_id = bridge_id;
+ }
+
+ bridge->vlan_enabled = vlan_enabled;
+ bridge->swdev = swdev;
+ bridge->dev = dev;
+
+ INIT_LIST_HEAD(&bridge->port_list);
+
+ list_add(&bridge->head, &swdev->bridge_list);
+
+ return bridge;
+}
+
+static void prestera_bridge_destroy(struct prestera_bridge *bridge)
+{
+ struct prestera_switchdev *swdev = bridge->swdev;
+
+ list_del(&bridge->head);
+
+ if (bridge->vlan_enabled)
+ swdev->bridge_8021q_exists = false;
+ else
+ prestera_hw_bridge_delete(swdev->sw, bridge->bridge_id);
+
+ WARN_ON(!list_empty(&bridge->port_list));
+ kfree(bridge);
+}
+
+static void prestera_bridge_put(struct prestera_bridge *bridge)
+{
+ if (list_empty(&bridge->port_list))
+ prestera_bridge_destroy(bridge);
+}
+
+static
+struct prestera_bridge *prestera_bridge_by_dev(struct prestera_switchdev *swdev,
+ const struct net_device *dev)
+{
+ struct prestera_bridge *bridge;
+
+ list_for_each_entry(bridge, &swdev->bridge_list, head)
+ if (bridge->dev == dev)
+ return bridge;
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+__prestera_bridge_port_by_dev(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ list_for_each_entry(br_port, &bridge->port_list, head) {
+ if (br_port->dev == dev)
+ return br_port;
+ }
+
+ return NULL;
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_by_dev(struct prestera_switchdev *swdev,
+ struct net_device *dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(dev);
+ struct prestera_bridge *bridge;
+
+ if (!br_dev)
+ return NULL;
+
+ bridge = prestera_bridge_by_dev(swdev, br_dev);
+ if (!bridge)
+ return NULL;
+
+ return __prestera_bridge_port_by_dev(bridge, dev);
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_create(struct prestera_bridge *bridge,
+ struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return NULL;
+
+ br_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
+ BR_MCAST_FLOOD;
+ br_port->stp_state = BR_STATE_DISABLED;
+ refcount_set(&br_port->ref_count, 1);
+ br_port->bridge = bridge;
+ br_port->dev = dev;
+
+ INIT_LIST_HEAD(&br_port->vlan_list);
+ list_add(&br_port->head, &bridge->port_list);
+
+ return br_port;
+}
+
+static void
+prestera_bridge_port_destroy(struct prestera_bridge_port *br_port)
+{
+ list_del(&br_port->head);
+ WARN_ON(!list_empty(&br_port->vlan_list));
+ kfree(br_port);
+}
+
+static void prestera_bridge_port_get(struct prestera_bridge_port *br_port)
+{
+ refcount_inc(&br_port->ref_count);
+}
+
+static void prestera_bridge_port_put(struct prestera_bridge_port *br_port)
+{
+ struct prestera_bridge *bridge = br_port->bridge;
+
+ if (refcount_dec_and_test(&br_port->ref_count)) {
+ prestera_bridge_port_destroy(br_port);
+ prestera_bridge_put(bridge);
+ }
+}
+
+static struct prestera_bridge_port *
+prestera_bridge_port_add(struct prestera_bridge *bridge, struct net_device *dev)
+{
+ struct prestera_bridge_port *br_port;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, dev);
+ if (br_port) {
+ prestera_bridge_port_get(br_port);
+ return br_port;
+ }
+
+ br_port = prestera_bridge_port_create(bridge, dev);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ return br_port;
+}
+
+static int
+prestera_bridge_1d_port_join(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+ struct prestera_bridge *bridge = br_port->bridge;
+ int err;
+
+ err = prestera_hw_bridge_port_add(port, bridge->bridge_id);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ goto err_port_flood_set;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ return 0;
+
+err_port_learning_set:
+ prestera_hw_port_flood_set(port, false);
+err_port_flood_set:
+ prestera_hw_bridge_port_delete(port, bridge->bridge_id);
+
+ return err;
+}
+
+static int prestera_port_bridge_join(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge) {
+ bridge = prestera_bridge_create(swdev, upper);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ br_port = prestera_bridge_port_add(bridge, port->dev);
+ if (IS_ERR(br_port)) {
+ err = PTR_ERR(br_port);
+ goto err_brport_create;
+ }
+
+ if (bridge->vlan_enabled)
+ return 0;
+
+ err = prestera_bridge_1d_port_join(br_port);
+ if (err)
+ goto err_port_join;
+
+ return 0;
+
+err_port_join:
+ prestera_bridge_port_put(br_port);
+err_brport_create:
+ prestera_bridge_put(bridge);
+ return err;
+}
+
+static void prestera_bridge_1q_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_port_pvid_set(port, PRESTERA_DEFAULT_VID);
+}
+
+static void prestera_bridge_1d_port_leave(struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = netdev_priv(br_port->dev);
+
+ prestera_hw_fdb_flush_port(port, PRESTERA_FDB_FLUSH_MODE_ALL);
+ prestera_hw_bridge_port_delete(port, br_port->bridge->bridge_id);
+}
+
+static int prestera_port_vid_stp_set(struct prestera_port *port, u16 vid,
+ u8 state)
+{
+ u8 hw_state = state;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ hw_state = PRESTERA_STP_DISABLED;
+ break;
+
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ hw_state = PRESTERA_STP_BLOCK_LISTEN;
+ break;
+
+ case BR_STATE_LEARNING:
+ hw_state = PRESTERA_STP_LEARN;
+ break;
+
+ case BR_STATE_FORWARDING:
+ hw_state = PRESTERA_STP_FORWARD;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return prestera_hw_vlan_port_stp_set(port, vid, hw_state);
+}
+
+static void prestera_port_bridge_leave(struct prestera_port *port,
+ struct net_device *upper)
+{
+ struct prestera_switchdev *swdev = port->sw->swdev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+
+ bridge = prestera_bridge_by_dev(swdev, upper);
+ if (!bridge)
+ return;
+
+ br_port = __prestera_bridge_port_by_dev(bridge, port->dev);
+ if (!br_port)
+ return;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ prestera_bridge_1q_port_leave(br_port);
+ else
+ prestera_bridge_1d_port_leave(br_port);
+
+ prestera_hw_port_learning_set(port, false);
+ prestera_hw_port_flood_set(port, false);
+ prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING);
+ prestera_bridge_port_put(br_port);
+}
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct netlink_ext_ack *extack;
+ struct prestera_port *port;
+ struct net_device *upper;
+ int err;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ port = netdev_priv(dev);
+ upper = info->upper_dev;
+
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER:
+ if (!netif_is_bridge_master(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
+ return -EINVAL;
+ }
+
+ if (!info->linking)
+ break;
+
+ if (netdev_has_any_upper_dev(upper)) {
+ NL_SET_ERR_MSG_MOD(extack, "Upper device is already enslaved");
+ return -EINVAL;
+ }
+ break;
+
+ case NETDEV_CHANGEUPPER:
+ if (!netif_is_bridge_master(upper))
+ break;
+
+ if (info->linking) {
+ err = prestera_port_bridge_join(port, upper);
+ if (err)
+ return err;
+ } else {
+ prestera_port_bridge_leave(port, upper);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_br_flags_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ unsigned long flags)
+{
+ struct prestera_bridge_port *br_port;
+ int err;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, flags & BR_LEARNING);
+ if (err)
+ return err;
+
+ br_port->flags = flags;
+
+ return 0;
+}
+
+static int prestera_port_attr_br_ageing_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ unsigned long ageing_clock_t)
+{
+ unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
+ u32 ageing_time_ms = jiffies_to_msecs(ageing_jiffies);
+ struct prestera_switch *sw = port->sw;
+
+ if (switchdev_trans_ph_prepare(trans)) {
+ if (ageing_time_ms < PRESTERA_MIN_AGEING_TIME_MS ||
+ ageing_time_ms > PRESTERA_MAX_AGEING_TIME_MS)
+ return -ERANGE;
+
+ return 0;
+ }
+
+ return prestera_hw_switch_ageing_set(sw, ageing_time_ms);
+}
+
+static int prestera_port_attr_br_vlan_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ bool vlan_enabled)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+
+ if (!switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ bridge = prestera_bridge_by_dev(sw->swdev, dev);
+ if (WARN_ON(!bridge))
+ return -EINVAL;
+
+ if (bridge->vlan_enabled == vlan_enabled)
+ return 0;
+
+ netdev_err(bridge->dev, "VLAN filtering can't be changed for existing bridge\n");
+
+ return -EINVAL;
+}
+
+static int prestera_port_bridge_vlan_stp_set(struct prestera_port *port,
+ struct prestera_bridge_vlan *br_vlan,
+ u8 state)
+{
+ struct prestera_port_vlan *port_vlan;
+
+ list_for_each_entry(port_vlan, &br_vlan->port_vlan_list, br_vlan_head) {
+ if (port_vlan->port != port)
+ continue;
+
+ return prestera_port_vid_stp_set(port, br_vlan->vid, state);
+ }
+
+ return 0;
+}
+
+static int prestera_port_attr_stp_state_set(struct prestera_port *port,
+ struct switchdev_trans *trans,
+ struct net_device *dev,
+ u8 state)
+{
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge_vlan *br_vlan;
+ int err;
+ u16 vid;
+
+ if (switchdev_trans_ph_prepare(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(port->sw->swdev, dev);
+ if (!br_port)
+ return 0;
+
+ if (!br_port->bridge->vlan_enabled) {
+ vid = br_port->bridge->bridge_id;
+ err = prestera_port_vid_stp_set(port, vid, state);
+ if (err)
+ goto err_port_stp_set;
+ } else {
+ list_for_each_entry(br_vlan, &br_port->vlan_list, head) {
+ err = prestera_port_bridge_vlan_stp_set(port, br_vlan,
+ state);
+ if (err)
+ goto err_port_vlan_stp_set;
+ }
+ }
+
+ br_port->stp_state = state;
+
+ return 0;
+
+err_port_vlan_stp_set:
+ list_for_each_entry_continue_reverse(br_vlan, &br_port->vlan_list, head)
+ prestera_port_bridge_vlan_stp_set(port, br_vlan, br_port->stp_state);
+ return err;
+
+err_port_stp_set:
+ prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+
+ return err;
+}
+
+static int prestera_port_obj_attr_set(struct net_device *dev,
+ const struct switchdev_attr *attr,
+ struct switchdev_trans *trans)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ int err = 0;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ err = prestera_port_attr_stp_state_set(port, trans,
+ attr->orig_dev,
+ attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ if (attr->u.brport_flags &
+ ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
+ err = -EINVAL;
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ err = prestera_port_attr_br_flags_set(port, trans,
+ attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ err = prestera_port_attr_br_ageing_set(port, trans,
+ attr->u.ageing_time);
+ break;
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ err = prestera_port_attr_br_vlan_set(port, trans,
+ attr->orig_dev,
+ attr->u.vlan_filtering);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static void
+prestera_fdb_offload_notify(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *info)
+{
+ struct switchdev_notifier_fdb_info send_info;
+
+ send_info.addr = info->addr;
+ send_info.vid = info->vid;
+ send_info.offloaded = true;
+
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, port->dev,
+ &send_info.info, NULL);
+}
+
+static int prestera_port_fdb_set(struct prestera_port *port,
+ struct switchdev_notifier_fdb_info *fdb_info,
+ bool adding)
+{
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge_port *br_port;
+ struct prestera_bridge *bridge;
+ int err;
+ u16 vid;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, port->dev);
+ if (!br_port)
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+
+ if (bridge->vlan_enabled)
+ vid = fdb_info->vid;
+ else
+ vid = bridge->bridge_id;
+
+ if (adding)
+ err = prestera_hw_fdb_add(port, fdb_info->addr, vid, false);
+ else
+ err = prestera_hw_fdb_del(port, fdb_info->addr, vid);
+
+ return err;
+}
+
+static void prestera_fdb_event_work(struct work_struct *work)
+{
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct prestera_fdb_event_work *swdev_work;
+ struct prestera_port *port;
+ struct net_device *dev;
+ int err;
+
+ swdev_work = container_of(work, struct prestera_fdb_event_work, work);
+ dev = swdev_work->dev;
+
+ rtnl_lock();
+
+ port = prestera_port_dev_lower_find(dev);
+ if (!port)
+ goto out_unlock;
+
+ switch (swdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ if (!fdb_info->added_by_user)
+ break;
+
+ err = prestera_port_fdb_set(port, fdb_info, true);
+ if (err)
+ break;
+
+ prestera_fdb_offload_notify(port, fdb_info);
+ break;
+
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = &swdev_work->fdb_info;
+ prestera_port_fdb_set(port, fdb_info, false);
+ break;
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+ kfree(swdev_work->fdb_info.addr);
+ kfree(swdev_work);
+ dev_put(dev);
+}
+
+static int prestera_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct prestera_fdb_event_work *swdev_work;
+ struct net_device *upper;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!prestera_netdev_check(dev))
+ return NOTIFY_DONE;
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
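+ /* FDB events arrive in atomic context; defer the hw programming to
+ * the ordered workqueue
+ */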
+ swdev_work = kzalloc(sizeof(*swdev_work), GFP_ATOMIC);
+ if (!swdev_work)
+ return NOTIFY_BAD;
+
+ swdev_work->event = event;
+ swdev_work->dev = dev;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info,
+ struct switchdev_notifier_fdb_info,
+ info);
+
+ INIT_WORK(&swdev_work->work, prestera_fdb_event_work);
+ memcpy(&swdev_work->fdb_info, ptr,
+ sizeof(swdev_work->fdb_info));
+
+ swdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!swdev_work->fdb_info.addr)
+ goto out_bad;
+
+ ether_addr_copy((u8 *)swdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(dev);
+ break;
+
+ default:
+ kfree(swdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(swdev_wq, &swdev_work->work);
+ return NOTIFY_DONE;
+
+out_bad:
+ kfree(swdev_work);
+ return NOTIFY_BAD;
+}
+
+static int
+prestera_port_vlan_bridge_join(struct prestera_port_vlan *port_vlan,
+ struct prestera_bridge_port *br_port)
+{
+ struct prestera_port *port = port_vlan->port;
+ struct prestera_bridge_vlan *br_vlan;
+ u16 vid = port_vlan->vid;
+ int err;
+
+ if (port_vlan->br_port)
+ return 0;
+
+ err = prestera_hw_port_flood_set(port, br_port->flags & BR_FLOOD);
+ if (err)
+ return err;
+
+ err = prestera_hw_port_learning_set(port, br_port->flags & BR_LEARNING);
+ if (err)
+ goto err_port_learning_set;
+
+ err = prestera_port_vid_stp_set(port, vid, br_port->stp_state);
+ if (err)
+ goto err_port_vid_stp_set;
+
+ br_vlan = prestera_bridge_vlan_by_vid(br_port, vid);
+ if (!br_vlan) {
+ br_vlan = prestera_bridge_vlan_create(br_port, vid);
+ if (!br_vlan) {
+ err = -ENOMEM;
+ goto err_bridge_vlan_get;
+ }
+ }
+
+ list_add(&port_vlan->br_vlan_head, &br_vlan->port_vlan_list);
+
+ prestera_bridge_port_get(br_port);
+ port_vlan->br_port = br_port;
+
+ return 0;
+
+err_bridge_vlan_get:
+ prestera_port_vid_stp_set(port, vid, BR_STATE_FORWARDING);
+err_port_vid_stp_set:
+ prestera_hw_port_learning_set(port, false);
+err_port_learning_set:
+ return err;
+}
+
+static int
+prestera_bridge_port_vlan_add(struct prestera_port *port,
+ struct prestera_bridge_port *br_port,
+ u16 vid, bool is_untagged, bool is_pvid,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port_vlan *port_vlan;
+ u16 old_pvid = port->pvid;
+ u16 pvid;
+ int err;
+
+ if (is_pvid)
+ pvid = vid;
+ else
+ pvid = port->pvid == vid ? 0 : port->pvid;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (port_vlan && port_vlan->br_port != br_port)
+ return -EEXIST;
+
+ if (!port_vlan) {
+ port_vlan = prestera_port_vlan_create(port, vid, is_untagged);
+ if (IS_ERR(port_vlan))
+ return PTR_ERR(port_vlan);
+ } else {
+ err = prestera_hw_vlan_port_set(port, vid, true, is_untagged);
+ if (err)
+ goto err_port_vlan_set;
+ }
+
+ err = prestera_port_pvid_set(port, pvid);
+ if (err)
+ goto err_port_pvid_set;
+
+ err = prestera_port_vlan_bridge_join(port_vlan, br_port);
+ if (err)
+ goto err_port_vlan_bridge_join;
+
+ return 0;
+
+err_port_vlan_bridge_join:
+ prestera_port_pvid_set(port, old_pvid);
+err_port_pvid_set:
+ prestera_hw_vlan_port_set(port, vid, false, false);
+err_port_vlan_set:
+ prestera_port_vlan_destroy(port_vlan);
+
+ return err;
+}
+
+static void
+prestera_bridge_port_vlan_del(struct prestera_port *port,
+ struct prestera_bridge_port *br_port, u16 vid)
+{
+ u16 pvid = port->pvid == vid ? 0 : port->pvid;
+ struct prestera_port_vlan *port_vlan;
+
+ port_vlan = prestera_port_vlan_by_vid(port, vid);
+ if (WARN_ON(!port_vlan))
+ return;
+
+ prestera_port_vlan_bridge_leave(port_vlan);
+ prestera_port_pvid_set(port, pvid);
+ prestera_port_vlan_destroy(port_vlan);
+}
+
+static int prestera_port_vlans_add(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ struct prestera_bridge *bridge;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return 0;
+
+ if (switchdev_trans_ph_commit(trans))
+ return 0;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ bridge = br_port->bridge;
+ if (!bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
+ int err;
+
+ err = prestera_bridge_port_vlan_add(port, br_port,
+ vid, flag_untagged,
+ flag_pvid, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int prestera_port_obj_add(struct net_device *dev,
+ const struct switchdev_obj *obj,
+ struct switchdev_trans *trans,
+ struct netlink_ext_ack *extack)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ const struct switchdev_obj_port_vlan *vlan;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ return prestera_port_vlans_add(port, vlan, trans, extack);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_port_vlans_del(struct prestera_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *dev = vlan->obj.orig_dev;
+ struct prestera_bridge_port *br_port;
+ struct prestera_switch *sw = port->sw;
+ u16 vid;
+
+ if (netif_is_bridge_master(dev))
+ return -EOPNOTSUPP;
+
+ br_port = prestera_bridge_port_by_dev(sw->swdev, dev);
+ if (WARN_ON(!br_port))
+ return -EINVAL;
+
+ if (!br_port->bridge->vlan_enabled)
+ return 0;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ prestera_bridge_port_vlan_del(port, br_port, vid);
+
+ return 0;
+}
+
+static int prestera_port_obj_del(struct net_device *dev,
+ const struct switchdev_obj *obj)
+{
+ struct prestera_port *port = netdev_priv(dev);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ return prestera_port_vlans_del(port, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int prestera_switchdev_blk_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prestera_netdev_check,
+ prestera_port_obj_attr_set);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void prestera_fdb_event(struct prestera_switch *sw,
+ struct prestera_event *evt, void *arg)
+{
+ struct switchdev_notifier_fdb_info info;
+ struct prestera_port *port;
+
+ port = prestera_find_port(sw, evt->fdb_evt.port_id);
+ if (!port)
+ return;
+
+ info.addr = evt->fdb_evt.data.mac;
+ info.vid = evt->fdb_evt.vid;
+ info.offloaded = true;
+
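+ /* relay hw-learned/aged entries to the bridge via switchdev notifiers */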
+ rtnl_lock();
+
+ switch (evt->id) {
+ case PRESTERA_FDB_EVENT_LEARNED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ case PRESTERA_FDB_EVENT_AGED:
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ port->dev, &info.info, NULL);
+ break;
+ }
+
+ rtnl_unlock();
+}
+
+static int prestera_fdb_init(struct prestera_switch *sw)
+{
+ int err;
+
+ err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event, NULL);
+ if (err)
+ return err;
+
+ err = prestera_hw_switch_ageing_set(sw, PRESTERA_DEFAULT_AGEING_TIME_MS);
+ if (err)
+ goto err_ageing_set;
+
+ return 0;
+
+err_ageing_set:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+ return err;
+}
+
+static void prestera_fdb_fini(struct prestera_switch *sw)
+{
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_FDB,
+ prestera_fdb_event);
+}
+
+static int prestera_switchdev_handler_init(struct prestera_switchdev *swdev)
+{
+ int err;
+
+ swdev->swdev_nb.notifier_call = prestera_switchdev_event;
+ err = register_switchdev_notifier(&swdev->swdev_nb);
+ if (err)
+ goto err_register_swdev_notifier;
+
+ swdev->swdev_nb_blk.notifier_call = prestera_switchdev_blk_event;
+ err = register_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ if (err)
+ goto err_register_blk_swdev_notifier;
+
+ return 0;
+
+err_register_blk_swdev_notifier:
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+err_register_swdev_notifier:
+ return err;
+}
+
+static void prestera_switchdev_handler_fini(struct prestera_switchdev *swdev)
+{
+ unregister_switchdev_blocking_notifier(&swdev->swdev_nb_blk);
+ unregister_switchdev_notifier(&swdev->swdev_nb);
+}
+
+int prestera_switchdev_init(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev;
+ int err;
+
+ swdev = kzalloc(sizeof(*swdev), GFP_KERNEL);
+ if (!swdev)
+ return -ENOMEM;
+
+ sw->swdev = swdev;
+ swdev->sw = sw;
+
+ INIT_LIST_HEAD(&swdev->bridge_list);
+
+ swdev_wq = alloc_ordered_workqueue("%s_ordered", 0, "prestera_br");
+ if (!swdev_wq) {
+ err = -ENOMEM;
+ goto err_alloc_wq;
+ }
+
+ err = prestera_switchdev_handler_init(swdev);
+ if (err)
+ goto err_swdev_init;
+
+ err = prestera_fdb_init(sw);
+ if (err)
+ goto err_fdb_init;
+
+ return 0;
+
+err_fdb_init:
+ prestera_switchdev_handler_fini(swdev);
+err_swdev_init:
+ destroy_workqueue(swdev_wq);
+err_alloc_wq:
+ kfree(swdev);
+
+ return err;
+}
+
+void prestera_switchdev_fini(struct prestera_switch *sw)
+{
+ struct prestera_switchdev *swdev = sw->swdev;
+
+ prestera_fdb_fini(sw);
+ prestera_switchdev_handler_fini(swdev);
+ destroy_workqueue(swdev_wq);
+ kfree(swdev);
+}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
new file mode 100644
index 000000000000..606e21d2355b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */
+/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved. */
+
+#ifndef _PRESTERA_SWITCHDEV_H_
+#define _PRESTERA_SWITCHDEV_H_
+
+int prestera_switchdev_init(struct prestera_switch *sw);
+void prestera_switchdev_fini(struct prestera_switch *sw);
+
+int prestera_bridge_port_event(struct net_device *dev, unsigned long event,
+ void *ptr);
+
+#endif /* _PRESTERA_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index eb8cf60ecf12..d1e4d42e497d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1187,11 +1187,10 @@ static int pxa168_eth_stop(struct net_device *dev)
static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
- int retval;
struct pxa168_eth_private *pep = netdev_priv(dev);
dev->mtu = mtu;
- retval = set_port_config_ext(pep);
+ set_port_config_ext(pep);
if (!netif_running(dev))
return 0;
@@ -1541,10 +1540,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
}
if (dev->phydev)
phy_disconnect(dev->phydev);
- if (pep->clk) {
- clk_disable_unprepare(pep->clk);
- }
+ clk_disable_unprepare(pep->clk);
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 6a930351cb23..8a9c0f490bfb 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3338,9 +3338,9 @@ static void skge_error_irq(struct skge_hw *hw)
* because accessing phy registers requires spin wait which might
* cause excess interrupt latency.
*/
-static void skge_extirq(unsigned long arg)
+static void skge_extirq(struct tasklet_struct *t)
{
- struct skge_hw *hw = (struct skge_hw *) arg;
+ struct skge_hw *hw = from_tasklet(hw, t, phy_task);
int port;
for (port = 0; port < hw->ports; port++) {
@@ -3927,7 +3927,7 @@ static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->pdev = pdev;
spin_lock_init(&hw->hw_lock);
spin_lock_init(&hw->phy_lock);
- tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw);
+ tasklet_setup(&hw->phy_task, skge_extirq);
hw->regs = ioremap(pci_resource_start(pdev, 0), 0x4000);
if (!hw->regs) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 65f8a4b6ed0c..3b8576b9c2f9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -55,11 +55,11 @@
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
-void mlx4_cq_tasklet_cb(unsigned long data)
+void mlx4_cq_tasklet_cb(struct tasklet_struct *t)
{
unsigned long flags;
unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
- struct mlx4_eq_tasklet *ctx = (struct mlx4_eq_tasklet *)data;
+ struct mlx4_eq_tasklet *ctx = from_tasklet(ctx, t, task);
struct mlx4_cq *mcq, *temp;
spin_lock_irqsave(&ctx->lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index b816154bc79a..23849f2b9c25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1106,6 +1106,24 @@ static int mlx4_en_set_pauseparam(struct net_device *dev,
return err;
}
+static void mlx4_en_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct bitmap_iterator it;
+
+ bitmap_iterator_init(&it, priv->stats_bitmap.bitmap, NUM_ALL_STATS);
+
+ spin_lock_bh(&priv->stats_lock);
+ if (test_bit(FLOW_PRIORITY_STATS_IDX_TX_FRAMES,
+ priv->stats_bitmap.bitmap))
+ stats->tx_pause_frames = priv->tx_flowstats.tx_pause;
+ if (test_bit(FLOW_PRIORITY_STATS_IDX_RX_FRAMES,
+ priv->stats_bitmap.bitmap))
+ stats->rx_pause_frames = priv->rx_flowstats.rx_pause;
+ spin_unlock_bh(&priv->stats_lock);
+}
+
static void mlx4_en_get_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
@@ -2138,6 +2156,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_msglevel = mlx4_en_set_msglevel,
.get_coalesce = mlx4_en_get_coalesce,
.set_coalesce = mlx4_en_set_coalesce,
+ .get_pause_stats = mlx4_en_get_pause_stats,
.get_pauseparam = mlx4_en_get_pauseparam,
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b50c567ef508..502d1b97855c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -705,7 +705,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
frags = ring->rx_info + (index << priv->log_rx_info);
va = page_address(frags[0].page) + frags[0].page_offset;
- prefetchw(va);
+ net_prefetchw(va);
/*
* make sure we read the CQE after we read the ownership bit
*/
@@ -943,6 +943,9 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
bool clean_complete = true;
int done;
+ if (!budget)
+ return 0;
+
if (priv->tx_ring_num[TX_XDP]) {
xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
if (xdp_tx_cq->xdp_busy) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9dff7b086c9f..3ddb7268e415 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -350,7 +350,7 @@ u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
.dma = tx_info->map0_dma,
};
- if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ if (!napi_mode || !mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
dma_unmap_page(priv->ddev, tx_info->map0_dma,
PAGE_SIZE, priv->dma_dir);
put_page(tx_info->page);
@@ -842,6 +842,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
struct mlx4_en_tx_info *tx_info;
+ u32 __maybe_unused ring_cons;
int tx_ind;
int nr_txbb;
int desc_size;
@@ -855,7 +856,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u8 data_offset;
- u32 ring_cons;
bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
@@ -1075,7 +1075,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
*/
smp_rmb();
- ring_cons = READ_ONCE(ring->cons);
if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
netif_tx_wake_queue(ring->tx_queue);
ring->wake_queue++;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index ae305c2e9225..9e48509ed3b2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1057,8 +1057,7 @@ static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx4_cq_tasklet_cb);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 258c7a96f269..c326b434734e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3031,6 +3031,17 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
if (err)
return err;
+ /* Ethernet and IB drivers will normally set the port type,
+ * but if they are not built, set the type now to prevent
+ * devlink_port_type_warn() from firing.
+ */
+ if (!IS_ENABLED(CONFIG_MLX4_EN) &&
+ dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
+ devlink_port_type_eth_set(&info->devlink_port, NULL);
+ else if (!IS_ENABLED(CONFIG_MLX4_INFINIBAND) &&
+ dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
+ devlink_port_type_ib_set(&info->devlink_port, NULL);
+
info->dev = dev;
info->port = port;
if (!mlx4_is_slave(dev)) {
@@ -3935,6 +3946,8 @@ static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
struct devlink *devlink);
static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
@@ -3951,7 +3964,8 @@ static int mlx4_devlink_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
-static int mlx4_devlink_reload_up(struct devlink *devlink,
+static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
@@ -3959,6 +3973,7 @@ static int mlx4_devlink_reload_up(struct devlink *devlink,
struct mlx4_dev_persistent *persist = dev->persist;
int err;
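+ /* Driver re-init is the only reload action mlx4 supports, so report it unconditionally. */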
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
err = mlx4_restart_one_up(persist->pdev, true, devlink);
if (err)
mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
@@ -3969,6 +3984,7 @@ static int mlx4_devlink_reload_up(struct devlink *devlink,
static const struct devlink_ops mlx4_devlink_ops = {
.port_type_set = mlx4_devlink_port_type_set,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = mlx4_devlink_reload_down,
.reload_up = mlx4_devlink_reload_up,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 527b52e48276..64bed7ac3836 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1217,7 +1217,7 @@ void mlx4_cmd_use_polling(struct mlx4_dev *dev);
int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
u16 op, unsigned long timeout);
-void mlx4_cq_tasklet_cb(unsigned long data);
+void mlx4_cq_tasklet_cb(struct tasklet_struct *t);
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 86b6051da8ec..51d4eaab6a2f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -84,6 +84,11 @@ struct mlx4_en_flow_stats_rx {
MLX4_NUM_PRIORITIES)
};
+#define FLOW_PRIORITY_STATS_IDX_RX_FRAMES (NUM_MAIN_STATS + \
+ NUM_PORT_STATS + \
+ NUM_PF_STATS + \
+ NUM_FLOW_PRIORITY_STATS_RX)
+
struct mlx4_en_flow_stats_tx {
u64 tx_pause;
u64 tx_pause_duration;
@@ -93,6 +98,13 @@ struct mlx4_en_flow_stats_tx {
MLX4_NUM_PRIORITIES)
};
+#define FLOW_PRIORITY_STATS_IDX_TX_FRAMES (NUM_MAIN_STATS + \
+ NUM_PORT_STATS + \
+ NUM_PF_STATS + \
+ NUM_FLOW_PRIORITY_STATS_RX + \
+ NUM_FLOW_STATS_RX + \
+ NUM_FLOW_PRIORITY_STATS_TX)
+
#define NUM_FLOW_STATS (NUM_FLOW_STATS_RX + NUM_FLOW_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_TX + \
NUM_FLOW_PRIORITY_STATS_RX)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 10e6886c96ba..2d477f9a8cb7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -16,7 +16,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
- diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o
+ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o fw_reset.o
#
# Netdev basic
@@ -24,7 +24,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
- en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/umem.o \
+ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o
#
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
- en/mapping.o esw/chains.o en/tc_tun.o \
+ en/mapping.o lib/fs_chains.o en/tc_tun.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
@@ -49,7 +49,8 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo
ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
- esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
+ esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
+ esw/devlink_port.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 8db4b5f0f963..291e427e9e4f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -56,8 +56,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
size_t size, dma_addr_t *dma_handle,
int node)
{
+ struct device *device = mlx5_core_dma_dev(dev);
struct mlx5_priv *priv = &dev->priv;
- struct device *device = dev->device;
int original_node;
void *cpu_handle;
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
- dma_free_coherent(dev->device, buf->size, buf->frags->buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
buf->frags->map);
kfree(buf->frags);
@@ -140,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
- dma_free_coherent(dev->device, frag_sz,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
@@ -153,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf:
while (i--)
- dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
@@ -169,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
- dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
@@ -275,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
- dma_free_coherent(dev->device, PAGE_SIZE,
+ dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 2d1f4b3be9bf..e49387dbef98 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1989,9 +1989,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = dev->device;
-
- cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
+ cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
@@ -2004,9 +2002,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
return 0;
}
- dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
cmd->alloc_dma);
- cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
+ cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
2 * MLX5_ADAPTER_PAGE_SIZE - 1,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
@@ -2020,9 +2018,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
- struct device *ddev = dev->device;
-
- dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
+ dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma);
}
@@ -2054,7 +2050,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (!cmd->stats)
return -ENOMEM;
- cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
+ cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
if (!cmd->pool) {
err = -ENOMEM;
goto dma_pool_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 8379b24cb838..df3e4938ecdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -42,11 +42,11 @@
#define TASKLET_MAX_TIME 2
#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
-void mlx5_cq_tasklet_cb(unsigned long data)
+void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
{
unsigned long flags;
unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
- struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
+ struct mlx5_eq_tasklet *ctx = from_tasklet(ctx, t, task);
struct mlx5_core_cq *mcq;
struct mlx5_core_cq *temp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index c709e9a385f6..a28f95df2901 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -4,22 +4,19 @@
#include <devlink.h>
#include "mlx5_core.h"
+#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
static int mlx5_devlink_flash_update(struct devlink *devlink,
- const char *file_name,
- const char *component,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
const struct firmware *fw;
int err;
- if (component)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&fw, file_name, &dev->pdev->dev);
+ err = request_firmware_direct(&fw, params->file_name, &dev->pdev->dev);
if (err)
return err;
@@ -88,21 +85,96 @@ mlx5_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
return 0;
}
+static int mlx5_devlink_reload_fw_activate(struct devlink *devlink, struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 reset_level, reset_type, net_port_alive;
+ int err;
+
+ err = mlx5_fw_reset_query(dev, &reset_level, &reset_type);
+ if (err)
+ return err;
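+ /* Without level3 reset support, the stored FW can only be activated by a reboot. */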
+ if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL3)) {
+ NL_SET_ERR_MSG_MOD(extack, "FW activate requires reboot");
+ return -EINVAL;
+ }
+
+ net_port_alive = !!(reset_type & MLX5_MFRL_REG_RESET_TYPE_NET_PORT_ALIVE);
+ err = mlx5_fw_reset_set_reset_sync(dev, net_port_alive);
+ if (err)
+ goto out;
+
+ err = mlx5_fw_reset_wait_reset_done(dev);
+out:
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "FW activate command failed");
+ return err;
+}
+
+static int mlx5_devlink_trigger_fw_live_patch(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+ u8 reset_level;
+ int err;
+
+ err = mlx5_fw_reset_query(dev, &reset_level, NULL);
+ if (err)
+ return err;
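+ /* Level0 support means the pending FW can be applied by live patching, with no reset at all. */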
+ if (!(reset_level & MLX5_MFRL_REG_RESET_LEVEL0)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "FW upgrade to the stored FW can't be done by FW live patching");
+ return -EINVAL;
+ }
+
+ return mlx5_fw_reset_set_live_patch(dev);
+}
+
static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- mlx5_unload_one(dev, false);
- return 0;
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ mlx5_unload_one(dev, false);
+ return 0;
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
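+ /* The no_reset limit allows only FW live patching; otherwise run the full activation flow. */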
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ return mlx5_devlink_trigger_fw_live_patch(devlink, extack);
+ return mlx5_devlink_reload_fw_activate(devlink, extack);
+ default:
+ /* Unsupported action should not get to this function */
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
}
-static int mlx5_devlink_reload_up(struct devlink *devlink,
+static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
- return mlx5_load_one(dev, false);
+ *actions_performed = BIT(action);
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ return mlx5_load_one(dev, false);
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ break;
+ /* On the fw_activate action the driver is also reloaded, so report a driver re-init as performed */
+ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ return mlx5_load_one(dev, false);
+ default:
+ /* Unsupported action should not get to this function */
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
static const struct devlink_ops mlx5_devlink_ops = {
@@ -118,6 +190,9 @@ static const struct devlink_ops mlx5_devlink_ops = {
#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
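+ /* Advertise the supported reload actions and limits; the devlink core validates requests against them. */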
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = mlx5_devlink_reload_down,
.reload_up = mlx5_devlink_reload_up,
};
@@ -228,6 +303,24 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
}
#endif
+static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
+ return 0;
+}
+
+static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
+ return 0;
+}
+
static const struct devlink_param mlx5_devlink_params[] = {
DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_FLOW_STEERING_MODE,
"flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
@@ -243,6 +336,9 @@ static const struct devlink_param mlx5_devlink_params[] = {
NULL, NULL,
mlx5_devlink_large_group_num_validate),
#endif
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_devlink_enable_remote_dev_reset_get,
+ mlx5_devlink_enable_remote_dev_reset_set, NULL),
};
static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index ad3594c4afcb..2eb022ad7fd0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -124,7 +124,7 @@ static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev;
dma_addr_t dma;
void *buff;
gfp_t gfp;
@@ -142,6 +142,7 @@ static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
}
tracer->buff.log_buf = buff;
+ ddev = mlx5_core_dma_dev(dev);
dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
if (dma_mapping_error(ddev, dma)) {
mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
@@ -162,11 +163,12 @@ free_pages:
static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev;
if (!tracer->buff.log_buf)
return;
+ ddev = mlx5_core_dma_dev(dev);
dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
}
@@ -1064,6 +1066,58 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
kvfree(tracer);
}
+static int mlx5_fw_tracer_recreate_strings_db(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev;
+ int err;
+
+ cancel_work_sync(&tracer->read_fw_strings_work);
+ mlx5_fw_tracer_clean_ready_list(tracer);
+ mlx5_fw_tracer_clean_print_hash(tracer);
+ mlx5_fw_tracer_clean_saved_traces_array(tracer);
+ mlx5_fw_tracer_free_strings_db(tracer);
+
+ dev = tracer->dev;
+ err = mlx5_query_mtrc_caps(tracer);
+ if (err) {
+ mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err);
+ return err;
+ }
+
+ err = mlx5_fw_tracer_allocate_strings_db(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "FWTracer: Allocate strings DB failed %d\n", err);
+ return err;
+ }
+ mlx5_fw_tracer_init_saved_traces_array(tracer);
+
+ return 0;
+}
+
+int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer)
+{
+ struct mlx5_core_dev *dev;
+ int err;
+
+ if (IS_ERR_OR_NULL(tracer))
+ return -EINVAL;
+
+ dev = tracer->dev;
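+ /* A FW update may have changed the tracer strings DB, so tear everything down and rebuild it. */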
+ mlx5_fw_tracer_cleanup(tracer);
+ err = mlx5_fw_tracer_recreate_strings_db(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "Failed to recreate FW tracer strings DB\n");
+ return err;
+ }
+ err = mlx5_fw_tracer_init(tracer);
+ if (err) {
+ mlx5_core_warn(dev, "Failed to re-initialize FW tracer\n");
+ return err;
+ }
+
+ return 0;
+}
+
static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
{
struct mlx5_fw_tracer *tracer = mlx5_nb_cof(nb, struct mlx5_fw_tracer, nb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 40601fba80ba..97252a85d65e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -191,5 +191,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer);
int mlx5_fw_tracer_trigger_core_dump_general(struct mlx5_core_dev *dev);
int mlx5_fw_tracer_get_saved_traces_objects(struct mlx5_fw_tracer *tracer,
struct devlink_fmsg *fmsg);
+int mlx5_fw_tracer_reload(struct mlx5_fw_tracer *tracer);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index 4924a5658853..ed4fb79b4db7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -78,7 +78,7 @@ static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump
struct page *page)
{
struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
- struct device *ddev = &dev->pdev->dev;
+ struct device *ddev = mlx5_core_dma_dev(dev);
u32 out_seq_num;
u32 in_seq_num;
dma_addr_t dma;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
index a894ea98c95a..3dc9dd3f24dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
@@ -43,19 +43,13 @@ static void mlx5_peer_pf_cleanup(struct mlx5_core_dev *dev)
int mlx5_ec_init(struct mlx5_core_dev *dev)
{
- int err = 0;
-
if (!mlx5_core_is_ecpf(dev))
return 0;
/* ECPF shall enable HCA for peer PF in the same way a PF
* does this for its VFs.
*/
- err = mlx5_peer_pf_init(dev);
- if (err)
- return err;
-
- return 0;
+ return mlx5_peer_pf_init(dev);
}
void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 356f5852955f..2f05b0f9de01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -226,6 +226,7 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_STRIDING_RQ,
MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
MLX5E_PFLAG_XDP_TX_MPWQE,
+ MLX5E_PFLAG_SKB_TX_MPWQE,
MLX5E_NUM_PFLAGS, /* Keep last */
};
@@ -270,6 +271,7 @@ enum {
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
+ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
};
struct mlx5e_cq {
@@ -309,6 +311,7 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_ENABLED,
+ MLX5E_SQ_STATE_MPWQE,
MLX5E_SQ_STATE_RECOVERING,
MLX5E_SQ_STATE_IPSEC,
MLX5E_SQ_STATE_AM,
@@ -317,26 +320,40 @@ enum {
MLX5E_SQ_STATE_PENDING_XSK_TX,
};
+struct mlx5e_tx_mpwqe {
+ /* Current MPWQE session */
+ struct mlx5e_tx_wqe *wqe;
+ u32 bytes_count; /* bytes queued in the session so far */
+ u8 ds_count; /* data segments used by the WQE */
+ u8 pkt_count; /* packets aggregated into the session */
+ u8 inline_on; /* set when packet data is inlined into the descriptor */
+};
+
struct mlx5e_txqsq {
/* data path */
/* dirtied @completion */
u16 cc;
+ u16 skb_fifo_cc;
u32 dma_fifo_cc;
struct dim dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
+ u16 skb_fifo_pc;
u32 dma_fifo_pc;
+ struct mlx5e_tx_mpwqe mpwqe;
struct mlx5e_cq cq;
/* read only */
struct mlx5_wq_cyc wq;
u32 dma_fifo_mask;
+ u16 skb_fifo_mask;
struct mlx5e_sq_stats *stats;
struct {
struct mlx5e_sq_dma *dma_fifo;
+ struct sk_buff **skb_fifo;
struct mlx5e_tx_wqe_info *wqe_info;
} db;
void __iomem *uar_map;
@@ -403,7 +420,7 @@ struct mlx5e_xdp_info {
};
};
-struct mlx5e_xdp_xmit_data {
+struct mlx5e_xmit_data {
dma_addr_t dma_addr;
void *data;
u32 len;
@@ -416,18 +433,10 @@ struct mlx5e_xdp_info_fifo {
u32 mask;
};
-struct mlx5e_xdp_mpwqe {
- /* Current MPWQE session */
- struct mlx5e_tx_wqe *wqe;
- u8 ds_count;
- u8 pkt_count;
- u8 inline_on;
-};
-
struct mlx5e_xdpsq;
typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
- struct mlx5e_xdp_xmit_data *,
+ struct mlx5e_xmit_data *,
struct mlx5e_xdp_info *,
int);
@@ -442,12 +451,12 @@ struct mlx5e_xdpsq {
u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
u16 pc;
struct mlx5_wqe_ctrl_seg *doorbell_cseg;
- struct mlx5e_xdp_mpwqe mpwqe;
+ struct mlx5e_tx_mpwqe mpwqe;
struct mlx5e_cq cq;
/* read only */
- struct xdp_umem *umem;
+ struct xsk_buff_pool *xsk_pool;
struct mlx5_wq_cyc wq;
struct mlx5e_xdpsq_stats *stats;
mlx5e_fp_xmit_xdp_frame_check xmit_xdp_frame_check;
@@ -611,7 +620,7 @@ struct mlx5e_rq {
struct page_pool *page_pool;
/* AF_XDP zero-copy */
- struct xdp_umem *umem;
+ struct xsk_buff_pool *xsk_pool;
struct work_struct recover_work;
@@ -735,12 +744,13 @@ struct mlx5e_hv_vhca_stats_agent {
#endif
struct mlx5e_xsk {
- /* UMEMs are stored separately from channels, because we don't want to
- * lose them when channels are recreated. The kernel also stores UMEMs,
- * but it doesn't distinguish between zero-copy and non-zero-copy UMEMs,
- * so rely on our mechanism.
+ /* XSK buffer pools are stored separately from channels,
+ * because we don't want to lose them when channels are
+ * recreated. The kernel also stores buffer pools, but it doesn't
+ * distinguish between zero-copy and non-zero-copy ones, so we
+ * rely on our own mechanism.
*/
- struct xdp_umem **umems;
+ struct xsk_buff_pool **pools;
u16 refcnt;
bool ever_used;
};
@@ -899,7 +909,7 @@ struct mlx5e_xsk_param;
struct mlx5e_rq_param;
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem, struct mlx5e_rq *rq);
+ struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq);
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
@@ -909,7 +919,7 @@ int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
void mlx5e_close_icosq(struct mlx5e_icosq *sq);
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect);
void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 6fdcd5e69476..dc744702aee4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -12,9 +12,12 @@ enum {
};
struct mlx5e_tc_table {
- /* protects flow table */
+ /* Protects the dynamic assignment of the t parameter
+ * which is the NIC TC root table.
+ */
struct mutex t_lock;
struct mlx5_flow_table *t;
+ struct mlx5_fs_chains *chains;
struct rhashtable ht;
@@ -24,6 +27,8 @@ struct mlx5e_tc_table {
struct notifier_block netdevice_nb;
struct netdev_net_notifier netdevice_nn;
+
+ struct mlx5_tc_ct_priv *ct;
};
struct mlx5e_flow_table {
@@ -231,6 +236,7 @@ struct mlx5e_accel_fs_tcp;
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_namespace *egress_ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3dc200bcfabd..69a05da0e3e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -242,8 +242,8 @@ static int mlx5e_health_rsc_fmsg_binary(struct devlink_fmsg *fmsg,
{
u32 data_size;
+ int err = 0;
u32 offset;
- int err;
for (offset = 0; offset < value_len; offset += data_size) {
data_size = value_len - offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 79cc42d88eec..e36e505d38ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -12,7 +12,7 @@
#include "neigh.h"
#include "en_rep.h"
#include "eswitch.h"
-#include "esw/chains.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mapping.h"
#include "en/tc_tun.h"
@@ -191,7 +191,7 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
case TC_SETUP_CLSFLOWER:
memcpy(&tmp, f, sizeof(*f));
- if (!mlx5_esw_chains_prios_supported(esw))
+ if (!mlx5_chains_prios_supported(esw_chains(esw)))
return -EOPNOTSUPP;
/* Re-use tc offload path by moving the ft flow to the
@@ -203,12 +203,12 @@ static int mlx5e_rep_setup_ft_cb(enum tc_setup_type type, void *type_data,
*
* We only support chain 0 of FT offload.
*/
- if (tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw))
+ if (tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)))
return -EOPNOTSUPP;
if (tmp.common.chain_index != 0)
return -EOPNOTSUPP;
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++;
err = mlx5e_rep_setup_tc_cls_flower(priv, &tmp, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -378,12 +378,12 @@ static int mlx5e_rep_indr_setup_ft_cb(enum tc_setup_type type,
*
* We only support chain 0 of FT offload.
*/
- if (!mlx5_esw_chains_prios_supported(esw) ||
- tmp.common.prio >= mlx5_esw_chains_get_prio_range(esw) ||
+ if (!mlx5_chains_prios_supported(esw_chains(esw)) ||
+ tmp.common.prio >= mlx5_chains_get_prio_range(esw_chains(esw)) ||
tmp.common.chain_index)
return -EOPNOTSUPP;
- tmp.common.chain_index = mlx5_esw_chains_get_ft_chain(esw);
+ tmp.common.chain_index = mlx5_chains_get_nf_ft_chain(esw_chains(esw));
tmp.common.prio++;
err = mlx5e_rep_indr_offload(priv->netdev, &tmp, priv, flags);
memcpy(&f->stats, &tmp.stats, sizeof(f->stats));
@@ -612,7 +612,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
struct tc_skb_ext *tc_skb_ext;
struct mlx5_eswitch *esw;
struct mlx5e_priv *priv;
- int tunnel_moffset;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
@@ -626,7 +625,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
- err = mlx5_eswitch_get_chain_for_tag(esw, reg_c0, &chain);
+ err = mlx5_get_chain_for_tag(esw_chains(esw), reg_c0, &chain);
if (err) {
netdev_dbg(priv->netdev,
"Couldn't find chain for chain tag: %d, err: %d\n",
@@ -647,13 +646,12 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
uplink_priv = &uplink_rpriv->uplink_priv;
- if (!mlx5e_tc_ct_restore_flow(uplink_priv, skb,
+ if (!mlx5e_tc_ct_restore_flow(uplink_priv->ct_priv, skb,
zone_restore_id))
return false;
}
- tunnel_moffset = mlx5e_tc_attr_to_reg_mappings[TUNNEL_TO_REG].moffset;
- tunnel_id = reg_c1 >> (8 * tunnel_moffset);
+ tunnel_id = reg_c1 >> REG_MAPPING_SHIFT(TUNNEL_TO_REG);
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
#endif /* CONFIG_NET_TC_SKB_EXT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index a8be40cbe325..e521254d886e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -14,7 +14,7 @@
#include <linux/workqueue.h>
#include <linux/xarray.h>
-#include "esw/chains.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
@@ -39,8 +39,9 @@
netdev_dbg(ct_priv->netdev, "ct_debug: " fmt "\n", ##args)
struct mlx5_tc_ct_priv {
- struct mlx5_eswitch *esw;
+ struct mlx5_core_dev *dev;
const struct net_device *netdev;
+ struct mod_hdr_tbl *mod_hdr_tbl;
struct idr fte_ids;
struct xarray tuple_ids;
struct rhashtable zone_ht;
@@ -50,13 +51,16 @@ struct mlx5_tc_ct_priv {
struct mlx5_flow_table *ct_nat;
struct mlx5_flow_table *post_ct;
struct mutex control_lock; /* guards parallel adds/dels */
+ struct mutex shared_counter_lock;
struct mapping_ctx *zone_mapping;
struct mapping_ctx *labels_mapping;
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_fs_chains *chains;
};
struct mlx5_ct_flow {
- struct mlx5_esw_flow_attr pre_ct_attr;
- struct mlx5_esw_flow_attr post_ct_attr;
+ struct mlx5_flow_attr *pre_ct_attr;
+ struct mlx5_flow_attr *post_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5_flow_handle *post_ct_rule;
struct mlx5_ct_ft *ft;
@@ -67,12 +71,12 @@ struct mlx5_ct_flow {
struct mlx5_ct_zone_rule {
struct mlx5_flow_handle *rule;
struct mlx5e_mod_hdr_handle *mh;
- struct mlx5_esw_flow_attr attr;
+ struct mlx5_flow_attr *attr;
bool nat;
};
struct mlx5_tc_ct_pre {
- struct mlx5_flow_table *fdb;
+ struct mlx5_flow_table *ft;
struct mlx5_flow_group *flow_grp;
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle *flow_rule;
@@ -114,11 +118,16 @@ struct mlx5_ct_tuple {
u16 zone;
};
+struct mlx5_ct_shared_counter {
+ struct mlx5_fc *counter;
+ refcount_t refcount;
+};
+
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_fc *counter;
+ struct mlx5_ct_shared_counter *shared_counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -157,18 +166,6 @@ static const struct rhashtable_params tuples_nat_ht_params = {
.min_size = 16 * 1024,
};
-static struct mlx5_tc_ct_priv *
-mlx5_tc_ct_get_ct_priv(struct mlx5e_priv *priv)
-{
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *uplink_rpriv;
-
- uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
- uplink_priv = &uplink_rpriv->uplink_priv;
- return uplink_priv->ct_priv;
-}
-
static int
mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule)
{
@@ -397,20 +394,30 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
+mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+{
+ if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ return;
+
+ mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
+ kfree(entry->shared_counter);
+}
+
+static void
mlx5_tc_ct_entry_del_rule(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry,
bool nat)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
- struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_attr *attr = zone_rule->attr;
ct_dbg("Deleting ct entry rule in zone %d", entry->tuple.zone);
- mlx5_eswitch_del_offloaded_rule(esw, zone_rule->rule, attr);
- mlx5e_mod_hdr_detach(ct_priv->esw->dev,
- &esw->offloads.mod_hdr, zone_rule->mh);
+ mlx5_tc_rule_delete(netdev_priv(ct_priv->netdev), zone_rule->rule, attr);
+ mlx5e_mod_hdr_detach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl, zone_rule->mh);
mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
+ kfree(attr);
}
static void
@@ -419,8 +426,6 @@ mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
{
mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
-
- mlx5_fc_destroy(ct_priv->esw->dev, entry->counter);
}
static struct flow_action_entry *
@@ -446,29 +451,40 @@ mlx5_tc_ct_entry_set_registers(struct mlx5_tc_ct_priv *ct_priv,
u32 labels_id,
u8 zone_restore_id)
{
- struct mlx5_eswitch *esw = ct_priv->esw;
+ enum mlx5_flow_namespace_type ns = ct_priv->ns_type;
+ struct mlx5_core_dev *dev = ct_priv->dev;
int err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
CTSTATE_TO_REG, ct_state);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
MARK_TO_REG, mark);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
LABELS_TO_REG, labels_id);
if (err)
return err;
- err = mlx5e_tc_match_to_reg_set(esw->dev, mod_acts,
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
ZONE_RESTORE_TO_REG, zone_restore_id);
if (err)
return err;
+ /* Make another copy of the zone ID in reg_b for
+ * NIC RX flows, since we don't copy reg_c1 to
+ * reg_b upon miss.
+ */
+ if (ns != MLX5_FLOW_NAMESPACE_FDB) {
+ err = mlx5e_tc_match_to_reg_set(dev, mod_acts, ns,
+ NIC_ZONE_RESTORE_TO_REG, zone_restore_id);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -549,7 +565,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct flow_action *flow_action = &flow_rule->action;
- struct mlx5_core_dev *mdev = ct_priv->esw->dev;
+ struct mlx5_core_dev *mdev = ct_priv->dev;
struct flow_action_entry *act;
size_t action_size;
char *modact;
@@ -560,8 +576,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_MANGLE: {
- err = alloc_mod_hdr_actions(mdev,
- MLX5_FLOW_NAMESPACE_FDB,
+ err = alloc_mod_hdr_actions(mdev, ct_priv->ns_type,
mod_acts);
if (err)
return err;
@@ -590,7 +605,7 @@ mlx5_tc_ct_entry_create_nat(struct mlx5_tc_ct_priv *ct_priv,
static int
mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct flow_rule *flow_rule,
struct mlx5e_mod_hdr_handle **mh,
u8 zone_restore_id, bool nat)
@@ -626,9 +641,9 @@ mlx5_tc_ct_entry_create_mod_hdr(struct mlx5_tc_ct_priv *ct_priv,
if (err)
goto err_mapping;
- *mh = mlx5e_mod_hdr_attach(ct_priv->esw->dev,
- &ct_priv->esw->offloads.mod_hdr,
- MLX5_FLOW_NAMESPACE_FDB,
+ *mh = mlx5e_mod_hdr_attach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl,
+ ct_priv->ns_type,
&mod_acts);
if (IS_ERR(*mh)) {
err = PTR_ERR(*mh);
@@ -652,9 +667,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
bool nat, u8 zone_restore_id)
{
struct mlx5_ct_zone_rule *zone_rule = &entry->zone_rules[nat];
- struct mlx5_esw_flow_attr *attr = &zone_rule->attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5_flow_spec *spec = NULL;
+ struct mlx5_flow_attr *attr;
int err;
zone_rule->nat = nat;
@@ -663,6 +678,12 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
if (!spec)
return -ENOMEM;
+ attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule,
&zone_rule->mh,
zone_restore_id, nat);
@@ -676,9 +697,9 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = 0;
attr->dest_ft = ct_priv->post_ct;
- attr->fdb = nat ? ct_priv->ct_nat : ct_priv->ct;
+ attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->counter;
+ attr->counter = entry->shared_counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -686,39 +707,100 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
entry->tuple.zone & MLX5_CT_ZONE_MASK,
MLX5_CT_ZONE_MASK);
- zone_rule->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+ zone_rule->rule = mlx5_tc_rule_insert(priv, spec, attr);
if (IS_ERR(zone_rule->rule)) {
err = PTR_ERR(zone_rule->rule);
ct_dbg("Failed to add ct entry rule, nat: %d", nat);
goto err_rule;
}
+ zone_rule->attr = attr;
+
kfree(spec);
ct_dbg("Offloaded ct entry rule in zone %d", entry->tuple.zone);
return 0;
err_rule:
- mlx5e_mod_hdr_detach(ct_priv->esw->dev,
- &esw->offloads.mod_hdr, zone_rule->mh);
+ mlx5e_mod_hdr_detach(ct_priv->dev,
+ ct_priv->mod_hdr_tbl, zone_rule->mh);
mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id);
err_mod_hdr:
+ kfree(attr);
+err_attr:
kfree(spec);
return err;
}
+static struct mlx5_ct_shared_counter *
+mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5_ct_entry *entry)
+{
+ struct mlx5_ct_tuple rev_tuple = entry->tuple;
+ struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_entry *rev_entry;
+ __be16 tmp_port;
+ int ret;
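+ /* Both directions of a connection share one flow counter, so stats and aging see traffic from either side. */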
+
+ /* get the reversed tuple */
+ tmp_port = rev_tuple.port.src;
+ rev_tuple.port.src = rev_tuple.port.dst;
+ rev_tuple.port.dst = tmp_port;
+
+ if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ __be32 tmp_addr = rev_tuple.ip.src_v4;
+
+ rev_tuple.ip.src_v4 = rev_tuple.ip.dst_v4;
+ rev_tuple.ip.dst_v4 = tmp_addr;
+ } else if (rev_tuple.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct in6_addr tmp_addr = rev_tuple.ip.src_v6;
+
+ rev_tuple.ip.src_v6 = rev_tuple.ip.dst_v6;
+ rev_tuple.ip.dst_v6 = tmp_addr;
+ } else {
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ /* Use the same counter as the reverse direction */
+ mutex_lock(&ct_priv->shared_counter_lock);
+ rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
+ tuples_ht_params);
+ if (rev_entry) {
+ if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ mutex_unlock(&ct_priv->shared_counter_lock);
+ return rev_entry->shared_counter;
+ }
+ }
+ mutex_unlock(&ct_priv->shared_counter_lock);
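+ /* No reverse-direction entry exists yet; create a counter that the reply direction will share later. */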
+
+ shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
+ if (!shared_counter)
+ return ERR_PTR(-ENOMEM);
+
+ shared_counter->counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(shared_counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(shared_counter->counter);
+ kfree(shared_counter);
+ return ERR_PTR(ret);
+ }
+
+ refcount_set(&shared_counter->refcount, 1);
+ return shared_counter;
+}
+
static int
mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
struct flow_rule *flow_rule,
struct mlx5_ct_entry *entry,
u8 zone_restore_id)
{
- struct mlx5_eswitch *esw = ct_priv->esw;
int err;
- entry->counter = mlx5_fc_create(esw->dev, true);
- if (IS_ERR(entry->counter)) {
- err = PTR_ERR(entry->counter);
+ entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+ if (IS_ERR(entry->shared_counter)) {
+ err = PTR_ERR(entry->shared_counter);
ct_dbg("Failed to create counter for ct entry");
return err;
}
@@ -738,7 +820,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_fc_destroy(esw->dev, entry->counter);
+ mlx5_tc_ct_shared_counter_put(ct_priv, entry);
return err;
}
@@ -828,12 +910,16 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+ mutex_lock(&ct_priv->shared_counter_lock);
if (entry->tuple_node.next)
rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
+ mutex_unlock(&ct_priv->shared_counter_lock);
+ mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+
}
static int
@@ -870,7 +956,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
@@ -943,9 +1029,7 @@ out:
return false;
}
-int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec)
+int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
{
u32 ctstate = 0, ctstate_mask = 0;
@@ -961,24 +1045,21 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
return 0;
}
-void mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr)
+void mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-
- if (!ct_priv || !ct_attr->ct_labels_id)
+ if (!priv || !ct_attr->ct_labels_id)
return;
- mapping_remove(ct_priv->labels_mapping, ct_attr->ct_labels_id);
+ mapping_remove(priv->labels_mapping, ct_attr->ct_labels_id);
}
int
-mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector_key_ct *mask, *key;
bool trk, est, untrk, unest, new;
@@ -991,7 +1072,7 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
return 0;
- if (!ct_priv) {
+ if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct matching isn't available");
return -EOPNOTSUPP;
@@ -1047,7 +1128,7 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
ct_labels[1] = key->ct_labels[1] & mask->ct_labels[1];
ct_labels[2] = key->ct_labels[2] & mask->ct_labels[2];
ct_labels[3] = key->ct_labels[3] & mask->ct_labels[3];
- if (mapping_add(ct_priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
+ if (mapping_add(priv->labels_mapping, ct_labels, &ct_attr->ct_labels_id))
return -EOPNOTSUPP;
mlx5e_tc_match_to_reg_match(spec, LABELS_TO_REG, ct_attr->ct_labels_id,
MLX5_CT_LABELS_MASK);
@@ -1057,14 +1138,12 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
}
int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
-
- if (!ct_priv) {
+ if (!priv) {
NL_SET_ERR_MSG_MOD(extack,
"offload of ct action isn't available");
return -EOPNOTSUPP;
@@ -1083,8 +1162,8 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
{
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
- struct mlx5_flow_table *fdb = pre_ct->fdb;
+ struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_flow_table *ft = pre_ct->ft;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_modify_hdr *mod_hdr;
@@ -1099,14 +1178,14 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
return -ENOMEM;
zone = ct_ft->zone & MLX5_CT_ZONE_MASK;
- err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ZONE_TO_REG, zone);
+ err = mlx5e_tc_match_to_reg_set(dev, &pre_mod_acts, ct_priv->ns_type,
+ ZONE_TO_REG, zone);
if (err) {
ct_dbg("Failed to set zone register mapping");
goto err_mapping;
}
- mod_hdr = mlx5_modify_header_alloc(dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(dev, ct_priv->ns_type,
pre_mod_acts.num_actions,
pre_mod_acts.actions);
@@ -1132,7 +1211,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate);
dest.ft = ct_priv->post_ct;
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct flow rule zone %d", zone);
@@ -1143,7 +1222,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft,
/* add miss rule */
memset(spec, 0, sizeof(*spec));
dest.ft = nat ? ct_priv->ct_nat : ct_priv->ct;
- rule = mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add pre ct miss rule zone %d", zone);
@@ -1170,7 +1249,7 @@ tc_ct_pre_ct_del_rules(struct mlx5_ct_ft *ct_ft,
struct mlx5_tc_ct_pre *pre_ct)
{
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_core_dev *dev = ct_priv->dev;
mlx5_del_flow_rules(pre_ct->flow_rule);
mlx5_del_flow_rules(pre_ct->miss_rule);
@@ -1184,7 +1263,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_tc_ct_priv *ct_priv = ct_ft->ct_priv;
- struct mlx5_core_dev *dev = ct_priv->esw->dev;
+ struct mlx5_core_dev *dev = ct_priv->dev;
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table *ft;
@@ -1194,10 +1273,10 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
void *misc;
int err;
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ ns = mlx5_get_flow_namespace(dev, ct_priv->ns_type);
if (!ns) {
err = -EOPNOTSUPP;
- ct_dbg("Failed to get FDB flow namespace");
+ ct_dbg("Failed to get flow namespace");
return err;
}
@@ -1206,7 +1285,8 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
return -ENOMEM;
ft_attr.flags = MLX5_FLOW_TABLE_UNMANAGED;
- ft_attr.prio = FDB_TC_OFFLOAD;
+ ft_attr.prio = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB ?
+ FDB_TC_OFFLOAD : MLX5E_TC_PRIO;
ft_attr.max_fte = 2;
ft_attr.level = 1;
ft = mlx5_create_flow_table(ns, &ft_attr);
@@ -1215,7 +1295,7 @@ mlx5_tc_ct_alloc_pre_ct(struct mlx5_ct_ft *ct_ft,
ct_dbg("Failed to create pre ct table");
goto out_free;
}
- pre_ct->fdb = ft;
+ pre_ct->ft = ft;
/* create flow group */
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1279,7 +1359,7 @@ mlx5_tc_ct_free_pre_ct(struct mlx5_ct_ft *ct_ft,
tc_ct_pre_ct_del_rules(ct_ft, pre_ct);
mlx5_destroy_flow_group(pre_ct->miss_grp);
mlx5_destroy_flow_group(pre_ct->flow_grp);
- mlx5_destroy_flow_table(pre_ct->fdb);
+ mlx5_destroy_flow_table(pre_ct->ft);
}
static int
@@ -1398,7 +1478,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
- * + fdb prio (tc chain) +
+ * + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
@@ -1428,17 +1508,17 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* +--------------+
*/
static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *orig_spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
+ u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
struct mlx5_flow_spec *post_ct_spec = NULL;
- struct mlx5_eswitch *esw = ct_priv->esw;
- struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
@@ -1473,10 +1553,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
}
ct_flow->fte_id = fte_id;
- /* Base esw attributes of both rules on original rule attribute */
- pre_ct_attr = &ct_flow->pre_ct_attr;
- memcpy(pre_ct_attr, attr, sizeof(*attr));
- memcpy(&ct_flow->post_ct_attr, attr, sizeof(*attr));
+ /* Base the flow attributes of both rules on the original rule's attributes */
+ ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!ct_flow->pre_ct_attr) {
+ err = -ENOMEM;
+ goto err_alloc_pre;
+ }
+
+ ct_flow->post_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!ct_flow->post_ct_attr) {
+ err = -ENOMEM;
+ goto err_alloc_post;
+ }
+
+ pre_ct_attr = ct_flow->pre_ct_attr;
+ memcpy(pre_ct_attr, attr, attr_sz);
+ memcpy(ct_flow->post_ct_attr, attr, attr_sz);
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
@@ -1487,22 +1579,22 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
* don't go though all prios of this chain as normal tc rules
* miss.
*/
- err = mlx5_esw_chains_get_chain_mapping(esw, attr->chain,
- &chain_mapping);
+ err = mlx5_chains_get_chain_mapping(ct_priv->chains, attr->chain,
+ &chain_mapping);
if (err) {
ct_dbg("Failed to get chain register mapping for chain");
goto err_get_chain;
}
ct_flow->chain_mapping = chain_mapping;
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
FTEID_TO_REG, fte_id);
if (err) {
ct_dbg("Failed to set fte_id register mapping");
@@ -1516,7 +1608,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
attr->chain == 0) {
u32 tun_id = mlx5e_tc_get_flow_tun_id(flow);
- err = mlx5e_tc_match_to_reg_set(esw->dev, &pre_mod_acts,
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
+ ct_priv->ns_type,
TUNNEL_TO_REG,
tun_id);
if (err) {
@@ -1525,8 +1618,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
}
}
- mod_hdr = mlx5_modify_header_alloc(esw->dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
pre_mod_acts.num_actions,
pre_mod_acts.actions);
if (IS_ERR(mod_hdr)) {
@@ -1542,16 +1634,16 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG,
fte_id, MLX5_FTE_ID_MASK);
- /* Put post_ct rule on post_ct fdb */
- ct_flow->post_ct_attr.chain = 0;
- ct_flow->post_ct_attr.prio = 0;
- ct_flow->post_ct_attr.fdb = ct_priv->post_ct;
+ /* Put post_ct rule on post_ct flow table */
+ ct_flow->post_ct_attr->chain = 0;
+ ct_flow->post_ct_attr->prio = 0;
+ ct_flow->post_ct_attr->ft = ct_priv->post_ct;
- ct_flow->post_ct_attr.inner_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr.outer_match_level = MLX5_MATCH_NONE;
- ct_flow->post_ct_attr.action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
- rule = mlx5_eswitch_add_offloaded_rule(esw, post_ct_spec,
- &ct_flow->post_ct_attr);
+ ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE;
+ ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
+ rule = mlx5_tc_rule_insert(priv, post_ct_spec,
+ ct_flow->post_ct_attr);
ct_flow->post_ct_rule = rule;
if (IS_ERR(ct_flow->post_ct_rule)) {
err = PTR_ERR(ct_flow->post_ct_rule);
@@ -1561,10 +1653,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
/* Change original rule point to ct table */
pre_ct_attr->dest_chain = 0;
- pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.fdb : ft->pre_ct.fdb;
- ct_flow->pre_ct_rule = mlx5_eswitch_add_offloaded_rule(esw,
- orig_spec,
- pre_ct_attr);
+ pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft;
+ ct_flow->pre_ct_rule = mlx5_tc_rule_insert(priv, orig_spec,
+ pre_ct_attr);
if (IS_ERR(ct_flow->pre_ct_rule)) {
err = PTR_ERR(ct_flow->pre_ct_rule);
ct_dbg("Failed to add pre ct rule");
@@ -1578,14 +1669,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
return rule;
err_insert_orig:
- mlx5_eswitch_del_offloaded_rule(ct_priv->esw, ct_flow->post_ct_rule,
- &ct_flow->post_ct_attr);
+ mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+ ct_flow->post_ct_attr);
err_insert_post_ct:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
dealloc_mod_hdr_actions(&pre_mod_acts);
- mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
+ kfree(ct_flow->post_ct_attr);
+err_alloc_post:
+ kfree(ct_flow->pre_ct_attr);
+err_alloc_pre:
idr_remove(&ct_priv->fte_ids, fte_id);
err_idr:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
@@ -1597,14 +1692,14 @@ err_ft:
}
static struct mlx5_flow_handle *
-__mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
+__mlx5_tc_ct_flow_offload_clear(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_flow_spec *orig_spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
- struct mlx5_eswitch *esw = ct_priv->esw;
- struct mlx5_esw_flow_attr *pre_ct_attr;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
+ u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
+ struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_flow_handle *rule;
struct mlx5_ct_flow *ct_flow;
@@ -1615,8 +1710,13 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
return ERR_PTR(-ENOMEM);
/* Base esw attributes on original rule attribute */
- pre_ct_attr = &ct_flow->pre_ct_attr;
- memcpy(pre_ct_attr, attr, sizeof(*attr));
+ pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
+ if (!pre_ct_attr) {
+ err = -ENOMEM;
+ goto err_attr;
+ }
+
+ memcpy(pre_ct_attr, attr, attr_sz);
err = mlx5_tc_ct_entry_set_registers(ct_priv, mod_acts, 0, 0, 0, 0);
if (err) {
@@ -1624,8 +1724,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
goto err_set_registers;
}
- mod_hdr = mlx5_modify_header_alloc(esw->dev,
- MLX5_FLOW_NAMESPACE_FDB,
+ mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
mod_acts->num_actions,
mod_acts->actions);
if (IS_ERR(mod_hdr)) {
@@ -1638,7 +1737,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
pre_ct_attr->modify_hdr = mod_hdr;
pre_ct_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- rule = mlx5_eswitch_add_offloaded_rule(esw, orig_spec, pre_ct_attr);
+ rule = mlx5_tc_rule_insert(priv, orig_spec, pre_ct_attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
ct_dbg("Failed to add ct clear rule");
@@ -1646,6 +1745,7 @@ __mlx5_tc_ct_flow_offload_clear(struct mlx5e_priv *priv,
}
attr->ct_attr.ct_flow = ct_flow;
+ ct_flow->pre_ct_attr = pre_ct_attr;
ct_flow->pre_ct_rule = rule;
return rule;
@@ -1654,61 +1754,67 @@ err_insert:
err_set_registers:
netdev_warn(priv->netdev,
"Failed to offload ct clear flow, err %d\n", err);
+ kfree(pre_ct_attr);
+err_attr:
+ kfree(ct_flow);
+
return ERR_PTR(err);
}
struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
bool clear_action = attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct mlx5_flow_handle *rule;
- if (!ct_priv)
+ if (!priv)
return ERR_PTR(-EOPNOTSUPP);
- mutex_lock(&ct_priv->control_lock);
+ mutex_lock(&priv->control_lock);
if (clear_action)
rule = __mlx5_tc_ct_flow_offload_clear(priv, spec, attr, mod_hdr_acts);
else
rule = __mlx5_tc_ct_flow_offload(priv, flow, spec, attr);
- mutex_unlock(&ct_priv->control_lock);
+ mutex_unlock(&priv->control_lock);
return rule;
}
static void
__mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_ct_flow *ct_flow)
{
- struct mlx5_esw_flow_attr *pre_ct_attr = &ct_flow->pre_ct_attr;
- struct mlx5_eswitch *esw = ct_priv->esw;
+ struct mlx5_flow_attr *pre_ct_attr = ct_flow->pre_ct_attr;
+ struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
- mlx5_eswitch_del_offloaded_rule(esw, ct_flow->pre_ct_rule,
- pre_ct_attr);
- mlx5_modify_header_dealloc(esw->dev, pre_ct_attr->modify_hdr);
+ mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule,
+ pre_ct_attr);
+ mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
if (ct_flow->post_ct_rule) {
- mlx5_eswitch_del_offloaded_rule(esw, ct_flow->post_ct_rule,
- &ct_flow->post_ct_attr);
- mlx5_esw_chains_put_chain_mapping(esw, ct_flow->chain_mapping);
+ mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule,
+ ct_flow->post_ct_attr);
+ mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
idr_remove(&ct_priv->fte_ids, ct_flow->fte_id);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
}
+ kfree(ct_flow->pre_ct_attr);
+ kfree(ct_flow->post_ct_attr);
kfree(ct_flow);
}
void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
struct mlx5_ct_flow *ct_flow = attr->ct_attr.ct_flow;
/* We are called on error to clean up stuff from parsing
@@ -1717,22 +1823,15 @@ mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow,
if (!ct_flow)
return;
- mutex_lock(&ct_priv->control_lock);
- __mlx5_tc_ct_delete_flow(ct_priv, ct_flow);
- mutex_unlock(&ct_priv->control_lock);
+ mutex_lock(&priv->control_lock);
+ __mlx5_tc_ct_delete_flow(priv, flow, ct_flow);
+ mutex_unlock(&priv->control_lock);
}
static int
-mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
- const char **err_msg)
+mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
+ const char **err_msg)
{
-#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
- /* cannot restore chain ID on HW miss */
-
- *err_msg = "tc skb extension missing";
- return -EOPNOTSUPP;
-#endif
-
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) {
*err_msg = "firmware level support is missing";
return -EOPNOTSUPP;
@@ -1766,44 +1865,61 @@ mlx5_tc_ct_init_check_support(struct mlx5_eswitch *esw,
return 0;
}
-static void
-mlx5_tc_ct_init_err(struct mlx5e_rep_priv *rpriv, const char *msg, int err)
+static int
+mlx5_tc_ct_init_check_nic_support(struct mlx5e_priv *priv,
+ const char **err_msg)
+{
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ *err_msg = "firmware level support is missing";
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
+ enum mlx5_flow_namespace_type ns_type,
+ const char **err_msg)
{
- if (msg)
- netdev_warn(rpriv->netdev,
- "tc ct offload not supported, %s, err: %d\n",
- msg, err);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ /* cannot restore chain ID on HW miss */
+
+ *err_msg = "tc skb extension missing";
+ return -EOPNOTSUPP;
+#endif
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ return mlx5_tc_ct_init_check_esw_support(esw, err_msg);
else
- netdev_warn(rpriv->netdev,
- "tc ct offload not supported, err: %d\n",
- err);
+ return mlx5_tc_ct_init_check_nic_support(priv, err_msg);
}
-int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+#define INIT_ERR_PREFIX "tc ct offload init failed"
+
+struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type)
{
struct mlx5_tc_ct_priv *ct_priv;
- struct mlx5e_rep_priv *rpriv;
- struct mlx5_eswitch *esw;
- struct mlx5e_priv *priv;
+ struct mlx5_core_dev *dev;
const char *msg;
int err;
- rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
- priv = netdev_priv(rpriv->netdev);
- esw = priv->mdev->priv.eswitch;
-
- err = mlx5_tc_ct_init_check_support(esw, &msg);
+ dev = priv->mdev;
+ err = mlx5_tc_ct_init_check_support(priv, ns_type, &msg);
if (err) {
- mlx5_tc_ct_init_err(rpriv, msg, err);
+ mlx5_core_warn(dev,
+ "tc ct offload not supported, %s\n",
+ msg);
goto err_support;
}
ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
- if (!ct_priv) {
- mlx5_tc_ct_init_err(rpriv, NULL, -ENOMEM);
+ if (!ct_priv)
goto err_alloc;
- }
ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true);
if (IS_ERR(ct_priv->zone_mapping)) {
@@ -1817,46 +1933,51 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
goto err_mapping_labels;
}
- ct_priv->esw = esw;
- ct_priv->netdev = rpriv->netdev;
- ct_priv->ct = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->ns_type = ns_type;
+ ct_priv->chains = chains;
+ ct_priv->netdev = priv->netdev;
+ ct_priv->dev = priv->mdev;
+ ct_priv->mod_hdr_tbl = mod_hdr;
+ ct_priv->ct = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->ct)) {
err = PTR_ERR(ct_priv->ct);
- mlx5_tc_ct_init_err(rpriv, "failed to create ct table", err);
+ mlx5_core_warn(dev,
+ "%s, failed to create ct table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_ct_tbl;
}
- ct_priv->ct_nat = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->ct_nat = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->ct_nat)) {
err = PTR_ERR(ct_priv->ct_nat);
- mlx5_tc_ct_init_err(rpriv, "failed to create ct nat table",
- err);
+ mlx5_core_warn(dev,
+ "%s, failed to create ct nat table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_ct_nat_tbl;
}
- ct_priv->post_ct = mlx5_esw_chains_create_global_table(esw);
+ ct_priv->post_ct = mlx5_chains_create_global_table(chains);
if (IS_ERR(ct_priv->post_ct)) {
err = PTR_ERR(ct_priv->post_ct);
- mlx5_tc_ct_init_err(rpriv, "failed to create post ct table",
- err);
+ mlx5_core_warn(dev,
+ "%s, failed to create post ct table err: %d\n",
+ INIT_ERR_PREFIX, err);
goto err_post_ct_tbl;
}
idr_init(&ct_priv->fte_ids);
mutex_init(&ct_priv->control_lock);
+ mutex_init(&ct_priv->shared_counter_lock);
rhashtable_init(&ct_priv->zone_ht, &zone_params);
rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
- /* Done, set ct_priv to know it initializted */
- uplink_priv->ct_priv = ct_priv;
-
- return 0;
+ return ct_priv;
err_post_ct_tbl:
- mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct_nat);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
- mlx5_esw_chains_destroy_global_table(esw, ct_priv->ct);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
mapping_destroy(ct_priv->labels_mapping);
err_mapping_labels:
@@ -1866,20 +1987,22 @@ err_mapping_zone:
err_alloc:
err_support:
- return 0;
+ return NULL;
}
void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
{
- struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
+ struct mlx5_fs_chains *chains;
if (!ct_priv)
return;
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->post_ct);
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct_nat);
- mlx5_esw_chains_destroy_global_table(ct_priv->esw, ct_priv->ct);
+ chains = ct_priv->chains;
+
+ mlx5_chains_destroy_global_table(chains, ct_priv->post_ct);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
mapping_destroy(ct_priv->labels_mapping);
@@ -1887,17 +2010,15 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
rhashtable_destroy(&ct_priv->ct_tuples_nat_ht);
rhashtable_destroy(&ct_priv->zone_ht);
mutex_destroy(&ct_priv->control_lock);
+ mutex_destroy(&ct_priv->shared_counter_lock);
idr_destroy(&ct_priv->fte_ids);
kfree(ct_priv);
-
- uplink_priv->ct_priv = NULL;
}
bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id)
{
- struct mlx5_tc_ct_priv *ct_priv = uplink_priv->ct_priv;
struct mlx5_ct_tuple tuple = {};
struct mlx5_ct_entry *entry;
u16 zone;
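
The init path reworked above now returns the allocated ct_priv (NULL when CT offload is unavailable) instead of writing into uplink_priv, and its error labels unwind partially acquired resources in reverse order of acquisition. A minimal sketch of that shape, assuming placeholder names (struct ctx and acquire_a/release_a/acquire_b are illustrative, not driver APIs):

#include <linux/slab.h>
#include <linux/err.h>

struct ctx {
	void *res_a;
	void *res_b;
};

static struct ctx *ctx_init(void)
{
	struct ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->res_a = acquire_a();		/* placeholder resource */
	if (IS_ERR(ctx->res_a))
		goto err_a;

	ctx->res_b = acquire_b();		/* placeholder resource */
	if (IS_ERR(ctx->res_b))
		goto err_b;

	return ctx;	/* caller stores the pointer; NULL disables the feature */

err_b:
	release_a(ctx->res_a);			/* undo in reverse order */
err_a:
	kfree(ctx);
	return NULL;
}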
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
index 708c216325d3..6503b614337c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
@@ -10,12 +10,14 @@
#include "en.h"
-struct mlx5_esw_flow_attr;
+struct mlx5_flow_attr;
struct mlx5e_tc_mod_hdr_acts;
struct mlx5_rep_uplink_priv;
struct mlx5e_tc_flow;
struct mlx5e_priv;
+struct mlx5_fs_chains;
+struct mlx5_tc_ct_priv;
struct mlx5_ct_flow;
struct nf_flowtable;
@@ -76,68 +78,78 @@ struct mlx5_ct_attr {
misc_parameters_2.metadata_reg_c_1) + 3,\
}
+#define nic_zone_restore_to_reg_ct {\
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,\
+ .moffset = 2,\
+ .mlen = 1,\
+}
+
#define REG_MAPPING_MLEN(reg) (mlx5e_tc_attr_to_reg_mappings[reg].mlen)
+#define REG_MAPPING_MOFFSET(reg) (mlx5e_tc_attr_to_reg_mappings[reg].moffset)
+#define REG_MAPPING_SHIFT(reg) (REG_MAPPING_MOFFSET(reg) * 8)
#define ZONE_RESTORE_BITS (REG_MAPPING_MLEN(ZONE_RESTORE_TO_REG) * 8)
#define ZONE_RESTORE_MAX GENMASK(ZONE_RESTORE_BITS - 1, 0)
#if IS_ENABLED(CONFIG_MLX5_TC_CT)
-int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv);
+struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type);
void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv);
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv);
void
-mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr);
+mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr);
int
-mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
struct netlink_ext_ack *extack);
+int mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec);
int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec);
-int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack);
struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts);
void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id);
#else /* CONFIG_MLX5_TC_CT */
-static inline int
-mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv)
+static inline struct mlx5_tc_ct_priv *
+mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
+ struct mod_hdr_tbl *mod_hdr,
+ enum mlx5_flow_namespace_type ns_type)
{
- return 0;
+ return NULL;
}
static inline void
-mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv)
+mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
{
}
static inline void
-mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr) {}
+mlx5_tc_ct_match_del(struct mlx5_tc_ct_priv *priv, struct mlx5_ct_attr *ct_attr) {}
static inline int
-mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
+mlx5_tc_ct_match_add(struct mlx5_tc_ct_priv *priv,
struct mlx5_flow_spec *spec,
struct flow_cls_offload *f,
struct mlx5_ct_attr *ct_attr,
@@ -149,47 +161,44 @@ mlx5_tc_ct_match_add(struct mlx5e_priv *priv,
return 0;
NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
- netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
return -EOPNOTSUPP;
}
static inline int
-mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv,
- struct mlx5_flow_spec *spec)
+mlx5_tc_ct_add_no_trk_match(struct mlx5_flow_spec *spec)
{
return 0;
}
static inline int
-mlx5_tc_ct_parse_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
+ struct mlx5_flow_attr *attr,
const struct flow_action_entry *act,
struct netlink_ext_ack *extack)
{
NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
- netdev_warn(priv->netdev, "mlx5 tc ct offload isn't enabled.\n");
return -EOPNOTSUPP;
}
static inline struct mlx5_flow_handle *
-mlx5_tc_ct_flow_offload(struct mlx5e_priv *priv,
+mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
{
return ERR_PTR(-EOPNOTSUPP);
}
static inline void
-mlx5_tc_ct_delete_flow(struct mlx5e_priv *priv,
+mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
}
static inline bool
-mlx5e_tc_ct_restore_flow(struct mlx5_rep_uplink_priv *uplink_priv,
+mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv,
struct sk_buff *skb, u8 zone_restore_id)
{
if (!zone_restore_id)
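
When CONFIG_MLX5_TC_CT is disabled, the header above compiles the whole API down to static inline stubs so callers need no #ifdefs; the pointer-returning init stub hands back NULL, which callers already treat as "CT offload unavailable". The same pattern in miniature, with placeholder names (CONFIG_MYFEATURE and the myfeature_* identifiers are illustrative):

#if IS_ENABLED(CONFIG_MYFEATURE)
struct myfeature_priv *myfeature_init(struct device *dev);
void myfeature_clean(struct myfeature_priv *priv);
#else
static inline struct myfeature_priv *
myfeature_init(struct device *dev)
{
	return NULL;	/* callers interpret NULL as "feature disabled" */
}

static inline void
myfeature_clean(struct myfeature_priv *priv)
{
}
#endif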
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 24336c60123a..07ee1d236ab3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -7,6 +7,21 @@
#include "en.h"
#include <linux/indirect_call_wrapper.h>
+#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
+
+/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
+ * (16 * 4 == 64) does not fit in the 6-bit DS field of the Ctrl Segment.
+ * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a
+ * full-session WQE be cache-aligned.
+ */
+#if L1_CACHE_BYTES < 128
+#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
+#else
+#define MLX5E_TX_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
+#endif
+
+#define MLX5E_TX_MPW_MAX_NUM_DS (MLX5E_TX_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+
#define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
enum mlx5e_icosq_wqe_type {
@@ -46,8 +61,6 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev);
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
@@ -110,6 +123,7 @@ struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
u8 num_dma;
+ u8 num_fifo_pkts;
#ifdef CONFIG_MLX5_EN_TLS
struct page *resync_dump_frag_page;
#endif
@@ -194,23 +208,6 @@ static inline u16 mlx5e_icosq_get_next_pi(struct mlx5e_icosq *sq, u16 size)
}
static inline void
-mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
- u16 pi, u16 nnops)
-{
- struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
-
- edge_wi = wi + nnops;
-
- /* fill sq frag edge with nops to avoid wqe wrapping two pages */
- for (; wi < edge_wi; wi++) {
- memset(wi, 0, sizeof(*wi));
- wi->num_wqebbs = 1;
- mlx5e_post_nop(wq, sq->sqn, &sq->pc);
- }
- sq->stats->nop += nnops;
-}
-
-static inline void
mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
struct mlx5_wqe_ctrl_seg *ctrl)
{
@@ -228,29 +225,6 @@ mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map,
mlx5_write64((__be32 *)ctrl, uar_map);
}
-static inline bool mlx5e_transport_inline_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg)
-{
- return cseg && !!cseg->tis_tir_num;
-}
-
-static inline u8
-mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
- struct sk_buff *skb)
-{
- u8 mode;
-
- if (mlx5e_transport_inline_tx_wqe(cseg))
- return MLX5_INLINE_MODE_TCP_UDP;
-
- mode = sq->min_inline_mode;
-
- if (skb_vlan_tag_present(skb) &&
- test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
- mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
-
- return mode;
-}
-
static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
{
struct mlx5_core_cq *mcq;
@@ -276,6 +250,23 @@ mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
dma->type = map_type;
}
+static inline struct sk_buff **mlx5e_skb_fifo_get(struct mlx5e_txqsq *sq, u16 i)
+{
+ return &sq->db.skb_fifo[i & sq->skb_fifo_mask];
+}
+
+static inline void mlx5e_skb_fifo_push(struct mlx5e_txqsq *sq, struct sk_buff *skb)
+{
+ struct sk_buff **skb_item = mlx5e_skb_fifo_get(sq, sq->skb_fifo_pc++);
+
+ *skb_item = skb;
+}
+
+static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_txqsq *sq)
+{
+ return *mlx5e_skb_fifo_get(sq, sq->skb_fifo_cc++);
+}
+
static inline void
mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
{
@@ -291,6 +282,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
}
}
+void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more);
+void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq);
+
+static inline bool mlx5e_tx_mpwqe_is_full(struct mlx5e_tx_mpwqe *session)
+{
+ return session->ds_count == MLX5E_TX_MPW_MAX_NUM_DS;
+}
+
static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
{
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
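diff commentary: the MLX5E_TX_MPW_MAX_WQEBBS bound introduced in txrx.h above follows from simple arithmetic. The DS-count field in the control segment is 6 bits wide (maximum 63), so the full 16 WQEBBs * 4 DS = 64 would overflow it; and since a WQEBB is 64 bytes, 15 WQEBBs (960 B) stays aligned to 64-byte cachelines while 14 WQEBBs (896 B = 7 * 128) is needed on 128-byte-cacheline systems, matching the #if. A compile-time sketch of the arithmetic (constants mirror the mlx5 definitions; the macro names here are local to the sketch):

#define WQE_MAX_WQEBBS	16			/* MLX5_SEND_WQE_MAX_WQEBBS */
#define WQEBB_NUM_DS	4			/* MLX5_SEND_WQEBB_NUM_DS */
#define DS_FIELD_MAX	((1 << 6) - 1)		/* 6-bit DS count: 63 */

_Static_assert(WQE_MAX_WQEBBS * WQEBB_NUM_DS > DS_FIELD_MAX,
	       "16 * 4 == 64 overflows the 6-bit DS count");
_Static_assert((WQE_MAX_WQEBBS - 1) * WQEBB_NUM_DS <= DS_FIELD_MAX,
	       "15 WQEBBs (60 DS segments) fit");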
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index b28df21981a1..ae90d533a350 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -59,7 +59,7 @@ static inline bool
mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
struct mlx5e_dma_info *di, struct xdp_buff *xdp)
{
- struct mlx5e_xdp_xmit_data xdptxd;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
struct xdp_frame *xdpf;
dma_addr_t dma_addr;
@@ -194,18 +194,22 @@ static u16 mlx5e_xdpsq_get_next_pi(struct mlx5e_xdpsq *sq, u16 size)
static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
+ struct mlx5e_tx_wqe *wqe;
u16 pi;
- pi = mlx5e_xdpsq_get_next_pi(sq, MLX5_SEND_WQE_MAX_WQEBBS);
- session->wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ pi = mlx5e_xdpsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ net_prefetchw(wqe->data);
- prefetchw(session->wqe->data);
- session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT;
- session->pkt_count = 0;
-
- mlx5e_xdp_update_inline_state(sq);
+ *session = (struct mlx5e_tx_mpwqe) {
+ .wqe = wqe,
+ .bytes_count = 0,
+ .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .pkt_count = 0,
+ .inline_on = mlx5e_xdp_get_inline_state(sq, session->inline_on),
+ };
stats->mpwqe++;
}
@@ -213,7 +217,7 @@ static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl;
u16 ds_count = session->ds_count;
u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -258,10 +262,10 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq
}
INDIRECT_CALLABLE_SCOPE bool
-mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5e_xdpsq_stats *stats = sq->stats;
if (unlikely(xdptxd->len > sq->hw_mtu)) {
@@ -284,8 +288,7 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *x
mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
- if (unlikely(mlx5e_xdp_no_room_for_inline_pkt(session) ||
- session->ds_count == MLX5E_XDP_MPW_MAX_NUM_DS))
+ if (unlikely(mlx5e_xdp_mpqwe_is_full(session)))
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
@@ -306,7 +309,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
}
INDIRECT_CALLABLE_SCOPE bool
-mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
+mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi, int check_result)
{
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -322,7 +325,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_xmit_data *xdptxd,
struct mlx5e_xdpsq_stats *stats = sq->stats;
- prefetchw(wqe);
+ net_prefetchw(wqe);
if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
stats->err++;
@@ -445,7 +448,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq)
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
if (xsk_frames)
- xsk_umem_complete_tx(sq->umem, xsk_frames);
+ xsk_tx_completed(sq->xsk_pool, xsk_frames);
sq->stats->cqes += i;
@@ -475,7 +478,7 @@ void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
}
if (xsk_frames)
- xsk_umem_complete_tx(sq->umem, xsk_frames);
+ xsk_tx_completed(sq->xsk_pool, xsk_frames);
}
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -503,7 +506,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
- struct mlx5e_xdp_xmit_data xdptxd;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
bool ret;
@@ -563,4 +566,3 @@ void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
sq->xmit_xdp_frame = is_mpw ?
mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame;
}
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index e806c13d491f..d487e5e37162 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -38,27 +38,12 @@
#include "en/txrx.h"
#define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
-#define MLX5E_XDP_TX_EMPTY_DS_COUNT \
- (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
-#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */)
-
-#define MLX5E_XDP_INLINE_WQE_SZ_THRSD (256 - sizeof(struct mlx5_wqe_inline_seg))
-#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT \
- DIV_ROUND_UP(MLX5E_XDP_INLINE_WQE_SZ_THRSD, MLX5_SEND_WQE_DS)
-
-/* The mult of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS
- * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment.
- * We use a bound lower that MLX5_SEND_WQE_MAX_WQEBBS to let a
- * full-session WQE be cache-aligned.
- */
-#if L1_CACHE_BYTES < 128
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1)
-#else
-#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2)
-#endif
+#define MLX5E_XDP_TX_DS_COUNT (MLX5E_TX_WQE_EMPTY_DS_COUNT + 1 /* SG DS */)
-#define MLX5E_XDP_MPW_MAX_NUM_DS \
- (MLX5E_XDP_MPW_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS)
+#define MLX5E_XDP_INLINE_WQE_MAX_DS_CNT 16
+#define MLX5E_XDP_INLINE_WQE_SZ_THRSD \
+ (MLX5E_XDP_INLINE_WQE_MAX_DS_CNT * MLX5_SEND_WQE_DS - \
+ sizeof(struct mlx5_wqe_inline_seg))
struct mlx5e_xsk_param;
int mlx5e_xdp_max_mtu(struct mlx5e_params *params, struct mlx5e_xsk_param *xsk);
@@ -73,11 +58,11 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdp_info *xdpi,
int check_result));
INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
@@ -122,30 +107,28 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
/* Enable inline WQEs to shift some load from a congested HCA (HW) to
* a less congested cpu (SW).
*/
-static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
+static inline bool mlx5e_xdp_get_inline_state(struct mlx5e_xdpsq *sq, bool cur)
{
u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
#define MLX5E_XDP_INLINE_WATERMARK_LOW 10
#define MLX5E_XDP_INLINE_WATERMARK_HIGH 128
- if (session->inline_on) {
- if (outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
- session->inline_on = 0;
- return;
- }
+ if (cur && outstanding <= MLX5E_XDP_INLINE_WATERMARK_LOW)
+ return false;
+
+ if (!cur && outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
+ return true;
- /* inline is false */
- if (outstanding >= MLX5E_XDP_INLINE_WATERMARK_HIGH)
- session->inline_on = 1;
+ return cur;
}
-static inline bool
-mlx5e_xdp_no_room_for_inline_pkt(struct mlx5e_xdp_mpwqe *session)
+static inline bool mlx5e_xdp_mpqwe_is_full(struct mlx5e_tx_mpwqe *session)
{
- return session->inline_on &&
- session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT > MLX5E_XDP_MPW_MAX_NUM_DS;
+ if (session->inline_on)
+ return session->ds_count + MLX5E_XDP_INLINE_WQE_MAX_DS_CNT >
+ MLX5E_TX_MPW_MAX_NUM_DS;
+ return mlx5e_tx_mpwqe_is_full(session);
}
struct mlx5e_xdp_wqe_info {
@@ -155,15 +138,16 @@ struct mlx5e_xdp_wqe_info {
static inline void
mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
- struct mlx5e_xdp_xmit_data *xdptxd,
+ struct mlx5e_xmit_data *xdptxd,
struct mlx5e_xdpsq_stats *stats)
{
- struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
struct mlx5_wqe_data_seg *dseg =
(struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
u32 dma_len = xdptxd->len;
session->pkt_count++;
+ session->bytes_count += dma_len;
if (session->inline_on && dma_len <= MLX5E_XDP_INLINE_WQE_SZ_THRSD) {
struct mlx5_wqe_inline_seg *inline_dseg =
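
mlx5e_xdp_get_inline_state() above turns the old state-mutating helper into a pure function of the previous decision and the number of outstanding descriptors; the two watermarks provide hysteresis so the decision does not flap around a single threshold. The logic in isolation (thresholds copied from the driver, function and macro names local to this sketch):

#include <stdbool.h>

#define WATERMARK_LOW	10
#define WATERMARK_HIGH	128

static bool get_inline_state(bool cur, unsigned int outstanding)
{
	if (cur && outstanding <= WATERMARK_LOW)
		return false;	/* HW has caught up: stop inlining */
	if (!cur && outstanding >= WATERMARK_HIGH)
		return true;	/* HW is congested: start inlining */
	return cur;		/* between the watermarks: keep the last decision */
}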
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
index 331ca2b0f8a4..71e8d66fa150 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c
@@ -1,31 +1,31 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
#include <net/xdp_sock_drv.h>
-#include "umem.h"
+#include "pool.h"
#include "setup.h"
#include "en/params.h"
-static int mlx5e_xsk_map_umem(struct mlx5e_priv *priv,
- struct xdp_umem *umem)
+static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
+ struct xsk_buff_pool *pool)
{
- struct device *dev = priv->mdev->device;
+ struct device *dev = mlx5_core_dma_dev(priv->mdev);
- return xsk_buff_dma_map(umem, dev, 0);
+ return xsk_pool_dma_map(pool, dev, 0);
}
-static void mlx5e_xsk_unmap_umem(struct mlx5e_priv *priv,
- struct xdp_umem *umem)
+static void mlx5e_xsk_unmap_pool(struct mlx5e_priv *priv,
+ struct xsk_buff_pool *pool)
{
- return xsk_buff_dma_unmap(umem, 0);
+ return xsk_pool_dma_unmap(pool, 0);
}
-static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
+static int mlx5e_xsk_get_pools(struct mlx5e_xsk *xsk)
{
- if (!xsk->umems) {
- xsk->umems = kcalloc(MLX5E_MAX_NUM_CHANNELS,
- sizeof(*xsk->umems), GFP_KERNEL);
- if (unlikely(!xsk->umems))
+ if (!xsk->pools) {
+ xsk->pools = kcalloc(MLX5E_MAX_NUM_CHANNELS,
+ sizeof(*xsk->pools), GFP_KERNEL);
+ if (unlikely(!xsk->pools))
return -ENOMEM;
}
@@ -35,68 +35,68 @@ static int mlx5e_xsk_get_umems(struct mlx5e_xsk *xsk)
return 0;
}
-static void mlx5e_xsk_put_umems(struct mlx5e_xsk *xsk)
+static void mlx5e_xsk_put_pools(struct mlx5e_xsk *xsk)
{
if (!--xsk->refcnt) {
- kfree(xsk->umems);
- xsk->umems = NULL;
+ kfree(xsk->pools);
+ xsk->pools = NULL;
}
}
-static int mlx5e_xsk_add_umem(struct mlx5e_xsk *xsk, struct xdp_umem *umem, u16 ix)
+static int mlx5e_xsk_add_pool(struct mlx5e_xsk *xsk, struct xsk_buff_pool *pool, u16 ix)
{
int err;
- err = mlx5e_xsk_get_umems(xsk);
+ err = mlx5e_xsk_get_pools(xsk);
if (unlikely(err))
return err;
- xsk->umems[ix] = umem;
+ xsk->pools[ix] = pool;
return 0;
}
-static void mlx5e_xsk_remove_umem(struct mlx5e_xsk *xsk, u16 ix)
+static void mlx5e_xsk_remove_pool(struct mlx5e_xsk *xsk, u16 ix)
{
- xsk->umems[ix] = NULL;
+ xsk->pools[ix] = NULL;
- mlx5e_xsk_put_umems(xsk);
+ mlx5e_xsk_put_pools(xsk);
}
-static bool mlx5e_xsk_is_umem_sane(struct xdp_umem *umem)
+static bool mlx5e_xsk_is_pool_sane(struct xsk_buff_pool *pool)
{
- return xsk_umem_get_headroom(umem) <= 0xffff &&
- xsk_umem_get_chunk_size(umem) <= 0xffff;
+ return xsk_pool_get_headroom(pool) <= 0xffff &&
+ xsk_pool_get_chunk_size(pool) <= 0xffff;
}
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk)
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk)
{
- xsk->headroom = xsk_umem_get_headroom(umem);
- xsk->chunk_size = xsk_umem_get_chunk_size(umem);
+ xsk->headroom = xsk_pool_get_headroom(pool);
+ xsk->chunk_size = xsk_pool_get_chunk_size(pool);
}
static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
- struct xdp_umem *umem, u16 ix)
+ struct xsk_buff_pool *pool, u16 ix)
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5e_xsk_param xsk;
struct mlx5e_channel *c;
int err;
- if (unlikely(mlx5e_xsk_get_umem(&priv->channels.params, &priv->xsk, ix)))
+ if (unlikely(mlx5e_xsk_get_pool(&priv->channels.params, &priv->xsk, ix)))
return -EBUSY;
- if (unlikely(!mlx5e_xsk_is_umem_sane(umem)))
+ if (unlikely(!mlx5e_xsk_is_pool_sane(pool)))
return -EINVAL;
- err = mlx5e_xsk_map_umem(priv, umem);
+ err = mlx5e_xsk_map_pool(priv, pool);
if (unlikely(err))
return err;
- err = mlx5e_xsk_add_umem(&priv->xsk, umem, ix);
+ err = mlx5e_xsk_add_pool(&priv->xsk, pool, ix);
if (unlikely(err))
- goto err_unmap_umem;
+ goto err_unmap_pool;
- mlx5e_build_xsk_param(umem, &xsk);
+ mlx5e_build_xsk_param(pool, &xsk);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
/* XSK objects will be created on open. */
@@ -112,9 +112,9 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv,
c = priv->channels.c[ix];
- err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ err = mlx5e_open_xsk(priv, params, &xsk, pool, c);
if (unlikely(err))
- goto err_remove_umem;
+ goto err_remove_pool;
mlx5e_activate_xsk(c);
@@ -132,11 +132,11 @@ err_deactivate:
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
-err_remove_umem:
- mlx5e_xsk_remove_umem(&priv->xsk, ix);
+err_remove_pool:
+ mlx5e_xsk_remove_pool(&priv->xsk, ix);
-err_unmap_umem:
- mlx5e_xsk_unmap_umem(priv, umem);
+err_unmap_pool:
+ mlx5e_xsk_unmap_pool(priv, pool);
return err;
@@ -146,7 +146,7 @@ validate_closed:
*/
if (!mlx5e_validate_xsk_param(params, &xsk, priv->mdev)) {
err = -EINVAL;
- goto err_remove_umem;
+ goto err_remove_pool;
}
return 0;
@@ -154,45 +154,45 @@ validate_closed:
static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix)
{
- struct xdp_umem *umem = mlx5e_xsk_get_umem(&priv->channels.params,
+ struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(&priv->channels.params,
&priv->xsk, ix);
struct mlx5e_channel *c;
- if (unlikely(!umem))
+ if (unlikely(!pool))
return -EINVAL;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- goto remove_umem;
+ goto remove_pool;
/* XSK RQ and SQ are only created if XDP program is set. */
if (!priv->channels.params.xdp_prog)
- goto remove_umem;
+ goto remove_pool;
c = priv->channels.c[ix];
mlx5e_xsk_redirect_rqt_to_drop(priv, ix);
mlx5e_deactivate_xsk(c);
mlx5e_close_xsk(c);
-remove_umem:
- mlx5e_xsk_remove_umem(&priv->xsk, ix);
- mlx5e_xsk_unmap_umem(priv, umem);
+remove_pool:
+ mlx5e_xsk_remove_pool(&priv->xsk, ix);
+ mlx5e_xsk_unmap_pool(priv, pool);
return 0;
}
-static int mlx5e_xsk_enable_umem(struct mlx5e_priv *priv, struct xdp_umem *umem,
+static int mlx5e_xsk_enable_pool(struct mlx5e_priv *priv, struct xsk_buff_pool *pool,
u16 ix)
{
int err;
mutex_lock(&priv->state_lock);
- err = mlx5e_xsk_enable_locked(priv, umem, ix);
+ err = mlx5e_xsk_enable_locked(priv, pool, ix);
mutex_unlock(&priv->state_lock);
return err;
}
-static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
+static int mlx5e_xsk_disable_pool(struct mlx5e_priv *priv, u16 ix)
{
int err;
@@ -203,7 +203,7 @@ static int mlx5e_xsk_disable_umem(struct mlx5e_priv *priv, u16 ix)
return err;
}
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_params *params = &priv->channels.params;
@@ -212,6 +212,6 @@ int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid)
if (unlikely(!mlx5e_qid_get_ch_if_in_group(params, qid, MLX5E_RQ_GROUP_XSK, &ix)))
return -EINVAL;
- return umem ? mlx5e_xsk_enable_umem(priv, umem, ix) :
- mlx5e_xsk_disable_umem(priv, ix);
+ return pool ? mlx5e_xsk_enable_pool(priv, pool, ix) :
+ mlx5e_xsk_disable_pool(priv, ix);
}
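
mlx5e_xsk_get_pools()/mlx5e_xsk_put_pools() above lazily allocate the per-channel pool array on first use and free it when the last pool is removed. The pattern reduced to placeholders (struct registry and MAX_SLOTS are illustrative; like the driver, this assumes the caller serializes get/put, which mlx5e does under priv->state_lock):

#include <linux/slab.h>

#define MAX_SLOTS	64	/* stands in for MLX5E_MAX_NUM_CHANNELS */

struct registry {
	void **slots;
	unsigned int refcnt;
};

static int registry_get(struct registry *r)
{
	if (!r->slots) {
		r->slots = kcalloc(MAX_SLOTS, sizeof(*r->slots), GFP_KERNEL);
		if (!r->slots)
			return -ENOMEM;
	}
	r->refcnt++;
	return 0;
}

static void registry_put(struct registry *r)
{
	if (!--r->refcnt) {
		kfree(r->slots);
		r->slots = NULL;	/* allows clean re-allocation later */
	}
}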
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
new file mode 100644
index 000000000000..dca0010a0866
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019-2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_EN_XSK_POOL_H__
+#define __MLX5_EN_XSK_POOL_H__
+
+#include "en.h"
+
+static inline struct xsk_buff_pool *mlx5e_xsk_get_pool(struct mlx5e_params *params,
+ struct mlx5e_xsk *xsk, u16 ix)
+{
+ if (!xsk || !xsk->pools)
+ return NULL;
+
+ if (unlikely(ix >= params->num_channels))
+ return NULL;
+
+ return xsk->pools[ix];
+}
+
+struct mlx5e_xsk_param;
+void mlx5e_build_xsk_param(struct xsk_buff_pool *pool, struct mlx5e_xsk_param *xsk);
+
+/* .ndo_bpf callback. */
+int mlx5e_xsk_setup_pool(struct net_device *dev, struct xsk_buff_pool *pool, u16 qid);
+
+#endif /* __MLX5_EN_XSK_POOL_H__ */
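
The accessor in the new pool.h returns NULL both for an out-of-range channel index and when no pools were ever configured, so callers branch on the pointer rather than on an error code. A hypothetical caller (the example_* helpers are placeholders, not driver functions):

static int example_setup_rq(struct mlx5e_params *params,
			    struct mlx5e_xsk *xsk, u16 ix)
{
	struct xsk_buff_pool *pool = mlx5e_xsk_get_pool(params, xsk, ix);

	if (!pool)
		return example_setup_regular_rq(ix);	/* regular RQ path */

	return example_setup_xsk_rq(pool, ix);		/* zero-copy path */
}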
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 40db27bf790b..8e7b877d8a12 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -47,8 +47,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
xdp->data_end = xdp->data + cqe_bcnt32;
xdp_set_data_meta_invalid(xdp);
- xsk_buff_dma_sync_for_cpu(xdp);
- prefetch(xdp->data);
+ xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
+ net_prefetch(xdp->data);
/* Possible flows:
* - XDP_REDIRECT to XSKMAP:
@@ -93,8 +93,8 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
xdp->data_end = xdp->data + cqe_bcnt;
xdp_set_data_meta_invalid(xdp);
- xsk_buff_dma_sync_for_cpu(xdp);
- prefetch(xdp->data);
+ xsk_buff_dma_sync_for_cpu(xdp, rq->xsk_pool);
+ net_prefetch(xdp->data);
if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
rq->stats->wqe_err++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
index d147b2f13b54..7f88ccf67fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
@@ -19,10 +19,10 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
struct mlx5e_wqe_frag_info *wi,
u32 cqe_bcnt);
-static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
+static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- dma_info->xsk = xsk_buff_alloc(rq->umem);
+ dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
if (!dma_info->xsk)
return -ENOMEM;
@@ -38,13 +38,13 @@ static inline int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err)
{
- if (!xsk_umem_uses_need_wakeup(rq->umem))
+ if (!xsk_uses_need_wakeup(rq->xsk_pool))
return alloc_err;
if (unlikely(alloc_err))
- xsk_set_rx_need_wakeup(rq->umem);
+ xsk_set_rx_need_wakeup(rq->xsk_pool);
else
- xsk_clear_rx_need_wakeup(rq->umem);
+ xsk_clear_rx_need_wakeup(rq->xsk_pool);
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 55e65a438de7..4e574ac73019 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -45,7 +45,7 @@ static void mlx5e_build_xsk_cparam(struct mlx5e_priv *priv,
}
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel *c)
{
struct mlx5e_channel_param *cparam;
@@ -64,7 +64,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_free_cparam;
- err = mlx5e_open_rq(c, params, &cparam->rq, xsk, umem, &c->xskrq);
+ err = mlx5e_open_rq(c, params, &cparam->rq, xsk, pool, &c->xskrq);
if (unlikely(err))
goto err_close_rx_cq;
@@ -72,13 +72,13 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
if (unlikely(err))
goto err_close_rq;
- /* Create a separate SQ, so that when the UMEM is disabled, we could
+ /* Create a separate SQ, so that when the buff pool is disabled, we could
* close this SQ safely and stop receiving CQEs. In other case, e.g., if
- * the XDPSQ was used instead, we might run into trouble when the UMEM
+ * the XDPSQ was used instead, we might run into trouble when the buff pool
* is disabled and then reenabled, but the SQ continues receiving CQEs
- * from the old UMEM.
+ * from the old buff pool.
*/
- err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, umem, &c->xsksq, true);
+ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true);
if (unlikely(err))
goto err_close_tx_cq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
index 0dd11b81c046..ca20f1ff5e39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h
@@ -12,7 +12,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev);
int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params,
- struct mlx5e_xsk_param *xsk, struct xdp_umem *umem,
+ struct mlx5e_xsk_param *xsk, struct xsk_buff_pool *pool,
struct mlx5e_channel *c);
void mlx5e_close_xsk(struct mlx5e_channel *c);
void mlx5e_activate_xsk(struct mlx5e_channel *c);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index 4d892f6cecb3..fb671a457129 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -2,7 +2,7 @@
/* Copyright (c) 2019 Mellanox Technologies. */
#include "tx.h"
-#include "umem.h"
+#include "pool.h"
#include "en/xdp.h"
#include "en/params.h"
#include <net/xdp_sock_drv.h>
@@ -66,9 +66,9 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{
- struct xdp_umem *umem = sq->umem;
+ struct xsk_buff_pool *pool = sq->xsk_pool;
+ struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi;
- struct mlx5e_xdp_xmit_data xdptxd;
bool work_done = true;
bool flush = false;
@@ -87,7 +87,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- if (!xsk_umem_consume_tx(umem, &desc)) {
+ if (!xsk_tx_peek_desc(pool, &desc)) {
/* TX will get stuck until something wakes it up by
* triggering NAPI. Currently it's expected that the
* application calls sendto() if there are consumed, but
@@ -96,11 +96,11 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
break;
}
- xdptxd.dma_addr = xsk_buff_raw_get_dma(umem, desc.addr);
- xdptxd.data = xsk_buff_raw_get_data(umem, desc.addr);
+ xdptxd.dma_addr = xsk_buff_raw_get_dma(pool, desc.addr);
+ xdptxd.data = xsk_buff_raw_get_data(pool, desc.addr);
xdptxd.len = desc.len;
- xsk_buff_raw_dma_sync_for_device(umem, xdptxd.dma_addr, xdptxd.len);
+ xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
@@ -119,7 +119,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xdp_mpwqe_complete(sq);
mlx5e_xmit_xdp_doorbell(sq);
- xsk_umem_consume_tx_done(umem);
+ xsk_tx_release(pool);
}
return !(budget && work_done);
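
The INDIRECT_CALL_2() wrapper used in the loop above (from <linux/indirect_call_wrapper.h>) compares sq->xmit_xdp_frame against its two known implementations so the common case becomes a direct call, avoiding a retpoline on CONFIG_RETPOLINE kernels. Conceptually it expands to roughly:

if (sq->xmit_xdp_frame == mlx5e_xmit_xdp_frame_mpwqe)
	ret = mlx5e_xmit_xdp_frame_mpwqe(sq, &xdptxd, &xdpi, check_result);
else if (sq->xmit_xdp_frame == mlx5e_xmit_xdp_frame)
	ret = mlx5e_xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result);
else
	ret = sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result);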
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
index 39fa0a705856..a05085035f23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
@@ -15,13 +15,13 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
{
- if (!xsk_umem_uses_need_wakeup(sq->umem))
+ if (!xsk_uses_need_wakeup(sq->xsk_pool))
return;
if (sq->pc != sq->cc)
- xsk_clear_tx_need_wakeup(sq->umem);
+ xsk_clear_tx_need_wakeup(sq->xsk_pool);
else
- xsk_set_tx_need_wakeup(sq->umem);
+ xsk_set_tx_need_wakeup(sq->xsk_pool);
}
#endif /* __MLX5_EN_XSK_TX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
deleted file mode 100644
index bada94973586..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/umem.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#ifndef __MLX5_EN_XSK_UMEM_H__
-#define __MLX5_EN_XSK_UMEM_H__
-
-#include "en.h"
-
-static inline struct xdp_umem *mlx5e_xsk_get_umem(struct mlx5e_params *params,
- struct mlx5e_xsk *xsk, u16 ix)
-{
- if (!xsk || !xsk->umems)
- return NULL;
-
- if (unlikely(ix >= params->num_channels))
- return NULL;
-
- return xsk->umems[ix];
-}
-
-struct mlx5e_xsk_param;
-void mlx5e_build_xsk_param(struct xdp_umem *umem, struct mlx5e_xsk_param *xsk);
-
-/* .ndo_bpf callback. */
-int mlx5e_xsk_setup_umem(struct net_device *dev, struct xdp_umem *umem, u16 qid);
-
-int mlx5e_xsk_resize_reuseq(struct xdp_umem *umem, u32 nentries);
-
-#endif /* __MLX5_EN_XSK_UMEM_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 110476bdeffb..899b98aca0d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -107,6 +107,9 @@ struct mlx5e_accel_tx_state {
#ifdef CONFIG_MLX5_EN_TLS
struct mlx5e_accel_tx_tls_state tls;
#endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+ struct mlx5e_accel_tx_ipsec_state ipsec;
+#endif
};
static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
@@ -125,27 +128,70 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
}
#endif
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) && xfrm_offload(skb)) {
+ if (unlikely(!mlx5e_ipsec_handle_tx_skb(dev, skb, &state->ipsec)))
+ return false;
+ }
+#endif
+
return true;
}
-static inline bool mlx5e_accel_tx_finish(struct mlx5e_priv *priv,
- struct mlx5e_txqsq *sq,
- struct sk_buff *skb,
+static inline bool mlx5e_accel_tx_is_ipsec_flow(struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+ return mlx5e_ipsec_is_tx_flow(&state->ipsec);
+#endif
+
+ return false;
+}
+
+static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
+ struct mlx5e_accel_tx_state *state)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state))
+ return mlx5e_ipsec_tx_ids_len(&state->ipsec);
+#endif
+
+ return 0;
+}
+
+/* Part of the eseg touched by TX offloads */
+#define MLX5E_ACCEL_ESEG_LEN offsetof(struct mlx5_wqe_eth_seg, mss)
+
+static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+#ifdef CONFIG_MLX5_EN_IPSEC
+ if (xfrm_offload(skb))
+ mlx5e_ipsec_tx_build_eseg(priv, skb, eseg);
+#endif
+
+#if IS_ENABLED(CONFIG_GENEVE)
+ if (skb->encapsulation)
+ mlx5e_tx_tunnel_accel(skb, eseg);
+#endif
+
+ return true;
+}
+
+static inline void mlx5e_accel_tx_finish(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe *wqe,
- struct mlx5e_accel_tx_state *state)
+ struct mlx5e_accel_tx_state *state,
+ struct mlx5_wqe_inline_seg *inlseg)
{
#ifdef CONFIG_MLX5_EN_TLS
mlx5e_tls_handle_tx_wqe(sq, &wqe->ctrl, &state->tls);
#endif
#ifdef CONFIG_MLX5_EN_IPSEC
- if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
- if (unlikely(!mlx5e_ipsec_handle_tx_skb(priv, &wqe->eth, skb)))
- return false;
- }
+ if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state) &&
+ state->ipsec.xo && state->ipsec.tailen)
+ mlx5e_ipsec_handle_tx_wqe(wqe, &state->ipsec, inlseg);
#endif
-
- return true;
}
static inline int mlx5e_accel_init_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index 4cdd9eac647d..97f1594cee11 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -191,7 +191,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
ft->g = kcalloc(MLX5E_ACCEL_FS_TCP_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in || !ft->g) {
- kvfree(ft->g);
+ kfree(ft->g);
kvfree(in);
return -ENOMEM;
}
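
The fs_tcp.c change above replaces kvfree(ft->g) with kfree(ft->g) so the free matches the allocator: ft->g comes from kcalloc() (kmalloc family), while in comes from kvzalloc(), which may fall back to vmalloc for large sizes. kvfree() happens to accept kmalloc memory too, but kfree() on vmalloc-backed memory is a bug, so keeping the pairs symmetric avoids that trap. Illustrated below (example() is only a demonstration function):

#include <linux/slab.h>
#include <linux/mm.h>

static void example(void)
{
	int *a = kcalloc(16, sizeof(*a), GFP_KERNEL);	/* kmalloc family */
	int *b = kvzalloc(1 << 20, GFP_KERNEL);		/* may be vmalloc-backed */

	kfree(a);	/* pairs with kcalloc() */
	kvfree(b);	/* required: kfree(b) would be a bug if b fell back to vmalloc */
}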
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index d39989cddd90..3d45341e2216 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -560,6 +560,9 @@ void mlx5e_ipsec_build_netdev(struct mlx5e_priv *priv)
return;
}
+ if (mlx5_is_ipsec_device(mdev))
+ netdev->gso_partial_features |= NETIF_F_GSO_ESP;
+
mlx5_core_dbg(mdev, "mlx5e: ESP GSO capability turned on\n");
netdev->features |= NETIF_F_GSO_ESP;
netdev->hw_features |= NETIF_F_GSO_ESP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
index 0fc8b4d4f4a3..6164c7f59efb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
@@ -76,6 +76,7 @@ struct mlx5e_ipsec_stats {
};
struct mlx5e_accel_fs_esp;
+struct mlx5e_ipsec_tx;
struct mlx5e_ipsec {
struct mlx5e_priv *en_priv;
@@ -87,6 +88,7 @@ struct mlx5e_ipsec {
struct mlx5e_ipsec_stats stats;
struct workqueue_struct *wq;
struct mlx5e_accel_fs_esp *rx_fs;
+ struct mlx5e_ipsec_tx *tx_fs;
};
struct mlx5e_ipsec_esn_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 429428bbc903..0e45590662a8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -34,6 +34,12 @@ struct mlx5e_accel_fs_esp {
struct mlx5e_accel_fs_esp_prot fs_prot[ACCEL_FS_ESP_NUM_TYPES];
};
+struct mlx5e_ipsec_tx {
+ struct mlx5_flow_table *ft;
+ struct mutex mutex; /* Protect IPsec TX steering */
+ u32 refcnt;
+};
+
/* IPsec RX flow steering */
static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i)
{
@@ -228,8 +234,8 @@ static int rx_fs_create(struct mlx5e_priv *priv,
fs_prot->miss_rule = miss_rule;
out:
- kfree(flow_group_in);
- kfree(spec);
+ kvfree(flow_group_in);
+ kvfree(spec);
return err;
}
@@ -323,6 +329,77 @@ out:
mutex_unlock(&fs_prot->prot_mutex);
}
+/* IPsec TX flow steering */
+static int tx_create(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ priv->fs.egress_ns =
+ mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_EGRESS_KERNEL);
+ if (!priv->fs.egress_ns)
+ return -EOPNOTSUPP;
+
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.autogroup.max_num_groups = 1;
+ ft = mlx5_create_auto_grouped_flow_table(priv->fs.egress_ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ netdev_err(priv->netdev, "fail to create ipsec tx ft err=%d\n", err);
+ return err;
+ }
+ ipsec->tx_fs->ft = ft;
+ return 0;
+}
+
+static void tx_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+
+ if (IS_ERR_OR_NULL(ipsec->tx_fs->ft))
+ return;
+
+ mlx5_destroy_flow_table(ipsec->tx_fs->ft);
+ ipsec->tx_fs->ft = NULL;
+}
+
+static int tx_ft_get(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+ int err = 0;
+
+ mutex_lock(&tx_fs->mutex);
+ if (tx_fs->refcnt++)
+ goto out;
+
+ err = tx_create(priv);
+ if (err) {
+ tx_fs->refcnt--;
+ goto out;
+ }
+
+out:
+ mutex_unlock(&tx_fs->mutex);
+ return err;
+}
+
+static void tx_ft_put(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec_tx *tx_fs = priv->ipsec->tx_fs;
+
+ mutex_lock(&tx_fs->mutex);
+ if (--tx_fs->refcnt)
+ goto out;
+
+ tx_destroy(priv);
+
+out:
+ mutex_unlock(&tx_fs->mutex);
+}
+
static void setup_fte_common(struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5_flow_spec *spec,
@@ -457,6 +534,54 @@ out:
return err;
}
+static int tx_add_rule(struct mlx5e_priv *priv,
+ struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u32 ipsec_obj_id,
+ struct mlx5e_ipsec_rule *ipsec_rule)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ err = tx_ft_get(priv);
+ if (err)
+ return err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ setup_fte_common(attrs, ipsec_obj_id, spec, &flow_act);
+
+ /* Add IPsec indicator in metadata_reg_a */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ MLX5_SET(fte_match_param, spec->match_criteria, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_IPSEC);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_IPSEC);
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_IPSEC_ENCRYPT;
+ rule = mlx5_add_flow_rules(priv->ipsec->tx_fs->ft, spec, &flow_act, NULL, 0);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "fail to add ipsec rule attrs->action=0x%x, err=%d\n",
+ attrs->action, err);
+ goto out;
+ }
+
+ ipsec_rule->rule = rule;
+
+out:
+ kvfree(spec);
+ if (err)
+ tx_ft_put(priv);
+ return err;
+}
+
static void rx_del_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
struct mlx5e_ipsec_rule *ipsec_rule)
@@ -470,15 +595,27 @@ static void rx_del_rule(struct mlx5e_priv *priv,
rx_ft_put(priv, attrs->is_ipv6 ? ACCEL_FS_ESP6 : ACCEL_FS_ESP4);
}
+static void tx_del_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ipsec_rule *ipsec_rule)
+{
+ mlx5_del_flow_rules(ipsec_rule->rule);
+ ipsec_rule->rule = NULL;
+
+ tx_ft_put(priv);
+}
+
int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_priv *priv,
struct mlx5_accel_esp_xfrm_attrs *attrs,
u32 ipsec_obj_id,
struct mlx5e_ipsec_rule *ipsec_rule)
{
- if (!priv->ipsec->rx_fs || attrs->action != MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ if (!priv->ipsec->rx_fs)
return -EOPNOTSUPP;
- return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+ if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ return rx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
+ else
+ return tx_add_rule(priv, attrs, ipsec_obj_id, ipsec_rule);
}
void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
@@ -488,7 +625,18 @@ void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_priv *priv,
if (!priv->ipsec->rx_fs)
return;
- rx_del_rule(priv, attrs, ipsec_rule);
+ if (attrs->action == MLX5_ACCEL_ESP_ACTION_DECRYPT)
+ rx_del_rule(priv, attrs, ipsec_rule);
+ else
+ tx_del_rule(priv, ipsec_rule);
+}
+
+static void fs_cleanup_tx(struct mlx5e_priv *priv)
+{
+ mutex_destroy(&priv->ipsec->tx_fs->mutex);
+ WARN_ON(priv->ipsec->tx_fs->refcnt);
+ kfree(priv->ipsec->tx_fs);
+ priv->ipsec->tx_fs = NULL;
}
static void fs_cleanup_rx(struct mlx5e_priv *priv)
@@ -507,6 +655,17 @@ static void fs_cleanup_rx(struct mlx5e_priv *priv)
priv->ipsec->rx_fs = NULL;
}
+static int fs_init_tx(struct mlx5e_priv *priv)
+{
+ priv->ipsec->tx_fs =
+ kzalloc(sizeof(struct mlx5e_ipsec_tx), GFP_KERNEL);
+ if (!priv->ipsec->tx_fs)
+ return -ENOMEM;
+
+ mutex_init(&priv->ipsec->tx_fs->mutex);
+ return 0;
+}
+
static int fs_init_rx(struct mlx5e_priv *priv)
{
struct mlx5e_accel_fs_esp_prot *fs_prot;
@@ -532,13 +691,24 @@ void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_priv *priv)
if (!priv->ipsec->rx_fs)
return;
+ fs_cleanup_tx(priv);
fs_cleanup_rx(priv);
}
int mlx5e_accel_ipsec_fs_init(struct mlx5e_priv *priv)
{
+ int err;
+
if (!mlx5_is_ipsec_device(priv->mdev) || !priv->ipsec)
return -EOPNOTSUPP;
- return fs_init_rx(priv);
+ err = fs_init_tx(priv);
+ if (err)
+ return err;
+
+ err = fs_init_rx(priv);
+ if (err)
+ fs_cleanup_tx(priv);
+
+ return err;
}
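
tx_ft_get()/tx_ft_put() above implement create-on-first-use with destroy-on-last-put, serialized by a mutex so concurrent SA installs see either the existing flow table or a freshly created one. The shape in miniature (struct shared and create_object/destroy_object are placeholders):

#include <linux/mutex.h>

struct shared {
	struct mutex mutex;
	u32 refcnt;
	void *object;
};

static int shared_get(struct shared *s)
{
	int err = 0;

	mutex_lock(&s->mutex);
	if (s->refcnt++)		/* already created by an earlier user */
		goto out;
	err = create_object(s);		/* placeholder */
	if (err)
		s->refcnt--;		/* roll back on failure */
out:
	mutex_unlock(&s->mutex);
	return err;
}

static void shared_put(struct shared *s)
{
	mutex_lock(&s->mutex);
	if (--s->refcnt == 0)
		destroy_object(s);	/* placeholder */
	mutex_unlock(&s->mutex);
}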
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 93a8d68815ad..11e31a3db2be 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -34,7 +34,7 @@
#include <crypto/aead.h>
#include <net/xfrm.h>
#include <net/esp.h>
-
+#include "accel/ipsec_offload.h"
#include "en_accel/ipsec_rxtx.h"
#include "en_accel/ipsec.h"
#include "accel/accel.h"
@@ -233,18 +233,94 @@ static void mlx5e_ipsec_set_metadata(struct sk_buff *skb,
ntohs(mdata->content.tx.seq));
}
-bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
- struct mlx5_wqe_eth_seg *eseg,
- struct sk_buff *skb)
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+ struct mlx5_wqe_inline_seg *inlseg)
+{
+ inlseg->byte_count = cpu_to_be32(ipsec_st->tailen | MLX5_INLINE_SEG);
+ esp_output_fill_trailer((u8 *)inlseg->data, 0, ipsec_st->plen, ipsec_st->xo->proto);
+}
+
+static int mlx5e_ipsec_set_state(struct mlx5e_priv *priv,
+ struct sk_buff *skb,
+ struct xfrm_state *x,
+ struct xfrm_offload *xo,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ unsigned int blksize, clen, alen, plen;
+ struct crypto_aead *aead;
+ unsigned int tailen;
+
+ ipsec_st->x = x;
+ ipsec_st->xo = xo;
+ if (mlx5_is_ipsec_device(priv->mdev)) {
+ aead = x->data;
+ alen = crypto_aead_authsize(aead);
+ blksize = ALIGN(crypto_aead_blocksize(aead), 4);
+ clen = ALIGN(skb->len + 2, blksize);
+ plen = max_t(u32, clen - skb->len, 4);
+ tailen = plen + alen;
+ ipsec_st->plen = plen;
+ ipsec_st->tailen = tailen;
+ }
+
+ return 0;
+}
+
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
{
struct xfrm_offload *xo = xfrm_offload(skb);
- struct mlx5e_ipsec_metadata *mdata;
- struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct xfrm_encap_tmpl *encap;
struct xfrm_state *x;
struct sec_path *sp;
+ u8 l3_proto;
+
+ sp = skb_sec_path(skb);
+ if (unlikely(sp->len != 1))
+ return;
+
+ x = xfrm_input_state(skb);
+ if (unlikely(!x))
+ return;
+
+ if (unlikely(!x->xso.offload_handle ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))))
+ return;
+
+ mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
- if (!xo)
- return true;
+ l3_proto = (x->props.family == AF_INET) ?
+ ((struct iphdr *)skb_network_header(skb))->protocol :
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr;
+
+ if (mlx5_is_ipsec_device(priv->mdev)) {
+ eseg->flow_table_metadata |= cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+ eseg->trailer |= cpu_to_be32(MLX5_ETH_WQE_INSERT_TRAILER);
+ encap = x->encap;
+ if (!encap) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_OUTER_L4_ASSOC);
+ } else if (encap->encap_type == UDP_ENCAP_ESPINUDP) {
+ eseg->trailer |= (l3_proto == IPPROTO_ESP) ?
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_IP_ASSOC) :
+ cpu_to_be32(MLX5_ETH_WQE_TRAILER_HDR_INNER_L4_ASSOC);
+ }
+ }
+}
+
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct xfrm_offload *xo = xfrm_offload(skb);
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5e_ipsec_metadata *mdata;
+ struct xfrm_state *x;
+ struct sec_path *sp;
sp = skb_sec_path(skb);
if (unlikely(sp->len != 1)) {
@@ -270,15 +346,21 @@ bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_trailer);
goto drop;
}
- mdata = mlx5e_ipsec_add_metadata(skb);
- if (IS_ERR(mdata)) {
- atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
- goto drop;
+
+ if (MLX5_CAP_GEN(priv->mdev, fpga)) {
+ mdata = mlx5e_ipsec_add_metadata(skb);
+ if (IS_ERR(mdata)) {
+ atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
+ goto drop;
+ }
}
- mlx5e_ipsec_set_swp(skb, eseg, x->props.mode, xo);
+
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
sa_entry->set_iv_op(skb, x, xo);
- mlx5e_ipsec_set_metadata(skb, mdata, xo);
+ if (MLX5_CAP_GEN(priv->mdev, fpga))
+ mlx5e_ipsec_set_metadata(skb, mdata, xo);
+
+ mlx5e_ipsec_set_state(priv, skb, x, xo, ipsec_st);
return true;
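
mlx5e_ipsec_set_state() above sizes the ESP trailer that the HW will append: the payload plus the two trailer bytes (pad length and next header) is rounded up to the cipher block size (itself aligned to 4), at least 4 bytes of padding are kept, and the ICV length is added on top. A self-contained recomputation of those numbers, with example inputs (a 16-byte ICV as for AES-GCM) standing in for the crypto_aead queries:

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
	#define MAX(a, b)	((a) > (b) ? (a) : (b))

	int main(void)
	{
		unsigned int skb_len = 1400;	/* example payload length */
		unsigned int alen = 16;		/* ICV size, e.g. AES-GCM */
		unsigned int blocksize = 1;	/* GCM behaves as a stream mode */

		unsigned int blksize = ALIGN(blocksize, 4);
		/* +2 covers the ESP pad-length and next-header trailer bytes */
		unsigned int clen = ALIGN(skb_len + 2, blksize);
		unsigned int plen = MAX(clen - skb_len, 4u);
		unsigned int tailen = plen + alen;

		printf("plen=%u tailen=%u\n", plen, tailen);	/* plen=4 tailen=20 */
		return 0;
	}
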
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index f96e786db158..056dacb612b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -43,6 +43,13 @@
#define MLX5_IPSEC_METADATA_SYNDROM_MASK (0x7F)
#define MLX5_IPSEC_METADATA_HANDLE(metadata) (((metadata) >> 8) & 0xFF)
+struct mlx5e_accel_tx_ipsec_state {
+ struct xfrm_offload *xo;
+ struct xfrm_state *x;
+ u32 tailen;
+ u32 plen;
+};
+
#ifdef CONFIG_MLX5_EN_IPSEC
struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev,
@@ -55,16 +62,32 @@ void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
void mlx5e_ipsec_set_iv(struct sk_buff *skb, struct xfrm_state *x,
struct xfrm_offload *xo);
-bool mlx5e_ipsec_handle_tx_skb(struct mlx5e_priv *priv,
- struct mlx5_wqe_eth_seg *eseg,
- struct sk_buff *skb);
+bool mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st);
+void mlx5e_ipsec_handle_tx_wqe(struct mlx5e_tx_wqe *wqe,
+ struct mlx5e_accel_tx_ipsec_state *ipsec_st,
+ struct mlx5_wqe_inline_seg *inlseg);
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
struct sk_buff *skb,
struct mlx5_cqe64 *cqe);
+static inline unsigned int mlx5e_ipsec_tx_ids_len(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ return ipsec_st->tailen;
+}
+
static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe)
{
return !!(MLX5_IPSEC_METADATA_MARKER_MASK & be32_to_cpu(cqe->ft_metadata));
}
+
+static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st)
+{
+ return ipsec_st->x;
+}
+
+void mlx5e_ipsec_tx_build_eseg(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg);
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
index 6bbfcf18107d..ccaccb9fc2f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c
@@ -253,7 +253,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
goto err_out;
}
- pdev = sq->channel->priv->mdev->device;
+ pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
buf->dma_addr = dma_map_single(pdev, &buf->progress,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
@@ -390,7 +390,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
priv_rx = buf->priv_rx;
resync = &priv_rx->resync;
- dev = resync->priv->mdev->device;
+ dev = mlx5_core_dma_dev(resync->priv->mdev);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;
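
Both ktls_rx.c hunks above swap mdev->device for mlx5_core_dma_dev(), so buffers are mapped against the device that actually performs the DMA. The map/check idiom itself is unchanged; a hedged sketch of that idiom (the helper name and parameters are illustrative):

	#include <linux/dma-mapping.h>

	/* Map a buffer for device-to-CPU DMA against the DMA device and
	 * fail cleanly if the mapping could not be created.
	 */
	static int map_for_device(struct device *dma_dev, void *buf, size_t len,
				  dma_addr_t *addr)
	{
		*addr = dma_map_single(dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, *addr)))
			return -ENOMEM;
		return 0;
	}
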
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index f4861545b236..b140e13fdcc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -345,9 +345,6 @@ void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_sq_stats *stats;
struct mlx5e_sq_dma *dma;
- if (!wi->resync_dump_frag_page)
- return;
-
dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
stats = sq->stats;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index ff4c740af10b..7521c9be735b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -29,12 +29,24 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
struct mlx5e_tx_wqe_info *wi,
u32 *dma_fifo_cc);
+static inline bool
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
+{
+ if (unlikely(wi->resync_dump_frag_page)) {
+ mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma_fifo_cc);
+ return true;
+ }
+ return false;
+}
#else
-static inline void
-mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
- struct mlx5e_tx_wqe_info *wi,
- u32 *dma_fifo_cc)
+static inline bool
+mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+ struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
{
+ return false;
}
#endif /* CONFIG_MLX5_EN_TLS */
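
The header change above is a common refactor: the cheap resync_dump_frag_page test moves into a static inline try_* wrapper, so the TX completion fast path pays only one predictable branch, and the !CONFIG_MLX5_EN_TLS stub collapses to a constant false the compiler can eliminate. A generic sketch of that shape (names are illustrative):

	#include <stdbool.h>

	struct work_item { void *frag; };

	void handle_slow_path(struct work_item *wi);	/* out of line, rarely taken */

	/* Fast-path gate: inline predicate, out-of-line handler. */
	static inline bool try_handle(struct work_item *wi)
	{
		if (wi->frag) {
			handle_slow_path(wi);
			return true;
		}
		return false;
	}
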
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
index b0c31d49ff8d..6982b193ee8a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c
@@ -189,12 +189,10 @@ static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
struct mlx5e_tls *tls)
{
u32 tcp_seq = ntohl(tcp_hdr(skb)->seq);
- struct mlx5e_tx_wqe *wqe;
struct sync_info info;
struct sk_buff *nskb;
int linear_len = 0;
int headln;
- u16 pi;
int i;
sq->stats->tls_ooo++;
@@ -246,9 +244,7 @@ static bool mlx5e_tls_handle_ooo(struct mlx5e_tls_offload_context_tx *context,
sq->stats->tls_resync_bytes += nskb->len;
mlx5e_tls_complete_sync_skb(skb, nskb, tcp_seq, headln,
cpu_to_be64(info.rcd_sn));
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
- mlx5e_sq_xmit(sq, nskb, wqe, pi, true);
+ mlx5e_sq_xmit_simple(sq, nskb, true);
return true;
@@ -274,6 +270,8 @@ bool mlx5e_tls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
if (!datalen)
return true;
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+
tls_ctx = tls_get_ctx(skb->sk);
if (WARN_ON_ONCE(tls_ctx->netdev != netdev))
goto err_out;
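
In the tls_rxtx.c hunks above, the resync skb is now posted through mlx5e_sq_xmit_simple() instead of a hand-rolled producer-index/WQE fetch, and mlx5e_tls_handle_tx_skb() first forces any open multi-packet WQE session to complete, since posting an unrelated WQE into the middle of an open session would corrupt the queue's descriptor accounting. The invariant, as a sketch (types and names are illustrative):

	struct sq { int session_open; };

	static void session_flush(struct sq *sq) { sq->session_open = 0; }

	/* Any open batching session must be flushed before an out-of-band
	 * descriptor is enqueued on the same queue.
	 */
	static void post_out_of_band(struct sq *sq)
	{
		if (sq->session_open)
			session_flush(sq);
		/* ...enqueue the standalone descriptor here... */
	}
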
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 08270987c506..d25a56ec6876 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -32,7 +32,7 @@
#include "en.h"
#include "en/port.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -243,7 +243,6 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
return MLX5E_NUM_PFLAGS;
case ETH_SS_TEST:
return mlx5e_self_test_num(priv);
- fallthrough;
default:
return -EOPNOTSUPP;
}
@@ -1341,6 +1340,14 @@ static int mlx5e_set_tunable(struct net_device *dev,
return err;
}
+static void mlx5e_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_pause_get(priv, pause_stats);
+}
+
void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
struct ethtool_pauseparam *pauseparam)
{
@@ -1901,7 +1908,7 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
return 0;
}
-static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+static int set_pflag_tx_mpwqe_common(struct net_device *netdev, u32 flag, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1913,7 +1920,7 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
new_channels.params = priv->channels.params;
- MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+ MLX5E_SET_PFLAG(&new_channels.params, flag, enable);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1924,6 +1931,16 @@ static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
return err;
}
+static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_XDP_TX_MPWQE, enable);
+}
+
+static int set_pflag_skb_tx_mpwqe(struct net_device *netdev, bool enable)
+{
+ return set_pflag_tx_mpwqe_common(netdev, MLX5E_PFLAG_SKB_TX_MPWQE, enable);
+}
+
static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_cqe_moder", set_pflag_rx_cqe_based_moder },
{ "tx_cqe_moder", set_pflag_tx_cqe_based_moder },
@@ -1931,6 +1948,7 @@ static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = {
{ "rx_striding_rq", set_pflag_rx_striding_rq },
{ "rx_no_csum_complete", set_pflag_rx_no_csum_complete },
{ "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe },
+ { "skb_tx_mpwqe", set_pflag_skb_tx_mpwqe },
};
static int mlx5e_handle_pflag(struct net_device *netdev,
@@ -2033,6 +2051,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
+ .get_pause_stats = mlx5e_get_pause_stats,
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
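
The ethtool hunks above extract set_pflag_tx_mpwqe_common() so the XDP and SKB MPWQE private flags share one implementation, with thin wrappers preserving the callback signature the pflag descriptor table expects. A generic sketch of that extract-and-parameterize shape (all names here are illustrative):

	#include <stdbool.h>

	struct dev;

	/* Shared worker, parameterized by the flag bit. */
	int set_flag_common(struct dev *d, unsigned int flag, bool enable);

	static int set_flag_xdp_mpwqe(struct dev *d, bool enable)
	{
		return set_flag_common(d, 1u << 0, enable);
	}

	static int set_flag_skb_mpwqe(struct dev *d, bool enable)
	{
		return set_flag_common(d, 1u << 1, enable);
	}
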
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 83c9b2bbc4af..b416a8ee2eed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -33,7 +33,7 @@
#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
struct mlx5e_ethtool_rule {
struct list_head list;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 42ec28e29834..b3f02aac7f26 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -57,7 +57,7 @@
#include "en/monitor_stats.h"
#include "en/health.h"
#include "en/params.h"
-#include "en/xsk/umem.h"
+#include "en/xsk/pool.h"
#include "en/xsk/setup.h"
#include "en/xsk/rx.h"
#include "en/xsk/tx.h"
@@ -393,7 +393,7 @@ static void mlx5e_free_mpwqe_rq_drop_page(struct mlx5e_rq *rq)
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_rq_param *rqp,
struct mlx5e_rq *rq)
{
@@ -419,9 +419,9 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->mdev = mdev;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
rq->xdpsq = &c->rq_xdpsq;
- rq->umem = umem;
+ rq->xsk_pool = xsk_pool;
- if (rq->umem)
+ if (rq->xsk_pool)
rq->stats = &c->priv->channel_stats[c->ix].xskrq;
else
rq->stats = &c->priv->channel_stats[c->ix].rq;
@@ -511,7 +511,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
if (xsk) {
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_XSK_BUFF_POOL, NULL);
- xsk_buff_set_rxq_info(rq->umem, &rq->xdp_rxq);
+ xsk_pool_set_rxq_info(rq->xsk_pool, &rq->xdp_rxq);
} else {
/* Create a page_pool and register it with rxq */
pp_params.order = 0;
@@ -861,11 +861,11 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_rq_param *param, struct mlx5e_xsk_param *xsk,
- struct xdp_umem *umem, struct mlx5e_rq *rq)
+ struct xsk_buff_pool *xsk_pool, struct mlx5e_rq *rq)
{
int err;
- err = mlx5e_alloc_rq(c, params, xsk, umem, param, rq);
+ err = mlx5e_alloc_rq(c, params, xsk, xsk_pool, param, rq);
if (err)
return err;
@@ -893,6 +893,13 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
+ /* For CQE compression on striding RQ, use the stride index provided
+ * by HW if the capability is supported.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ) &&
+ MLX5_CAP_GEN(c->mdev, mini_cqe_resp_stride_index))
+ __set_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &c->rq.state);
+
return 0;
err_destroy_rq:
@@ -970,7 +977,7 @@ static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
struct mlx5e_params *params,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_sq_param *param,
struct mlx5e_xdpsq *sq,
bool is_redirect)
@@ -986,9 +993,9 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq->min_inline_mode = params->tx_min_inline_mode;
sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
- sq->umem = umem;
+ sq->xsk_pool = xsk_pool;
- sq->stats = sq->umem ?
+ sq->stats = sq->xsk_pool ?
&c->priv->channel_stats[c->ix].xsksq :
is_redirect ?
&c->priv->channel_stats[c->ix].xdpsq :
@@ -1085,6 +1092,7 @@ static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
kvfree(sq->db.wqe_info);
+ kvfree(sq->db.skb_fifo);
kvfree(sq->db.dma_fifo);
}
@@ -1096,15 +1104,19 @@ static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
sizeof(*sq->db.dma_fifo)),
GFP_KERNEL, numa);
+ sq->db.skb_fifo = kvzalloc_node(array_size(df_sz,
+ sizeof(*sq->db.skb_fifo)),
+ GFP_KERNEL, numa);
sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
sizeof(*sq->db.wqe_info)),
GFP_KERNEL, numa);
- if (!sq->db.dma_fifo || !sq->db.wqe_info) {
+ if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) {
mlx5e_free_txqsq_db(sq);
return -ENOMEM;
}
sq->dma_fifo_mask = df_sz - 1;
+ sq->skb_fifo_mask = df_sz - 1;
return 0;
}
@@ -1115,6 +1127,12 @@ static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
sq->stop_room = mlx5e_tls_get_stop_room(sq);
sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
+ if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
+ /* An MPWQE can take up to the maximum-sized WQE, and in addition
+ * all the normal stop room can be taken if a new packet breaks the
+ * active MPWQE session and allocates its WQEs right away.
+ */
+ sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
if (WARN_ON(sq->stop_room >= sq_size)) {
netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
@@ -1156,6 +1174,8 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
if (mlx5_accel_is_tls_device(c->priv->mdev))
set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
+ if (param->is_mpw)
+ set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
if (err)
return err;
@@ -1449,13 +1469,13 @@ void mlx5e_close_icosq(struct mlx5e_icosq *sq)
}
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
- struct mlx5e_sq_param *param, struct xdp_umem *umem,
+ struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
struct mlx5e_xdpsq *sq, bool is_redirect)
{
struct mlx5e_create_sq_param csp = {};
int err;
- err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
+ err = mlx5e_alloc_xdpsq(c, params, xsk_pool, param, sq, is_redirect);
if (err)
return err;
@@ -1948,7 +1968,7 @@ static u8 mlx5e_enumerate_lag_port(struct mlx5_core_dev *mdev, int ix)
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam,
- struct xdp_umem *umem,
+ struct xsk_buff_pool *xsk_pool,
struct mlx5e_channel **cp)
{
int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix));
@@ -1972,7 +1992,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
- c->pdev = priv->mdev->device;
+ c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
@@ -1987,9 +2007,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
if (unlikely(err))
goto err_napi_del;
- if (umem) {
- mlx5e_build_xsk_param(umem, &xsk);
- err = mlx5e_open_xsk(priv, params, &xsk, umem, c);
+ if (xsk_pool) {
+ mlx5e_build_xsk_param(xsk_pool, &xsk);
+ err = mlx5e_open_xsk(priv, params, &xsk, xsk_pool, c);
if (unlikely(err))
goto err_close_queues;
}
@@ -2160,7 +2180,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
- param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
}
@@ -2176,7 +2196,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
- param->wq.buf_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2188,7 +2208,7 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
- param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@@ -2204,6 +2224,7 @@ static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
MLX5_SET(sqc, sqc, allow_swp, allow_swp);
+ param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
mlx5e_build_tx_cq_param(priv, params, &param->cqp);
}
@@ -2223,6 +2244,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
struct mlx5_core_dev *mdev = priv->mdev;
+ bool hw_stridx = false;
void *cqc = param->cqc;
u8 log_cq_size;
@@ -2230,6 +2252,7 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
+ hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -2237,7 +2260,8 @@ void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
- MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, hw_stridx ?
+ MLX5_CQE_FORMAT_CSUM_STRIDX : MLX5_CQE_FORMAT_CSUM);
MLX5_SET(cqc, cqc, cqe_comp_en, 1);
}
@@ -2350,12 +2374,12 @@ int mlx5e_open_channels(struct mlx5e_priv *priv,
mlx5e_build_channel_param(priv, &chs->params, cparam);
for (i = 0; i < chs->num; i++) {
- struct xdp_umem *umem = NULL;
+ struct xsk_buff_pool *xsk_pool = NULL;
if (chs->params.xdp_prog)
- umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, i);
+ xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, i);
- err = mlx5e_open_channel(priv, i, &chs->params, cparam, umem, &chs->c[i]);
+ err = mlx5e_open_channel(priv, i, &chs->params, cparam, xsk_pool, &chs->c[i]);
if (err)
goto err_close_channels;
}
@@ -3222,8 +3246,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
- param->wq.buf_numa_node = dev_to_node(mdev->device);
- param->wq.db_numa_node = dev_to_node(mdev->device);
+ param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
+ param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
return mlx5e_alloc_cq_common(mdev, param, cq);
}
@@ -3927,13 +3951,14 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev,
u16 ix;
for (ix = 0; ix < chs->params.num_channels; ix++) {
- struct xdp_umem *umem = mlx5e_xsk_get_umem(&chs->params, chs->params.xsk, ix);
+ struct xsk_buff_pool *xsk_pool =
+ mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix);
struct mlx5e_xsk_param xsk;
- if (!umem)
+ if (!xsk_pool)
continue;
- mlx5e_build_xsk_param(umem, &xsk);
+ mlx5e_build_xsk_param(xsk_pool, &xsk);
if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) {
u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk);
@@ -4466,8 +4491,8 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return mlx5e_xdp_set(dev, xdp->prog);
- case XDP_SETUP_XSK_UMEM:
- return mlx5e_xsk_setup_umem(dev, xdp->xsk.umem,
+ case XDP_SETUP_XSK_POOL:
+ return mlx5e_xsk_setup_pool(dev, xdp->xsk.pool,
xdp->xsk.queue_id);
default:
return -EINVAL;
@@ -4758,6 +4783,8 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv,
params->log_sq_size = is_kdump_kernel() ?
MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE,
+ MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe));
/* XDP SQ */
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE,
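
The stop-room hunk in mlx5e_calc_sq_stop_room() above reserves a second maximum-sized WQE when SKB MPWQE is enabled: in the worst case an open MPWQE session occupies one maximum-sized WQE while a packet that breaks the session allocates another right away, and the producer must stop before either can wrap the queue. The arithmetic, recomputed standalone (the WQEBB count is an illustrative stand-in for MLX5_SEND_WQE_MAX_WQEBBS):

	#include <stdio.h>

	#define MAX_WQEBBS 16	/* illustrative stand-in */

	int main(void)
	{
		int mpwqe_enabled = 1;
		unsigned int stop_room = 0;

		stop_room += MAX_WQEBBS;	/* one maximum-sized WQE */
		if (mpwqe_enabled)
			stop_room += MAX_WQEBBS;	/* plus an open MPWQE session */

		printf("stop_room = %u WQEBBs\n", stop_room);
		return 0;
	}
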
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index e979bff64c49..67247c33b9fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -39,7 +39,6 @@
#include <net/ipv6_stubs.h>
#include "eswitch.h"
-#include "esw/chains.h"
#include "en.h"
#include "en_rep.h"
#include "en/txrx.h"
@@ -288,6 +287,14 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
+static void mlx5e_uplink_rep_get_pause_stats(struct net_device *netdev,
+ struct ethtool_pause_stats *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_pause_get(priv, stats);
+}
+
static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pauseparam)
{
@@ -362,23 +369,11 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc,
+ .get_pause_stats = mlx5e_uplink_rep_get_pause_stats,
.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
-static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
-{
- struct mlx5e_priv *priv;
- u64 parent_id;
-
- priv = netdev_priv(dev);
-
- parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
- ppid->id_len = sizeof(parent_id);
- memcpy(ppid->id, &parent_id, sizeof(parent_id));
-}
-
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
@@ -603,12 +598,13 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
-static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
+static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_core_dev *dev = priv->mdev;
- return &rpriv->dl_port;
+ return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}
static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
@@ -1198,63 +1194,13 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
-static bool
-is_devlink_port_supported(const struct mlx5_core_dev *dev,
- const struct mlx5e_rep_priv *rpriv)
-{
- return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
- rpriv->rep->vport == MLX5_VPORT_PF ||
- mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
-}
-
-static int register_devlink_port(struct mlx5_core_dev *dev,
- struct mlx5e_rep_priv *rpriv)
-{
- struct devlink *devlink = priv_to_devlink(dev);
- struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct devlink_port_attrs attrs = {};
- struct netdev_phys_item_id ppid = {};
- unsigned int dl_port_index = 0;
- u16 pfnum;
-
- if (!is_devlink_port_supported(dev, rpriv))
- return 0;
-
- mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport);
- pfnum = PCI_FUNC(dev->pdev->devfn);
- if (rep->vport == MLX5_VPORT_UPLINK) {
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pfnum;
- memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_set(&rpriv->dl_port, &attrs);
- } else if (rep->vport == MLX5_VPORT_PF) {
- memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_pf_set(&rpriv->dl_port, pfnum);
- } else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) {
- memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
- pfnum, rep->vport - 1);
- }
- return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
-}
-
-static void unregister_devlink_port(struct mlx5_core_dev *dev,
- struct mlx5e_rep_priv *rpriv)
-{
- if (is_devlink_port_supported(dev, rpriv))
- devlink_port_unregister(&rpriv->dl_port);
-}
-
/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
const struct mlx5e_profile *profile;
struct mlx5e_rep_priv *rpriv;
+ struct devlink_port *dl_port;
struct net_device *netdev;
int nch, err;
@@ -1304,28 +1250,19 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
- err = register_devlink_port(dev, rpriv);
- if (err) {
- netdev_warn(netdev, "Failed to register devlink port %d\n",
- rep->vport);
- goto err_neigh_cleanup;
- }
-
err = register_netdev(netdev);
if (err) {
netdev_warn(netdev,
"Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_devlink_cleanup;
+ goto err_neigh_cleanup;
}
- if (is_devlink_port_supported(dev, rpriv))
- devlink_port_type_eth_set(&rpriv->dl_port, netdev);
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_eth_set(dl_port, netdev);
return 0;
-err_devlink_cleanup:
- unregister_devlink_port(dev, rpriv);
-
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1349,12 +1286,13 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *dev = priv->mdev;
+ struct devlink_port *dl_port;
void *ppriv = priv->ppriv;
- if (is_devlink_port_supported(dev, rpriv))
- devlink_port_type_clear(&rpriv->dl_port);
+ dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
+ if (dl_port)
+ devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
- unregister_devlink_port(dev, rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
if (rep->vport == MLX5_VPORT_UPLINK)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 0d1562e20118..9020d1419bcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -101,7 +101,6 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
- struct devlink_port dl_port;
};
static inline
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 64c8ac5eabf6..599f5b5ebc97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
@@ -139,8 +138,17 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
title->check_sum = mini_cqe->checksum;
title->op_own &= 0xf0;
title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
- title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
+ /* The state bit being set implies the linked-list striding RQ wq
+ * type and that the HW stride index capability is supported.
+ */
+ if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
+ title->wqe_counter = mini_cqe->stridx;
+ return;
+ }
+
+ /* HW stride index capability not supported */
+ title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
else
@@ -282,8 +290,8 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq,
static inline int mlx5e_page_alloc(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info)
{
- if (rq->umem)
- return mlx5e_xsk_page_alloc_umem(rq, dma_info);
+ if (rq->xsk_pool)
+ return mlx5e_xsk_page_alloc_pool(rq, dma_info);
else
return mlx5e_page_alloc_pool(rq, dma_info);
}
@@ -314,7 +322,7 @@ static inline void mlx5e_page_release(struct mlx5e_rq *rq,
struct mlx5e_dma_info *dma_info,
bool recycle)
{
- if (rq->umem)
+ if (rq->xsk_pool)
/* The `recycle` parameter is ignored, and the page is always
* put into the Reuse Ring, because there is no way to return
* the page to the userspace when the interface goes down.
@@ -401,14 +409,14 @@ static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk)
int err;
int i;
- if (rq->umem) {
+ if (rq->xsk_pool) {
int pages_desired = wqe_bulk << rq->wqe.info.log_num_frags;
/* Check in advance that we have enough frames, instead of
* allocating one-by-one, failing and moving frames to the
* Reuse Ring.
*/
- if (unlikely(!xsk_buff_can_alloc(rq->umem, pages_desired)))
+ if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, pages_desired)))
return -ENOMEM;
}
@@ -506,8 +514,8 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
/* Check in advance that we have enough frames, instead of allocating
* one-by-one, failing and moving frames to the Reuse Ring.
*/
- if (rq->umem &&
- unlikely(!xsk_buff_can_alloc(rq->umem, MLX5_MPWRQ_PAGES_PER_WQE))) {
+ if (rq->xsk_pool &&
+ unlikely(!xsk_buff_can_alloc(rq->xsk_pool, MLX5_MPWRQ_PAGES_PER_WQE))) {
err = -ENOMEM;
goto err;
}
@@ -755,7 +763,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
* the driver when it refills the Fill Ring.
* 2. Otherwise, busy poll by rescheduling the NAPI poll.
*/
- if (unlikely(alloc_err == -ENOMEM && rq->umem))
+ if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
return true;
return false;
@@ -1144,8 +1152,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, wi->offset,
frag_size, DMA_FROM_DEVICE);
- prefetchw(va); /* xdp_frame data area */
- prefetch(data);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(data);
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp);
if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp))
@@ -1184,7 +1192,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
while (byte_cnt) {
u16 frag_consumed_bytes =
@@ -1252,6 +1260,11 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
}
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (mlx5e_cqe_regb_chain(cqe))
+ if (!mlx5e_tc_update_skb(cqe, skb))
+ goto free_wqe;
+
napi_gro_receive(rq->cq.napi, skb);
free_wqe:
@@ -1399,7 +1412,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
if (unlikely(frag_offset >= PAGE_SIZE)) {
di++;
@@ -1451,8 +1464,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
frag_size, DMA_FROM_DEVICE);
- prefetchw(va); /* xdp_frame data area */
- prefetch(data);
+ net_prefetchw(va); /* xdp_frame data area */
+ net_prefetch(data);
mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp);
if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) {
@@ -1513,6 +1526,11 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
goto mpwrq_cqe_out;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+ if (mlx5e_cqe_regb_chain(cqe))
+ if (!mlx5e_tc_update_skb(cqe, skb))
+ goto mpwrq_cqe_out;
+
napi_gro_receive(rq->cq.napi, skb);
mpwrq_cqe_out:
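
The mlx5e_decompress_cqe() hunk above changes how the replayed title CQE gets its WQE counter: when the MINI_CQE_HW_STRIDX state bit was latched at RQ open, the stride index arrives in each mini-CQE and is copied as-is; otherwise the driver keeps accumulating consumed strides in software. A sketch of that selection (field widths and names are illustrative; the real fields are big-endian):

	#include <stdint.h>

	struct mini_cqe { uint16_t stridx; };
	struct title_cqe { uint16_t wqe_counter; };

	static void set_wqe_counter(struct title_cqe *title,
				    const struct mini_cqe *mini,
				    int hw_stridx, uint16_t *sw_counter,
				    uint16_t consumed_strides)
	{
		if (hw_stridx) {
			title->wqe_counter = mini->stridx;	/* provided by HW */
			return;
		}
		title->wqe_counter = *sw_counter;	/* software fallback */
		*sw_counter += consumed_strides;
	}
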
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 46790216ce86..ce8ab1f01876 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/prefetch.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <net/udp.h>
@@ -115,7 +114,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv)
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
skb_reserve(skb, NET_IP_ALIGN);
/* Reserve for ethernet and IP header */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f6383bc2bc3f..78f6a6f0a7e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -110,6 +110,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_nop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_packets) },
@@ -365,6 +367,8 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw)
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
s->tx_nop += sq_stats->nop;
+ s->tx_mpwqe_blks += sq_stats->mpwqe_blks;
+ s->tx_mpwqe_pkts += sq_stats->mpwqe_pkts;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
@@ -689,6 +693,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(802_3)
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
}
+#define MLX5E_READ_CTR64_BE_F(ptr, c) \
+ be64_to_cpu(*(__be64 *)((char *)ptr + \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)))
+
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats)
+{
+ u32 ppcnt_ieee_802_3[MLX5_ST_SZ_DW(ppcnt_reg)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+
+ if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev))
+ return;
+
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, ppcnt_ieee_802_3,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ pause_stats->tx_pause_frames =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ a_pause_mac_ctrl_frames_transmitted);
+ pause_stats->rx_pause_frames =
+ MLX5E_READ_CTR64_BE_F(ppcnt_ieee_802_3,
+ a_pause_mac_ctrl_frames_received);
+}
+
#define PPORT_2863_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
@@ -1539,6 +1572,8 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
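
MLX5E_READ_CTR64_BE_F above pulls a 64-bit big-endian counter out of the raw PPCNT register dump at a byte offset derived from the register layout, here to fill the new ethtool pause statistics. What the macro boils down to, as a hedged standalone helper (offset handling only; the layout macros are the driver's):

	#include <stdint.h>
	#include <string.h>

	/* Read a big-endian u64 at a byte offset into a register dump. */
	static uint64_t read_ctr64_be(const void *dump, size_t byte_off)
	{
		uint64_t be;

		memcpy(&be, (const char *)dump + byte_off, sizeof(be));
		return __builtin_bswap64(be);	/* be64_to_cpu on little-endian hosts */
	}
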
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 562263d62141..162daaadb0d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -105,6 +105,9 @@ void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx);
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv);
+void mlx5e_stats_pause_get(struct mlx5e_priv *priv,
+ struct ethtool_pause_stats *pause_stats);
+
/* Concrete NIC Stats */
struct mlx5e_sw_stats {
@@ -118,6 +121,8 @@ struct mlx5e_sw_stats {
u64 tx_tso_inner_bytes;
u64 tx_added_vlan_packets;
u64 tx_nop;
+ u64 tx_mpwqe_blks;
+ u64 tx_mpwqe_pkts;
u64 rx_lro_packets;
u64 rx_lro_bytes;
u64 rx_mcast_packets;
@@ -348,6 +353,8 @@ struct mlx5e_sq_stats {
u64 csum_partial_inner;
u64 added_vlan_packets;
u64 nop;
+ u64 mpwqe_blks;
+ u64 mpwqe_pkts;
#ifdef CONFIG_MLX5_EN_TLS
u64 tls_encrypted_packets;
u64 tls_encrypted_bytes;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 1c93f92d9210..e3a968e9e2a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -57,7 +57,6 @@
#include "en/rep/neigh.h"
#include "en_tc.h"
#include "eswitch.h"
-#include "esw/chains.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
@@ -66,20 +65,11 @@
#include "en/mod_hdr.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
+#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
+#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
-
-struct mlx5_nic_flow_attr {
- u32 action;
- u32 flow_tag;
- struct mlx5_modify_hdr *modify_hdr;
- u32 hairpin_tirn;
- u8 match_level;
- struct mlx5_flow_table *hairpin_ft;
- struct mlx5_fc *counter;
-};
-
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_FLAG_LAST_EXPORTED_BIT + 1)
enum {
@@ -153,11 +143,7 @@ struct mlx5e_tc_flow {
struct rcu_head rcu_head;
struct completion init_done;
int tunnel_id; /* the mapped tunnel id of this flow */
-
- union {
- struct mlx5_esw_flow_attr esw_attr[0];
- struct mlx5_nic_flow_attr nic_attr[0];
- };
+ struct mlx5_flow_attr *attr;
};
struct mlx5e_tc_flow_parse_attr {
@@ -170,7 +156,7 @@ struct mlx5e_tc_flow_parse_attr {
};
#define MLX5E_TC_TABLE_NUM_GROUPS 4
-#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[CHAIN_TO_REG] = {
@@ -191,6 +177,16 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
[MARK_TO_REG] = mark_to_reg_ct,
[LABELS_TO_REG] = labels_to_reg_ct,
[FTEID_TO_REG] = fteid_to_reg_ct,
+ /* For NIC rules we store the restore metadata directly
+ * into reg_b that is passed to SW since we don't
+ * jump between steering domains.
+ */
+ [NIC_CHAIN_TO_REG] = {
+ .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
+ .moffset = 0,
+ .mlen = 2,
+ },
+ [NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
@@ -244,6 +240,7 @@ mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data)
{
@@ -253,8 +250,7 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
char *modact;
int err;
- err = alloc_mod_hdr_actions(mdev, MLX5_FLOW_NAMESPACE_FDB,
- mod_hdr_acts);
+ err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
if (err)
return err;
@@ -275,6 +271,54 @@ mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
return 0;
}
+#define esw_offloads_mode(esw) (mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+
+static struct mlx5_tc_ct_priv *
+get_ct_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (esw_offloads_mode(esw)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->ct_priv;
+ }
+
+ return priv->fs.tc.ct;
+}
+
+struct mlx5_flow_handle *
+mlx5_tc_rule_insert(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw_offloads_mode(esw))
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+
+ return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
+}
+
+void
+mlx5_tc_rule_delete(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw_offloads_mode(esw)) {
+ mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+
+ return;
+ }
+
+ mlx5e_del_offloaded_nic_rule(priv, rule, attr);
+}
+
struct mlx5e_hairpin {
struct mlx5_hairpin *pair;
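
The new get_ct_priv()/mlx5_tc_rule_insert()/mlx5_tc_rule_delete() helpers above give the TC code a single entry point that dispatches on the eswitch mode, so callers such as the shared connection-tracking code no longer need to know whether a rule lands in the FDB or in the NIC chains. The dispatch shape, sketched generically (all names illustrative):

	#include <stdbool.h>

	struct rule;
	struct spec;
	struct attr;

	struct rule *fdb_rule_add(struct spec *s, struct attr *a);
	struct rule *nic_rule_add(struct spec *s, struct attr *a);

	/* One entry point; the caller never sees the steering domain. */
	static struct rule *rule_insert(bool esw_offloads_mode,
					struct spec *s, struct attr *a)
	{
		if (esw_offloads_mode)
			return fdb_rule_add(s, a);
		return nic_rule_add(s, a);
	}
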
@@ -370,7 +414,7 @@ static bool __flow_flag_test(struct mlx5e_tc_flow *flow, unsigned long flag)
#define flow_flag_test(flow, flag) __flow_flag_test(flow, \
MLX5E_TC_FLOW_FLAG_##flag)
-static bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
return flow_flag_test(flow, ESWITCH);
}
@@ -415,10 +459,7 @@ static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
return PTR_ERR(mh);
modify_hdr = mlx5e_mod_hdr_get(mh);
- if (mlx5e_is_eswitch_flow(flow))
- flow->esw_attr->modify_hdr = modify_hdr;
- else
- flow->nic_attr->modify_hdr = modify_hdr;
+ flow->attr->modify_hdr = modify_hdr;
flow->mh = mh;
return 0;
@@ -858,9 +899,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
attach_flow:
if (hpe->hp->num_channels > 1) {
flow_flag_set(flow, HAIRPIN_RSS);
- flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
+ flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
} else {
- flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
+ flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
}
flow->hpe = hpe;
@@ -890,129 +931,212 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
flow->hpe = NULL;
}
-static int
-mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_context *flow_context = &parse_attr->spec.flow_context;
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_flow_context *flow_context = &spec->flow_context;
+ struct mlx5_fs_chains *nic_chains = nic_chains(priv);
+ struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {
.action = attr->action,
.flags = FLOW_ACT_NO_APPEND,
};
- struct mlx5_fc *counter = NULL;
- int err, dest_ix = 0;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_table *ft;
+ int dest_ix = 0;
flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
- flow_context->flow_tag = attr->flow_tag;
-
- if (flow_flag_test(flow, HAIRPIN)) {
- err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
- if (err)
- return err;
+ flow_context->flow_tag = nic_attr->flow_tag;
- if (flow_flag_test(flow, HAIRPIN_RSS)) {
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dest_ix].ft = attr->hairpin_ft;
- } else {
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- dest[dest_ix].tir_num = attr->hairpin_tirn;
- }
+ if (attr->dest_ft) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = attr->dest_ft;
+ dest_ix++;
+ } else if (nic_attr->hairpin_ft) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dest_ix].ft = nic_attr->hairpin_ft;
+ dest_ix++;
+ } else if (nic_attr->hairpin_tirn) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
dest_ix++;
} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ if (attr->dest_chain) {
+ dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
+ attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
+ if (IS_ERR(dest[dest_ix].ft))
+ return ERR_CAST(dest[dest_ix].ft);
+ } else {
+ dest[dest_ix].ft = priv->fs.vlan.ft.t;
+ }
+ dest_ix++;
+ }
+
+ if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
+ flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
dest_ix++;
}
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ flow_act.modify_hdr = attr->modify_hdr;
+
+ mutex_lock(&tc->t_lock);
+ if (IS_ERR_OR_NULL(tc->t)) {
+ /* Create the root table here if it doesn't exist yet */
+ tc->t =
+ mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);
+
+ if (IS_ERR(tc->t)) {
+ mutex_unlock(&tc->t_lock);
+ netdev_err(priv->netdev,
+ "Failed to create tc offload table\n");
+ rule = ERR_CAST(priv->fs.tc.t);
+ goto err_ft_get;
+ }
+ }
+ mutex_unlock(&tc->t_lock);
+
+ if (attr->chain || attr->prio)
+ ft = mlx5_chains_get_table(nic_chains,
+ attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+ else
+ ft = attr->ft;
+
+ if (IS_ERR(ft)) {
+ rule = ERR_CAST(ft);
+ goto err_ft_get;
+ }
+
+ if (attr->outer_match_level != MLX5_MATCH_NONE)
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ rule = mlx5_add_flow_rules(ft, spec,
+ &flow_act, dest, dest_ix);
+ if (IS_ERR(rule))
+ goto err_rule;
+
+ return rule;
+
+err_rule:
+ if (attr->chain || attr->prio)
+ mlx5_chains_put_table(nic_chains,
+ attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+err_ft_get:
+ if (attr->dest_chain)
+ mlx5_chains_put_table(nic_chains,
+ attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
+
+ return ERR_CAST(rule);
+}
+
+static int
+mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_fc *counter = NULL;
+ int err;
+
+ if (flow_flag_test(flow, HAIRPIN)) {
+ err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
+ if (err)
+ return err;
+ }
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
counter = mlx5_fc_create(dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
- dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
- dest[dest_ix].counter_id = mlx5_fc_id(counter);
- dest_ix++;
attr->counter = counter;
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
- flow_act.modify_hdr = attr->modify_hdr;
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (err)
return err;
}
- mutex_lock(&priv->fs.tc.t_lock);
- if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
- struct mlx5_flow_table_attr ft_attr = {};
- int tc_grp_size, tc_tbl_size, tc_num_grps;
- u32 max_flow_counter;
-
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
-
- tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
-
- tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
- BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
- tc_num_grps = MLX5E_TC_TABLE_NUM_GROUPS;
-
- ft_attr.prio = MLX5E_TC_PRIO;
- ft_attr.max_fte = tc_tbl_size;
- ft_attr.level = MLX5E_TC_FT_LEVEL;
- ft_attr.autogroup.max_num_groups = tc_num_grps;
- priv->fs.tc.t =
- mlx5_create_auto_grouped_flow_table(priv->fs.ns,
- &ft_attr);
- if (IS_ERR(priv->fs.tc.t)) {
- mutex_unlock(&priv->fs.tc.t_lock);
- NL_SET_ERR_MSG_MOD(extack,
- "Failed to create tc offload table");
- netdev_err(priv->netdev,
- "Failed to create tc offload table\n");
- return PTR_ERR(priv->fs.tc.t);
- }
- }
+ if (flow_flag_test(flow, CT))
+ flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
+ attr, &parse_attr->mod_hdr_acts);
+ else
+ flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
+ attr);
- if (attr->match_level != MLX5_MATCH_NONE)
- parse_attr->spec.match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ return PTR_ERR_OR_ZERO(flow->rule[0]);
+}
- flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
- &flow_act, dest, dest_ix);
- mutex_unlock(&priv->fs.tc.t_lock);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr)
+{
+ struct mlx5_fs_chains *nic_chains = nic_chains(priv);
- return PTR_ERR_OR_ZERO(flow->rule[0]);
+ mlx5_del_flow_rules(rule);
+
+ if (attr->chain || attr->prio)
+ mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
+ MLX5E_TC_FT_LEVEL);
+
+ if (attr->dest_chain)
+ mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
+ MLX5E_TC_FT_LEVEL);
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
- struct mlx5_fc *counter = NULL;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
+
+ flow_flag_clear(flow, OFFLOADED);
- counter = attr->counter;
- if (!IS_ERR_OR_NULL(flow->rule[0]))
- mlx5_del_flow_rules(flow->rule[0]);
- mlx5_fc_destroy(priv->mdev, counter);
+ if (flow_flag_test(flow, CT))
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+ else if (!IS_ERR_OR_NULL(flow->rule[0]))
+ mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);
+ /* Remove root table if no rules are left to avoid
+ * extra steering hops.
+ */
mutex_lock(&priv->fs.tc.t_lock);
- if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && priv->fs.tc.t) {
- mlx5_destroy_flow_table(priv->fs.tc.t);
+ if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
+ !IS_ERR_OR_NULL(tc->t)) {
+ mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
priv->fs.tc.t = NULL;
}
mutex_unlock(&priv->fs.tc.t_lock);
+ kvfree(attr->parse_attr);
+
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
+ mlx5_fc_destroy(priv->mdev, attr->counter);
+
if (flow_flag_test(flow, HAIRPIN))
mlx5e_hairpin_flow_del(priv, flow);
+
+ kfree(flow->attr);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
@@ -1035,7 +1159,7 @@ static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct mlx5_flow_handle *rule;
@@ -1043,7 +1167,8 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (flow_flag_test(flow, CT)) {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
- return mlx5_tc_ct_flow_offload(flow->priv, flow, spec, attr,
+ return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+ flow, spec, attr,
mod_hdr_acts);
}
@@ -1051,7 +1176,7 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
if (IS_ERR(rule))
return rule;
- if (attr->split_count) {
+ if (attr->esw_attr->split_count) {
flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
if (IS_ERR(flow->rule[1])) {
mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
@@ -1065,16 +1190,16 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
flow_flag_clear(flow, OFFLOADED);
if (flow_flag_test(flow, CT)) {
- mlx5_tc_ct_delete_flow(flow->priv, flow, attr);
+ mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
return;
}
- if (attr->split_count)
+ if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
@@ -1085,18 +1210,24 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec)
{
- struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_attr *slow_attr;
struct mlx5_flow_handle *rule;
- memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
- slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr.split_count = 0;
- slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!slow_attr)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->esw_attr->split_count = 0;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, &slow_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
if (!IS_ERR(rule))
flow_flag_set(flow, SLOW);
+ kfree(slow_attr);
+
return rule;
}
@@ -1104,14 +1235,21 @@ static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr slow_attr;
+ struct mlx5_flow_attr *slow_attr;
- memcpy(&slow_attr, flow->esw_attr, sizeof(slow_attr));
- slow_attr.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- slow_attr.split_count = 0;
- slow_attr.flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
- mlx5e_tc_unoffload_fdb_rules(esw, flow, &slow_attr);
+ slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
+ if (!slow_attr) {
+ mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
+ return;
+ }
+
+ memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
+ slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ slow_attr->esw_attr->split_count = 0;
+ slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
flow_flag_clear(flow, SLOW);
+ kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
@@ -1169,9 +1307,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
struct net_device *out_dev, *encap_dev = NULL;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_fc *counter = NULL;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *out_priv;
@@ -1180,7 +1319,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0;
int out_index;
- if (!mlx5_esw_chains_prios_supported(esw) && attr->prio != 1) {
+ if (!mlx5_chains_prios_supported(esw_chains(esw)) && attr->prio != 1) {
NL_SET_ERR_MSG_MOD(extack,
"E-switch priorities unsupported, upgrade FW");
return -EOPNOTSUPP;
@@ -1191,14 +1330,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
* FDB_FT_CHAIN which is outside tc range.
* See mlx5e_rep_setup_ft_cb().
*/
- max_chain = mlx5_esw_chains_get_chain_range(esw);
+ max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Requested chain is out of supported range");
return -EOPNOTSUPP;
}
- max_prio = mlx5_esw_chains_get_prio_range(esw);
+ max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
if (attr->prio > max_prio) {
NL_SET_ERR_MSG_MOD(extack,
"Requested priority is out of supported range");
@@ -1211,10 +1350,13 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
return err;
}
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
int mirred_ifindex;
- if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
+ if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
continue;
mirred_ifindex = parse_attr->mirred_ifindex[out_index];
@@ -1227,8 +1369,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
- attr->dests[out_index].rep = rpriv->rep;
- attr->dests[out_index].mdev = out_priv->mdev;
+ esw_attr->dests[out_index].rep = rpriv->rep;
+ esw_attr->dests[out_index].mdev = out_priv->mdev;
}
err = mlx5_eswitch_add_vlan_action(esw, attr);
@@ -1244,7 +1386,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
}
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
- counter = mlx5_fc_create(attr->counter_dev, true);
+ counter = mlx5_fc_create(esw_attr->counter_dev, true);
if (IS_ERR(counter))
return PTR_ERR(counter);
@@ -1270,7 +1412,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
- struct mlx5_flow_spec *spec = &flow->esw_attr->parse_attr->spec;
+ struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -1285,7 +1427,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
int out_index;
mlx5e_put_flow_tunnel_id(flow);
@@ -1306,22 +1448,24 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_eswitch_del_vlan_action(esw, attr);
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
- if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
+ if (attr->esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
kfree(attr->parse_attr->tun_info[out_index]);
}
kvfree(attr->parse_attr);
- mlx5_tc_ct_match_del(priv, &flow->esw_attr->ct_attr);
+ mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
mlx5e_detach_mod_hdr(priv, flow);
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
- mlx5_fc_destroy(attr->counter_dev, attr->counter);
+ mlx5_fc_destroy(attr->esw_attr->counter_dev, attr->counter);
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
+
+ kfree(flow->attr);
}
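Each flow now owns a heap-allocated attr, and the FDB delete path above releases it with kfree(flow->attr). A lifetime sketch; the NIC delete path is not in this hunk, so its matching kfree is assumed:

/*   mlx5e_alloc_flow()        flow->attr = mlx5_alloc_flow_attr(ns);
 *   mlx5e_tc_del_fdb_flow()   kfree(flow->attr);     (above)
 *   mlx5e_tc_del_nic_flow()   kfree(flow->attr);     (assumed symmetric)
 */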
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -1331,6 +1475,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
@@ -1353,8 +1498,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (!mlx5e_is_offloaded_flow(flow))
continue;
- esw_attr = flow->esw_attr;
- spec = &esw_attr->parse_attr->spec;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
esw_attr->dests[flow->tmp_efi_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_efi_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
@@ -1374,7 +1520,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
if (!all_flow_encaps_valid)
continue;
/* update from slow path rule to encap rule */
- rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
+ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
@@ -1394,7 +1540,9 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
struct list_head *flow_list)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
+ struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
struct mlx5e_tc_flow *flow;
int err;
@@ -1402,12 +1550,14 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow))
continue;
- spec = &flow->esw_attr->parse_attr->spec;
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ spec = &attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
/* mark the flow's encap dest as non-valid */
- flow->esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
+ esw_attr->dests[flow->tmp_efi_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
@@ -1416,7 +1566,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
continue;
}
- mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
+ mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
@@ -1429,10 +1579,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
- if (mlx5e_is_eswitch_flow(flow))
- return flow->esw_attr->counter;
- else
- return flow->nic_attr->counter;
+ return flow->attr->counter;
}
/* Takes reference to all flows attached to encap and adds the flows to
@@ -1798,11 +1945,11 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct netlink_ext_ack *extack = f->common.extack;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
struct flow_match_enc_opts enc_opts_match;
struct tunnel_match_enc_opts tun_enc_opts;
struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5_flow_attr *attr = flow->attr;
struct mlx5e_rep_priv *uplink_rpriv;
struct tunnel_match_key tunnel_key;
bool enc_opts_is_dont_care = true;
@@ -1866,7 +2013,7 @@ static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
} else {
mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
err = mlx5e_tc_match_to_reg_set(priv->mdev,
- mod_hdr_acts,
+ mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
TUNNEL_TO_REG, value);
if (err)
goto err_set;
@@ -1952,8 +2099,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if (!mlx5e_is_eswitch_flow(flow))
return -EOPNOTSUPP;
- needs_mapping = !!flow->esw_attr->chain;
- sets_mapping = !flow->esw_attr->chain && flow_has_tc_fwd_action(f);
+ needs_mapping = !!flow->attr->chain;
+ sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
*match_inner = !needs_mapping;
if ((needs_mapping || sets_mapping) &&
@@ -1965,7 +2112,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow->esw_attr->chain) {
+ if (!flow->attr->chain) {
err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
match_level);
if (err) {
@@ -1980,7 +2127,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
* object
*/
if (!netif_is_bareudp(filter_dev))
- flow->esw_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
+ flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
}
if (!needs_mapping && !sets_mapping)
@@ -2483,12 +2630,9 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}
- if (is_eswitch_flow) {
- flow->esw_attr->inner_match_level = inner_match_level;
- flow->esw_attr->outer_match_level = outer_match_level;
- } else {
- flow->nic_attr->match_level = non_tunnel_match_level;
- }
+ flow->attr->inner_match_level = inner_match_level;
+ flow->attr->outer_match_level = outer_match_level;
+
return err;
}
@@ -2614,6 +2758,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
+ OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@@ -3090,7 +3235,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
* we can't restore ct state
*/
if (!ct_clear && modify_tuple &&
- mlx5_tc_ct_add_no_trk_match(priv, spec)) {
+ mlx5_tc_ct_add_no_trk_match(spec)) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload tuple modify header with ct matches");
netdev_info(priv->netdev,
@@ -3121,12 +3266,13 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
bool ct_flow = false, ct_clear = false;
u32 actions;
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+ actions = flow->attr->action;
+
if (mlx5e_is_eswitch_flow(flow)) {
- actions = flow->esw_attr->action;
- ct_clear = flow->esw_attr->ct_attr.ct_action &
- TCA_CT_ACT_CLEAR;
- ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- if (flow->esw_attr->split_count && ct_flow) {
+ if (flow->attr->esw_attr->split_count && ct_flow) {
/* All registers used by ct are cleared when using
* split rules.
*/
@@ -3134,8 +3280,6 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
"Can't offload mirroring with action ct");
return false;
}
- } else {
- actions = flow->nic_attr->action;
}
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -3233,15 +3377,67 @@ add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
extack);
}
+static int validate_goto_chain(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
+ const struct flow_action_entry *act,
+ u32 actions,
+ struct netlink_ext_ack *extack)
+{
+ bool is_esw = mlx5e_is_eswitch_flow(flow);
+ struct mlx5_flow_attr *attr = flow->attr;
+ bool ft_flow = mlx5e_is_ft_flow(flow);
+ u32 dest_chain = act->chain_index;
+ struct mlx5_fs_chains *chains;
+ struct mlx5_eswitch *esw;
+ u32 reformat_and_fwd;
+ u32 max_chain;
+
+ esw = priv->mdev->priv.eswitch;
+ chains = is_esw ? esw_chains(esw) : nic_chains(priv);
+ max_chain = mlx5_chains_get_chain_range(chains);
+ reformat_and_fwd = is_esw ?
+ MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
+ MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
+
+ if (ft_flow) {
+ NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!mlx5_chains_backwards_supported(chains) &&
+ dest_chain <= attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto lower numbered chain isn't supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (dest_chain > max_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Requested destination chain is out of supported range");
+ return -EOPNOTSUPP;
+ }
+
+ if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
+ !reformat_and_fwd) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Goto chain is not allowed if action has reformat or decap");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
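validate_goto_chain() (replacing the FDB-only mlx5_validate_goto_chain() removed later in this patch) now serves both namespaces by selecting the chains object and the reformat_and_fwd capability per flow type. A worked outcome table for non-ft flows under assumed capabilities — no backwards-goto support, max_chain = 3, reformat_and_fwd = 0:

/* attr->chain   act->chain_index   other actions   result
 *      0               2           -               0 (allowed)
 *      2               1           -               -EOPNOTSUPP (lower chain)
 *      0               5           -               -EOPNOTSUPP (out of range)
 *      0               2           DECAP           -EOPNOTSUPP (no reformat_and_fwd)
 */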
+
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct mlx5e_tc_flow *flow,
struct netlink_ext_ack *extack)
{
- struct mlx5_nic_flow_attr *attr = flow->nic_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
const struct flow_action_entry *act;
+ struct mlx5_nic_flow_attr *nic_attr;
u32 action = 0;
int err, i;
@@ -3252,7 +3448,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
- attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+ nic_attr = attr->nic_attr;
+
+ nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -3273,8 +3471,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
@@ -3319,10 +3516,26 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EINVAL;
}
- attr->flow_tag = mark;
+ nic_attr->flow_tag = mark;
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
break;
+ case FLOW_ACTION_GOTO:
+ err = validate_goto_chain(priv, flow, act, action,
+ extack);
+ if (err)
+ return err;
+
+ action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->dest_chain = act->chain_index;
+ break;
+ case FLOW_ACTION_CT:
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
+ if (err)
+ return err;
+
+ flow_flag_set(flow, CT);
+ break;
default:
NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
return -EOPNOTSUPP;
@@ -3345,6 +3558,18 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
}
attr->action = action;
+
+ if (attr->dest_chain) {
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
+ }
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+ attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -3476,8 +3701,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
bool *encap_valid)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
const struct ip_tunnel_info *tun_info;
struct encap_key key;
struct mlx5e_encap_entry *e;
@@ -3563,8 +3788,8 @@ attach_flow:
flow->encaps[out_index].index = out_index;
*encap_dev = e->out_dev;
if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
- attr->dests[out_index].pkt_reformat = e->pkt_reformat;
- attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
+ attr->esw_attr->dests[out_index].pkt_reformat = e->pkt_reformat;
+ attr->esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
*encap_valid = true;
} else {
*encap_valid = false;
@@ -3591,14 +3816,14 @@ static int mlx5e_attach_decap(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_decap_entry *d;
struct mlx5e_decap_key key;
uintptr_t hash_key;
int err = 0;
- parse_attr = attr->parse_attr;
+ parse_attr = flow->attr->parse_attr;
if (sizeof(parse_attr->eth) > MLX5_CAP_ESW(priv->mdev, max_encap_header_size)) {
NL_SET_ERR_MSG_MOD(extack,
"encap header larger than max supported");
@@ -3740,7 +3965,7 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
}
static int add_vlan_push_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct net_device **out_dev,
u32 *action)
{
@@ -3753,7 +3978,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
if (err)
return err;
@@ -3766,7 +3991,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
u32 *action)
{
struct flow_action_entry vlan_act = {
@@ -3777,7 +4002,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
if (err)
return err;
}
@@ -3838,59 +4063,20 @@ static bool is_duplicated_output_device(struct net_device *dev,
return false;
}
-static int mlx5_validate_goto_chain(struct mlx5_eswitch *esw,
- struct mlx5e_tc_flow *flow,
- const struct flow_action_entry *act,
- u32 actions,
- struct netlink_ext_ack *extack)
-{
- u32 max_chain = mlx5_esw_chains_get_chain_range(esw);
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- bool ft_flow = mlx5e_is_ft_flow(flow);
- u32 dest_chain = act->chain_index;
-
- if (ft_flow) {
- NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
- return -EOPNOTSUPP;
- }
-
- if (!mlx5_esw_chains_backwards_supported(esw) &&
- dest_chain <= attr->chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto lower numbered chain isn't supported");
- return -EOPNOTSUPP;
- }
- if (dest_chain > max_chain) {
- NL_SET_ERR_MSG_MOD(extack,
- "Requested destination chain is out of supported range");
- return -EOPNOTSUPP;
- }
-
- if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
- MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_and_fwd_to_table)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Goto chain is not allowed if action has reformat or decap");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
static int verify_uplink_forwarding(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct net_device *out_dev,
struct netlink_ext_ack *extack)
{
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_rep_priv *rep_priv;
/* Forwarding non encapsulated traffic between
* uplink ports is allowed only if
* termination_table_raw_traffic cap is set.
*
- * Input vport was stored esw_attr->in_rep.
+ * Input vport was stored in attr->in_rep.
* In LAG case, *priv* is the private data of
* uplink which may be not the input vport.
*/
@@ -3925,13 +4111,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
{
struct pedit_headers_action hdrs[2] = {};
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
const struct ip_tunnel_info *info = NULL;
+ struct mlx5_flow_attr *attr = flow->attr;
int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
bool ft_flow = mlx5e_is_ft_flow(flow);
const struct flow_action_entry *act;
+ struct mlx5_esw_flow_attr *esw_attr;
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
@@ -3944,12 +4131,25 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
FLOW_ACTION_HW_STATS_DELAYED_BIT))
return -EOPNOTSUPP;
+ esw_attr = attr->esw_attr;
+ parse_attr = attr->parse_attr;
+
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
+ case FLOW_ACTION_TRAP:
+ if (!flow_offload_has_one_action(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "action trap is supported as a sole action only");
+ return -EOPNOTSUPP;
+ }
+ action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT);
+ attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
+ break;
case FLOW_ACTION_MPLS_PUSH:
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
reformat_l2_to_l3_tunnel) ||
@@ -3990,7 +4190,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
}
break;
case FLOW_ACTION_CSUM:
@@ -4027,27 +4227,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+ if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
NL_SET_ERR_MSG_MOD(extack,
"can't support more output ports, can't offload forwarding");
netdev_warn(priv->netdev,
"can't support more than %d output ports, can't offload forwarding\n",
- attr->out_count);
+ esw_attr->out_count);
return -EOPNOTSUPP;
}
action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
if (encap) {
- parse_attr->mirred_ifindex[attr->out_count] =
+ parse_attr->mirred_ifindex[esw_attr->out_count] =
out_dev->ifindex;
- parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
- if (!parse_attr->tun_info[attr->out_count])
+ parse_attr->tun_info[esw_attr->out_count] = dup_tun_info(info);
+ if (!parse_attr->tun_info[esw_attr->out_count])
return -ENOMEM;
encap = false;
- attr->dests[attr->out_count].flags |=
+ esw_attr->dests[esw_attr->out_count].flags |=
MLX5_ESW_DEST_ENCAP;
- attr->out_count++;
+ esw_attr->out_count++;
/* attr->dests[].rep is resolved when we
* handle encap
*/
@@ -4096,9 +4296,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
out_priv = netdev_priv(out_dev);
rpriv = out_priv->ppriv;
- attr->dests[attr->out_count].rep = rpriv->rep;
- attr->dests[attr->out_count].mdev = out_priv->mdev;
- attr->out_count++;
+ esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
+ esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
+ esw_attr->out_count++;
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -4136,12 +4336,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
act, parse_attr, hdrs,
&action, extack);
} else {
- err = parse_tc_vlan_action(priv, act, attr, &action);
+ err = parse_tc_vlan_action(priv, act, esw_attr, &action);
}
if (err)
return err;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
@@ -4151,14 +4351,14 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- attr->split_count = attr->out_count;
+ esw_attr->split_count = esw_attr->out_count;
break;
case FLOW_ACTION_TUNNEL_DECAP:
decap = true;
break;
case FLOW_ACTION_GOTO:
- err = mlx5_validate_goto_chain(esw, flow, act, action,
- extack);
+ err = validate_goto_chain(priv, flow, act, action,
+ extack);
if (err)
return err;
@@ -4166,7 +4366,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
- err = mlx5_tc_ct_parse_action(priv, attr, act, extack);
+ err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
if (err)
return err;
@@ -4205,7 +4405,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
- attr->split_count = 0;
+ esw_attr->split_count = 0;
}
}
@@ -4245,7 +4445,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
NL_SET_ERR_MSG_MOD(extack,
"current firmware doesn't support split rule for port mirroring");
netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
@@ -4296,25 +4496,37 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
- struct mlx5_esw_flow_attr *attr = flow->esw_attr;
- bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ struct mlx5_flow_attr *attr = flow->attr;
+ bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
- bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
+ bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
MLX5_DEVCOM_ESW_OFFLOADS);
if (!esw_paired)
return false;
- if ((mlx5_lag_is_sriov(attr->in_mdev) ||
- mlx5_lag_is_multipath(attr->in_mdev)) &&
+ if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
+ mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
(is_rep_ingress || act_is_encap))
return true;
return false;
}
+struct mlx5_flow_attr *
+mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
+{
+ u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
+ sizeof(struct mlx5_esw_flow_attr) :
+ sizeof(struct mlx5_nic_flow_attr);
+ struct mlx5_flow_attr *attr;
+
+ return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
+}
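mlx5_alloc_flow_attr() sizes a single allocation as the common header plus the namespace-specific tail, reached through the zero-length union members of struct mlx5_flow_attr (the en_tc.h hunk below also defines ns_to_attr_sz() for the same computation). A standalone sketch of the idiom with simplified stand-in types:

#include <stdlib.h>

struct esw_tail { int split_count; };    /* stand-in for mlx5_esw_flow_attr */
struct nic_tail { unsigned flow_tag; };  /* stand-in for mlx5_nic_flow_attr */

struct flow_attr {
	unsigned action;                 /* ...common fields... */
	union {
		struct esw_tail esw[0];
		struct nic_tail nic[0];
	};
};

static struct flow_attr *alloc_attr(int is_fdb)
{
	size_t tail = is_fdb ? sizeof(struct esw_tail)
			     : sizeof(struct nic_tail);

	/* One zeroed block: the header first, the chosen tail right
	 * behind it, so attr->esw / attr->nic point past the header. */
	return calloc(1, sizeof(struct flow_attr) + tail);
}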
+
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct flow_cls_offload *f, unsigned long flow_flags,
@@ -4322,19 +4534,26 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct mlx5e_tc_flow **__flow)
{
struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5_flow_attr *attr;
struct mlx5e_tc_flow *flow;
- int out_index, err;
+ int err = -ENOMEM;
+ int out_index;
- flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
- if (!parse_attr || !flow) {
- err = -ENOMEM;
+ if (!parse_attr || !flow)
goto err_free;
- }
- flow->cookie = f->cookie;
flow->flags = flow_flags;
+ flow->cookie = f->cookie;
flow->priv = priv;
+
+ attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
+ if (!attr)
+ goto err_free;
+
+ flow->attr = attr;
+
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
INIT_LIST_HEAD(&flow->encaps[out_index].list);
INIT_LIST_HEAD(&flow->hairpin);
@@ -4354,7 +4573,17 @@ err_free:
}
static void
-mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
+mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct flow_cls_offload *f)
+{
+ attr->parse_attr = parse_attr;
+ attr->chain = f->common.chain_index;
+ attr->prio = f->common.prio;
+}
+
+static void
+mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
struct mlx5e_priv *priv,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct flow_cls_offload *f,
@@ -4362,10 +4591,9 @@ mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
struct mlx5_core_dev *in_mdev)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
- esw_attr->parse_attr = parse_attr;
- esw_attr->chain = f->common.chain_index;
- esw_attr->prio = f->common.prio;
+ mlx5e_flow_attr_init(attr, parse_attr, f);
esw_attr->in_rep = in_rep;
esw_attr->in_mdev = in_mdev;
@@ -4399,7 +4627,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto out;
parse_attr->filter_dev = filter_dev;
- mlx5e_flow_esw_attr_init(flow->esw_attr,
+ mlx5e_flow_esw_attr_init(flow->attr,
priv, parse_attr,
f, in_rep, in_mdev);
@@ -4409,8 +4637,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
goto err_free;
/* actions validation depends on parsing the ct matches first */
- err = mlx5_tc_ct_match_add(priv, &parse_attr->spec, f,
- &flow->esw_attr->ct_attr, extack);
+ err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
+ &flow->attr->ct_attr, extack);
if (err)
goto err_free;
@@ -4441,6 +4669,7 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
+ struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5e_rep_priv *peer_urpriv;
@@ -4460,15 +4689,15 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
* original flow and packets redirected from uplink use the
* peer mdev.
*/
- if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
in_mdev = peer_priv->mdev;
else
in_mdev = priv->mdev;
- parse_attr = flow->esw_attr->parse_attr;
+ parse_attr = flow->attr->parse_attr;
peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
parse_attr->filter_dev,
- flow->esw_attr->in_rep, in_mdev);
+ attr->in_rep, in_mdev);
if (IS_ERR(peer_flow)) {
err = PTR_ERR(peer_flow);
goto out;
@@ -4532,9 +4761,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
int attr_size, err;
- /* multi-chain not supported for NIC rules */
- if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+ return -EOPNOTSUPP;
+ } else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
return -EOPNOTSUPP;
+ }
flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
attr_size = sizeof(struct mlx5_nic_flow_attr);
@@ -4544,11 +4776,18 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
goto out;
parse_attr->filter_dev = filter_dev;
+ mlx5e_flow_attr_init(flow->attr, parse_attr, f);
+
err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
f, filter_dev);
if (err)
goto err_free;
+ err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
+ &flow->attr->ct_attr, extack);
+ if (err)
+ goto err_free;
+
err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
if (err)
goto err_free;
@@ -4558,14 +4797,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv,
goto err_free;
flow_flag_set(flow, OFFLOADED);
- kvfree(parse_attr);
*__flow = flow;
return 0;
err_free:
mlx5e_flow_put(priv, flow);
- kvfree(parse_attr);
out:
return err;
}
@@ -4940,9 +5177,27 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
}
+static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
+{
+ int tc_grp_size, tc_tbl_size;
+ u32 max_flow_counter;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
+
+ tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
+ BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+
+ return tc_tbl_size;
+}
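A worked instance of the sizing above, with illustrative values (neither constant's value appears in this hunk; 4 groups and a large MLX5E_TC_TABLE_MAX_GROUP_SIZE are assumed):

/* Assume max_flow_counter_31_16 = 0, max_flow_counter_15_0 = 0x8000,
 * log_max_ft_size = 16:
 *
 *   max_flow_counter = (0 << 16) | 0x8000          = 0x8000
 *   tc_grp_size      = min(0x8000, MAX_GROUP_SIZE) = 0x8000
 *   tc_tbl_size      = min(0x8000 * 4, 1 << 16)    = 0x10000
 */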
+
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_chains_attr attr = {};
int err;
mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
@@ -4954,6 +5209,27 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err)
return err;
+ if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+ attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+ attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ }
+ attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
+ attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
+ attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
+ attr.default_ft = priv->fs.vlan.ft.t;
+
+ tc->chains = mlx5_chains_create(dev, &attr);
+ if (IS_ERR(tc->chains)) {
+ err = PTR_ERR(tc->chains);
+ goto err_chains;
+ }
+
+ tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
+ MLX5_FLOW_NAMESPACE_KERNEL);
+ if (IS_ERR(tc->ct))
+ goto err_ct;
+
tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
err = register_netdevice_notifier_dev_net(priv->netdev,
&tc->netdevice_nb,
@@ -4961,8 +5237,17 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
if (err) {
tc->netdevice_nb.notifier_call = NULL;
mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+ goto err_reg;
}
+ return 0;
+
+err_reg:
+ mlx5_tc_ct_clean(tc->ct);
+err_ct:
+ mlx5_chains_destroy(tc->chains);
+err_chains:
+ rhashtable_destroy(&tc->ht);
return err;
}
@@ -4987,28 +5272,38 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
mutex_destroy(&tc->hairpin_tbl_lock);
- rhashtable_destroy(&tc->ht);
+ rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
if (!IS_ERR_OR_NULL(tc->t)) {
- mlx5_destroy_flow_table(tc->t);
+ mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
tc->t = NULL;
}
mutex_destroy(&tc->t_lock);
+
+ mlx5_tc_ct_clean(tc->ct);
+ mlx5_chains_destroy(tc->chains);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
struct mlx5_rep_uplink_priv *uplink_priv;
- struct mlx5e_rep_priv *priv;
+ struct mlx5e_rep_priv *rpriv;
struct mapping_ctx *mapping;
- int err;
+ struct mlx5_eswitch *esw;
+ struct mlx5e_priv *priv;
+ int err = 0;
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
- priv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
+ priv = netdev_priv(rpriv->netdev);
+ esw = priv->mdev->priv.eswitch;
- err = mlx5_tc_ct_init(uplink_priv);
- if (err)
+ uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
+ esw_chains(esw),
+ &esw->offloads.mod_hdr,
+ MLX5_FLOW_NAMESPACE_FDB);
+ if (IS_ERR(uplink_priv->ct_priv)) {
+ err = PTR_ERR(uplink_priv->ct_priv);
+ goto err_ct;
+ }
mapping = mapping_create(sizeof(struct tunnel_match_key),
@@ -5037,7 +5332,7 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
- mlx5_tc_ct_clean(uplink_priv);
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
err_ct:
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
@@ -5051,10 +5346,11 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
+
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
- mlx5_tc_ct_clean(uplink_priv);
+ mlx5_tc_ct_clean(uplink_priv->ct_priv);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
@@ -5119,3 +5415,44 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EOPNOTSUPP;
}
}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain = 0, chain_tag, reg_b, zone_restore_id;
+ struct mlx5e_priv *priv = netdev_priv(skb->dev);
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct tc_skb_ext *tc_skb_ext;
+ int err;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+
+ chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+
+ err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
+ if (err) {
+ netdev_dbg(priv->netdev,
+ "Couldn't find chain for chain tag: %d, err: %d\n",
+ chain_tag, err);
+ return false;
+ }
+
+ if (chain) {
+ tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
+ if (WARN_ON(!tc_skb_ext))
+ return false;
+
+ tc_skb_ext->chain = chain;
+
+ zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
+ ZONE_RESTORE_MAX;
+
+ if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
+ zone_restore_id))
+ return false;
+ }
+#endif /* CONFIG_NET_TC_SKB_EXT */
+
+ return true;
+}
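A hedged sketch of the intended RX-side pairing (the caller name is illustrative; the actual hook sits in the RX path, outside this excerpt): mlx5e_cqe_regb_chain() is the cheap gate, and mlx5e_tc_update_skb() does the mapping work only when reg_b carries a chain tag.

static inline bool rx_restore_tc_chain(struct mlx5_cqe64 *cqe,
				       struct sk_buff *skb)
{
	/* Fast path: no chain tag in reg_b, nothing to restore. */
	if (!mlx5e_cqe_regb_chain(cqe))
		return true;

	/* Attach the tc skb extension (and CT state); on failure the
	 * caller is expected to drop the skb. */
	return mlx5e_tc_update_skb(cqe, skb);
}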
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 437f680728fd..3b979008143d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -35,17 +35,57 @@
#include <net/pkt_cls.h>
#include "en.h"
+#include "eswitch.h"
+#include "en/tc_ct.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
#ifdef CONFIG_MLX5_ESWITCH
+#define NIC_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
+ sizeof(struct mlx5_nic_flow_attr))
+#define ESW_FLOW_ATTR_SZ (sizeof(struct mlx5_flow_attr) +\
+ sizeof(struct mlx5_esw_flow_attr))
+#define ns_to_attr_sz(ns) (((ns) == MLX5_FLOW_NAMESPACE_FDB) ?\
+ ESW_FLOW_ATTR_SZ :\
+ NIC_FLOW_ATTR_SZ)
+
+
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
struct net_device *tun_dev;
};
+struct mlx5_nic_flow_attr {
+ u32 flow_tag;
+ u32 hairpin_tirn;
+ struct mlx5_flow_table *hairpin_ft;
+};
+
+struct mlx5_flow_attr {
+ u32 action;
+ struct mlx5_fc *counter;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_ct_attr ct_attr;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ u32 chain;
+ u16 prio;
+ u32 dest_chain;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *dest_ft;
+ u8 inner_match_level;
+ u8 outer_match_level;
+ u32 flags;
+ union {
+ struct mlx5_esw_flow_attr esw_attr[0];
+ struct mlx5_nic_flow_attr nic_attr[0];
+ };
+};
+
+#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
+#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
+
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct tunnel_match_key {
@@ -90,6 +130,7 @@ enum {
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
+bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
@@ -133,6 +174,8 @@ enum mlx5e_tc_attr_to_reg {
MARK_TO_REG,
LABELS_TO_REG,
FTEID_TO_REG,
+ NIC_CHAIN_TO_REG,
+ NIC_ZONE_RESTORE_TO_REG,
};
struct mlx5e_tc_attr_to_reg_mapping {
@@ -150,6 +193,7 @@ bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
int mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
+ enum mlx5_flow_namespace_type ns,
enum mlx5e_tc_attr_to_reg type,
u32 data);
@@ -181,14 +225,42 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv);
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
void *cb_priv);
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
+struct mlx5_flow_handle *
+mlx5_tc_rule_insert(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void
+mlx5_tc_rule_delete(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
+
#endif /* CONFIG_MLX5_CLS_ACT */
+struct mlx5_flow_attr *mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type);
+
+struct mlx5_flow_handle *
+mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr);
+void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr);
+
#else /* CONFIG_MLX5_ESWITCH */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
@@ -203,4 +275,29 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
#endif
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
+{
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+ u32 chain, reg_b;
+
+ reg_b = be32_to_cpu(cqe->ft_metadata);
+
+ chain = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
+ if (chain)
+ return true;
+#endif
+
+ return false;
+}
+
+bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
+#else /* CONFIG_MLX5_CLS_ACT */
+static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
+{ return false; }
+static inline bool
+mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb)
+{ return true; }
+#endif
+
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index da596de3abba..82b4419af9d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -144,9 +144,29 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
+/* RM 2311217: no L4 inner checksum for IPsec tunnel type packets */
+static void
+ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (skb->encapsulation) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ sq->stats->csum_partial_inner++;
+ } else {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ sq->stats->csum_partial++;
+ }
+}
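The resulting checksum flags for IPsec-marked packets, per the helper above:

/*  !skb->encapsulation:  cs_flags = L3_CSUM | L4_CSUM
 *   skb->encapsulation:  cs_flags = L3_CSUM | L3_INNER_CSUM
 *                        (inner L4 checksum deliberately not requested)
 */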
+
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
{
+ if (unlikely(eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC))) {
+ ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
+ return;
+ }
+
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -232,131 +252,188 @@ dma_unmap_wqe_err:
return -ENOMEM;
}
+struct mlx5e_tx_attr {
+ u32 num_bytes;
+ u16 headlen;
+ u16 ihs;
+ __be16 mss;
+ u16 insz;
+ u8 opcode;
+};
+
+struct mlx5e_tx_wqe_attr {
+ u16 ds_cnt;
+ u16 ds_cnt_inl;
+ u16 ds_cnt_ids;
+ u8 num_wqebbs;
+};
+
+static u8
+mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel)
+{
+ u8 mode;
+
+#ifdef CONFIG_MLX5_EN_TLS
+ if (accel && accel->tls.tls_tisn)
+ return MLX5_INLINE_MODE_TCP_UDP;
+#endif
+
+ mode = sq->min_inline_mode;
+
+ if (skb_vlan_tag_present(skb) &&
+ test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
+ mode = max_t(u8, MLX5_INLINE_MODE_L2, mode);
+
+ return mode;
+}
+
+static void mlx5e_sq_xmit_prepare(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5e_accel_tx_state *accel,
+ struct mlx5e_tx_attr *attr)
+{
+ struct mlx5e_sq_stats *stats = sq->stats;
+
+ if (skb_is_gso(skb)) {
+ u16 ihs = mlx5e_tx_get_gso_ihs(sq, skb);
+
+ *attr = (struct mlx5e_tx_attr) {
+ .opcode = MLX5_OPCODE_LSO,
+ .mss = cpu_to_be16(skb_shinfo(skb)->gso_size),
+ .ihs = ihs,
+ .num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs,
+ .headlen = skb_headlen(skb) - ihs,
+ };
+
+ stats->packets += skb_shinfo(skb)->gso_segs;
+ } else {
+ u8 mode = mlx5e_tx_wqe_inline_mode(sq, skb, accel);
+ u16 ihs = mlx5e_calc_min_inline(mode, skb);
+
+ *attr = (struct mlx5e_tx_attr) {
+ .opcode = MLX5_OPCODE_SEND,
+ .mss = cpu_to_be16(0),
+ .ihs = ihs,
+ .num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN),
+ .headlen = skb_headlen(skb) - ihs,
+ };
+
+ stats->packets++;
+ }
+
+ attr->insz = mlx5e_accel_tx_ids_len(sq, accel);
+ stats->bytes += attr->num_bytes;
+}
+
+static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_attr *attr,
+ struct mlx5e_tx_wqe_attr *wqe_attr)
+{
+ u16 ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT;
+ u16 ds_cnt_inl = 0;
+ u16 ds_cnt_ids = 0;
+
+ if (attr->insz)
+ ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz,
+ MLX5_SEND_WQE_DS);
+
+ ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags + ds_cnt_ids;
+ if (attr->ihs) {
+ u16 inl = attr->ihs - INL_HDR_START_SZ;
+
+ if (skb_vlan_tag_present(skb))
+ inl += VLAN_HLEN;
+
+ ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ *wqe_attr = (struct mlx5e_tx_wqe_attr) {
+ .ds_cnt = ds_cnt,
+ .ds_cnt_inl = ds_cnt_inl,
+ .ds_cnt_ids = ds_cnt_ids,
+ .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
+ };
+}
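A worked pass through mlx5e_sq_calc_wqe_attr() for a common case, assuming a linear 1500-byte skb, ihs = 14 (L2 header inlined), no VLAN tag, no accel ids, and the usual constants (MLX5E_TX_WQE_EMPTY_DS_COUNT = 2 for the ctrl + eth segments, MLX5_SEND_WQE_DS = 16, MLX5_SEND_WQEBB_NUM_DS = 4):

/*   ds_cnt_ids = 0                        (attr->insz == 0)
 *   headlen    = 1500 - 14 = 1486  ->  +1 data segment
 *   inl        = 14 - INL_HDR_START_SZ(2) = 12
 *   ds_cnt_inl = DIV_ROUND_UP(12, 16) = 1
 *   ds_cnt     = 2 + 1 + 0 + 1 = 4
 *   num_wqebbs = DIV_ROUND_UP(4, 4) = 1
 */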
+
+static void mlx5e_tx_skb_update_hwts_flags(struct sk_buff *skb)
+{
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+
+static void mlx5e_tx_check_stop(struct mlx5e_txqsq *sq)
+{
+ if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats->stopped++;
+ }
+}
+
static inline void
mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- u8 opcode, u16 ds_cnt, u8 num_wqebbs, u32 num_bytes, u8 num_dma,
+ const struct mlx5e_tx_attr *attr,
+ const struct mlx5e_tx_wqe_attr *wqe_attr, u8 num_dma,
struct mlx5e_tx_wqe_info *wi, struct mlx5_wqe_ctrl_seg *cseg,
bool xmit_more)
{
struct mlx5_wq_cyc *wq = &sq->wq;
bool send_doorbell;
- wi->num_bytes = num_bytes;
- wi->num_dma = num_dma;
- wi->num_wqebbs = num_wqebbs;
- wi->skb = skb;
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .skb = skb,
+ .num_bytes = attr->num_bytes,
+ .num_dma = num_dma,
+ .num_wqebbs = wqe_attr->num_wqebbs,
+ .num_fifo_pkts = 0,
+ };
- cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
- cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | attr->opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | wqe_attr->ds_cnt);
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ mlx5e_tx_skb_update_hwts_flags(skb);
sq->pc += wi->num_wqebbs;
- if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
- netif_tx_stop_queue(sq->txq);
- sq->stats->stopped++;
- }
- send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
- xmit_more);
+ mlx5e_tx_check_stop(sq);
+
+ send_doorbell = __netdev_tx_sent_queue(sq->txq, attr->num_bytes, xmit_more);
if (send_doorbell)
mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
}
-void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
+static void
+mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ const struct mlx5e_tx_attr *attr, const struct mlx5e_tx_wqe_attr *wqe_attr,
+ struct mlx5e_tx_wqe *wqe, u16 pi, bool xmit_more)
{
- struct mlx5_wq_cyc *wq = &sq->wq;
struct mlx5_wqe_ctrl_seg *cseg;
struct mlx5_wqe_eth_seg *eseg;
struct mlx5_wqe_data_seg *dseg;
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 headlen, ihs, contig_wqebbs_room;
- u16 ds_cnt, ds_cnt_inl = 0;
- u8 num_wqebbs, opcode;
- u32 num_bytes;
int num_dma;
- __be16 mss;
-
- /* Calc ihs and ds cnt, no writes to wqe yet */
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- ihs = mlx5e_tx_get_gso_ihs(sq, skb);
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
- stats->packets += skb_shinfo(skb)->gso_segs;
- } else {
- u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
- opcode = MLX5_OPCODE_SEND;
- mss = 0;
- ihs = mlx5e_calc_min_inline(mode, skb);
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- stats->packets++;
- }
-
- stats->bytes += num_bytes;
stats->xmit_more += xmit_more;
- headlen = skb->len - ihs - skb->data_len;
- ds_cnt += !!headlen;
- ds_cnt += skb_shinfo(skb)->nr_frags;
-
- if (ihs) {
- ihs += !!skb_vlan_tag_present(skb) * VLAN_HLEN;
-
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- }
-
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
- if (unlikely(contig_wqebbs_room < num_wqebbs)) {
-#ifdef CONFIG_MLX5_EN_IPSEC
- struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- struct mlx5_wqe_ctrl_seg cur_ctrl = wqe->ctrl;
-#endif
- mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
- pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
- wqe = MLX5E_TX_FETCH_WQE(sq, pi);
-#ifdef CONFIG_MLX5_EN_IPSEC
- wqe->eth = cur_eth;
-#endif
-#ifdef CONFIG_MLX5_EN_TLS
- wqe->ctrl = cur_ctrl;
-#endif
- }
-
/* fill wqe */
wi = &sq->db.wqe_info[pi];
cseg = &wqe->ctrl;
eseg = &wqe->eth;
dseg = wqe->data;
-#if IS_ENABLED(CONFIG_GENEVE)
- if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
-#endif
- mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+ eseg->mss = attr->mss;
- eseg->mss = mss;
-
- if (ihs) {
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
+ if (attr->ihs) {
if (skb_vlan_tag_present(skb)) {
- ihs -= VLAN_HLEN;
- mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs);
+ eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs + VLAN_HLEN);
+ mlx5e_insert_vlan(eseg->inline_hdr.start, skb, attr->ihs);
stats->added_vlan_packets++;
} else {
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
+ eseg->inline_hdr.sz |= cpu_to_be16(attr->ihs);
+ memcpy(eseg->inline_hdr.start, skb->data, attr->ihs);
}
- dseg += ds_cnt_inl;
+ dseg += wqe_attr->ds_cnt_inl;
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
@@ -365,12 +442,13 @@ void mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
stats->added_vlan_packets++;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
+ dseg += wqe_attr->ds_cnt_ids;
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr->ihs,
+ attr->headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, attr, wqe_attr, num_dma, wi, cseg, xmit_more);
return;
@@ -379,10 +457,173 @@ err_drop:
dev_kfree_skb_any(skb);
}
+static bool mlx5e_tx_skb_supports_mpwqe(struct sk_buff *skb, struct mlx5e_tx_attr *attr)
+{
+ return !skb_is_nonlinear(skb) && !skb_vlan_tag_present(skb) && !attr->ihs &&
+ !attr->insz;
+}
+
+static bool mlx5e_tx_mpwqe_same_eseg(struct mlx5e_txqsq *sq, struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+
+ /* Assumes the session is already running and has at least one packet. */
+ return !memcmp(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
+}
+
+static void mlx5e_tx_mpwqe_session_start(struct mlx5e_txqsq *sq,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ pi = mlx5e_txqsq_get_next_pi(sq, MLX5E_TX_MPW_MAX_WQEBBS);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ prefetchw(wqe->data);
+
+ *session = (struct mlx5e_tx_mpwqe) {
+ .wqe = wqe,
+ .bytes_count = 0,
+ .ds_count = MLX5E_TX_WQE_EMPTY_DS_COUNT,
+ .pkt_count = 0,
+ .inline_on = 0,
+ };
+
+ memcpy(&session->wqe->eth, eseg, MLX5E_ACCEL_ESEG_LEN);
+
+ sq->stats->mpwqe_blks++;
+}
+
+static bool mlx5e_tx_mpwqe_session_is_active(struct mlx5e_txqsq *sq)
+{
+ return sq->mpwqe.wqe;
+}
+
+static void mlx5e_tx_mpwqe_add_dseg(struct mlx5e_txqsq *sq, struct mlx5e_xmit_data *txd)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ struct mlx5_wqe_data_seg *dseg;
+
+ dseg = (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count;
+
+ session->pkt_count++;
+ session->bytes_count += txd->len;
+
+ dseg->addr = cpu_to_be64(txd->dma_addr);
+ dseg->byte_count = cpu_to_be32(txd->len);
+ dseg->lkey = sq->mkey_be;
+ session->ds_count++;
+
+ sq->stats->mpwqe_pkts++;
+}
+
+static struct mlx5_wqe_ctrl_seg *mlx5e_tx_mpwqe_session_complete(struct mlx5e_txqsq *sq)
+{
+ struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
+ u8 ds_count = session->ds_count;
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_tx_wqe_info *wi;
+ u16 pi;
+
+ cseg = &session->wqe->ctrl;
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ wi = &sq->db.wqe_info[pi];
+ *wi = (struct mlx5e_tx_wqe_info) {
+ .skb = NULL,
+ .num_bytes = session->bytes_count,
+ .num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS),
+ .num_dma = session->pkt_count,
+ .num_fifo_pkts = session->pkt_count,
+ };
+
+ sq->pc += wi->num_wqebbs;
+
+ session->wqe = NULL;
+
+ mlx5e_tx_check_stop(sq);
+
+ return cseg;
+}
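Closing a session converts the accumulated state into one wqe_info entry; a worked example for a session that batched three 100-byte packets (values follow directly from the helpers above):

/*   session->ds_count  = MLX5E_TX_WQE_EMPTY_DS_COUNT + 3   (one dseg/packet)
 *   wi->num_bytes      = 300
 *   wi->num_dma        = 3,  wi->num_fifo_pkts = 3
 *   wi->skb            = NULL
 *
 * On completion, wi->skb == NULL steers mlx5e_poll_tx_cq() to pop
 * wi->num_fifo_pkts skbs from the fifo instead of freeing a single skb.
 */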
+
+static void
+mlx5e_sq_xmit_mpwqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg, bool xmit_more)
+{
+ struct mlx5_wqe_ctrl_seg *cseg;
+ struct mlx5e_xmit_data txd;
+
+ if (!mlx5e_tx_mpwqe_session_is_active(sq)) {
+ mlx5e_tx_mpwqe_session_start(sq, eseg);
+ } else if (!mlx5e_tx_mpwqe_same_eseg(sq, eseg)) {
+ mlx5e_tx_mpwqe_session_complete(sq);
+ mlx5e_tx_mpwqe_session_start(sq, eseg);
+ }
+
+ sq->stats->xmit_more += xmit_more;
+
+ txd.data = skb->data;
+ txd.len = skb->len;
+
+ txd.dma_addr = dma_map_single(sq->pdev, txd.data, txd.len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, txd.dma_addr)))
+ goto err_unmap;
+ mlx5e_dma_push(sq, txd.dma_addr, txd.len, MLX5E_DMA_MAP_SINGLE);
+
+ mlx5e_skb_fifo_push(sq, skb);
+
+ mlx5e_tx_mpwqe_add_dseg(sq, &txd);
+
+ mlx5e_tx_skb_update_hwts_flags(skb);
+
+ if (unlikely(mlx5e_tx_mpwqe_is_full(&sq->mpwqe))) {
+ /* Might stop the queue and affect the retval of __netdev_tx_sent_queue. */
+ cseg = mlx5e_tx_mpwqe_session_complete(sq);
+
+ if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more))
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ } else if (__netdev_tx_sent_queue(sq->txq, txd.len, xmit_more)) {
+ /* Might stop the queue, but we were asked to ring the doorbell anyway. */
+ cseg = mlx5e_tx_mpwqe_session_complete(sq);
+
+ mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg);
+ }
+
+ return;
+
+err_unmap:
+ mlx5e_dma_unmap_wqe_err(sq, 1);
+ sq->stats->dropped++;
+ dev_kfree_skb_any(skb);
+}
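The two completion branches above differ only in what triggers them; the decision, condensed as a sketch:

/* After queueing a packet into the session:
 *
 *   session full?            -> complete the session; ring the doorbell
 *                               only if __netdev_tx_sent_queue() says so
 *   BQL/xmit_more says ring? -> complete the session early, then ring, so
 *                               the doorbell never precedes the posted WQE
 *   otherwise                -> keep batching; a later packet or
 *                               mlx5e_tx_mpwqe_ensure_complete() closes it
 */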
+
+void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
+{
+ /* Unlikely in non-MPWQE workloads; not important in MPWQE workloads. */
+ if (unlikely(mlx5e_tx_mpwqe_session_is_active(sq)))
+ mlx5e_tx_mpwqe_session_complete(sq);
+}
+
+static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
+ struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+{
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ return false;
+
+ mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
+
+ return true;
+}
+
netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_accel_tx_state accel = {};
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
struct mlx5e_tx_wqe *wqe;
struct mlx5e_txqsq *sq;
u16 pi;
@@ -391,21 +632,92 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May send SKBs and WQEs. */
if (unlikely(!mlx5e_accel_tx_begin(dev, sq, skb, &accel)))
- goto out;
+ return NETDEV_TX_OK;
- pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ mlx5e_sq_xmit_prepare(sq, skb, &accel, &attr);
+
+ if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state)) {
+ if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
+ struct mlx5_wqe_eth_seg eseg = {};
+
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &eseg)))
+ return NETDEV_TX_OK;
+
+ mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
+ return NETDEV_TX_OK;
+ }
+
+ mlx5e_tx_mpwqe_ensure_complete(sq);
+ }
+
+ mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
wqe = MLX5E_TX_FETCH_WQE(sq, pi);
/* May update the WQE, but may not post other WQEs. */
- if (unlikely(!mlx5e_accel_tx_finish(priv, sq, skb, wqe, &accel)))
- goto out;
+ mlx5e_accel_tx_finish(sq, wqe, &accel,
+ (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &wqe->eth)))
+ return NETDEV_TX_OK;
- mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
+ mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
-out:
return NETDEV_TX_OK;
}
+void mlx5e_sq_xmit_simple(struct mlx5e_txqsq *sq, struct sk_buff *skb, bool xmit_more)
+{
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
+ struct mlx5e_tx_wqe *wqe;
+ u16 pi;
+
+ mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5e_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
+ wqe = MLX5E_TX_FETCH_WQE(sq, pi);
+ mlx5e_txwqe_build_eseg_csum(sq, skb, &wqe->eth);
+ mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, xmit_more);
+}
+
+static void mlx5e_tx_wi_dma_unmap(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
+ u32 *dma_fifo_cc)
+{
+ int i;
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+}
+
+static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_cqe64 *cqe, int napi_budget)
+{
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+ struct skb_shared_hwtstamps hwts = {};
+ u64 ts = get_cqe_ts(cqe);
+
+ hwts.hwtstamp = mlx5_timecounter_cyc2time(sq->clock, ts);
+ skb_tstamp_tx(skb, &hwts);
+ }
+
+ napi_consume_skb(skb, napi_budget);
+}
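
mlx5e_consume_skb() defers the actual free to napi_consume_skb(), whose
behaviour hinges on the budget argument: a zero budget (the netpoll case)
falls back to an immediate free instead of the per-CPU NAPI skb cache.
Paraphrase of the check at the top of the core helper, as a reminder of why
napi_budget is threaded through the completion path below:

    /* roughly what napi_consume_skb() does first */
    if (unlikely(!budget)) {
        dev_consume_skb_any(skb);  /* no NAPI cache outside softirq */
        return;
    }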
+
+static void mlx5e_tx_wi_consume_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi,
+ struct mlx5_cqe64 *cqe, int napi_budget)
+{
+ int i;
+
+ for (i = 0; i < wi->num_fifo_pkts; i++) {
+ struct sk_buff *skb = mlx5e_skb_fifo_pop(sq);
+
+ mlx5e_consume_skb(sq, skb, cqe, napi_budget);
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq_stats *stats;
@@ -451,42 +763,33 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wqe_counter = be16_to_cpu(cqe->wqe_counter);
do {
- struct sk_buff *skb;
- int j;
-
last_wqe = (sqcc == wqe_counter);
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
- skb = wi->skb;
- if (unlikely(!skb)) {
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
- sqcc += wi->num_wqebbs;
- continue;
- }
+ sqcc += wi->num_wqebbs;
- if (unlikely(skb_shinfo(skb)->tx_flags &
- SKBTX_HW_TSTAMP)) {
- struct skb_shared_hwtstamps hwts = {};
+ if (likely(wi->skb)) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_consume_skb(sq, wi->skb, cqe, napi_budget);
- hwts.hwtstamp =
- mlx5_timecounter_cyc2time(sq->clock,
- get_cqe_ts(cqe));
- skb_tstamp_tx(skb, &hwts);
+ npkts++;
+ nbytes += wi->num_bytes;
+ continue;
}
- for (j = 0; j < wi->num_dma; j++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
+ if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi,
+ &dma_fifo_cc)))
+ continue;
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
+ if (wi->num_fifo_pkts) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_tx_wi_consume_fifo_skbs(sq, wi, cqe, napi_budget);
- npkts++;
- nbytes += wi->num_bytes;
- sqcc += wi->num_wqebbs;
- napi_consume_skb(skb, napi_budget);
+ npkts += wi->num_fifo_pkts;
+ nbytes += wi->num_bytes;
+ }
} while (!last_wqe);
if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
@@ -525,13 +828,19 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
+static void mlx5e_tx_wi_kfree_fifo_skbs(struct mlx5e_txqsq *sq, struct mlx5e_tx_wqe_info *wi)
+{
+ int i;
+
+ for (i = 0; i < wi->num_fifo_pkts; i++)
+ dev_kfree_skb_any(mlx5e_skb_fifo_pop(sq));
+}
+
void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
{
struct mlx5e_tx_wqe_info *wi;
u32 dma_fifo_cc, nbytes = 0;
u16 ci, sqcc, npkts = 0;
- struct sk_buff *skb;
- int i;
sqcc = sq->cc;
dma_fifo_cc = sq->dma_fifo_cc;
@@ -539,25 +848,28 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
while (sqcc != sq->pc) {
ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
wi = &sq->db.wqe_info[ci];
- skb = wi->skb;
- if (!skb) {
- mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
- sqcc += wi->num_wqebbs;
+ sqcc += wi->num_wqebbs;
+
+ if (likely(wi->skb)) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ dev_kfree_skb_any(wi->skb);
+
+ npkts++;
+ nbytes += wi->num_bytes;
continue;
}
- for (i = 0; i < wi->num_dma; i++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, dma_fifo_cc++);
+ if (unlikely(mlx5e_ktls_tx_try_handle_resync_dump_comp(sq, wi, &dma_fifo_cc)))
+ continue;
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
+ if (wi->num_fifo_pkts) {
+ mlx5e_tx_wi_dma_unmap(sq, wi, &dma_fifo_cc);
+ mlx5e_tx_wi_kfree_fifo_skbs(sq, wi);
- dev_kfree_skb_any(skb);
- npkts++;
- nbytes += wi->num_bytes;
- sqcc += wi->num_wqebbs;
+ npkts += wi->num_fifo_pkts;
+ nbytes += wi->num_bytes;
+ }
}
sq->dma_fifo_cc = dma_fifo_cc;
@@ -576,9 +888,34 @@ mlx5i_txwqe_build_datagram(struct mlx5_av *av, u32 dqpn, u32 dqkey,
dseg->av.key.qkey.qkey = cpu_to_be32(dqkey);
}
+static void mlx5i_sq_calc_wqe_attr(struct sk_buff *skb,
+ const struct mlx5e_tx_attr *attr,
+ struct mlx5e_tx_wqe_attr *wqe_attr)
+{
+ u16 ds_cnt = sizeof(struct mlx5i_tx_wqe) / MLX5_SEND_WQE_DS;
+ u16 ds_cnt_inl = 0;
+
+ ds_cnt += !!attr->headlen + skb_shinfo(skb)->nr_frags;
+
+ if (attr->ihs) {
+ u16 inl = attr->ihs - INL_HDR_START_SZ;
+
+ ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS);
+ ds_cnt += ds_cnt_inl;
+ }
+
+ *wqe_attr = (struct mlx5e_tx_wqe_attr) {
+ .ds_cnt = ds_cnt,
+ .ds_cnt_inl = ds_cnt_inl,
+ .num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS),
+ };
+}
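
For a concrete feel of the DS accounting above, a standalone worked example;
the standard mlx5 segment geometry is assumed (16-byte data segments, four
per 64-byte WQEBB, 2 bytes of inline header carried by the eth segment), and
the packet numbers plus the 5-DS fixed base for the ctrl/datagram/eth
segments are made up for illustration:

    #include <stdio.h>

    #define MLX5_SEND_WQE_DS       16
    #define MLX5_SEND_WQEBB_NUM_DS 4
    #define INL_HDR_START_SZ       2
    #define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int ihs = 66, nr_frags = 2, headlen = 1400;
        unsigned int ds_cnt = 5; /* assumed fixed base */
        unsigned int ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ,
                                               MLX5_SEND_WQE_DS);

        ds_cnt += !!headlen + nr_frags + ds_cnt_inl;
        printf("ds_cnt=%u inl=%u wqebbs=%u\n", ds_cnt, ds_cnt_inl,
               DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS));
        /* prints: ds_cnt=12 inl=4 wqebbs=3 */
        return 0;
    }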
+
void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more)
{
+ struct mlx5e_tx_wqe_attr wqe_attr;
+ struct mlx5e_tx_attr attr;
struct mlx5i_tx_wqe *wqe;
struct mlx5_wqe_datagram_seg *datagram;
@@ -588,47 +925,17 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_tx_wqe_info *wi;
struct mlx5e_sq_stats *stats = sq->stats;
- u16 ds_cnt, ds_cnt_inl = 0;
- u8 num_wqebbs, opcode;
- u16 headlen, ihs, pi;
- u32 num_bytes;
int num_dma;
- __be16 mss;
+ u16 pi;
- /* Calc ihs and ds cnt, no writes to wqe yet */
- ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
- if (skb_is_gso(skb)) {
- opcode = MLX5_OPCODE_LSO;
- mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
- ihs = mlx5e_tx_get_gso_ihs(sq, skb);
- num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
- stats->packets += skb_shinfo(skb)->gso_segs;
- } else {
- u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
+ mlx5e_sq_xmit_prepare(sq, skb, NULL, &attr);
+ mlx5i_sq_calc_wqe_attr(skb, &attr, &wqe_attr);
- opcode = MLX5_OPCODE_SEND;
- mss = 0;
- ihs = mlx5e_calc_min_inline(mode, skb);
- num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
- stats->packets++;
- }
+ pi = mlx5e_txqsq_get_next_pi(sq, wqe_attr.num_wqebbs);
+ wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
- stats->bytes += num_bytes;
stats->xmit_more += xmit_more;
- headlen = skb->len - ihs - skb->data_len;
- ds_cnt += !!headlen;
- ds_cnt += skb_shinfo(skb)->nr_frags;
-
- if (ihs) {
- ds_cnt_inl = DIV_ROUND_UP(ihs - INL_HDR_START_SZ, MLX5_SEND_WQE_DS);
- ds_cnt += ds_cnt_inl;
- }
-
- num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- pi = mlx5e_txqsq_get_next_pi(sq, num_wqebbs);
- wqe = MLX5I_SQ_FETCH_WQE(sq, pi);
-
/* fill wqe */
wi = &sq->db.wqe_info[pi];
cseg = &wqe->ctrl;
@@ -640,20 +947,20 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
- eseg->mss = mss;
+ eseg->mss = attr.mss;
- if (ihs) {
- memcpy(eseg->inline_hdr.start, skb->data, ihs);
- eseg->inline_hdr.sz = cpu_to_be16(ihs);
- dseg += ds_cnt_inl;
+ if (attr.ihs) {
+ memcpy(eseg->inline_hdr.start, skb->data, attr.ihs);
+ eseg->inline_hdr.sz = cpu_to_be16(attr.ihs);
+ dseg += wqe_attr.ds_cnt_inl;
}
- num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
+ num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + attr.ihs,
+ attr.headlen, dseg);
if (unlikely(num_dma < 0))
goto err_drop;
- mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
- num_dma, wi, cseg, xmit_more);
+ mlx5e_txwqe_complete(sq, skb, &attr, &wqe_attr, num_dma, wi, cseg, xmit_more);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 22a19d391e17..8ebfe782f95e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -828,8 +828,7 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&eq->tasklet_ctx.list);
INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
spin_lock_init(&eq->tasklet_ctx.lock);
- tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
- (unsigned long)&eq->tasklet_ctx);
+ tasklet_setup(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb);
eq->irq_nb.notifier_call = mlx5_eq_comp_int;
param = (struct mlx5_eq_param) {
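
The tasklet_init() -> tasklet_setup() change follows the then-new kernel
tasklet API: the callback now receives the tasklet pointer and recovers its
context with from_tasklet(), a container_of() wrapper. A sketch of the
matching callback conversion, assuming the context struct is the driver's
mlx5_eq_tasklet with the tasklet embedded as .task (which the setup call
above implies):

    static void mlx5_cq_tasklet_cb(struct tasklet_struct *t)
    {
        struct mlx5_eq_tasklet *ctx = from_tasklet(ctx, t, task);

        /* ... walk ctx->process_list and re-arm CQs as before ... */
    }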
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 07b2acd7e6b3..c3faae67e4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -148,6 +148,11 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
esw_acl_egress_vlan_grp_destroy(vport);
}
+static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int table_size = 0;
@@ -157,6 +162,9 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
!MLX5_CAP_GEN(esw->dev, prio_tag_required))
return 0;
+ if (!esw_acl_egress_needed(esw, vport->vport))
+ return 0;
+
esw_acl_egress_ofld_rules_destroy(vport);
if (mlx5_esw_acl_egress_fwd2vport_supported(esw))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
deleted file mode 100644
index d5bf908dfecd..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.c
+++ /dev/null
@@ -1,944 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2020 Mellanox Technologies.
-
-#include <linux/mlx5/driver.h>
-#include <linux/mlx5/mlx5_ifc.h>
-#include <linux/mlx5/fs.h>
-
-#include "esw/chains.h"
-#include "en/mapping.h"
-#include "mlx5_core.h"
-#include "fs_core.h"
-#include "eswitch.h"
-#include "en.h"
-#include "en_tc.h"
-
-#define esw_chains_priv(esw) ((esw)->fdb_table.offloads.esw_chains_priv)
-#define esw_chains_lock(esw) (esw_chains_priv(esw)->lock)
-#define esw_chains_ht(esw) (esw_chains_priv(esw)->chains_ht)
-#define esw_chains_mapping(esw) (esw_chains_priv(esw)->chains_mapping)
-#define esw_prios_ht(esw) (esw_chains_priv(esw)->prios_ht)
-#define fdb_pool_left(esw) (esw_chains_priv(esw)->fdb_left)
-#define tc_slow_fdb(esw) ((esw)->fdb_table.offloads.slow_fdb)
-#define tc_end_fdb(esw) (esw_chains_priv(esw)->tc_end_fdb)
-#define fdb_ignore_flow_level_supported(esw) \
- (MLX5_CAP_ESW_FLOWTABLE_FDB((esw)->dev, ignore_flow_level))
-#define fdb_modify_header_fwd_to_table_supported(esw) \
- (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
-
-/* Firmware currently has 4 pools of 4 sizes that it supports (ESW_POOLS),
- * and a virtual memory region of 16M (ESW_SIZE); this region is duplicated
- * for each flow table pool. We can allocate up to 16M of each pool,
- * and we keep track of how much we have used via get_next_avail_sz_from_pool.
- * Firmware doesn't report any of this for now.
- * ESW_POOLS is expected to be sorted from large to small and to match the
- * firmware pools.
- */
-#define ESW_SIZE (16 * 1024 * 1024)
-static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
- 1 * 1024 * 1024,
- 64 * 1024,
- 128 };
-#define ESW_FT_TBL_SZ (64 * 1024)
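
The per-pool budgets that mlx5_esw_chains_init_sz_pool() derives from these
constants further down are easy to sanity-check by hand; a throwaway snippet
(the cap from the firmware's log_max_ft_size is ignored here):

    #include <stdio.h>

    static const unsigned int pools[] = { 4u << 20, 1u << 20, 64u << 10, 128 };

    int main(void)
    {
        unsigned int esw_size = 16u << 20; /* ESW_SIZE */
        unsigned int i;

        for (i = 0; i < sizeof(pools) / sizeof(pools[0]); i++)
            printf("%7u-entry tables: %6u available\n",
                   pools[i], esw_size / pools[i]);
        /* prints 4, 16, 256 and 131072 respectively */
        return 0;
    }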
-
-struct mlx5_esw_chains_priv {
- struct rhashtable chains_ht;
- struct rhashtable prios_ht;
- /* Protects above chains_ht and prios_ht */
- struct mutex lock;
-
- struct mlx5_flow_table *tc_end_fdb;
- struct mapping_ctx *chains_mapping;
-
- int fdb_left[ARRAY_SIZE(ESW_POOLS)];
-};
-
-struct fdb_chain {
- struct rhash_head node;
-
- u32 chain;
-
- int ref;
- int id;
-
- struct mlx5_eswitch *esw;
- struct list_head prios_list;
- struct mlx5_flow_handle *restore_rule;
- struct mlx5_modify_hdr *miss_modify_hdr;
-};
-
-struct fdb_prio_key {
- u32 chain;
- u32 prio;
- u32 level;
-};
-
-struct fdb_prio {
- struct rhash_head node;
- struct list_head list;
-
- struct fdb_prio_key key;
-
- int ref;
-
- struct fdb_chain *fdb_chain;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_table *next_fdb;
- struct mlx5_flow_group *miss_group;
- struct mlx5_flow_handle *miss_rule;
-};
-
-static const struct rhashtable_params chain_params = {
- .head_offset = offsetof(struct fdb_chain, node),
- .key_offset = offsetof(struct fdb_chain, chain),
- .key_len = sizeof_field(struct fdb_chain, chain),
- .automatic_shrinking = true,
-};
-
-static const struct rhashtable_params prio_params = {
- .head_offset = offsetof(struct fdb_prio, node),
- .key_offset = offsetof(struct fdb_prio, key),
- .key_len = sizeof_field(struct fdb_prio, key),
- .automatic_shrinking = true,
-};
-
-bool mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw)
-{
- return esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
-}
-
-bool mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw)
-{
- return mlx5_esw_chains_prios_supported(esw) &&
- fdb_ignore_flow_level_supported(esw);
-}
-
-u32 mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- return 1;
-
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX - 1;
-
- return FDB_TC_MAX_CHAIN;
-}
-
-u32 mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw)
-{
- return mlx5_esw_chains_get_chain_range(esw) + 1;
-}
-
-u32 mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- return 1;
-
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX;
-
- return FDB_TC_MAX_PRIO;
-}
-
-static unsigned int mlx5_esw_chains_get_level_range(struct mlx5_eswitch *esw)
-{
- if (fdb_ignore_flow_level_supported(esw))
- return UINT_MAX;
-
- return FDB_TC_LEVELS_PER_PRIO;
-}
-
-#define POOL_NEXT_SIZE 0
-static int
-mlx5_esw_chains_get_avail_sz_from_pool(struct mlx5_eswitch *esw,
- int desired_size)
-{
- int i, found_i = -1;
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
- if (fdb_pool_left(esw)[i] && ESW_POOLS[i] > desired_size) {
- found_i = i;
- if (desired_size != POOL_NEXT_SIZE)
- break;
- }
- }
-
- if (found_i != -1) {
- --fdb_pool_left(esw)[found_i];
- return ESW_POOLS[found_i];
- }
-
- return 0;
-}
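
The selection rule above is easy to misread: ESW_POOLS is sorted large to
small, so the reverse loop scans small to large, returning the smallest pool
strictly larger than the request, while POOL_NEXT_SIZE (0) keeps scanning
and lands on the largest pool that still has capacity. A standalone mimic
(capacity bookkeeping dropped):

    #include <stdio.h>

    static const unsigned int pools[] = { 4u << 20, 1u << 20, 64u << 10, 128 };

    static unsigned int pick(unsigned int desired)
    {
        int i, found = -1;

        for (i = 3; i >= 0; i--) {
            if (pools[i] > desired) {
                found = i;
                if (desired != 0) /* 0 == POOL_NEXT_SIZE */
                    break;
            }
        }
        return found >= 0 ? pools[found] : 0;
    }

    int main(void)
    {
        printf("%u %u\n", pick(64 * 1024), pick(0)); /* 1048576 4194304 */
        return 0;
    }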
-
-static void
-mlx5_esw_chains_put_sz_to_pool(struct mlx5_eswitch *esw, int sz)
-{
- int i;
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--) {
- if (sz == ESW_POOLS[i]) {
- ++fdb_pool_left(esw)[i];
- return;
- }
- }
-
- WARN_ONCE(1, "Couldn't find size %d in fdb size pool", sz);
-}
-
-static void
-mlx5_esw_chains_init_sz_pool(struct mlx5_eswitch *esw)
-{
- u32 fdb_max;
- int i;
-
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, log_max_ft_size);
-
- for (i = ARRAY_SIZE(ESW_POOLS) - 1; i >= 0; i--)
- fdb_pool_left(esw)[i] =
- ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0;
-}
-
-static struct mlx5_flow_table *
-mlx5_esw_chains_create_fdb_table(struct mlx5_eswitch *esw,
- u32 chain, u32 prio, u32 level)
-{
- struct mlx5_flow_table_attr ft_attr = {};
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *fdb;
- int sz;
-
- if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
- ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
- MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
- sz = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
- mlx5_esw_chains_get_avail_sz_from_pool(esw, ESW_FT_TBL_SZ) :
- mlx5_esw_chains_get_avail_sz_from_pool(esw, POOL_NEXT_SIZE);
- if (!sz)
- return ERR_PTR(-ENOSPC);
- ft_attr.max_fte = sz;
-
- /* We use tc_slow_fdb(esw) as the table's next_ft until
- * ignore_flow_level is allowed on FT creation, and not just for FTEs.
- * In the meantime, the caller should add an explicit miss rule if needed.
- */
- ft_attr.next_ft = tc_slow_fdb(esw);
-
- /* The root table (chain 0, prio 1, level 0) is required to be
- * connected to the previous prio (FDB_BYPASS_PATH if it exists).
- * We always create it, as a managed table, in order to align with
- * fs_core logic.
- */
- if (!fdb_ignore_flow_level_supported(esw) ||
- (chain == 0 && prio == 1 && level == 0)) {
- ft_attr.level = level;
- ft_attr.prio = prio - 1;
- ns = mlx5_get_fdb_sub_ns(esw->dev, chain);
- } else {
- ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
- ft_attr.prio = FDB_TC_OFFLOAD;
- /* Firmware doesn't allow us to create another level 0 table,
- * so we create all unmanaged tables as level 1.
- *
- * To connect them, we use explicit miss rules with
- * ignore_flow_level. The caller is responsible for creating
- * these rules (if needed).
- */
- ft_attr.level = 1;
- ns = mlx5_get_flow_namespace(esw->dev, MLX5_FLOW_NAMESPACE_FDB);
- }
-
- ft_attr.autogroup.num_reserved_entries = 2;
- ft_attr.autogroup.max_num_groups = esw->params.large_group_num;
- fdb = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(fdb)) {
- esw_warn(esw->dev,
- "Failed to create FDB table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
- (int)PTR_ERR(fdb), chain, prio, level, sz);
- mlx5_esw_chains_put_sz_to_pool(esw, sz);
- return fdb;
- }
-
- return fdb;
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *fdb)
-{
- mlx5_esw_chains_put_sz_to_pool(esw, fdb->max_fte);
- mlx5_destroy_flow_table(fdb);
-}
-
-static int
-create_fdb_chain_restore(struct fdb_chain *fdb_chain)
-{
- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
- struct mlx5_eswitch *esw = fdb_chain->esw;
- struct mlx5_modify_hdr *mod_hdr;
- u32 index;
- int err;
-
- if (fdb_chain->chain == mlx5_esw_chains_get_ft_chain(esw) ||
- !mlx5_esw_chains_prios_supported(esw))
- return 0;
-
- err = mapping_add(esw_chains_mapping(esw), &fdb_chain->chain, &index);
- if (err)
- return err;
- if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
- /* we got the special default flow tag id, so we won't know
- * if we actually marked the packet with the restore rule
- * we create.
- *
- * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
- */
- err = mapping_add(esw_chains_mapping(esw),
- &fdb_chain->chain, &index);
- mapping_remove(esw_chains_mapping(esw),
- MLX5_FS_DEFAULT_FLOW_TAG);
- if (err)
- return err;
- }
-
- fdb_chain->id = index;
-
- MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
- MLX5_SET(set_action_in, modact, field,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mfield);
- MLX5_SET(set_action_in, modact, offset,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].moffset * 8);
- MLX5_SET(set_action_in, modact, length,
- mlx5e_tc_attr_to_reg_mappings[CHAIN_TO_REG].mlen * 8);
- MLX5_SET(set_action_in, modact, data, fdb_chain->id);
- mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB,
- 1, modact);
- if (IS_ERR(mod_hdr)) {
- err = PTR_ERR(mod_hdr);
- goto err_mod_hdr;
- }
- fdb_chain->miss_modify_hdr = mod_hdr;
-
- fdb_chain->restore_rule = esw_add_restore_rule(esw, fdb_chain->id);
- if (IS_ERR(fdb_chain->restore_rule)) {
- err = PTR_ERR(fdb_chain->restore_rule);
- goto err_rule;
- }
-
- return 0;
-
-err_rule:
- mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
-err_mod_hdr:
- /* Datapath can't find this mapping, so we can safely remove it */
- mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
- return err;
-}
-
-static void destroy_fdb_chain_restore(struct fdb_chain *fdb_chain)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
-
- if (!fdb_chain->miss_modify_hdr)
- return;
-
- mlx5_del_flow_rules(fdb_chain->restore_rule);
- mlx5_modify_header_dealloc(esw->dev, fdb_chain->miss_modify_hdr);
- mapping_remove(esw_chains_mapping(esw), fdb_chain->id);
-}
-
-static struct fdb_chain *
-mlx5_esw_chains_create_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
-{
- struct fdb_chain *fdb_chain = NULL;
- int err;
-
- fdb_chain = kvzalloc(sizeof(*fdb_chain), GFP_KERNEL);
- if (!fdb_chain)
- return ERR_PTR(-ENOMEM);
-
- fdb_chain->esw = esw;
- fdb_chain->chain = chain;
- INIT_LIST_HEAD(&fdb_chain->prios_list);
-
- err = create_fdb_chain_restore(fdb_chain);
- if (err)
- goto err_restore;
-
- err = rhashtable_insert_fast(&esw_chains_ht(esw), &fdb_chain->node,
- chain_params);
- if (err)
- goto err_insert;
-
- return fdb_chain;
-
-err_insert:
- destroy_fdb_chain_restore(fdb_chain);
-err_restore:
- kvfree(fdb_chain);
- return ERR_PTR(err);
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_chain(struct fdb_chain *fdb_chain)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
-
- rhashtable_remove_fast(&esw_chains_ht(esw), &fdb_chain->node,
- chain_params);
-
- destroy_fdb_chain_restore(fdb_chain);
- kvfree(fdb_chain);
-}
-
-static struct fdb_chain *
-mlx5_esw_chains_get_fdb_chain(struct mlx5_eswitch *esw, u32 chain)
-{
- struct fdb_chain *fdb_chain;
-
- fdb_chain = rhashtable_lookup_fast(&esw_chains_ht(esw), &chain,
- chain_params);
- if (!fdb_chain) {
- fdb_chain = mlx5_esw_chains_create_fdb_chain(esw, chain);
- if (IS_ERR(fdb_chain))
- return fdb_chain;
- }
-
- fdb_chain->ref++;
-
- return fdb_chain;
-}
-
-static struct mlx5_flow_handle *
-mlx5_esw_chains_add_miss_rule(struct fdb_chain *fdb_chain,
- struct mlx5_flow_table *fdb,
- struct mlx5_flow_table *next_fdb)
-{
- struct mlx5_eswitch *esw = fdb_chain->esw;
- struct mlx5_flow_destination dest = {};
- struct mlx5_flow_act act = {};
-
- act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
- act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = next_fdb;
-
- if (next_fdb == tc_end_fdb(esw) &&
- mlx5_esw_chains_prios_supported(esw)) {
- act.modify_hdr = fdb_chain->miss_modify_hdr;
- act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- }
-
- return mlx5_add_flow_rules(fdb, NULL, &act, &dest, 1);
-}
-
-static int
-mlx5_esw_chains_update_prio_prevs(struct fdb_prio *fdb_prio,
- struct mlx5_flow_table *next_fdb)
-{
- struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
- struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
- struct fdb_prio *pos;
- int n = 0, err;
-
- if (fdb_prio->key.level)
- return 0;
-
- /* Iterate in reverse order until reaching the level 0 rule of
- * the previous priority, adding all the miss rules first, so we can
- * revert them if any of them fails.
- */
- pos = fdb_prio;
- list_for_each_entry_continue_reverse(pos,
- &fdb_chain->prios_list,
- list) {
- miss_rules[n] = mlx5_esw_chains_add_miss_rule(fdb_chain,
- pos->fdb,
- next_fdb);
- if (IS_ERR(miss_rules[n])) {
- err = PTR_ERR(miss_rules[n]);
- goto err_prev_rule;
- }
-
- n++;
- if (!pos->key.level)
- break;
- }
-
- /* Success, delete old miss rules, and update the pointers. */
- n = 0;
- pos = fdb_prio;
- list_for_each_entry_continue_reverse(pos,
- &fdb_chain->prios_list,
- list) {
- mlx5_del_flow_rules(pos->miss_rule);
-
- pos->miss_rule = miss_rules[n];
- pos->next_fdb = next_fdb;
-
- n++;
- if (!pos->key.level)
- break;
- }
-
- return 0;
-
-err_prev_rule:
- while (--n >= 0)
- mlx5_del_flow_rules(miss_rules[n]);
-
- return err;
-}
-
-static void
-mlx5_esw_chains_put_fdb_chain(struct fdb_chain *fdb_chain)
-{
- if (--fdb_chain->ref == 0)
- mlx5_esw_chains_destroy_fdb_chain(fdb_chain);
-}
-
-static struct fdb_prio *
-mlx5_esw_chains_create_fdb_prio(struct mlx5_eswitch *esw,
- u32 chain, u32 prio, u32 level)
-{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_flow_handle *miss_rule = NULL;
- struct mlx5_flow_group *miss_group;
- struct fdb_prio *fdb_prio = NULL;
- struct mlx5_flow_table *next_fdb;
- struct fdb_chain *fdb_chain;
- struct mlx5_flow_table *fdb;
- struct list_head *pos;
- u32 *flow_group_in;
- int err;
-
- fdb_chain = mlx5_esw_chains_get_fdb_chain(esw, chain);
- if (IS_ERR(fdb_chain))
- return ERR_CAST(fdb_chain);
-
- fdb_prio = kvzalloc(sizeof(*fdb_prio), GFP_KERNEL);
- flow_group_in = kvzalloc(inlen, GFP_KERNEL);
- if (!fdb_prio || !flow_group_in) {
- err = -ENOMEM;
- goto err_alloc;
- }
-
- /* A chain's prio list is sorted by prio and level, and all levels
- * of a given prio point to the next prio's level 0.
- * Example list (prio, level):
- * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
- * In hardware, we will have the following pointers:
- * (3,0) -> (5,0) -> (7,0) -> Slow path
- * (3,1) -> (5,0)
- * (5,1) -> (7,0)
- * (6,1) -> (7,0)
- */
-
- /* Default miss for each chain: */
- next_fdb = (chain == mlx5_esw_chains_get_ft_chain(esw)) ?
- tc_slow_fdb(esw) :
- tc_end_fdb(esw);
- list_for_each(pos, &fdb_chain->prios_list) {
- struct fdb_prio *p = list_entry(pos, struct fdb_prio, list);
-
- /* exit on first pos that is larger */
- if (prio < p->key.prio || (prio == p->key.prio &&
- level < p->key.level)) {
- /* Get next level 0 table */
- next_fdb = p->key.level == 0 ? p->fdb : p->next_fdb;
- break;
- }
- }
-
- fdb = mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
- if (IS_ERR(fdb)) {
- err = PTR_ERR(fdb);
- goto err_create;
- }
-
- MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
- fdb->max_fte - 2);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- fdb->max_fte - 1);
- miss_group = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR(miss_group)) {
- err = PTR_ERR(miss_group);
- goto err_group;
- }
-
- /* Add miss rule to next_fdb */
- miss_rule = mlx5_esw_chains_add_miss_rule(fdb_chain, fdb, next_fdb);
- if (IS_ERR(miss_rule)) {
- err = PTR_ERR(miss_rule);
- goto err_miss_rule;
- }
-
- fdb_prio->miss_group = miss_group;
- fdb_prio->miss_rule = miss_rule;
- fdb_prio->next_fdb = next_fdb;
- fdb_prio->fdb_chain = fdb_chain;
- fdb_prio->key.chain = chain;
- fdb_prio->key.prio = prio;
- fdb_prio->key.level = level;
- fdb_prio->fdb = fdb;
-
- err = rhashtable_insert_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
- if (err)
- goto err_insert;
-
- list_add(&fdb_prio->list, pos->prev);
-
- /* Table is ready, connect it */
- err = mlx5_esw_chains_update_prio_prevs(fdb_prio, fdb);
- if (err)
- goto err_update;
-
- kvfree(flow_group_in);
- return fdb_prio;
-
-err_update:
- list_del(&fdb_prio->list);
- rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
-err_insert:
- mlx5_del_flow_rules(miss_rule);
-err_miss_rule:
- mlx5_destroy_flow_group(miss_group);
-err_group:
- mlx5_esw_chains_destroy_fdb_table(esw, fdb);
-err_create:
-err_alloc:
- kvfree(fdb_prio);
- kvfree(flow_group_in);
- mlx5_esw_chains_put_fdb_chain(fdb_chain);
- return ERR_PTR(err);
-}
-
-static void
-mlx5_esw_chains_destroy_fdb_prio(struct mlx5_eswitch *esw,
- struct fdb_prio *fdb_prio)
-{
- struct fdb_chain *fdb_chain = fdb_prio->fdb_chain;
-
- WARN_ON(mlx5_esw_chains_update_prio_prevs(fdb_prio,
- fdb_prio->next_fdb));
-
- list_del(&fdb_prio->list);
- rhashtable_remove_fast(&esw_prios_ht(esw), &fdb_prio->node,
- prio_params);
- mlx5_del_flow_rules(fdb_prio->miss_rule);
- mlx5_destroy_flow_group(fdb_prio->miss_group);
- mlx5_esw_chains_destroy_fdb_table(esw, fdb_prio->fdb);
- mlx5_esw_chains_put_fdb_chain(fdb_chain);
- kvfree(fdb_prio);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level)
-{
- struct mlx5_flow_table *prev_fts;
- struct fdb_prio *fdb_prio;
- struct fdb_prio_key key;
- int l = 0;
-
- if ((chain > mlx5_esw_chains_get_chain_range(esw) &&
- chain != mlx5_esw_chains_get_ft_chain(esw)) ||
- prio > mlx5_esw_chains_get_prio_range(esw) ||
- level > mlx5_esw_chains_get_level_range(esw))
- return ERR_PTR(-EOPNOTSUPP);
-
- /* create earlier levels for correct fs_core lookup when
- * connecting tables.
- */
- for (l = 0; l < level; l++) {
- prev_fts = mlx5_esw_chains_get_table(esw, chain, prio, l);
- if (IS_ERR(prev_fts)) {
- fdb_prio = ERR_CAST(prev_fts);
- goto err_get_prevs;
- }
- }
-
- key.chain = chain;
- key.prio = prio;
- key.level = level;
-
- mutex_lock(&esw_chains_lock(esw));
- fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
- prio_params);
- if (!fdb_prio) {
- fdb_prio = mlx5_esw_chains_create_fdb_prio(esw, chain,
- prio, level);
- if (IS_ERR(fdb_prio))
- goto err_create_prio;
- }
-
- ++fdb_prio->ref;
- mutex_unlock(&esw_chains_lock(esw));
-
- return fdb_prio->fdb;
-
-err_create_prio:
- mutex_unlock(&esw_chains_lock(esw));
-err_get_prevs:
- while (--l >= 0)
- mlx5_esw_chains_put_table(esw, chain, prio, l);
- return ERR_CAST(fdb_prio);
-}
-
-void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level)
-{
- struct fdb_prio *fdb_prio;
- struct fdb_prio_key key;
-
- key.chain = chain;
- key.prio = prio;
- key.level = level;
-
- mutex_lock(&esw_chains_lock(esw));
- fdb_prio = rhashtable_lookup_fast(&esw_prios_ht(esw), &key,
- prio_params);
- if (!fdb_prio)
- goto err_get_prio;
-
- if (--fdb_prio->ref == 0)
- mlx5_esw_chains_destroy_fdb_prio(esw, fdb_prio);
- mutex_unlock(&esw_chains_lock(esw));
-
- while (level-- > 0)
- mlx5_esw_chains_put_table(esw, chain, prio, level);
-
- return;
-
-err_get_prio:
- mutex_unlock(&esw_chains_lock(esw));
- WARN_ONCE(1,
- "Couldn't find table: (chain: %d prio: %d level: %d)",
- chain, prio, level);
-}
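
The two functions above are reference counted per (chain, prio, level), and
a get transparently creates all lower levels first, so every successful get
must be balanced by a put with the same triple. A minimal caller sketch
(kernel-context fragment, not standalone):

    struct mlx5_flow_table *ft;

    ft = mlx5_esw_chains_get_table(esw, chain, prio, level);
    if (IS_ERR(ft))
        return PTR_ERR(ft);
    /* ... install flow rules in ft ... */
    mlx5_esw_chains_put_table(esw, chain, prio, level);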
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw)
-{
- return tc_end_fdb(esw);
-}
-
-struct mlx5_flow_table *
-mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw)
-{
- u32 chain, prio, level;
- int err;
-
- if (!fdb_ignore_flow_level_supported(esw)) {
- err = -EOPNOTSUPP;
-
- esw_warn(esw->dev,
- "Couldn't create global flow table, ignore_flow_level not supported.");
- goto err_ignore;
- }
-
- chain = mlx5_esw_chains_get_chain_range(esw);
- prio = mlx5_esw_chains_get_prio_range(esw);
- level = mlx5_esw_chains_get_level_range(esw);
-
- return mlx5_esw_chains_create_fdb_table(esw, chain, prio, level);
-
-err_ignore:
- return ERR_PTR(err);
-}
-
-void
-mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ft)
-{
- mlx5_esw_chains_destroy_fdb_table(esw, ft);
-}
-
-static int
-mlx5_esw_chains_init(struct mlx5_eswitch *esw)
-{
- struct mlx5_esw_chains_priv *chains_priv;
- struct mlx5_core_dev *dev = esw->dev;
- u32 max_flow_counter, fdb_max;
- struct mapping_ctx *mapping;
- int err;
-
- chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
- if (!chains_priv)
- return -ENOMEM;
- esw_chains_priv(esw) = chains_priv;
-
- max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
- MLX5_CAP_GEN(dev, max_flow_counter_15_0);
- fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
-
- esw_debug(dev,
- "Init esw offloads chains, max counters(%d), groups(%d), max flow table size(%d)\n",
- max_flow_counter, esw->params.large_group_num, fdb_max);
-
- mlx5_esw_chains_init_sz_pool(esw);
-
- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, multi_fdb_encap) &&
- esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
- } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
- } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
- /* Disabled when the ttl workaround is needed, e.g.
- * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
- */
- esw_warn(dev,
- "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
- esw->fdb_table.flags &= ~ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- } else {
- esw->fdb_table.flags |= ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED;
- esw_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
- mlx5_esw_chains_get_chain_range(esw),
- mlx5_esw_chains_get_prio_range(esw));
- }
-
- err = rhashtable_init(&esw_chains_ht(esw), &chain_params);
- if (err)
- goto init_chains_ht_err;
-
- err = rhashtable_init(&esw_prios_ht(esw), &prio_params);
- if (err)
- goto init_prios_ht_err;
-
- mapping = mapping_create(sizeof(u32), esw_get_max_restore_tag(esw),
- true);
- if (IS_ERR(mapping)) {
- err = PTR_ERR(mapping);
- goto mapping_err;
- }
- esw_chains_mapping(esw) = mapping;
-
- mutex_init(&esw_chains_lock(esw));
-
- return 0;
-
-mapping_err:
- rhashtable_destroy(&esw_prios_ht(esw));
-init_prios_ht_err:
- rhashtable_destroy(&esw_chains_ht(esw));
-init_chains_ht_err:
- kfree(chains_priv);
- return err;
-}
-
-static void
-mlx5_esw_chains_cleanup(struct mlx5_eswitch *esw)
-{
- mutex_destroy(&esw_chains_lock(esw));
- mapping_destroy(esw_chains_mapping(esw));
- rhashtable_destroy(&esw_prios_ht(esw));
- rhashtable_destroy(&esw_chains_ht(esw));
-
- kfree(esw_chains_priv(esw));
-}
-
-static int
-mlx5_esw_chains_open(struct mlx5_eswitch *esw)
-{
- struct mlx5_flow_table *ft;
- int err;
-
- /* Create tc_end_fdb(esw), the ft chain that is always created */
- ft = mlx5_esw_chains_get_table(esw, mlx5_esw_chains_get_ft_chain(esw),
- 1, 0);
- if (IS_ERR(ft))
- return PTR_ERR(ft);
-
- tc_end_fdb(esw) = ft;
-
- /* Always open the root for fast path */
- ft = mlx5_esw_chains_get_table(esw, 0, 1, 0);
- if (IS_ERR(ft)) {
- err = PTR_ERR(ft);
- goto level_0_err;
- }
-
- /* Open level 1 for split rules now if prios aren't supported */
- if (!mlx5_esw_chains_prios_supported(esw)) {
- err = mlx5_esw_vport_tbl_get(esw);
- if (err)
- goto level_1_err;
- }
-
- return 0;
-
-level_1_err:
- mlx5_esw_chains_put_table(esw, 0, 1, 0);
-level_0_err:
- mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
- return err;
-}
-
-static void
-mlx5_esw_chains_close(struct mlx5_eswitch *esw)
-{
- if (!mlx5_esw_chains_prios_supported(esw))
- mlx5_esw_vport_tbl_put(esw);
- mlx5_esw_chains_put_table(esw, 0, 1, 0);
- mlx5_esw_chains_put_table(esw, mlx5_esw_chains_get_ft_chain(esw), 1, 0);
-}
-
-int
-mlx5_esw_chains_create(struct mlx5_eswitch *esw)
-{
- int err;
-
- err = mlx5_esw_chains_init(esw);
- if (err)
- return err;
-
- err = mlx5_esw_chains_open(esw);
- if (err)
- goto err_open;
-
- return 0;
-
-err_open:
- mlx5_esw_chains_cleanup(esw);
- return err;
-}
-
-void
-mlx5_esw_chains_destroy(struct mlx5_eswitch *esw)
-{
- mlx5_esw_chains_close(esw);
- mlx5_esw_chains_cleanup(esw);
-}
-
-int
-mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
- u32 *chain_mapping)
-{
- return mapping_add(esw_chains_mapping(esw), &chain, chain_mapping);
-}
-
-int
-mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw, u32 chain_mapping)
-{
- return mapping_remove(esw_chains_mapping(esw), chain_mapping);
-}
-
-int mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag,
- u32 *chain)
-{
- int err;
-
- err = mapping_find(esw_chains_mapping(esw), tag, chain);
- if (err) {
- esw_warn(esw->dev, "Can't find chain for tag: %d\n", tag);
- return -ENOENT;
- }
-
- return 0;
-}
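
The three mapping helpers above form a round trip: a chain id is compressed
into a tag small enough for the hardware register, and the tag is resolved
back to the chain on the miss/restore path. Caller sketch (fragment; the
modify-header programming of the tag is elided):

    u32 tag, chain_out;
    int err;

    err = mlx5_esw_chains_get_chain_mapping(esw, chain, &tag);
    if (err)
        return err;
    /* ... set `tag` into reg_c via a modify-header action ... */
    err = mlx5_eswitch_get_chain_for_tag(esw, tag, &chain_out);
    /* on success, chain_out == chain */
    mlx5_esw_chains_put_chain_mapping(esw, tag);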
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
deleted file mode 100644
index 7679ac359e31..000000000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/chains.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020 Mellanox Technologies. */
-
-#ifndef __ML5_ESW_CHAINS_H__
-#define __ML5_ESW_CHAINS_H__
-
-#include "eswitch.h"
-
-#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
-
-bool
-mlx5_esw_chains_prios_supported(struct mlx5_eswitch *esw);
-bool
-mlx5_esw_chains_backwards_supported(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_prio_range(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_chain_range(struct mlx5_eswitch *esw);
-u32
-mlx5_esw_chains_get_ft_chain(struct mlx5_eswitch *esw);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level);
-void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw);
-
-struct mlx5_flow_table *
-mlx5_esw_chains_create_global_table(struct mlx5_eswitch *esw);
-void
-mlx5_esw_chains_destroy_global_table(struct mlx5_eswitch *esw,
- struct mlx5_flow_table *ft);
-
-int
-mlx5_esw_chains_get_chain_mapping(struct mlx5_eswitch *esw, u32 chain,
- u32 *chain_mapping);
-int
-mlx5_esw_chains_put_chain_mapping(struct mlx5_eswitch *esw,
- u32 chain_mapping);
-
-int mlx5_esw_chains_create(struct mlx5_eswitch *esw);
-void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw);
-
-int
-mlx5_eswitch_get_chain_for_tag(struct mlx5_eswitch *esw, u32 tag, u32 *chain);
-
-#else /* CONFIG_MLX5_CLS_ACT */
-
-static inline struct mlx5_flow_table *
-mlx5_esw_chains_get_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level) { return ERR_PTR(-EOPNOTSUPP); }
-static inline void
-mlx5_esw_chains_put_table(struct mlx5_eswitch *esw, u32 chain, u32 prio,
- u32 level) {}
-
-static inline struct mlx5_flow_table *
-mlx5_esw_chains_get_tc_end_ft(struct mlx5_eswitch *esw) { return ERR_PTR(-EOPNOTSUPP); }
-
-static inline int mlx5_esw_chains_create(struct mlx5_eswitch *esw) { return 0; }
-static inline void mlx5_esw_chains_destroy(struct mlx5_eswitch *esw) {}
-
-#endif /* CONFIG_MLX5_CLS_ACT */
-
-#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
new file mode 100644
index 000000000000..ffff11baa3d0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 Mellanox Technologies Ltd. */
+
+#include <linux/mlx5/driver.h>
+#include "eswitch.h"
+
+static void
+mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
+{
+ u64 parent_id;
+
+ parent_id = mlx5_query_nic_system_image_guid(dev);
+ ppid->id_len = sizeof(parent_id);
+ memcpy(ppid->id, &parent_id, sizeof(parent_id));
+}
+
+static bool
+mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+ return vport_num == MLX5_VPORT_UPLINK ||
+ (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
+ mlx5_eswitch_is_vf_vport(esw, vport_num);
+}
+
+static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct devlink_port_attrs attrs = {};
+ struct netdev_phys_item_id ppid = {};
+ struct devlink_port *dl_port;
+ u32 controller_num = 0;
+ bool external;
+ u16 pfnum;
+
+ dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
+ if (!dl_port)
+ return NULL;
+
+ mlx5_esw_get_port_parent_id(dev, &ppid);
+ pfnum = PCI_FUNC(dev->pdev->devfn);
+ external = mlx5_core_is_ecpf_esw_manager(dev);
+ if (external)
+ controller_num = dev->priv.eswitch->offloads.host_number + 1;
+
+ if (vport_num == MLX5_VPORT_UPLINK) {
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pfnum;
+ memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
+ attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_set(dl_port, &attrs);
+ } else if (vport_num == MLX5_VPORT_PF) {
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
+ } else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ vport_num - 1, external);
+ }
+ return dl_port;
+}
+
+static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
+{
+ kfree(dl_port);
+}
+
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct devlink_port *dl_port;
+ unsigned int dl_port_index;
+ struct mlx5_vport *vport;
+ struct devlink *devlink;
+ int err;
+
+ if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+ return 0;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
+ if (!dl_port)
+ return -ENOMEM;
+
+ devlink = priv_to_devlink(dev);
+ dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
+ err = devlink_port_register(devlink, dl_port, dl_port_index);
+ if (err)
+ goto reg_err;
+
+ vport->dl_port = dl_port;
+ return 0;
+
+reg_err:
+ mlx5_esw_dl_port_free(dl_port);
+ return err;
+}
+
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+ return;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ if (IS_ERR(vport))
+ return;
+ devlink_port_unregister(vport->dl_port);
+ mlx5_esw_dl_port_free(vport->dl_port);
+ vport->dl_port = NULL;
+}
+
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ return vport->dl_port;
+}
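
Use of the new devlink port helpers is symmetric per vport; a sketch of the
expected call order from the eswitch load/unload path (fragment, error
handling trimmed):

    err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
    if (err)
        return err;
    /* port is now visible to devlink; representors can fetch it: */
    dl_port = mlx5_esw_offloads_devlink_port(esw, vport_num);
    /* ... on teardown: */
    mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);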
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 867d8120b8a5..cf87de94418f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -42,6 +42,7 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "lib/mpfs.h"
+#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#ifdef CONFIG_MLX5_ESWITCH
@@ -62,6 +63,9 @@
#define mlx5_esw_has_fwd_fdb(dev) \
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)
+#define esw_chains(esw) \
+ ((esw)->fdb_table.offloads.esw_chains_priv)
+
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_handle *allow_rule;
@@ -152,14 +156,9 @@ struct mlx5_vport {
bool enabled;
enum mlx5_eswitch_vport_event enabled_events;
+ struct devlink_port *dl_port;
};
-enum offloads_fdb_flags {
- ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
-};
-
-struct mlx5_esw_chains_priv;
-
struct mlx5_eswitch_fdb {
union {
struct legacy_fdb {
@@ -183,7 +182,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
- struct mlx5_esw_chains_priv *esw_chains_priv;
+ struct mlx5_fs_chains *esw_chains_priv;
struct {
DECLARE_HASHTABLE(table, 8);
/* Protects vports.table */
@@ -217,6 +216,7 @@ struct mlx5_esw_offload {
atomic64_t num_flows;
enum devlink_eswitch_encap_mode encap;
struct ida vport_metadata_ida;
+ unsigned int host_number; /* ECPF supports one external host */
};
/* E-Switch MC FDB table hash node */
@@ -329,7 +329,7 @@ struct mlx5_termtbl_handle;
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec);
@@ -349,19 +349,19 @@ mlx5_eswitch_termtbl_put(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
@@ -401,7 +401,6 @@ struct mlx5_esw_flow_attr {
int split_count;
int out_count;
- int action;
__be16 vlan_proto[MLX5_FS_VLAN_DEPTH];
u16 vlan_vid[MLX5_FS_VLAN_DEPTH];
u8 vlan_prio[MLX5_FS_VLAN_DEPTH];
@@ -413,19 +412,7 @@ struct mlx5_esw_flow_attr {
struct mlx5_core_dev *mdev;
struct mlx5_termtbl_handle *termtbl;
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5_modify_hdr *modify_hdr;
- u8 inner_match_level;
- u8 outer_match_level;
- struct mlx5_fc *counter;
- u32 chain;
- u16 prio;
- u32 dest_chain;
- u32 flags;
- struct mlx5_flow_table *fdb;
- struct mlx5_flow_table *dest_ft;
- struct mlx5_ct_attr ct_attr;
struct mlx5_pkt_reformat *decap_pkt_reformat;
- struct mlx5e_tc_flow_parse_attr *parse_attr;
};
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
@@ -451,9 +438,9 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr);
+ struct mlx5_flow_attr *attr);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u16 vport, u16 vlan, u8 qos, u8 set_flags);
@@ -677,6 +664,9 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 1bcf2609dca8..c9c2962ad49f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -39,12 +39,13 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
-#include "esw/chains.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/eq.h"
+#include "lib/fs_chains.h"
+#include "en_tc.h"
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
@@ -66,6 +67,12 @@ struct mlx5_vport_key {
u16 vhca_id;
} __packed;
+struct mlx5_vport_tbl_attr {
+ u16 chain;
+ u16 prio;
+ u16 vport;
+};
+
struct mlx5_vport_table {
struct hlist_node hlist;
struct mlx5_flow_table *fdb;
@@ -94,10 +101,10 @@ esw_vport_tbl_create(struct mlx5_eswitch *esw, struct mlx5_flow_namespace *ns)
}
static u32 flow_attr_to_vport_key(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_vport_tbl_attr *attr,
struct mlx5_vport_key *key)
{
- key->vport = attr->in_rep->vport;
+ key->vport = attr->vport;
key->chain = attr->chain;
key->prio = attr->prio;
key->vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
@@ -118,7 +125,7 @@ esw_vport_tbl_lookup(struct mlx5_eswitch *esw, struct mlx5_vport_key *skey, u32
}
static void
-esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+esw_vport_tbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
struct mlx5_vport_table *e;
struct mlx5_vport_key key;
@@ -138,7 +145,7 @@ out:
}
static struct mlx5_flow_table *
-esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr)
+esw_vport_tbl_get(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr)
{
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *ns;
@@ -189,16 +196,15 @@ err_alloc:
int mlx5_esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
- struct mlx5_esw_flow_attr attr = {};
- struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport_tbl_attr attr;
struct mlx5_flow_table *fdb;
struct mlx5_vport *vport;
int i;
+ attr.chain = 0;
attr.prio = 1;
- attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
- attr.in_rep->vport = vport->vport;
+ attr.vport = vport->vport;
fdb = esw_vport_tbl_get(esw, &attr);
if (IS_ERR(fdb))
goto out;
@@ -212,15 +218,14 @@ out:
void mlx5_esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
- struct mlx5_esw_flow_attr attr = {};
- struct mlx5_eswitch_rep rep = {};
+ struct mlx5_vport_tbl_attr attr;
struct mlx5_vport *vport;
int i;
+ attr.chain = 0;
attr.prio = 1;
- attr.in_rep = &rep;
mlx5_esw_for_all_vports(esw, i, vport) {
- attr.in_rep->vport = vport->vport;
+ attr.vport = vport->vport;
esw_vport_tbl_put(esw, &attr);
}
}
@@ -242,8 +247,11 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr)
{
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ attr && attr->in_rep)
+ spec->flow_context.flow_source =
+ attr->in_rep->vport == MLX5_VPORT_UPLINK ?
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
static void
@@ -290,11 +298,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
- bool split = !!(attr->split_count);
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ bool split = !!(esw_attr->split_count);
+ struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int j, i = 0;
@@ -308,13 +319,13 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
- flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]);
- flow_act.vlan[0].vid = attr->vlan_vid[0];
- flow_act.vlan[0].prio = attr->vlan_prio[0];
+ flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
+ flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
+ flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
- flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]);
- flow_act.vlan[1].vid = attr->vlan_vid[1];
- flow_act.vlan[1].prio = attr->vlan_prio[1];
+ flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
+ flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
+ flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
}
}
@@ -329,12 +340,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
} else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest[i].ft = mlx5_esw_chains_get_tc_end_ft(esw);
+ dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
i++;
} else if (attr->dest_chain) {
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
- ft = mlx5_esw_chains_get_table(esw, attr->dest_chain,
- 1, 0);
+ ft = mlx5_chains_get_table(chains, attr->dest_chain,
+ 1, 0);
if (IS_ERR(ft)) {
rule = ERR_CAST(ft);
goto err_create_goto_table;
@@ -344,28 +355,29 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
dest[i].ft = ft;
i++;
} else {
- for (j = attr->split_count; j < attr->out_count; j++) {
+ for (j = esw_attr->split_count; j < esw_attr->out_count; j++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->dests[j].rep->vport;
+ dest[i].vport.num = esw_attr->dests[j].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id);
+ MLX5_CAP_GEN(esw_attr->dests[j].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |=
MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) {
flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act.pkt_reformat = attr->dests[j].pkt_reformat;
+ flow_act.pkt_reformat =
+ esw_attr->dests[j].pkt_reformat;
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
dest[i].vport.pkt_reformat =
- attr->dests[j].pkt_reformat;
+ esw_attr->dests[j].pkt_reformat;
}
i++;
}
}
}
- if (attr->decap_pkt_reformat)
- flow_act.pkt_reformat = attr->decap_pkt_reformat;
+ if (esw_attr->decap_pkt_reformat)
+ flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
@@ -382,26 +394,30 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
flow_act.modify_hdr = attr->modify_hdr;
if (split) {
- fdb = esw_vport_tbl_get(esw, attr);
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+
+ fdb = esw_vport_tbl_get(esw, &fwd_attr);
} else {
if (attr->chain || attr->prio)
- fdb = mlx5_esw_chains_get_table(esw, attr->chain,
- attr->prio, 0);
+ fdb = mlx5_chains_get_table(chains, attr->chain,
+ attr->prio, 0);
else
- fdb = attr->fdb;
+ fdb = attr->ft;
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
}
if (IS_ERR(fdb)) {
rule = ERR_CAST(fdb);
goto err_esw_get;
}
- mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+ mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
- rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
+ rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
&flow_act, dest, i);
else
rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
@@ -414,12 +430,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
err_add_rule:
if (split)
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) && attr->dest_chain)
- mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
+ mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
err_create_goto_table:
return rule;
}
@@ -427,46 +443,51 @@ err_create_goto_table:
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ struct mlx5_vport_tbl_attr fwd_attr;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i;
- fast_fdb = mlx5_esw_chains_get_table(esw, attr->chain, attr->prio, 0);
+ fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
goto err_get_fast;
}
- fwd_fdb = esw_vport_tbl_get(esw, attr);
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+ fwd_fdb = esw_vport_tbl_get(esw, &fwd_attr);
if (IS_ERR(fwd_fdb)) {
rule = ERR_CAST(fwd_fdb);
goto err_get_fwd;
}
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- for (i = 0; i < attr->split_count; i++) {
+ for (i = 0; i < esw_attr->split_count; i++) {
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
- dest[i].vport.num = attr->dests[i].rep->vport;
+ dest[i].vport.num = esw_attr->dests[i].rep->vport;
dest[i].vport.vhca_id =
- MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id);
+ MLX5_CAP_GEN(esw_attr->dests[i].mdev, vhca_id);
if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
- if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
+ if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) {
dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
- dest[i].vport.pkt_reformat = attr->dests[i].pkt_reformat;
+ dest[i].vport.pkt_reformat = esw_attr->dests[i].pkt_reformat;
}
}
dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[i].ft = fwd_fdb;
i++;
- mlx5_eswitch_set_rule_source_port(esw, spec, attr);
- mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+ mlx5_eswitch_set_rule_source_port(esw, spec, esw_attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
@@ -481,9 +502,9 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
return rule;
add_err:
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
err_get_fwd:
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
return rule;
}
@@ -491,10 +512,13 @@ err_get_fast:
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
bool fwd_rule)
{
- bool split = (attr->split_count > 0);
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5_fs_chains *chains = esw_chains(esw);
+ bool split = (esw_attr->split_count > 0);
+ struct mlx5_vport_tbl_attr fwd_attr;
int i;
mlx5_del_flow_rules(rule);
@@ -502,31 +526,36 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
- if (attr->dests[i].termtbl)
- mlx5_eswitch_termtbl_put(esw, attr->dests[i].termtbl);
+ if (esw_attr->dests[i].termtbl)
+ mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
}
}
atomic64_dec(&esw->offloads.num_flows);
+ if (fwd_rule || split) {
+ fwd_attr.chain = attr->chain;
+ fwd_attr.prio = attr->prio;
+ fwd_attr.vport = esw_attr->in_rep->vport;
+ }
+
if (fwd_rule) {
- esw_vport_tbl_put(esw, attr);
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio, 0);
+ esw_vport_tbl_put(esw, &fwd_attr);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
} else {
if (split)
- esw_vport_tbl_put(esw, attr);
+ esw_vport_tbl_put(esw, &fwd_attr);
else if (attr->chain || attr->prio)
- mlx5_esw_chains_put_table(esw, attr->chain, attr->prio,
- 0);
+ mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
if (attr->dest_chain)
- mlx5_esw_chains_put_table(esw, attr->dest_chain, 1, 0);
+ mlx5_chains_put_table(chains, attr->dest_chain, 1, 0);
}
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, false);
}
@@ -534,7 +563,7 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_handle *rule,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
@@ -611,9 +640,10 @@ out_notsupp:
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_eswitch_rep *vport = NULL;
bool push, pop, fwd;
int err = 0;
@@ -629,17 +659,17 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
- err = esw_add_vlan_action_check(attr, push, pop, fwd);
+ err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
if (err)
goto unlock;
attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
- vport = esw_vlan_action_get_vport(attr, push, pop);
+ vport = esw_vlan_action_get_vport(esw_attr, push, pop);
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
+ if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
vport->vlan_refcount++;
attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
}
@@ -662,11 +692,11 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
if (vport->vlan_refcount)
goto skip_set_push;
- err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0,
- SET_VLAN_INSERT | SET_VLAN_STRIP);
+ err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
+ 0, SET_VLAN_INSERT | SET_VLAN_STRIP);
if (err)
goto out;
- vport->vlan = attr->vlan_vid[0];
+ vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
vport->vlan_refcount++;
}
@@ -679,9 +709,10 @@ unlock:
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr)
+ struct mlx5_flow_attr *attr)
{
struct offloads_fdb *offloads = &esw->fdb_table.offloads;
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_eswitch_rep *vport = NULL;
bool push, pop, fwd;
int err = 0;
@@ -699,11 +730,11 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
- vport = esw_vlan_action_get_vport(attr, push, pop);
+ vport = esw_vlan_action_get_vport(esw_attr, push, pop);
if (!push && !pop && fwd) {
/* tracks VF --> wire rules without vlan push action */
- if (attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
+ if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
vport->vlan_refcount--;
goto out;
@@ -1137,6 +1168,126 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
}
}
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+#define fdb_modify_header_fwd_to_table_supported(esw) \
+ (MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
+static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
+ *flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+
+ if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
+ esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
+ } else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
+ } else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
+ /* Disabled when ttl workaround is needed, e.g.
+ * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
+ */
+ esw_warn(dev,
+ "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
+ *flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ } else {
+ *flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+ esw_info(dev, "Tc chains and priorities offload supported\n");
+ }
+
+ if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+ *flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
+}
+
+static int
+esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_table *nf_ft, *ft;
+ struct mlx5_chains_attr attr = {};
+ struct mlx5_fs_chains *chains;
+ u32 fdb_max;
+ int err;
+
+ fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
+
+ esw_init_chains_offload_flags(esw, &attr.flags);
+ attr.ns = MLX5_FLOW_NAMESPACE_FDB;
+ attr.max_ft_sz = fdb_max;
+ attr.max_grp_num = esw->params.large_group_num;
+ attr.default_ft = miss_fdb;
+ attr.max_restore_tag = esw_get_max_restore_tag(esw);
+
+ chains = mlx5_chains_create(dev, &attr);
+ if (IS_ERR(chains)) {
+ err = PTR_ERR(chains);
+ esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ return err;
+ }
+
+ esw->fdb_table.offloads.esw_chains_priv = chains;
+
+ /* Create tc_end_ft, the always-created ft chain */
+ nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
+ 1, 0);
+ if (IS_ERR(nf_ft)) {
+ err = PTR_ERR(nf_ft);
+ goto nf_ft_err;
+ }
+
+ /* Always open the root for fast path */
+ ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto level_0_err;
+ }
+
+ /* Open level 1 for split fdb rules now if prios aren't supported */
+ if (!mlx5_chains_prios_supported(chains)) {
+ err = mlx5_esw_vport_tbl_get(esw);
+ if (err)
+ goto level_1_err;
+ }
+
+ mlx5_chains_set_end_ft(chains, nf_ft);
+
+ return 0;
+
+level_1_err:
+ mlx5_chains_put_table(chains, 0, 1, 0);
+level_0_err:
+ mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
+nf_ft_err:
+ mlx5_chains_destroy(chains);
+ esw->fdb_table.offloads.esw_chains_priv = NULL;
+
+ return err;
+}
+
+static void
+esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ mlx5_esw_vport_tbl_put(esw);
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
+ mlx5_chains_destroy(chains);
+}
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static int
+esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
+{ return 0; }
+
+static void
+esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
+{}
+
+#endif
+
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1192,9 +1343,9 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.slow_fdb = fdb;
- err = mlx5_esw_chains_create(esw);
+ err = esw_chains_create(esw, fdb);
if (err) {
- esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
+ esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
goto fdb_chains_err;
}
@@ -1288,7 +1439,7 @@ miss_err:
peer_miss_err:
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
- mlx5_esw_chains_destroy(esw);
+ esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
@@ -1312,7 +1463,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
- mlx5_esw_chains_destroy(esw);
+ esw_chains_destroy(esw, esw_chains(esw));
+
mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
@@ -1671,15 +1823,12 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
-int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
int err;
- if (esw->mode != MLX5_ESWITCH_OFFLOADS)
- return 0;
-
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
@@ -1698,19 +1847,46 @@ err_reps:
return err;
}
-void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
- if (esw->mode != MLX5_ESWITCH_OFFLOADS)
- return;
-
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
+int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ int err;
+
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return 0;
+
+ err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
+ if (err)
+ return err;
+
+ err = mlx5_esw_offloads_rep_load(esw, vport_num);
+ if (err)
+ goto load_err;
+ return err;
+
+load_err:
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+ return err;
+}
+
+void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ if (esw->mode != MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ mlx5_esw_offloads_rep_unload(esw, vport_num);
+ mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
+}
+
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
@@ -1868,53 +2044,38 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
return true;
}
-static bool
-esw_check_vport_match_metadata_mandatory(const struct mlx5_eswitch *esw)
-{
- return mlx5_core_mp_enabled(esw->dev);
-}
-
-static bool esw_use_vport_metadata(const struct mlx5_eswitch *esw)
-{
- return esw_check_vport_match_metadata_mandatory(esw) &&
- esw_check_vport_match_metadata_supported(esw);
-}
-
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
- u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
- u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
- u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
- u32 start;
- u32 end;
+ u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
+ u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
+ u32 pf_num;
int id;
- /* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
- WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
-
- /* Trim vhca_id to ESW_VHCA_ID_BITS */
- vhca_id &= vhca_id_mask;
-
- start = (vhca_id << ESW_VPORT_BITS);
- end = start + num_vports;
- if (!vhca_id)
- start += 1; /* zero is reserved/invalid metadata */
- id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL);
+ /* Only 4 bits of pf_num */
+ pf_num = PCI_FUNC(esw->dev->pdev->devfn);
+ if (pf_num > max_pf_num)
+ return 0;
- return (id < 0) ? 0 : id;
+ /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
+ /* Use only non-zero vport_id (1-4095) for all PFs */
+ id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
+ if (id < 0)
+ return 0;
+ id = (pf_num << ESW_VPORT_BITS) | id;
+ return id;
}
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
- ida_free(&esw->offloads.vport_metadata_ida, metadata);
+ u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
+
+ /* Metadata contains only 12 bits of actual ida id */
+ ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
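
The new scheme packs a 4-bit PF number above a 12-bit IDA-allocated id, replacing the old vhca_id-based layout. A small illustration of the encode/decode arithmetic; the bit widths are taken from the comments above, and the helper names here are hypothetical:

/* Illustration only; 4 bits of PFNUM above 12 bits of unique id. */
#define SKETCH_VPORT_BITS	12

static inline u32 metadata_encode(u32 pf_num, u32 ida_id)
{
	return (pf_num << SKETCH_VPORT_BITS) | ida_id; /* e.g. pf 2, id 5 -> 0x2005 */
}

static inline u32 metadata_to_ida_id(u32 metadata)
{
	return metadata & ((1 << SKETCH_VPORT_BITS) - 1); /* strip the PF number */
}
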
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK)
- return 0;
-
vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
vport->metadata = vport->default_metadata;
return vport->metadata ? 0 : -ENOSPC;
@@ -1923,40 +2084,65 @@ static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- if (vport->vport == MLX5_VPORT_UPLINK || !vport->default_metadata)
+ if (!vport->default_metadata)
return;
WARN_ON(vport->metadata != vport->default_metadata);
mlx5_esw_match_metadata_free(esw, vport->default_metadata);
}
+static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return;
+
+ mlx5_esw_for_all_vports_reverse(esw, i, vport)
+ esw_offloads_vport_metadata_cleanup(esw, vport);
+}
+
+static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ int err;
+ int i;
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
+ return 0;
+
+ mlx5_esw_for_all_vports(esw, i, vport) {
+ err = esw_offloads_vport_metadata_setup(esw, vport);
+ if (err)
+ goto metadata_err;
+ }
+
+ return 0;
+
+metadata_err:
+ esw_offloads_metadata_uninit(esw);
+ return err;
+}
+
int
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int err;
- err = esw_offloads_vport_metadata_setup(esw, vport);
- if (err)
- goto metadata_err;
-
err = esw_acl_ingress_ofld_setup(esw, vport);
if (err)
- goto ingress_err;
+ return err;
- if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
- err = esw_acl_egress_ofld_setup(esw, vport);
- if (err)
- goto egress_err;
- }
+ err = esw_acl_egress_ofld_setup(esw, vport);
+ if (err)
+ goto egress_err;
return 0;
egress_err:
esw_acl_ingress_ofld_cleanup(esw, vport);
-ingress_err:
- esw_offloads_vport_metadata_cleanup(esw, vport);
-metadata_err:
return err;
}
@@ -1966,22 +2152,14 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
{
esw_acl_egress_ofld_cleanup(vport);
esw_acl_ingress_ofld_cleanup(esw, vport);
- esw_offloads_vport_metadata_cleanup(esw, vport);
}
static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- int err;
-
- if (esw_use_vport_metadata(esw))
- esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- err = esw_vport_create_offloads_acl_tables(esw, vport);
- if (err)
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
- return err;
+ return esw_vport_create_offloads_acl_tables(esw, vport);
}
static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
@@ -1990,7 +2168,6 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
esw_vport_destroy_offloads_acl_tables(esw, vport);
- esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
}
static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
@@ -2114,6 +2291,24 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
+static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
+{
+ const u32 *query_host_out;
+
+ if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
+ query_host_out = mlx5_esw_query_functions(esw->dev);
+ if (IS_ERR(query_host_out))
+ return PTR_ERR(query_host_out);
+
+ /* Mark a non-local controller with a non-zero controller number. */
+ esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
+ host_params_context.host_number);
+ kvfree(query_host_out);
+ return 0;
+}
+
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
@@ -2128,6 +2323,17 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
mutex_init(&esw->offloads.termtbl_mutex);
mlx5_rdma_enable_roce(esw->dev);
+ err = mlx5_esw_host_number_init(esw);
+ if (err)
+ goto err_metadata;
+
+ if (esw_check_vport_match_metadata_supported(esw))
+ esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
+
+ err = esw_offloads_metadata_init(esw);
+ if (err)
+ goto err_metadata;
+
err = esw_set_passing_vport_metadata(esw, true);
if (err)
goto err_vport_metadata;
@@ -2160,6 +2366,9 @@ err_uplink:
err_steering_init:
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
+ esw_offloads_metadata_uninit(esw);
+err_metadata:
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
return err;
@@ -2193,6 +2402,8 @@ void esw_offloads_disable(struct mlx5_eswitch *esw)
esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
+ esw_offloads_metadata_uninit(esw);
+ esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
mlx5_rdma_disable_roce(esw->dev);
mutex_destroy(&esw->offloads.termtbl_mutex);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 17a0d2bc102b..ec679560a95d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/fs.h>
#include "eswitch.h"
+#include "en_tc.h"
#include "fs_core.h"
struct mlx5_termtbl_handle {
@@ -228,10 +229,11 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
bool
mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
- struct mlx5_esw_flow_attr *attr,
+ struct mlx5_flow_attr *attr,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_spec *spec)
{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
int i;
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
@@ -244,8 +246,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
return true;
/* hairpin */
- for (i = attr->split_count; i < attr->out_count; i++)
- if (attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
+ if (esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
return true;
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 831d2c39e153..80da50e12915 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -54,7 +54,7 @@ static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
if (unlikely(!buf->sg[0].data))
goto out;
- dma_device = &conn->fdev->mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
buf->sg[0].size, buf->dma_dir);
err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
@@ -86,7 +86,7 @@ static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
{
struct device *dma_device;
- dma_device = &conn->fdev->mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
if (buf->sg[1].data)
dma_unmap_single(dma_device, buf->sg[1].dma_addr,
buf->sg[1].size, buf->dma_dir);
@@ -388,9 +388,9 @@ static inline void mlx5_fpga_conn_cqes(struct mlx5_fpga_conn *conn,
mlx5_fpga_conn_arm_cq(conn);
}
-static void mlx5_fpga_conn_cq_tasklet(unsigned long data)
+static void mlx5_fpga_conn_cq_tasklet(struct tasklet_struct *t)
{
- struct mlx5_fpga_conn *conn = (void *)data;
+ struct mlx5_fpga_conn *conn = from_tasklet(conn, t, cq.tasklet);
if (unlikely(!conn->qp.active))
return;
@@ -478,8 +478,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
conn->cq.mcq.comp = mlx5_fpga_conn_cq_complete;
conn->cq.mcq.irqn = irqn;
conn->cq.mcq.uar = fdev->conn_res.uar;
- tasklet_init(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet,
- (unsigned long)conn);
+ tasklet_setup(&conn->cq.tasklet, mlx5_fpga_conn_cq_tasklet);
mlx5_fpga_dbg(fdev, "Created CQ #0x%x\n", conn->cq.mcq.cqn);
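
This hunk converts the legacy tasklet_init() callback, which received an opaque unsigned long, to the tasklet_setup()/from_tasklet() API, where the callback receives the tasklet_struct itself and recovers its container. A generic sketch of the pattern, with hypothetical names:

#include <linux/interrupt.h>

struct my_ctx {
	struct tasklet_struct tasklet;
	int pending;
};

static void my_tasklet_fn(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the tasklet member */
	struct my_ctx *ctx = from_tasklet(ctx, t, tasklet);

	ctx->pending = 0;
}

static void my_ctx_init(struct my_ctx *ctx)
{
	tasklet_setup(&ctx->tasklet, my_tasklet_fn); /* no cast-laden data arg */
}
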
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index fee169732de7..babe3405132a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -776,6 +776,9 @@ static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
table_type = FS_FT_NIC_RX;
break;
case MLX5_FLOW_NAMESPACE_EGRESS:
+#ifdef CONFIG_MLX5_IPSEC
+ case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+#endif
max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
table_type = FS_FT_NIC_TX;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 75fa44eee434..16091838bfcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -126,6 +126,10 @@
#define LAG_NUM_PRIOS 1
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
+#define KERNEL_TX_IPSEC_NUM_PRIOS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 1
+#define KERNEL_TX_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+
struct node_caps {
size_t arr_sz;
long *caps;
@@ -180,13 +184,24 @@ static struct init_tree_node {
static struct init_tree_node egress_root_fs = {
.type = FS_TYPE_NAMESPACE,
+#ifdef CONFIG_MLX5_IPSEC
+ .ar_size = 2,
+#else
.ar_size = 1,
+#endif
.children = (struct init_tree_node[]) {
ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
FS_CHAINING_CAPS_EGRESS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
+#ifdef CONFIG_MLX5_IPSEC
+ ADD_PRIO(0, KERNEL_TX_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
+ KERNEL_TX_IPSEC_NUM_LEVELS))),
+#endif
}
};
@@ -1595,11 +1610,12 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
if (ignore_level) {
- if (ft->type != FS_FT_FDB)
+ if (ft->type != FS_FT_FDB &&
+ ft->type != FS_FT_NIC_RX)
return false;
if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
- dest->ft->type != FS_FT_FDB)
+ ft->type != dest->ft->type)
return false;
}
@@ -2164,8 +2180,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
break;
}
- if (type == MLX5_FLOW_NAMESPACE_EGRESS) {
+ if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
+ type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
root_ns = steering->egress_root_ns;
+ prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
root_ns = steering->rdma_rx_root_ns;
prio = RDMA_RX_BYPASS_PRIO;
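
Since MLX5_FLOW_NAMESPACE_EGRESS_KERNEL shares the egress root namespace, the hunk above derives the prio index from the enum distance to MLX5_FLOW_NAMESPACE_EGRESS. This appears to rely on the two enum values being adjacent; a small illustration:

/* Assumes EGRESS_KERNEL directly follows EGRESS in the namespace enum,
 * matching the two-entry egress_root_fs layout above.
 */
prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
/* MLX5_FLOW_NAMESPACE_EGRESS        -> prio 0: bypass prios         */
/* MLX5_FLOW_NAMESPACE_EGRESS_KERNEL -> prio 1: kernel IPsec TX prio */
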
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
new file mode 100644
index 000000000000..f9042e147c7f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#include "fw_reset.h"
+#include "diag/fw_tracer.h"
+
+enum {
+ MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
+ MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST,
+ MLX5_FW_RESET_FLAGS_PENDING_COMP
+};
+
+struct mlx5_fw_reset {
+ struct mlx5_core_dev *dev;
+ struct mlx5_nb nb;
+ struct workqueue_struct *wq;
+ struct work_struct fw_live_patch_work;
+ struct work_struct reset_request_work;
+ struct work_struct reset_reload_work;
+ struct work_struct reset_now_work;
+ struct work_struct reset_abort_work;
+ unsigned long reset_flags;
+ struct timer_list timer;
+ struct completion done;
+ int ret;
+};
+
+void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ if (enable)
+ clear_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+ else
+ set_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+}
+
+bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ return !test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags);
+}
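
These two helpers back a devlink runtime parameter that lets an administrator opt a function out of host-initiated sync resets. A sketch of how a devlink param handler might wire them up; the handler names are illustrative, not from this patch:

/* Sketch only; the generic devlink param get/set callback shape is
 * assumed, and these wrappers are hypothetical.
 */
static int mlx5_devlink_enable_remote_dev_reset_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	ctx->val.vbool = mlx5_fw_reset_enable_remote_dev_reset_get(dev);
	return 0;
}

static int mlx5_devlink_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	mlx5_fw_reset_enable_remote_dev_reset_set(dev, ctx->val.vbool);
	return 0;
}
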
+
+static int mlx5_reg_mfrl_set(struct mlx5_core_dev *dev, u8 reset_level,
+ u8 reset_type_sel, u8 sync_resp, bool sync_start)
+{
+ u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+
+ MLX5_SET(mfrl_reg, in, reset_level, reset_level);
+ MLX5_SET(mfrl_reg, in, rst_type_sel, reset_type_sel);
+ MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_resp, sync_resp);
+ MLX5_SET(mfrl_reg, in, pci_sync_for_fw_update_start, sync_start);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 1);
+}
+
+static int mlx5_reg_mfrl_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+{
+ u32 out[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mfrl_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MFRL, 0, 0);
+ if (err)
+ return err;
+
+ if (reset_level)
+ *reset_level = MLX5_GET(mfrl_reg, out, reset_level);
+ if (reset_type)
+ *reset_type = MLX5_GET(mfrl_reg, out, reset_type);
+
+ return 0;
+}
+
+int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type)
+{
+ return mlx5_reg_mfrl_query(dev, reset_level, reset_type);
+}
+
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ int err;
+
+ set_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ err = mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, reset_type_sel, 0, true);
+ if (err)
+ clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ return err;
+}
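
The initiator-side flow implied by this file: set PENDING_COMP, trigger a level-3 MFRL reset, then block on the completion that mlx5_fw_reset_complete_reload() signals once the reload work has run. A minimal sketch, assuming a devlink reload entry point drives it (the entry point shown here is illustrative):

/* Sketch of the driver-initiated sync reset flow; reset_type_sel 0
 * is chosen arbitrarily for the example.
 */
static int mlx5_devlink_fw_activate_sketch(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_fw_reset_set_reset_sync(dev, 0);	/* request + mark PENDING_COMP */
	if (err)
		return err;

	/* Firmware raises RESET_NOW; reset_now_work tears down and toggles
	 * the PCI link, then complete()s fw_reset->done for the initiator.
	 */
	return mlx5_fw_reset_wait_reset_done(dev);
}
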
+
+int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
+{
+ return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
+}
+
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ /* If this is the driver that initiated the fw reset, devlink completes the reload */
+ if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
+ complete(&fw_reset->done);
+ } else {
+ mlx5_load_one(dev, false);
+ devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+ }
+}
+
+static void mlx5_sync_reset_reload_work(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_reload_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ int err;
+
+ mlx5_enter_error_state(dev, true);
+ mlx5_unload_one(dev, false);
+ err = mlx5_health_wait_pci_up(dev);
+ if (err)
+ mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
+ fw_reset->ret = err;
+ mlx5_fw_reset_complete_reload(dev);
+}
+
+static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ del_timer(&fw_reset->timer);
+}
+
+static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ mlx5_stop_sync_reset_poll(dev);
+ clear_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
+ if (poll_health)
+ mlx5_start_health_poll(dev);
+}
+
+#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
+static void poll_sync_reset(struct timer_list *t)
+{
+ struct mlx5_fw_reset *fw_reset = from_timer(fw_reset, t, timer);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ u32 fatal_error;
+
+ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags))
+ return;
+
+ fatal_error = mlx5_health_check_fatal_sensors(dev);
+
+ if (fatal_error) {
+ mlx5_core_warn(dev, "Got Device Reset\n");
+ mlx5_sync_reset_clear_reset_requested(dev, false);
+ queue_work(fw_reset->wq, &fw_reset->reset_reload_work);
+ return;
+ }
+
+ mod_timer(&fw_reset->timer, round_jiffies(jiffies + MLX5_RESET_POLL_INTERVAL));
+}
+
+static void mlx5_start_sync_reset_poll(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ timer_setup(&fw_reset->timer, poll_sync_reset, 0);
+ fw_reset->timer.expires = round_jiffies(jiffies + MLX5_RESET_POLL_INTERVAL);
+ add_timer(&fw_reset->timer);
+}
+
+static int mlx5_fw_reset_set_reset_sync_ack(struct mlx5_core_dev *dev)
+{
+ return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 1, false);
+}
+
+static int mlx5_fw_reset_set_reset_sync_nack(struct mlx5_core_dev *dev)
+{
+ return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL3, 0, 2, false);
+}
+
+static void mlx5_sync_reset_set_reset_requested(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ mlx5_stop_health_poll(dev, true);
+ set_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags);
+ mlx5_start_sync_reset_poll(dev);
+}
+
+static void mlx5_fw_live_patch_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ fw_live_patch_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ struct mlx5_fw_tracer *tracer;
+
+ mlx5_core_info(dev, "Live patch updated firmware version: %d.%d.%d\n", fw_rev_maj(dev),
+ fw_rev_min(dev), fw_rev_sub(dev));
+
+ tracer = dev->tracer;
+ if (IS_ERR_OR_NULL(tracer))
+ return;
+
+ if (mlx5_fw_tracer_reload(tracer))
+ mlx5_core_err(dev, "Failed to reload FW tracer\n");
+}
+
+static void mlx5_sync_reset_request_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_request_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ int err;
+
+ if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags)) {
+ err = mlx5_fw_reset_set_reset_sync_nack(dev);
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
+ err ? "Failed" : "Sent");
+ return;
+ }
+ mlx5_sync_reset_set_reset_requested(dev);
+ err = mlx5_fw_reset_set_reset_sync_ack(dev);
+ if (err)
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack Failed. Error code: %d\n", err);
+ else
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
+}
+
+#define MLX5_PCI_LINK_UP_TIMEOUT 2000
+
+static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
+{
+ struct pci_bus *bridge_bus = dev->pdev->bus;
+ struct pci_dev *bridge = bridge_bus->self;
+ u16 reg16, dev_id, sdev_id;
+ unsigned long timeout;
+ struct pci_dev *sdev;
+ int cap, err;
+ u32 reg32;
+
+ /* Check that all functions under the pci bridge are PFs of
+ * this device, otherwise fail this function.
+ */
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return err;
+ list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
+ err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
+ if (err)
+ return err;
+ if (sdev_id != dev_id)
+ return -EPERM;
+ }
+
+ cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ if (!cap)
+ return -EOPNOTSUPP;
+
+ list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
+ pci_save_state(sdev);
+ pci_cfg_access_lock(sdev);
+ }
+ /* PCI link toggle */
+ err = pci_read_config_word(bridge, cap + PCI_EXP_LNKCTL, &reg16);
+ if (err)
+ return err;
+ reg16 |= PCI_EXP_LNKCTL_LD;
+ err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
+ if (err)
+ return err;
+ msleep(500);
+ reg16 &= ~PCI_EXP_LNKCTL_LD;
+ err = pci_write_config_word(bridge, cap + PCI_EXP_LNKCTL, reg16);
+ if (err)
+ return err;
+
+ /* Check link */
+ err = pci_read_config_dword(bridge, cap + PCI_EXP_LNKCAP, &reg32);
+ if (err)
+ return err;
+ if (!(reg32 & PCI_EXP_LNKCAP_DLLLARC)) {
+ mlx5_core_warn(dev, "No PCI link reporting capability (0x%08x)\n", reg32);
+ msleep(1000);
+ goto restore;
+ }
+
+ timeout = jiffies + msecs_to_jiffies(MLX5_PCI_LINK_UP_TIMEOUT);
+ do {
+ err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16);
+ if (err)
+ return err;
+ if (reg16 & PCI_EXP_LNKSTA_DLLLA)
+ break;
+ msleep(20);
+ } while (!time_after(jiffies, timeout));
+
+ if (reg16 & PCI_EXP_LNKSTA_DLLLA) {
+ mlx5_core_info(dev, "PCI Link up\n");
+ } else {
+ mlx5_core_err(dev, "PCI link not ready (0x%04x) after %d ms\n",
+ reg16, MLX5_PCI_LINK_UP_TIMEOUT);
+ err = -ETIMEDOUT;
+ }
+
+restore:
+ list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
+ pci_cfg_access_unlock(sdev);
+ pci_restore_state(sdev);
+ }
+
+ return err;
+}
+
+static void mlx5_sync_reset_now_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_now_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+ int err;
+
+ mlx5_sync_reset_clear_reset_requested(dev, false);
+
+ mlx5_core_warn(dev, "Sync Reset now. Device is going to reset.\n");
+
+ err = mlx5_cmd_fast_teardown_hca(dev);
+ if (err) {
+ mlx5_core_warn(dev, "Fast teardown failed, no reset done, err %d\n", err);
+ goto done;
+ }
+
+ err = mlx5_pci_link_toggle(dev);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, no reset done, err %d\n", err);
+ goto done;
+ }
+
+ mlx5_enter_error_state(dev, true);
+ mlx5_unload_one(dev, false);
+done:
+ fw_reset->ret = err;
+ mlx5_fw_reset_complete_reload(dev);
+}
+
+static void mlx5_sync_reset_abort_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
+ reset_abort_work);
+ struct mlx5_core_dev *dev = fw_reset->dev;
+
+ mlx5_sync_reset_clear_reset_requested(dev, true);
+ mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n");
+}
+
+static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct mlx5_eqe *eqe)
+{
+ struct mlx5_eqe_sync_fw_update *sync_fw_update_eqe;
+ u8 sync_event_rst_type;
+
+ sync_fw_update_eqe = &eqe->data.sync_fw_update;
+ sync_event_rst_type = sync_fw_update_eqe->sync_rst_state & SYNC_RST_STATE_MASK;
+ switch (sync_event_rst_type) {
+ case MLX5_SYNC_RST_STATE_RESET_REQUEST:
+ queue_work(fw_reset->wq, &fw_reset->reset_request_work);
+ break;
+ case MLX5_SYNC_RST_STATE_RESET_NOW:
+ queue_work(fw_reset->wq, &fw_reset->reset_now_work);
+ break;
+ case MLX5_SYNC_RST_STATE_RESET_ABORT:
+ queue_work(fw_reset->wq, &fw_reset->reset_abort_work);
+ break;
+ }
+}
+
+static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct mlx5_fw_reset *fw_reset = mlx5_nb_cof(nb, struct mlx5_fw_reset, nb);
+ struct mlx5_eqe *eqe = data;
+
+ switch (eqe->sub_type) {
+ case MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT:
+ queue_work(fw_reset->wq, &fw_reset->fw_live_patch_work);
+ break;
+ case MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT:
+ mlx5_sync_reset_events_handle(fw_reset, eqe);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+#define MLX5_FW_RESET_TIMEOUT_MSEC 5000
+int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
+{
+ unsigned long timeout = msecs_to_jiffies(MLX5_FW_RESET_TIMEOUT_MSEC);
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ int err;
+
+ if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
+ mlx5_core_warn(dev, "FW sync reset timeout after %d seconds\n",
+ MLX5_FW_RESET_TIMEOUT_MSEC / 1000);
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ err = fw_reset->ret;
+out:
+ clear_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags);
+ return err;
+}
+
+void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ MLX5_NB_INIT(&fw_reset->nb, fw_reset_event_notifier, GENERAL_EVENT);
+ mlx5_eq_notifier_register(dev, &fw_reset->nb);
+}
+
+void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev)
+{
+ mlx5_eq_notifier_unregister(dev, &dev->priv.fw_reset->nb);
+}
+
+int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = kzalloc(sizeof(*fw_reset), GFP_KERNEL);
+
+ if (!fw_reset)
+ return -ENOMEM;
+ fw_reset->wq = create_singlethread_workqueue("mlx5_fw_reset_events");
+ if (!fw_reset->wq) {
+ kfree(fw_reset);
+ return -ENOMEM;
+ }
+
+ fw_reset->dev = dev;
+ dev->priv.fw_reset = fw_reset;
+
+ INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
+ INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
+ INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
+ INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
+ INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
+
+ init_completion(&fw_reset->done);
+ return 0;
+}
+
+void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+
+ destroy_workqueue(fw_reset->wq);
+ kfree(dev->priv.fw_reset);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
new file mode 100644
index 000000000000..7761ee5fc7d0
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020, Mellanox Technologies inc. All rights reserved. */
+
+#ifndef __MLX5_FW_RESET_H
+#define __MLX5_FW_RESET_H
+
+#include "mlx5_core.h"
+
+void mlx5_fw_reset_enable_remote_dev_reset_set(struct mlx5_core_dev *dev, bool enable);
+bool mlx5_fw_reset_enable_remote_dev_reset_get(struct mlx5_core_dev *dev);
+int mlx5_fw_reset_query(struct mlx5_core_dev *dev, u8 *reset_level, u8 *reset_type);
+int mlx5_fw_reset_set_reset_sync(struct mlx5_core_dev *dev, u8 reset_type_sel);
+int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev);
+
+int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev);
+void mlx5_fw_reset_events_start(struct mlx5_core_dev *dev);
+void mlx5_fw_reset_events_stop(struct mlx5_core_dev *dev);
+int mlx5_fw_reset_init(struct mlx5_core_dev *dev);
+void mlx5_fw_reset_cleanup(struct mlx5_core_dev *dev);
+
+#endif
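
The header implies a simple lifecycle: init allocates the state and workqueue, events_start/stop gate the EQ notifier registration, cleanup frees everything. An illustrative ordering sketch only, inferred from the API above rather than from the actual probe path:

/* Hypothetical wiring of the fw_reset API into a probe/remove pair. */
static int probe_sketch(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_fw_reset_init(dev);		/* allocate state + workqueue */
	if (err)
		return err;

	mlx5_fw_reset_events_start(dev);	/* register GENERAL_EVENT notifier */
	return 0;
}

static void remove_sketch(struct mlx5_core_dev *dev)
{
	mlx5_fw_reset_events_stop(dev);
	mlx5_fw_reset_cleanup(dev);
}
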
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index b31f769d2df9..54523bed16cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -110,7 +110,7 @@ static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
return rfr && synd;
}
-static u32 check_fatal_sensors(struct mlx5_core_dev *dev)
+u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev)
{
if (sensor_pci_not_working(dev))
return MLX5_SENSOR_PCI_COMM_ERR;
@@ -173,7 +173,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev)
* Check again to avoid a redundant 2nd reset. If the fatal error was
* PCI related a reset won't help.
*/
- fatal_error = check_fatal_sensors(dev);
+ fatal_error = mlx5_health_check_fatal_sensors(dev);
if (fatal_error == MLX5_SENSOR_PCI_COMM_ERR ||
fatal_error == MLX5_SENSOR_NIC_DISABLED ||
fatal_error == MLX5_SENSOR_NIC_SW_RESET) {
@@ -195,7 +195,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
bool err_detected = false;
/* Mark the device as fatal in order to abort FW commands */
- if ((check_fatal_sensors(dev) || force) &&
+ if ((mlx5_health_check_fatal_sensors(dev) || force) &&
dev->state == MLX5_DEVICE_STATE_UP) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
err_detected = true;
@@ -208,7 +208,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
goto unlock;
}
- if (check_fatal_sensors(dev) || force) { /* protected state setting */
+ if (mlx5_health_check_fatal_sensors(dev) || force) { /* protected state setting */
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mlx5_cmd_flush(dev);
}
@@ -231,7 +231,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
mlx5_core_err(dev, "start\n");
- if (check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
+ if (mlx5_health_check_fatal_sensors(dev) == MLX5_SENSOR_FW_SYND_RFR) {
/* Get cr-dump and reset FW semaphore */
lock = lock_sem_sw_reset(dev, true);
@@ -308,26 +308,31 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
/* How much time to wait until health resetting the driver (in msecs) */
#define MLX5_RECOVERY_WAIT_MSECS 60000
-static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
+int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
unsigned long end;
- mlx5_core_warn(dev, "handling bad device here\n");
- mlx5_handle_bad_state(dev);
end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
while (sensor_pci_not_working(dev)) {
- if (time_after(jiffies, end)) {
- mlx5_core_err(dev,
- "health recovery flow aborted, PCI reads still not working\n");
- return -EIO;
- }
+ if (time_after(jiffies, end))
+ return -ETIMEDOUT;
msleep(100);
}
+ return 0;
+}
+static int mlx5_health_try_recover(struct mlx5_core_dev *dev)
+{
+ mlx5_core_warn(dev, "handling bad device here\n");
+ mlx5_handle_bad_state(dev);
+ if (mlx5_health_wait_pci_up(dev)) {
+ mlx5_core_err(dev, "health recovery flow aborted, PCI reads still not working\n");
+ return -EIO;
+ }
mlx5_core_err(dev, "starting health recovery flow\n");
mlx5_recover_device(dev);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state) ||
- check_fatal_sensors(dev)) {
+ mlx5_health_check_fatal_sensors(dev)) {
mlx5_core_err(dev, "health recovery failed\n");
return -EIO;
}
@@ -696,7 +701,7 @@ static void poll_health(struct timer_list *t)
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
goto out;
- fatal_error = check_fatal_sensors(dev);
+ fatal_error = mlx5_health_check_fatal_sensors(dev);
if (fatal_error && !health->fatal_error) {
mlx5_core_err(dev, "Fatal error %u detected\n", fatal_error);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index 874c70e8cc54..33081b24f10a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -102,7 +102,7 @@ int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
if (ldev->pf[i].netdev == ndev)
return i;
- return -1;
+ return -ENOENT;
}
static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev)
@@ -271,7 +271,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
bool do_bond, roce_lag;
int err;
- if (!dev0 || !dev1)
+ if (!mlx5_lag_is_ready(ldev))
return;
spin_lock(&lag_lock);
@@ -355,7 +355,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
{
struct net_device *upper = info->upper_dev, *ndev_tmp;
struct netdev_lag_upper_info *lag_upper_info = NULL;
- bool is_bonded;
+ bool is_bonded, is_in_lag, mode_supported;
int bond_status = 0;
int num_slaves = 0;
int idx;
@@ -374,7 +374,7 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
rcu_read_lock();
for_each_netdev_in_bond_rcu(upper, ndev_tmp) {
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev_tmp);
- if (idx > -1)
+ if (idx >= 0)
bond_status |= (1 << idx);
num_slaves++;
@@ -391,13 +391,24 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
* of the same lag master, and no other slaves are present.
- * Lag mode must be activebackup or hash.
*/
- is_bonded = (num_slaves == MLX5_MAX_PORTS) &&
- (bond_status == 0x3) &&
- ((tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) ||
- (tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH));
+ is_in_lag = num_slaves == MLX5_MAX_PORTS && bond_status == 0x3;
+ if (!mlx5_lag_is_ready(ldev) && is_in_lag) {
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, PF is configured with more than 64 VFs");
+ return 0;
+ }
+
+ /* Lag mode must be activebackup or hash. */
+ mode_supported = tracker->tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP ||
+ tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH;
+
+ if (is_in_lag && !mode_supported)
+ NL_SET_ERR_MSG_MOD(info->info.extack,
+ "Can't activate LAG offload, TX type isn't supported");
+
+ is_bonded = is_in_lag && mode_supported;
if (tracker->is_bonded != is_bonded) {
tracker->is_bonded = is_bonded;
return 1;
@@ -418,7 +429,7 @@ static int mlx5_handle_changelowerstate_event(struct mlx5_lag *ldev,
return 0;
idx = mlx5_lag_dev_get_netdev_idx(ldev, ndev);
- if (idx == -1)
+ if (idx < 0)
return 0;
/* This information is used to determine virtual to physical
@@ -445,6 +456,10 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
ldev = container_of(this, struct mlx5_lag, nb);
+
+ if (!mlx5_lag_is_ready(ldev) && event == NETDEV_CHANGELOWERSTATE)
+ return NOTIFY_DONE;
+
tracker = ldev->tracker;
switch (event) {
@@ -493,14 +508,14 @@ static void mlx5_lag_dev_free(struct mlx5_lag *ldev)
kfree(ldev);
}
-static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
- struct mlx5_core_dev *dev,
- struct net_device *netdev)
+static int mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
+ struct mlx5_core_dev *dev,
+ struct net_device *netdev)
{
unsigned int fn = PCI_FUNC(dev->pdev->devfn);
if (fn >= MLX5_MAX_PORTS)
- return;
+ return -EPERM;
spin_lock(&lag_lock);
ldev->pf[fn].dev = dev;
@@ -511,6 +526,8 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev,
dev->priv.lag = ldev;
spin_unlock(&lag_lock);
+
+ return fn;
}
static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev,
@@ -537,11 +554,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
{
struct mlx5_lag *ldev = NULL;
struct mlx5_core_dev *tmp_dev;
- int err;
+ int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -556,7 +571,18 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
}
- mlx5_lag_dev_add_pf(ldev, dev, netdev);
+ if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
+ return;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ tmp_dev = ldev->pf[i].dev;
+ if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
+ MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ break;
+ }
+
+ if (i >= MLX5_MAX_PORTS)
+ ldev->flags |= MLX5_LAG_FLAG_READY;
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
@@ -587,6 +613,8 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev)
mlx5_lag_dev_remove_pf(ldev, dev);
+ ldev->flags &= ~MLX5_LAG_FLAG_READY;
+
for (i = 0; i < MLX5_MAX_PORTS; i++)
if (ldev->pf[i].dev)
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index f1068aac6406..8d8cf2d0bc6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -16,6 +16,7 @@ enum {
MLX5_LAG_FLAG_ROCE = 1 << 0,
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
+ MLX5_LAG_FLAG_READY = 1 << 3,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
@@ -59,6 +60,12 @@ __mlx5_lag_is_active(struct mlx5_lag *ldev)
return !!(ldev->flags & MLX5_LAG_MODE_FLAGS);
}
+static inline bool
+mlx5_lag_is_ready(struct mlx5_lag *ldev)
+{
+ return ldev->flags & MLX5_LAG_FLAG_READY;
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker);
int mlx5_activate_lag(struct mlx5_lag *ldev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
index 9e68f5926ab6..88e58ac902de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
@@ -11,7 +11,7 @@
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
- if (!ldev->pf[MLX5_LAG_P1].dev || !ldev->pf[MLX5_LAG_P2].dev)
+ if (!mlx5_lag_is_ready(ldev))
return false;
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
@@ -131,7 +131,12 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev,
struct net_device *nh_dev = nh->fib_nh_dev;
int i = mlx5_lag_dev_get_netdev_idx(ldev, nh_dev);
- mlx5_lag_set_port_affinity(ldev, ++i);
+ if (i < 0)
+ i = MLX5_LAG_NORMAL_AFFINITY;
+ else
+ ++i;
+
+ mlx5_lag_set_port_affinity(ldev, i);
}
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 2d55b7c22c03..c70c1f0ca0c1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -150,28 +150,30 @@ static void mlx5_pps_out(struct work_struct *work)
static void mlx5_timestamp_overflow(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
- overflow_work);
+ struct mlx5_core_dev *mdev;
+ struct mlx5_clock *clock;
unsigned long flags;
+ clock = container_of(dwork, struct mlx5_clock, overflow_work);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
}
-static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp, const struct timespec64 *ts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
u64 ns = timespec64_to_ns(ts);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_init(&clock->tc, &clock->cycles, ns);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -180,13 +182,12 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
- struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
- clock);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
u64 cycles, ns;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
cycles = mlx5_read_internal_timer(mdev, sts);
ns = timecounter_cyc2time(&clock->tc, cycles);
@@ -199,13 +200,14 @@ static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts,
static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_adjtime(&clock->tc, delta);
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -213,12 +215,13 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
- u64 adj;
- u32 diff;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
unsigned long flags;
int neg_adj = 0;
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
- ptp_info);
+ u32 diff;
+ u64 adj;
+
if (delta < 0) {
neg_adj = 1;
@@ -229,11 +232,12 @@ static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
adj *= delta;
diff = div_u64(adj, 1000000000ULL);
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&clock->tc);
clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
clock->nominal_c_mult + diff;
- mlx5_update_clock_info_page(clock->mdev);
+ mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
return 0;
@@ -431,13 +435,11 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
default:
return -EOPNOTSUPP;
}
-
- return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
.owner = THIS_MODULE,
- .name = "mlx5_p2p",
+ .name = "mlx5_ptp",
.max_adj = 100000000,
.n_alarm = 0,
.n_ext_ts = 0,
@@ -465,7 +467,8 @@ static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
- struct mlx5_core_dev *mdev = clock->mdev;
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock);
+
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
@@ -538,20 +541,23 @@ static int mlx5_pps_event(struct notifier_block *nb,
unsigned long type, void *data)
{
struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb);
- struct mlx5_core_dev *mdev = clock->mdev;
struct ptp_clock_event ptp_event;
u64 cycles_now, cycles_delta;
u64 nsec_now, nsec_delta, ns;
struct mlx5_eqe *eqe = data;
int pin = eqe->data.pps.pin;
+ struct mlx5_core_dev *mdev;
struct timespec64 ts;
unsigned long flags;
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
switch (clock->ptp_info.pin_config[pin].func) {
case PTP_PF_EXTTS:
ptp_event.index = pin;
- ptp_event.timestamp = timecounter_cyc2time(&clock->tc,
- be64_to_cpu(eqe->data.pps.time_stamp));
+ ptp_event.timestamp =
+ mlx5_timecounter_cyc2time(clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
if (clock->pps_info.enabled) {
ptp_event.type = PTP_CLOCK_PPSUSR;
ptp_event.pps_times.ts_real =
@@ -574,8 +580,8 @@ static int mlx5_pps_event(struct notifier_block *nb,
cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
clock->cycles.mult);
clock->pps_info.start[pin] = cycles_now + cycles_delta;
- schedule_work(&clock->pps_info.out_work);
write_sequnlock_irqrestore(&clock->lock, flags);
+ schedule_work(&clock->pps_info.out_work);
break;
default:
mlx5_core_err(mdev, "Unhandled clock PPS event, func %d\n",
@@ -605,7 +611,6 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
clock->cycles.shift);
clock->nominal_c_mult = clock->cycles.mult;
clock->cycles.mask = CLOCKSOURCE_MASK(41);
- clock->mdev = mdev;
timecounter_init(&clock->tc, &clock->cycles,
ktime_to_ns(ktime_get_real()));
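
Throughout this file the refactor drops the clock->mdev back-pointer and instead derives the device from the embedded clock via container_of(), which works because the clock is embedded by value in mlx5_core_dev. A generic sketch of the pattern, with hypothetical names:

#include <linux/kernel.h>	/* container_of() */

struct inner { int x; };

struct outer {
	int cookie;
	struct inner member;	/* embedded by value, like mlx5_core_dev.clock */
};

static struct outer *outer_of(struct inner *in)
{
	/* Recover the enclosing struct without storing a back-pointer */
	return container_of(in, struct outer, member);
}
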
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
index 5c681e31983b..81f2cc4ca1da 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h
@@ -78,7 +78,7 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
void mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn);
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev);
-void mlx5_cq_tasklet_cb(unsigned long data);
+void mlx5_cq_tasklet_cb(struct tasklet_struct *t);
struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
new file mode 100644
index 000000000000..947f346bdc2d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/fs.h>
+
+#include "lib/fs_chains.h"
+#include "en/mapping.h"
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+
+#define chains_lock(chains) ((chains)->lock)
+#define chains_ht(chains) ((chains)->chains_ht)
+#define chains_mapping(chains) ((chains)->chains_mapping)
+#define prios_ht(chains) ((chains)->prios_ht)
+#define ft_pool_left(chains) ((chains)->ft_left)
+#define tc_default_ft(chains) ((chains)->tc_default_ft)
+#define tc_end_ft(chains) ((chains)->tc_end_ft)
+#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
+ FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
+
+/* Firmware currently supports 4 pools of 4 sizes (FT_POOLS) and a
+ * virtual memory region of 16M (FT_SIZE) that is duplicated for each
+ * flow table pool. We can allocate up to 16M from each pool, and we
+ * keep track of how much we have used via mlx5_chains_get_avail_sz_from_pool.
+ * Firmware doesn't report any of this for now.
+ * FT_POOLS is expected to be sorted from large to small and to match the
+ * firmware pools.
+ */
+#define FT_SIZE (16 * 1024 * 1024)
+static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 128 };
+#define FT_TBL_SZ (64 * 1024)
+
+struct mlx5_fs_chains {
+ struct mlx5_core_dev *dev;
+
+ struct rhashtable chains_ht;
+ struct rhashtable prios_ht;
+ /* Protects above chains_ht and prios_ht */
+ struct mutex lock;
+
+ struct mlx5_flow_table *tc_default_ft;
+ struct mlx5_flow_table *tc_end_ft;
+ struct mapping_ctx *chains_mapping;
+
+ enum mlx5_flow_namespace_type ns;
+ u32 group_num;
+ u32 flags;
+
+ int ft_left[ARRAY_SIZE(FT_POOLS)];
+};
+
+struct fs_chain {
+ struct rhash_head node;
+
+ u32 chain;
+
+ int ref;
+ int id;
+
+ struct mlx5_fs_chains *chains;
+ struct list_head prios_list;
+ struct mlx5_flow_handle *restore_rule;
+ struct mlx5_modify_hdr *miss_modify_hdr;
+};
+
+struct prio_key {
+ u32 chain;
+ u32 prio;
+ u32 level;
+};
+
+struct prio {
+ struct rhash_head node;
+ struct list_head list;
+
+ struct prio_key key;
+
+ int ref;
+
+ struct fs_chain *chain;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+};
+
+static const struct rhashtable_params chain_params = {
+ .head_offset = offsetof(struct fs_chain, node),
+ .key_offset = offsetof(struct fs_chain, chain),
+ .key_len = sizeof_field(struct fs_chain, chain),
+ .automatic_shrinking = true,
+};
+
+static const struct rhashtable_params prio_params = {
+ .head_offset = offsetof(struct prio, node),
+ .key_offset = offsetof(struct prio, key),
+ .key_len = sizeof_field(struct prio, key),
+ .automatic_shrinking = true,
+};
+
+bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+}
+
+bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_prios_supported(chains) &&
+ mlx5_chains_ignore_flow_level_supported(chains);
+}
+
+u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX - 1;
+
+ /* We should get here only for the eswitch case */
+ return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_get_chain_range(chains) + 1;
+}
+
+u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+ /* We should get here only for the eswitch case */
+ return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
+{
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+ /* Same value for FDB and NIC RX tables */
+ return FDB_TC_LEVELS_PER_PRIO;
+}
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ tc_end_ft(chains) = ft;
+}
+
+#define POOL_NEXT_SIZE 0
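+/* Passing POOL_NEXT_SIZE (0) takes the largest size that still has free
+ * tables, while any other desired_size takes the smallest pool size that
+ * is strictly larger than it.
+ */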
+static int
+mlx5_chains_get_avail_sz_from_pool(struct mlx5_fs_chains *chains,
+ int desired_size)
+{
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (ft_pool_left(chains)[i] && FT_POOLS[i] > desired_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --ft_pool_left(chains)[found_i];
+ return FT_POOLS[found_i];
+ }
+
+ return 0;
+}
+
+static void
+mlx5_chains_put_sz_to_pool(struct mlx5_fs_chains *chains, int sz)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (sz == FT_POOLS[i]) {
+ ++ft_pool_left(chains)[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
+}
+
+static void
+mlx5_chains_init_sz_pool(struct mlx5_fs_chains *chains, u32 ft_max)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
+ ft_pool_left(chains)[i] =
+ FT_POOLS[i] <= ft_max ? FT_SIZE / FT_POOLS[i] : 0;
+}
+
+static struct mlx5_flow_table *
+mlx5_chains_create_table(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int sz;
+
+ if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
+ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ mlx5_chains_get_avail_sz_from_pool(chains, FT_TBL_SZ) :
+ mlx5_chains_get_avail_sz_from_pool(chains, POOL_NEXT_SIZE);
+ if (!sz)
+ return ERR_PTR(-ENOSPC);
+ ft_attr.max_fte = sz;
+
+ /* We use tc_default_ft(chains) as the table's next_ft until
+ * ignore_flow_level is allowed on FT creation, and not just for FTEs.
+ * Instead, the caller should add an explicit miss rule if needed.
+ */
+ ft_attr.next_ft = tc_default_ft(chains);
+
+ /* The root table (chain 0, prio 1, level 0) is required to be
+ * connected to the previous fs_core managed prio.
+ * We always create it, as a managed table, in order to align with
+ * fs_core logic.
+ */
+ if (!mlx5_chains_ignore_flow_level_supported(chains) ||
+ (chain == 0 && prio == 1 && level == 0)) {
+ ft_attr.level = level;
+ ft_attr.prio = prio - 1;
+ ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
+ mlx5_get_fdb_sub_ns(chains->dev, chain) :
+ mlx5_get_flow_namespace(chains->dev, chains->ns);
+ } else {
+ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = ns_to_chains_fs_prio(chains->ns);
+ /* Firmware doesn't allow us to create another level 0 table,
+ * so we create all unmanaged tables as level 1.
+ *
+ * To connect them, we use explicit miss rules with
+ * ignore_flow_level. The caller is responsible for creating
+ * these rules (if needed).
+ */
+ ft_attr.level = 1;
+ ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
+ }
+
+ ft_attr.autogroup.num_reserved_entries = 2;
+ ft_attr.autogroup.max_num_groups = chains->group_num;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(ft), chain, prio, level, sz);
+ mlx5_chains_put_sz_to_pool(chains, sz);
+ return ft;
+ }
+
+ return ft;
+}
+
+static void
+mlx5_chains_destroy_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_chains_put_sz_to_pool(chains, ft->max_fte);
+ mlx5_destroy_flow_table(ft);
+}
+
+static int
+create_chain_restore(struct fs_chain *chain)
+{
+ struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
+ char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+ struct mlx5_fs_chains *chains = chain->chains;
+ enum mlx5e_tc_attr_to_reg chain_to_reg;
+ struct mlx5_modify_hdr *mod_hdr;
+ u32 index;
+ int err;
+
+ if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
+ !mlx5_chains_prios_supported(chains))
+ return 0;
+
+ err = mapping_add(chains_mapping(chains), &chain->chain, &index);
+ if (err)
+ return err;
+ if (index == MLX5_FS_DEFAULT_FLOW_TAG) {
+ /* we got the special default flow tag id, so we won't know
+ * if we actually marked the packet with the restore rule
+ * we create.
+ *
+ * This case isn't possible with MLX5_FS_DEFAULT_FLOW_TAG = 0.
+ */
+ err = mapping_add(chains_mapping(chains),
+ &chain->chain, &index);
+ mapping_remove(chains_mapping(chains),
+ MLX5_FS_DEFAULT_FLOW_TAG);
+ if (err)
+ return err;
+ }
+
+ chain->id = index;
+
+ if (chains->ns == MLX5_FLOW_NAMESPACE_FDB) {
+ chain_to_reg = CHAIN_TO_REG;
+ chain->restore_rule = esw_add_restore_rule(esw, chain->id);
+ if (IS_ERR(chain->restore_rule)) {
+ err = PTR_ERR(chain->restore_rule);
+ goto err_rule;
+ }
+ } else if (chains->ns == MLX5_FLOW_NAMESPACE_KERNEL) {
+ /* For NIC RX we don't need a restore rule
+ * since we write the metadata to reg_b
+ * that is passed to SW directly.
+ */
+ chain_to_reg = NIC_CHAIN_TO_REG;
+ } else {
+ err = -EINVAL;
+ goto err_rule;
+ }
+
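+ /* Build a header rewrite that stores the mapped id in the chosen
+ * register; the chain's miss rules use it to mark packets so the
+ * chain can be restored from the id.
+ */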
+ MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, modact, field,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mfield);
+ MLX5_SET(set_action_in, modact, offset,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].moffset * 8);
+ MLX5_SET(set_action_in, modact, length,
+ mlx5e_tc_attr_to_reg_mappings[chain_to_reg].mlen * 8);
+ MLX5_SET(set_action_in, modact, data, chain->id);
+ mod_hdr = mlx5_modify_header_alloc(chains->dev, chains->ns,
+ 1, modact);
+ if (IS_ERR(mod_hdr)) {
+ err = PTR_ERR(mod_hdr);
+ goto err_mod_hdr;
+ }
+ chain->miss_modify_hdr = mod_hdr;
+
+ return 0;
+
+err_mod_hdr:
+ if (!IS_ERR_OR_NULL(chain->restore_rule))
+ mlx5_del_flow_rules(chain->restore_rule);
+err_rule:
+ /* Datapath can't find this mapping, so we can safely remove it */
+ mapping_remove(chains_mapping(chains), chain->id);
+ return err;
+}
+
+static void destroy_chain_restore(struct fs_chain *chain)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+
+ if (!chain->miss_modify_hdr)
+ return;
+
+ if (chain->restore_rule)
+ mlx5_del_flow_rules(chain->restore_rule);
+
+ mlx5_modify_header_dealloc(chains->dev, chain->miss_modify_hdr);
+ mapping_remove(chains_mapping(chains), chain->id);
+}
+
+static struct fs_chain *
+mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s = NULL;
+ int err;
+
+ chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
+ if (!chain_s)
+ return ERR_PTR(-ENOMEM);
+
+ chain_s->chains = chains;
+ chain_s->chain = chain;
+ INIT_LIST_HEAD(&chain_s->prios_list);
+
+ err = create_chain_restore(chain_s);
+ if (err)
+ goto err_restore;
+
+ err = rhashtable_insert_fast(&chains_ht(chains), &chain_s->node,
+ chain_params);
+ if (err)
+ goto err_insert;
+
+ return chain_s;
+
+err_insert:
+ destroy_chain_restore(chain_s);
+err_restore:
+ kvfree(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_chain(struct fs_chain *chain)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+
+ rhashtable_remove_fast(&chains_ht(chains), &chain->node,
+ chain_params);
+
+ destroy_chain_restore(chain);
+ kvfree(chain);
+}
+
+static struct fs_chain *
+mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s;
+
+ chain_s = rhashtable_lookup_fast(&chains_ht(chains), &chain,
+ chain_params);
+ if (!chain_s) {
+ chain_s = mlx5_chains_create_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return chain_s;
+ }
+
+ chain_s->ref++;
+
+ return chain_s;
+}
+
+static struct mlx5_flow_handle *
+mlx5_chains_add_miss_rule(struct fs_chain *chain,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_NO_APPEND;
+ if (mlx5_chains_ignore_flow_level_supported(chain->chains))
+ act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
+ if (next_ft == tc_end_ft(chains) &&
+ chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
+ mlx5_chains_prios_supported(chains)) {
+ act.modify_hdr = chain->miss_modify_hdr;
+ act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ }
+
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+mlx5_chains_update_prio_prevs(struct prio *prio,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+ struct fs_chain *chain = prio->chain;
+ struct prio *pos;
+ int n = 0, err;
+
+ if (prio->key.level)
+ return 0;
+
+ /* Iterate in reverse order until reaching the level 0 rule of
+ * the previous priority, adding all the miss rules first, so we can
+ * revert them if any of them fails.
+ */
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ miss_rules[n] = mlx5_chains_add_miss_rule(chain,
+ pos->ft,
+ next_ft);
+ if (IS_ERR(miss_rules[n])) {
+ err = PTR_ERR(miss_rules[n]);
+ goto err_prev_rule;
+ }
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ /* Success, delete old miss rules, and update the pointers. */
+ n = 0;
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ mlx5_del_flow_rules(pos->miss_rule);
+
+ pos->miss_rule = miss_rules[n];
+ pos->next_ft = next_ft;
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ return 0;
+
+err_prev_rule:
+ while (--n >= 0)
+ mlx5_del_flow_rules(miss_rules[n]);
+
+ return err;
+}
+
+static void
+mlx5_chains_put_chain(struct fs_chain *chain)
+{
+ if (--chain->ref == 0)
+ mlx5_chains_destroy_chain(chain);
+}
+
+static struct prio *
+mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_handle *miss_rule = NULL;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *ft;
+ struct prio *prio_s = NULL;
+ struct fs_chain *chain_s;
+ struct list_head *pos;
+ u32 *flow_group_in;
+ int err;
+
+ chain_s = mlx5_chains_get_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return ERR_CAST(chain_s);
+
+ prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!prio_s || !flow_group_in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* The chain's prio list is sorted by prio and level, and all levels
+ * of a given prio point to the next prio's level 0 table.
+ * Example list (prio, level):
+ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+ * In hardware, we will have the following pointers:
+ * (3,0) -> (5,0) -> (7,0) -> Slow path
+ * (3,1) -> (5,0)
+ * (5,1) -> (7,0)
+ * (6,1) -> (7,0)
+ */
+
+ /* Default miss for each chain: */
+ next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ tc_default_ft(chains) :
+ tc_end_ft(chains);
+ list_for_each(pos, &chain_s->prios_list) {
+ struct prio *p = list_entry(pos, struct prio, list);
+
+ /* exit on first pos that is larger */
+ if (prio < p->key.prio || (prio == p->key.prio &&
+ level < p->key.level)) {
+ /* Get next level 0 table */
+ next_ft = p->key.level == 0 ? p->ft : p->next_ft;
+ break;
+ }
+ }
+
+ ft = mlx5_chains_create_table(chains, chain, prio, level);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_create;
+ }
+
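+ /* Place the miss group on the two entries reserved at the end of
+ * the table (autogroup.num_reserved_entries above).
+ */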
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ goto err_group;
+ }
+
+ /* Add miss rule to next_ft */
+ miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
+ if (IS_ERR(miss_rule)) {
+ err = PTR_ERR(miss_rule);
+ goto err_miss_rule;
+ }
+
+ prio_s->miss_group = miss_group;
+ prio_s->miss_rule = miss_rule;
+ prio_s->next_ft = next_ft;
+ prio_s->chain = chain_s;
+ prio_s->key.chain = chain;
+ prio_s->key.prio = prio;
+ prio_s->key.level = level;
+ prio_s->ft = ft;
+
+ err = rhashtable_insert_fast(&prios_ht(chains), &prio_s->node,
+ prio_params);
+ if (err)
+ goto err_insert;
+
+ list_add(&prio_s->list, pos->prev);
+
+ /* Table is ready, connect it */
+ err = mlx5_chains_update_prio_prevs(prio_s, ft);
+ if (err)
+ goto err_update;
+
+ kvfree(flow_group_in);
+ return prio_s;
+
+err_update:
+ list_del(&prio_s->list);
+ rhashtable_remove_fast(&prios_ht(chains), &prio_s->node,
+ prio_params);
+err_insert:
+ mlx5_del_flow_rules(miss_rule);
+err_miss_rule:
+ mlx5_destroy_flow_group(miss_group);
+err_group:
+ mlx5_chains_destroy_table(chains, ft);
+err_create:
+err_alloc:
+ kvfree(prio_s);
+ kvfree(flow_group_in);
+ mlx5_chains_put_chain(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
+ struct prio *prio)
+{
+ struct fs_chain *chain = prio->chain;
+
+ WARN_ON(mlx5_chains_update_prio_prevs(prio,
+ prio->next_ft));
+
+ list_del(&prio->list);
+ rhashtable_remove_fast(&prios_ht(chains), &prio->node,
+ prio_params);
+ mlx5_del_flow_rules(prio->miss_rule);
+ mlx5_destroy_flow_group(prio->miss_group);
+ mlx5_chains_destroy_table(chains, prio->ft);
+ mlx5_chains_put_chain(chain);
+ kvfree(prio);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct mlx5_flow_table *prev_fts;
+ struct prio *prio_s;
+ struct prio_key key;
+ int l = 0;
+
+ if ((chain > mlx5_chains_get_chain_range(chains) &&
+ chain != mlx5_chains_get_nf_ft_chain(chains)) ||
+ prio > mlx5_chains_get_prio_range(chains) ||
+ level > mlx5_chains_get_level_range(chains))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables.
+ */
+ for (l = 0; l < level; l++) {
+ prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
+ if (IS_ERR(prev_fts)) {
+ prio_s = ERR_CAST(prev_fts);
+ goto err_get_prevs;
+ }
+ }
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&chains_lock(chains));
+ prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
+ prio_params);
+ if (!prio_s) {
+ prio_s = mlx5_chains_create_prio(chains, chain,
+ prio, level);
+ if (IS_ERR(prio_s))
+ goto err_create_prio;
+ }
+
+ ++prio_s->ref;
+ mutex_unlock(&chains_lock(chains));
+
+ return prio_s->ft;
+
+err_create_prio:
+ mutex_unlock(&chains_lock(chains));
+err_get_prevs:
+ while (--l >= 0)
+ mlx5_chains_put_table(chains, chain, prio, l);
+ return ERR_CAST(prio_s);
+}
+
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct prio *prio_s;
+ struct prio_key key;
+
+ key.chain = chain;
+ key.prio = prio;
+ key.level = level;
+
+ mutex_lock(&chains_lock(chains));
+ prio_s = rhashtable_lookup_fast(&prios_ht(chains), &key,
+ prio_params);
+ if (!prio_s)
+ goto err_get_prio;
+
+ if (--prio_s->ref == 0)
+ mlx5_chains_destroy_prio(chains, prio_s);
+ mutex_unlock(&chains_lock(chains));
+
+ while (level-- > 0)
+ mlx5_chains_put_table(chains, chain, prio, level);
+
+ return;
+
+err_get_prio:
+ mutex_unlock(&chains_lock(chains));
+ WARN_ONCE(1,
+ "Couldn't find table: (chain: %d prio: %d level: %d)",
+ chain, prio, level);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
+{
+ return tc_end_ft(chains);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
+{
+ u32 chain, prio, level;
+ int err;
+
+ if (!mlx5_chains_ignore_flow_level_supported(chains)) {
+ err = -EOPNOTSUPP;
+
+ mlx5_core_warn(chains->dev,
+ "Couldn't create global flow table, ignore_flow_level not supported.");
+ goto err_ignore;
+ }
+
+ chain = mlx5_chains_get_chain_range(chains);
+ prio = mlx5_chains_get_prio_range(chains);
+ level = mlx5_chains_get_level_range(chains);
+
+ return mlx5_chains_create_table(chains, chain, prio, level);
+
+err_ignore:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_chains_destroy_table(chains, ft);
+}
+
+static struct mlx5_fs_chains *
+mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains_priv;
+ struct mapping_ctx *mapping;
+ u32 max_flow_counter;
+ int err;
+
+ chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL);
+ if (!chains_priv)
+ return ERR_PTR(-ENOMEM);
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ mlx5_core_dbg(dev,
+ "Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n",
+ max_flow_counter, attr->max_grp_num, attr->max_ft_sz);
+
+ chains_priv->dev = dev;
+ chains_priv->flags = attr->flags;
+ chains_priv->ns = attr->ns;
+ chains_priv->group_num = attr->max_grp_num;
+ tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
+
+ mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n",
+ mlx5_chains_get_chain_range(chains_priv),
+ mlx5_chains_get_prio_range(chains_priv));
+
+ mlx5_chains_init_sz_pool(chains_priv, attr->max_ft_sz);
+
+ err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
+ if (err)
+ goto init_chains_ht_err;
+
+ err = rhashtable_init(&prios_ht(chains_priv), &prio_params);
+ if (err)
+ goto init_prios_ht_err;
+
+ mapping = mapping_create(sizeof(u32), attr->max_restore_tag,
+ true);
+ if (IS_ERR(mapping)) {
+ err = PTR_ERR(mapping);
+ goto mapping_err;
+ }
+ chains_mapping(chains_priv) = mapping;
+
+ mutex_init(&chains_lock(chains_priv));
+
+ return chains_priv;
+
+mapping_err:
+ rhashtable_destroy(&prios_ht(chains_priv));
+init_prios_ht_err:
+ rhashtable_destroy(&chains_ht(chains_priv));
+init_chains_ht_err:
+ kfree(chains_priv);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
+{
+ mutex_destroy(&chains_lock(chains));
+ mapping_destroy(chains_mapping(chains));
+ rhashtable_destroy(&prios_ht(chains));
+ rhashtable_destroy(&chains_ht(chains));
+
+ kfree(chains);
+}
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains;
+
+ chains = mlx5_chains_init(dev, attr);
+
+ return chains;
+}
+
+void
+mlx5_chains_destroy(struct mlx5_fs_chains *chains)
+{
+ mlx5_chains_cleanup(chains);
+}
+
+int
+mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
+ u32 *chain_mapping)
+{
+ return mapping_add(chains_mapping(chains), &chain, chain_mapping);
+}
+
+int
+mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
+{
+ return mapping_remove(chains_mapping(chains), chain_mapping);
+}
+
+int mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag,
+ u32 *chain)
+{
+ int err;
+
+ err = mapping_find(chains_mapping(chains), tag, chain);
+ if (err) {
+ mlx5_core_warn(chains->dev, "Can't find chain for tag: %d\n", tag);
+ return -ENOENT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
new file mode 100644
index 000000000000..6d5be31b05dd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __MLX5_FS_CHAINS_H__
+#define __MLX5_FS_CHAINS_H__
+
+#include <linux/mlx5/fs.h>
+
+struct mlx5_fs_chains;
+
+enum mlx5_chains_flags {
+ MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED = BIT(1),
+ MLX5_CHAINS_FT_TUNNEL_SUPPORTED = BIT(2),
+};
+
+struct mlx5_chains_attr {
+ enum mlx5_flow_namespace_type ns;
+ u32 flags;
+ u32 max_ft_sz;
+ u32 max_grp_num;
+ struct mlx5_flow_table *default_ft;
+ u32 max_restore_tag;
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+
+bool
+mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool
+mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains);
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+
+int
+mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
+ u32 *chain_mapping);
+int
+mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains,
+ u32 chain_mapping);
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
+void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
+
+int
+mlx5_get_chain_for_tag(struct mlx5_fs_chains *chains, u32 tag, u32 *chain);
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+
+#else /* CONFIG_MLX5_CLS_ACT */
+
+static inline struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level) { return ERR_PTR(-EOPNOTSUPP); }
+static inline void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level) {}
+
+static inline struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains) { return ERR_PTR(-EOPNOTSUPP); }
+
+static inline struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{ return NULL; }
+static inline void
+mlx5_chains_destroy(struct mlx5_fs_chains *chains) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+
+#endif /* __MLX5_FS_CHAINS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ce43e3feccd9..8ff207aa1479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -57,6 +57,7 @@
#include "lib/mpfs.h"
#include "eswitch.h"
#include "devlink.h"
+#include "fw_reset.h"
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
@@ -548,6 +549,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, dct))
MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
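+ /* Opt in to the PCI sync for FW update event when firmware
+ * supports it.
+ */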
+ if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);
+
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
set_hca_cap,
@@ -739,7 +743,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
pci_set_drvdata(dev->pdev, dev);
dev->bar_addr = pci_resource_start(pdev, 0);
- priv->numa_node = dev_to_node(&dev->pdev->dev);
+ priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
err = mlx5_pci_enable_device(dev);
if (err) {
@@ -832,6 +836,12 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
goto err_eq_cleanup;
}
+ err = mlx5_fw_reset_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to initialize fw reset events\n");
+ goto err_events_cleanup;
+ }
+
mlx5_cq_debugfs_init(dev);
mlx5_init_reserved_gids(dev);
@@ -893,6 +903,8 @@ err_tables_cleanup:
mlx5_geneve_destroy(dev->geneve);
mlx5_vxlan_destroy(dev->vxlan);
mlx5_cq_debugfs_cleanup(dev);
+ mlx5_fw_reset_cleanup(dev);
+err_events_cleanup:
mlx5_events_cleanup(dev);
err_eq_cleanup:
mlx5_eq_table_cleanup(dev);
@@ -920,6 +932,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cq_debugfs_cleanup(dev);
+ mlx5_fw_reset_cleanup(dev);
mlx5_events_cleanup(dev);
mlx5_eq_table_cleanup(dev);
mlx5_irq_table_cleanup(dev);
@@ -1078,6 +1091,7 @@ static int mlx5_load(struct mlx5_core_dev *dev)
goto err_fw_tracer;
}
+ mlx5_fw_reset_events_start(dev);
mlx5_hv_vhca_init(dev->hv_vhca);
err = mlx5_rsc_dump_init(dev);
@@ -1139,6 +1153,7 @@ err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
+ mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
err_fw_tracer:
mlx5_eq_table_destroy(dev);
@@ -1161,6 +1176,7 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_fpga_device_stop(dev);
mlx5_rsc_dump_cleanup(dev);
mlx5_hv_vhca_cleanup(dev->hv_vhca);
+ mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
mlx5_eq_table_destroy(dev);
mlx5_irq_table_destroy(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fc1649dac11b..8cec85ab419d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -100,6 +100,11 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
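+/* Device on which DMA mappings and allocations for this function are done */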
+static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
+{
+ return &dev->pdev->dev;
+}
+
enum {
MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */
@@ -123,6 +128,8 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_error_sw_reset(struct mlx5_core_dev *dev);
+u32 mlx5_health_check_fatal_sensors(struct mlx5_core_dev *dev);
+int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev);
void mlx5_disable_device(struct mlx5_core_dev *dev);
void mlx5_recover_device(struct mlx5_core_dev *dev);
int mlx5_sriov_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index c0e18f2ade99..150638814517 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -238,7 +238,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
rb_erase(&fwp->rb_node, root);
if (in_free_list)
list_del(&fwp->list);
- dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
+ dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
@@ -265,7 +265,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
- struct device *device = dev->device;
+ struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index c63f727273d8..7df883686d46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -203,7 +203,6 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param mask = {};
- struct mlx5dr_match_misc3 *misc3;
struct mlx5dr_ste_build *sb;
bool inner, rx;
int idx = 0;
@@ -252,18 +251,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
- ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
- dmn, inner, rx);
- if (ret)
- return ret;
+ mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
+ dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
- ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
- inner, rx);
- if (ret)
- return ret;
+ mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
+ inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
@@ -313,8 +308,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask,
inner, rx);
- misc3 = &mask.misc3;
- if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc3) &&
+ if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(&mask.misc3) &&
mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) ||
(dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) &&
mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) {
@@ -340,10 +334,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
- ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
- &mask, inner, rx);
- if (ret)
- return ret;
+ mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
+ &mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 6ec5106bc472..b3c9dc032026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -242,7 +242,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
- if (mlx5dr_ste_not_used_ste(new_ste)) {
+ if (mlx5dr_ste_is_not_used(new_ste)) {
mlx5dr_htbl_get(new_htbl);
list_add_tail(&new_ste->miss_list_node,
mlx5dr_ste_get_miss_list(new_ste));
@@ -335,7 +335,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
for (i = 0; i < cur_entries; i++) {
cur_ste = &cur_htbl->ste_arr[i];
- if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
+ if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
continue;
err = dr_rule_rehash_copy_miss_list(matcher,
@@ -791,7 +791,7 @@ again:
miss_list = &cur_htbl->chunk->miss_list[index];
ste = &cur_htbl->ste_arr[index];
- if (mlx5dr_ste_not_used_ste(ste)) {
+ if (mlx5dr_ste_is_not_used(ste)) {
if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
ste, ste_location,
hw_ste, miss_list,
@@ -985,31 +985,28 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
enum mlx5dr_ste_entry_type ste_type,
struct mlx5dr_match_param *mask,
- struct mlx5dr_match_param *value)
+ struct mlx5dr_match_param *value,
+ u32 flow_source)
{
+ bool rx = ste_type == MLX5DR_STE_TYPE_RX;
+
if (domain != MLX5DR_DOMAIN_TYPE_FDB)
return false;
if (mask->misc.source_port) {
- if (ste_type == MLX5DR_STE_TYPE_RX)
- if (value->misc.source_port != WIRE_PORT)
- return true;
+ if (rx && value->misc.source_port != WIRE_PORT)
+ return true;
- if (ste_type == MLX5DR_STE_TYPE_TX)
- if (value->misc.source_port == WIRE_PORT)
- return true;
+ if (!rx && value->misc.source_port == WIRE_PORT)
+ return true;
}
- /* Metadata C can be used to describe the source vport */
- if (mask->misc2.metadata_reg_c_0) {
- if (ste_type == MLX5DR_STE_TYPE_RX)
- if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
- return true;
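+ /* With an explicit flow_source there is no need to match on
+ * metadata: packets from a local vport are never seen by the
+ * RX flow, and packets from the uplink are never seen by TX.
+ */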
+ if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
+ return true;
+
+ if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
+ return true;
- if (ste_type == MLX5DR_STE_TYPE_TX)
- if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
- return true;
- }
return false;
}
@@ -1038,7 +1035,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
INIT_LIST_HEAD(&nic_rule->rule_members_list);
- if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
+ if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
+ rule->flow_source))
return 0;
hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
@@ -1173,7 +1171,8 @@ static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
- struct mlx5dr_action *actions[])
+ struct mlx5dr_action *actions[],
+ u32 flow_source)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param param = {};
@@ -1188,6 +1187,7 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
return NULL;
rule->matcher = matcher;
+ rule->flow_source = flow_source;
INIT_LIST_HEAD(&rule->rule_actions_list);
ret = dr_rule_add_action_members(rule, num_actions, actions);
@@ -1232,13 +1232,14 @@ free_rule:
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
- struct mlx5dr_action *actions[])
+ struct mlx5dr_action *actions[],
+ u32 flow_source)
{
struct mlx5dr_rule *rule;
refcount_inc(&matcher->refcount);
- rule = dr_rule_create_rule(matcher, value, num_actions, actions);
+ rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
if (!rule)
refcount_dec(&matcher->refcount);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index 2ca79b9bde1f..24dede1b0a20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -466,10 +466,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
* need to add the bit_mask
*/
for (j = 0; j < num_stes_per_iter; j++) {
- u8 *hw_ste = htbl->ste_arr[ste_index + j].hw_ste;
+ struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
u32 ste_off = j * DR_STE_SIZE;
- if (mlx5dr_ste_is_not_valid_entry(hw_ste)) {
+ if (mlx5dr_ste_is_not_used(ste)) {
memcpy(data + ste_off,
formatted_ste, DR_STE_SIZE);
} else {
@@ -831,7 +831,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
if (!mr)
return NULL;
- dma_device = &mdev->pdev->dev;
+ dma_device = mlx5_core_dma_dev(mdev);
dma_addr = dma_map_single(dma_device, buf, size,
DMA_BIDIRECTIONAL);
err = dma_mapping_error(dma_device, dma_addr);
@@ -860,7 +860,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
{
mlx5_core_destroy_mkey(mdev, &mr->mkey);
- dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
+ dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
DMA_BIDIRECTIONAL);
kfree(mr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 00c2f598f034..b01aaec75622 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -155,6 +155,13 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
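+
+/* Tag-building callbacks now receive only the tag area of the STE;
+ * return a pointer to it.
+ */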
+static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
+{
+ struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+
+ return hw_ste->tag;
+}
+
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@@ -549,25 +556,6 @@ void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
-/* The assumption here is that we don't update the ste->hw_ste if it is not
- * used ste, so it will be all zero, checking the next_lu_type.
- */
-bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
-{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
-
- if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
- MLX5DR_STE_LU_TYPE_NOP)
- return true;
-
- return false;
-}
-
-bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
-{
- return !ste->refcount;
-}
-
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
@@ -728,7 +716,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
{
if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
- mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
+ mlx5dr_err(dmn,
+ "Partial mask source_port is not supported\n");
+ return -EINVAL;
+ }
+ if (mask->misc.source_eswitch_owner_vhca_id &&
+ mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
+ mlx5dr_err(dmn,
+ "Partial mask source_eswitch_owner_vhca_id is not supported\n");
return -EINVAL;
}
}
@@ -760,7 +755,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
- ret = sb->ste_build_tag_func(value, sb, ste_arr);
+ ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@@ -778,8 +773,8 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
return 0;
}
-static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
- bool inner, u8 *bit_mask)
+static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
+ bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
@@ -807,13 +802,6 @@ static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value
MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
mask->svlan_tag = 0;
}
-
- if (mask->cvlan_tag || mask->svlan_tag) {
- pr_info("Invalid c/svlan mask configuration\n");
- return -EINVAL;
- }
-
- return 0;
}
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
@@ -1059,11 +1047,9 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
@@ -1104,23 +1090,17 @@ static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
return 0;
}
-int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx)
+void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx)
{
- int ret;
-
- ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
- if (ret)
- return ret;
+ dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
-
- return 0;
}
static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
@@ -1136,11 +1116,9 @@ static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
@@ -1176,11 +1154,9 @@ static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
@@ -1238,11 +1214,9 @@ static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param
static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
@@ -1328,12 +1302,10 @@ dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
}
static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
- bool inner, u8 *hw_ste_p)
+ bool inner, u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_spec = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
@@ -1403,16 +1375,14 @@ static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
@@ -1440,16 +1410,14 @@ static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
- return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
+ return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
@@ -1495,12 +1463,10 @@ static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
@@ -1561,11 +1527,9 @@ static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *va
static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
@@ -1608,11 +1572,9 @@ static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
@@ -1647,7 +1609,7 @@ void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
return 0;
}
@@ -1673,11 +1635,9 @@ static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
- u8 *tag = hw_ste->tag;
if (sb->inner)
DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
@@ -1716,11 +1676,9 @@ static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
@@ -1781,11 +1739,9 @@ static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
- u8 *tag = hw_ste->tag;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
@@ -1903,11 +1859,9 @@ static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
- u8 *tag = hw_ste->tag;
u32 icmp_header_data;
int dw0_location;
int dw1_location;
@@ -2007,11 +1961,9 @@ static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
misc_2_mask, metadata_reg_a);
@@ -2052,11 +2004,9 @@ static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
- u8 *tag = hw_ste->tag;
if (sb->inner) {
DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
@@ -2102,11 +2052,9 @@ dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value
static int
dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_flags, misc3,
@@ -2158,11 +2106,9 @@ dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
static int
dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_protocol_type, misc, geneve_protocol_type);
@@ -2205,11 +2151,9 @@ static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
@@ -2249,11 +2193,9 @@ static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
- u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
@@ -2276,38 +2218,25 @@ void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
}
-static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
- u8 *bit_mask)
+static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+ u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
- /* Partial misc source_port is not supported */
- if (misc_mask->source_port && misc_mask->source_port != 0xffff)
- return -EINVAL;
-
- /* Partial misc source_eswitch_owner_vhca_id is not supported */
- if (misc_mask->source_eswitch_owner_vhca_id &&
- misc_mask->source_eswitch_owner_vhca_id != 0xffff)
- return -EINVAL;
-
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
misc_mask->source_eswitch_owner_vhca_id = 0;
-
- return 0;
}
static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p)
+ u8 *tag)
{
- struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_cmd_caps *caps;
u8 *bit_mask = sb->bit_mask;
- u8 *tag = hw_ste->tag;
bool source_gvmi_set;
DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
@@ -2339,19 +2268,15 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
return 0;
}
-int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn,
- bool inner, bool rx)
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn,
+ bool inner, bool rx)
{
- int ret;
-
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
- ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
- if (ret)
- return ret;
+ dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->dmn = dmn;
@@ -2359,6 +2284,4 @@ int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
-
- return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 0883956c58c0..f50f3b107aa3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -194,7 +194,7 @@ struct mlx5dr_ste_build {
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
struct mlx5dr_ste_build *sb,
- u8 *hw_ste_p);
+ u8 *tag);
};
struct mlx5dr_ste_htbl *
@@ -227,7 +227,6 @@ void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
-bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
@@ -266,6 +265,11 @@ static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
ste->refcount++;
}
+static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
+{
+ return !ste->refcount;
+}
+
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
@@ -284,9 +288,9 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
-int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
- struct mlx5dr_match_param *mask,
- bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
+ struct mlx5dr_match_param *mask,
+ bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
@@ -342,10 +346,10 @@ void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
-int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
- struct mlx5dr_match_param *mask,
- struct mlx5dr_domain *dmn,
- bool inner, bool rx);
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
+ struct mlx5dr_match_param *mask,
+ struct mlx5dr_domain *dmn,
+ bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
/* Actions utils */
@@ -793,6 +797,7 @@ struct mlx5dr_rule {
struct mlx5dr_rule_rx_tx rx;
struct mlx5dr_rule_rx_tx tx;
struct list_head rule_actions_list;
+ u32 flow_source;
};
void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
@@ -991,7 +996,6 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
-bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 9b08eb557a31..96c39a17d026 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -487,7 +487,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
num_actions,
- actions);
+ actions,
+ fte->flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 7deaca9ade3b..7914fe3fc68d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -67,7 +67,8 @@ struct mlx5dr_rule *
mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
- struct mlx5dr_action *actions[]);
+ struct mlx5dr_action *actions[],
+ u32 flow_source);
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index ec45a03140d7..7f77c2a71d1c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -20,11 +20,13 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/firmware.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>
#include "core.h"
+#include "core_env.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
@@ -32,6 +34,7 @@
#include "emad.h"
#include "reg.h"
#include "resources.h"
+#include "../mlxfw/mlxfw.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
@@ -82,6 +85,11 @@ struct mlxsw_core {
struct mlxsw_core_port *ports;
unsigned int max_ports;
bool fw_flash_in_progress;
+ struct {
+ struct devlink_health_reporter *fw_fatal;
+ } health;
+ struct mlxsw_env *env;
+ bool is_initialized; /* Denotes if core was already initialized. */
unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -128,6 +136,11 @@ bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_res_query_enabled);
+bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver->temp_warn_enabled;
+}
+
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev)
@@ -864,6 +877,294 @@ static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
return mlxsw_driver;
}
+struct mlxsw_core_fw_info {
+ struct mlxfw_dev mlxfw_dev;
+ struct mlxsw_core *mlxsw_core;
+};
+
+static int mlxsw_core_fw_component_query(struct mlxfw_dev *mlxfw_dev,
+ u16 component_index, u32 *p_max_size,
+ u8 *p_align_bits, u16 *p_max_write_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcqi_pl[MLXSW_REG_MCQI_LEN];
+ int err;
+
+ mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcqi), mcqi_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits, p_max_write_size);
+
+ *p_align_bits = max_t(u8, *p_align_bits, 2);
+ *p_max_write_size = min_t(u16, *p_max_write_size, MLXSW_REG_MCDA_MAX_DATA_LEN);
+ return 0;
+}
+
+static int mlxsw_core_fw_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
+ if (control_state != MLXFW_FSM_STATE_IDLE)
+ return -EBUSY;
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE, 0, *fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_update(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index, u32 component_size)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
+ component_index, fwhandle, component_size);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_block_download(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u8 *data, u16 size, u32 offset)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcda_pl[MLXSW_REG_MCDA_LEN];
+
+ mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcda), mcda_pl);
+}
+
+static int mlxsw_core_fw_fsm_component_verify(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ u16 component_index)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
+ component_index, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0, fwhandle, 0);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static int mlxsw_core_fw_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
+ enum mlxfw_fsm_state *fsm_state,
+ enum mlxfw_fsm_state_err *fsm_state_err)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+ u8 control_state;
+ u8 error_code;
+ int err;
+
+ mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
+ *fsm_state = control_state;
+ *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code, MLXFW_FSM_STATE_ERR_MAX);
+ return 0;
+}
+
+static void mlxsw_core_fw_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static void mlxsw_core_fw_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
+{
+ struct mlxsw_core_fw_info *mlxsw_core_fw_info =
+ container_of(mlxfw_dev, struct mlxsw_core_fw_info, mlxfw_dev);
+ struct mlxsw_core *mlxsw_core = mlxsw_core_fw_info->mlxsw_core;
+ char mcc_pl[MLXSW_REG_MCC_LEN];
+
+ mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0, fwhandle, 0);
+ mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcc), mcc_pl);
+}
+
+static const struct mlxfw_dev_ops mlxsw_core_fw_mlxsw_dev_ops = {
+ .component_query = mlxsw_core_fw_component_query,
+ .fsm_lock = mlxsw_core_fw_fsm_lock,
+ .fsm_component_update = mlxsw_core_fw_fsm_component_update,
+ .fsm_block_download = mlxsw_core_fw_fsm_block_download,
+ .fsm_component_verify = mlxsw_core_fw_fsm_component_verify,
+ .fsm_activate = mlxsw_core_fw_fsm_activate,
+ .fsm_query_state = mlxsw_core_fw_fsm_query_state,
+ .fsm_cancel = mlxsw_core_fw_fsm_cancel,
+ .fsm_release = mlxsw_core_fw_fsm_release,
+};
+
+static int mlxsw_core_fw_flash(struct mlxsw_core *mlxsw_core, const struct firmware *firmware,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core_fw_info mlxsw_core_fw_info = {
+ .mlxfw_dev = {
+ .ops = &mlxsw_core_fw_mlxsw_dev_ops,
+ .psid = mlxsw_core->bus_info->psid,
+ .psid_size = strlen(mlxsw_core->bus_info->psid),
+ .devlink = priv_to_devlink(mlxsw_core),
+ },
+ .mlxsw_core = mlxsw_core
+ };
+ int err;
+
+ mlxsw_core->fw_flash_in_progress = true;
+ err = mlxfw_firmware_flash(&mlxsw_core_fw_info.mlxfw_dev, firmware, extack);
+ mlxsw_core->fw_flash_in_progress = false;
+
+ return err;
+}
+
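/* Editorial sketch, not part of this patch: every mlxsw_core_fw_* callback
 * above recovers its context with the same embed-and-container_of pattern,
 * and because mlxsw_core_fw_flash() keeps the wrapper on the stack for the
 * whole synchronous mlxfw_firmware_flash() call, no allocation or global
 * state is needed. Generic form, with hypothetical names (container_of()
 * comes from <linux/kernel.h>):
 */
struct generic {
	int dummy;			/* stands in for struct mlxfw_dev */
};

struct wrapper {
	struct generic gen;		/* embedded object handed to the library */
	void *priv;			/* driver-private state */
};

static void example_callback(struct generic *gen)
{
	struct wrapper *w = container_of(gen, struct wrapper, gen);

	(void)w;			/* ... use w->priv ... */
}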
+static int mlxsw_core_fw_rev_validate(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_bus_info *mlxsw_bus_info,
+ const struct mlxsw_fw_rev *req_rev,
+ const char *filename)
+{
+ const struct mlxsw_fw_rev *rev = &mlxsw_bus_info->fw_rev;
+ union devlink_param_value value;
+ const struct firmware *firmware;
+ int err;
+
+ /* Don't check if driver does not require it */
+ if (!req_rev || !filename)
+ return 0;
+
+ /* Don't check if devlink 'fw_load_policy' param is 'flash' */
+ err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_core),
+ DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
+ &value);
+ if (err)
+ return err;
+ if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
+ return 0;
+
+ /* Validate driver & FW are compatible */
+ if (rev->major != req_rev->major) {
+ WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
+ rev->major, req_rev->major);
+ return -EINVAL;
+ }
+ if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
+ return 0;
+
+ dev_err(mlxsw_bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
+ rev->major, rev->minor, rev->subminor, req_rev->major,
+ req_rev->minor, req_rev->subminor);
+ dev_info(mlxsw_bus_info->dev, "Flashing firmware using file %s\n", filename);
+
+ err = request_firmware_direct(&firmware, filename, mlxsw_bus_info->dev);
+ if (err) {
+ dev_err(mlxsw_bus_info->dev, "Could not request firmware file %s\n", filename);
+ return err;
+ }
+
+ err = mlxsw_core_fw_flash(mlxsw_core, firmware, NULL);
+ release_firmware(firmware);
+ if (err)
+ dev_err(mlxsw_bus_info->dev, "Could not upgrade firmware\n");
+
+ /* On FW flash success, tell the caller FW reset is needed
+ * if current FW supports it.
+ */
+ if (rev->minor >= req_rev->can_reset_minor)
+ return err ? err : -EAGAIN;
+ else
+ return 0;
+}
+
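/* Editorial note: -EAGAIN from this function is not a failure. It signals
 * that a new image was flashed and the device should be reset so the new FW
 * starts running; a caller is expected to handle it roughly as follows
 * (hypothetical sketch, not taken from this patch):
 *
 *   err = mlxsw_core_fw_rev_validate(...);
 *   if (err == -EAGAIN)
 *           // re-register the bus device to activate the new image
 *   else if (err)
 *           return err;
 */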
+static int mlxsw_core_fw_flash_update(struct mlxsw_core *mlxsw_core,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ const struct firmware *firmware;
+ int err;
+
+ err = request_firmware_direct(&firmware, params->file_name, mlxsw_core->bus_info->dev);
+ if (err)
+ return err;
+ err = mlxsw_core_fw_flash(mlxsw_core, firmware, extack);
+ release_firmware(firmware);
+
+ return err;
+}
+
+static int mlxsw_core_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER &&
+ val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) {
+ NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param mlxsw_core_fw_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), NULL, NULL,
+ mlxsw_core_devlink_param_fw_load_policy_validate),
+};
+
+static int mlxsw_core_fw_params_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ union devlink_param_value value;
+ int err;
+
+ err = devlink_params_register(devlink, mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+ if (err)
+ return err;
+
+ value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
+ devlink_param_driverinit_value_set(devlink, DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, value);
+ return 0;
+}
+
+static void mlxsw_core_fw_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ devlink_params_unregister(priv_to_devlink(mlxsw_core), mlxsw_core_fw_devlink_params,
+ ARRAY_SIZE(mlxsw_core_fw_devlink_params));
+}
+
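/* Editorial note: with the parameter registered and defaulted to "driver",
 * mlxsw_core_fw_rev_validate() checks (and if needed flashes) the FW on
 * every init; setting it to "flash" makes the driver run whatever image is
 * already in flash and skip the check. Illustrative devlink(8) usage (the
 * device name is an example):
 *
 *   # devlink dev param set pci/0000:03:00.0 name fw_load_policy \
 *         value flash cmode driverinit
 *   # devlink dev reload pci/0000:03:00.0
 */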
static int mlxsw_devlink_port_split(struct devlink *devlink,
unsigned int port_index,
unsigned int count,
@@ -1113,7 +1414,8 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
static int
mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
- bool netns_change,
+ bool netns_change, enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -1126,11 +1428,14 @@ mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
}
static int
-mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
+mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE);
return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
mlxsw_core->bus,
mlxsw_core->bus_priv, true,
@@ -1138,17 +1443,12 @@ mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
- const char *file_name,
- const char *component,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
- if (!mlxsw_driver->flash_update)
- return -EOPNOTSUPP;
- return mlxsw_driver->flash_update(mlxsw_core, file_name,
- component, extack);
+ return mlxsw_core_fw_flash_update(mlxsw_core, params, extack);
}
static int mlxsw_devlink_trap_init(struct devlink *devlink,
@@ -1268,6 +1568,8 @@ mlxsw_devlink_trap_policer_counter_get(struct devlink *devlink,
}
static const struct devlink_ops mlxsw_devlink_ops = {
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
.port_type_set = mlxsw_devlink_port_type_set,
@@ -1296,6 +1598,263 @@ static const struct devlink_ops mlxsw_devlink_ops = {
.trap_policer_counter_get = mlxsw_devlink_trap_policer_counter_get,
};
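/* Editorial note: advertising reload_actions lets userspace request a
 * specific action, and reload_up() reports what was actually performed via
 * actions_performed. Illustrative devlink(8) usage (the device name is an
 * example):
 *
 *   # devlink dev reload pci/0000:03:00.0 action driver_reinit
 *   # devlink dev reload pci/0000:03:00.0 action fw_activate
 *
 * Both bits are always reported here because a full re-registration both
 * re-initializes the driver and activates any pending FW image.
 */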
+static int mlxsw_core_params_register(struct mlxsw_core *mlxsw_core)
+{
+ int err;
+
+ err = mlxsw_core_fw_params_register(mlxsw_core);
+ if (err)
+ return err;
+
+ if (mlxsw_core->driver->params_register) {
+ err = mlxsw_core->driver->params_register(mlxsw_core);
+ if (err)
+ goto err_params_register;
+ }
+ return 0;
+
+err_params_register:
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ return err;
+}
+
+static void mlxsw_core_params_unregister(struct mlxsw_core *mlxsw_core)
+{
+ mlxsw_core_fw_params_unregister(mlxsw_core);
+ if (mlxsw_core->driver->params_register)
+ mlxsw_core->driver->params_unregister(mlxsw_core);
+}
+
+struct mlxsw_core_health_event {
+ struct mlxsw_core *mlxsw_core;
+ char mfde_pl[MLXSW_REG_MFDE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_core_health_event_work(struct work_struct *work)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core;
+
+ event = container_of(work, struct mlxsw_core_health_event, work);
+ mlxsw_core = event->mlxsw_core;
+ devlink_health_report(mlxsw_core->health.fw_fatal, "FW fatal event occurred",
+ event->mfde_pl);
+ kfree(event);
+}
+
+static void mlxsw_core_health_listener_func(const struct mlxsw_reg_info *reg,
+ char *mfde_pl, void *priv)
+{
+ struct mlxsw_core_health_event *event;
+ struct mlxsw_core *mlxsw_core = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+ event->mlxsw_core = mlxsw_core;
+ memcpy(event->mfde_pl, mfde_pl, sizeof(event->mfde_pl));
+ INIT_WORK(&event->work, mlxsw_core_health_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_core_health_listener =
+ MLXSW_EVENTL(mlxsw_core_health_listener_func, MFDE, MFDE);
+
+static int mlxsw_core_health_fw_fatal_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ char *mfde_pl = priv_ctx;
+ char *val_str;
+ u8 event_id;
+ u32 val;
+ int err;
+
+ if (!priv_ctx)
+ /* User-triggered dumps are not possible */
+ return -EOPNOTSUPP;
+
+ val = mlxsw_reg_mfde_irisc_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "irisc_id", val);
+ if (err)
+ return err;
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "event");
+ if (err)
+ return err;
+
+ event_id = mlxsw_reg_mfde_event_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "id", event_id);
+ if (err)
+ return err;
+ switch (event_id) {
+ case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
+ val_str = "CR space timeout";
+ break;
+ case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
+ val_str = "KVD insertion machine stopped";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "desc", val_str);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_method_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_METHOD_QUERY:
+ val_str = "query";
+ break;
+ case MLXSW_REG_MFDE_METHOD_WRITE:
+ val_str = "write";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "method", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_long_process_get(mfde_pl);
+ err = devlink_fmsg_bool_pair_put(fmsg, "long_process", val);
+ if (err)
+ return err;
+
+ val = mlxsw_reg_mfde_command_type_get(mfde_pl);
+ switch (val) {
+ case MLXSW_REG_MFDE_COMMAND_TYPE_MAD:
+ val_str = "mad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_EMAD:
+ val_str = "emad";
+ break;
+ case MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF:
+ val_str = "cmdif";
+ break;
+ default:
+ val_str = NULL;
+ }
+ if (val_str) {
+ err = devlink_fmsg_string_pair_put(fmsg, "command_type", val_str);
+ if (err)
+ return err;
+ }
+
+ val = mlxsw_reg_mfde_reg_attr_id_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "reg_attr_id", val);
+ if (err)
+ return err;
+
+ if (event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO) {
+ val = mlxsw_reg_mfde_log_address_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "log_address", val);
+ if (err)
+ return err;
+ val = mlxsw_reg_mfde_log_id_get(mfde_pl);
+ err = devlink_fmsg_u8_pair_put(fmsg, "log_irisc_id", val);
+ if (err)
+ return err;
+ } else if (event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP) {
+ val = mlxsw_reg_mfde_pipes_mask_get(mfde_pl);
+ err = devlink_fmsg_u32_pair_put(fmsg, "pipes_mask", val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+mlxsw_core_health_fw_fatal_test(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_health_reporter_priv(reporter);
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_trigger_test_set(mfgd_pl, true);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static const struct devlink_health_reporter_ops
+mlxsw_core_health_fw_fatal_ops = {
+ .name = "fw_fatal",
+ .dump = mlxsw_core_health_fw_fatal_dump,
+ .test = mlxsw_core_health_fw_fatal_test,
+};
+
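/* Editorial note: the reporter is exercised through devlink-health.
 * Illustrative devlink(8) usage (the device name is an example):
 *
 *   # devlink health test pci/0000:03:00.0 reporter fw_fatal
 *   # devlink health dump show pci/0000:03:00.0 reporter fw_fatal
 *
 * "test" sets MFGD.trigger_test (mlxsw_core_health_fw_fatal_test() above),
 * making the FW emit a fake MFDE event; and since .dump returns -EOPNOTSUPP
 * without a priv_ctx, a dump exists only after a real or test-triggered
 * event.
 */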
+static int mlxsw_core_health_fw_fatal_config(struct mlxsw_core *mlxsw_core,
+ bool enable)
+{
+ char mfgd_pl[MLXSW_REG_MFGD_LEN];
+ int err;
+
+ /* Read the register first to make sure no other bits are changed. */
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, enable);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
+}
+
+static int mlxsw_core_health_init(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_health_reporter *fw_fatal;
+ int err;
+
+ if (!mlxsw_core->driver->fw_fatal_enabled)
+ return 0;
+
+ fw_fatal = devlink_health_reporter_create(devlink, &mlxsw_core_health_fw_fatal_ops,
+ 0, mlxsw_core);
+ if (IS_ERR(fw_fatal)) {
+ dev_err(mlxsw_core->bus_info->dev, "Failed to create fw fatal reporter\n");
+ return PTR_ERR(fw_fatal);
+ }
+ mlxsw_core->health.fw_fatal = fw_fatal;
+
+ err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ if (err)
+ goto err_trap_register;
+
+ err = mlxsw_core_health_fw_fatal_config(mlxsw_core, true);
+ if (err)
+ goto err_fw_fatal_config;
+
+ return 0;
+
+err_fw_fatal_config:
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+err_trap_register:
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+ return err;
+}
+
+static void mlxsw_core_health_fini(struct mlxsw_core *mlxsw_core)
+{
+ if (!mlxsw_core->driver->fw_fatal_enabled)
+ return;
+
+ mlxsw_core_health_fw_fatal_config(mlxsw_core, false);
+ mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_core_health_listener, mlxsw_core);
+ /* Make sure there is no more event work scheduled */
+ mlxsw_core_flush_owq();
+ devlink_health_reporter_destroy(mlxsw_core->health.fw_fatal);
+}
+
static int
__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
const struct mlxsw_bus *mlxsw_bus,
@@ -1368,12 +1927,21 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_devlink_register;
}
- if (mlxsw_driver->params_register && !reload) {
- err = mlxsw_driver->params_register(mlxsw_core);
+ if (!reload) {
+ err = mlxsw_core_params_register(mlxsw_core);
if (err)
goto err_register_params;
}
+ err = mlxsw_core_fw_rev_validate(mlxsw_core, mlxsw_bus_info, mlxsw_driver->fw_req_rev,
+ mlxsw_driver->fw_filename);
+ if (err)
+ goto err_fw_rev_validate;
+
+ err = mlxsw_core_health_init(mlxsw_core);
+ if (err)
+ goto err_health_init;
+
if (mlxsw_driver->init) {
err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
if (err)
@@ -1389,22 +1957,31 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_thermal_init;
- if (mlxsw_driver->params_register)
- devlink_params_publish(devlink);
+ err = mlxsw_env_init(mlxsw_core, &mlxsw_core->env);
+ if (err)
+ goto err_env_init;
+
+ mlxsw_core->is_initialized = true;
+ devlink_params_publish(devlink);
if (!reload)
devlink_reload_enable(devlink);
return 0;
+err_env_init:
+ mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
- if (mlxsw_driver->params_unregister && !reload)
- mlxsw_driver->params_unregister(mlxsw_core);
+ mlxsw_core_health_fini(mlxsw_core);
+err_health_init:
+err_fw_rev_validate:
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
if (!reload)
devlink_unregister(devlink);
@@ -1469,14 +2046,16 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- if (mlxsw_core->driver->params_unregister)
- devlink_params_unpublish(devlink);
+ devlink_params_unpublish(devlink);
+ mlxsw_core->is_initialized = false;
+ mlxsw_env_fini(mlxsw_core->env);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
if (mlxsw_core->driver->fini)
mlxsw_core->driver->fini(mlxsw_core);
- if (mlxsw_core->driver->params_unregister && !reload)
- mlxsw_core->driver->params_unregister(mlxsw_core);
+ mlxsw_core_health_fini(mlxsw_core);
+ if (!reload)
+ mlxsw_core_params_unregister(mlxsw_core);
if (!reload)
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
@@ -1489,8 +2068,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
reload_fail_deinit:
- if (mlxsw_core->driver->params_unregister)
- mlxsw_core->driver->params_unregister(mlxsw_core);
+ mlxsw_core_params_unregister(mlxsw_core);
devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
@@ -2274,6 +2852,16 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_port_devlink_port_get);
+struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->env;
+}
+
+bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->is_initialized;
+}
+
int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
{
enum mlxsw_reg_pmtm_module_type module_type;
@@ -2410,18 +2998,6 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
-void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->fw_flash_in_progress = true;
-}
-EXPORT_SYMBOL(mlxsw_core_fw_flash_start);
-
-void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
-{
- mlxsw_core->fw_flash_in_progress = false;
-}
-EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
-
int mlxsw_core_resources_query(struct mlxsw_core *mlxsw_core, char *mbox,
struct mlxsw_res *res)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 11af3308f8cc..92f7398287be 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -32,6 +32,8 @@ void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core);
+bool mlxsw_core_temp_warn_enabled(const struct mlxsw_core *mlxsw_core);
+
bool
mlxsw_core_fw_rev_minor_subminor_validate(const struct mlxsw_fw_rev *rev,
const struct mlxsw_fw_rev *req_rev);
@@ -221,6 +223,8 @@ enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
struct devlink_port *
mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
+struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
+bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core);
int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
@@ -280,6 +284,8 @@ struct mlxsw_driver {
struct list_head list;
const char *kind;
size_t priv_size;
+ const struct mlxsw_fw_rev *fw_req_rev;
+ const char *fw_filename;
int (*init)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info,
struct netlink_ext_ack *extack);
@@ -324,9 +330,6 @@ struct mlxsw_driver {
unsigned int sb_index, u16 tc_index,
enum devlink_sb_pool_type pool_type,
u32 *p_cur, u32 *p_max);
- int (*flash_update)(struct mlxsw_core *mlxsw_core,
- const char *file_name, const char *component,
- struct netlink_ext_ack *extack);
int (*trap_init)(struct mlxsw_core *mlxsw_core,
const struct devlink_trap *trap, void *trap_ctx);
void (*trap_fini)(struct mlxsw_core *mlxsw_core,
@@ -371,6 +374,8 @@ struct mlxsw_driver {
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
bool res_query_enabled;
+ bool fw_fatal_enabled;
+ bool temp_warn_enabled;
};
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
@@ -378,9 +383,6 @@ int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
u64 *p_single_size, u64 *p_double_size,
u64 *p_linear_size);
-void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core);
-void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core);
-
u32 mlxsw_core_read_frc_h(struct mlxsw_core *mlxsw_core);
u32 mlxsw_core_read_frc_l(struct mlxsw_core *mlxsw_core);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 056eeb85be60..dd26865bd587 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -10,6 +10,18 @@
#include "item.h"
#include "reg.h"
+struct mlxsw_env_module_info {
+ u64 module_overheat_counter;
+ bool is_overheat;
+};
+
+struct mlxsw_env {
+ struct mlxsw_core *core;
+ u8 module_count;
+ spinlock_t module_info_lock; /* Protects 'module_info'. */
+ struct mlxsw_env_module_info module_info[];
+};
+
static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
bool *qsfp, bool *cmis)
{
@@ -293,3 +305,359 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
return 0;
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom);
+
+static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
+ u8 module,
+ bool *p_has_temp_sensor)
+{
+ char mtbr_pl[MLXSW_REG_MTBR_LEN];
+ u16 temp;
+ int err;
+
+ mlxsw_reg_mtbr_pack(mtbr_pl, MLXSW_REG_MTBR_BASE_MODULE_INDEX + module,
+ 1);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtbr), mtbr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mtbr_temp_unpack(mtbr_pl, 0, &temp, NULL);
+
+ switch (temp) {
+ case MLXSW_REG_MTBR_BAD_SENS_INFO:
+ case MLXSW_REG_MTBR_NO_CONN:
+ case MLXSW_REG_MTBR_NO_TEMP_SENS:
+ case MLXSW_REG_MTBR_INDEX_NA:
+ *p_has_temp_sensor = false;
+ break;
+ default:
+ *p_has_temp_sensor = temp ? true : false;
+ }
+ return 0;
+}
+
+static int mlxsw_env_temp_event_set(struct mlxsw_core *mlxsw_core,
+ u16 sensor_index, bool enable)
+{
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+ enum mlxsw_reg_mtmp_tee tee;
+ int err, threshold_hi;
+
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, sensor_index);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+
+ if (enable) {
+ err = mlxsw_env_module_temp_thresholds_get(mlxsw_core,
+ sensor_index -
+ MLXSW_REG_MTMP_MODULE_INDEX_MIN,
+ SFP_TEMP_HIGH_WARN,
+ &threshold_hi);
+ /* In case it is not possible to query the module's threshold,
+ * use the default value.
+ */
+ if (err)
+ threshold_hi = MLXSW_REG_MTMP_THRESH_HI;
+ else
+ /* mlxsw_env_module_temp_thresholds_get() multiplies
+ * Celsius degrees by 1000 whereas MTMP expects
+ * temperature in 0.125 Celsius degrees units.
+ * Convert threshold_hi to correct units.
+ */
+ threshold_hi = threshold_hi / 1000 * 8;
+
+ mlxsw_reg_mtmp_temperature_threshold_hi_set(mtmp_pl, threshold_hi);
+ mlxsw_reg_mtmp_temperature_threshold_lo_set(mtmp_pl, threshold_hi -
+ MLXSW_REG_MTMP_HYSTERESIS_TEMP);
+ }
+ tee = enable ? MLXSW_REG_MTMP_TEE_GENERATE_EVENT : MLXSW_REG_MTMP_TEE_NO_EVENT;
+ mlxsw_reg_mtmp_tee_set(mtmp_pl, tee);
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mtmp), mtmp_pl);
+}
+
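/* Editorial worked example of the conversion above: a module whose high
 * warning threshold is 70 degrees C arrives from
 * mlxsw_env_module_temp_thresholds_get() as 70000 (millidegrees);
 * 70000 / 1000 * 8 = 560 MTMP units of 0.125 C, i.e. 70 C. The low
 * threshold becomes 560 - 0x28 = 520 units = 65 C, which is exactly the
 * 5 C hysteresis encoded by MLXSW_REG_MTMP_HYSTERESIS_TEMP.
 */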
+static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 module_count)
+{
+ int i, err, sensor_index;
+ bool has_temp_sensor;
+
+ for (i = 0; i < module_count; i++) {
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_core, i,
+ &has_temp_sensor);
+ if (err)
+ return err;
+
+ if (!has_temp_sensor)
+ continue;
+
+ sensor_index = i + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
+ err = mlxsw_env_temp_event_set(mlxsw_core, sensor_index, true);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
+ char *mtwe_pl, void *priv)
+{
+ struct mlxsw_env *mlxsw_env = priv;
+ int i, sensor_warning;
+ bool is_overheat;
+
+ for (i = 0; i < mlxsw_env->module_count; i++) {
+ /* 64-127 of sensor_index are mapped to the port modules
+ * sequentially (module 0 is mapped to sensor_index 64,
+ * module 1 to sensor_index 65 and so on)
+ */
+ sensor_warning =
+ mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl,
+ i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
+ spin_lock(&mlxsw_env->module_info_lock);
+ is_overheat =
+ mlxsw_env->module_info[i].is_overheat;
+
+ if ((is_overheat && sensor_warning) ||
+ (!is_overheat && !sensor_warning)) {
+ /* Current state is "warning" and MTWE still reports
+ * warning OR current state is "no warning" and MTWE
+ * does not report warning.
+ */
+ spin_unlock(&mlxsw_env->module_info_lock);
+ continue;
+ } else if (is_overheat && !sensor_warning) {
+ /* MTWE reports "no warning", turn is_overheat off.
+ */
+ mlxsw_env->module_info[i].is_overheat = false;
+ spin_unlock(&mlxsw_env->module_info_lock);
+ } else {
+ /* Current state is "no warning" and MTWE reports
+ * "warning", increase the counter and turn is_overheat
+ * on.
+ */
+ mlxsw_env->module_info[i].is_overheat = true;
+ mlxsw_env->module_info[i].module_overheat_counter++;
+ spin_unlock(&mlxsw_env->module_info_lock);
+ }
+ }
+}
+
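/* Editorial summary of the per-module transitions above:
 *
 *   is_overheat  sensor_warning  action
 *   false        false           none
 *   true         true            none (module still over threshold)
 *   true         false           clear is_overheat
 *   false        true            set is_overheat, bump overheat counter
 */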
+static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
+ MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE);
+
+static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
+ return 0;
+
+ return mlxsw_core_trap_register(mlxsw_core,
+ &mlxsw_env_temp_warn_listener,
+ mlxsw_env);
+}
+
+static void mlxsw_env_temp_warn_event_unregister(struct mlxsw_env *mlxsw_env)
+{
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
+ return;
+
+ mlxsw_core_trap_unregister(mlxsw_env->core,
+ &mlxsw_env_temp_warn_listener, mlxsw_env);
+}
+
+struct mlxsw_env_module_plug_unplug_event {
+ struct mlxsw_env *mlxsw_env;
+ u8 module;
+ struct work_struct work;
+};
+
+static void mlxsw_env_pmpe_event_work(struct work_struct *work)
+{
+ struct mlxsw_env_module_plug_unplug_event *event;
+ struct mlxsw_env *mlxsw_env;
+ bool has_temp_sensor;
+ u16 sensor_index;
+ int err;
+
+ event = container_of(work, struct mlxsw_env_module_plug_unplug_event,
+ work);
+ mlxsw_env = event->mlxsw_env;
+
+ spin_lock_bh(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[event->module].is_overheat = false;
+ spin_unlock_bh(&mlxsw_env->module_info_lock);
+
+ err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
+ &has_temp_sensor);
+ /* Do not disable events on modules without sensors or faulty sensors
+ * because FW returns errors.
+ */
+ if (err)
+ goto out;
+
+ if (!has_temp_sensor)
+ goto out;
+
+ sensor_index = event->module + MLXSW_REG_MTMP_MODULE_INDEX_MIN;
+ mlxsw_env_temp_event_set(mlxsw_env->core, sensor_index, true);
+
+out:
+ kfree(event);
+}
+
+static void
+mlxsw_env_pmpe_listener_func(const struct mlxsw_reg_info *reg, char *pmpe_pl,
+ void *priv)
+{
+ struct mlxsw_env_module_plug_unplug_event *event;
+ enum mlxsw_reg_pmpe_module_status module_status;
+ u8 module = mlxsw_reg_pmpe_module_get(pmpe_pl);
+ struct mlxsw_env *mlxsw_env = priv;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ module_status = mlxsw_reg_pmpe_module_status_get(pmpe_pl);
+ if (module_status != MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ENABLED)
+ return;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->mlxsw_env = mlxsw_env;
+ event->module = module;
+ INIT_WORK(&event->work, mlxsw_env_pmpe_event_work);
+ mlxsw_core_schedule_work(&event->work);
+}
+
+static const struct mlxsw_listener mlxsw_env_module_plug_listener =
+ MLXSW_EVENTL(mlxsw_env_pmpe_listener_func, PMPE, PMPE);
+
+static int
+mlxsw_env_module_plug_event_register(struct mlxsw_core *mlxsw_core)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_core))
+ return 0;
+
+ return mlxsw_core_trap_register(mlxsw_core,
+ &mlxsw_env_module_plug_listener,
+ mlxsw_env);
+}
+
+static void
+mlxsw_env_module_plug_event_unregister(struct mlxsw_env *mlxsw_env)
+{
+ if (!mlxsw_core_temp_warn_enabled(mlxsw_env->core))
+ return;
+
+ mlxsw_core_trap_unregister(mlxsw_env->core,
+ &mlxsw_env_module_plug_listener,
+ mlxsw_env);
+}
+
+static int
+mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
+ u8 module_count)
+{
+ int i, err;
+
+ for (i = 0; i < module_count; i++) {
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, i,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
+ u64 *p_counter)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ /* Prevent switch driver from accessing uninitialized data. */
+ if (!mlxsw_core_is_initialized(mlxsw_core)) {
+ *p_counter = 0;
+ return 0;
+ }
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ spin_lock_bh(&mlxsw_env->module_info_lock);
+ *p_counter = mlxsw_env->module_info[module].module_overheat_counter;
+ spin_unlock_bh(&mlxsw_env->module_info_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
+
+int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
+{
+ char mgpir_pl[MLXSW_REG_MGPIR_LEN];
+ struct mlxsw_env *env;
+ u8 module_count;
+ int err;
+
+ mlxsw_reg_mgpir_pack(mgpir_pl);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_mgpir_unpack(mgpir_pl, NULL, NULL, NULL, &module_count);
+
+ env = kzalloc(struct_size(env, module_info, module_count), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ spin_lock_init(&env->module_info_lock);
+ env->core = mlxsw_core;
+ env->module_count = module_count;
+ *p_env = env;
+
+ err = mlxsw_env_temp_warn_event_register(mlxsw_core);
+ if (err)
+ goto err_temp_warn_event_register;
+
+ err = mlxsw_env_module_plug_event_register(mlxsw_core);
+ if (err)
+ goto err_module_plug_event_register;
+
+ err = mlxsw_env_module_oper_state_event_enable(mlxsw_core,
+ env->module_count);
+ if (err)
+ goto err_oper_state_event_enable;
+
+ err = mlxsw_env_module_temp_event_enable(mlxsw_core, env->module_count);
+ if (err)
+ goto err_temp_event_enable;
+
+ return 0;
+
+err_temp_event_enable:
+err_oper_state_event_enable:
+ mlxsw_env_module_plug_event_unregister(env);
+err_module_plug_event_register:
+ mlxsw_env_temp_warn_event_unregister(env);
+err_temp_warn_event_register:
+ kfree(env);
+ return err;
+}
+
+void mlxsw_env_fini(struct mlxsw_env *env)
+{
+ mlxsw_env_module_plug_event_unregister(env);
+ /* Make sure there is no more event work scheduled. */
+ mlxsw_core_flush_owq();
+ mlxsw_env_temp_warn_event_unregister(env);
+ kfree(env);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 064d0e770c01..8e36a2634ef5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -14,4 +14,10 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
struct mlxsw_core *mlxsw_core, int module,
struct ethtool_eeprom *ee, u8 *data);
+int
+mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
+ u64 *p_counter);
+int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
+void mlxsw_env_fini(struct mlxsw_env *env);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
index 61719ec89808..2196c946698a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
@@ -12,8 +12,17 @@
#include "core.h"
#include "core_env.h"
-#define MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT 127
-#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_TEMP_SENSOR_MAX_COUNT * 4 + \
+#define MLXSW_HWMON_SENSORS_MAX_COUNT 64
+#define MLXSW_HWMON_MODULES_MAX_COUNT 64
+#define MLXSW_HWMON_GEARBOXES_MAX_COUNT 32
+
+#define MLXSW_HWMON_ATTR_PER_SENSOR 3
+#define MLXSW_HWMON_ATTR_PER_MODULE 7
+#define MLXSW_HWMON_ATTR_PER_GEARBOX 4
+
+#define MLXSW_HWMON_ATTR_COUNT (MLXSW_HWMON_SENSORS_MAX_COUNT * MLXSW_HWMON_ATTR_PER_SENSOR + \
+ MLXSW_HWMON_MODULES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_MODULE + \
+ MLXSW_HWMON_GEARBOXES_MAX_COUNT * MLXSW_HWMON_ATTR_PER_GEARBOX + \
MLXSW_MFCR_TACHOS_MAX + MLXSW_MFCR_PWMS_MAX)
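/* Editorial arithmetic for the bound above: 64 * 3 + 64 * 7 + 32 * 4 =
 * 192 + 448 + 128 = 768 attributes, plus one per possible fan tachometer
 * and PWM. Seven attributes per module cover temp input, fault, crit,
 * emergency and label, plus the two new alarm attributes added below.
 */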
struct mlxsw_hwmon_attr {
@@ -97,7 +106,7 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- char mtmp_pl[MLXSW_REG_MTMP_LEN];
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
unsigned long val;
int index;
int err;
@@ -110,7 +119,13 @@ static ssize_t mlxsw_hwmon_temp_rst_store(struct device *dev,
index = mlxsw_hwmon_get_attr_index(mlwsw_hwmon_attr->type_index,
mlxsw_hwmon->module_sensor_max);
- mlxsw_reg_mtmp_pack(mtmp_pl, index, true, true);
+
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, index);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mtmp_mte_set(mtmp_pl, true);
+ mlxsw_reg_mtmp_mtr_set(mtmp_pl, true);
err = mlxsw_reg_write(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
if (err) {
dev_err(mlxsw_hwmon->bus_info->dev, "Failed to reset temp sensor history\n");
@@ -205,25 +220,39 @@ static ssize_t mlxsw_hwmon_pwm_store(struct device *dev,
return len;
}
-static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int mlxsw_hwmon_module_temp_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
char mtmp_pl[MLXSW_REG_MTMP_LEN];
u8 module;
- int temp;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
mlxsw_reg_mtmp_pack(mtmp_pl, MLXSW_REG_MTMP_MODULE_INDEX_MIN + module,
false, false);
err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp), mtmp_pl);
+ if (err) {
+ dev_err(dev, "Failed to query module temperature\n");
+ return err;
+ }
+ mlxsw_reg_mtmp_unpack(mtmp_pl, p_temp, NULL, NULL);
+
+ return 0;
+}
+
+static ssize_t mlxsw_hwmon_module_temp_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
if (err)
return err;
- mlxsw_reg_mtmp_unpack(mtmp_pl, &temp, NULL, NULL);
return sprintf(buf, "%d\n", temp);
}
@@ -270,48 +299,72 @@ static ssize_t mlxsw_hwmon_module_temp_fault_show(struct device *dev,
return sprintf(buf, "%u\n", fault);
}
-static ssize_t
-mlxsw_hwmon_module_temp_critical_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int mlxsw_hwmon_module_temp_critical_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
- int temp;
u8 module;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_WARN, &temp);
+ SFP_TEMP_HIGH_WARN, p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
}
- return sprintf(buf, "%u\n", temp);
+ return 0;
}
static ssize_t
-mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+mlxsw_hwmon_module_temp_critical_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%u\n", temp);
+}
+
+static int mlxsw_hwmon_module_temp_emergency_get(struct device *dev,
+ struct device_attribute *attr,
+ int *p_temp)
{
struct mlxsw_hwmon_attr *mlwsw_hwmon_attr =
container_of(attr, struct mlxsw_hwmon_attr, dev_attr);
struct mlxsw_hwmon *mlxsw_hwmon = mlwsw_hwmon_attr->hwmon;
u8 module;
- int temp;
int err;
module = mlwsw_hwmon_attr->type_index - mlxsw_hwmon->sensor_count;
err = mlxsw_env_module_temp_thresholds_get(mlxsw_hwmon->core, module,
- SFP_TEMP_HIGH_ALARM, &temp);
+ SFP_TEMP_HIGH_ALARM, p_temp);
if (err) {
dev_err(dev, "Failed to query module temperature thresholds\n");
return err;
}
+ return 0;
+}
+
+static ssize_t
+mlxsw_hwmon_module_temp_emergency_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp;
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &temp);
+ if (err)
+ return err;
+
return sprintf(buf, "%u\n", temp);
}
@@ -341,6 +394,53 @@ mlxsw_hwmon_gbox_temp_label_show(struct device *dev,
return sprintf(buf, "gearbox %03u\n", index);
}
+static ssize_t mlxsw_hwmon_temp_critical_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp, critic_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ if (temp >= emergency_temp)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_critical_get(dev, attr, &critic_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= critic_temp);
+}
+
+static ssize_t mlxsw_hwmon_temp_emergency_alarm_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, temp, emergency_temp;
+
+ err = mlxsw_hwmon_module_temp_get(dev, attr, &temp);
+ if (err)
+ return err;
+
+ if (temp <= 0)
+ return sprintf(buf, "%d\n", false);
+
+ err = mlxsw_hwmon_module_temp_emergency_get(dev, attr, &emergency_temp);
+ if (err)
+ return err;
+
+ return sprintf(buf, "%d\n", temp >= emergency_temp);
+}
+
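/* Editorial worked example for the two alarm files above, for a module
 * with crit (SFP high warn) = 70 C and emergency (SFP high alarm) = 80 C:
 *
 *   temp = 75 C -> temp_crit_alarm = 1, temp_emergency_alarm = 0
 *   temp = 82 C -> temp_crit_alarm = 0, temp_emergency_alarm = 1
 *
 * The alarms are mutually exclusive: crit_alarm covers [crit, emergency)
 * and emergency_alarm covers everything at or above emergency.
 */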
enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP,
MLXSW_HWMON_ATTR_TYPE_TEMP_MAX,
@@ -354,6 +454,8 @@ enum mlxsw_hwmon_attr_type {
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_EMERG,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
MLXSW_HWMON_ATTR_TYPE_TEMP_GBOX_LABEL,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
};
static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
@@ -444,6 +546,20 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
"temp%u_label", num + 1);
break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_critical_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_crit_alarm", num + 1);
+ break;
+ case MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM:
+ mlxsw_hwmon_attr->dev_attr.show =
+ mlxsw_hwmon_temp_emergency_alarm_show;
+ mlxsw_hwmon_attr->dev_attr.attr.mode = 0444;
+ snprintf(mlxsw_hwmon_attr->name, sizeof(mlxsw_hwmon_attr->name),
+ "temp%u_emergency_alarm", num + 1);
+ break;
default:
WARN_ON(1);
}
@@ -460,7 +576,6 @@ static void mlxsw_hwmon_attr_add(struct mlxsw_hwmon *mlxsw_hwmon,
static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
{
char mtcap_pl[MLXSW_REG_MTCAP_LEN] = {0};
- char mtmp_pl[MLXSW_REG_MTMP_LEN];
int i;
int err;
@@ -471,7 +586,15 @@ static int mlxsw_hwmon_temp_init(struct mlxsw_hwmon *mlxsw_hwmon)
}
mlxsw_hwmon->sensor_count = mlxsw_reg_mtcap_sensor_count_get(mtcap_pl);
for (i = 0; i < mlxsw_hwmon->sensor_count; i++) {
- mlxsw_reg_mtmp_pack(mtmp_pl, i, true, true);
+ char mtmp_pl[MLXSW_REG_MTMP_LEN] = {0};
+
+ mlxsw_reg_mtmp_sensor_index_set(mtmp_pl, i);
+ err = mlxsw_reg_query(mlxsw_hwmon->core, MLXSW_REG(mtmp),
+ mtmp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_mtmp_mte_set(mtmp_pl, true);
+ mlxsw_reg_mtmp_mtr_set(mtmp_pl, true);
err = mlxsw_reg_write(mlxsw_hwmon->core,
MLXSW_REG(mtmp), mtmp_pl);
if (err) {
@@ -566,6 +689,12 @@ static int mlxsw_hwmon_module_init(struct mlxsw_hwmon *mlxsw_hwmon)
mlxsw_hwmon_attr_add(mlxsw_hwmon,
MLXSW_HWMON_ATTR_TYPE_TEMP_MODULE_LABEL,
i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_CRIT_ALARM,
+ i, i);
+ mlxsw_hwmon_attr_add(mlxsw_hwmon,
+ MLXSW_HWMON_ATTR_TYPE_TEMP_EMERGENCY_ALARM,
+ i, i);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 1c64b03ff48e..641cdd81882b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -620,9 +620,9 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_cq_tasklet(unsigned long data)
+static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
struct mlxsw_pci *mlxsw_pci = q->pci;
char *cqe;
int items = 0;
@@ -733,9 +733,9 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_eq_tasklet(unsigned long data)
+static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
struct mlxsw_pci *mlxsw_pci = q->pci;
u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
@@ -792,7 +792,7 @@ struct mlxsw_pci_queue_ops {
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
- void (*tasklet)(unsigned long data);
+ void (*tasklet)(struct tasklet_struct *t);
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
@@ -855,7 +855,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->pci = mlxsw_pci;
if (q_ops->tasklet)
- tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
+ tasklet_setup(&q->tasklet, q_ops->tasklet);
mem_item->size = MLXSW_PCI_AQ_SIZE;
mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
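/* Editorial sketch, not part of this patch, of the tasklet API conversion
 * above (tasklet_setup()/from_tasklet() are from <linux/interrupt.h>;
 * the names below are hypothetical):
 */
struct my_queue {
	struct tasklet_struct tasklet;	/* must be embedded, not pointed to */
	int budget;
};

static void my_queue_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() in disguise: it maps the tasklet
	 * pointer back to the structure embedding it, replacing the old
	 * (unsigned long) data cast.
	 */
	struct my_queue *q = from_tasklet(q, t, tasklet);

	(void)q;			/* ... process the queue ... */
}

/* registration: tasklet_setup(&q->tasklet, my_queue_tasklet); */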
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 079b080de7f7..39eff6a57ba2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4174,7 +4174,6 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_SGMII_100M BIT(0)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_1000BASE_X_SGMII BIT(1)
-#define MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII BIT(2)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R BIT(3)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XFI_XAUI_1_10G BIT(4)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_XLAUI_4_XLPPI_4_40G BIT(5)
@@ -4197,7 +4196,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 BIT(2)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 BIT(3)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR BIT(4)
-#define MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2 BIT(5)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 BIT(6)
#define MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 BIT(7)
#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR BIT(12)
@@ -4210,10 +4208,6 @@ MLXSW_ITEM32(reg, ptys, ext_eth_proto_cap, 0x08, 0, 32);
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 BIT(20)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 BIT(21)
#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 BIT(22)
-#define MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4 BIT(23)
-#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX BIT(24)
-#define MLXSW_REG_PTYS_ETH_SPEED_100BASE_T BIT(25)
-#define MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T BIT(26)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR BIT(27)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR BIT(28)
#define MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR BIT(29)
@@ -5411,6 +5405,64 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
mlxsw_reg_pspa_sub_port_set(payload, 0);
}
+/* PMAOS - Ports Module Administrative and Operational Status
+ * ----------------------------------------------------------
+ * This register configures and retrieves the per module status.
+ */
+#define MLXSW_REG_PMAOS_ID 0x5012
+#define MLXSW_REG_PMAOS_LEN 0x10
+
+MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
+
+/* reg_pmaos_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4);
+
+/* reg_pmaos_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8);
+
+/* reg_pmaos_ase
+ * Admin state update enable.
+ * If this bit is set, admin state will be updated based on the admin_state field.
+ * Only relevant on Set() operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmaos, ase, 0x04, 31, 1);
+
+/* reg_pmaos_ee
+ * Event update enable.
+ * If this bit is set, event generation will be updated based on the e field.
+ * Only relevant on Set operations.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmaos, ee, 0x04, 30, 1);
+
+enum mlxsw_reg_pmaos_e {
+ MLXSW_REG_PMAOS_E_DO_NOT_GENERATE_EVENT,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT,
+ MLXSW_REG_PMAOS_E_GENERATE_SINGLE_EVENT,
+};
+
+/* reg_pmaos_e
+ * Event Generation on operational state change.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
+
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module,
+ enum mlxsw_reg_pmaos_e e)
+{
+ MLXSW_REG_ZERO(pmaos, payload);
+ mlxsw_reg_pmaos_module_set(payload, module);
+ mlxsw_reg_pmaos_e_set(payload, e);
+ mlxsw_reg_pmaos_ee_set(payload, true);
+}
+
/* PPLR - Port Physical Loopback Register
* --------------------------------------
* This register allows configuration of the port's loopback mode.
@@ -5447,6 +5499,50 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMPE - Port Module Plug/Unplug Event Register
+ * ---------------------------------------------
+ * This register reports any operational status change of a module.
+ * A change in the module's state will generate an event only if the change
+ * happens after arming the event mechanism. Any changes to the module state
+ * while the event mechanism is not armed will not be reported. Software can
+ * query the PMPE register for module status.
+ */
+#define MLXSW_REG_PMPE_ID 0x5024
+#define MLXSW_REG_PMPE_LEN 0x10
+
+MLXSW_REG_DEFINE(pmpe, MLXSW_REG_PMPE_ID, MLXSW_REG_PMPE_LEN);
+
+/* reg_pmpe_slot_index
+ * Slot index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmpe, slot_index, 0x00, 24, 4);
+
+/* reg_pmpe_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmpe, module, 0x00, 16, 8);
+
+enum mlxsw_reg_pmpe_module_status {
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ENABLED = 1,
+ MLXSW_REG_PMPE_MODULE_STATUS_UNPLUGGED,
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_ERROR,
+ MLXSW_REG_PMPE_MODULE_STATUS_PLUGGED_DISABLED,
+};
+
+/* reg_pmpe_module_status
+ * Module status.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmpe, module_status, 0x00, 0, 4);
+
+/* reg_pmpe_error_type
+ * Module error details.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmpe, error_type, 0x04, 8, 4);
+
/* PDDR - Port Diagnostics Database Register
* -----------------------------------------
* The PDDR enables to read the Phy debug database
@@ -5585,6 +5681,9 @@ MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
+ MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
+ MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
+ MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_STP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP,
@@ -8418,6 +8517,13 @@ MLXSW_ITEM32(reg, mtmp, max_temperature, 0x08, 0, 16);
* 2 - Generate single event
* Access: RW
*/
+
+enum mlxsw_reg_mtmp_tee {
+ MLXSW_REG_MTMP_TEE_NO_EVENT,
+ MLXSW_REG_MTMP_TEE_GENERATE_EVENT,
+ MLXSW_REG_MTMP_TEE_GENERATE_SINGLE_EVENT,
+};
+
MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
#define MLXSW_REG_MTMP_THRESH_HI 0x348 /* 105 Celsius */
@@ -8428,6 +8534,7 @@ MLXSW_ITEM32(reg, mtmp, tee, 0x0C, 30, 2);
*/
MLXSW_ITEM32(reg, mtmp, temperature_threshold_hi, 0x0C, 0, 16);
+#define MLXSW_REG_MTMP_HYSTERESIS_TEMP 0x28 /* 5 Celsius */
/* reg_mtmp_temperature_threshold_lo
* Low threshold for Temperature Warning Event. In 0.125 Celsius.
* Access: RW
@@ -8471,6 +8578,23 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, int *p_temp,
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
+/* MTWE - Management Temperature Warning Event
+ * -------------------------------------------
+ * This register is used for over temperature warning.
+ */
+#define MLXSW_REG_MTWE_ID 0x900B
+#define MLXSW_REG_MTWE_LEN 0x10
+
+MLXSW_REG_DEFINE(mtwe, MLXSW_REG_MTWE_ID, MLXSW_REG_MTWE_LEN);
+
+/* reg_mtwe_sensor_warning
+ * Bit vector indicating which of the sensor readings are above threshold.
+ * Address 00h bit31 is sensor_warning[127].
+ * Address 0Ch bit0 is sensor_warning[0].
+ * Access: RO
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, mtwe, sensor_warning, 0x0, 0x10, 1);
+
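/* A minimal sketch of walking the sensor_warning bit array with the getter
 * generated by MLXSW_ITEM_BIT_ARRAY() above; 0x10 bytes at one bit per
 * sensor gives 128 entries.
 */
static void example_mtwe_listener(const char *mtwe_pl, void *priv)
{
	int i;

	for (i = 0; i < 128; i++)
		if (mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl, i))
			pr_warn("sensor %d crossed its high temperature threshold\n",
				i);
}
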
/* MTBR - Management Temperature Bulk Register
* -------------------------------------------
* This register is used for bulk temperature reading.
@@ -9827,6 +9951,26 @@ static inline void mlxsw_reg_mtptptp_pack(char *payload,
mlxsw_reg_mtptpt_message_type_set(payload, message_type);
}
+/* MFGD - Monitoring FW General Debug Register
+ * -------------------------------------------
+ */
+#define MLXSW_REG_MFGD_ID 0x90F0
+#define MLXSW_REG_MFGD_LEN 0x0C
+
+MLXSW_REG_DEFINE(mfgd, MLXSW_REG_MFGD_ID, MLXSW_REG_MFGD_LEN);
+
+/* reg_mfgd_fw_fatal_event_mode
+ * 0 - don't check FW fatal (default)
+ * 1 - check FW fatal - enable MFDE trap
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mfgd, fatal_event_mode, 0x00, 9, 2);
+
+/* reg_mfgd_trigger_test
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, mfgd, trigger_test, 0x00, 11, 1);
+
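/* A minimal sketch of enabling the MFDE trap via fatal_event_mode. The
 * read-modify-write keeps the other MFGD fields intact; mlxsw_reg_query()
 * is assumed from the driver core.
 */
static int example_mfgd_enable_fw_fatal(struct mlxsw_core *mlxsw_core)
{
	char mfgd_pl[MLXSW_REG_MFGD_LEN];
	int err;

	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
	if (err)
		return err;
	mlxsw_reg_mfgd_fatal_event_mode_set(mfgd_pl, 1);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(mfgd), mfgd_pl);
}
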
/* MGPIR - Management General Peripheral Information Register
* ----------------------------------------------------------
* MGPIR register allows software to query the hardware and
@@ -9886,6 +10030,84 @@ mlxsw_reg_mgpir_unpack(char *payload, u8 *num_of_devices,
*num_of_modules = mlxsw_reg_mgpir_num_of_modules_get(payload);
}
+/* MFDE - Monitoring FW Debug Register
+ * -----------------------------------
+ */
+#define MLXSW_REG_MFDE_ID 0x9200
+#define MLXSW_REG_MFDE_LEN 0x18
+
+MLXSW_REG_DEFINE(mfde, MLXSW_REG_MFDE_ID, MLXSW_REG_MFDE_LEN);
+
+/* reg_mfde_irisc_id
+ * Which irisc triggered the event.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, irisc_id, 0x00, 8, 4);
+
+enum mlxsw_reg_mfde_event_id {
+ MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO = 1,
+ /* KVD insertion machine stopped */
+ MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP,
+};
+
+/* reg_mfde_event_id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, event_id, 0x00, 0, 8);
+
+enum mlxsw_reg_mfde_method {
+ MLXSW_REG_MFDE_METHOD_QUERY,
+ MLXSW_REG_MFDE_METHOD_WRITE,
+};
+
+/* reg_mfde_method
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, method, 0x04, 29, 1);
+
+/* reg_mfde_long_process
+ * Indicates if the command is in long_process mode.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, long_process, 0x04, 28, 1);
+
+enum mlxsw_reg_mfde_command_type {
+ MLXSW_REG_MFDE_COMMAND_TYPE_MAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_EMAD,
+ MLXSW_REG_MFDE_COMMAND_TYPE_CMDIF,
+};
+
+/* reg_mfde_command_type
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, command_type, 0x04, 24, 2);
+
+/* reg_mfde_reg_attr_id
+ * EMAD - register id, MAD - attribute id
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, reg_attr_id, 0x04, 0, 16);
+
+/* reg_mfde_log_address
+ * The crspace address accessed, which resulted in the timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, log_address, 0x10, 0, 32);
+
+/* reg_mfde_log_id
+ * Which irisc triggered the timeout.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, log_id, 0x14, 0, 4);
+
+/* reg_mfde_pipes_mask
+ * Bit per kvh pipe.
+ * Valid in case event_id == MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mfde, pipes_mask, 0x10, 0, 16);
+
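/* A minimal sketch of decoding an MFDE payload with the getters generated
 * above, dispatching on event_id since log_address/log_id and pipes_mask
 * are only meaningful for their respective events.
 */
static void example_mfde_report(const char *mfde_pl)
{
	switch (mlxsw_reg_mfde_event_id_get(mfde_pl)) {
	case MLXSW_REG_MFDE_EVENT_ID_CRSPACE_TO:
		pr_err("fw fatal: crspace timeout at 0x%x (irisc %u)\n",
		       mlxsw_reg_mfde_log_address_get(mfde_pl),
		       mlxsw_reg_mfde_log_id_get(mfde_pl));
		break;
	case MLXSW_REG_MFDE_EVENT_ID_KVD_IM_STOP:
		pr_err("fw fatal: KVD insertion machine stopped, pipes 0x%x\n",
		       mlxsw_reg_mfde_pipes_mask_get(mfde_pl));
		break;
	}
}
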
/* TNGCR - Tunneling NVE General Configuration Register
* ----------------------------------------------------
* The TNGCR register is used for setting up the NVE Tunneling configuration.
@@ -10948,7 +11170,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pptb),
MLXSW_REG(pbmc),
MLXSW_REG(pspa),
+ MLXSW_REG(pmaos),
MLXSW_REG(pplr),
+ MLXSW_REG(pmpe),
MLXSW_REG(pddr),
MLXSW_REG(pmtm),
MLXSW_REG(htgt),
@@ -10978,6 +11202,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(fore),
MLXSW_REG(mtcap),
MLXSW_REG(mtmp),
+ MLXSW_REG(mtwe),
MLXSW_REG(mtbr),
MLXSW_REG(mcia),
MLXSW_REG(mpat),
@@ -10999,7 +11224,9 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mtpppc),
MLXSW_REG(mtpptr),
MLXSW_REG(mtptpt),
+ MLXSW_REG(mfgd),
MLXSW_REG(mgpir),
+ MLXSW_REG(mfde),
MLXSW_REG(tngcr),
MLXSW_REG(tnumt),
MLXSW_REG(tnqcr),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index f3c0e241e1b4..16b47fce540b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -42,11 +42,10 @@
#include "spectrum_span.h"
#include "spectrum_ptp.h"
#include "spectrum_trap.h"
-#include "../mlxfw/mlxfw.h"
#define MLXSW_SP1_FWREV_MAJOR 13
-#define MLXSW_SP1_FWREV_MINOR 2007
-#define MLXSW_SP1_FWREV_SUBMINOR 1168
+#define MLXSW_SP1_FWREV_MINOR 2008
+#define MLXSW_SP1_FWREV_SUBMINOR 1310
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -62,8 +61,8 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
"." __stringify(MLXSW_SP1_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP2_FWREV_MAJOR 29
-#define MLXSW_SP2_FWREV_MINOR 2007
-#define MLXSW_SP2_FWREV_SUBMINOR 1168
+#define MLXSW_SP2_FWREV_MINOR 2008
+#define MLXSW_SP2_FWREV_SUBMINOR 1310
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -77,8 +76,8 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
"." __stringify(MLXSW_SP2_FWREV_SUBMINOR) ".mfa2"
#define MLXSW_SP3_FWREV_MAJOR 30
-#define MLXSW_SP3_FWREV_MINOR 2007
-#define MLXSW_SP3_FWREV_SUBMINOR 1168
+#define MLXSW_SP3_FWREV_MINOR 2008
+#define MLXSW_SP3_FWREV_SUBMINOR 1310
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -170,274 +169,6 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
-struct mlxsw_sp_mlxfw_dev {
- struct mlxfw_dev mlxfw_dev;
- struct mlxsw_sp *mlxsw_sp;
-};
-
-static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
- u16 component_index, u32 *p_max_size,
- u8 *p_align_bits, u16 *p_max_write_size)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcqi_pl[MLXSW_REG_MCQI_LEN];
- int err;
-
- mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
- if (err)
- return err;
- mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
- p_max_write_size);
-
- *p_align_bits = max_t(u8, *p_align_bits, 2);
- *p_max_write_size = min_t(u16, *p_max_write_size,
- MLXSW_REG_MCDA_MAX_DATA_LEN);
- return 0;
-}
-
-static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
- u8 control_state;
- int err;
-
- mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
- if (control_state != MLXFW_FSM_STATE_IDLE)
- return -EBUSY;
-
- mlxsw_reg_mcc_pack(mcc_pl,
- MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
- 0, *fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u16 component_index,
- u32 component_size)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
- component_index, fwhandle, component_size);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u8 *data, u16 size,
- u32 offset)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcda_pl[MLXSW_REG_MCDA_LEN];
-
- mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
-}
-
-static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
- u32 fwhandle, u16 component_index)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
- component_index, fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
- fwhandle, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
- enum mlxfw_fsm_state *fsm_state,
- enum mlxfw_fsm_state_err *fsm_state_err)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
- u8 control_state;
- u8 error_code;
- int err;
-
- mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
- if (err)
- return err;
-
- mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
- *fsm_state = control_state;
- *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
- MLXFW_FSM_STATE_ERR_MAX);
- return 0;
-}
-
-static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
- fwhandle, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
-{
- struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
- container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
- char mcc_pl[MLXSW_REG_MCC_LEN];
-
- mlxsw_reg_mcc_pack(mcc_pl,
- MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
- fwhandle, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
-}
-
-static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
- .component_query = mlxsw_sp_component_query,
- .fsm_lock = mlxsw_sp_fsm_lock,
- .fsm_component_update = mlxsw_sp_fsm_component_update,
- .fsm_block_download = mlxsw_sp_fsm_block_download,
- .fsm_component_verify = mlxsw_sp_fsm_component_verify,
- .fsm_activate = mlxsw_sp_fsm_activate,
- .fsm_query_state = mlxsw_sp_fsm_query_state,
- .fsm_cancel = mlxsw_sp_fsm_cancel,
- .fsm_release = mlxsw_sp_fsm_release,
-};
-
-static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
- const struct firmware *firmware,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
- .mlxfw_dev = {
- .ops = &mlxsw_sp_mlxfw_dev_ops,
- .psid = mlxsw_sp->bus_info->psid,
- .psid_size = strlen(mlxsw_sp->bus_info->psid),
- .devlink = priv_to_devlink(mlxsw_sp->core),
- },
- .mlxsw_sp = mlxsw_sp
- };
- int err;
-
- mlxsw_core_fw_flash_start(mlxsw_sp->core);
- err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev,
- firmware, extack);
- mlxsw_core_fw_flash_end(mlxsw_sp->core);
-
- return err;
-}
-
-static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
-{
- const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
- const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev;
- const char *fw_filename = mlxsw_sp->fw_filename;
- union devlink_param_value value;
- const struct firmware *firmware;
- int err;
-
- /* Don't check if driver does not require it */
- if (!req_rev || !fw_filename)
- return 0;
-
- /* Don't check if devlink 'fw_load_policy' param is 'flash' */
- err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core),
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- &value);
- if (err)
- return err;
- if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)
- return 0;
-
- /* Validate driver & FW are compatible */
- if (rev->major != req_rev->major) {
- WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n",
- rev->major, req_rev->major);
- return -EINVAL;
- }
- if (mlxsw_core_fw_rev_minor_subminor_validate(rev, req_rev))
- return 0;
-
- dev_err(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d is incompatible with the driver (required >= %d.%d.%d)\n",
- rev->major, rev->minor, rev->subminor, req_rev->major,
- req_rev->minor, req_rev->subminor);
- dev_info(mlxsw_sp->bus_info->dev, "Flashing firmware using file %s\n",
- fw_filename);
-
- err = request_firmware_direct(&firmware, fw_filename,
- mlxsw_sp->bus_info->dev);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
- fw_filename);
- return err;
- }
-
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, NULL);
- release_firmware(firmware);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
-
- /* On FW flash success, tell the caller FW reset is needed
- * if current FW supports it.
- */
- if (rev->minor >= req_rev->can_reset_minor)
- return err ? err : -EAGAIN;
- else
- return 0;
-}
-
-static int mlxsw_sp_flash_update(struct mlxsw_core *mlxsw_core,
- const char *file_name, const char *component,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- const struct firmware *firmware;
- int err;
-
- if (component)
- return -EOPNOTSUPP;
-
- err = request_firmware_direct(&firmware, file_name,
- mlxsw_sp->bus_info->dev);
- if (err)
- return err;
- err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware, extack);
- release_firmware(firmware);
-
- return err;
-}
-
int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
unsigned int counter_index, u64 *packets,
u64 *bytes)
@@ -590,21 +321,28 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
-static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int max_mtu;
int err;
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
if (err)
return err;
- max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
- if (mtu > max_mtu)
+ *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
+ return 0;
+}
+
+static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pmtu_pl[MLXSW_REG_PMTU_LEN];
+
+ mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
+ if (mtu > mlxsw_sp_port->max_mtu)
return -EINVAL;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
@@ -872,133 +610,25 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
-{
- return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
-}
-
-#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
-
-static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
- u16 delay)
-{
- delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
- BITS_PER_BYTE));
- return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
- mtu);
-}
-
-/* Maximum delay buffer needed in case of PAUSE frames, in bytes.
- * Assumes 100m cable and maximum MTU.
- */
-#define MLXSW_SP_PAUSE_DELAY 58752
-
-static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
- u16 delay, bool pfc, bool pause)
-{
- if (pfc)
- return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
- else if (pause)
- return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
- else
- return 0;
-}
-
-static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
- bool lossy)
+static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
- if (lossy)
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
- else
- mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
- thres);
-}
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int err;
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
- u8 *prio_tc, bool pause_en,
- struct ieee_pfc *my_pfc)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
- u16 delay = !!my_pfc ? my_pfc->delay : 0;
- char pbmc_pl[MLXSW_REG_PBMC_LEN];
- u32 taken_headroom_cells = 0;
- u32 max_headroom_cells;
- int i, j, err;
+ orig_hdroom = *mlxsw_sp_port->hdroom;
- max_headroom_cells = mlxsw_sp_sb_max_headroom_cells(mlxsw_sp);
+ hdroom = orig_hdroom;
+ hdroom.mtu = mtu;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
- if (err)
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
return err;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- bool configure = false;
- bool pfc = false;
- u16 thres_cells;
- u16 delay_cells;
- u16 total_cells;
- bool lossy;
-
- for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
- if (prio_tc[j] == i) {
- pfc = pfc_en & BIT(j);
- configure = true;
- break;
- }
- }
-
- if (!configure)
- continue;
-
- lossy = !(pfc || pause_en);
- thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
- delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
- pfc, pause_en);
- delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
- total_cells = thres_cells + delay_cells;
-
- taken_headroom_cells += total_cells;
- if (taken_headroom_cells > max_headroom_cells)
- return -ENOBUFS;
-
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, total_cells,
- thres_cells, lossy);
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
- int mtu, bool pause_en)
-{
- u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
- bool dcb_en = !!mlxsw_sp_port->dcb.ets;
- struct ieee_pfc *my_pfc;
- u8 *prio_tc;
-
- prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
- my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
-
- return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
- pause_en, my_pfc);
-}
-
-static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
-{
- struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- int err;
-
- err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
- if (err)
- return err;
- err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
- if (err)
- goto err_span_port_mtu_update;
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
@@ -1006,9 +636,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
return 0;
err_port_mtu_set:
- mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
-err_span_port_mtu_update:
- mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
@@ -1737,6 +1365,22 @@ static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
+static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+ u64 overheat_counter;
+ int err;
+
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, module,
+ &overheat_counter);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
+ return 0;
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
u8 split_base_local_port,
struct mlxsw_sp_port_mapping *port_mapping)
@@ -1842,6 +1486,21 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_speed_by_width_set;
}
+ err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
+ &mlxsw_sp_port->max_speed);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
+ mlxsw_sp_port->local_port);
+ goto err_max_speed_get;
+ }
+
+ err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_max_mtu_get;
+ }
+
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1930,10 +1589,16 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
mlxsw_sp->ptp_ops->shaper_work);
- INIT_DELAYED_WORK(&mlxsw_sp_port->span.speed_update_dw,
- mlxsw_sp_span_speed_update_work);
mlxsw_sp->ports[local_port] = mlxsw_sp_port;
+
+ err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_overheat_init_val_set;
+ }
+
err = register_netdev(dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -1947,6 +1612,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
err_register_netdev:
+err_port_overheat_init_val_set:
mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
@@ -1963,9 +1629,12 @@ err_port_dcb_init:
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
+err_port_max_mtu_get:
+err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
@@ -1986,7 +1655,6 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
- cancel_delayed_work_sync(&mlxsw_sp_port->span.speed_update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
@@ -1998,6 +1666,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2390,7 +2059,6 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
netdev_info(mlxsw_sp_port->dev, "link up\n");
netif_carrier_on(mlxsw_sp_port->dev);
mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
- mlxsw_core_schedule_dw(&mlxsw_sp_port->span.speed_update_dw, 0);
} else {
netdev_info(mlxsw_sp_port->dev, "link down\n");
netif_carrier_off(mlxsw_sp_port->dev);
@@ -2783,11 +2451,36 @@ static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
{
char htgt_pl[MLXSW_REG_HTGT_LEN];
+ int err;
mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_REG_HTGT_INVALID_POLICER,
MLXSW_REG_HTGT_DEFAULT_PRIORITY,
MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MFDE,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_MTWE,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_PMPE,
+ MLXSW_REG_HTGT_INVALID_POLICER,
+ MLXSW_REG_HTGT_DEFAULT_PRIORITY,
+ MLXSW_REG_HTGT_DEFAULT_TC);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
}
@@ -2836,10 +2529,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
- if (err)
- return err;
-
mlxsw_core_emad_string_tlv_enable(mlxsw_core);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -3039,8 +2728,6 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp1_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP1_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
@@ -3051,6 +2738,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp1_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
@@ -3069,8 +2757,6 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp2_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP2_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -3081,6 +2767,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
@@ -3097,8 +2784,6 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- mlxsw_sp->req_rev = &mlxsw_sp3_fw_rev;
- mlxsw_sp->fw_filename = MLXSW_SP3_FW_FILENAME;
mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
@@ -3109,6 +2794,7 @@ static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
mlxsw_sp->rif_ops_arr = mlxsw_sp2_rif_ops_arr;
mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
+ mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
@@ -3451,52 +3137,6 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
}
static int
-mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
-{
- if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) &&
- (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) {
- NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'");
- return -EINVAL;
- }
-
- return 0;
-}
-
-static const struct devlink_param mlxsw_sp_devlink_params[] = {
- DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY,
- BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
- NULL, NULL,
- mlxsw_sp_devlink_param_fw_load_policy_validate),
-};
-
-static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core)
-{
- struct devlink *devlink = priv_to_devlink(mlxsw_core);
- union devlink_param_value value;
- int err;
-
- err = devlink_params_register(devlink, mlxsw_sp_devlink_params,
- ARRAY_SIZE(mlxsw_sp_devlink_params));
- if (err)
- return err;
-
- value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER;
- devlink_param_driverinit_value_set(devlink,
- DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
- value);
- return 0;
-}
-
-static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core)
-{
- devlink_params_unregister(priv_to_devlink(mlxsw_core),
- mlxsw_sp_devlink_params,
- ARRAY_SIZE(mlxsw_sp_devlink_params));
-}
-
-static int
mlxsw_sp_params_acl_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -3533,24 +3173,16 @@ static int mlxsw_sp2_params_register(struct mlxsw_core *mlxsw_core)
union devlink_param_value value;
int err;
- err = mlxsw_sp_params_register(mlxsw_core);
- if (err)
- return err;
-
err = devlink_params_register(devlink, mlxsw_sp2_devlink_params,
ARRAY_SIZE(mlxsw_sp2_devlink_params));
if (err)
- goto err_devlink_params_register;
+ return err;
value.vu32 = 0;
devlink_param_driverinit_value_set(devlink,
MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
value);
return 0;
-
-err_devlink_params_register:
- mlxsw_sp_params_unregister(mlxsw_core);
- return err;
}
static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
@@ -3558,7 +3190,6 @@ static void mlxsw_sp2_params_unregister(struct mlxsw_core *mlxsw_core)
devlink_params_unregister(priv_to_devlink(mlxsw_core),
mlxsw_sp2_devlink_params,
ARRAY_SIZE(mlxsw_sp2_devlink_params));
- mlxsw_sp_params_unregister(mlxsw_core);
}
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
@@ -3573,6 +3204,8 @@ static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
static struct mlxsw_driver mlxsw_sp1_driver = {
.kind = mlxsw_sp1_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp1_fw_rev,
+ .fw_filename = MLXSW_SP1_FW_FILENAME,
.init = mlxsw_sp1_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3588,7 +3221,6 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3601,17 +3233,19 @@ static struct mlxsw_driver mlxsw_sp1_driver = {
.txhdr_construct = mlxsw_sp_txhdr_construct,
.resources_register = mlxsw_sp1_resources_register,
.kvd_sizes_get = mlxsw_sp_kvd_sizes_get,
- .params_register = mlxsw_sp_params_register,
- .params_unregister = mlxsw_sp_params_unregister,
.ptp_transmitted = mlxsw_sp_ptp_transmitted,
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp1_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp2_driver = {
.kind = mlxsw_sp2_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp2_fw_rev,
+ .fw_filename = MLXSW_SP2_FW_FILENAME,
.init = mlxsw_sp2_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3627,7 +3261,6 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3645,11 +3278,15 @@ static struct mlxsw_driver mlxsw_sp2_driver = {
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
};
static struct mlxsw_driver mlxsw_sp3_driver = {
.kind = mlxsw_sp3_driver_name,
.priv_size = sizeof(struct mlxsw_sp),
+ .fw_req_rev = &mlxsw_sp3_fw_rev,
+ .fw_filename = MLXSW_SP3_FW_FILENAME,
.init = mlxsw_sp3_init,
.fini = mlxsw_sp_fini,
.basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
@@ -3665,7 +3302,6 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
.sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
.sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
- .flash_update = mlxsw_sp_flash_update,
.trap_init = mlxsw_sp_trap_init,
.trap_fini = mlxsw_sp_trap_fini,
.trap_action_set = mlxsw_sp_trap_action_set,
@@ -3683,6 +3319,8 @@ static struct mlxsw_driver mlxsw_sp3_driver = {
.txhdr_len = MLXSW_TXHDR_LEN,
.profile = &mlxsw_sp2_config_profile,
.res_query_enabled = true,
+ .fw_fatal_enabled = true,
+ .temp_warn_enabled = true,
};
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 5240bf11b6c4..3e26eb6cb140 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -125,6 +125,7 @@ struct mlxsw_sp_mr_tcam_ops;
struct mlxsw_sp_acl_rulei_ops;
struct mlxsw_sp_acl_tcam_ops;
struct mlxsw_sp_nve_ops;
+struct mlxsw_sp_sb_ops;
struct mlxsw_sp_sb_vals;
struct mlxsw_sp_port_type_speed_ops;
struct mlxsw_sp_ptp_state;
@@ -162,8 +163,6 @@ struct mlxsw_sp {
struct mlxsw_sp_counter_pool *counter_pool;
struct mlxsw_sp_span *span;
struct mlxsw_sp_trap *trap;
- const struct mlxsw_fw_rev *req_rev;
- const char *fw_filename;
const struct mlxsw_sp_kvdl_ops *kvdl_ops;
const struct mlxsw_afa_ops *afa_ops;
const struct mlxsw_afk_ops *afk_ops;
@@ -173,6 +172,7 @@ struct mlxsw_sp {
const struct mlxsw_sp_nve_ops **nve_ops_arr;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_sb_vals *sb_vals;
+ const struct mlxsw_sp_sb_ops *sb_ops;
const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
const struct mlxsw_sp_ptp_ops *ptp_ops;
const struct mlxsw_sp_span_ops *span_ops;
@@ -316,9 +316,10 @@ struct mlxsw_sp_port {
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
u8 split_base_local_port;
- struct {
- struct delayed_work speed_update_dw;
- } span;
+ int max_mtu;
+ u32 max_speed;
+ struct mlxsw_sp_hdroom *hdroom;
+ u64 module_overheat_initial_val;
};
struct mlxsw_sp_port_type_speed_ops {
@@ -331,6 +332,7 @@ struct mlxsw_sp_port_type_speed_ops {
void (*from_ptys_speed_duplex)(struct mlxsw_sp *mlxsw_sp,
bool carrier_ok, u32 ptys_eth_proto,
struct ethtool_link_ksettings *cmd);
+ int (*ptys_max_speed)(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed);
u32 (*to_ptys_advert_link)(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd);
u32 (*to_ptys_speed)(struct mlxsw_sp *mlxsw_sp, u8 width, u32 speed);
@@ -414,34 +416,73 @@ mlxsw_sp_port_vlan_find_by_vid(const struct mlxsw_sp_port *mlxsw_sp_port,
return NULL;
}
-static inline u32
-mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
- u32 size_cells)
-{
- /* Ports with eight lanes use two headroom buffers between which the
- * configured headroom size is split. Therefore, multiply the calculated
- * headroom size by two.
- */
- return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
-}
-
enum mlxsw_sp_flood_type {
MLXSW_SP_FLOOD_TYPE_UC,
MLXSW_SP_FLOOD_TYPE_BC,
MLXSW_SP_FLOOD_TYPE_MC,
};
-int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
- int mtu, bool pause_en);
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
int prio, char *ppcnt_pl);
int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up);
/* spectrum_buffers.c */
+struct mlxsw_sp_hdroom_prio {
+	/* Index of the port buffer associated with this priority. This is the
+ * actually configured value.
+ */
+ u8 buf_idx;
+ /* Value of buf_idx deduced from the DCB ETS configuration. */
+ u8 ets_buf_idx;
+ /* Value of buf_idx taken from the dcbnl_setbuffer configuration. */
+ u8 set_buf_idx;
+ bool lossy;
+};
+
+struct mlxsw_sp_hdroom_buf {
+ u32 thres_cells;
+ u32 size_cells;
+	/* Size requirement from dcbnl_setbuffer. */
+ u32 set_size_cells;
+ bool lossy;
+};
+
+enum mlxsw_sp_hdroom_mode {
+ MLXSW_SP_HDROOM_MODE_DCB,
+ MLXSW_SP_HDROOM_MODE_TC,
+};
+
+#define MLXSW_SP_PB_COUNT 10
+
+struct mlxsw_sp_hdroom {
+ enum mlxsw_sp_hdroom_mode mode;
+
+ struct {
+ struct mlxsw_sp_hdroom_prio prio[IEEE_8021Q_MAX_PRIORITIES];
+ } prios;
+ struct {
+ struct mlxsw_sp_hdroom_buf buf[MLXSW_SP_PB_COUNT];
+ } bufs;
+ struct {
+ /* Size actually configured for the internal buffer. Equal to
+ * reserve when internal buffer is enabled.
+ */
+ u32 size_cells;
+ /* Space reserved in the headroom for the internal buffer. Port
+ * buffers are not allowed to grow into this space.
+ */
+ u32 reserve_cells;
+ bool enable;
+ } int_buf;
+ int delay_bytes;
+ int mtu;
+};
+
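/* A condensed sketch of the copy-modify-commit pattern that every caller in
 * this patch follows: mutate a copy of the port's headroom configuration,
 * re-derive the dependent fields with the reset helpers declared below, and
 * only then commit to hardware.
 */
static int example_hdroom_update_mtu(struct mlxsw_sp_port *mlxsw_sp_port,
				     int new_mtu)
{
	struct mlxsw_sp_hdroom hdroom;

	hdroom = *mlxsw_sp_port->hdroom;
	hdroom.mtu = new_mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
	return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
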
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info);
@@ -477,11 +518,20 @@ int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
u32 *p_cur, u32 *p_max);
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells);
u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes);
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom);
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom);
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom);
extern const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals;
extern const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops;
+extern const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops;
+
/* spectrum_switchdev.c */
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -519,9 +569,6 @@ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool dwrr, u8 dwrr_weight);
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
u8 switch_prio, u8 tclass);
-int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
- u8 *prio_tc, bool pause_en,
- struct ieee_pfc *my_pfc);
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_reg_qeec_hr hr, u8 index,
u8 next_index, u32 maxrate, u8 burst_size);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 6f84557a5a6f..37ff29a1686e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -121,6 +121,10 @@ struct mlxsw_sp_sb_vals {
unsigned int cms_cpu_count;
};
+struct mlxsw_sp_sb_ops {
+ u32 (*int_buf_size_get)(int mtu, u32 speed);
+};
+
u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
return mlxsw_sp->sb->cell_size * cells;
@@ -131,9 +135,14 @@ u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
-u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
+static u32 mlxsw_sp_port_headroom_8x_adjust(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 size_cells)
{
- return mlxsw_sp->sb->max_headroom_cells;
+ /* Ports with eight lanes use two headroom buffers between which the
+ * configured headroom size is split. Therefore, multiply the calculated
+ * headroom size by two.
+ */
+ return mlxsw_sp_port->mapping.width == 8 ? 2 * size_cells : size_cells;
}
static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
@@ -291,55 +300,308 @@ static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
(unsigned long) pm);
}
-/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
-#define MLXSW_SP_PB_HEADROOM 25632
+void mlxsw_sp_hdroom_prios_reset_buf_idx(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ switch (hdroom->mode) {
+ case MLXSW_SP_HDROOM_MODE_DCB:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].ets_buf_idx;
+ break;
+ case MLXSW_SP_HDROOM_MODE_TC:
+ hdroom->prios.prio[prio].buf_idx = hdroom->prios.prio[prio].set_buf_idx;
+ break;
+ }
+ }
+}
+
+void mlxsw_sp_hdroom_bufs_reset_lossiness(struct mlxsw_sp_hdroom *hdroom)
+{
+ int prio;
+ int i;
+
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom->bufs.buf[i].lossy = true;
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++) {
+ if (!hdroom->prios.prio[prio].lossy)
+ hdroom->bufs.buf[hdroom->prios.prio[prio].buf_idx].lossy = false;
+ }
+}
+
+static u16 mlxsw_sp_hdroom_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp, int mtu)
+{
+ return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
+}
+
+static void mlxsw_sp_hdroom_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres, bool lossy)
+{
+ if (lossy)
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
+ else
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
+ thres);
+}
+
+static u16 mlxsw_sp_hdroom_buf_delay_get(const struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ u16 delay_cells;
+
+ delay_cells = mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->delay_bytes);
+
+ /* In the worst case scenario the delay will be made up of packets that
+ * are all of size CELL_SIZE + 1, which means each packet will require
+ * almost twice its true size when buffered in the switch. We therefore
+ * multiply this value by the "cell factor", which is close to 2.
+ *
+ * Another MTU is added in case the transmitting host already started
+ * transmitting a maximum length frame when the PFC packet was received.
+ */
+ return 2 * delay_cells + mlxsw_sp_bytes_cells(mlxsw_sp, hdroom->mtu);
+}
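
/* A worked instance of the formula above, assuming a 96-byte cell (the real
 * value comes from mlxsw_sp->sb->cell_size), a 9216-byte MTU and a
 * hypothetical PFC delay allowance of 19476 bytes:
 *
 *   delay_cells = DIV_ROUND_UP(19476, 96) = 203
 *   mtu_cells   = DIV_ROUND_UP(9216, 96)  = 96
 *   result      = 2 * 203 + 96 = 502 cells (~47 KiB of headroom)
 */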
+
+static u32 mlxsw_sp_hdroom_int_buf_size_get(struct mlxsw_sp *mlxsw_sp, int mtu, u32 speed)
+{
+ u32 buffsize = mlxsw_sp->sb_ops->int_buf_size_get(speed, mtu);
+
+ return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+}
+
+static bool mlxsw_sp_hdroom_buf_is_used(const struct mlxsw_sp_hdroom *hdroom, int buf)
+{
+ int prio;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+ if (hdroom->prios.prio[prio].buf_idx == buf)
+ return true;
+ }
+ return false;
+}
+
+void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_hdroom *hdroom)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u16 reserve_cells;
+ int i;
+
+ /* Internal buffer. */
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_speed,
+ mlxsw_sp_port->max_mtu);
+ reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
+ hdroom->int_buf.reserve_cells = reserve_cells;
+
+ if (hdroom->int_buf.enable)
+ hdroom->int_buf.size_cells = reserve_cells;
+ else
+ hdroom->int_buf.size_cells = 0;
+
+ /* PG buffers. */
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++) {
+ struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
+ u16 thres_cells;
+ u16 delay_cells;
+
+ if (!mlxsw_sp_hdroom_buf_is_used(hdroom, i)) {
+ thres_cells = 0;
+ delay_cells = 0;
+ } else if (buf->lossy) {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = 0;
+ } else {
+ thres_cells = mlxsw_sp_hdroom_buf_threshold_get(mlxsw_sp, hdroom->mtu);
+ delay_cells = mlxsw_sp_hdroom_buf_delay_get(mlxsw_sp, hdroom);
+ }
+
+ thres_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, thres_cells);
+ delay_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, delay_cells);
+
+ buf->thres_cells = thres_cells;
+ if (hdroom->mode == MLXSW_SP_HDROOM_MODE_DCB) {
+ buf->size_cells = thres_cells + delay_cells;
+ } else {
+ /* Do not allow going below the minimum size, even if
+ * the user requested it.
+ */
+ buf->size_cells = max(buf->set_size_cells, buf->thres_cells);
+ }
+ }
+}
+
#define MLXSW_SP_PB_UNUSED 8
-static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_buffers(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
{
- const u32 pbs[] = {
- [0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
- [9] = MLXSW_PORT_MAX_MTU,
- };
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ bool dirty;
+ int err;
int i;
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
- 0xffff, 0xffff / 2);
- for (i = 0; i < ARRAY_SIZE(pbs); i++) {
- u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);
+ dirty = memcmp(&mlxsw_sp_port->hdroom->bufs, &hdroom->bufs, sizeof(hdroom->bufs));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0xffff, 0xffff / 2);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ const struct mlxsw_sp_hdroom_buf *buf = &hdroom->bufs.buf[i];
if (i == MLXSW_SP_PB_UNUSED)
continue;
- size = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, size);
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
+
+ mlxsw_sp_hdroom_buf_pack(pbmc_pl, i, buf->size_cells, buf->thres_cells, buf->lossy);
}
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
- MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->bufs = hdroom->bufs;
+ return 0;
}
-static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_priomap(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
{
char pptb_pl[MLXSW_REG_PPTB_LEN];
- int i;
+ bool dirty;
+ int prio;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->prios, &hdroom->prios, sizeof(hdroom->prios));
+ if (!dirty && !force)
+ return 0;
mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
- pptb_pl);
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, hdroom->prios.prio[prio].buf_idx);
+
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb), pptb_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->prios = hdroom->prios;
+ return 0;
}
-static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_hdroom_configure_int_buf(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ bool dirty;
+ int err;
+
+ dirty = memcmp(&mlxsw_sp_port->hdroom->int_buf, &hdroom->int_buf, sizeof(hdroom->int_buf));
+ if (!dirty && !force)
+ return 0;
+
+ mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, hdroom->int_buf.size_cells);
+ err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err)
+ return err;
+
+ mlxsw_sp_port->hdroom->int_buf = hdroom->int_buf;
+ return 0;
+}
+
+static bool mlxsw_sp_hdroom_bufs_fit(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_hdroom *hdroom)
{
+ u32 taken_headroom_cells = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++)
+ taken_headroom_cells += hdroom->bufs.buf[i].size_cells;
+
+ taken_headroom_cells += hdroom->int_buf.reserve_cells;
+ return taken_headroom_cells <= mlxsw_sp->sb->max_headroom_cells;
+}
+
+static int __mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom, bool force)
+{
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom tmp_hdroom;
int err;
+ int i;
+
+ /* Port buffers need to be configured in three steps. First, all buffers
+ * with non-zero size are configured. Then, prio-to-buffer map is
+ * updated, allowing traffic to flow to the now non-zero buffers.
+ * Finally, zero-sized buffers are configured, because now no traffic
+ * should be directed to them anymore. This way, in a non-congested
+ * system, no packet drops are introduced by the reconfiguration.
+ */
- err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ tmp_hdroom = orig_hdroom;
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ if (hdroom->bufs.buf[i].size_cells)
+ tmp_hdroom.bufs.buf[i] = hdroom->bufs.buf[i];
+ }
+
+ if (!mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, &tmp_hdroom) ||
+ !mlxsw_sp_hdroom_bufs_fit(mlxsw_sp_port->mlxsw_sp, hdroom))
+ return -ENOBUFS;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, force);
if (err)
return err;
- return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+
+ err = mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, hdroom, force);
+ if (err)
+ goto err_configure_priomap;
+
+ err = mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_buffers;
+
+ err = mlxsw_sp_hdroom_configure_int_buf(mlxsw_sp_port, hdroom, false);
+ if (err)
+ goto err_configure_int_buf;
+
+ *mlxsw_sp_port->hdroom = *hdroom;
+ return 0;
+
+err_configure_int_buf:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_buffers:
+ mlxsw_sp_hdroom_configure_priomap(mlxsw_sp_port, &tmp_hdroom, false);
+err_configure_priomap:
+ mlxsw_sp_hdroom_configure_buffers(mlxsw_sp_port, &orig_hdroom, false);
+ return err;
+}
+
+int mlxsw_sp_hdroom_configure(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct mlxsw_sp_hdroom *hdroom)
+{
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, hdroom, false);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom = {};
+ u32 size9;
+ int prio;
+
+ hdroom.mtu = mlxsw_sp_port->dev->mtu;
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = true;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ /* Buffer 9 is used for control traffic. */
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
+
+ return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
}
static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
@@ -916,6 +1178,46 @@ const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
+static u32 mlxsw_sp1_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ return mtu * 5 / 2;
+}
+
+static u32 __mlxsw_sp_pb_int_buf_size_get(int mtu, u32 speed, u32 buffer_factor)
+{
+ return 3 * mtu + buffer_factor * speed / 1000;
+}
+
+#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
+
+static u32 mlxsw_sp2_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
+#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
+
+static u32 mlxsw_sp3_pb_int_buf_size_get(int mtu, u32 speed)
+{
+ int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
+
+ return __mlxsw_sp_pb_int_buf_size_get(mtu, speed, factor);
+}
+
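/* A worked instance of the Spectrum-2 formula, assuming max_speed is in
 * Mb/s as reported by the ethtool-style PTYS speeds: a 100 Gb/s port with
 * a 9216-byte maximum MTU needs
 *
 *   3 * 9216 + 38 * 100000 / 1000 = 27648 + 3800 = 31448 bytes,
 *
 * which mlxsw_sp_hdroom_int_buf_size_get() rounds up to cells and pads by
 * one extra cell.
 */
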
+const struct mlxsw_sp_sb_ops mlxsw_sp1_sb_ops = {
+ .int_buf_size_get = mlxsw_sp1_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp2_sb_ops = {
+ .int_buf_size_get = mlxsw_sp2_pb_int_buf_size_get,
+};
+
+const struct mlxsw_sp_sb_ops mlxsw_sp3_sb_ops = {
+ .int_buf_size_get = mlxsw_sp3_pb_int_buf_size_get,
+};
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
u32 max_headroom_size;
@@ -995,17 +1297,34 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
+ mlxsw_sp_port->hdroom = kzalloc(sizeof(*mlxsw_sp_port->hdroom), GFP_KERNEL);
+ if (!mlxsw_sp_port->hdroom)
+ return -ENOMEM;
+ mlxsw_sp_port->hdroom->mtu = mlxsw_sp_port->dev->mtu;
+
err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
if (err)
- return err;
+ goto err_headroom_init;
err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
if (err)
- return err;
+ goto err_port_sb_cms_init;
err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_sb_pms_init;
+ return 0;
+err_port_sb_pms_init:
+err_port_sb_cms_init:
+err_headroom_init:
+ kfree(mlxsw_sp_port->hdroom);
return err;
}
+void mlxsw_sp_port_buffers_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->hdroom);
+}
+
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
unsigned int sb_index, u16 pool_index,
struct devlink_sb_pool_info *pool_info)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 0d3fb2e51ea5..5f92b1691360 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -64,87 +64,28 @@ static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
}
-static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 *prio_tc)
-{
- char pptb_pl[MLXSW_REG_PPTB_LEN];
- int i;
-
- mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
-
- return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
- pptb_pl);
-}
-
-static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
-{
- int i;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
- if (prio_tc[i] == pg)
- return true;
- return false;
-}
-
-static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
- u8 *old_prio_tc, u8 *new_prio_tc)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char pbmc_pl[MLXSW_REG_PBMC_LEN];
- int err, i;
-
- mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
- if (err)
- return err;
-
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- u8 pg = old_prio_tc[i];
-
- if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
- }
-
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
-}
-
static int mlxsw_sp_port_headroom_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct ieee_ets *ets)
{
- bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
struct net_device *dev = mlxsw_sp_port->dev;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
- /* Create the required PGs, but don't destroy existing ones, as
- * traffic is still directed to them.
- */
- err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- ets->prio_tc, pause_en,
- mlxsw_sp_port->dcb.pfc);
+ hdroom = *mlxsw_sp_port->hdroom;
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].ets_buf_idx = ets->prio_tc[prio];
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom\n");
return err;
}
- err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
- if (err) {
- netdev_err(dev, "Failed to set PG-priority mapping\n");
- goto err_port_prio_pg_map;
- }
-
- err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
- ets->prio_tc);
- if (err)
- netdev_warn(dev, "Failed to remove unused PGs\n");
-
return 0;
-
-err_port_prio_pg_map:
- mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
- return err;
}
static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -605,6 +546,9 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
if (pause_en && pfc->pfc_en) {
@@ -612,9 +556,21 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
return -EINVAL;
}
- err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc,
- pause_en, pfc);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ if (pfc->pfc_en)
+ hdroom.delay_bytes = DIV_ROUND_UP(pfc->delay, BITS_PER_BYTE);
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !(pfc->pfc_en & BIT(prio));
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom for PFC\n");
return err;
@@ -632,12 +588,66 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
return 0;
err_port_pfc_set:
- __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
- mlxsw_sp_port->dcb.pfc);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
+static int mlxsw_sp_dcbnl_getbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp_hdroom *hdroom = mlxsw_sp_port->hdroom;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ int prio;
+ int i;
+
+ buf->total_size = 0;
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < MLXSW_SP_PB_COUNT; i++) {
+ u32 bytes = mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->bufs.buf[i].size_cells);
+
+ if (i < DCBX_MAX_BUFFERS)
+ buf->buffer_size[i] = bytes;
+ buf->total_size += bytes;
+ }
+
+ buf->total_size += mlxsw_sp_cells_bytes(mlxsw_sp, hdroom->int_buf.size_cells);
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ buf->prio2buffer[prio] = hdroom->prios.prio[prio].buf_idx;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_setbuffer(struct net_device *dev, struct dcbnl_buffer *buf)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
+ int i;
+
+ hdroom = *mlxsw_sp_port->hdroom;
+
+ if (hdroom.mode != MLXSW_SP_HDROOM_MODE_TC) {
+ netdev_err(dev, "The use of dcbnl_setbuffer is only allowed if egress is configured using TC\n");
+ return -EINVAL;
+ }
+
+ for (prio = 0; prio < IEEE_8021Q_MAX_PRIORITIES; prio++)
+ hdroom.prios.prio[prio].set_buf_idx = buf->prio2buffer[prio];
+
+ BUILD_BUG_ON(DCBX_MAX_BUFFERS > MLXSW_SP_PB_COUNT);
+ for (i = 0; i < DCBX_MAX_BUFFERS; i++)
+ hdroom.bufs.buf[i].set_size_cells = mlxsw_sp_bytes_cells(mlxsw_sp,
+ buf->buffer_size[i]);
+
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+}
+
static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
.ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
@@ -650,6 +660,9 @@ static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
.getdcbx = mlxsw_sp_dcbnl_getdcbx,
.setdcbx = mlxsw_sp_dcbnl_setdcbx,
+
+ .dcbnl_getbuffer = mlxsw_sp_dcbnl_getbuffer,
+ .dcbnl_setbuffer = mlxsw_sp_dcbnl_setbuffer,
};
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
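
Each reworked DCB handler in this file follows one shape: snapshot the port's current headroom configuration, edit a local copy, recompute the derived buffer state, commit, and on a later failure re-commit the snapshot. A minimal sketch of that snapshot/commit/rollback shape, with stand-in types for the driver's mlxsw_sp_hdroom machinery:

struct cfg { int delay_bytes; };

static int cfg_commit(const struct cfg *c) { (void)c; return 0; }
static int next_step(void) { return 0; }

static int set_param(struct cfg *live, int delay_bytes)
{
	struct cfg orig = *live;	/* snapshot for rollback */
	struct cfg want = orig;
	int err;

	want.delay_bytes = delay_bytes;
	/* ...recompute derived buffer sizes here... */

	err = cfg_commit(&want);
	if (err)
		return err;

	err = next_step();
	if (err) {
		cfg_commit(&orig);	/* best-effort rollback */
		return err;
	}

	*live = want;
	return 0;
}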
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 14c78f73bb65..2096b6478958 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2020 Mellanox Technologies. All rights reserved */
#include "reg.h"
+#include "core.h"
#include "spectrum.h"
#include "core_env.h"
@@ -192,11 +193,19 @@ static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
pfcc_pl);
}
+/* Maximum delay buffer needed in case of PAUSE frames. Similar to PFC delay, but is
+ * measured in bytes. Assumes 100m cable and does not take into account MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY_BYTES 19476
+
static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pause)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
bool pause_en = pause->tx_pause || pause->rx_pause;
+ struct mlxsw_sp_hdroom orig_hdroom;
+ struct mlxsw_sp_hdroom hdroom;
+ int prio;
int err;
if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
@@ -209,7 +218,21 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
return -EINVAL;
}
- err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom = orig_hdroom;
+ if (pause_en)
+ hdroom.delay_bytes = MLXSW_SP_PAUSE_DELAY_BYTES;
+ else
+ hdroom.delay_bytes = 0;
+
+ for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
+ hdroom.prios.prio[prio].lossy = !pause_en;
+
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
if (err) {
netdev_err(dev, "Failed to configure port's headroom\n");
return err;
@@ -227,8 +250,7 @@ static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
return 0;
err_port_pause_configure:
- pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
- mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
return err;
}
@@ -531,6 +553,37 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
+struct mlxsw_sp_port_stats {
+ char str[ETH_GSTRING_LEN];
+ u64 (*getter)(struct mlxsw_sp_port *mlxsw_sp_port);
+};
+
+static u64
+mlxsw_sp_port_get_transceiver_overheat_stats(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port_mapping port_mapping = mlxsw_sp_port->mapping;
+ struct mlxsw_core *mlxsw_core = mlxsw_sp_port->mlxsw_sp->core;
+ u64 stats;
+ int err;
+
+ err = mlxsw_env_module_overheat_counter_get(mlxsw_core,
+ port_mapping.module,
+ &stats);
+ if (err)
+ return mlxsw_sp_port->module_overheat_initial_val;
+
+ return stats - mlxsw_sp_port->module_overheat_initial_val;
+}
+
+static struct mlxsw_sp_port_stats mlxsw_sp_port_transceiver_stats[] = {
+ {
+ .str = "transceiver_overheat",
+ .getter = mlxsw_sp_port_get_transceiver_overheat_stats,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_transceiver_stats)
+
#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \
MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \
@@ -540,7 +593,8 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
(MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \
IEEE_8021QAZ_MAX_TCS) + \
(MLXSW_SP_PORT_HW_TC_STATS_LEN * \
- TC_MAX_QUEUE))
+ TC_MAX_QUEUE) + \
+ MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN)
static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
{
@@ -616,6 +670,12 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
mlxsw_sp_port_get_tc_strings(&p, i);
mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_strings(&p);
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN; i++) {
+ memcpy(p, mlxsw_sp_port_transceiver_stats[i].str,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
break;
}
}
@@ -711,6 +771,17 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
}
}
+static void __mlxsw_sp_port_get_env_stats(struct net_device *dev, u64 *data, int data_index,
+ struct mlxsw_sp_port_stats *port_stats,
+ int len)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < len; i++)
+ data[data_index + i] = port_stats[i].getter(mlxsw_sp_port);
+}
+
static void mlxsw_sp_port_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
@@ -765,6 +836,11 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats(mlxsw_sp_port,
data, data_index);
data_index += mlxsw_sp_port->mlxsw_sp->ptp_ops->get_stats_count();
+
+ /* Transceiver counters */
+ __mlxsw_sp_port_get_env_stats(dev, data, data_index, mlxsw_sp_port_transceiver_stats,
+ MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN);
+ data_index += MLXSW_SP_PORT_HW_TRANSCEIVER_STATS_LEN;
}
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
@@ -842,6 +918,29 @@ mlxsw_sp_port_connector_port(enum mlxsw_reg_ptys_connector_type connector_type)
}
}
+static int mlxsw_sp_port_ptys_query(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 *p_eth_proto_cap, u32 *p_eth_proto_admin,
+ u32 *p_eth_proto_oper, u8 *p_connector_type)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ const struct mlxsw_sp_port_type_speed_ops *ops;
+ char ptys_pl[MLXSW_REG_PTYS_LEN];
+ int err;
+
+ ops = mlxsw_sp->port_type_speed_ops;
+
+ ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port, 0, false);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ if (err)
+ return err;
+
+ ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, p_eth_proto_cap, p_eth_proto_admin,
+ p_eth_proto_oper);
+ if (p_connector_type)
+ *p_connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
+ return 0;
+}
+
static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -849,21 +948,17 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
const struct mlxsw_sp_port_type_speed_ops *ops;
- char ptys_pl[MLXSW_REG_PTYS_LEN];
u8 connector_type;
bool autoneg;
int err;
- ops = mlxsw_sp->port_type_speed_ops;
-
- autoneg = mlxsw_sp_port->link.autoneg;
- ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- 0, false);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, &eth_proto_admin,
+ &eth_proto_oper, &connector_type);
if (err)
return err;
- ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
- &eth_proto_admin, &eth_proto_oper);
+
+ ops = mlxsw_sp->port_type_speed_ops;
+ autoneg = mlxsw_sp_port->link.autoneg;
mlxsw_sp_port_get_link_supported(mlxsw_sp, eth_proto_cap,
mlxsw_sp_port->mapping.width, cmd);
@@ -872,7 +967,6 @@ static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
mlxsw_sp_port->mapping.width, cmd);
cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
- connector_type = mlxsw_reg_ptys_connector_type_get(ptys_pl);
cmd->base.port = mlxsw_sp_port_connector_port(connector_type);
ops->from_ptys_speed_duplex(mlxsw_sp, netif_carrier_ok(dev),
eth_proto_oper, cmd);
@@ -993,22 +1087,12 @@ struct mlxsw_sp1_port_link_mode {
static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
- .speed = SPEED_100,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
.speed = SPEED_1000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
- .speed = SPEED_10000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
@@ -1023,11 +1107,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.speed = SPEED_10000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
- .speed = SPEED_20000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
.mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
.speed = SPEED_40000,
@@ -1092,11 +1171,6 @@ static const struct mlxsw_sp1_port_link_mode mlxsw_sp1_port_link_mode[] = {
.mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
.speed = SPEED_100000,
},
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
- .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- .speed = SPEED_100000,
- },
};
#define MLXSW_SP1_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp1_port_link_mode)
@@ -1164,6 +1238,27 @@ mlxsw_sp1_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
cmd->base.duplex = DUPLEX_FULL;
}
+static int mlxsw_sp1_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp1_port_link_mode[i].mask) &&
+ mlxsw_sp1_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp1_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
static u32
mlxsw_sp1_to_ptys_advert_link(struct mlxsw_sp *mlxsw_sp, u8 width,
const struct ethtool_link_ksettings *cmd)
@@ -1213,6 +1308,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_link = mlxsw_sp1_from_ptys_link,
.from_ptys_speed = mlxsw_sp1_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp1_from_ptys_speed_duplex,
+ .ptys_max_speed = mlxsw_sp1_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp1_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
@@ -1237,14 +1333,6 @@ mlxsw_sp2_mask_ethtool_1000base_x_sgmii[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_1000base_x_sgmii)
static const enum ethtool_link_mode_bit_indices
-mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii[] = {
- ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
-};
-
-#define MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN \
- ARRAY_SIZE(mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii)
-
-static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_5gbase_r[] = {
ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
};
@@ -1408,16 +1496,6 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.speed = SPEED_1000,
},
{
- .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_2_5GBASE_X_2_5GMII,
- .mask_ethtool = mlxsw_sp2_mask_ethtool_2_5gbase_x_2_5gmii,
- .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_2_5GBASE_X_2_5GMII_LEN,
- .mask_width = MLXSW_SP_PORT_MASK_WIDTH_1X |
- MLXSW_SP_PORT_MASK_WIDTH_2X |
- MLXSW_SP_PORT_MASK_WIDTH_4X |
- MLXSW_SP_PORT_MASK_WIDTH_8X,
- .speed = SPEED_2500,
- },
- {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_5GBASE_R,
.mask_ethtool = mlxsw_sp2_mask_ethtool_5gbase_r,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_5GBASE_R_LEN,
@@ -1568,6 +1646,27 @@ mlxsw_sp2_from_ptys_speed_duplex(struct mlxsw_sp *mlxsw_sp, bool carrier_ok,
cmd->base.duplex = DUPLEX_FULL;
}
+static int mlxsw_sp2_ptys_max_speed(struct mlxsw_sp_port *mlxsw_sp_port, u32 *p_max_speed)
+{
+ u32 eth_proto_cap;
+ u32 max_speed = 0;
+ int err;
+ int i;
+
+ err = mlxsw_sp_port_ptys_query(mlxsw_sp_port, &eth_proto_cap, NULL, NULL, NULL);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if ((eth_proto_cap & mlxsw_sp2_port_link_mode[i].mask) &&
+ mlxsw_sp2_port_link_mode[i].speed > max_speed)
+ max_speed = mlxsw_sp2_port_link_mode[i].speed;
+ }
+
+ *p_max_speed = max_speed;
+ return 0;
+}
+
static bool
mlxsw_sp2_test_bit_ethtool(const struct mlxsw_sp2_port_link_mode *link_mode,
const unsigned long *mode)
@@ -1637,6 +1736,7 @@ const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_link = mlxsw_sp2_from_ptys_link,
.from_ptys_speed = mlxsw_sp2_from_ptys_speed,
.from_ptys_speed_duplex = mlxsw_sp2_from_ptys_speed_duplex,
+ .ptys_max_speed = mlxsw_sp2_ptys_max_speed,
.to_ptys_advert_link = mlxsw_sp2_to_ptys_advert_link,
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
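
Both new ptys_max_speed callbacks reduce the queried PTYS capability bitmask over the per-ASIC link-mode table the same way: keep the largest speed whose mask intersects the capabilities. A standalone sketch of that reduction, with types simplified from the driver's tables:

#include <stdint.h>

struct link_mode { uint32_t mask; uint32_t speed; };

static uint32_t max_speed(const struct link_mode *tbl, int len, uint32_t cap)
{
	uint32_t best = 0;
	int i;

	for (i = 0; i < len; i++)
		if ((cap & tbl[i].mask) && tbl[i].speed > best)
			best = tbl[i].speed;
	return best;
}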
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index 9650562fc0ef..ca8090a28dec 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -314,11 +314,9 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
u8 *p_message_type,
u16 *p_sequence_id)
{
- unsigned int offset = 0;
unsigned int ptp_class;
- u8 *data;
+ struct ptp_header *hdr;
- data = skb_mac_header(skb);
ptp_class = ptp_classify_raw(skb);
switch (ptp_class & PTP_CLASS_VMASK) {
@@ -329,30 +327,14 @@ static int mlxsw_sp_ptp_parse(struct sk_buff *skb,
return -ERANGE;
}
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return -ERANGE;
- }
-
- /* PTP header is 34 bytes. */
- if (skb->len < offset + 34)
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return -EINVAL;
- *p_message_type = data[offset] & 0x0f;
- *p_domain_number = data[offset + 4];
- *p_sequence_id = (u16)(data[offset + 30]) << 8 | data[offset + 31];
+ *p_message_type = ptp_get_msgtype(hdr, ptp_class);
+ *p_domain_number = hdr->domain_number;
+ *p_sequence_id = be16_to_cpu(hdr->sequence_id);
+
return 0;
}
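
The rewrite above replaces hand-computed VLAN/IP/UDP offsets with the ptp_classify.h helpers, which also subsume the old 34-byte length check. A kernel-style sketch of the helper-based parse in isolation (error codes chosen to match the function above):

#include <linux/ptp_classify.h>

static int example_ptp_parse(struct sk_buff *skb, u8 *msgtype,
			     u8 *domain, u16 *seqid)
{
	unsigned int type = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (type == PTP_CLASS_NONE)
		return -ERANGE;

	hdr = ptp_parse_header(skb, type);	/* validates header length too */
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, type);
	*domain = hdr->domain_number;
	*seqid = be16_to_cpu(hdr->sequence_id);
	return 0;
}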
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 964fd444bb10..fd672c6c9133 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -140,18 +140,31 @@ static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ int err_hdroom = 0;
int err = 0;
if (!mlxsw_sp_qdisc)
return 0;
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+ err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ }
+
if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
mlxsw_sp_qdisc);
mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
mlxsw_sp_qdisc->ops = NULL;
- return err;
+
+ return err_hdroom ?: err;
}
static int
@@ -159,6 +172,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct mlxsw_sp_qdisc_ops *ops, void *params)
{
+ struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ struct mlxsw_sp_hdroom orig_hdroom;
int err;
if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
@@ -168,6 +183,21 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
* new one.
*/
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
+
+ orig_hdroom = *mlxsw_sp_port->hdroom;
+ if (root_qdisc == mlxsw_sp_qdisc) {
+ struct mlxsw_sp_hdroom hdroom = orig_hdroom;
+
+ hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
+ mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
+ if (err)
+ goto err_hdroom_configure;
+ }
+
err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
if (err)
goto err_bad_param;
@@ -191,6 +221,8 @@ mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
err_bad_param:
err_config:
+ mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
+err_hdroom_configure:
if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);
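
mlxsw_sp_qdisc_destroy() above merges its two error paths with "err_hdroom ?: err", the GNU ?: extension that yields its left operand when nonzero, so a headroom-restore failure takes precedence over the destroy callback's error. Portable equivalent for reference:

static int merge_errors(int err_hdroom, int err)
{
	return err_hdroom ? err_hdroom : err;
}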
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 460cb523312f..4381f8c6c3fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -8038,7 +8038,6 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
bool usp = net->ipv4.sysctl_ip_fwd_update_priority;
char rgcr_pl[MLXSW_REG_RGCR_LEN];
u64 max_rifs;
- int err;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS))
return -EIO;
@@ -8047,10 +8046,7 @@ static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_rgcr_pack(rgcr_pl, true, true);
mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
mlxsw_reg_rgcr_usp_set(rgcr_pl, usp);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
- if (err)
- return err;
- return 0;
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 1d18e41ab255..c6c5826aba41 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -968,42 +968,26 @@ static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static u32 mlxsw_sp_span_buffsize_get(struct mlxsw_sp *mlxsw_sp, int mtu,
- u32 speed)
+static int mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
{
- u32 buffsize = mlxsw_sp->span_ops->buffsize_get(speed, mtu);
+ struct mlxsw_sp_hdroom hdroom;
- return mlxsw_sp_bytes_cells(mlxsw_sp, buffsize) + 1;
+ hdroom = *mlxsw_sp_port->hdroom;
+ hdroom.int_buf.enable = enable;
+ mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
+
+ return mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
}
static int
-mlxsw_sp_span_port_buffer_update(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
+mlxsw_sp_span_port_buffer_enable(struct mlxsw_sp_port *mlxsw_sp_port)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- u32 buffsize;
- u32 speed;
- int err;
-
- err = mlxsw_sp_port_speed_get(mlxsw_sp_port, &speed);
- if (err)
- return err;
- if (speed == SPEED_UNKNOWN)
- speed = 0;
-
- buffsize = mlxsw_sp_span_buffsize_get(mlxsw_sp, speed, mtu);
- buffsize = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, buffsize);
- mlxsw_reg_sbib_pack(sbib_pl, mlxsw_sp_port->local_port, buffsize);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ return mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, true);
}
-static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp *mlxsw_sp,
- u8 local_port)
+static void mlxsw_sp_span_port_buffer_disable(struct mlxsw_sp_port *mlxsw_sp_port)
{
- char sbib_pl[MLXSW_REG_SBIB_LEN];
-
- mlxsw_reg_sbib_pack(sbib_pl, local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, false);
}
static struct mlxsw_sp_span_analyzed_port *
@@ -1021,48 +1005,6 @@ mlxsw_sp_span_analyzed_port_find(struct mlxsw_sp_span *span, u8 local_port,
return NULL;
}
-int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- int err = 0;
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the mtu value
- */
- mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
-
- if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span, port->local_port,
- false))
- err = mlxsw_sp_span_port_buffer_update(port, mtu);
-
- mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
-
- return err;
-}
-
-void mlxsw_sp_span_speed_update_work(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
-
- mlxsw_sp_port = container_of(dwork, struct mlxsw_sp_port,
- span.speed_update_dw);
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the speed value.
- */
- mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- mutex_lock(&mlxsw_sp->span->analyzed_ports_lock);
-
- if (mlxsw_sp_span_analyzed_port_find(mlxsw_sp->span,
- mlxsw_sp_port->local_port, false))
- mlxsw_sp_span_port_buffer_update(mlxsw_sp_port,
- mlxsw_sp_port->dev->mtu);
-
- mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
-}
-
static const struct mlxsw_sp_span_entry_ops *
mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev)
@@ -1180,9 +1122,7 @@ mlxsw_sp_span_analyzed_port_create(struct mlxsw_sp_span *span,
* does the mirroring.
*/
if (!ingress) {
- u16 mtu = mlxsw_sp_port->dev->mtu;
-
- err = mlxsw_sp_span_port_buffer_update(mlxsw_sp_port, mtu);
+ err = mlxsw_sp_span_port_buffer_enable(mlxsw_sp_port);
if (err)
goto err_buffer_update;
}
@@ -1196,18 +1136,15 @@ err_buffer_update:
}
static void
-mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_span *span,
+mlxsw_sp_span_analyzed_port_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_span_analyzed_port *
analyzed_port)
{
- struct mlxsw_sp *mlxsw_sp = span->mlxsw_sp;
-
/* Remove egress mirror buffer now that port is no longer analyzed
* at egress.
*/
if (!analyzed_port->ingress)
- mlxsw_sp_span_port_buffer_disable(mlxsw_sp,
- analyzed_port->local_port);
+ mlxsw_sp_span_port_buffer_disable(mlxsw_sp_port);
list_del(&analyzed_port->list);
kfree(analyzed_port);
@@ -1258,7 +1195,7 @@ void mlxsw_sp_span_analyzed_port_put(struct mlxsw_sp_port *mlxsw_sp_port,
if (!refcount_dec_and_test(&analyzed_port->ref_count))
goto out_unlock;
- mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp->span, analyzed_port);
+ mlxsw_sp_span_analyzed_port_destroy(mlxsw_sp_port, analyzed_port);
out_unlock:
mutex_unlock(&mlxsw_sp->span->analyzed_ports_lock);
@@ -1712,11 +1649,6 @@ static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static u32 mlxsw_sp1_span_buffsize_get(int mtu, u32 speed)
-{
- return mtu * 5 / 2;
-}
-
static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base)
{
@@ -1725,7 +1657,6 @@ static int mlxsw_sp1_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops = {
.init = mlxsw_sp1_span_init,
- .buffsize_get = mlxsw_sp1_span_buffsize_get,
.policer_id_base_set = mlxsw_sp1_span_policer_id_base_set,
};
@@ -1750,18 +1681,6 @@ static int mlxsw_sp2_span_init(struct mlxsw_sp *mlxsw_sp)
#define MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR 38
#define MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR 50
-static u32 __mlxsw_sp_span_buffsize_get(int mtu, u32 speed, u32 buffer_factor)
-{
- return 3 * mtu + buffer_factor * speed / 1000;
-}
-
-static u32 mlxsw_sp2_span_buffsize_get(int mtu, u32 speed)
-{
- int factor = MLXSW_SP2_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
- return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base)
{
@@ -1778,19 +1697,10 @@ static int mlxsw_sp2_span_policer_id_base_set(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops = {
.init = mlxsw_sp2_span_init,
- .buffsize_get = mlxsw_sp2_span_buffsize_get,
.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
-static u32 mlxsw_sp3_span_buffsize_get(int mtu, u32 speed)
-{
- int factor = MLXSW_SP3_SPAN_EG_MIRROR_BUFFER_FACTOR;
-
- return __mlxsw_sp_span_buffsize_get(mtu, speed, factor);
-}
-
const struct mlxsw_sp_span_ops mlxsw_sp3_span_ops = {
.init = mlxsw_sp2_span_init,
- .buffsize_get = mlxsw_sp3_span_buffsize_get,
.policer_id_base_set = mlxsw_sp2_span_policer_id_base_set,
};
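
The buffsize_get callbacks removed above sized the egress-mirror buffer as 3 * mtu + factor * speed / 1000, with speed in Mb/s (ethtool SPEED_* values), so the second term grows with line rate; Spectrum-1 used the simpler mtu * 5 / 2. This sizing now lives behind mlxsw_sp_hdroom_bufs_reset_sizes() via the hdroom int_buf. A quick standalone check of the Spectrum-2 factor of 38 at 25G with a 1518-byte MTU:

#include <stdio.h>

int main(void)
{
	int mtu = 1518, speed = 25000, factor = 38;

	/* 3 * 1518 + 38 * 25000 / 1000 = 4554 + 950 = 5504 bytes */
	printf("%d\n", 3 * mtu + factor * speed / 1000);
	return 0;
}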
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index 1c746dd3b1bd..d907718bc8c5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -47,7 +47,6 @@ struct mlxsw_sp_span_entry_ops;
struct mlxsw_sp_span_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
- u32 (*buffsize_get)(int mtu, u32 speed);
int (*policer_id_base_set)(struct mlxsw_sp *mlxsw_sp,
u16 policer_id_base);
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 2e41c5519c1b..433f14ade464 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -291,7 +291,7 @@ static void mlxsw_sp_rx_sample_listener(struct sk_buff *skb, u8 local_port,
static const struct mlxsw_sp_trap_policer_item
mlxsw_sp_trap_policer_items_arr[] = {
{
- .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(1, 10 * 1024, 4096),
},
{
.policer = MLXSW_SP_TRAP_POLICER(2, 128, 128),
@@ -303,25 +303,25 @@ mlxsw_sp_trap_policer_items_arr[] = {
.policer = MLXSW_SP_TRAP_POLICER(4, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(5, 16 * 1024, 8192),
},
{
.policer = MLXSW_SP_TRAP_POLICER(6, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(7, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 1024),
+ .policer = MLXSW_SP_TRAP_POLICER(8, 20 * 1024, 8192),
},
{
.policer = MLXSW_SP_TRAP_POLICER(9, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(10, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(11, 360, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(11, 256, 128),
},
{
.policer = MLXSW_SP_TRAP_POLICER(12, 128, 128),
@@ -330,19 +330,19 @@ mlxsw_sp_trap_policer_items_arr[] = {
.policer = MLXSW_SP_TRAP_POLICER(13, 128, 128),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(14, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(15, 1024, 512),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 4096),
+ .policer = MLXSW_SP_TRAP_POLICER(16, 24 * 1024, 16384),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 4096),
+ .policer = MLXSW_SP_TRAP_POLICER(17, 19 * 1024, 8192),
},
{
- .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
+ .policer = MLXSW_SP_TRAP_POLICER(18, 1024, 512),
},
{
.policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 6f9a725662fb..5023d91269f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -551,16 +551,6 @@ struct mlxsw_sx_port_link_mode {
static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
- },
- {
- .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_TX,
- .speed = 100,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
.supported = SUPPORTED_1000baseKX_Full,
@@ -568,12 +558,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 1000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 10000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
.supported = SUPPORTED_10000baseKX4_Full,
@@ -590,12 +574,6 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
.speed = 10000,
},
{
- .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
- },
- {
.mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
.supported = SUPPORTED_40000baseCR4_Full,
.advertised = ADVERTISED_40000baseCR4_Full,
@@ -634,8 +612,7 @@ static const struct mlxsw_sx_port_link_mode mlxsw_sx_port_link_mode[] = {
{
.mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4 |
MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
- MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
+ MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
.speed = 100000,
},
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 33909887d0ac..57f9e24602d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -120,8 +120,14 @@ enum {
};
enum mlxsw_event_trap_id {
+ /* Fatal Event generated by FW */
+ MLXSW_TRAP_ID_MFDE = 0x3,
/* Port Up/Down event generated by hardware */
MLXSW_TRAP_ID_PUDE = 0x8,
+ /* Port Module Plug/Unplug Event generated by hardware */
+ MLXSW_TRAP_ID_PMPE = 0x9,
+ /* Temperature Warning event generated by hardware */
+ MLXSW_TRAP_ID_MTWE = 0xC,
/* PTP Ingress FIFO has a new entry */
MLXSW_TRAP_ID_PTP_ING_FIFO = 0x2D,
/* PTP Egress FIFO has a new entry */
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f3f6dfe3eddc..caa251d0e381 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -587,10 +587,10 @@ out:
return err;
}
-static void ks8842_rx_frame_dma_tasklet(unsigned long arg)
+static void ks8842_rx_frame_dma_tasklet(struct tasklet_struct *t)
{
- struct net_device *netdev = (struct net_device *)arg;
- struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_adapter *adapter = from_tasklet(adapter, t, dma_rx.tasklet);
+ struct net_device *netdev = adapter->netdev;
struct ks8842_rx_dma_ctl *ctl = &adapter->dma_rx;
struct sk_buff *skb = ctl->skb;
dma_addr_t addr = sg_dma_address(&ctl->sg);
@@ -720,10 +720,10 @@ static void ks8842_handle_rx_overrun(struct net_device *netdev,
netdev->stats.rx_fifo_errors++;
}
-static void ks8842_tasklet(unsigned long arg)
+static void ks8842_tasklet(struct tasklet_struct *t)
{
- struct net_device *netdev = (struct net_device *)arg;
- struct ks8842_adapter *adapter = netdev_priv(netdev);
+ struct ks8842_adapter *adapter = from_tasklet(adapter, t, tasklet);
+ struct net_device *netdev = adapter->netdev;
u16 isr;
unsigned long flags;
u16 entry_bank;
@@ -953,8 +953,7 @@ static int ks8842_alloc_dma_bufs(struct net_device *netdev)
goto err;
}
- tasklet_init(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet,
- (unsigned long)netdev);
+ tasklet_setup(&rx_ctl->tasklet, ks8842_rx_frame_dma_tasklet);
return 0;
err:
@@ -1173,7 +1172,7 @@ static int ks8842_probe(struct platform_device *pdev)
adapter->dma_tx.channel = -1;
}
- tasklet_init(&adapter->tasklet, ks8842_tasklet, (unsigned long)netdev);
+ tasklet_setup(&adapter->tasklet, ks8842_tasklet);
spin_lock_init(&adapter->lock);
netdev->netdev_ops = &ks8842_netdev_ops;
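
Both tasklet conversions in this file follow the tasklet_setup()/from_tasklet() pattern: the callback receives the tasklet_struct embedded in the driver state and recovers its container (from_tasklet() is a container_of() wrapper), instead of carrying the net_device through an opaque unsigned long. A minimal sketch with an illustrative adapter type:

#include <linux/interrupt.h>

struct example_adapter {
	struct tasklet_struct tasklet;
	int pending;
};

static void example_task(struct tasklet_struct *t)
{
	struct example_adapter *adapter = from_tasklet(adapter, t, tasklet);

	adapter->pending = 0;	/* work on the recovered container */
}

static void example_init(struct example_adapter *adapter)
{
	tasklet_setup(&adapter->tasklet, example_task);
}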
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index bb646b65cc95..9ed264ed7070 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* drivers/net/ethernet/micrel/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
*
* Copyright (c) 2009-2010 Micrel, Inc.
@@ -959,7 +959,7 @@ struct ksz_sw_desc {
* struct ksz_dma_buf - OS dependent DMA buffer data structure
* @skb: Associated socket buffer.
* @dma: Associated physical DMA address.
- * len: Actual len used.
+ * @len: Actual len used.
*/
struct ksz_dma_buf {
struct sk_buff *skb;
@@ -1254,6 +1254,7 @@ struct ksz_port_info {
* @multi_list_size: Multicast address list size.
* @enabled: Indication of hardware enabled.
* @rx_stop: Indication of receive process stop.
+ * @reserved2: none
* @features: Hardware features to enable.
* @overrides: Hardware features to override.
* @parent: Pointer to parent, network device private structure.
@@ -1447,7 +1448,7 @@ struct dev_info {
* struct dev_priv - Network device private data structure
* @adapter: Adapter device information.
* @port: Port information.
- * @monitor_time_info: Timer to monitor ports.
+ * @monitor_timer_info: Timer to monitor ports.
* @proc_sem: Semaphore for proc accessing.
* @id: Device ID.
* @mii_if: MII interface information.
@@ -1566,6 +1567,7 @@ static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
/**
* hw_block_intr - block hardware interrupts
+ * @hw: The hardware instance.
*
* This function blocks all interrupts of the hardware and returns the current
* interrupt enable mask so that interrupts can be restored later.
@@ -1649,8 +1651,7 @@ static inline void set_tx_len(struct ksz_desc *desc, u32 len)
#define HW_DELAY(hw, reg) \
do { \
- u16 dummy; \
- dummy = readw(hw->io + reg); \
+ readw(hw->io + reg); \
} while (0)
/**
@@ -1819,6 +1820,7 @@ static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
* port_r_mib_pkt - read dropped packet counts
* @hw: The hardware instance.
* @port: The port index.
+ * @last: last one
* @cnt: Buffer to store the receive and transmit dropped packet counts.
*
* This routine reads the dropped packet counts of the port.
@@ -1972,7 +1974,7 @@ static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
* port_chk_shift - check port bit
* @hw: The hardware instance.
* @port: The port index.
- * @offset: The offset of the register.
+ * @addr: The offset of the register.
* @shift: Number of bits to shift.
*
* This function checks whether the specified port is set in the register or
@@ -1994,7 +1996,7 @@ static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
* port_cfg_shift - set port bit
* @hw: The hardware instance.
* @port: The port index.
- * @offset: The offset of the register.
+ * @addr: The offset of the register.
* @shift: Number of bits to shift.
* @set: The flag indicating whether the port is to be set or not.
*
@@ -4425,6 +4427,8 @@ static int ksz_alloc_desc(struct dev_info *adapter)
/**
* free_dma_buf - release DMA buffer resources
* @adapter: Adapter information structure.
+ * @dma_buf: pointer to buf
+ * @direction: to or from device
*
* This routine is just a helper function to release the DMA buffer resources.
*/
@@ -4562,6 +4566,7 @@ static void ksz_free_desc(struct dev_info *adapter)
* ksz_free_buffers - free buffers used in the descriptors
* @adapter: Adapter information structure.
* @desc_info: Descriptor information structure.
+ * @direction: to or from device
*
* This local routine frees buffers used in the DMA buffers.
*/
@@ -4721,7 +4726,8 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
/**
* transmit_cleanup - clean up transmit descriptors
- * @dev: Network device.
+ * @hw_priv: Network device.
+ * @normal: break if owned
*
* This routine is called to clean up the transmitted buffers.
*/
@@ -4777,7 +4783,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
/**
* transmit_done - transmit done processing
- * @dev: Network device.
+ * @hw_priv: Network device.
*
* This routine is called when the transmit interrupt is triggered, indicating
* either a packet is sent successfully or there are transmit errors.
@@ -4883,6 +4889,7 @@ unlock:
/**
* netdev_tx_timeout - transmit timeout processing
* @dev: Network device.
+ * @txqueue: index of hanging queue
*
* This routine is called when the transmit timer expires. That indicates the
* hardware is not running correctly because transmit interrupts are not
@@ -4978,7 +4985,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
struct dev_info *hw_priv = priv->adapter;
struct ksz_dma_buf *dma_buf;
struct sk_buff *skb;
- int rx_status;
/* Received length includes 4-byte CRC. */
packet_len = status.rx.frame_len - 4;
@@ -5014,7 +5020,7 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
dev->stats.rx_bytes += packet_len;
/* Notify upper layer for received packet. */
- rx_status = netif_rx(skb);
+ netif_rx(skb);
return 0;
}
@@ -5159,9 +5165,9 @@ release_packet:
return received;
}
-static void rx_proc_task(unsigned long data)
+static void rx_proc_task(struct tasklet_struct *t)
{
- struct dev_info *hw_priv = (struct dev_info *) data;
+ struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
struct ksz_hw *hw = &hw_priv->hw;
if (!hw->enabled)
@@ -5181,9 +5187,9 @@ static void rx_proc_task(unsigned long data)
}
}
-static void tx_proc_task(unsigned long data)
+static void tx_proc_task(struct tasklet_struct *t)
{
- struct dev_info *hw_priv = (struct dev_info *) data;
+ struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
struct ksz_hw *hw = &hw_priv->hw;
hw_ack_intr(hw, KS884X_INT_TX_MASK);
@@ -5436,10 +5442,8 @@ static int prepare_hardware(struct net_device *dev)
rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
- (unsigned long) hw_priv);
- tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
- (unsigned long) hw_priv);
+ tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
+ tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);
hw->promiscuous = 0;
hw->all_multi = 0;
@@ -5829,8 +5833,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Get address of MII PHY in use. */
case SIOCGMIIPHY:
data->phy_id = priv->id;
-
- /* Fallthrough... */
+ fallthrough;
/* Read MII PHY register. */
case SIOCGMIIREG:
@@ -6078,14 +6081,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->bus_info));
}
-/**
- * netdev_get_regs_len - get length of register dump
- * @dev: Network device.
- *
- * This function returns the length of the register dump.
- *
- * Return length of the register dump.
- */
static struct hw_regs {
int start;
int end;
@@ -6099,6 +6094,14 @@ static struct hw_regs {
{ 0, 0 }
};
+/**
+ * netdev_get_regs_len - get length of register dump
+ * @dev: Network device.
+ *
+ * This function returns the length of the register dump.
+ *
+ * Return length of the register dump.
+ */
static int netdev_get_regs_len(struct net_device *dev)
{
struct hw_regs *range = hw_regs_range;
@@ -6240,6 +6243,8 @@ static int netdev_get_eeprom_len(struct net_device *dev)
return EEPROM_SIZE * 2;
}
+#define EEPROM_MAGIC 0x10A18842
+
/**
* netdev_get_eeprom - get EEPROM data
* @dev: Network device.
@@ -6250,8 +6255,6 @@ static int netdev_get_eeprom_len(struct net_device *dev)
*
* Return 0 if successful; otherwise an error code.
*/
-#define EEPROM_MAGIC 0x10A18842
-
static int netdev_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
@@ -6388,7 +6391,7 @@ static int netdev_set_pauseparam(struct net_device *dev,
/**
* netdev_get_ringparam - get tx/rx ring parameters
* @dev: Network device.
- * @pause: Ethtool RING settings data structure.
+ * @ring: Ethtool RING settings data structure.
*
* This procedure returns the TX/RX ring settings.
*/
@@ -6509,7 +6512,6 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
int i;
int n;
int p;
- int rc;
u64 counter[TOTAL_PORT_COUNTER_NUM];
mutex_lock(&hw_priv->lock);
@@ -6530,19 +6532,19 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
p = n;
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 1);
} else
for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
if (0 == i) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 2);
} else if (hw->port_mib[p].cnt_ptr) {
- rc = wait_event_interruptible_timeout(
+ wait_event_interruptible_timeout(
hw_priv->counter[p].counter,
2 == hw_priv->counter[p].read,
HZ * 1);
@@ -6693,7 +6695,7 @@ static void mib_monitor(struct timer_list *t)
/**
* dev_monitor - periodic monitoring
- * @ptr: Network device pointer.
+ * @t: timer list containing a network device pointer.
*
* This routine is run in a kernel timer to monitor the network device.
*/
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 5bd7fb917b7a..796e46a53926 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* Register map access API - ENCX24J600 support
*
* Copyright 2015 Gridpoint
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index de93cc6ebc1a..a1938842f828 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -158,9 +158,8 @@ static void lan743x_tx_isr(void *context, u32 int_sts, u32 flags)
struct lan743x_tx *tx = context;
struct lan743x_adapter *adapter = tx->adapter;
bool enable_flag = true;
- u32 int_en = 0;
- int_en = lan743x_csr_read(adapter, INT_EN_SET);
+ lan743x_csr_read(adapter, INT_EN_SET);
if (flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_CLEAR) {
lan743x_csr_write(adapter, INT_EN_CLR,
INT_BIT_DMA_TX_(tx->channel_number));
@@ -1699,10 +1698,9 @@ static int lan743x_tx_napi_poll(struct napi_struct *napi, int weight)
bool start_transmitter = false;
unsigned long irq_flags = 0;
u32 ioc_bit = 0;
- u32 int_sts = 0;
ioc_bit = DMAC_INT_BIT_TX_IOC_(tx->channel_number);
- int_sts = lan743x_csr_read(adapter, DMAC_INT_STS);
+ lan743x_csr_read(adapter, DMAC_INT_STS);
if (tx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_STATUS_W2C)
lan743x_csr_write(adapter, DMAC_INT_STS, ioc_bit);
spin_lock_irqsave(&tx->ring_lock, irq_flags);
@@ -3038,7 +3036,6 @@ static int lan743x_pm_suspend(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
- int ret;
lan743x_pcidev_shutdown(pdev);
@@ -3051,9 +3048,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_pm_set_wol(adapter);
/* Host sets PME_En, put D3hot */
- ret = pci_prepare_to_sleep(pdev);
-
- return 0;
+ return pci_prepare_to_sleep(pdev);
}
static int lan743x_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index aa002db04250..70bf8c67d7ef 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -5,6 +5,7 @@
* Copyright (c) 2017 Microsemi Corporation
*/
#include <linux/if_bridge.h>
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot.h"
#include "ocelot_vcap.h"
@@ -107,6 +108,13 @@ static void ocelot_vcap_enable(struct ocelot *ocelot, int port)
ocelot_write_gix(ocelot, ANA_PORT_VCAP_S2_CFG_S2_ENA |
ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(0xa),
ANA_PORT_VCAP_S2_CFG, port);
+
+ ocelot_write_gix(ocelot, ANA_PORT_VCAP_CFG_S1_ENA,
+ ANA_PORT_VCAP_CFG, port);
+
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_ES0_EN,
+ REW_PORT_CFG_ES0_EN,
+ REW_PORT_CFG, port);
}
static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot)
@@ -191,12 +199,28 @@ static int ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
return 0;
}
-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware)
+int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware, struct switchdev_trans *trans)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u32 val;
+ if (switchdev_trans_ph_prepare(trans)) {
+ struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1];
+ struct ocelot_vcap_filter *filter;
+
+ list_for_each_entry(filter, &block->rules, list) {
+ if (filter->ingress_port_mask & BIT(port) &&
+ filter->action.vid_replace_ena) {
+ dev_err(ocelot->dev,
+ "Cannot change VLAN state with vlan modify rules active\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+ }
+
ocelot_port->vlan_aware = vlan_aware;
if (vlan_aware)
@@ -210,6 +234,8 @@ void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_native_vlan(ocelot, port, ocelot_port->vid);
+
+ return 0;
}
EXPORT_SYMBOL(ocelot_port_vlan_filtering);
@@ -413,26 +439,20 @@ void ocelot_port_disable(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_port_disable);
-int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
- struct sk_buff *skb)
+void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone)
{
- struct skb_shared_info *shinfo = skb_shinfo(skb);
- struct ocelot *ocelot = ocelot_port->ocelot;
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
- if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP &&
- ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
- spin_lock(&ocelot_port->ts_id_lock);
+ spin_lock(&ocelot_port->ts_id_lock);
- shinfo->tx_flags |= SKBTX_IN_PROGRESS;
- /* Store timestamp ID in cb[0] of sk_buff */
- skb->cb[0] = ocelot_port->ts_id;
- ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
- skb_queue_tail(&ocelot_port->tx_skbs, skb);
+ skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Store timestamp ID in cb[0] of sk_buff */
+ clone->cb[0] = ocelot_port->ts_id;
+ ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4;
+ skb_queue_tail(&ocelot_port->tx_skbs, clone);
- spin_unlock(&ocelot_port->ts_id_lock);
- return 0;
- }
- return -ENODATA;
+ spin_unlock(&ocelot_port->ts_id_lock);
}
EXPORT_SYMBOL(ocelot_port_add_txtstamp_skb);
@@ -511,9 +531,7 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
- skb_tstamp_tx(skb_match, &shhwtstamps);
-
- dev_kfree_skb_any(skb_match);
+ skb_complete_tx_timestamp(skb_match, &shhwtstamps);
/* Next ts */
ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
@@ -1102,12 +1120,24 @@ EXPORT_SYMBOL(ocelot_port_bridge_join);
int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
+ struct switchdev_trans trans;
+ int ret;
+
ocelot->bridge_mask &= ~BIT(port);
if (!ocelot->bridge_mask)
ocelot->hw_bridge_dev = NULL;
- ocelot_port_vlan_filtering(ocelot, port, 0);
+ trans.ph_prepare = true;
+ ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ if (ret)
+ return ret;
+
+ trans.ph_prepare = false;
+ ret = ocelot_port_vlan_filtering(ocelot, port, false, &trans);
+ if (ret)
+ return ret;
+
ocelot_port_set_pvid(ocelot, port, 0);
return ocelot_port_set_native_vlan(ocelot, port, 0);
}
@@ -1354,22 +1384,14 @@ void ocelot_init_port(struct ocelot *ocelot, int port)
}
EXPORT_SYMBOL(ocelot_init_port);
-/* Configure and enable the CPU port module, which is a set of queues.
- * If @npi contains a valid port index, the CPU port module is connected
- * to the Node Processor Interface (NPI). This is the mode through which
- * frames can be injected from and extracted to an external CPU,
- * over Ethernet.
+/* Configure and enable the CPU port module, which is a set of queues
+ * accessible through register MMIO, frame DMA or Ethernet (in case
+ * NPI mode is used).
*/
-void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction)
+static void ocelot_cpu_port_init(struct ocelot *ocelot)
{
int cpu = ocelot->num_phys_ports;
- ocelot->npi = npi;
- ocelot->inj_prefix = injection;
- ocelot->xtr_prefix = extraction;
-
/* The unicast destination PGID for the CPU port module is unused */
ocelot_write_rix(ocelot, 0, ANA_PGID_PGID, cpu);
/* Instead set up a multicast destination PGID for traffic copied to
@@ -1381,31 +1403,13 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
ANA_PORT_PORT_CFG_PORTID_VAL(cpu),
ANA_PORT_PORT_CFG, cpu);
- if (npi >= 0 && npi < ocelot->num_phys_ports) {
- ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M |
- QSYS_EXT_CPU_CFG_EXT_CPU_PORT(npi),
- QSYS_EXT_CPU_CFG);
-
- /* Enable NPI port */
- ocelot_fields_write(ocelot, npi,
- QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
- /* NPI port Injection/Extraction configuration */
- ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_XTR_HDR,
- extraction);
- ocelot_fields_write(ocelot, npi, SYS_PORT_MODE_INCL_INJ_HDR,
- injection);
-
- /* Disable transmission of pause frames */
- ocelot_fields_write(ocelot, npi, SYS_PAUSE_CFG_PAUSE_ENA, 0);
- }
-
/* Enable CPU port module */
ocelot_fields_write(ocelot, cpu, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1);
/* CPU port Injection/Extraction configuration */
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_XTR_HDR,
- extraction);
+ ocelot->xtr_prefix);
ocelot_fields_write(ocelot, cpu, SYS_PORT_MODE_INCL_INJ_HDR,
- injection);
+ ocelot->inj_prefix);
/* Configure the CPU port to be VLAN aware */
ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
@@ -1413,7 +1417,6 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
}
-EXPORT_SYMBOL(ocelot_configure_cpu);
int ocelot_init(struct ocelot *ocelot)
{
@@ -1453,6 +1456,7 @@ int ocelot_init(struct ocelot *ocelot)
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
ocelot_vcap_init(ocelot);
+ ocelot_cpu_port_init(ocelot);
for (port = 0; port < ocelot->num_phys_ports; port++) {
/* Clear all counters (5 groups) */
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index dc29e05103a1..abb407dff93c 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -98,6 +98,8 @@ int ocelot_port_lag_join(struct ocelot *ocelot, int port,
struct net_device *bond);
void ocelot_port_lag_leave(struct ocelot *ocelot, int port,
struct net_device *bond);
+struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port);
+int ocelot_netdev_to_port(struct net_device *dev);
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index ec1b6e2572ba..729495a1a77e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -5,56 +5,433 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-
+#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_vcap.h"
-static int ocelot_flower_parse_action(struct flow_cls_offload *f,
+/* Arbitrarily chosen constants for encoding the VCAP block and lookup number
+ * into the chain number. This is UAPI.
+ */
+#define VCAP_BLOCK 10000
+#define VCAP_LOOKUP 1000
+#define VCAP_IS1_NUM_LOOKUPS 3
+#define VCAP_IS2_NUM_LOOKUPS 2
+#define VCAP_IS2_NUM_PAG 256
+#define VCAP_IS1_CHAIN(lookup) \
+ (1 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP)
+#define VCAP_IS2_CHAIN(lookup, pag) \
+ (2 * VCAP_BLOCK + (lookup) * VCAP_LOOKUP + (pag))
+
+static int ocelot_chain_to_block(int chain, bool ingress)
+{
+ int lookup, pag;
+
+ if (!ingress) {
+ if (chain == 0)
+ return VCAP_ES0;
+ return -EOPNOTSUPP;
+ }
+
+ /* Backwards compatibility with older, single-chain tc-flower
+ * offload support in Ocelot
+ */
+ if (chain == 0)
+ return VCAP_IS2;
+
+ for (lookup = 0; lookup < VCAP_IS1_NUM_LOOKUPS; lookup++)
+ if (chain == VCAP_IS1_CHAIN(lookup))
+ return VCAP_IS1;
+
+ for (lookup = 0; lookup < VCAP_IS2_NUM_LOOKUPS; lookup++)
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(lookup, pag))
+ return VCAP_IS2;
+
+ return -EOPNOTSUPP;
+}
+
+/* Caller must ensure this is a valid IS1 or IS2 chain first,
+ * by calling ocelot_chain_to_block.
+ */
+static int ocelot_chain_to_lookup(int chain)
+{
+ return (chain / VCAP_LOOKUP) % 10;
+}
+
+/* Caller must ensure this is a valid IS2 chain first,
+ * by calling ocelot_chain_to_block.
+ */
+static int ocelot_chain_to_pag(int chain)
+{
+ int lookup = ocelot_chain_to_lookup(chain);
+
+ /* calculate PAG value as chain index relative to the first PAG */
+ return chain - VCAP_IS2_CHAIN(lookup, 0);
+}
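+
+/* Worked example of the chain encoding above: VCAP_IS2_CHAIN(1, 3) =
+ * 2 * 10000 + 1 * 1000 + 3 = 21003; then ocelot_chain_to_lookup(21003) =
+ * (21003 / 1000) % 10 = 1 and ocelot_chain_to_pag(21003) =
+ * 21003 - VCAP_IS2_CHAIN(1, 0) = 3, recovering the lookup and PAG that
+ * the chain number encodes.
+ */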
+
+static bool ocelot_is_goto_target_valid(int goto_target, int chain,
+ bool ingress)
+{
+ int pag;
+
+ /* Can't offload GOTO in VCAP ES0 */
+ if (!ingress)
+ return (goto_target < 0);
+
+ /* Non-optional GOTOs */
+ if (chain == 0)
+ /* VCAP IS1 can be skipped, either partially or completely */
+ return (goto_target == VCAP_IS1_CHAIN(0) ||
+ goto_target == VCAP_IS1_CHAIN(1) ||
+ goto_target == VCAP_IS1_CHAIN(2) ||
+ goto_target == VCAP_IS2_CHAIN(0, 0) ||
+ goto_target == VCAP_IS2_CHAIN(1, 0));
+
+ if (chain == VCAP_IS1_CHAIN(0))
+ return (goto_target == VCAP_IS1_CHAIN(1));
+
+ if (chain == VCAP_IS1_CHAIN(1))
+ return (goto_target == VCAP_IS1_CHAIN(2));
+
+ /* Lookup 2 of VCAP IS1 can really support non-optional GOTOs,
+ * using a Policy Association Group (PAG) value, which is an 8-bit
+ * value encoding a VCAP IS2 target chain.
+ */
+ if (chain == VCAP_IS1_CHAIN(2)) {
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (goto_target == VCAP_IS2_CHAIN(0, pag))
+ return true;
+
+ return false;
+ }
+
+ /* Non-optional GOTO from VCAP IS2 lookup 0 to lookup 1.
+ * We cannot change the PAG at this point.
+ */
+ for (pag = 0; pag < VCAP_IS2_NUM_PAG; pag++)
+ if (chain == VCAP_IS2_CHAIN(0, pag))
+ return (goto_target == VCAP_IS2_CHAIN(1, pag));
+
+ /* VCAP IS2 lookup 1 cannot jump anywhere */
+ return false;
+}
+
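+/* e.g. a filter can only be attached to chain 20005 (VCAP IS2, lookup 0,
+ * PAG 5) if another rule already exists whose GOTO target is chain 20005,
+ * such as a VCAP IS1 PAG filter or a dummy GOTO-only rule.
+ */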
+static struct ocelot_vcap_filter *
+ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
+{
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id;
+
+ block_id = ocelot_chain_to_block(chain, true);
+ if (block_id < 0)
+ return NULL;
+
+ if (block_id == VCAP_IS2) {
+ block = &ocelot->block[VCAP_IS1];
+
+ list_for_each_entry(filter, &block->rules, list)
+ if (filter->type == OCELOT_VCAP_FILTER_PAG &&
+ filter->goto_target == chain)
+ return filter;
+ }
+
+ list_for_each_entry(filter, &ocelot->dummy_rules, list)
+ if (filter->goto_target == chain)
+ return filter;
+
+ return NULL;
+}
+
+static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
+ bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct netlink_ext_ack *extack = f->common.extack;
+ bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
+ enum ocelot_tag_tpid_sel tpid;
+ int i, chain, egress_port;
u64 rate;
- int i;
-
- if (!flow_offload_has_one_action(&f->rule->action))
- return -EOPNOTSUPP;
if (!flow_action_basic_hw_stats_check(&f->rule->action,
f->common.extack))
return -EOPNOTSUPP;
+ chain = f->common.chain_index;
+ filter->block_id = ocelot_chain_to_block(chain, ingress);
+ if (filter->block_id < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
+ return -EOPNOTSUPP;
+ }
+ if (filter->block_id == VCAP_IS1 || filter->block_id == VCAP_IS2)
+ filter->lookup = ocelot_chain_to_lookup(chain);
+ if (filter->block_id == VCAP_IS2)
+ filter->pag = ocelot_chain_to_pag(chain);
+
+ filter->goto_target = -1;
+ filter->type = OCELOT_VCAP_FILTER_DUMMY;
+
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_DROP:
- filter->action = OCELOT_VCAP_ACTION_DROP;
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Drop action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0;
+ filter->action.police_ena = true;
+ filter->action.pol_ix = OCELOT_POLICER_DISCARD;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_TRAP:
- filter->action = OCELOT_VCAP_ACTION_TRAP;
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Trap action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_PERMIT_DENY;
+ filter->action.port_mask = 0;
+ filter->action.cpu_copy_ena = true;
+ filter->action.cpu_qu_num = 0;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_POLICE:
- filter->action = OCELOT_VCAP_ACTION_POLICE;
+ if (filter->block_id != VCAP_IS2 ||
+ filter->lookup != 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Police action can only be offloaded to VCAP IS2 lookup 0");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.police_ena = true;
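+			/* tc expresses the rate in bytes per second, while
+			 * the policer is programmed in kilobits per second
+			 */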
rate = a->police.rate_bytes_ps;
- filter->pol.rate = div_u64(rate, 1000) * 8;
- filter->pol.burst = a->police.burst;
+ filter->action.pol.rate = div_u64(rate, 1000) * 8;
+ filter->action.pol.burst = a->police.burst;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ if (filter->block_id != VCAP_IS2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Redirect action can only be offloaded to VCAP IS2");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ egress_port = ocelot->ops->netdev_to_port(a->dev);
+ if (egress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Destination not an ocelot port");
+ return -EOPNOTSUPP;
+ }
+ filter->action.mask_mode = OCELOT_MASK_MODE_REDIRECT;
+ filter->action.port_mask = BIT(egress_port);
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN pop action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.vlan_pop_cnt_ena = true;
+ filter->action.vlan_pop_cnt++;
+ if (filter->action.vlan_pop_cnt > 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot pop more than 2 VLAN headers");
+ return -EOPNOTSUPP;
+ }
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_VLAN_MANGLE:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN modify action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ if (!ocelot_port->vlan_aware) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only modify VLAN under VLAN aware bridge");
+ return -EOPNOTSUPP;
+ }
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = a->vlan.vid;
+ filter->action.pcp = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_PRIORITY:
+ if (filter->block_id != VCAP_IS1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Priority action can only be offloaded to VCAP IS1");
+ return -EOPNOTSUPP;
+ }
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+ filter->action.qos_ena = true;
+ filter->action.qos_val = a->priority;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ break;
+ case FLOW_ACTION_GOTO:
+ filter->goto_target = a->chain_index;
+
+ if (filter->block_id == VCAP_IS1 && filter->lookup == 2) {
+ int pag = ocelot_chain_to_pag(filter->goto_target);
+
+ filter->action.pag_override_mask = 0xff;
+ filter->action.pag_val = pag;
+ filter->type = OCELOT_VCAP_FILTER_PAG;
+ }
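+			/* e.g. a GOTO from IS1 lookup 2 to chain 20005
+			 * programs PAG 5 into the action, steering matched
+			 * packets to VCAP IS2 lookup 0, PAG 5
+			 */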
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (filter->block_id != VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VLAN push action can only be offloaded to VCAP ES0");
+ return -EOPNOTSUPP;
+ }
+ switch (ntohs(a->vlan.proto)) {
+ case ETH_P_8021Q:
+ tpid = OCELOT_TAG_TPID_SEL_8021Q;
+ break;
+ case ETH_P_8021AD:
+ tpid = OCELOT_TAG_TPID_SEL_8021AD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot push custom TPID");
+ return -EOPNOTSUPP;
+ }
+ filter->action.tag_a_tpid_sel = tpid;
+ filter->action.push_outer_tag = OCELOT_ES0_TAG;
+ filter->action.tag_a_vid_sel = 1;
+ filter->action.vid_a_val = a->vlan.vid;
+ filter->action.pcp_a_val = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload action");
return -EOPNOTSUPP;
}
}
+ if (filter->goto_target == -1) {
+ if ((filter->block_id == VCAP_IS2 && filter->lookup == 1) ||
+ chain == 0) {
+ allow_missing_goto_target = true;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Missing GOTO action");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!ocelot_is_goto_target_valid(filter->goto_target, chain, ingress) &&
+ !allow_missing_goto_target) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload this GOTO target");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
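+
+/* A minimal usage sketch from user space ("swp0" and the keys are
+ * placeholders):
+ *
+ *   tc qdisc add dev swp0 clsact
+ *   tc filter add dev swp0 ingress chain 0 flower skip_sw \
+ *           action goto chain 21000
+ *   tc filter add dev swp0 ingress chain 21000 flower skip_sw \
+ *           dst_ip 10.0.0.1 action drop
+ *
+ * Chain 21000 is VCAP IS2, lookup 1, PAG 0: a trailing GOTO may be
+ * omitted there and the drop action can be offloaded.
+ */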
-static int ocelot_flower_parse(struct flow_cls_offload *f,
- struct ocelot_vcap_filter *filter)
+static int ocelot_flower_parse_indev(struct ocelot *ocelot, int port,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ int key_length = vcap->keys[VCAP_ES0_IGR_PORT].length;
+ struct netlink_ext_ack *extack = f->common.extack;
+ struct net_device *dev, *indev;
+ struct flow_match_meta match;
+ int ingress_port;
+
+	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+		return 0;
+
+	flow_rule_match_meta(rule, &match);
+
+	if (!match.mask->ingress_ifindex)
+		return 0;
+
+ if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
+ return -EOPNOTSUPP;
+ }
+
+ dev = ocelot->ops->port_to_netdev(ocelot, port);
+ if (!dev)
+ return -EINVAL;
+
+ indev = __dev_get_by_index(dev_net(dev), match.key->ingress_ifindex);
+ if (!indev) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can't find the ingress port to match on");
+ return -ENOENT;
+ }
+
+ ingress_port = ocelot->ops->netdev_to_port(indev);
+ if (ingress_port < 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload an ocelot ingress port");
+ return -EOPNOTSUPP;
+ }
+ if (ingress_port == port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Ingress port is equal to the egress port");
+ return -EINVAL;
+ }
+
+ filter->ingress_port.value = ingress_port;
+ filter->ingress_port.mask = GENMASK(key_length - 1, 0);
+
+ return 0;
+}
+
+static int
+ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct flow_dissector *dissector = rule->match.dissector;
+ struct netlink_ext_ack *extack = f->common.extack;
u16 proto = ntohs(f->common.protocol);
bool match_protocol = true;
+ int ret;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_META) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
BIT(FLOW_DISSECTOR_KEY_VLAN) |
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
@@ -63,6 +440,13 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
return -EOPNOTSUPP;
}
+ /* For VCAP ES0 (egress rewriter) we can match on the ingress port */
+ if (!ingress) {
+ ret = ocelot_flower_parse_indev(ocelot, port, f, filter);
+ if (ret)
+ return ret;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_match_control match;
@@ -72,6 +456,19 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on MAC address");
+ return -EOPNOTSUPP;
+ }
+
+		flow_rule_match_eth_addrs(rule, &match);
+
+		if (filter->block_id == VCAP_IS1 &&
+		    !is_zero_ether_addr(match.mask->dst)) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Key type S1_NORMAL cannot match on destination MAC");
+			return -EOPNOTSUPP;
+		}
+
 		/* The hardware supports MAC matches only for the MAC_ETYPE key,
 		 * therefore if other matches (port, TCP flags, etc) are added
 		 * then just bail out
@@ -103,6 +500,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
flow_rule_match_basic(rule, &match);
if (ntohs(match.key->n_proto) == ETH_P_IP) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP protocol");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_IPV4;
filter->key.ipv4.proto.value[0] =
match.key->ip_proto;
@@ -111,6 +514,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
match_protocol = false;
}
if (ntohs(match.key->n_proto) == ETH_P_IPV6) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP protocol");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_IPV6;
filter->key.ipv6.proto.value[0] =
match.key->ip_proto;
@@ -125,6 +534,18 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
struct flow_match_ipv4_addrs match;
u8 *tmp;
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on IP address");
+ return -EOPNOTSUPP;
+ }
+
 		flow_rule_match_ipv4_addrs(rule, &match);
+
+		if (filter->block_id == VCAP_IS1 && *(u32 *)&match.mask->dst) {
+			NL_SET_ERR_MSG_MOD(extack,
+					   "Key type S1_NORMAL cannot match on destination IP");
+			return -EOPNOTSUPP;
+		}
tmp = &filter->key.ipv4.sip.value.addr[0];
memcpy(tmp, &match.key->src, 4);
@@ -148,6 +569,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on L4 ports");
+ return -EOPNOTSUPP;
+ }
+
flow_rule_match_ports(rule, &match);
filter->key.ipv4.sport.value = ntohs(match.key->src);
filter->key.ipv4.sport.mask = ntohs(match.mask->src);
@@ -170,6 +597,12 @@ static int ocelot_flower_parse(struct flow_cls_offload *f,
finished_key_parsing:
if (match_protocol && proto != ETH_P_ALL) {
+ if (filter->block_id == VCAP_ES0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 cannot match on L2 proto");
+ return -EOPNOTSUPP;
+ }
+
/* TODO: support SNAP, LLC etc */
if (proto < ETH_P_802_3_MIN)
return -EOPNOTSUPP;
@@ -179,14 +612,28 @@ finished_key_parsing:
}
/* else, a filter of type OCELOT_VCAP_KEY_ANY is implicitly added */
+ return 0;
+}
+
+static int ocelot_flower_parse(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f,
+ struct ocelot_vcap_filter *filter)
+{
+ int ret;
+
filter->prio = f->common.prio;
filter->id = f->cookie;
- return ocelot_flower_parse_action(f, filter);
+
+ ret = ocelot_flower_parse_action(ocelot, port, ingress, f, filter);
+ if (ret)
+ return ret;
+
+ return ocelot_flower_parse_key(ocelot, port, ingress, f, filter);
}
static struct ocelot_vcap_filter
-*ocelot_vcap_filter_create(struct ocelot *ocelot, int port,
- struct flow_cls_offload *f)
+*ocelot_vcap_filter_create(struct ocelot *ocelot, int port, bool ingress,
+ struct flow_cls_offload *f)
{
struct ocelot_vcap_filter *filter;
@@ -194,26 +641,65 @@ static struct ocelot_vcap_filter
if (!filter)
return NULL;
- filter->ingress_port_mask = BIT(port);
+ if (ingress) {
+ filter->ingress_port_mask = BIT(port);
+ } else {
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ int key_length = vcap->keys[VCAP_ES0_EGR_PORT].length;
+
+ filter->egress_port.value = port;
+ filter->egress_port.mask = GENMASK(key_length - 1, 0);
+ }
+
return filter;
}
+static int ocelot_vcap_dummy_filter_add(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ list_add(&filter->list, &ocelot->dummy_rules);
+
+ return 0;
+}
+
+static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
+ struct ocelot_vcap_filter *filter)
+{
+ list_del(&filter->list);
+ kfree(filter);
+
+ return 0;
+}
+
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct ocelot_vcap_filter *filter;
+ int chain = f->common.chain_index;
int ret;
- filter = ocelot_vcap_filter_create(ocelot, port, f);
+ if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
+ NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
+ return -EOPNOTSUPP;
+ }
+
+ filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
if (!filter)
return -ENOMEM;
- ret = ocelot_flower_parse(f, filter);
+ ret = ocelot_flower_parse(ocelot, port, ingress, f, filter);
if (ret) {
kfree(filter);
return ret;
}
+	/* The non-optional GOTOs that build the TCAM skeleton don't
+	 * actually need to be offloaded.
+	 */
+ if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return ocelot_vcap_dummy_filter_add(ocelot, filter);
+
return ocelot_vcap_filter_add(ocelot, filter, f->common.extack);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
@@ -221,28 +707,49 @@ EXPORT_SYMBOL_GPL(ocelot_cls_flower_replace);
int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_vcap_filter filter;
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id;
+
+ block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
+ if (block_id < 0)
+ return 0;
- filter.prio = f->common.prio;
- filter.id = f->cookie;
+ block = &ocelot->block[block_id];
- return ocelot_vcap_filter_del(ocelot, &filter);
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ if (!filter)
+ return 0;
+
+ if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return ocelot_vcap_dummy_filter_del(ocelot, filter);
+
+ return ocelot_vcap_filter_del(ocelot, filter);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
int ocelot_cls_flower_stats(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
- struct ocelot_vcap_filter filter;
- int ret;
+ struct ocelot_vcap_filter *filter;
+ struct ocelot_vcap_block *block;
+ int block_id, ret;
+
+ block_id = ocelot_chain_to_block(f->common.chain_index, ingress);
+ if (block_id < 0)
+ return 0;
+
+ block = &ocelot->block[block_id];
+
+ filter = ocelot_vcap_block_find_filter_by_id(block, f->cookie);
+ if (!filter || filter->type == OCELOT_VCAP_FILTER_DUMMY)
+ return 0;
- filter.prio = f->common.prio;
- filter.id = f->cookie;
- ret = ocelot_vcap_filter_stats_update(ocelot, &filter);
+ ret = ocelot_vcap_filter_stats_update(ocelot, filter);
if (ret)
return ret;
- flow_stats_update(&f->stats, 0x0, filter.stats.pkts, 0, 0x0,
+ flow_stats_update(&f->stats, 0x0, filter->stats.pkts, 0, 0x0,
FLOW_ACTION_HW_STATS_IMMEDIATE);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index d22711282183..0acb45948418 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -71,6 +71,23 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);
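+
+/* Accessors for the per-VCAP register targets; normally used through the
+ * ocelot_target_read()/ocelot_target_write() wrappers, e.g. for
+ * VCAP_CORE_UPDATE_CTRL in ocelot_vcap.c
+ */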
+u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 reg, u32 offset)
+{
+ u32 val;
+
+ regmap_read(ocelot->targets[target],
+ ocelot->map[target][reg] + offset, &val);
+ return val;
+}
+
+void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 val, u32 reg, u32 offset)
+{
+ regmap_write(ocelot->targets[target],
+ ocelot->map[target][reg] + offset, val);
+}
+
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields)
{
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 8490e42e9e2d..b34da11acf65 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -330,7 +330,6 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
u8 grp = 0; /* Send everything on CPU group 0 */
unsigned int i, count, last;
int port = priv->chip_port;
- bool do_tstamp;
val = ocelot_read(ocelot, QS_INJ_STATUS);
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) ||
@@ -345,7 +344,23 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
info.vid = skb_vlan_tag_get(skb);
/* Check if timestamping is needed */
- do_tstamp = (ocelot_port_add_txtstamp_skb(ocelot_port, skb) == 0);
+ if (ocelot->ptp && (shinfo->tx_flags & SKBTX_HW_TSTAMP)) {
+ info.rew_op = ocelot_port->ptp_cmd;
+
+ if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) {
+ struct sk_buff *clone;
+
+ clone = skb_clone_sk(skb);
+ if (!clone) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ ocelot_port_add_txtstamp_skb(ocelot, port, clone);
+
+ info.rew_op |= clone->cb[0] << 3;
+ }
+ }
-	if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) {
-		info.rew_op = ocelot_port->ptp_cmd;
-		if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP)
-			info.rew_op |= (ocelot_port->ts_id % 4) << 3;
-	}
@@ -383,8 +398,7 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- if (!do_tstamp)
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -642,6 +656,37 @@ static const struct net_device_ops ocelot_port_netdev_ops = {
.ndo_do_ioctl = ocelot_ioctl,
};
+struct net_device *ocelot_port_to_netdev(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct ocelot_port_private *priv;
+
+ if (!ocelot_port)
+ return NULL;
+
+ priv = container_of(ocelot_port, struct ocelot_port_private, port);
+
+ return priv->dev;
+}
+
+/* Checks if the net_device instance given to us originates from our driver */
+static bool ocelot_netdevice_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &ocelot_port_netdev_ops;
+}
+
+int ocelot_netdev_to_port(struct net_device *dev)
+{
+ struct ocelot_port_private *priv;
+
+ if (!dev || !ocelot_netdevice_dev_check(dev))
+ return -EINVAL;
+
+ priv = netdev_priv(dev);
+
+ return priv->chip_port;
+}
+
static void ocelot_port_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
@@ -746,7 +791,7 @@ static int ocelot_port_attr_set(struct net_device *dev,
break;
case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
ocelot_port_vlan_filtering(ocelot, port,
- attr->u.vlan_filtering);
+ attr->u.vlan_filtering, trans);
break;
case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled);
@@ -863,12 +908,6 @@ static int ocelot_port_obj_del(struct net_device *dev,
return ret;
}
-/* Checks if the net_device instance given to us originate from our driver. */
-static bool ocelot_netdevice_dev_check(const struct net_device *dev)
-{
- return dev->netdev_ops == &ocelot_port_netdev_ops;
-}
-
static int ocelot_netdevice_port_event(struct net_device *dev,
unsigned long event,
struct netdev_notifier_changeupper_info *info)
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index 1e08fe4daaef..a33ab315cc6b 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -300,7 +300,8 @@ int ocelot_ptp_enable(struct ptp_clock_info *ptp,
}
EXPORT_SYMBOL(ocelot_ptp_enable);
-int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info)
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info)
{
struct ptp_clock *ptp_clock;
int i;
diff --git a/drivers/net/ethernet/mscc/ocelot_s2.h b/drivers/net/ethernet/mscc/ocelot_s2.h
deleted file mode 100644
index 80107bec2e45..000000000000
--- a/drivers/net/ethernet/mscc/ocelot_s2.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
-/* Microsemi Ocelot Switch driver
- * Copyright (c) 2018 Microsemi Corporation
- */
-
-#ifndef _OCELOT_S2_CORE_H_
-#define _OCELOT_S2_CORE_H_
-
-#define S2_CORE_UPDATE_CTRL_UPDATE_CMD(x) (((x) << 22) & GENMASK(24, 22))
-#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_M GENMASK(24, 22)
-#define S2_CORE_UPDATE_CTRL_UPDATE_CMD_X(x) (((x) & GENMASK(24, 22)) >> 22)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
-#define S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR(x) (((x) << 3) & GENMASK(18, 3))
-#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_M GENMASK(18, 3)
-#define S2_CORE_UPDATE_CTRL_UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)
-#define S2_CORE_UPDATE_CTRL_UPDATE_SHOT BIT(2)
-#define S2_CORE_UPDATE_CTRL_CLEAR_CACHE BIT(1)
-#define S2_CORE_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
-
-#define S2_CORE_MV_CFG_MV_NUM_POS(x) (((x) << 16) & GENMASK(31, 16))
-#define S2_CORE_MV_CFG_MV_NUM_POS_M GENMASK(31, 16)
-#define S2_CORE_MV_CFG_MV_NUM_POS_X(x) (((x) & GENMASK(31, 16)) >> 16)
-#define S2_CORE_MV_CFG_MV_SIZE(x) ((x) & GENMASK(15, 0))
-#define S2_CORE_MV_CFG_MV_SIZE_M GENMASK(15, 0)
-
-#define S2_CACHE_ENTRY_DAT_RSZ 0x4
-
-#define S2_CACHE_MASK_DAT_RSZ 0x4
-
-#define S2_CACHE_ACTION_DAT_RSZ 0x4
-
-#define S2_CACHE_CNT_DAT_RSZ 0x4
-
-#define S2_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
-
-#define S2_BIST_CTRL_TCAM_BIST BIT(1)
-#define S2_BIST_CTRL_TCAM_INIT BIT(0)
-
-#define S2_BIST_CFG_TCAM_BIST_SOE_ENA BIT(8)
-#define S2_BIST_CFG_TCAM_HCG_DIS BIT(7)
-#define S2_BIST_CFG_TCAM_CG_DIS BIT(6)
-#define S2_BIST_CFG_TCAM_BIAS(x) ((x) & GENMASK(5, 0))
-#define S2_BIST_CFG_TCAM_BIAS_M GENMASK(5, 0)
-
-#define S2_BIST_STAT_BIST_RT_ERR BIT(15)
-#define S2_BIST_STAT_BIST_PENC_ERR BIT(14)
-#define S2_BIST_STAT_BIST_COMP_ERR BIT(13)
-#define S2_BIST_STAT_BIST_ADDR_ERR BIT(12)
-#define S2_BIST_STAT_BIST_BL1E_ERR BIT(11)
-#define S2_BIST_STAT_BIST_BL1_ERR BIT(10)
-#define S2_BIST_STAT_BIST_BL0E_ERR BIT(9)
-#define S2_BIST_STAT_BIST_BL0_ERR BIT(8)
-#define S2_BIST_STAT_BIST_PH1_ERR BIT(7)
-#define S2_BIST_STAT_BIST_PH0_ERR BIT(6)
-#define S2_BIST_STAT_BIST_PV1_ERR BIT(5)
-#define S2_BIST_STAT_BIST_PV0_ERR BIT(4)
-#define S2_BIST_STAT_BIST_RUN BIT(3)
-#define S2_BIST_STAT_BIST_ERR BIT(2)
-#define S2_BIST_STAT_BIST_BUSY BIT(1)
-#define S2_BIST_STAT_TCAM_RDY BIT(0)
-
-#endif /* _OCELOT_S2_CORE_H_ */
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 3ef620faf995..d8c778ee6f1b 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -9,9 +9,7 @@
#include <soc/mscc/ocelot_vcap.h>
#include "ocelot_police.h"
#include "ocelot_vcap.h"
-#include "ocelot_s2.h"
-#define OCELOT_POLICER_DISCARD 0x17f
#define ENTRY_WIDTH 32
enum vcap_sel {
@@ -48,145 +46,174 @@ struct vcap_data {
u32 tg_mask; /* Current type-group mask */
};
-static u32 vcap_s2_read_update_ctrl(struct ocelot *ocelot)
+static u32 vcap_read_update_ctrl(struct ocelot *ocelot,
+ const struct vcap_props *vcap)
{
- return ocelot_read(ocelot, S2_CORE_UPDATE_CTRL);
+ return ocelot_target_read(ocelot, vcap->target, VCAP_CORE_UPDATE_CTRL);
}
-static void vcap_cmd(struct ocelot *ocelot, u16 ix, int cmd, int sel)
+static void vcap_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
+ u16 ix, int cmd, int sel)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ u32 value = (VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
+ VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
+ VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT);
- u32 value = (S2_CORE_UPDATE_CTRL_UPDATE_CMD(cmd) |
- S2_CORE_UPDATE_CTRL_UPDATE_ADDR(ix) |
- S2_CORE_UPDATE_CTRL_UPDATE_SHOT);
-
- if ((sel & VCAP_SEL_ENTRY) && ix >= vcap_is2->entry_count)
+ if ((sel & VCAP_SEL_ENTRY) && ix >= vcap->entry_count)
return;
if (!(sel & VCAP_SEL_ENTRY))
- value |= S2_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS;
if (!(sel & VCAP_SEL_ACTION))
- value |= S2_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS;
if (!(sel & VCAP_SEL_COUNTER))
- value |= S2_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
+ value |= VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS;
+
+ ocelot_target_write(ocelot, vcap->target, value, VCAP_CORE_UPDATE_CTRL);
- ocelot_write(ocelot, value, S2_CORE_UPDATE_CTRL);
- readx_poll_timeout(vcap_s2_read_update_ctrl, ocelot, value,
- (value & S2_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
- 10, 100000);
+ read_poll_timeout(vcap_read_update_ctrl, value,
+ (value & VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT) == 0,
+ 10, 100000, false, ocelot, vcap);
}
/* Convert from 0-based row to VCAP entry row and run command */
-static void vcap_row_cmd(struct ocelot *ocelot, u32 row, int cmd, int sel)
+static void vcap_row_cmd(struct ocelot *ocelot, const struct vcap_props *vcap,
+ u32 row, int cmd, int sel)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
-
- vcap_cmd(ocelot, vcap_is2->entry_count - row - 1, cmd, sel);
+ vcap_cmd(ocelot, vcap, vcap->entry_count - row - 1, cmd, sel);
}
-static void vcap_entry2cache(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_entry2cache(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 entry_words, i;
- entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
+ entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
for (i = 0; i < entry_words; i++) {
- ocelot_write_rix(ocelot, data->entry[i], S2_CACHE_ENTRY_DAT, i);
- ocelot_write_rix(ocelot, ~data->mask[i], S2_CACHE_MASK_DAT, i);
+ ocelot_target_write_rix(ocelot, vcap->target, data->entry[i],
+ VCAP_CACHE_ENTRY_DAT, i);
+ ocelot_target_write_rix(ocelot, vcap->target, ~data->mask[i],
+ VCAP_CACHE_MASK_DAT, i);
}
- ocelot_write(ocelot, data->tg, S2_CACHE_TG_DAT);
+ ocelot_target_write(ocelot, vcap->target, data->tg, VCAP_CACHE_TG_DAT);
}
-static void vcap_cache2entry(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_cache2entry(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 entry_words, i;
- entry_words = DIV_ROUND_UP(vcap_is2->entry_width, ENTRY_WIDTH);
+ entry_words = DIV_ROUND_UP(vcap->entry_width, ENTRY_WIDTH);
for (i = 0; i < entry_words; i++) {
- data->entry[i] = ocelot_read_rix(ocelot, S2_CACHE_ENTRY_DAT, i);
+ data->entry[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_ENTRY_DAT, i);
// Invert mask
- data->mask[i] = ~ocelot_read_rix(ocelot, S2_CACHE_MASK_DAT, i);
+ data->mask[i] = ~ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_MASK_DAT, i);
}
- data->tg = ocelot_read(ocelot, S2_CACHE_TG_DAT);
+ data->tg = ocelot_target_read(ocelot, vcap->target, VCAP_CACHE_TG_DAT);
}
-static void vcap_action2cache(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_action2cache(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 action_words, mask;
int i, width;
/* Encode action type */
- width = vcap_is2->action_type_width;
+ width = vcap->action_type_width;
if (width) {
mask = GENMASK(width, 0);
data->action[0] = ((data->action[0] & ~mask) | data->type);
}
- action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
+ action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
for (i = 0; i < action_words; i++)
- ocelot_write_rix(ocelot, data->action[i], S2_CACHE_ACTION_DAT,
- i);
+ ocelot_target_write_rix(ocelot, vcap->target, data->action[i],
+ VCAP_CACHE_ACTION_DAT, i);
- for (i = 0; i < vcap_is2->counter_words; i++)
- ocelot_write_rix(ocelot, data->counter[i], S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < vcap->counter_words; i++)
+ ocelot_target_write_rix(ocelot, vcap->target, data->counter[i],
+ VCAP_CACHE_CNT_DAT, i);
}
-static void vcap_cache2action(struct ocelot *ocelot, struct vcap_data *data)
+static void vcap_cache2action(struct ocelot *ocelot,
+ const struct vcap_props *vcap,
+ struct vcap_data *data)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
u32 action_words;
int i, width;
- action_words = DIV_ROUND_UP(vcap_is2->action_width, ENTRY_WIDTH);
+ action_words = DIV_ROUND_UP(vcap->action_width, ENTRY_WIDTH);
for (i = 0; i < action_words; i++)
- data->action[i] = ocelot_read_rix(ocelot, S2_CACHE_ACTION_DAT,
- i);
+ data->action[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_ACTION_DAT,
+ i);
- for (i = 0; i < vcap_is2->counter_words; i++)
- data->counter[i] = ocelot_read_rix(ocelot, S2_CACHE_CNT_DAT, i);
+ for (i = 0; i < vcap->counter_words; i++)
+ data->counter[i] = ocelot_target_read_rix(ocelot, vcap->target,
+ VCAP_CACHE_CNT_DAT,
+ i);
/* Extract action type */
- width = vcap_is2->action_type_width;
+ width = vcap->action_type_width;
data->type = (width ? (data->action[0] & GENMASK(width, 0)) : 0);
}
/* Calculate offsets for entry */
-static void is2_data_get(struct ocelot *ocelot, struct vcap_data *data, int ix)
+static void vcap_data_offset_get(const struct vcap_props *vcap,
+ struct vcap_data *data, int ix)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
- int i, col, offset, count, cnt, base;
- int width = vcap_is2->tg_width;
+ int num_subwords_per_entry, num_subwords_per_action;
+ int i, col, offset, num_entries_per_row, base;
+ u32 width = vcap->tg_width;
- count = (data->tg_sw == VCAP_TG_HALF ? 2 : 4);
- col = (ix % 2);
- cnt = (vcap_is2->sw_count / count);
- base = (vcap_is2->sw_count - col * cnt - cnt);
+ switch (data->tg_sw) {
+ case VCAP_TG_FULL:
+ num_entries_per_row = 1;
+ break;
+ case VCAP_TG_HALF:
+ num_entries_per_row = 2;
+ break;
+ case VCAP_TG_QUARTER:
+ num_entries_per_row = 4;
+ break;
+ default:
+ return;
+ }
+
+ col = (ix % num_entries_per_row);
+ num_subwords_per_entry = (vcap->sw_count / num_entries_per_row);
+ base = (vcap->sw_count - col * num_subwords_per_entry -
+ num_subwords_per_entry);
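+	/* e.g. with VCAP_TG_HALF and sw_count == 4, entry ix == 5 sits in
+	 * column 1 with two subwords per entry, so base == 4 - 1 * 2 - 2 == 0
+	 */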
data->tg_value = 0;
data->tg_mask = 0;
- for (i = 0; i < cnt; i++) {
+ for (i = 0; i < num_subwords_per_entry; i++) {
offset = ((base + i) * width);
data->tg_value |= (data->tg_sw << offset);
data->tg_mask |= GENMASK(offset + width - 1, offset);
}
/* Calculate key/action/counter offsets */
- col = (count - col - 1);
- data->key_offset = (base * vcap_is2->entry_width) / vcap_is2->sw_count;
- data->counter_offset = (cnt * col * vcap_is2->counter_width);
+ col = (num_entries_per_row - col - 1);
+ data->key_offset = (base * vcap->entry_width) / vcap->sw_count;
+ data->counter_offset = (num_subwords_per_entry * col *
+ vcap->counter_width);
i = data->type;
- width = vcap_is2->action_table[i].width;
- cnt = vcap_is2->action_table[i].count;
- data->action_offset =
- (((cnt * col * width) / count) + vcap_is2->action_type_width);
+ width = vcap->action_table[i].width;
+ num_subwords_per_action = vcap->action_table[i].count;
+ data->action_offset = ((num_subwords_per_action * col * width) /
+ num_entries_per_row);
+ data->action_offset += vcap->action_type_width;
}
static void vcap_data_set(u32 *data, u32 offset, u32 len, u32 value)
@@ -224,22 +251,21 @@ static void vcap_key_field_set(struct vcap_data *data, u32 offset, u32 width,
vcap_data_set(data->mask, offset + data->key_offset, width, mask);
}
-static void vcap_key_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
- u32 value, u32 mask)
+static void vcap_key_set(const struct vcap_props *vcap, struct vcap_data *data,
+ int field, u32 value, u32 mask)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 length = ocelot->vcap_is2_keys[field].length;
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
vcap_key_field_set(data, offset, length, value, mask);
}
-static void vcap_key_bytes_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
+static void vcap_key_bytes_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
u8 *val, u8 *msk)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 count = ocelot->vcap_is2_keys[field].length;
+ u32 offset = vcap->keys[field].offset;
+ u32 count = vcap->keys[field].length;
u32 i, j, n = 0, value = 0, mask = 0;
WARN_ON(count % 8);
@@ -265,37 +291,37 @@ static void vcap_key_bytes_set(struct ocelot *ocelot, struct vcap_data *data,
}
}
-static void vcap_key_l4_port_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
+static void vcap_key_l4_port_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
struct ocelot_vcap_udp_tcp *port)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 length = ocelot->vcap_is2_keys[field].length;
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
WARN_ON(length != 16);
vcap_key_field_set(data, offset, length, port->value, port->mask);
}
-static void vcap_key_bit_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_half_key_field field,
+static void vcap_key_bit_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field,
enum ocelot_vcap_bit val)
{
- u32 offset = ocelot->vcap_is2_keys[field].offset;
- u32 length = ocelot->vcap_is2_keys[field].length;
u32 value = (val == OCELOT_VCAP_BIT_1 ? 1 : 0);
u32 msk = (val == OCELOT_VCAP_BIT_ANY ? 0 : 1);
+ u32 offset = vcap->keys[field].offset;
+ u32 length = vcap->keys[field].length;
WARN_ON(length != 1);
vcap_key_field_set(data, offset, length, value, msk);
}
-static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
- enum vcap_is2_action_field field, u32 value)
+static void vcap_action_set(const struct vcap_props *vcap,
+ struct vcap_data *data, int field, u32 value)
{
- int offset = ocelot->vcap_is2_actions[field].offset;
- int length = ocelot->vcap_is2_actions[field].length;
+ int offset = vcap->actions[field].offset;
+ int length = vcap->actions[field].length;
vcap_data_set(data->action, offset + data->action_offset, length,
value);
@@ -304,40 +330,21 @@ static void vcap_action_set(struct ocelot *ocelot, struct vcap_data *data,
static void is2_action_set(struct ocelot *ocelot, struct vcap_data *data,
struct ocelot_vcap_filter *filter)
{
- switch (filter->action) {
- case OCELOT_VCAP_ACTION_DROP:
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
- OCELOT_POLICER_DISCARD);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
- break;
- case OCELOT_VCAP_ACTION_TRAP:
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 1);
- break;
- case OCELOT_VCAP_ACTION_POLICE:
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_PORT_MASK, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_MASK_MODE, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_ENA, 1);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_POLICE_IDX,
- filter->pol_ix);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_QU_NUM, 0);
- vcap_action_set(ocelot, data, VCAP_IS2_ACT_CPU_COPY_ENA, 0);
- break;
- }
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
+ struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_MASK_MODE, a->mask_mode);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_PORT_MASK, a->port_mask);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_ENA, a->police_ena);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_POLICE_IDX, a->pol_ix);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_QU_NUM, a->cpu_qu_num);
+ vcap_action_set(vcap, data, VCAP_IS2_ACT_CPU_COPY_ENA, a->cpu_copy_ena);
}
static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_filter *filter)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS2];
struct ocelot_vcap_key_vlan *tag = &filter->vlan;
u32 val, msk, type, type_mask = 0xf, i, count;
struct ocelot_vcap_u64 payload;
@@ -348,52 +355,55 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
memset(&data, 0, sizeof(data));
/* Read row */
- vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_ALL);
- vcap_cache2entry(ocelot, &data);
- vcap_cache2action(ocelot, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(ocelot, &data, ix);
+ vcap_data_offset_get(vcap, &data, ix);
data.tg = (data.tg & ~data.tg_mask);
if (filter->prio != 0)
data.tg |= data.tg_value;
data.type = IS2_ACTION_TYPE_NORMAL;
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_PAG, 0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_PAG, filter->pag, 0xff);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_FIRST,
+ (filter->lookup == 0) ? OCELOT_VCAP_BIT_1 :
+ OCELOT_VCAP_BIT_0);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_IGR_PORT_MASK, 0,
~filter->ingress_port_mask);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_FIRST, OCELOT_VCAP_BIT_1);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_HOST_MATCH,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_HOST_MATCH,
OCELOT_VCAP_BIT_ANY);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_VID,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L2_BC, filter->dmac_bc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_VID,
tag->vid.value, tag->vid.mask);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_PCP,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_PCP,
tag->pcp.value[0], tag->pcp.mask[0]);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DEI, tag->dei);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DEI, tag->dei);
switch (filter->key_type) {
case OCELOT_VCAP_KEY_ETYPE: {
struct ocelot_vcap_key_etype *etype = &filter->key.etype;
type = IS2_TYPE_ETYPE;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
etype->dmac.value, etype->dmac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
etype->smac.value, etype->smac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_ETYPE,
etype->etype.value, etype->etype.mask);
/* Clear unused bits */
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD1,
0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD2,
0, 0);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ETYPE_L2_PAYLOAD0,
etype->data.value, etype->data.mask);
break;
@@ -402,15 +412,15 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_key_llc *llc = &filter->key.llc;
type = IS2_TYPE_LLC;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
llc->dmac.value, llc->dmac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
llc->smac.value, llc->smac.mask);
for (i = 0; i < 4; i++) {
payload.value[i] = llc->llc.value[i];
payload.mask[i] = llc->llc.mask[i];
}
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_LLC_L2_LLC,
payload.value, payload.mask);
break;
}
@@ -418,11 +428,11 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_key_snap *snap = &filter->key.snap;
type = IS2_TYPE_SNAP;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_DMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_DMAC,
snap->dmac.value, snap->dmac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L2_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L2_SMAC,
snap->smac.value, snap->smac.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_SNAP_L2_SNAP,
filter->key.snap.snap.value,
filter->key.snap.snap.mask);
break;
@@ -431,24 +441,24 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
struct ocelot_vcap_key_arp *arp = &filter->key.arp;
type = IS2_TYPE_ARP;
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_SMAC,
arp->smac.value, arp->smac.mask);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_ADDR_SPACE_OK,
arp->ethernet);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_PROTO_SPACE_OK,
arp->ip);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_LEN_OK,
arp->length);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_TARGET_MATCH,
arp->dmac_match);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_SENDER_MATCH,
arp->smac_match);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_OPCODE_UNKNOWN,
arp->unknown);
@@ -457,15 +467,15 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
(arp->arp == OCELOT_VCAP_BIT_0 ? 2 : 0));
msk = ((arp->req == OCELOT_VCAP_BIT_ANY ? 0 : 1) |
(arp->arp == OCELOT_VCAP_BIT_ANY ? 0 : 2));
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_OPCODE,
val, msk);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_L3_IP4_DIP,
arp->dip.value.addr, arp->dip.mask.addr);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_MAC_ARP_L3_IP4_SIP,
arp->sip.value.addr, arp->sip.mask.addr);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_MAC_ARP_DIP_EQ_SIP,
0, 0);
break;
}
@@ -534,22 +544,22 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
seq_zero = ipv6->seq_zero;
}
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4,
ipv4 ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_FRAGMENT,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_FRAGMENT,
fragment);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L3_OPTIONS,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L3_FRAG_OFS_GT0, 0, 0);
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L3_OPTIONS,
options);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_IP4_L3_TTL_GT0,
ttl);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_TOS,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_TOS,
ds.value, ds.mask);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_DIP,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_DIP,
dip.value.addr, dip.mask.addr);
- vcap_key_bytes_set(ocelot, &data, VCAP_IS2_HK_L3_IP4_SIP,
+ vcap_key_bytes_set(vcap, &data, VCAP_IS2_HK_L3_IP4_SIP,
sip.value.addr, sip.mask.addr);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_DIP_EQ_SIP,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_DIP_EQ_SIP,
sip_eq_dip);
val = proto.value[0];
msk = proto.mask[0];
@@ -558,33 +568,33 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
/* UDP/TCP protocol match */
tcp = (val == 6 ?
OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_TCP, tcp);
- vcap_key_l4_port_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_TCP, tcp);
+ vcap_key_l4_port_set(vcap, &data,
VCAP_IS2_HK_L4_DPORT, dport);
- vcap_key_l4_port_set(ocelot, &data,
+ vcap_key_l4_port_set(vcap, &data,
VCAP_IS2_HK_L4_SPORT, sport);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_RNG, 0, 0);
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_L4_SPORT_EQ_DPORT,
sport_eq_dport);
- vcap_key_bit_set(ocelot, &data,
+ vcap_key_bit_set(vcap, &data,
VCAP_IS2_HK_L4_SEQUENCE_EQ0,
seq_zero);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_FIN,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_FIN,
tcp_fin);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_SYN,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_SYN,
tcp_syn);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_RST,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_RST,
tcp_rst);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_PSH,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_PSH,
tcp_psh);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_ACK,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_ACK,
tcp_ack);
- vcap_key_bit_set(ocelot, &data, VCAP_IS2_HK_L4_URG,
+ vcap_key_bit_set(vcap, &data, VCAP_IS2_HK_L4_URG,
tcp_urg);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_DOM,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_DOM,
0, 0);
- vcap_key_set(ocelot, &data, VCAP_IS2_HK_L4_1588_VER,
+ vcap_key_set(vcap, &data, VCAP_IS2_HK_L4_1588_VER,
0, 0);
} else {
if (msk == 0) {
@@ -598,10 +608,10 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
payload.mask[i] = ip_data->mask[i];
}
}
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_IP4_L3_PROTO,
proto.value, proto.mask);
- vcap_key_bytes_set(ocelot, &data,
+ vcap_key_bytes_set(vcap, &data,
VCAP_IS2_HK_L3_PAYLOAD,
payload.value, payload.mask);
}
@@ -611,46 +621,271 @@ static void is2_entry_set(struct ocelot *ocelot, int ix,
default:
type = 0;
type_mask = 0;
- count = vcap_is2->entry_width / 2;
+ count = vcap->entry_width / 2;
/* Iterate over the non-common part of the key and
* clear entry data
*/
- for (i = ocelot->vcap_is2_keys[VCAP_IS2_HK_L2_DMAC].offset;
+ for (i = vcap->keys[VCAP_IS2_HK_L2_DMAC].offset;
i < count; i += ENTRY_WIDTH) {
vcap_key_field_set(&data, i, min(32u, count - i), 0, 0);
}
break;
}
- vcap_key_set(ocelot, &data, VCAP_IS2_TYPE, type, type_mask);
+ vcap_key_set(vcap, &data, VCAP_IS2_TYPE, type, type_mask);
is2_action_set(ocelot, &data, filter);
vcap_data_set(data.counter, data.counter_offset,
- vcap_is2->counter_width, filter->stats.pkts);
+ vcap->counter_width, filter->stats.pkts);
/* Write row */
- vcap_entry2cache(ocelot, &data);
- vcap_action2cache(ocelot, &data);
- vcap_row_cmd(ocelot, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void is1_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ const struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
+ const struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_REPLACE_ENA,
+ a->vid_replace_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VID_ADD_VAL, a->vid);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT_ENA,
+ a->vlan_pop_cnt_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_VLAN_POP_CNT,
+ a->vlan_pop_cnt);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_DEI_ENA, a->pcp_dei_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PCP_VAL, a->pcp);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_DEI_VAL, a->dei);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_ENA, a->qos_ena);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_QOS_VAL, a->qos_val);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_OVERRIDE_MASK,
+ a->pag_override_mask);
+ vcap_action_set(vcap, data, VCAP_IS1_ACT_PAG_VAL, a->pag_val);
}
-static void is2_entry_get(struct ocelot *ocelot, struct ocelot_vcap_filter *filter,
- int ix)
+static void is1_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_IS1];
+ struct ocelot_vcap_key_vlan *tag = &filter->vlan;
+ struct ocelot_vcap_u64 payload;
struct vcap_data data;
- int row = (ix / 2);
- u32 cnt;
+ int row = ix / 2;
+ u32 type;
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
- vcap_row_cmd(ocelot, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
- vcap_cache2action(ocelot, &data);
data.tg_sw = VCAP_TG_HALF;
- is2_data_get(ocelot, &data, ix);
+ data.type = IS1_ACTION_TYPE_NORMAL;
+ vcap_data_offset_get(vcap, &data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (filter->prio != 0)
+ data.tg |= data.tg_value;
+
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_LOOKUP, filter->lookup, 0x3);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_IGR_PORT_MASK, 0,
+ ~filter->ingress_port_mask);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+ type = IS1_TYPE_S1_NORMAL;
+
+ switch (filter->key_type) {
+ case OCELOT_VCAP_KEY_ETYPE: {
+ struct ocelot_vcap_key_etype *etype = &filter->key.etype;
+
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L2_SMAC,
+ etype->smac.value, etype->smac.mask);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ etype->etype.value, etype->etype.mask);
+ break;
+ }
+ case OCELOT_VCAP_KEY_IPV4: {
+ struct ocelot_vcap_key_ipv4 *ipv4 = &filter->key.ipv4;
+ struct ocelot_vcap_udp_tcp *sport = &ipv4->sport;
+ struct ocelot_vcap_udp_tcp *dport = &ipv4->dport;
+ enum ocelot_vcap_bit tcp_udp = OCELOT_VCAP_BIT_0;
+ struct ocelot_vcap_u8 proto = ipv4->proto;
+ struct ocelot_vcap_ipv4 sip = ipv4->sip;
+ u32 val, msk;
+
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP_SNAP,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_IP4,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_ETYPE_LEN,
+ OCELOT_VCAP_BIT_1);
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_L3_IP4_SIP,
+ sip.value.addr, sip.mask.addr);
+
+ val = proto.value[0];
+ msk = proto.mask[0];
+
+ if ((val == NEXTHDR_TCP || val == NEXTHDR_UDP) && msk == 0xff)
+ tcp_udp = OCELOT_VCAP_BIT_1;
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP_UDP, tcp_udp);
+
+ if (tcp_udp) {
+ enum ocelot_vcap_bit tcp = OCELOT_VCAP_BIT_0;
+
+ if (val == NEXTHDR_TCP)
+ tcp = OCELOT_VCAP_BIT_1;
+
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TCP, tcp);
+ vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_L4_SPORT,
+ sport);
+ /* Overloaded field */
+ vcap_key_l4_port_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ dport);
+ } else {
+ /* IPv4 "other" frame */
+ struct ocelot_vcap_u16 etype = {0};
+
+ /* Overloaded field */
+ etype.value[0] = proto.value[0];
+ etype.mask[0] = proto.mask[0];
+
+ vcap_key_bytes_set(vcap, &data, VCAP_IS1_HK_ETYPE,
+ etype.value, etype.mask);
+ }
+		break;
+	}
+	default:
+ break;
+ }
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TYPE,
+ type ? OCELOT_VCAP_BIT_1 : OCELOT_VCAP_BIT_0);
+
+ is1_action_set(ocelot, &data, filter);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap->counter_width, filter->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void es0_action_set(struct ocelot *ocelot, struct vcap_data *data,
+ const struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ const struct ocelot_vcap_action *a = &filter->action;
+
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_OUTER_TAG,
+ a->push_outer_tag);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PUSH_INNER_TAG,
+ a->push_inner_tag);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_TPID_SEL,
+ a->tag_a_tpid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_VID_SEL,
+ a->tag_a_vid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_A_PCP_SEL,
+ a->tag_a_pcp_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_A_VAL, a->vid_a_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_A_VAL, a->pcp_a_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_TPID_SEL,
+ a->tag_b_tpid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_VID_SEL,
+ a->tag_b_vid_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_TAG_B_PCP_SEL,
+ a->tag_b_pcp_sel);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_VID_B_VAL, a->vid_b_val);
+ vcap_action_set(vcap, data, VCAP_ES0_ACT_PCP_B_VAL, a->pcp_b_val);
+}
+
+static void es0_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[VCAP_ES0];
+ struct ocelot_vcap_key_vlan *tag = &filter->vlan;
+ struct ocelot_vcap_u64 payload;
+ struct vcap_data data;
+ int row = ix;
+
+ memset(&payload, 0, sizeof(payload));
+ memset(&data, 0, sizeof(data));
+
+ /* Read row */
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_ALL);
+ vcap_cache2entry(ocelot, vcap, &data);
+ vcap_cache2action(ocelot, vcap, &data);
+
+ data.tg_sw = VCAP_TG_FULL;
+ data.type = ES0_ACTION_TYPE_NORMAL;
+ vcap_data_offset_get(vcap, &data, ix);
+ data.tg = (data.tg & ~data.tg_mask);
+ if (filter->prio != 0)
+ data.tg |= data.tg_value;
+
+ vcap_key_set(vcap, &data, VCAP_ES0_IGR_PORT, filter->ingress_port.value,
+ filter->ingress_port.mask);
+ vcap_key_set(vcap, &data, VCAP_ES0_EGR_PORT, filter->egress_port.value,
+ filter->egress_port.mask);
+ vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_MC, filter->dmac_mc);
+ vcap_key_bit_set(vcap, &data, VCAP_ES0_L2_BC, filter->dmac_bc);
+ vcap_key_set(vcap, &data, VCAP_ES0_VID,
+ tag->vid.value, tag->vid.mask);
+ vcap_key_set(vcap, &data, VCAP_ES0_PCP,
+ tag->pcp.value[0], tag->pcp.mask[0]);
+
+ es0_action_set(ocelot, &data, filter);
+ vcap_data_set(data.counter, data.counter_offset,
+ vcap->counter_width, filter->stats.pkts);
+
+ /* Write row */
+ vcap_entry2cache(ocelot, vcap, &data);
+ vcap_action2cache(ocelot, vcap, &data);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_WRITE, VCAP_SEL_ALL);
+}
+
+static void vcap_entry_get(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ const struct vcap_props *vcap = &ocelot->vcap[filter->block_id];
+ struct vcap_data data;
+ int row, count;
+ u32 cnt;
+
+ if (filter->block_id == VCAP_ES0)
+ data.tg_sw = VCAP_TG_FULL;
+ else
+ data.tg_sw = VCAP_TG_HALF;
+
+ count = (1 << (data.tg_sw - 1));
+ row = (ix / count);
+ vcap_row_cmd(ocelot, vcap, row, VCAP_CMD_READ, VCAP_SEL_COUNTER);
+ vcap_cache2action(ocelot, vcap, &data);
+ vcap_data_offset_get(vcap, &data, ix);
cnt = vcap_data_get(data.counter, data.counter_offset,
- vcap_is2->counter_width);
+ vcap->counter_width);
filter->stats.pkts = cnt;
}
+static void vcap_entry_set(struct ocelot *ocelot, int ix,
+ struct ocelot_vcap_filter *filter)
+{
+ if (filter->block_id == VCAP_IS1)
+ return is1_entry_set(ocelot, ix, filter);
+ if (filter->block_id == VCAP_IS2)
+ return is2_entry_set(ocelot, ix, filter);
+ if (filter->block_id == VCAP_ES0)
+ return es0_entry_set(ocelot, ix, filter);
+}
+
static int ocelot_vcap_policer_add(struct ocelot *ocelot, u32 pol_ix,
struct ocelot_policer *pol)
{
@@ -679,11 +914,12 @@ static void ocelot_vcap_policer_del(struct ocelot *ocelot,
list_for_each_entry(filter, &block->rules, list) {
index++;
- if (filter->action == OCELOT_VCAP_ACTION_POLICE &&
- filter->pol_ix < pol_ix) {
- filter->pol_ix += 1;
- ocelot_vcap_policer_add(ocelot, filter->pol_ix,
- &filter->pol);
+ if (filter->block_id == VCAP_IS2 &&
+ filter->action.police_ena &&
+ filter->action.pol_ix < pol_ix) {
+ filter->action.pol_ix += 1;
+ ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
is2_entry_set(ocelot, index, filter);
}
}
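(Worked example of the reindexing above, assuming the OCELOT_POLICER_DISCARD value of 0x17f declared in ocelot_vcap.h below: pol_lpr starts at 0x17e, so the first policed IS2 filter is assigned pol_ix 0x17d and the second 0x17c; deleting the first policer then bumps the second filter's pol_ix from 0x17c to 0x17d and rewrites its TCAM entry via is2_entry_set().)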
@@ -701,10 +937,11 @@ static void ocelot_vcap_filter_add_to_block(struct ocelot *ocelot,
struct ocelot_vcap_filter *tmp;
struct list_head *pos, *n;
- if (filter->action == OCELOT_VCAP_ACTION_POLICE) {
+ if (filter->block_id == VCAP_IS2 && filter->action.police_ena) {
block->pol_lpr--;
- filter->pol_ix = block->pol_lpr;
- ocelot_vcap_policer_add(ocelot, filter->pol_ix, &filter->pol);
+ filter->action.pol_ix = block->pol_lpr;
+ ocelot_vcap_policer_add(ocelot, filter->action.pol_ix,
+ &filter->action.pol);
}
block->count++;
@@ -726,19 +963,20 @@ static int ocelot_vcap_block_get_filter_index(struct ocelot_vcap_block *block,
struct ocelot_vcap_filter *filter)
{
struct ocelot_vcap_filter *tmp;
- int index = -1;
+ int index = 0;
list_for_each_entry(tmp, &block->rules, list) {
- ++index;
if (filter->id == tmp->id)
- break;
+ return index;
+ index++;
}
- return index;
+
+ return -ENOENT;
}
static struct ocelot_vcap_filter*
-ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block,
- int index)
+ocelot_vcap_block_find_filter_by_index(struct ocelot_vcap_block *block,
+ int index)
{
struct ocelot_vcap_filter *tmp;
int i = 0;
@@ -752,6 +990,18 @@ ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block,
return NULL;
}
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id)
+{
+ struct ocelot_vcap_filter *filter;
+
+ list_for_each_entry(filter, &block->rules, list)
+ if (filter->id == id)
+ return filter;
+
+ return NULL;
+}
+
/* If @on=false, then SNAP, ARP, IP and OAM frames will not match on keys based
* on destination and source MAC addresses, but only on higher-level protocol
* information. The only frame types to match on keys containing MAC addresses
@@ -763,23 +1013,23 @@ ocelot_vcap_block_find_filter(struct ocelot_vcap_block *block,
* on any _other_ keys than MAC_ETYPE ones.
*/
static void ocelot_match_all_as_mac_etype(struct ocelot *ocelot, int port,
- bool on)
+ int lookup, bool on)
{
u32 val = 0;
if (on)
- val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(3) |
- ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(3);
+ val = ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup));
ocelot_rmw_gix(ocelot, val,
- ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M |
- ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M,
+ ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(BIT(lookup)) |
+ ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(BIT(lookup)),
ANA_PORT_VCAP_S2_CFG, port);
}
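(The S2_*_DIS fields are two bits wide, one bit per IS2 lookup: the old code wrote 3 and disabled the SNAP/ARP/IP/OAM key types for both lookups at once, while with BIT(lookup) a filter in lookup 1 writes only bit 1 of each field and leaves lookup 0's classification untouched.)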
@@ -825,35 +1075,43 @@ static bool
ocelot_exclusive_mac_etype_filter_rules(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_block *block = &ocelot->block;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
struct ocelot_vcap_filter *tmp;
unsigned long port;
int i;
+ /* We only have the S2_IP_TCPUDP_DIS set of knobs for VCAP IS2 */
+ if (filter->block_id != VCAP_IS2)
+ return true;
+
if (ocelot_vcap_is_problematic_mac_etype(filter)) {
/* Search for any non-MAC_ETYPE rules on the port */
for (i = 0; i < block->count; i++) {
- tmp = ocelot_vcap_block_find_filter(block, i);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+ tmp->lookup == filter->lookup &&
ocelot_vcap_is_problematic_non_mac_etype(tmp))
return false;
}
for_each_set_bit(port, &filter->ingress_port_mask,
ocelot->num_phys_ports)
- ocelot_match_all_as_mac_etype(ocelot, port, true);
+ ocelot_match_all_as_mac_etype(ocelot, port,
+ filter->lookup, true);
} else if (ocelot_vcap_is_problematic_non_mac_etype(filter)) {
/* Search for any MAC_ETYPE rules on the port */
for (i = 0; i < block->count; i++) {
- tmp = ocelot_vcap_block_find_filter(block, i);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
if (tmp->ingress_port_mask & filter->ingress_port_mask &&
+ tmp->lookup == filter->lookup &&
ocelot_vcap_is_problematic_mac_etype(tmp))
return false;
}
for_each_set_bit(port, &filter->ingress_port_mask,
ocelot->num_phys_ports)
- ocelot_match_all_as_mac_etype(ocelot, port, false);
+ ocelot_match_all_as_mac_etype(ocelot, port,
+ filter->lookup, false);
}
return true;
@@ -863,12 +1121,12 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter,
struct netlink_ext_ack *extack)
{
- struct ocelot_vcap_block *block = &ocelot->block;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
int i, index;
if (!ocelot_exclusive_mac_etype_filter_rules(ocelot, filter)) {
NL_SET_ERR_MSG_MOD(extack,
- "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules");
+ "Cannot mix MAC_ETYPE with non-MAC_ETYPE rules, use the other IS2 lookup");
return -EBUSY;
}
@@ -877,17 +1135,19 @@ int ocelot_vcap_filter_add(struct ocelot *ocelot,
/* Get the index of the inserted filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
/* Move down the rules to make place for the new filter */
for (i = block->count - 1; i > index; i--) {
struct ocelot_vcap_filter *tmp;
- tmp = ocelot_vcap_block_find_filter(block, i);
- is2_entry_set(ocelot, i, tmp);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ vcap_entry_set(ocelot, i, tmp);
}
/* Now insert the new filter */
- is2_entry_set(ocelot, index, filter);
+ vcap_entry_set(ocelot, index, filter);
return 0;
}
@@ -901,9 +1161,10 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
list_for_each_safe(pos, q, &block->rules) {
tmp = list_entry(pos, struct ocelot_vcap_filter, list);
if (tmp->id == filter->id) {
- if (tmp->action == OCELOT_VCAP_ACTION_POLICE)
+ if (tmp->block_id == VCAP_IS2 &&
+ tmp->action.police_ena)
ocelot_vcap_policer_del(ocelot, block,
- tmp->pol_ix);
+ tmp->action.pol_ix);
list_del(pos);
kfree(tmp);
@@ -916,7 +1177,7 @@ static void ocelot_vcap_block_remove_filter(struct ocelot *ocelot,
int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_block *block = &ocelot->block;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
struct ocelot_vcap_filter del_filter;
int i, index;
@@ -924,6 +1185,8 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
/* Gets index of the filter */
index = ocelot_vcap_block_get_filter_index(block, filter);
+ if (index < 0)
+ return index;
/* Delete filter */
ocelot_vcap_block_remove_filter(ocelot, block, filter);
@@ -932,12 +1195,12 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
for (i = index; i < block->count; i++) {
struct ocelot_vcap_filter *tmp;
- tmp = ocelot_vcap_block_find_filter(block, i);
- is2_entry_set(ocelot, i, tmp);
+ tmp = ocelot_vcap_block_find_filter_by_index(block, i);
+ vcap_entry_set(ocelot, i, tmp);
}
/* Now delete the last filter, because it is duplicated */
- is2_entry_set(ocelot, block->count, &del_filter);
+ vcap_entry_set(ocelot, block->count, &del_filter);
return 0;
}
@@ -945,37 +1208,115 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_vcap_block *block = &ocelot->block;
- struct ocelot_vcap_filter *tmp;
+ struct ocelot_vcap_block *block = &ocelot->block[filter->block_id];
+ struct ocelot_vcap_filter tmp;
int index;
index = ocelot_vcap_block_get_filter_index(block, filter);
- is2_entry_get(ocelot, filter, index);
+ if (index < 0)
+ return index;
+
+ vcap_entry_get(ocelot, index, filter);
/* After we get the result we need to clear the counters */
- tmp = ocelot_vcap_block_find_filter(block, index);
- tmp->stats.pkts = 0;
- is2_entry_set(ocelot, index, tmp);
+ tmp = *filter;
+ tmp.stats.pkts = 0;
+ vcap_entry_set(ocelot, index, &tmp);
return 0;
}
-int ocelot_vcap_init(struct ocelot *ocelot)
+static void ocelot_vcap_init_one(struct ocelot *ocelot,
+ const struct vcap_props *vcap)
{
- const struct vcap_props *vcap_is2 = &ocelot->vcap[VCAP_IS2];
- struct ocelot_vcap_block *block = &ocelot->block;
struct vcap_data data;
memset(&data, 0, sizeof(data));
- vcap_entry2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2->entry_count, S2_CORE_MV_CFG);
- vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
+ vcap_entry2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->entry_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE, VCAP_SEL_ENTRY);
- vcap_action2cache(ocelot, &data);
- ocelot_write(ocelot, vcap_is2->action_count, S2_CORE_MV_CFG);
- vcap_cmd(ocelot, 0, VCAP_CMD_INITIALIZE,
+ vcap_action2cache(ocelot, vcap, &data);
+ ocelot_target_write(ocelot, vcap->target, vcap->action_count,
+ VCAP_CORE_MV_CFG);
+ vcap_cmd(ocelot, vcap, 0, VCAP_CMD_INITIALIZE,
VCAP_SEL_ACTION | VCAP_SEL_COUNTER);
+}
+
+static void ocelot_vcap_detect_constants(struct ocelot *ocelot,
+ struct vcap_props *vcap)
+{
+ int counter_memory_width;
+ int num_default_actions;
+ int version;
+
+ version = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_VCAP_VER);
+ /* Only version 0 VCAP supported for now */
+ if (WARN_ON(version != 0))
+ return;
+
+ /* Width in bits of type-group field */
+ vcap->tg_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_TG_WIDTH);
+ /* Number of subwords per TCAM row */
+ vcap->sw_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_SWCNT);
+ /* Number of rows in the TCAM. It can hold this many full keys, twice
+ * this many half keys, or four times this many quarter keys.
+ */
+ vcap->entry_count = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_CNT);
+ /* Assuming there are 4 subwords per TCAM row, their layout in the
+ * actual TCAM (not in the cache) would be:
+ *
+ * | SW 3 | TG 3 | SW 2 | TG 2 | SW 1 | TG 1 | SW 0 | TG 0 |
+ *
+ * (where SW=subword and TG=Type-Group).
+ *
+ * What VCAP_CONST_ENTRY_WIDTH is giving us is the width of one full TCAM
+ * row. But when software accesses the TCAM through the cache
+ * registers, the Type-Group values are written through another set of
+ * registers VCAP_TG_DAT, and therefore, it appears as though the 4
+ * subwords are contiguous in the cache memory.
+ * Important mention: regardless of the number of key entries per row
+ * (and therefore of key size: 1 full key or 2 half keys or 4 quarter
+ * keys), software always has to configure 4 Type-Group values. For
+ * example, in the case of 1 full key, the driver needs to set all 4
+ * Type-Group to be full key.
+ *
+ * For this reason, we need to fix up the value that the hardware is
+ * giving us. We don't actually care about the width of the entry in
+ * the TCAM. What we care about is the width of the entry in the cache
+ * registers, which is how we get to interact with it. And since the
+ * VCAP_ENTRY_DAT cache registers access only the subwords and not the
+ * Type-Groups, this means we need to subtract the width of the
+ * Type-Groups when packing and unpacking key entry data in a TCAM row.
+ */
+ vcap->entry_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ENTRY_WIDTH);
+ vcap->entry_width -= vcap->tg_width * vcap->sw_count;
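+ /* For example, with the VSC7514 IS2 numbers that used to be
+ * hardcoded (tg_width = 2, sw_count = 4): assuming the hardware
+ * reports ENTRY_WIDTH = 376 + 2 * 4 = 384, this subtraction
+ * recovers the 376 bits that VSC7514_VCAP_IS2_ENTRY_WIDTH used
+ * to specify.
+ */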
+ num_default_actions = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ACTION_DEF_CNT);
+ vcap->action_count = vcap->entry_count + num_default_actions;
+ vcap->action_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_ACTION_WIDTH);
+ /* The width of the counter memory, this is the complete width of all
+ * counter-fields associated with one full-word entry. There is one
+ * counter per entry sub-word (see CAP_CORE::ENTRY_SWCNT for number of
+ * subwords.)
+ */
+ vcap->counter_words = vcap->sw_count;
+ counter_memory_width = ocelot_target_read(ocelot, vcap->target,
+ VCAP_CONST_CNT_WIDTH);
+ vcap->counter_width = counter_memory_width / vcap->counter_words;
+}
+
+int ocelot_vcap_init(struct ocelot *ocelot)
+{
+ int i;
/* Create a policer that will drop the frames for the cpu.
* This policer will be used as action in the acl rules to drop
@@ -992,9 +1333,18 @@ int ocelot_vcap_init(struct ocelot *ocelot)
ocelot_write_gix(ocelot, 0x3fffff, ANA_POL_CIR_STATE,
OCELOT_POLICER_DISCARD);
- block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
+ for (i = 0; i < OCELOT_NUM_VCAP_BLOCKS; i++) {
+ struct ocelot_vcap_block *block = &ocelot->block[i];
+ struct vcap_props *vcap = &ocelot->vcap[i];
+
+ INIT_LIST_HEAD(&block->rules);
+ block->pol_lpr = OCELOT_POLICER_DISCARD - 1;
+
+ ocelot_vcap_detect_constants(ocelot, vcap);
+ ocelot_vcap_init_one(ocelot, vcap);
+ }
- INIT_LIST_HEAD(&ocelot->block.rules);
+ INIT_LIST_HEAD(&ocelot->dummy_rules);
return 0;
}
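To make the reworked API concrete, here is a minimal, illustrative sketch of adding a policed IS2 rule using the structures declared in ocelot_vcap.h below. The surrounding function, the key_type assignment, and the rate/burst values are assumptions for illustration, not taken from this patch:

static int example_police_port0(struct ocelot *ocelot,
				struct netlink_ext_ack *extack)
{
	struct ocelot_vcap_filter *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->block_id = VCAP_IS2;		/* selects the IS2 arm of the action union */
	f->prio = 1;
	f->id = 1;
	f->ingress_port_mask = BIT(0);	/* match frames ingressing port 0 */
	f->key_type = OCELOT_VCAP_KEY_ANY;
	f->action.police_ena = true;
	f->action.pol.rate = 1000;	/* illustrative policer parameters */
	f->action.pol.burst = 10000;

	return ocelot_vcap_filter_add(ocelot, f, extack);
}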
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.h b/drivers/net/ethernet/mscc/ocelot_vcap.h
index 0dfbfc011b2e..82fd10581a14 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.h
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.h
@@ -11,6 +11,8 @@
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
+#define OCELOT_POLICER_DISCARD 0x17f
+
struct ocelot_ipv4 {
u8 addr[4];
};
@@ -76,6 +78,11 @@ struct ocelot_vcap_udp_tcp {
u16 mask;
};
+struct ocelot_vcap_port {
+ u8 value;
+ u8 mask;
+};
+
enum ocelot_vcap_key_type {
OCELOT_VCAP_KEY_ANY,
OCELOT_VCAP_KEY_ETYPE,
@@ -158,6 +165,7 @@ struct ocelot_vcap_key_ipv4 {
struct ocelot_vcap_key_ipv6 {
struct ocelot_vcap_u8 proto; /* IPv6 protocol */
struct ocelot_vcap_u128 sip; /* IPv6 source (byte 0-7 ignored) */
+ struct ocelot_vcap_u128 dip; /* IPv6 destination (byte 0-7 ignored) */
enum ocelot_vcap_bit ttl; /* TTL zero */
struct ocelot_vcap_u8 ds;
struct ocelot_vcap_u48 data; /* Not UDP/TCP: IP data */
@@ -174,10 +182,71 @@ struct ocelot_vcap_key_ipv6 {
enum ocelot_vcap_bit seq_zero; /* TCP sequence number is zero */
};
-enum ocelot_vcap_action {
- OCELOT_VCAP_ACTION_DROP,
- OCELOT_VCAP_ACTION_TRAP,
- OCELOT_VCAP_ACTION_POLICE,
+enum ocelot_mask_mode {
+ OCELOT_MASK_MODE_NONE,
+ OCELOT_MASK_MODE_PERMIT_DENY,
+ OCELOT_MASK_MODE_POLICY,
+ OCELOT_MASK_MODE_REDIRECT,
+};
+
+enum ocelot_es0_tag {
+ OCELOT_NO_ES0_TAG,
+ OCELOT_ES0_TAG,
+ OCELOT_FORCE_PORT_TAG,
+ OCELOT_FORCE_UNTAG,
+};
+
+enum ocelot_tag_tpid_sel {
+ OCELOT_TAG_TPID_SEL_8021Q,
+ OCELOT_TAG_TPID_SEL_8021AD,
+};
+
+struct ocelot_vcap_action {
+ union {
+ /* VCAP ES0 */
+ struct {
+ enum ocelot_es0_tag push_outer_tag;
+ enum ocelot_es0_tag push_inner_tag;
+ enum ocelot_tag_tpid_sel tag_a_tpid_sel;
+ int tag_a_vid_sel;
+ int tag_a_pcp_sel;
+ u16 vid_a_val;
+ u8 pcp_a_val;
+ u8 dei_a_val;
+ enum ocelot_tag_tpid_sel tag_b_tpid_sel;
+ int tag_b_vid_sel;
+ int tag_b_pcp_sel;
+ u16 vid_b_val;
+ u8 pcp_b_val;
+ u8 dei_b_val;
+ };
+
+ /* VCAP IS1 */
+ struct {
+ bool vid_replace_ena;
+ u16 vid;
+ bool vlan_pop_cnt_ena;
+ int vlan_pop_cnt;
+ bool pcp_dei_ena;
+ u8 pcp;
+ u8 dei;
+ bool qos_ena;
+ u8 qos_val;
+ u8 pag_override_mask;
+ u8 pag_val;
+ };
+
+ /* VCAP IS2 */
+ struct {
+ bool cpu_copy_ena;
+ u8 cpu_qu_num;
+ enum ocelot_mask_mode mask_mode;
+ unsigned long port_mask;
+ bool police_ena;
+ struct ocelot_policer pol;
+ u32 pol_ix;
+ };
+ };
};
struct ocelot_vcap_stats {
@@ -186,15 +255,30 @@ struct ocelot_vcap_stats {
u64 used;
};
+enum ocelot_vcap_filter_type {
+ OCELOT_VCAP_FILTER_DUMMY,
+ OCELOT_VCAP_FILTER_PAG,
+ OCELOT_VCAP_FILTER_OFFLOAD,
+};
+
struct ocelot_vcap_filter {
struct list_head list;
+ enum ocelot_vcap_filter_type type;
+ int block_id;
+ int goto_target;
+ int lookup;
+ u8 pag;
u16 prio;
u32 id;
- enum ocelot_vcap_action action;
+ struct ocelot_vcap_action action;
struct ocelot_vcap_stats stats;
+ /* For VCAP IS1 and IS2 */
unsigned long ingress_port_mask;
+ /* For VCAP ES0 */
+ struct ocelot_vcap_port ingress_port;
+ struct ocelot_vcap_port egress_port;
enum ocelot_vcap_bit dmac_mc;
enum ocelot_vcap_bit dmac_bc;
@@ -210,8 +294,6 @@ struct ocelot_vcap_filter {
struct ocelot_vcap_key_ipv4 ipv4;
struct ocelot_vcap_key_ipv6 ipv6;
} key;
- struct ocelot_policer pol;
- u32 pol_ix;
};
int ocelot_vcap_filter_add(struct ocelot *ocelot,
@@ -221,7 +303,10 @@ int ocelot_vcap_filter_del(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
int ocelot_vcap_filter_stats_update(struct ocelot *ocelot,
struct ocelot_vcap_filter *rule);
+struct ocelot_vcap_filter *
+ocelot_vcap_block_find_filter_by_id(struct ocelot_vcap_block *block, int id);
+void ocelot_detect_vcap_constants(struct ocelot *ocelot);
int ocelot_vcap_init(struct ocelot *ocelot);
int ocelot_setup_tc_cls_flower(struct ocelot_port_private *priv,
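And a corresponding sketch for the ES0 arm of the union, which retags frames on egress, reusing the filter pointer from the previous sketch; the field semantics are inferred from the names above and the values are made up:

	f->block_id = VCAP_ES0;
	f->egress_port.value = 2;		/* match egress port 2 exactly */
	f->egress_port.mask = GENMASK(3, 0);	/* EGR_PORT key is 4 bits wide */
	f->action.push_outer_tag = OCELOT_ES0_TAG;
	f->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q;
	f->action.vid_a_val = 100;		/* rewrite the outer VID to 100 */
	f->action.pcp_a_val = 0;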
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 8a6917691ba6..dc00772950e5 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -19,10 +19,6 @@
#include "ocelot.h"
#define IFH_EXTRACT_BITFIELD64(x, o, w) (((x) >> (o)) & GENMASK_ULL((w) - 1, 0))
-#define VSC7514_VCAP_IS2_CNT 64
-#define VSC7514_VCAP_IS2_ENTRY_WIDTH 376
-#define VSC7514_VCAP_IS2_ACTION_WIDTH 99
-#define VSC7514_VCAP_PORT_CNT 11
static const u32 ocelot_ana_regmap[] = {
REG(ANA_ADVLEARN, 0x009000),
@@ -241,14 +237,27 @@ static const u32 ocelot_sys_regmap[] = {
REG(SYS_PTP_CFG, 0x0006c4),
};
-static const u32 ocelot_s2_regmap[] = {
- REG(S2_CORE_UPDATE_CTRL, 0x000000),
- REG(S2_CORE_MV_CFG, 0x000004),
- REG(S2_CACHE_ENTRY_DAT, 0x000008),
- REG(S2_CACHE_MASK_DAT, 0x000108),
- REG(S2_CACHE_ACTION_DAT, 0x000208),
- REG(S2_CACHE_CNT_DAT, 0x000308),
- REG(S2_CACHE_TG_DAT, 0x000388),
+static const u32 ocelot_vcap_regmap[] = {
+ /* VCAP_CORE_CFG */
+ REG(VCAP_CORE_UPDATE_CTRL, 0x000000),
+ REG(VCAP_CORE_MV_CFG, 0x000004),
+ /* VCAP_CORE_CACHE */
+ REG(VCAP_CACHE_ENTRY_DAT, 0x000008),
+ REG(VCAP_CACHE_MASK_DAT, 0x000108),
+ REG(VCAP_CACHE_ACTION_DAT, 0x000208),
+ REG(VCAP_CACHE_CNT_DAT, 0x000308),
+ REG(VCAP_CACHE_TG_DAT, 0x000388),
+ /* VCAP_CONST */
+ REG(VCAP_CONST_VCAP_VER, 0x000398),
+ REG(VCAP_CONST_ENTRY_WIDTH, 0x00039c),
+ REG(VCAP_CONST_ENTRY_CNT, 0x0003a0),
+ REG(VCAP_CONST_ENTRY_SWCNT, 0x0003a4),
+ REG(VCAP_CONST_ENTRY_TG_WIDTH, 0x0003a8),
+ REG(VCAP_CONST_ACTION_DEF_CNT, 0x0003ac),
+ REG(VCAP_CONST_ACTION_WIDTH, 0x0003b0),
+ REG(VCAP_CONST_CNT_WIDTH, 0x0003b4),
+ REG(VCAP_CONST_CORE_CNT, 0x0003b8),
+ REG(VCAP_CONST_IF_CNT, 0x0003bc),
};
static const u32 ocelot_ptp_regmap[] = {
@@ -311,7 +320,9 @@ static const u32 *ocelot_regmap[TARGET_MAX] = {
[QSYS] = ocelot_qsys_regmap,
[REW] = ocelot_rew_regmap,
[SYS] = ocelot_sys_regmap,
- [S2] = ocelot_s2_regmap,
+ [S0] = ocelot_vcap_regmap,
+ [S1] = ocelot_vcap_regmap,
+ [S2] = ocelot_vcap_regmap,
[PTP] = ocelot_ptp_regmap,
[DEV_GMII] = ocelot_dev_gmii_regmap,
};
@@ -756,6 +767,115 @@ static u16 ocelot_wm_enc(u16 value)
static const struct ocelot_ops ocelot_ops = {
.reset = ocelot_reset,
.wm_enc = ocelot_wm_enc,
+ .port_to_netdev = ocelot_port_to_netdev,
+ .netdev_to_port = ocelot_netdev_to_port,
+};
+
+static const struct vcap_field vsc7514_vcap_es0_keys[] = {
+ [VCAP_ES0_EGR_PORT] = { 0, 4},
+ [VCAP_ES0_IGR_PORT] = { 4, 4},
+ [VCAP_ES0_RSV] = { 8, 2},
+ [VCAP_ES0_L2_MC] = { 10, 1},
+ [VCAP_ES0_L2_BC] = { 11, 1},
+ [VCAP_ES0_VID] = { 12, 12},
+ [VCAP_ES0_DP] = { 24, 1},
+ [VCAP_ES0_PCP] = { 25, 3},
+};
+
+static const struct vcap_field vsc7514_vcap_es0_actions[] = {
+ [VCAP_ES0_ACT_PUSH_OUTER_TAG] = { 0, 2},
+ [VCAP_ES0_ACT_PUSH_INNER_TAG] = { 2, 1},
+ [VCAP_ES0_ACT_TAG_A_TPID_SEL] = { 3, 2},
+ [VCAP_ES0_ACT_TAG_A_VID_SEL] = { 5, 1},
+ [VCAP_ES0_ACT_TAG_A_PCP_SEL] = { 6, 2},
+ [VCAP_ES0_ACT_TAG_A_DEI_SEL] = { 8, 2},
+ [VCAP_ES0_ACT_TAG_B_TPID_SEL] = { 10, 2},
+ [VCAP_ES0_ACT_TAG_B_VID_SEL] = { 12, 1},
+ [VCAP_ES0_ACT_TAG_B_PCP_SEL] = { 13, 2},
+ [VCAP_ES0_ACT_TAG_B_DEI_SEL] = { 15, 2},
+ [VCAP_ES0_ACT_VID_A_VAL] = { 17, 12},
+ [VCAP_ES0_ACT_PCP_A_VAL] = { 29, 3},
+ [VCAP_ES0_ACT_DEI_A_VAL] = { 32, 1},
+ [VCAP_ES0_ACT_VID_B_VAL] = { 33, 12},
+ [VCAP_ES0_ACT_PCP_B_VAL] = { 45, 3},
+ [VCAP_ES0_ACT_DEI_B_VAL] = { 48, 1},
+ [VCAP_ES0_ACT_RSV] = { 49, 24},
+ [VCAP_ES0_ACT_HIT_STICKY] = { 73, 1},
+};
+
+static const struct vcap_field vsc7514_vcap_is1_keys[] = {
+ [VCAP_IS1_HK_TYPE] = { 0, 1},
+ [VCAP_IS1_HK_LOOKUP] = { 1, 2},
+ [VCAP_IS1_HK_IGR_PORT_MASK] = { 3, 12},
+ [VCAP_IS1_HK_RSV] = { 15, 9},
+ [VCAP_IS1_HK_OAM_Y1731] = { 24, 1},
+ [VCAP_IS1_HK_L2_MC] = { 25, 1},
+ [VCAP_IS1_HK_L2_BC] = { 26, 1},
+ [VCAP_IS1_HK_IP_MC] = { 27, 1},
+ [VCAP_IS1_HK_VLAN_TAGGED] = { 28, 1},
+ [VCAP_IS1_HK_VLAN_DBL_TAGGED] = { 29, 1},
+ [VCAP_IS1_HK_TPID] = { 30, 1},
+ [VCAP_IS1_HK_VID] = { 31, 12},
+ [VCAP_IS1_HK_DEI] = { 43, 1},
+ [VCAP_IS1_HK_PCP] = { 44, 3},
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ [VCAP_IS1_HK_L2_SMAC] = { 47, 48},
+ [VCAP_IS1_HK_ETYPE_LEN] = { 95, 1},
+ [VCAP_IS1_HK_ETYPE] = { 96, 16},
+ [VCAP_IS1_HK_IP_SNAP] = {112, 1},
+ [VCAP_IS1_HK_IP4] = {113, 1},
+ /* Layer-3 Information */
+ [VCAP_IS1_HK_L3_FRAGMENT] = {114, 1},
+ [VCAP_IS1_HK_L3_FRAG_OFS_GT0] = {115, 1},
+ [VCAP_IS1_HK_L3_OPTIONS] = {116, 1},
+ [VCAP_IS1_HK_L3_DSCP] = {117, 6},
+ [VCAP_IS1_HK_L3_IP4_SIP] = {123, 32},
+ /* Layer-4 Information */
+ [VCAP_IS1_HK_TCP_UDP] = {155, 1},
+ [VCAP_IS1_HK_TCP] = {156, 1},
+ [VCAP_IS1_HK_L4_SPORT] = {157, 16},
+ [VCAP_IS1_HK_L4_RNG] = {173, 8},
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ [VCAP_IS1_HK_IP4_INNER_TPID] = { 47, 1},
+ [VCAP_IS1_HK_IP4_INNER_VID] = { 48, 12},
+ [VCAP_IS1_HK_IP4_INNER_DEI] = { 60, 1},
+ [VCAP_IS1_HK_IP4_INNER_PCP] = { 61, 3},
+ [VCAP_IS1_HK_IP4_IP4] = { 64, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAGMENT] = { 65, 1},
+ [VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0] = { 66, 1},
+ [VCAP_IS1_HK_IP4_L3_OPTIONS] = { 67, 1},
+ [VCAP_IS1_HK_IP4_L3_DSCP] = { 68, 6},
+ [VCAP_IS1_HK_IP4_L3_IP4_DIP] = { 74, 32},
+ [VCAP_IS1_HK_IP4_L3_IP4_SIP] = {106, 32},
+ [VCAP_IS1_HK_IP4_L3_PROTO] = {138, 8},
+ [VCAP_IS1_HK_IP4_TCP_UDP] = {146, 1},
+ [VCAP_IS1_HK_IP4_TCP] = {147, 1},
+ [VCAP_IS1_HK_IP4_L4_RNG] = {148, 8},
+ [VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE] = {156, 32},
+};
+
+static const struct vcap_field vsc7514_vcap_is1_actions[] = {
+ [VCAP_IS1_ACT_DSCP_ENA] = { 0, 1},
+ [VCAP_IS1_ACT_DSCP_VAL] = { 1, 6},
+ [VCAP_IS1_ACT_QOS_ENA] = { 7, 1},
+ [VCAP_IS1_ACT_QOS_VAL] = { 8, 3},
+ [VCAP_IS1_ACT_DP_ENA] = { 11, 1},
+ [VCAP_IS1_ACT_DP_VAL] = { 12, 1},
+ [VCAP_IS1_ACT_PAG_OVERRIDE_MASK] = { 13, 8},
+ [VCAP_IS1_ACT_PAG_VAL] = { 21, 8},
+ [VCAP_IS1_ACT_RSV] = { 29, 9},
+ /* The fields below are incorrectly shifted by 2 in the manual */
+ [VCAP_IS1_ACT_VID_REPLACE_ENA] = { 38, 1},
+ [VCAP_IS1_ACT_VID_ADD_VAL] = { 39, 12},
+ [VCAP_IS1_ACT_FID_SEL] = { 51, 2},
+ [VCAP_IS1_ACT_FID_VAL] = { 53, 13},
+ [VCAP_IS1_ACT_PCP_DEI_ENA] = { 66, 1},
+ [VCAP_IS1_ACT_PCP_VAL] = { 67, 3},
+ [VCAP_IS1_ACT_DEI_VAL] = { 70, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT_ENA] = { 71, 1},
+ [VCAP_IS1_ACT_VLAN_POP_CNT] = { 72, 2},
+ [VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA] = { 74, 4},
+ [VCAP_IS1_ACT_HIT_STICKY] = { 78, 1},
};
static const struct vcap_field vsc7514_vcap_is2_keys[] = {
@@ -856,15 +976,32 @@ static const struct vcap_field vsc7514_vcap_is2_actions[] = {
[VCAP_IS2_ACT_HIT_CNT] = { 49, 32},
};
-static const struct vcap_props vsc7514_vcap_props[] = {
+static struct vcap_props vsc7514_vcap_props[] = {
+ [VCAP_ES0] = {
+ .action_type_width = 0,
+ .action_table = {
+ [ES0_ACTION_TYPE_NORMAL] = {
+ .width = 73, /* HIT_STICKY not included */
+ .count = 1,
+ },
+ },
+ .target = S0,
+ .keys = vsc7514_vcap_es0_keys,
+ .actions = vsc7514_vcap_es0_actions,
+ },
+ [VCAP_IS1] = {
+ .action_type_width = 0,
+ .action_table = {
+ [IS1_ACTION_TYPE_NORMAL] = {
+ .width = 78, /* HIT_STICKY not included */
+ .count = 4,
+ },
+ },
+ .target = S1,
+ .keys = vsc7514_vcap_is1_keys,
+ .actions = vsc7514_vcap_is1_actions,
+ },
[VCAP_IS2] = {
- .tg_width = 2,
- .sw_count = 4,
- .entry_count = VSC7514_VCAP_IS2_CNT,
- .entry_width = VSC7514_VCAP_IS2_ENTRY_WIDTH,
- .action_count = VSC7514_VCAP_IS2_CNT +
- VSC7514_VCAP_PORT_CNT + 2,
- .action_width = 99,
.action_type_width = 1,
.action_table = {
[IS2_ACTION_TYPE_NORMAL] = {
@@ -876,8 +1013,9 @@ static const struct vcap_props vsc7514_vcap_props[] = {
.count = 4
},
},
- .counter_words = 4,
- .counter_width = 32,
+ .target = S2,
+ .keys = vsc7514_vcap_is2_keys,
+ .actions = vsc7514_vcap_is2_actions,
},
};
@@ -932,10 +1070,6 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
if (!ocelot->ports)
return -ENOMEM;
- /* No NPI port */
- ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE,
- OCELOT_TAG_PREFIX_NONE);
-
for_each_available_child_of_node(ports, portnp) {
struct ocelot_port_private *priv;
struct ocelot_port *ocelot_port;
@@ -1043,6 +1177,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
{ QSYS, "qsys" },
{ ANA, "ana" },
{ QS, "qs" },
+ { S0, "s0" },
+ { S1, "s1" },
{ S2, "s2" },
{ PTP, "ptp", 1 },
};
@@ -1119,9 +1255,10 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
ocelot->num_phys_ports = of_get_child_count(ports);
- ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys;
- ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions;
ocelot->vcap = vsc7514_vcap_props;
+ ocelot->inj_prefix = OCELOT_TAG_PREFIX_NONE;
+ ocelot->xtr_prefix = OCELOT_TAG_PREFIX_NONE;
+ ocelot->npi = -1;
err = ocelot_init(ocelot);
if (err)
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 4a5beafa0493..1634ca6d4a8f 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3543,11 +3543,10 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
}
- napi_hash_del(&ss->napi);
- netif_napi_del(&ss->napi);
+ __netif_napi_del(&ss->napi);
}
/* Wait till napi structs are no longer used, and then free ss. */
- synchronize_rcu();
+ synchronize_net();
kfree(mgp->ss);
mgp->ss = NULL;
}
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 3de8430ee8c5..b81e1487945c 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1916,9 +1916,9 @@ static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
static int alloc_ring(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
- np->rx_ring = pci_alloc_consistent(np->pci_dev,
- sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
- &np->ring_dma);
+ np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ &np->ring_dma, GFP_KERNEL);
if (!np->rx_ring)
return -ENOMEM;
np->tx_ring = &np->rx_ring[RX_RING_SIZE];
@@ -1939,10 +1939,10 @@ static void refill_rx(struct net_device *dev)
np->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- np->rx_dma[entry] = pci_map_single(np->pci_dev,
- skb->data, buflen, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(np->pci_dev,
- np->rx_dma[entry])) {
+ np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
+ skb->data, buflen,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
dev_kfree_skb_any(skb);
np->rx_skbuff[entry] = NULL;
break; /* Better luck next round. */
@@ -2013,9 +2013,8 @@ static void drain_tx(struct net_device *dev)
for (i = 0; i < TX_RING_SIZE; i++) {
if (np->tx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->tx_dma[i], np->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
+ np->tx_skbuff[i]->len, DMA_TO_DEVICE);
dev_kfree_skb(np->tx_skbuff[i]);
dev->stats.tx_dropped++;
}
@@ -2034,9 +2033,9 @@ static void drain_rx(struct net_device *dev)
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev, np->rx_dma[i],
- buflen + NATSEMI_PADDING,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
+ DMA_FROM_DEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
np->rx_skbuff[i] = NULL;
@@ -2052,9 +2051,9 @@ static void drain_ring(struct net_device *dev)
static void free_ring(struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
- pci_free_consistent(np->pci_dev,
- sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
- np->rx_ring, np->ring_dma);
+ dma_free_coherent(&np->pci_dev->dev,
+ sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ np->rx_ring, np->ring_dma);
}
static void reinit_rx(struct net_device *dev)
@@ -2101,9 +2100,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
entry = np->cur_tx % TX_RING_SIZE;
np->tx_skbuff[entry] = skb;
- np->tx_dma[entry] = pci_map_single(np->pci_dev,
- skb->data,skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
+ np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
np->tx_skbuff[entry] = NULL;
dev_kfree_skb_irq(skb);
dev->stats.tx_dropped++;
@@ -2169,9 +2168,8 @@ static void netdev_tx_done(struct net_device *dev)
dev->stats.tx_window_errors++;
dev->stats.tx_errors++;
}
- pci_unmap_single(np->pci_dev,np->tx_dma[entry],
- np->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
+ np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
/* Free the original skb. */
dev_consume_skb_irq(np->tx_skbuff[entry]);
np->tx_skbuff[entry] = NULL;
@@ -2359,21 +2357,22 @@ static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
(skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
/* 16 byte align the IP header */
skb_reserve(skb, RX_OFFSET);
- pci_dma_sync_single_for_cpu(np->pci_dev,
- np->rx_dma[entry],
- buflen,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&np->pci_dev->dev,
+ np->rx_dma[entry],
+ buflen,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
np->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(np->pci_dev,
- np->rx_dma[entry],
- buflen,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&np->pci_dev->dev,
+ np->rx_dma[entry],
+ buflen,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(np->pci_dev, np->rx_dma[entry],
+ dma_unmap_single(&np->pci_dev->dev,
+ np->rx_dma[entry],
buflen + NATSEMI_PADDING,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_put(skb = np->rx_skbuff[entry], pkt_len);
np->rx_skbuff[entry] = NULL;
}
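The natsemi conversion above is mechanical; reduced to its skeleton, the streaming-DMA idiom it adopts looks like this (a sketch around a hypothetical receive buffer):

static int example_rx_map(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t dma;

	dma = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -ENOMEM;

	/* ... the device DMAs into buf; before the CPU reads it: */
	dma_sync_single_for_cpu(&pdev->dev, dma, len, DMA_FROM_DEVICE);
	/* ... CPU reads/copies the data ... */
	dma_sync_single_for_device(&pdev->dev, dma, len, DMA_FROM_DEVICE);

	dma_unmap_single(&pdev->dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}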
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 8e24c7acf79b..72794d158871 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -526,8 +526,8 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
- buf = pci_map_single(dev->pci_dev, skb->data,
- REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ buf = dma_map_single(&dev->pci_dev->dev, skb->data, REAL_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
/* update link of previous rx */
if (likely(next_empty != dev->rx_info.next_rx))
@@ -600,12 +600,14 @@ static void phy_intr(struct net_device *ndev)
struct ns83820 *dev = PRIV(ndev);
static const char *speeds[] = { "10", "100", "1000", "1000(?)", "1000F" };
u32 cfg, new_cfg;
- u32 tbisr, tanar, tanlpar;
+ u32 tanar, tanlpar;
int speed, fullduplex, newlinkstate;
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
if (dev->CFG_cache & CFG_TBI_EN) {
+ u32 __maybe_unused tbisr;
+
/* we have an optical transceiver */
tbisr = readl(dev->base + TBISR);
tanar = readl(dev->base + TANAR);
@@ -858,8 +860,8 @@ static void rx_irq(struct net_device *ndev)
mb();
clear_rx_desc(dev, next_rx);
- pci_unmap_single(dev->pci_dev, bufptr,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, bufptr, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
len = cmdsts & CMDSTS_LEN_MASK;
#ifdef NS83820_VLAN_ACCEL_SUPPORT
/* NH: As was mentioned below, this chip is kinda
@@ -923,10 +925,10 @@ out:
spin_unlock_irqrestore(&info->lock, flags);
}
-static void rx_action(unsigned long _dev)
+static void rx_action(struct tasklet_struct *t)
{
- struct net_device *ndev = (void *)_dev;
- struct ns83820 *dev = PRIV(ndev);
+ struct ns83820 *dev = from_tasklet(dev, t, rx_tasklet);
+ struct net_device *ndev = dev->ndev;
rx_irq(ndev);
writel(ihr, dev->base + IHR);
@@ -985,17 +987,13 @@ static void do_tx_done(struct net_device *ndev)
len = cmdsts & CMDSTS_LEN_MASK;
addr = desc_addr_get(desc + DESC_BUFPTR);
if (skb) {
- pci_unmap_single(dev->pci_dev,
- addr,
- len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev, addr, len,
+ DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
} else
- pci_unmap_page(dev->pci_dev,
- addr,
- len,
- PCI_DMA_TODEVICE);
+ dma_unmap_page(&dev->pci_dev->dev, addr, len,
+ DMA_TO_DEVICE);
tx_done_idx = (tx_done_idx + 1) % NR_TX_DESC;
dev->tx_done_idx = tx_done_idx;
@@ -1023,10 +1021,10 @@ static void ns83820_cleanup_tx(struct ns83820 *dev)
dev->tx_skbs[i] = NULL;
if (skb) {
__le32 *desc = dev->tx_descs + (i * DESC_SIZE);
- pci_unmap_single(dev->pci_dev,
- desc_addr_get(desc + DESC_BUFPTR),
- le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&dev->pci_dev->dev,
+ desc_addr_get(desc + DESC_BUFPTR),
+ le32_to_cpu(desc[DESC_CMDSTS]) & CMDSTS_LEN_MASK,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
atomic_dec(&dev->nr_tx_skbs);
}
@@ -1121,7 +1119,8 @@ again:
len = skb->len;
if (nr_frags)
len -= skb->data_len;
- buf = pci_map_single(dev->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ buf = dma_map_single(&dev->pci_dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
first_desc = dev->tx_descs + (free_idx * DESC_SIZE);
@@ -1207,7 +1206,7 @@ static int ns83820_get_link_ksettings(struct net_device *ndev,
struct ethtool_link_ksettings *cmd)
{
struct ns83820 *dev = PRIV(ndev);
- u32 cfg, tanar, tbicr;
+ u32 cfg, tbicr;
int fullduplex = 0;
u32 supported;
@@ -1226,7 +1225,7 @@ static int ns83820_get_link_ksettings(struct net_device *ndev,
/* read current configuration */
cfg = readl(dev->base + CFG) ^ SPDSTS_POLARITY;
- tanar = readl(dev->base + TANAR);
+ readl(dev->base + TANAR);
tbicr = readl(dev->base + TBICR);
fullduplex = (cfg & CFG_DUPSTS) ? 1 : 0;
@@ -1902,12 +1901,12 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
/* See if we can set the dma mask early on; failure is fatal. */
if (sizeof(dma_addr_t) == 8 &&
- !pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) {
using_dac = 1;
- } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
using_dac = 0;
} else {
- dev_warn(&pci_dev->dev, "pci_set_dma_mask failed!\n");
+ dev_warn(&pci_dev->dev, "dma_set_mask failed!\n");
return -ENODEV;
}
@@ -1927,7 +1926,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
SET_NETDEV_DEV(ndev, &pci_dev->dev);
INIT_WORK(&dev->tq_refill, queue_refill);
- tasklet_init(&dev->rx_tasklet, rx_action, (unsigned long)ndev);
+ tasklet_setup(&dev->rx_tasklet, rx_action);
err = pci_enable_device(pci_dev);
if (err) {
@@ -1938,10 +1937,12 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
addr = pci_resource_start(pci_dev, 1);
dev->base = ioremap(addr, PAGE_SIZE);
- dev->tx_descs = pci_alloc_consistent(pci_dev,
- 4 * DESC_SIZE * NR_TX_DESC, &dev->tx_phy_descs);
- dev->rx_info.descs = pci_alloc_consistent(pci_dev,
- 4 * DESC_SIZE * NR_RX_DESC, &dev->rx_info.phy_descs);
+ dev->tx_descs = dma_alloc_coherent(&pci_dev->dev,
+ 4 * DESC_SIZE * NR_TX_DESC,
+ &dev->tx_phy_descs, GFP_KERNEL);
+ dev->rx_info.descs = dma_alloc_coherent(&pci_dev->dev,
+ 4 * DESC_SIZE * NR_RX_DESC,
+ &dev->rx_info.phy_descs, GFP_KERNEL);
err = -ENOMEM;
if (!dev->base || !dev->tx_descs || !dev->rx_info.descs)
goto out_disable;
@@ -2183,8 +2184,10 @@ out_free_irq:
out_disable:
if (dev->base)
iounmap(dev->base);
- pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_TX_DESC, dev->tx_descs, dev->tx_phy_descs);
- pci_free_consistent(pci_dev, 4 * DESC_SIZE * NR_RX_DESC, dev->rx_info.descs, dev->rx_info.phy_descs);
+ dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ dma_free_coherent(&pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(pci_dev);
out_free:
free_netdev(ndev);
@@ -2205,10 +2208,10 @@ static void ns83820_remove_one(struct pci_dev *pci_dev)
unregister_netdev(ndev);
free_irq(dev->pci_dev->irq, ndev);
iounmap(dev->base);
- pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_TX_DESC,
- dev->tx_descs, dev->tx_phy_descs);
- pci_free_consistent(dev->pci_dev, 4 * DESC_SIZE * NR_RX_DESC,
- dev->rx_info.descs, dev->rx_info.phy_descs);
+ dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_TX_DESC,
+ dev->tx_descs, dev->tx_phy_descs);
+ dma_free_coherent(&dev->pci_dev->dev, 4 * DESC_SIZE * NR_RX_DESC,
+ dev->rx_info.descs, dev->rx_info.phy_descs);
pci_disable_device(dev->pci_dev);
free_netdev(ndev);
}
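The ns83820 tasklet change follows the tasklet_setup()/from_tasklet() conversion pattern: the callback now receives the tasklet pointer and recovers its container through a container_of() wrapper instead of being handed an unsigned long. A self-contained sketch with hypothetical names:

struct foo_priv {
	struct net_device *ndev;
	struct tasklet_struct rx_tasklet;
};

static void foo_rx_action(struct tasklet_struct *t)
{
	struct foo_priv *priv = from_tasklet(priv, t, rx_tasklet);

	/* process receive work for priv->ndev */
}

/* at probe time: */
tasklet_setup(&priv->rx_tasklet, foo_rx_action);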
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index dd3605aa5f23..d17d1b4f2585 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -143,7 +143,7 @@ static int sonic_open(struct net_device *dev)
/*
* Initialize the SONIC
*/
- sonic_init(dev);
+ sonic_init(dev, true);
netif_start_queue(dev);
@@ -153,7 +153,7 @@ static int sonic_open(struct net_device *dev)
}
/* Wait for the SONIC to become idle. */
-static void sonic_quiesce(struct net_device *dev, u16 mask)
+static void sonic_quiesce(struct net_device *dev, u16 mask, bool may_sleep)
{
struct sonic_local * __maybe_unused lp = netdev_priv(dev);
int i;
@@ -163,7 +163,7 @@ static void sonic_quiesce(struct net_device *dev, u16 mask)
bits = SONIC_READ(SONIC_CMD) & mask;
if (!bits)
return;
- if (irqs_disabled() || in_interrupt())
+ if (!may_sleep)
udelay(20);
else
usleep_range(100, 200);
@@ -187,7 +187,7 @@ static int sonic_close(struct net_device *dev)
* stop the SONIC, disable interrupts
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
- sonic_quiesce(dev, SONIC_CR_ALL);
+ sonic_quiesce(dev, SONIC_CR_ALL, true);
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
@@ -229,7 +229,7 @@ static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
* disable all interrupts before releasing DMA buffers
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
- sonic_quiesce(dev, SONIC_CR_ALL);
+ sonic_quiesce(dev, SONIC_CR_ALL, false);
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
@@ -246,7 +246,7 @@ static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue)
}
}
/* Try to restart the adaptor. */
- sonic_init(dev);
+ sonic_init(dev, false);
lp->stats.tx_errors++;
netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
@@ -692,9 +692,9 @@ static void sonic_multicast_list(struct net_device *dev)
/* LCAM and TXP commands can't be used simultaneously */
spin_lock_irqsave(&lp->lock, flags);
- sonic_quiesce(dev, SONIC_CR_TXP);
+ sonic_quiesce(dev, SONIC_CR_TXP, false);
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
- sonic_quiesce(dev, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM, false);
spin_unlock_irqrestore(&lp->lock, flags);
}
}
@@ -708,7 +708,7 @@ static void sonic_multicast_list(struct net_device *dev)
/*
* Initialize the SONIC ethernet controller.
*/
-static int sonic_init(struct net_device *dev)
+static int sonic_init(struct net_device *dev, bool may_sleep)
{
struct sonic_local *lp = netdev_priv(dev);
int i;
@@ -730,7 +730,7 @@ static int sonic_init(struct net_device *dev)
*/
SONIC_WRITE(SONIC_CMD, 0);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP);
- sonic_quiesce(dev, SONIC_CR_ALL);
+ sonic_quiesce(dev, SONIC_CR_ALL, may_sleep);
/*
* initialize the receive resource area
@@ -759,7 +759,7 @@ static int sonic_init(struct net_device *dev)
netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
- sonic_quiesce(dev, SONIC_CR_RRRA);
+ sonic_quiesce(dev, SONIC_CR_RRRA, may_sleep);
/*
* Initialize the receive descriptors so that they
@@ -834,7 +834,7 @@ static int sonic_init(struct net_device *dev)
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
- sonic_quiesce(dev, SONIC_CR_LCAM);
+ sonic_quiesce(dev, SONIC_CR_LCAM, may_sleep);
/*
* enable receiver, disable loopback
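The sonic change replaces the in_interrupt()-based guess with an explicit may_sleep flag threaded down from each caller, which knows its own context (sonic_open() and sonic_close() can sleep, while the timeout and multicast paths cannot). Condensed to a sketch with hypothetical names:

static void foo_wait_idle(struct foo_priv *priv, bool may_sleep)
{
	while (foo_busy(priv)) {	/* poll some busy bit */
		if (may_sleep)
			usleep_range(100, 200);
		else
			udelay(20);
	}
}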
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 3cbb62c860c8..a5b803eb8c8a 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -338,7 +338,7 @@ static void sonic_rx(struct net_device *dev);
static int sonic_close(struct net_device *dev);
static struct net_device_stats *sonic_get_stats(struct net_device *dev);
static void sonic_multicast_list(struct net_device *dev);
-static int sonic_init(struct net_device *dev);
+static int sonic_init(struct net_device *dev, bool may_sleep);
static void sonic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void sonic_msg_init(struct net_device *dev);
static int sonic_alloc_descriptors(struct net_device *dev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bc94970bea45..d13d92bf7447 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -1000,7 +1000,7 @@ static void free_shared_mem(struct s2io_nic *nic)
}
}
-/**
+/*
* s2io_verify_pci_mode -
*/
@@ -1035,7 +1035,7 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
}
static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
-/**
+/*
* s2io_print_pci_mode -
*/
static int s2io_print_pci_mode(struct s2io_nic *nic)
@@ -2064,6 +2064,9 @@ static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
/**
* verify_pcc_quiescent - Checks for PCC quiescent state
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
+ * @flag: boolean controlling function path
* Return: 1 if PCC is quiescent
* 0 if PCC is not quiescent
*/
@@ -2099,6 +2102,8 @@ static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
}
/**
* verify_xena_quiescence - Checks whether the H/W is ready
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* Description: Returns whether the H/W is ready to go or not. Depending
* on whether adapter enable bit was written or not the comparison
* differs and the calling function passes the input argument flag to
@@ -2305,6 +2310,9 @@ static int start_nic(struct s2io_nic *nic)
}
/**
* s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
+ * @fifo_data: fifo data pointer
+ * @txdlp: descriptor
+ * @get_off: unused
*/
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
struct TxD *txdlp, int get_off)
@@ -2391,7 +2399,7 @@ static void free_tx_buffers(struct s2io_nic *nic)
/**
* stop_nic - To stop the nic
- * @nic ; device private variable.
+ * @nic : device private variable.
* Description:
* This function does exactly the opposite of what the start_nic()
* function does. This function is called to stop the device.
@@ -2419,7 +2427,8 @@ static void stop_nic(struct s2io_nic *nic)
/**
* fill_rx_buffers - Allocates the Rx side skbs
- * @ring_info: per ring structure
+ * @nic : device private variable.
+ * @ring: per ring structure
* @from_card_up: If this is true, we will map the buffer to get
* the dma address for buf0 and buf1 to give it to the card.
* Else we will sync the already mapped buffer to give it to the card.
@@ -2864,7 +2873,7 @@ static void s2io_netpoll(struct net_device *dev)
/**
* rx_intr_handler - Rx interrupt handler
- * @ring_info: per ring structure.
+ * @ring_data: per ring structure.
* @budget: budget for napi processing.
* Description:
* If the interrupt is because of a received frame or if the
@@ -2972,7 +2981,7 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
/**
* tx_intr_handler - Transmit interrupt handler
- * @nic : device private variable
+ * @fifo_data : fifo data pointer
* Description:
* If an interrupt was raised to indicate DMA complete of the
* Tx packet, this function is called. It identifies the last TxD
@@ -3153,6 +3162,8 @@ static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
/**
* s2io_chk_xpak_counter - Function to check the status of the xpak counters
* @counter : counter value to be updated
+ * @regs_stat : registers status
+ * @index : index
* @flag : flag to indicate the status
* @type : counter type
* Description:
@@ -3309,8 +3320,9 @@ static void s2io_updt_xpak_counter(struct net_device *dev)
/**
* wait_for_cmd_complete - waits for a command to complete.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @addr: address
+ * @busy_bit: bit to check for busy
+ * @bit_state: state to check
* Description: Function that waits for a command to Write into RMAC
* ADDR DATA registers to be completed and returns either success or
* error depending on whether the command was complete or not.
@@ -4335,7 +4347,7 @@ static int do_s2io_chk_alarm_bit(u64 value, void __iomem *addr,
/**
* s2io_handle_errors - Xframe error indication handler
- * @nic: device private variable
+ * @dev_id: opaque handle to dev
* Description: Handle alarms such as loss of link, single or
* double ECC errors, critical and serious errors.
* Return Value:
@@ -4739,7 +4751,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/**
+/*
* s2io_updt_stats -
*/
static void s2io_updt_stats(struct s2io_nic *sp)
@@ -5168,7 +5180,7 @@ static u64 do_s2io_read_unicast_mc(struct s2io_nic *sp, int offset)
return tmp64 >> 16;
}
-/**
+/*
* s2io_set_mac_addr - driver entry point
*/
@@ -5243,8 +5255,7 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
/**
* s2io_ethtool_set_link_ksettings - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev : pointer to netdev
* @cmd: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
@@ -5273,8 +5284,7 @@ s2io_ethtool_set_link_ksettings(struct net_device *dev,
/**
* s2io_ethtool_get_link_ksettings - Return link specific information.
- * @sp : private member of the device structure, pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @cmd : pointer to the structure with parameters given by ethtool
* to return link information.
* Description:
@@ -5313,8 +5323,7 @@ s2io_ethtool_get_link_ksettings(struct net_device *dev,
/**
* s2io_ethtool_gdrvinfo - Returns driver specific information.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @info : pointer to the structure with parameters given by ethtool to
* return driver information.
* Description:
@@ -5335,11 +5344,10 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
/**
* s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
- * @sp: private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @regs : pointer to the structure with parameters given by ethtool for
- * dumping the registers.
- * @reg_space: The input argument into which all the registers are dumped.
+ * dumping the registers.
+ * @space: The input argument into which all the registers are dumped.
* Description:
* Dumps the entire register space of xFrame NIC into the user given
* buffer area.
@@ -5471,8 +5479,7 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
/**
* s2io_ethtool_getpause_data -Pause frame frame generation and reception.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
* Returns the Pause frame generation and reception capability of the NIC.
@@ -5496,8 +5503,7 @@ static void s2io_ethtool_getpause_data(struct net_device *dev,
/**
* s2io_ethtool_setpause_data - set/reset pause frame generation.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @ep : pointer to the structure with pause parameters given by ethtool.
* Description:
* It can be used to set or reset Pause frame generation or reception
@@ -5526,6 +5532,7 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
return 0;
}
+#define S2IO_DEV_ID 5
/**
* read_eeprom - reads 4 bytes of data from user given offset.
* @sp : private member of the device structure, which is a pointer to the
@@ -5541,8 +5548,6 @@ static int s2io_ethtool_setpause_data(struct net_device *dev,
* Return value:
* -1 on failure and 0 on success.
*/
-
-#define S2IO_DEV_ID 5
static int read_eeprom(struct s2io_nic *sp, int off, u64 *data)
{
int ret = -1;
@@ -5734,8 +5739,7 @@ static void s2io_vpd_read(struct s2io_nic *nic)
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf : user defined value to be written into Eeprom.
@@ -5771,11 +5775,10 @@ static int s2io_ethtool_geeprom(struct net_device *dev,
/**
* s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
- * @data_buf ; user defined value to be written into Eeprom.
+ * @data_buf : user defined value to be written into Eeprom.
* Description:
* Tries to write the user provided value in the Eeprom, at the offset
* given by the user.
@@ -6027,7 +6030,7 @@ static int s2io_bist_test(struct s2io_nic *sp, uint64_t *data)
/**
* s2io_link_test - verifies the link state of the nic
- * @sp ; private member of the device structure, which is a pointer to the
+ * @sp: private member of the device structure, which is a pointer to the
* s2io_nic structure.
* @data: variable that returns the result of each of the test conducted by
* the driver.
@@ -6150,8 +6153,7 @@ static int s2io_rldram_test(struct s2io_nic *sp, uint64_t *data)
/**
* s2io_ethtool_test - conducts 6 tests to determine the health of the card.
- * @sp : private member of the device structure, which is a pointer to the
- * s2io_nic structure.
+ * @dev: pointer to netdev
* @ethtest : pointer to a ethtool command specific structure that will be
* returned to the user.
* @data : variable that returns the result of each of the test
@@ -6597,7 +6599,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
/**
* s2io_ioctl - Entry point for the Ioctl
* @dev : Device pointer.
- * @ifr : An IOCTL specefic structure, that can contain a pointer to
+ * @rq : An IOCTL specific structure that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd : This is used to distinguish between the different commands that
* can be passed to the IOCTL functions.
@@ -6650,7 +6652,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
/**
* s2io_set_link - Set the Link status
- * @data: long pointer to device private structue
+ * @work: work struct containing a pointer to device private structure
* Description: Sets the link status for the adapter
*/
@@ -7187,7 +7189,7 @@ static int s2io_card_up(struct s2io_nic *sp)
/**
* s2io_restart_nic - Resets the NIC.
- * @data : long pointer to the device private structure
+ * @work : work struct containing a pointer to the device private structure
* Description:
* This function is scheduled to be run by the s2io_tx_watchdog
* function after 0.5 secs to reset the NIC. The idea is to reduce
@@ -7218,6 +7220,7 @@ out_unlock:
/**
* s2io_tx_watchdog - Watchdog for transmit side.
* @dev : Pointer to net device structure
+ * @txqueue: index of the hanging queue
* Description:
* This function is triggered if the Tx Queue is stopped
* for a pre-defined amount of time when the Interface is still up.
@@ -7242,11 +7245,8 @@ static void s2io_tx_watchdog(struct net_device *dev, unsigned int txqueue)
/**
* rx_osm_handler - To perform some OS related operations on SKB.
- * @sp: private member of the device structure,pointer to s2io_nic structure.
- * @skb : the socket buffer pointer.
- * @len : length of the packet
- * @cksum : FCS checksum of the frame.
- * @ring_no : the ring from which this RxD was extracted.
+ * @ring_data : the ring from which this RxD was extracted.
+ * @rxdp: descriptor
* Description:
* This function is called by the Rx interrupt service routine to perform
* some OS related operations on the SKB before passing it to the upper
@@ -7576,9 +7576,10 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
}
/**
- * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
- * or Traffic class respectively.
+ * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS or Traffic class respectively.
* @nic: device private variable
+ * @ds_codepoint: data
+ * @ring: ring index
* Description: The function configures the receive steering to
* desired receive ring.
* Return Value: SUCCESS on success and
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 78eba10300ae..f5d48d7c4ce2 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -988,6 +988,9 @@ exit:
/**
* vxge_hw_device_hw_info_get - Get the hw information
+ * @bar0: the bar
+ * @hw_info: the hw_info struct
+ *
* Returns the vpath mask that has the bits set for each vpath allocated
* for the driver, FW version information, and the first mac address for
* each vpath
@@ -2303,16 +2306,9 @@ exit:
static inline void
vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
{
- gfp_t flags;
void *vaddr;
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
-
- vaddr = kmalloc((size), flags);
-
+ vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
}
@@ -3926,7 +3922,7 @@ exit:
/**
* vxge_hw_vpath_check_leak - Check for memory leak
- * @ringh: Handle to the ring object used for receive
+ * @ring: Handle to the ring object used for receive
*
* If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
* PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.h b/drivers/net/ethernet/neterion/vxge/vxge-config.h
index 373165119850..0cd0750484ae 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.h
@@ -1899,18 +1899,13 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
struct pci_dev **p_dmah,
struct pci_dev **p_dma_acch)
{
- gfp_t flags;
void *vaddr;
unsigned long misaligned = 0;
int realloc_flag = 0;
*p_dma_acch = *p_dmah = NULL;
- if (in_interrupt())
- flags = GFP_ATOMIC | GFP_DMA;
- else
- flags = GFP_KERNEL | GFP_DMA;
realloc:
- vaddr = kmalloc((size), flags);
+ vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);
if (vaddr == NULL)
return vaddr;
misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
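
Both vxge allocation helpers above previously chose GFP flags with
in_interrupt(), a pattern being removed tree-wide because a callee cannot
reliably infer its caller's context; the hunks assume these allocation paths
only run in process context. A before/after sketch of the simplification:

    /* before: flags depend on a context test the callee should not make */
    gfp_t flags = in_interrupt() ? (GFP_ATOMIC | GFP_DMA)
                                 : (GFP_KERNEL | GFP_DMA);
    vaddr = kmalloc(size, flags);

    /* after: callers are known to run in process context, so the
     * stronger (possibly sleeping) GFP_KERNEL allocation is always safe
     */
    vaddr = kmalloc(size, GFP_KERNEL | GFP_DMA);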
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index 03c3d1230c17..4d91026485ae 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -119,7 +119,7 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
* @dev: device pointer.
* @regs: pointer to the structure with parameters given by ethtool for
* dumping the registers.
- * @reg_space: The input argument into which all the registers are dumped.
+ * @space: The input argument into which all the registers are dumped.
*
* Dumps the vpath register space of Titan NIC into the user given
* buffer area.
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded4e275086..87892bd992b1 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1275,6 +1275,7 @@ _set_all_mcast:
/**
* vxge_set_mac_addr
* @dev: pointer to the device structure
+ * @p: socket info
*
* Update entry "0" (default MAC addr)
*/
@@ -1799,7 +1800,7 @@ static void vxge_reset(struct work_struct *work)
/**
* vxge_poll - Receive handler when Receive Polling is used.
- * @dev: pointer to the device structure.
+ * @napi: pointer to the napi structure.
* @budget: Number of packets budgeted to be processed in this iteration.
*
* This function comes into picture only if Receive side is being handled
@@ -3096,7 +3097,7 @@ static int vxge_change_mtu(struct net_device *dev, int new_mtu)
/**
* vxge_get_stats64
* @dev: pointer to the device structure
- * @stats: pointer to struct rtnl_link_stats64
+ * @net_stats: pointer to struct rtnl_link_stats64
*
*/
static void
@@ -3245,7 +3246,7 @@ static int vxge_hwtstamp_get(struct vxgedev *vdev, void __user *data)
/**
* vxge_ioctl
* @dev: Device pointer.
- * @ifr: An IOCTL specific structure, that can contain a pointer to
+ * @rq: An IOCTL specific structure, that can contain a pointer to
* a proprietary structure used to pass information to the driver.
* @cmd: This is used to distinguish between the different commands that
* can be passed to the IOCTL functions.
@@ -3269,6 +3270,7 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/**
* vxge_tx_watchdog
* @dev: pointer to net device structure
+ * @txqueue: index of the hanging queue
*
* Watchdog for transmit side.
* This function is triggered if the Tx Queue is stopped
@@ -4002,6 +4004,7 @@ static void vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
/**
* vxge_pm_suspend - vxge power management suspend entry point
+ * @dev_d: device pointer
*
*/
static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
@@ -4010,6 +4013,7 @@ static int __maybe_unused vxge_pm_suspend(struct device *dev_d)
}
/**
* vxge_pm_resume - vxge power management resume entry point
+ * @dev_d: device pointer
*
*/
static int __maybe_unused vxge_pm_resume(struct device *dev_d)
@@ -4539,7 +4543,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
* due to the fact that HWTS is using the FCS as the location of the
* timestamp. The HW FCS checking will still correctly determine if
* there is a valid checksum, and the FCS is being removed by the driver
- * anyway. So no fucntionality is being lost. Since it is always
+ * anyway. So no functionality is being lost. Since it is always
* enabled, we now simply use the ioctl call to set whether or not the
* driver should be paying attention to the HWTS.
*/
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
index 709d20d9938f..ee164970b267 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c
@@ -30,8 +30,6 @@
*/
enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
{
- u64 val64;
-
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
enum vxge_hw_status status = VXGE_HW_OK;
@@ -84,7 +82,7 @@ enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->xgmac_vp_int_status);
- val64 = readq(&vp_reg->vpath_general_int_status);
+ readq(&vp_reg->vpath_general_int_status);
/* Mask unwanted interrupts */
@@ -157,8 +155,6 @@ exit:
enum vxge_hw_status vxge_hw_vpath_intr_disable(
struct __vxge_hw_vpath_handle *vp)
{
- u64 val64;
-
struct __vxge_hw_virtualpath *vpath;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -179,8 +175,6 @@ enum vxge_hw_status vxge_hw_vpath_intr_disable(
(u32)VXGE_HW_INTR_MASK_ALL,
&vp_reg->vpath_general_int_mask);
- val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
-
writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
__vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
@@ -284,7 +278,7 @@ void vxge_hw_vpath_dynamic_rti_rtimer_set(struct __vxge_hw_ring *ring)
/**
* vxge_hw_channel_msix_mask - Mask MSIX Vector.
- * @channeh: Channel for rx or tx handle
+ * @channel: Channel for rx or tx handle
* @msix_id: MSIX ID
*
* The function masks the msix interrupt for the given msix_id
@@ -301,7 +295,7 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
/**
* vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
- * @channeh: Channel for rx or tx handle
+ * @channel: Channel for rx or tx handle
* @msix_id: MSI ID
*
* The function unmasks the msix interrupt for the given msix_id
@@ -356,8 +350,6 @@ u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
/**
* vxge_hw_device_intr_enable - Enable interrupts.
* @hldev: HW device handle.
- * @op: One of the enum vxge_hw_device_intr enumerated values specifying
- * the type(s) of interrupts to enable.
*
* Enable Titan interrupts. The function is to be executed the last in
* Titan initialization sequence.
@@ -411,8 +403,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
/**
* vxge_hw_device_intr_disable - Disable Titan interrupts.
* @hldev: HW device handle.
- * @op: One of the enum vxge_hw_device_intr enumerated values specifying
- * the type(s) of interrupts to disable.
*
* Disable Titan interrupts.
*
@@ -487,9 +477,7 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
*/
void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
{
- u32 val32;
-
- val32 = readl(&hldev->common_reg->titan_general_int_status);
+ readl(&hldev->common_reg->titan_general_int_status);
}
/**
@@ -1414,7 +1402,7 @@ u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
/**
* vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
- * @fifoh: Handle to the fifo object used for non offload send
+ * @fifo: Handle to the fifo object used for non offload send
* @txdlh: Reserved descriptor. On success HW fills this "out" parameter
* with a valid handle.
* @txdl_priv: Buffer to return the pointer to per txdl space
@@ -1525,8 +1513,6 @@ void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
* vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
* @fifo: Handle to the fifo object used for non offload send
* @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
- * @frags: Number of contiguous buffers that are part of a single
- * transmit operation.
*
* Post descriptor on the 'fifo' type channel for transmission.
* Prior to posting the descriptor should be filled in accordance with
@@ -1699,8 +1685,7 @@ void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
}
/**
- * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
- * to MAC address table.
+ * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath to MAC address table.
* @vp: Vpath handle.
* @macaddr: MAC address to be added for this vpath into the list
* @macaddr_mask: MAC address mask for macaddr
@@ -1716,8 +1701,8 @@ void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
enum vxge_hw_status
vxge_hw_vpath_mac_addr_add(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN],
+ u8 *macaddr,
+ u8 *macaddr_mask,
enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
{
u32 i;
@@ -1765,13 +1750,13 @@ exit:
}
/**
- * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
- * from MAC address table.
+ * vxge_hw_vpath_mac_addr_get - Get the first mac address entry
* @vp: Vpath handle.
* @macaddr: First MAC address entry for this vpath in the list
* @macaddr_mask: MAC address mask for macaddr
*
- * Returns the first mac address and mac address mask in the list for this
+ * Get the first mac address entry for this vpath from MAC address table.
+ * Return: the first mac address and mac address mask in the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_get_next
*
@@ -1779,8 +1764,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -1816,14 +1801,13 @@ exit:
}
/**
- * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
- * vpath
- * from MAC address table.
+ * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry
* @vp: Vpath handle.
* @macaddr: Next MAC address entry for this vpath in the list
* @macaddr_mask: MAC address mask for macaddr
*
- * Returns the next mac address and mac address mask in the list for this
+ * Get the next mac address entry for this vpath from MAC address table.
+ * Return: the next mac address and mac address mask in the list for this
* vpath.
* see also: vxge_hw_vpath_mac_addr_get
*
@@ -1831,8 +1815,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_get_next(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -1869,8 +1853,7 @@ exit:
}
/**
- * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
- * to MAC address table.
+ * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath to MAC address table.
* @vp: Vpath handle.
* @macaddr: MAC address to be added for this vpath into the list
* @macaddr_mask: MAC address mask for macaddr
@@ -1884,8 +1867,8 @@ exit:
enum vxge_hw_status
vxge_hw_vpath_mac_addr_delete(
struct __vxge_hw_vpath_handle *vp,
- u8 (macaddr)[ETH_ALEN],
- u8 (macaddr_mask)[ETH_ALEN])
+ u8 *macaddr,
+ u8 *macaddr_mask)
{
u32 i;
u64 data1 = 0ULL;
@@ -1916,8 +1899,7 @@ exit:
}
/**
- * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
- * to vlan id table.
+ * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath to vlan id table.
* @vp: Vpath handle.
* @vid: vlan id to be added for this vpath into the list
*
@@ -2375,7 +2357,6 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
u8 t_code;
enum vxge_hw_status status = VXGE_HW_OK;
void *first_rxdh;
- u64 val64 = 0;
int new_count = 0;
ring->cmpl_cnt = 0;
@@ -2403,8 +2384,7 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
}
writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
&ring->vp_reg->prc_rxd_doorbell);
- val64 =
- readl(&ring->common_reg->titan_general_int_status);
+ readl(&ring->common_reg->titan_general_int_status);
ring->doorbell_cnt = 0;
}
}
@@ -2413,9 +2393,11 @@ enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
}
/**
- * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
- * the same.
+ * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process the same.
* @fifo: Handle to the fifo object used for non offload send
+ * @skb_ptr: pointer to skb
+ * @nr_skb: number of skbs
+ * @more: more is coming
*
* The function polls the Tx for the completed descriptors and calls
* the driver via supplied completion callback.
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index ac02369174a9..53851853562c 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -111,7 +111,9 @@ static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
struct bpf_prog *prog)
{
- int i, cnt, err;
+ int i, cnt, err = 0;
+
+ mutex_lock(&prog->aux->used_maps_mutex);
/* Quickly count the maps we will have to remember */
cnt = 0;
@@ -119,13 +121,15 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
cnt++;
if (!cnt)
- return 0;
+ goto out;
nfp_prog->map_records = kmalloc_array(cnt,
sizeof(nfp_prog->map_records[0]),
GFP_KERNEL);
- if (!nfp_prog->map_records)
- return -ENOMEM;
+ if (!nfp_prog->map_records) {
+ err = -ENOMEM;
+ goto out;
+ }
for (i = 0; i < prog->aux->used_map_cnt; i++)
if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
@@ -133,12 +137,14 @@ nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
prog->aux->used_maps[i]);
if (err) {
nfp_map_ptrs_forget(bpf, nfp_prog);
- return err;
+ goto out;
}
}
WARN_ON(cnt != nfp_prog->map_records_cnt);
- return 0;
+out:
+ mutex_unlock(&prog->aux->used_maps_mutex);
+ return err;
}
static int
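
The nfp_map_ptrs_record() hunk above turns three early returns into a single
exit label so that the newly taken used_maps_mutex is released on every path.
A minimal sketch of the single-unlock pattern, with hypothetical names:

    static int record_maps(struct foo *f)
    {
            int err = 0;

            mutex_lock(&f->lock);
            if (!f->cnt)
                    goto out;               /* success path still unlocks */

            f->recs = kmalloc_array(f->cnt, sizeof(*f->recs), GFP_KERNEL);
            if (!f->recs)
                    err = -ENOMEM;          /* error path funnels to out too */
    out:
            mutex_unlock(&f->lock);
            return err;
    }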
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index bf516285510f..a2926b1b3cff 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -24,6 +24,7 @@
#define NFP_FLOWER_LAYER_VXLAN BIT(7)
#define NFP_FLOWER_LAYER2_GRE BIT(0)
+#define NFP_FLOWER_LAYER2_QINQ BIT(4)
#define NFP_FLOWER_LAYER2_GENEVE BIT(5)
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_LAYER2_TUN_IPV6 BIT(7)
@@ -319,6 +320,22 @@ struct nfp_flower_mac_mpls {
__be32 mpls_lse;
};
+/* VLAN details (2W/8B)
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | outer_tpid | outer_tci |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | inner_tpid | inner_tci |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct nfp_flower_vlan {
+ __be16 outer_tpid;
+ __be16 outer_tci;
+ __be16 inner_tpid;
+ __be16 inner_tci;
+};
+
/* L4 ports (for UDP, TCP, SCTP) (1W/4B)
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 3bf9c1afa45e..caf12eec9945 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -30,6 +30,8 @@ struct nfp_app;
#define NFP_FLOWER_MASK_ELEMENT_RS 1
#define NFP_FLOWER_MASK_HASH_BITS 10
+#define NFP_FLOWER_KEY_MAX_LW 32
+
#define NFP_FL_META_FLAG_MANAGE_MASK BIT(7)
#define NFP_FL_MASK_REUSE_TIME_NS 40000
@@ -44,6 +46,7 @@ struct nfp_app;
#define NFP_FL_FEATS_FLOW_MOD BIT(5)
#define NFP_FL_FEATS_PRE_TUN_RULES BIT(6)
#define NFP_FL_FEATS_IPV6_TUN BIT(7)
+#define NFP_FL_FEATS_VLAN_QINQ BIT(8)
#define NFP_FL_FEATS_HOST_ACK BIT(31)
#define NFP_FL_ENABLE_FLOW_MERGE BIT(0)
@@ -57,7 +60,8 @@ struct nfp_app;
NFP_FL_FEATS_VF_RLIM | \
NFP_FL_FEATS_FLOW_MOD | \
NFP_FL_FEATS_PRE_TUN_RULES | \
- NFP_FL_FEATS_IPV6_TUN)
+ NFP_FL_FEATS_IPV6_TUN | \
+ NFP_FL_FEATS_VLAN_QINQ)
struct nfp_fl_mask_id {
struct circ_buf mask_id_free_list;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index f7f01e2e3dce..255a4dff6288 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -10,7 +10,7 @@
static void
nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
struct nfp_flower_meta_tci *msk,
- struct flow_rule *rule, u8 key_type)
+ struct flow_rule *rule, u8 key_type, bool qinq_sup)
{
u16 tmp_tci;
@@ -24,7 +24,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
msk->nfp_flow_key_layer = key_type;
msk->mask_id = ~0;
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
flow_rule_match_vlan(rule, &match);
@@ -231,6 +231,50 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext,
}
static void
+nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key,
+ struct nfp_flower_vlan *frame,
+ bool outer_vlan)
+{
+ u16 tci;
+
+ tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ key->vlan_id);
+
+ if (outer_vlan) {
+ frame->outer_tci = cpu_to_be16(tci);
+ frame->outer_tpid = key->vlan_tpid;
+ } else {
+ frame->inner_tci = cpu_to_be16(tci);
+ frame->inner_tpid = key->vlan_tpid;
+ }
+}
+
+static void
+nfp_flower_compile_vlan(struct nfp_flower_vlan *ext,
+ struct nfp_flower_vlan *msk,
+ struct flow_rule *rule)
+{
+ struct flow_match_vlan match;
+
+ memset(ext, 0, sizeof(struct nfp_flower_vlan));
+ memset(msk, 0, sizeof(struct nfp_flower_vlan));
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ flow_rule_match_vlan(rule, &match);
+ nfp_flower_fill_vlan(match.key, ext, true);
+ nfp_flower_fill_vlan(match.mask, msk, true);
+ }
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ flow_rule_match_cvlan(rule, &match);
+ nfp_flower_fill_vlan(match.key, ext, false);
+ nfp_flower_fill_vlan(match.mask, msk, false);
+ }
+}
+
+static void
nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext,
struct nfp_flower_ipv4 *msk, struct flow_rule *rule)
{
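
nfp_flower_fill_vlan() above packs a standard 802.1Q TCI: a 3-bit priority, a
present bit, and a 12-bit VLAN ID. A worked example using the standard bit
positions (generic shifts, not the NFP_FLOWER_MASK_VLAN_* defines, which are
assumed here to follow the 802.1Q layout):

    u16 vid = 100, prio = 5;
    u16 tci = (prio << 13)      /* PCP in bits 15..13 -> 0xa000 */
            | (1 << 12)         /* present bit        -> 0x1000 */
            | (vid & 0x0fff);   /* VID in bits 11..0  -> 0x0064 */
    /* tci == 0xb064, stored big-endian via cpu_to_be16() as in the hunk */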
@@ -433,7 +477,10 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
struct netlink_ext_ack *extack)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(flow);
+ struct nfp_flower_priv *priv = app->priv;
+ bool qinq_sup;
u32 port_id;
+ int ext_len;
int err;
u8 *ext;
u8 *msk;
@@ -446,9 +493,11 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
ext = nfp_flow->unmasked_data;
msk = nfp_flow->mask_data;
+ qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ);
+
nfp_flower_compile_meta_tci((struct nfp_flower_meta_tci *)ext,
(struct nfp_flower_meta_tci *)msk,
- rule, key_ls->key_layer);
+ rule, key_ls->key_layer, qinq_sup);
ext += sizeof(struct nfp_flower_meta_tci);
msk += sizeof(struct nfp_flower_meta_tci);
@@ -547,6 +596,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
+ if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+ nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+ (struct nfp_flower_vlan *)msk,
+ rule);
+ ext += sizeof(struct nfp_flower_vlan);
+ msk += sizeof(struct nfp_flower_vlan);
+ }
+
if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
@@ -589,5 +646,15 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
}
}
+ /* Check that the flow key does not exceed the maximum limit.
+ * All structures in the key are multiples of 4 bytes, so use u32.
+ */
+ ext_len = (u32 *)ext - (u32 *)nfp_flow->unmasked_data;
+ if (ext_len > NFP_FLOWER_KEY_MAX_LW) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "unsupported offload: flow key too long");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
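
The new length check relies on u32 pointer arithmetic: subtracting two u32
pointers yields a count of 4-byte longwords, so NFP_FLOWER_KEY_MAX_LW (32)
caps the compiled key at 128 bytes. A worked sketch:

    static int check_key_len(u8 *start, u8 *end)
    {
            int lw = (u32 *)end - (u32 *)start;  /* byte span / 4 */

            return lw > 32 ? -EOPNOTSUPP : 0;    /* 32 longwords == 128 B */
    }
    /* check_key_len(key, key + 136) -> -EOPNOTSUPP, since 136/4 = 34 > 32 */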
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 36356f96661d..1c59aff2163c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -31,6 +31,7 @@
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
+ BIT(FLOW_DISSECTOR_KEY_CVLAN) | \
BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
@@ -66,7 +67,8 @@
NFP_FLOWER_LAYER_IPV6)
#define NFP_FLOWER_PRE_TUN_RULE_FIELDS \
- (NFP_FLOWER_LAYER_PORT | \
+ (NFP_FLOWER_LAYER_EXT_META | \
+ NFP_FLOWER_LAYER_PORT | \
NFP_FLOWER_LAYER_MAC | \
NFP_FLOWER_LAYER_IPV4 | \
NFP_FLOWER_LAYER_IPV6)
@@ -285,6 +287,30 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN PCP offload");
return -EOPNOTSUPP;
}
+ if (priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ &&
+ !(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
+ struct flow_match_vlan cvlan;
+
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support VLAN QinQ offload");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_vlan(rule, &cvlan);
+ if (!(key_layer_two & NFP_FLOWER_LAYER2_QINQ)) {
+ key_layer |= NFP_FLOWER_LAYER_EXT_META;
+ key_size += sizeof(struct nfp_flower_ext_meta);
+ key_size += sizeof(struct nfp_flower_vlan);
+ key_layer_two |= NFP_FLOWER_LAYER2_QINQ;
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
@@ -1066,6 +1092,7 @@ err_destroy_merge_flow:
* nfp_flower_validate_pre_tun_rule()
* @app: Pointer to the APP handle
* @flow: Pointer to NFP flow representation of rule
+ * @key_ls: Pointer to NFP key layers structure
* @extack: Netlink extended ACK report
*
* Verifies the flow as a pre-tunnel rule.
@@ -1075,10 +1102,13 @@ err_destroy_merge_flow:
static int
nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
struct nfp_fl_payload *flow,
+ struct nfp_fl_key_ls *key_ls,
struct netlink_ext_ack *extack)
{
+ struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_meta_tci *meta_tci;
struct nfp_flower_mac_mpls *mac;
+ u8 *ext = flow->unmasked_data;
struct nfp_fl_act_head *act;
u8 *mask = flow->mask_data;
bool vlan = false;
@@ -1086,20 +1116,25 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
u8 key_layer;
meta_tci = (struct nfp_flower_meta_tci *)flow->unmasked_data;
- if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
- u16 vlan_tci = be16_to_cpu(meta_tci->tci);
-
- vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
- vlan = true;
- } else {
- flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ key_layer = key_ls->key_layer;
+ if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (meta_tci->tci & cpu_to_be16(NFP_FLOWER_MASK_VLAN_PRESENT)) {
+ u16 vlan_tci = be16_to_cpu(meta_tci->tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
- key_layer = meta_tci->nfp_flow_key_layer;
if (key_layer & ~NFP_FLOWER_PRE_TUN_RULE_FIELDS) {
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: too many match fields");
return -EOPNOTSUPP;
+ } else if (key_ls->key_layer_two & ~NFP_FLOWER_LAYER2_QINQ) {
+ NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: non-vlan in extended match fields");
+ return -EOPNOTSUPP;
}
if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
@@ -1109,7 +1144,13 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
/* Skip fields known to exist. */
mask += sizeof(struct nfp_flower_meta_tci);
+ ext += sizeof(struct nfp_flower_meta_tci);
+ if (key_ls->key_layer_two) {
+ mask += sizeof(struct nfp_flower_ext_meta);
+ ext += sizeof(struct nfp_flower_ext_meta);
+ }
mask += sizeof(struct nfp_flower_in_port);
+ ext += sizeof(struct nfp_flower_in_port);
/* Ensure destination MAC address is fully matched. */
mac = (struct nfp_flower_mac_mpls *)mask;
@@ -1118,6 +1159,8 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
return -EOPNOTSUPP;
}
+ mask += sizeof(struct nfp_flower_mac_mpls);
+ ext += sizeof(struct nfp_flower_mac_mpls);
if (key_layer & NFP_FLOWER_LAYER_IPV4 ||
key_layer & NFP_FLOWER_LAYER_IPV6) {
/* Flags and proto fields have same offset in IPv4 and IPv6. */
@@ -1130,7 +1173,6 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
sizeof(struct nfp_flower_ipv4) :
sizeof(struct nfp_flower_ipv6);
- mask += sizeof(struct nfp_flower_mac_mpls);
/* Ensure proto and flags are the only IP layer fields. */
for (i = 0; i < size; i++)
@@ -1138,6 +1180,25 @@ nfp_flower_validate_pre_tun_rule(struct nfp_app *app,
NL_SET_ERR_MSG_MOD(extack, "unsupported pre-tunnel rule: only flags and proto can be matched in ip header");
return -EOPNOTSUPP;
}
+ ext += size;
+ mask += size;
+ }
+
+ if ((priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ)) {
+ if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+ struct nfp_flower_vlan *vlan_tags;
+ u16 vlan_tci;
+
+ vlan_tags = (struct nfp_flower_vlan *)ext;
+
+ vlan_tci = be16_to_cpu(vlan_tags->outer_tci);
+
+ vlan_tci &= ~NFP_FLOWER_MASK_VLAN_PRESENT;
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(vlan_tci);
+ vlan = true;
+ } else {
+ flow->pre_tun_rule.vlan_tci = cpu_to_be16(0xffff);
+ }
}
/* Action must be a single egress or pop_vlan and egress. */
@@ -1220,7 +1281,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
goto err_destroy_flow;
if (flow_pay->pre_tun_rule.dev) {
- err = nfp_flower_validate_pre_tun_rule(app, flow_pay, extack);
+ err = nfp_flower_validate_pre_tun_rule(app, flow_pay, key_layer, extack);
if (err)
goto err_destroy_flow;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index be52510d446b..97d2b03208de 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -329,12 +329,11 @@ err_close_nsp:
}
static int
-nfp_devlink_flash_update(struct devlink *devlink, const char *path,
- const char *component, struct netlink_ext_ack *extack)
+nfp_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
{
- if (component)
- return -EOPNOTSUPP;
- return nfp_flash_update_common(devlink_priv(devlink), path, extack);
+ return nfp_flash_update_common(devlink_priv(devlink), params->file_name, extack);
}
const struct devlink_ops nfp_devlink_ops = {
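
The devlink flash callback now takes a devlink_flash_update_params struct in
place of separate path and component arguments, with the component check
handled by the devlink core instead of each driver; ionic grows the same style
of callback further below. A minimal callback sketch with hypothetical driver
helpers:

    static int foo_devlink_flash_update(struct devlink *devlink,
                                        struct devlink_flash_update_params *params,
                                        struct netlink_ext_ack *extack)
    {
            /* params->file_name replaces the old 'path' argument */
            return foo_flash_common(devlink_priv(devlink),
                                    params->file_name, extack);
    }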
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 21ea22694e47..b150da43adb2 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2287,9 +2287,9 @@ static bool nfp_ctrl_rx(struct nfp_net_r_vector *r_vec)
return budget;
}
-static void nfp_ctrl_poll(unsigned long arg)
+static void nfp_ctrl_poll(struct tasklet_struct *t)
{
- struct nfp_net_r_vector *r_vec = (void *)arg;
+ struct nfp_net_r_vector *r_vec = from_tasklet(r_vec, t, tasklet);
spin_lock(&r_vec->lock);
nfp_net_tx_complete(r_vec->tx_ring, 0);
@@ -2337,8 +2337,7 @@ static void nfp_net_vecs_init(struct nfp_net *nn)
__skb_queue_head_init(&r_vec->queue);
spin_lock_init(&r_vec->lock);
- tasklet_init(&r_vec->tasklet, nfp_ctrl_poll,
- (unsigned long)r_vec);
+ tasklet_setup(&r_vec->tasklet, nfp_ctrl_poll);
tasklet_disable(&r_vec->tasklet);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 4075f5e59955..a6861df9904f 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -787,9 +787,9 @@ out:
return IRQ_HANDLED;
}
-static void nixge_dma_err_handler(unsigned long data)
+static void nixge_dma_err_handler(struct tasklet_struct *t)
{
- struct nixge_priv *lp = (struct nixge_priv *)data;
+ struct nixge_priv *lp = from_tasklet(lp, t, dma_err_tasklet);
struct nixge_hw_dma_bd *cur_p;
struct nixge_tx_skb *tx_skb;
u32 cr, i;
@@ -879,8 +879,7 @@ static int nixge_open(struct net_device *ndev)
phy_start(phy);
/* Enable tasklets for Axi DMA error handling */
- tasklet_init(&priv->dma_err_tasklet, nixge_dma_err_handler,
- (unsigned long)priv);
+ tasklet_setup(&priv->dma_err_tasklet, nixge_dma_err_handler);
napi_enable(&priv->napi);
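
Both tasklet conversions above follow the same recipe: tasklet_setup() drops
the unsigned long cookie, and the callback recovers its context with
from_tasklet(), a type-safe container_of() over the embedded tasklet_struct.
A sketch with a hypothetical driver structure:

    struct foo_priv {
            struct tasklet_struct tasklet;
            /* ... driver state ... */
    };

    static void foo_poll(struct tasklet_struct *t)
    {
            /* replaces: struct foo_priv *priv = (struct foo_priv *)data; */
            struct foo_priv *priv = from_tasklet(priv, t, tasklet);

            /* ... service the device using priv ... */
    }

    /* at init time: no cookie, the member name binds callback to context */
    tasklet_setup(&priv->tasklet, foo_poll);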
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index b36aa5bf3c5f..a58f14aca10c 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -8,7 +8,7 @@
#include "pch_gbe.h"
#include "pch_gbe_phy.h"
-/**
+/*
* pch_gbe_stats - Stats item information
*/
struct pch_gbe_stats {
@@ -24,7 +24,7 @@ struct pch_gbe_stats {
.offset = offsetof(struct pch_gbe_hw_stats, m), \
}
-/**
+/*
* pch_gbe_gstrings_stats - ethtool information status name list
*/
static const struct pch_gbe_stats pch_gbe_gstrings_stats[] = {
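
The "/**" to "/*" edits in the two pch_gbe files are not cosmetic: "/**" marks
a comment as kernel-doc, so scripts/kernel-doc warns when the body (as with
these stats tables and the module parameters below) does not follow kernel-doc
syntax. A minimal contrast:

    /**
     * foo_stats - kernel-doc opener: body must follow kernel-doc syntax
     * @name: one such line per documented member or parameter
     */

    /* plain opener: free-form text, invisible to the kernel-doc parser */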
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 23f7c76737c9..ade8c44c01cd 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -295,7 +295,7 @@ static s32 pch_gbe_mac_read_mac_addr(struct pch_gbe_hw *hw)
/**
* pch_gbe_wait_clr_bit - Wait to clear a bit
* @reg: Pointer of register
- * @busy: Busy bit
+ * @bit: Busy bit
*/
static void pch_gbe_wait_clr_bit(void *reg, u32 bit)
{
@@ -1034,7 +1034,7 @@ static void pch_gbe_set_mode(struct pch_gbe_adapter *adapter, u16 speed,
/**
* pch_gbe_watchdog - Watchdog process
- * @data: Board private structure
+ * @t: timer list containing a Board private structure
*/
static void pch_gbe_watchdog(struct timer_list *t)
{
@@ -2270,6 +2270,7 @@ static int pch_gbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
/**
* pch_gbe_tx_timeout - Respond to a Tx Hang
* @netdev: Network interface device structure
+ * @txqueue: index of hanging queue
*/
static void pch_gbe_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index dceec80fd642..81fc5a6e3221 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -13,7 +13,7 @@
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
-/**
+/*
* TxDescriptors - Transmit Descriptor Count
* @Valid Range: PCH_GBE_MIN_TXD - PCH_GBE_MAX_TXD
* @Default Value: PCH_GBE_DEFAULT_TXD
@@ -22,7 +22,7 @@ static int TxDescriptors = OPTION_UNSET;
module_param(TxDescriptors, int, 0);
MODULE_PARM_DESC(TxDescriptors, "Number of transmit descriptors");
-/**
+/*
* RxDescriptors -Receive Descriptor Count
* @Valid Range: PCH_GBE_MIN_RXD - PCH_GBE_MAX_RXD
* @Default Value: PCH_GBE_DEFAULT_RXD
@@ -31,7 +31,7 @@ static int RxDescriptors = OPTION_UNSET;
module_param(RxDescriptors, int, 0);
MODULE_PARM_DESC(RxDescriptors, "Number of receive descriptors");
-/**
+/*
* Speed - User Specified Speed Override
* @Valid Range: 0, 10, 100, 1000
* - 0: auto-negotiate at all supported speeds
@@ -44,7 +44,7 @@ static int Speed = OPTION_UNSET;
module_param(Speed, int, 0);
MODULE_PARM_DESC(Speed, "Speed setting");
-/**
+/*
* Duplex - User Specified Duplex Override
* @Valid Range: 0-2
* - 0: auto-negotiate for duplex
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(Duplex, "Duplex setting");
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
-/**
+/*
* AutoNeg - Auto-negotiation Advertisement Override
* @Valid Range: 0x01-0x0F, 0x20-0x2F
*
@@ -85,7 +85,7 @@ MODULE_PARM_DESC(AutoNeg, "Advertised auto-negotiation setting");
#define PHY_ADVERTISE_1000_FULL 0x0020
#define PCH_AUTONEG_ADVERTISE_DEFAULT 0x2F
-/**
+/*
* FlowControl - User Specified Flow Control Override
* @Valid Range: 0-3
* - 0: No Flow Control
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(XsumTX, "Disable or enable Transmit Checksum offload");
#define PCH_GBE_DEFAULT_TX_CSUM true /* trueorfalse */
-/**
+/*
* pch_gbe_option - Force the MAC's flow control settings
* @hw: Pointer to the HW structure
* Returns:
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 3da075307178..d1dd9bc1bc7f 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -1060,7 +1060,7 @@ static int yellowfin_rx(struct net_device *dev)
struct sk_buff *rx_skb = yp->rx_skbuff[entry];
s16 frame_status;
u16 desc_status;
- int data_size, yf_size;
+ int data_size, __maybe_unused yf_size;
u8 *buf_addr;
if(!desc->result_status)
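
Marking yf_size as __maybe_unused keeps the variable (and the computation it
documents) while telling the compiler that leaving it unread is intentional,
avoiding both outright deletion and a cast-to-void. Sketch:

    int data_size;
    int __maybe_unused yf_size;  /* no unused-variable warning if unread */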
diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig
index 76f8cc502bf9..5f8b0bb3af6e 100644
--- a/drivers/net/ethernet/pensando/Kconfig
+++ b/drivers/net/ethernet/pensando/Kconfig
@@ -21,6 +21,7 @@ config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
select NET_DEVLINK
+ select DIMLIB
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be
diff --git a/drivers/net/ethernet/pensando/ionic/Makefile b/drivers/net/ethernet/pensando/ionic/Makefile
index 29f304d75261..8d3c2d3cb10d 100644
--- a/drivers/net/ethernet/pensando/ionic/Makefile
+++ b/drivers/net/ethernet/pensando/ionic/Makefile
@@ -5,4 +5,4 @@ obj-$(CONFIG_IONIC) := ionic.o
ionic-y := ionic_main.o ionic_bus_pci.o ionic_devlink.o ionic_dev.o \
ionic_debugfs.o ionic_lif.o ionic_rx_filter.o ionic_ethtool.o \
- ionic_txrx.o ionic_stats.o
+ ionic_txrx.o ionic_stats.o ionic_fw.o
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index f5a910c458ba..084a924431d5 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -42,13 +42,11 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
- struct list_head lifs;
- struct ionic_lif *master_lif;
+ struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
unsigned int ntxqs_per_lif;
unsigned int nrxqs_per_lif;
- DECLARE_BITMAP(lifbits, IONIC_LIFS_MAX);
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
struct work_struct nb_work;
@@ -66,9 +64,6 @@ struct ionic_admin_ctx {
union ionic_adminq_comp comp;
};
-int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
- ionic_cq_done_cb done_cb, void *done_arg);
-
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 85c686c16741..b0d8499d373b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -266,6 +266,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot identify device: %d, aborting\n", err);
goto err_out_teardown;
}
+ ionic_debugfs_add_ident(ionic);
err = ionic_init(ionic);
if (err) {
@@ -286,29 +287,22 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_reset;
}
- /* Configure LIFs */
- err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
- &ionic->ident.lif);
+ /* Allocate and init the LIF */
+ err = ionic_lif_size(ionic);
if (err) {
- dev_err(dev, "Cannot identify LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot size LIF: %d, aborting\n", err);
goto err_out_port_reset;
}
- err = ionic_lifs_size(ionic);
+ err = ionic_lif_alloc(ionic);
if (err) {
- dev_err(dev, "Cannot size LIFs: %d, aborting\n", err);
- goto err_out_port_reset;
- }
-
- err = ionic_lifs_alloc(ionic);
- if (err) {
- dev_err(dev, "Cannot allocate LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot allocate LIF: %d, aborting\n", err);
goto err_out_free_irqs;
}
- err = ionic_lifs_init(ionic);
+ err = ionic_lif_init(ionic->lif);
if (err) {
- dev_err(dev, "Cannot init LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot init LIF: %d, aborting\n", err);
goto err_out_free_lifs;
}
@@ -321,9 +315,9 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(dev, "Cannot enable existing VFs: %d\n", err);
}
- err = ionic_lifs_register(ionic);
+ err = ionic_lif_register(ionic->lif);
if (err) {
- dev_err(dev, "Cannot register LIFs: %d, aborting\n", err);
+ dev_err(dev, "Cannot register LIF: %d, aborting\n", err);
goto err_out_deinit_lifs;
}
@@ -336,12 +330,13 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
err_out_deregister_lifs:
- ionic_lifs_unregister(ionic);
+ ionic_lif_unregister(ionic->lif);
err_out_deinit_lifs:
ionic_vf_dealloc(ionic);
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(ionic->lif);
err_out_free_lifs:
- ionic_lifs_free(ionic);
+ ionic_lif_free(ionic->lif);
+ ionic->lif = NULL;
err_out_free_irqs:
ionic_bus_free_irq_vectors(ionic);
err_out_port_reset:
@@ -349,7 +344,7 @@ err_out_port_reset:
err_out_reset:
ionic_reset(ionic);
err_out_teardown:
- ionic_dev_teardown(ionic);
+ del_timer_sync(&ionic->watchdog_timer);
pci_clear_master(pdev);
/* Don't fail the probe for these errors, keep
* the hw interface around for inspection
@@ -377,17 +372,19 @@ static void ionic_remove(struct pci_dev *pdev)
if (!ionic)
return;
- if (ionic->master_lif) {
+ del_timer_sync(&ionic->watchdog_timer);
+
+ if (ionic->lif) {
ionic_devlink_unregister(ionic);
- ionic_lifs_unregister(ionic);
- ionic_lifs_deinit(ionic);
- ionic_lifs_free(ionic);
+ ionic_lif_unregister(ionic->lif);
+ ionic_lif_deinit(ionic->lif);
+ ionic_lif_free(ionic->lif);
+ ionic->lif = NULL;
ionic_bus_free_irq_vectors(ionic);
}
ionic_port_reset(ionic);
ionic_reset(ionic);
- ionic_dev_teardown(ionic);
pci_clear_master(pdev);
ionic_unmap_bars(ionic);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 11621ccc1faf..39f59849720d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -76,7 +76,7 @@ static int q_tail_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
- seq_printf(seq, "%d\n", q->tail->index);
+ seq_printf(seq, "%d\n", q->tail_idx);
return 0;
}
@@ -86,7 +86,7 @@ static int q_head_show(struct seq_file *seq, void *v)
{
struct ionic_queue *q = seq->private;
- seq_printf(seq, "%d\n", q->head->index);
+ seq_printf(seq, "%d\n", q->head_idx);
return 0;
}
@@ -96,7 +96,7 @@ static int cq_tail_show(struct seq_file *seq, void *v)
{
struct ionic_cq *cq = seq->private;
- seq_printf(seq, "%d\n", cq->tail->index);
+ seq_printf(seq, "%d\n", cq->tail_idx);
return 0;
}
@@ -112,7 +112,8 @@ static const struct debugfs_reg32 intr_ctrl_regs[] = {
void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
- struct dentry *q_dentry, *cq_dentry, *intr_dentry, *stats_dentry;
+ struct dentry *qcq_dentry, *q_dentry, *cq_dentry;
+ struct dentry *intr_dentry, *stats_dentry;
struct ionic_dev *idev = &lif->ionic->idev;
struct debugfs_regset32 *intr_ctrl_regset;
struct ionic_intr_info *intr = &qcq->intr;
@@ -121,21 +122,21 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
struct ionic_queue *q = &qcq->q;
struct ionic_cq *cq = &qcq->cq;
- qcq->dentry = debugfs_create_dir(q->name, lif->dentry);
+ qcq_dentry = debugfs_create_dir(q->name, lif->dentry);
+ if (IS_ERR_OR_NULL(qcq_dentry))
+ return;
+ qcq->dentry = qcq_dentry;
- debugfs_create_x32("total_size", 0400, qcq->dentry, &qcq->total_size);
- debugfs_create_x64("base_pa", 0400, qcq->dentry, &qcq->base_pa);
+ debugfs_create_x64("q_base_pa", 0400, qcq_dentry, &qcq->q_base_pa);
+ debugfs_create_x32("q_size", 0400, qcq_dentry, &qcq->q_size);
+ debugfs_create_x64("cq_base_pa", 0400, qcq_dentry, &qcq->cq_base_pa);
+ debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);
+ debugfs_create_x64("sg_base_pa", 0400, qcq_dentry, &qcq->sg_base_pa);
+ debugfs_create_x32("sg_size", 0400, qcq_dentry, &qcq->sg_size);
q_dentry = debugfs_create_dir("q", qcq->dentry);
debugfs_create_u32("index", 0400, q_dentry, &q->index);
- debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa);
- if (qcq->flags & IONIC_QCQ_F_SG) {
- debugfs_create_x64("sg_base_pa", 0400, q_dentry,
- &q->sg_base_pa);
- debugfs_create_u32("sg_desc_size", 0400, q_dentry,
- &q->sg_desc_size);
- }
debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
@@ -188,6 +189,8 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
&intr->index);
debugfs_create_u32("vector", 0400, intr_dentry,
&intr->vector);
+ debugfs_create_u32("dim_coal_hw", 0400, intr_dentry,
+ &intr->dim_coal_hw);
intr_ctrl_regset = devm_kzalloc(dev, sizeof(*intr_ctrl_regset),
GFP_KERNEL);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index d83eff0ae0ac..545c99b15df8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -19,10 +19,13 @@ static void ionic_watchdog_cb(struct timer_list *t)
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
+ if (!ionic->lif)
+ return;
+
hb = ionic_heartbeat_check(ionic);
- if (hb >= 0 && ionic->master_lif)
- ionic_link_status_check_request(ionic->master_lif);
+ if (hb >= 0)
+ ionic_link_status_check_request(ionic->lif, false);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -98,11 +101,6 @@ int ionic_dev_setup(struct ionic *ionic)
return 0;
}
-void ionic_dev_teardown(struct ionic *ionic)
-{
- del_timer_sync(&ionic->watchdog_timer);
-}
-
/* Devcmd Interface */
int ionic_heartbeat_check(struct ionic *ionic)
{
@@ -126,7 +124,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
/* is this a transition? */
if (fw_status != idev->last_fw_status &&
idev->last_fw_status != 0xff) {
- struct ionic_lif *lif = ionic->master_lif;
+ struct ionic_lif *lif = ionic->lif;
bool trigger = false;
if (!fw_status || fw_status == 0xff) {
@@ -467,9 +465,7 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
struct ionic_intr_info *intr,
unsigned int num_descs, size_t desc_size)
{
- struct ionic_cq_info *cur;
unsigned int ring_size;
- unsigned int i;
if (desc_size == 0 || !is_power_of_2(num_descs))
return -EINVAL;
@@ -482,22 +478,9 @@ int ionic_cq_init(struct ionic_lif *lif, struct ionic_cq *cq,
cq->bound_intr = intr;
cq->num_descs = num_descs;
cq->desc_size = desc_size;
- cq->tail = cq->info;
+ cq->tail_idx = 0;
cq->done_color = 1;
- cur = cq->info;
-
- for (i = 0; i < num_descs; i++) {
- if (i + 1 == num_descs) {
- cur->next = cq->info;
- cur->last = true;
- } else {
- cur->next = cur + 1;
- }
- cur->index = i;
- cur++;
- }
-
return 0;
}
@@ -522,15 +505,18 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg)
{
+ struct ionic_cq_info *cq_info;
unsigned int work_done = 0;
if (work_to_do == 0)
return 0;
- while (cb(cq, cq->tail)) {
- if (cq->tail->last)
+ cq_info = &cq->info[cq->tail_idx];
+ while (cb(cq, cq_info)) {
+ if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
- cq->tail = cq->tail->next;
+ cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
+ cq_info = &cq->info[cq->tail_idx];
DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
@@ -548,9 +534,7 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
unsigned int num_descs, size_t desc_size,
size_t sg_desc_size, unsigned int pid)
{
- struct ionic_desc_info *cur;
unsigned int ring_size;
- unsigned int i;
if (desc_size == 0 || !is_power_of_2(num_descs))
return -EINVAL;
@@ -565,24 +549,12 @@ int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
q->num_descs = num_descs;
q->desc_size = desc_size;
q->sg_desc_size = sg_desc_size;
- q->tail = q->info;
- q->head = q->tail;
+ q->tail_idx = 0;
+ q->head_idx = 0;
q->pid = pid;
snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
- cur = q->info;
-
- for (i = 0; i < num_descs; i++) {
- if (i + 1 == num_descs)
- cur->next = q->info;
- else
- cur->next = cur + 1;
- cur->index = i;
- cur->left = num_descs - i;
- cur++;
- }
-
return 0;
}
@@ -614,19 +586,22 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
void *cb_arg)
{
struct device *dev = q->lif->ionic->dev;
+ struct ionic_desc_info *desc_info;
struct ionic_lif *lif = q->lif;
- q->head->cb = cb;
- q->head->cb_arg = cb_arg;
- q->head = q->head->next;
+ desc_info = &q->info[q->head_idx];
+ desc_info->cb = cb;
+ desc_info->cb_arg = cb_arg;
+
+ q->head_idx = (q->head_idx + 1) & (q->num_descs - 1);
dev_dbg(dev, "lif=%d qname=%s qid=%d qtype=%d p_index=%d ringdb=%d\n",
q->lif->index, q->name, q->hw_type, q->hw_index,
- q->head->index, ring_doorbell);
+ q->head_idx, ring_doorbell);
if (ring_doorbell)
ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
- q->dbval | q->head->index);
+ q->dbval | q->head_idx);
}
static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
@@ -634,8 +609,8 @@ static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
unsigned int mask, tail, head;
mask = q->num_descs - 1;
- tail = q->tail->index;
- head = q->head->index;
+ tail = q->tail_idx;
+ head = q->head_idx;
return ((pos - tail) & mask) < ((head - tail) & mask);
}
@@ -646,20 +621,22 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
struct ionic_desc_info *desc_info;
ionic_desc_cb cb;
void *cb_arg;
+ u16 index;
/* check for empty queue */
- if (q->tail->index == q->head->index)
+ if (q->tail_idx == q->head_idx)
return;
/* stop index must be for a descriptor that is not yet completed */
if (unlikely(!ionic_q_is_posted(q, stop_index)))
dev_err(q->lif->ionic->dev,
"ionic stop is not posted %s stop %u tail %u head %u\n",
- q->name, stop_index, q->tail->index, q->head->index);
+ q->name, stop_index, q->tail_idx, q->head_idx);
do {
- desc_info = q->tail;
- q->tail = desc_info->next;
+ desc_info = &q->info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
cb = desc_info->cb;
cb_arg = desc_info->cb_arg;
@@ -669,5 +646,5 @@ void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
if (cb)
cb(q, desc_info, cq_info, cb_arg);
- } while (desc_info->index != stop_index);
+ } while (index != stop_index);
}
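
The ionic rework replaces the per-descriptor next/index/left links with bare
head and tail indices; because ionic_q_init() and ionic_cq_init() require
is_power_of_2(num_descs), advancing an index wraps with a mask instead of
chasing pointers. A worked sketch:

    u16 num_descs = 16;                 /* power of two, mask is 0xf */
    u16 idx = 15;                       /* last slot in the ring */

    idx = (idx + 1) & (num_descs - 1);  /* (15 + 1) & 0xf == 0: wraps */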
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index d5cba502abca..c109cd5a0471 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -149,10 +149,13 @@ struct ionic_dev {
};
struct ionic_cq_info {
- void *cq_desc;
- struct ionic_cq_info *next;
- unsigned int index;
- bool last;
+ union {
+ void *cq_desc;
+ struct ionic_txq_comp *txcq;
+ struct ionic_rxq_comp *rxcq;
+ struct ionic_admin_comp *admincq;
+ struct ionic_notifyq_event *notifyq;
+ };
};
struct ionic_queue;
@@ -169,11 +172,17 @@ struct ionic_page_info {
};
struct ionic_desc_info {
- void *desc;
- void *sg_desc;
- struct ionic_desc_info *next;
- unsigned int index;
- unsigned int left;
+ union {
+ void *desc;
+ struct ionic_txq_desc *txq_desc;
+ struct ionic_rxq_desc *rxq_desc;
+ struct ionic_admin_cmd *adminq_desc;
+ };
+ union {
+ void *sg_desc;
+ struct ionic_txq_sg_desc *txq_sg_desc;
+ struct ionic_rxq_sg_desc *rxq_sgl_desc;
+ };
unsigned int npages;
struct ionic_page_info pages[IONIC_RX_MAX_SG_ELEMS + 1];
ionic_desc_cb cb;
@@ -183,25 +192,35 @@ struct ionic_desc_info {
#define IONIC_QUEUE_NAME_MAX_SZ 32
struct ionic_queue {
+ struct device *dev;
+ struct ionic_lif *lif;
+ struct ionic_desc_info *info;
+ u16 head_idx;
+ u16 tail_idx;
+ unsigned int index;
+ unsigned int num_descs;
u64 dbell_count;
- u64 drop;
u64 stop;
u64 wake;
- struct ionic_lif *lif;
- struct ionic_desc_info *info;
- struct ionic_desc_info *tail;
- struct ionic_desc_info *head;
+ u64 drop;
struct ionic_dev *idev;
- unsigned int index;
unsigned int type;
unsigned int hw_index;
unsigned int hw_type;
u64 dbval;
- void *base;
- void *sg_base;
+ union {
+ void *base;
+ struct ionic_txq_desc *txq;
+ struct ionic_rxq_desc *rxq;
+ struct ionic_admin_cmd *adminq;
+ };
+ union {
+ void *sg_base;
+ struct ionic_txq_sg_desc *txq_sgl;
+ struct ionic_rxq_sg_desc *rxq_sgl;
+ };
dma_addr_t base_pa;
dma_addr_t sg_base_pa;
- unsigned int num_descs;
unsigned int desc_size;
unsigned int sg_desc_size;
unsigned int pid;
@@ -218,20 +237,21 @@ struct ionic_intr_info {
u64 rearm_count;
unsigned int cpu;
cpumask_t affinity_mask;
+ u32 dim_coal_hw;
};
struct ionic_cq {
- void *base;
- dma_addr_t base_pa;
struct ionic_lif *lif;
struct ionic_cq_info *info;
- struct ionic_cq_info *tail;
struct ionic_queue *bound_q;
struct ionic_intr_info *bound_intr;
+ u16 tail_idx;
bool done_color;
unsigned int num_descs;
- u64 compl_count;
unsigned int desc_size;
+ u64 compl_count;
+ void *base;
+ dma_addr_t base_pa;
};
struct ionic;
@@ -246,12 +266,12 @@ static inline void ionic_intr_init(struct ionic_dev *idev,
static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
{
- unsigned int avail = q->tail->index;
+ unsigned int avail = q->tail_idx;
- if (q->head->index >= avail)
- avail += q->head->left - 1;
+ if (q->head_idx >= avail)
+ avail += q->num_descs - q->head_idx - 1;
else
- avail -= q->head->index + 1;
+ avail -= q->head_idx + 1;
return avail;
}
@@ -263,7 +283,6 @@ static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
void ionic_init_devinfo(struct ionic *ionic);
int ionic_dev_setup(struct ionic *ionic);
-void ionic_dev_teardown(struct ionic *ionic);
void ionic_dev_cmd_go(struct ionic_dev *idev, union ionic_dev_cmd *cmd);
u8 ionic_dev_cmd_status(struct ionic_dev *idev);
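
The reworked ionic_q_space_avail() above derives the free-slot count from the
two indices alone, reserving one slot so a full ring stays distinguishable
from an empty one (head == tail means empty). Worked through for a 16-entry
ring:

    u16 num_descs = 16, head_idx = 5, tail_idx = 2;
    u16 avail = tail_idx;

    if (head_idx >= avail)
            avail += num_descs - head_idx - 1;  /* 2 + 16 - 5 - 1 = 12 free */
    else
            avail -= head_idx + 1;              /* head 2, tail 5 -> 2 free */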
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index c4f4fd469fe3..51d64718ed9f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -9,6 +9,15 @@
#include "ionic_lif.h"
#include "ionic_devlink.h"
+static int ionic_dl_flash_update(struct devlink *dl,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic *ionic = devlink_priv(dl);
+
+ return ionic_firmware_update(ionic->lif, params->file_name, extack);
+}
+
static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
@@ -48,6 +57,7 @@ static int ionic_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
static const struct devlink_ops ionic_dl_ops = {
.info_get = ionic_dl_info_get,
+ .flash_update = ionic_dl_flash_update,
};
struct ionic *ionic_devlink_alloc(struct device *dev)
@@ -85,7 +95,7 @@ int ionic_devlink_register(struct ionic *ionic)
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
else
devlink_port_type_eth_set(&ionic->dl_port,
- ionic->master_lif->netdev);
+ ionic->lif->netdev);
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
index 0690172fc57a..5c01a9e306d8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.h
@@ -6,6 +6,9 @@
#include <net/devlink.h>
+int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+ struct netlink_ext_ack *extack);
+
struct ionic *ionic_devlink_alloc(struct device *dev);
void ionic_devlink_free(struct ionic *ionic);
int ionic_devlink_register(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3c57c331729f..ed9808fc743b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -298,8 +298,8 @@ static void ionic_get_pauseparam(struct net_device *netdev,
pause_type = lif->ionic->idev.port_info->config.pause_type;
if (pause_type) {
- pause->rx_pause = pause_type & IONIC_PAUSE_F_RX ? 1 : 0;
- pause->tx_pause = pause_type & IONIC_PAUSE_F_TX ? 1 : 0;
+ pause->rx_pause = (pause_type & IONIC_PAUSE_F_RX) ? 1 : 0;
+ pause->tx_pause = (pause_type & IONIC_PAUSE_F_TX) ? 1 : 0;
}
}
@@ -406,6 +406,13 @@ static int ionic_get_coalesce(struct net_device *netdev,
coalesce->tx_coalesce_usecs = lif->tx_coalesce_usecs;
coalesce->rx_coalesce_usecs = lif->rx_coalesce_usecs;
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ coalesce->use_adaptive_tx_coalesce = test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ else
+ coalesce->use_adaptive_tx_coalesce = 0;
+
+ coalesce->use_adaptive_rx_coalesce = test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+
return 0;
}
@@ -414,10 +421,9 @@ static int ionic_set_coalesce(struct net_device *netdev,
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic_identity *ident;
- struct ionic_qcq *qcq;
+ u32 rx_coal, rx_dim;
+ u32 tx_coal, tx_dim;
unsigned int i;
- u32 rx_coal;
- u32 tx_coal;
ident = &lif->ionic->ident;
if (ident->dev.intr_coal_div == 0) {
@@ -426,10 +432,11 @@ static int ionic_set_coalesce(struct net_device *netdev,
return -EIO;
}
- /* Tx normally shares Rx interrupt, so only change Rx */
+ /* Tx normally shares Rx interrupt, so only change Rx if not split */
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) &&
- coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs) {
- netdev_warn(netdev, "only the rx-usecs can be changed\n");
+ (coalesce->tx_coalesce_usecs != lif->rx_coalesce_usecs ||
+ coalesce->use_adaptive_tx_coalesce)) {
+ netdev_warn(netdev, "only rx parameters can be changed\n");
return -EINVAL;
}
@@ -449,32 +456,44 @@ static int ionic_set_coalesce(struct net_device *netdev,
/* Save the new values */
lif->rx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (rx_coal != lif->rx_coalesce_hw) {
- lif->rx_coalesce_hw = rx_coal;
-
- if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->rxqcqs[i].qcq;
- ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index,
- lif->rx_coalesce_hw);
- }
- }
- }
+ lif->rx_coalesce_hw = rx_coal;
if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
lif->tx_coalesce_usecs = coalesce->tx_coalesce_usecs;
else
lif->tx_coalesce_usecs = coalesce->rx_coalesce_usecs;
- if (tx_coal != lif->tx_coalesce_hw) {
- lif->tx_coalesce_hw = tx_coal;
+ lif->tx_coalesce_hw = tx_coal;
- if (test_bit(IONIC_LIF_F_UP, lif->state)) {
- for (i = 0; i < lif->nxqs; i++) {
- qcq = lif->txqcqs[i].qcq;
+ if (coalesce->use_adaptive_rx_coalesce) {
+ set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ rx_dim = rx_coal;
+ } else {
+ clear_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ rx_dim = 0;
+ }
+
+ if (coalesce->use_adaptive_tx_coalesce) {
+ set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ tx_dim = tx_coal;
+ } else {
+ clear_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
+ tx_dim = 0;
+ }
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ for (i = 0; i < lif->nxqs; i++) {
+ if (lif->rxqcqs[i]->flags & IONIC_QCQ_F_INTR) {
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- qcq->intr.index,
+ lif->rxqcqs[i]->intr.index,
+ lif->rx_coalesce_hw);
+ lif->rxqcqs[i]->intr.dim_coal_hw = rx_dim;
+ }
+
+ if (lif->txqcqs[i]->flags & IONIC_QCQ_F_INTR) {
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
+ lif->txqcqs[i]->intr.dim_coal_hw = tx_dim;
}
}
}
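
use_adaptive_rx_coalesce and use_adaptive_tx_coalesce are the kernel-side view
of ethtool's adaptive moderation toggles (roughly what "ethtool -C <dev>
adaptive-rx on" sets), and the hunk above arms DIM with the current hardware
value through dim_coal_hw. A minimal sketch of a driver consuming those
fields, with hypothetical helpers:

    static int foo_set_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *coal)
    {
            if (coal->use_adaptive_rx_coalesce)
                    foo_arm_dim(netdev, coal->rx_coalesce_usecs);  /* DIM retunes */
            else
                    foo_pin_coal(netdev, coal->rx_coalesce_usecs); /* fixed value */

            return 0;
    }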
@@ -493,18 +512,14 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->rx_pending = lif->nrxq_descs;
}
-static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
-{
- struct ethtool_ringparam *ring = arg;
-
- lif->ntxq_descs = ring->tx_pending;
- lif->nrxq_descs = ring->rx_pending;
-}
-
static int ionic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ struct ionic_queue_params qparam;
+ int err;
+
+ ionic_init_queue_params(lif, &qparam);
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -522,7 +537,28 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- return ionic_reset_queues(lif, ionic_set_ringsize, ring);
+ if (ring->tx_pending != lif->ntxq_descs)
+ netdev_info(netdev, "Changing Tx ring size from %d to %d\n",
+ lif->ntxq_descs, ring->tx_pending);
+
+ if (ring->rx_pending != lif->nrxq_descs)
+ netdev_info(netdev, "Changing Rx ring size from %d to %d\n",
+ lif->nrxq_descs, ring->rx_pending);
+
+ /* if we're not running, just set the values and return */
+ if (!netif_running(lif->netdev)) {
+ lif->ntxq_descs = ring->tx_pending;
+ lif->nrxq_descs = ring->rx_pending;
+ return 0;
+ }
+
+ qparam.ntxq_descs = ring->tx_pending;
+ qparam.nrxq_descs = ring->rx_pending;
+ err = ionic_reconfigure_queues(lif, &qparam);
+ if (err)
+ netdev_info(netdev, "Ring reconfiguration failed, changes canceled: %d\n", err);
+
+ return err;
}
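A hedged usage example for this path (illustrative device name): "ethtool -G eth0 rx 4096 tx 4096". When the interface is down only the soft values are recorded; when it is up, ionic_reconfigure_queues() builds replacement rings before touching the live ones, so a failed allocation leaves the original rings in service.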
static void ionic_get_channels(struct net_device *netdev,
@@ -544,32 +580,15 @@ static void ionic_get_channels(struct net_device *netdev,
}
}
-static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
-{
- struct ethtool_channels *ch = arg;
-
- if (ch->combined_count) {
- lif->nxqs = ch->combined_count;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
- clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
- lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
- lif->tx_coalesce_hw = lif->rx_coalesce_hw;
- netdev_info(lif->netdev, "Sharing queue interrupts\n");
- }
- } else {
- lif->nxqs = ch->rx_count;
- if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
- set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
- netdev_info(lif->netdev, "Splitting queue interrupts\n");
- }
- }
-}
-
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
- int new_cnt;
+ struct ionic_queue_params qparam;
+ int max_cnt;
+ int err;
+
+ ionic_init_queue_params(lif, &qparam);
if (ch->rx_count != ch->tx_count) {
netdev_info(netdev, "The rx and tx count must be equal\n");
@@ -577,20 +596,63 @@ static int ionic_set_channels(struct net_device *netdev,
}
if (ch->combined_count && ch->rx_count) {
- netdev_info(netdev, "Use either combined_count or rx/tx_count, not both\n");
+ netdev_info(netdev, "Use either combined or rx and tx, not both\n");
return -EINVAL;
}
- if (ch->combined_count)
- new_cnt = ch->combined_count;
- else
- new_cnt = ch->rx_count;
+ max_cnt = lif->ionic->ntxqs_per_lif;
+ if (ch->combined_count) {
+ if (ch->combined_count > max_cnt)
+ return -EINVAL;
+
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netdev_info(lif->netdev, "Sharing queue interrupts\n");
+ else if (ch->combined_count == lif->nxqs)
+ return 0;
- if (lif->nxqs != new_cnt)
- netdev_info(netdev, "Changing queue count from %d to %d\n",
- lif->nxqs, new_cnt);
+ if (lif->nxqs != ch->combined_count)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, ch->combined_count);
- return ionic_reset_queues(lif, ionic_set_queuecount, ch);
+ qparam.nxqs = ch->combined_count;
+ qparam.intr_split = 0;
+ } else {
+ max_cnt /= 2;
+ if (ch->rx_count > max_cnt)
+ return -EINVAL;
+
+ if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ netdev_info(lif->netdev, "Splitting queue interrupts\n");
+ else if (ch->rx_count == lif->nxqs)
+ return 0;
+
+ if (lif->nxqs != ch->rx_count)
+ netdev_info(netdev, "Changing queue count from %d to %d\n",
+ lif->nxqs, ch->rx_count);
+
+ qparam.nxqs = ch->rx_count;
+ qparam.intr_split = 1;
+ }
+
+ /* if we're not running, just set the values and return */
+ if (!netif_running(lif->netdev)) {
+ lif->nxqs = qparam.nxqs;
+
+ if (qparam.intr_split) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ } else {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ }
+ return 0;
+ }
+
+ err = ionic_reconfigure_queues(lif, &qparam);
+ if (err)
+ netdev_info(netdev, "Queue reconfiguration failed, changes canceled: %d\n", err);
+
+ return err;
}
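Usage sketch (device name illustrative): "ethtool -L eth0 combined 8" keeps each Tx/Rx pair on one shared interrupt, while "ethtool -L eth0 rx 4 tx 4" requests the split-interrupt layout. Note the halved limit in the split branch above: separate Tx and Rx interrupts consume roughly twice the per-lif resources, so only max_cnt / 2 pairs are allowed.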
static u32 ionic_get_priv_flags(struct net_device *netdev)
@@ -807,7 +869,9 @@ static int ionic_nway_reset(struct net_device *netdev)
}
static const struct ethtool_ops ionic_ethtool_ops = {
- .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_TX,
.get_drvinfo = ionic_get_drvinfo,
.get_regs_len = ionic_get_regs_len,
.get_regs = ionic_get_regs,
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_fw.c b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
new file mode 100644
index 000000000000..f492ae406a60
--- /dev/null
+++ b/drivers/net/ethernet/pensando/ionic/ionic_fw.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 Pensando Systems, Inc */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/firmware.h>
+
+#include "ionic.h"
+#include "ionic_dev.h"
+#include "ionic_lif.h"
+#include "ionic_devlink.h"
+
+/* The worst case wait for the install activity is about 25 minutes when
+ * installing a new CPLD, which happens very seldom.  A normal update
+ * takes about 30-35 seconds.  Since the driver can't tell whether a CPLD
+ * update will happen, we set the timeout for the worst case.
+ */
+#define IONIC_FW_INSTALL_TIMEOUT (25 * 60)
+#define IONIC_FW_SELECT_TIMEOUT 30
+
+/* Number of periodic status updates during fw file download */
+#define IONIC_FW_INTERVAL_FRACTION 32
+
+static void ionic_dev_cmd_firmware_download(struct ionic_dev *idev, u64 addr,
+ u32 offset, u32 length)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_download.opcode = IONIC_CMD_FW_DOWNLOAD,
+ .fw_download.offset = offset,
+ .fw_download.addr = addr,
+ .fw_download.length = length
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static void ionic_dev_cmd_firmware_install(struct ionic_dev *idev)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = IONIC_FW_INSTALL_ASYNC
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static void ionic_dev_cmd_firmware_activate(struct ionic_dev *idev, u8 slot)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = IONIC_FW_ACTIVATE_ASYNC,
+ .fw_control.slot = slot
+ };
+
+ ionic_dev_cmd_go(idev, &cmd);
+}
+
+static int ionic_fw_status_long_wait(struct ionic *ionic,
+ const char *label,
+ unsigned long timeout,
+ u8 fw_cmd,
+ struct netlink_ext_ack *extack)
+{
+ union ionic_dev_cmd cmd = {
+ .fw_control.opcode = IONIC_CMD_FW_CONTROL,
+ .fw_control.oper = fw_cmd,
+ };
+ unsigned long start_time;
+ unsigned long end_time;
+ int err;
+
+ start_time = jiffies;
+ end_time = start_time + (timeout * HZ);
+ do {
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_go(&ionic->idev, &cmd);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+
+ msleep(20);
+ } while (time_before(jiffies, end_time) && (err == -EAGAIN || err == -ETIMEDOUT));
+
+ if (err == -EAGAIN || err == -ETIMEDOUT) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait timed out");
+ dev_err(ionic->dev, "DEV_CMD firmware wait %s timed out\n", label);
+ } else if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Firmware wait failed");
+ }
+
+ return err;
+}
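This helper is the polling side of the new asynchronous firmware opcodes: it reissues the given *_STATUS operation, sleeping 20 ms between attempts, until the device stops answering -EAGAIN or -ETIMEDOUT or the overall deadline of timeout seconds passes, and only then treats the result as final.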
+
+int ionic_firmware_update(struct ionic_lif *lif, const char *fw_name,
+ struct netlink_ext_ack *extack)
+{
+ struct ionic_dev *idev = &lif->ionic->idev;
+ struct net_device *netdev = lif->netdev;
+ struct ionic *ionic = lif->ionic;
+ union ionic_dev_cmd_comp comp;
+ u32 buf_sz, copy_sz, offset;
+ const struct firmware *fw;
+ struct devlink *dl;
+ int next_interval;
+ int err = 0;
+ u8 fw_slot;
+
+ netdev_info(netdev, "Installing firmware %s\n", fw_name);
+
+ dl = priv_to_devlink(ionic);
+ devlink_flash_update_begin_notify(dl);
+ devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
+
+ err = request_firmware(&fw, fw_name, ionic->dev);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to find firmware file");
+ goto err_out;
+ }
+
+ buf_sz = sizeof(idev->dev_cmd_regs->data);
+
+ netdev_dbg(netdev,
+ "downloading firmware - size %d part_sz %d nparts %lu\n",
+ (int)fw->size, buf_sz, DIV_ROUND_UP(fw->size, buf_sz));
+
+ offset = 0;
+ next_interval = 0;
+ while (offset < fw->size) {
+ if (offset >= next_interval) {
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ offset, fw->size);
+ next_interval = offset + (fw->size / IONIC_FW_INTERVAL_FRACTION);
+ }
+
+ copy_sz = min_t(unsigned int, buf_sz, fw->size - offset);
+ mutex_lock(&ionic->dev_cmd_lock);
+ memcpy_toio(&idev->dev_cmd_regs->data, fw->data + offset, copy_sz);
+ ionic_dev_cmd_firmware_download(idev,
+ offsetof(union ionic_dev_cmd_regs, data),
+ offset, copy_sz);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ netdev_err(netdev,
+ "download failed offset 0x%x addr 0x%lx len 0x%x\n",
+ offset, offsetof(union ionic_dev_cmd_regs, data),
+ copy_sz);
+ NL_SET_ERR_MSG_MOD(extack, "Segment download failed");
+ goto err_out;
+ }
+ offset += copy_sz;
+ }
+ devlink_flash_update_status_notify(dl, "Downloading", NULL,
+ fw->size, fw->size);
+
+ devlink_flash_update_timeout_notify(dl, "Installing", NULL,
+ IONIC_FW_INSTALL_TIMEOUT);
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_firmware_install(idev);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
+ fw_slot = comp.fw_control.slot;
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware install");
+ goto err_out;
+ }
+
+ err = ionic_fw_status_long_wait(ionic, "Installing",
+ IONIC_FW_INSTALL_TIMEOUT,
+ IONIC_FW_INSTALL_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ devlink_flash_update_timeout_notify(dl, "Selecting", NULL,
+ IONIC_FW_SELECT_TIMEOUT);
+
+ mutex_lock(&ionic->dev_cmd_lock);
+ ionic_dev_cmd_firmware_activate(idev, fw_slot);
+ err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
+ mutex_unlock(&ionic->dev_cmd_lock);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to start firmware select");
+ goto err_out;
+ }
+
+ err = ionic_fw_status_long_wait(ionic, "Selecting",
+ IONIC_FW_SELECT_TIMEOUT,
+ IONIC_FW_ACTIVATE_STATUS,
+ extack);
+ if (err)
+ goto err_out;
+
+ netdev_info(netdev, "Firmware update completed\n");
+
+err_out:
+ if (err)
+ devlink_flash_update_status_notify(dl, "Flash failed", NULL, 0, 0);
+ else
+ devlink_flash_update_status_notify(dl, "Flash done", NULL, 0, 0);
+ release_firmware(fw);
+ devlink_flash_update_end_notify(dl);
+ return err;
+}
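A hedged usage example for this flow, assuming a PCI device address and firmware file name: "devlink dev flash pci/0000:42:00.0 file ionic_fw.bin". The strings handed to devlink_flash_update_status_notify() and devlink_flash_update_timeout_notify() ("Downloading", "Installing", "Selecting") are what that command reports as progress, with the timeout variants telling userspace how long a silent wait may legitimately last.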
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index acc94b244cf3..31ccfcdc2b0a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -63,8 +63,10 @@ enum ionic_cmd_opcode {
IONIC_CMD_QOS_RESET = 245,
/* Firmware commands */
- IONIC_CMD_FW_DOWNLOAD = 254,
- IONIC_CMD_FW_CONTROL = 255,
+ IONIC_CMD_FW_DOWNLOAD = 252,
+ IONIC_CMD_FW_CONTROL = 253,
+ IONIC_CMD_FW_DOWNLOAD_V1 = 254,
+ IONIC_CMD_FW_CONTROL_V1 = 255,
};
/**
@@ -94,6 +96,7 @@ enum ionic_status_code {
IONIC_RC_ERROR = 29, /* Generic error */
IONIC_RC_ERDMA = 30, /* Generic RDMA error */
IONIC_RC_EVFID = 31, /* VF ID does not exist */
+ IONIC_RC_EBAD_FW = 32, /* FW file is invalid or corrupted */
};
enum ionic_notifyq_opcode {
@@ -2069,14 +2072,23 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
/**
* enum ionic_fw_control_oper - FW control operations
- * @IONIC_FW_RESET: Reset firmware
- * @IONIC_FW_INSTALL: Install firmware
- * @IONIC_FW_ACTIVATE: Activate firmware
+ * @IONIC_FW_RESET: Reset firmware
+ * @IONIC_FW_INSTALL: Install firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
+ * @IONIC_FW_INSTALL_ASYNC: Install firmware asynchronously
+ * @IONIC_FW_INSTALL_STATUS: Firmware installation status
+ * @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
+ * @IONIC_FW_ACTIVATE_STATUS: Firmware activate status
*/
enum ionic_fw_control_oper {
- IONIC_FW_RESET = 0,
- IONIC_FW_INSTALL = 1,
- IONIC_FW_ACTIVATE = 2,
+ IONIC_FW_RESET = 0,
+ IONIC_FW_INSTALL = 1,
+ IONIC_FW_ACTIVATE = 2,
+ IONIC_FW_INSTALL_ASYNC = 3,
+ IONIC_FW_INSTALL_STATUS = 4,
+ IONIC_FW_ACTIVATE_ASYNC = 5,
+ IONIC_FW_ACTIVATE_STATUS = 6,
+ IONIC_FW_UPDATE_CLEANUP = 7,
};
/**
@@ -2689,6 +2701,9 @@ union ionic_dev_cmd {
struct ionic_q_identify_cmd q_identify;
struct ionic_q_init_cmd q_init;
struct ionic_q_control_cmd q_control;
+
+ struct ionic_fw_download_cmd fw_download;
+ struct ionic_fw_control_cmd fw_control;
};
union ionic_dev_cmd_comp {
@@ -2722,6 +2737,9 @@ union ionic_dev_cmd_comp {
struct ionic_q_identify_comp q_identify;
struct ionic_q_init_comp q_init;
+
+ ionic_fw_download_comp fw_download;
+ struct ionic_fw_control_comp fw_control;
};
/**
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 26988ad7ec97..d655a7ae3058 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -36,25 +36,44 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
+static void ionic_txrx_deinit(struct ionic_lif *lif);
+static int ionic_txrx_init(struct ionic_lif *lif);
static int ionic_start_queues(struct ionic_lif *lif);
static void ionic_stop_queues(struct ionic_lif *lif);
static void ionic_lif_queue_identify(struct ionic_lif *lif);
+static void ionic_dim_work(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct dim_cq_moder cur_moder;
+ struct ionic_qcq *qcq;
+ u32 new_coal;
+
+ cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ qcq = container_of(dim, struct ionic_qcq, dim);
+ new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
+ qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
+ dim->state = DIM_START_MEASURE;
+}
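For context, a minimal sketch of the producer side this worker depends on: DIM only steps through its profiles when the napi poll path feeds it samples. The pkts/bytes field names on the queue stats are assumptions here, not taken from this patch; the rest uses structures visible elsewhere in the series.

	/* hedged sketch: feed DIM a sample when rx napi polling completes;
	 * pkts/bytes counter names are assumed for illustration
	 */
	static void ionic_dim_update_sketch(struct ionic_qcq *qcq)
	{
		struct ionic_lif *lif = qcq->q.lif;
		struct dim_sample dim_sample;

		if (!test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			return;

		dim_update_sample(qcq->cq.bound_intr->rearm_count,
				  lif->rxqstats[qcq->q.index].pkts,
				  lif->rxqstats[qcq->q.index].bytes,
				  &dim_sample);
		net_dim(&qcq->dim, dim_sample);
	}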
+
static void ionic_lif_deferred_work(struct work_struct *work)
{
struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
struct ionic_deferred *def = &lif->deferred;
struct ionic_deferred_work *w = NULL;
- spin_lock_bh(&def->lock);
- if (!list_empty(&def->list)) {
- w = list_first_entry(&def->list,
- struct ionic_deferred_work, list);
- list_del(&w->list);
- }
- spin_unlock_bh(&def->lock);
+ do {
+ spin_lock_bh(&def->lock);
+ if (!list_empty(&def->list)) {
+ w = list_first_entry(&def->list,
+ struct ionic_deferred_work, list);
+ list_del(&w->list);
+ }
+ spin_unlock_bh(&def->lock);
+
+ if (!w)
+ break;
- if (w) {
switch (w->type) {
case IONIC_DW_TYPE_RX_MODE:
ionic_lif_rx_mode(lif, w->rx_mode);
@@ -78,8 +97,8 @@ static void ionic_lif_deferred_work(struct work_struct *work)
break;
}
kfree(w);
- schedule_work(&def->work);
- }
+ w = NULL;
+ } while (true);
}
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
@@ -135,7 +154,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
}
-void ionic_link_status_check_request(struct ionic_lif *lif)
+void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
{
struct ionic_deferred_work *work;
@@ -143,10 +162,12 @@ void ionic_link_status_check_request(struct ionic_lif *lif)
if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
- if (in_interrupt()) {
+ if (!can_sleep) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
- if (!work)
+ if (!work) {
+ clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
return;
+ }
work->type = IONIC_DW_TYPE_LINK_STATUS;
ionic_lif_deferred_enqueue(&lif->deferred, work);
@@ -243,31 +264,30 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static int ionic_qcq_disable(struct ionic_qcq *qcq)
+static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
{
- struct ionic_queue *q = &qcq->q;
- struct ionic_lif *lif = q->lif;
- struct ionic_dev *idev;
- struct device *dev;
+ struct ionic_queue *q;
+ struct ionic_lif *lif;
+ int err = 0;
struct ionic_admin_ctx ctx = {
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
.cmd.q_control = {
.opcode = IONIC_CMD_Q_CONTROL,
- .lif_index = cpu_to_le16(lif->index),
- .type = q->type,
- .index = cpu_to_le32(q->index),
.oper = IONIC_Q_DISABLE,
},
};
- idev = &lif->ionic->idev;
- dev = lif->ionic->dev;
+ if (!qcq)
+ return -ENXIO;
- dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
- ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ q = &qcq->q;
+ lif = q->lif;
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ struct ionic_dev *idev = &lif->ionic->idev;
+
+ cancel_work_sync(&qcq->dim.work);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
synchronize_irq(qcq->intr.vector);
@@ -275,7 +295,17 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq)
napi_disable(&qcq->napi);
}
- return ionic_adminq_post_wait(lif, &ctx);
+ if (send_to_hw) {
+ ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
+ ctx.cmd.q_control.type = q->type;
+ ctx.cmd.q_control.index = cpu_to_le32(q->index);
+ dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+
+ err = ionic_adminq_post_wait(lif, &ctx);
+ }
+
+ return err;
}
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -297,6 +327,18 @@ static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
qcq->flags &= ~IONIC_QCQ_F_INITED;
}
+static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
+ return;
+
+ irq_set_affinity_hint(qcq->intr.vector, NULL);
+ devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
+ qcq->intr.vector = 0;
+ ionic_intr_free(lif->ionic, qcq->intr.index);
+ qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
+}
+
static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
@@ -306,51 +348,62 @@ static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
ionic_debugfs_del_qcq(qcq);
- dma_free_coherent(dev, qcq->total_size, qcq->base, qcq->base_pa);
- qcq->base = NULL;
- qcq->base_pa = 0;
+ if (qcq->q_base) {
+ dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
+ qcq->q_base = NULL;
+ qcq->q_base_pa = 0;
+ }
- if (qcq->flags & IONIC_QCQ_F_INTR) {
- irq_set_affinity_hint(qcq->intr.vector, NULL);
- devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
- qcq->intr.vector = 0;
- ionic_intr_free(lif->ionic, qcq->intr.index);
+ if (qcq->cq_base) {
+ dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
+ qcq->cq_base = NULL;
+ qcq->cq_base_pa = 0;
+ }
+
+ if (qcq->sg_base) {
+ dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
+ qcq->sg_base = NULL;
+ qcq->sg_base_pa = 0;
}
- devm_kfree(dev, qcq->cq.info);
- qcq->cq.info = NULL;
- devm_kfree(dev, qcq->q.info);
- qcq->q.info = NULL;
- devm_kfree(dev, qcq);
+ ionic_qcq_intr_free(lif, qcq);
+
+ if (qcq->cq.info) {
+ devm_kfree(dev, qcq->cq.info);
+ qcq->cq.info = NULL;
+ }
+ if (qcq->q.info) {
+ devm_kfree(dev, qcq->q.info);
+ qcq->q.info = NULL;
+ }
}
static void ionic_qcqs_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
- unsigned int i;
if (lif->notifyqcq) {
ionic_qcq_free(lif, lif->notifyqcq);
+ devm_kfree(dev, lif->notifyqcq);
lif->notifyqcq = NULL;
}
if (lif->adminqcq) {
ionic_qcq_free(lif, lif->adminqcq);
+ devm_kfree(dev, lif->adminqcq);
lif->adminqcq = NULL;
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
+ devm_kfree(dev, lif->rxqstats);
+ lif->rxqstats = NULL;
devm_kfree(dev, lif->rxqcqs);
lif->rxqcqs = NULL;
}
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
+ devm_kfree(dev, lif->txqstats);
+ lif->txqstats = NULL;
devm_kfree(dev, lif->txqcqs);
lif->txqcqs = NULL;
}
@@ -368,6 +421,53 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
n_qcq->intr.index = src_qcq->intr.index;
}
+static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
+{
+ int err;
+
+ if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
+ qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
+ return 0;
+ }
+
+ err = ionic_intr_alloc(lif, &qcq->intr);
+ if (err) {
+ netdev_warn(lif->netdev, "no intr for %s: %d\n",
+ qcq->q.name, err);
+ goto err_out;
+ }
+
+ err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
+ if (err < 0) {
+ netdev_warn(lif->netdev, "no vector for %s: %d\n",
+ qcq->q.name, err);
+ goto err_out_free_intr;
+ }
+ qcq->intr.vector = err;
+ ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
+ IONIC_INTR_MASK_SET);
+
+ err = ionic_request_irq(lif, qcq);
+ if (err) {
+ netdev_warn(lif->netdev, "irq request failed %d\n", err);
+ goto err_out_free_intr;
+ }
+
+	/* try to get the irq on the local NUMA node first */
+ qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
+ dev_to_node(lif->ionic->dev));
+ if (qcq->intr.cpu != -1)
+ cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+
+ netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
+ return 0;
+
+err_out_free_intr:
+ ionic_intr_free(lif->ionic, qcq->intr.index);
+err_out:
+ return err;
+}
+
static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int index,
const char *name, unsigned int flags,
@@ -377,7 +477,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
unsigned int pid, struct ionic_qcq **qcq)
{
struct ionic_dev *idev = &lif->ionic->idev;
- u32 q_size, cq_size, sg_size, total_size;
struct device *dev = lif->ionic->dev;
void *q_base, *cq_base, *sg_base;
dma_addr_t cq_base_pa = 0;
@@ -388,21 +487,6 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
*qcq = NULL;
- q_size = num_descs * desc_size;
- cq_size = num_descs * cq_desc_size;
- sg_size = num_descs * sg_desc_size;
-
- total_size = ALIGN(q_size, PAGE_SIZE) + ALIGN(cq_size, PAGE_SIZE);
- /* Note: aligning q_size/cq_size is not enough due to cq_base
- * address aligning as q_base could be not aligned to the page.
- * Adding PAGE_SIZE.
- */
- total_size += PAGE_SIZE;
- if (flags & IONIC_QCQ_F_SG) {
- total_size += ALIGN(sg_size, PAGE_SIZE);
- total_size += PAGE_SIZE;
- }
-
new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
if (!new) {
netdev_err(lif->netdev, "Cannot allocate queue structure\n");
@@ -417,7 +501,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
if (!new->q.info) {
netdev_err(lif->netdev, "Cannot allocate queue info\n");
err = -ENOMEM;
- goto err_out;
+ goto err_out_free_qcq;
}
new->q.type = type;
@@ -426,41 +510,12 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
desc_size, sg_desc_size, pid);
if (err) {
netdev_err(lif->netdev, "Cannot initialize queue\n");
- goto err_out;
+ goto err_out_free_q_info;
}
- if (flags & IONIC_QCQ_F_INTR) {
- err = ionic_intr_alloc(lif, &new->intr);
- if (err) {
- netdev_warn(lif->netdev, "no intr for %s: %d\n",
- name, err);
- goto err_out;
- }
-
- err = ionic_bus_get_irq(lif->ionic, new->intr.index);
- if (err < 0) {
- netdev_warn(lif->netdev, "no vector for %s: %d\n",
- name, err);
- goto err_out_free_intr;
- }
- new->intr.vector = err;
- ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
- IONIC_INTR_MASK_SET);
-
- err = ionic_request_irq(lif, new);
- if (err) {
- netdev_warn(lif->netdev, "irq request failed %d\n", err);
- goto err_out_free_intr;
- }
-
- new->intr.cpu = cpumask_local_spread(new->intr.index,
- dev_to_node(dev));
- if (new->intr.cpu != -1)
- cpumask_set_cpu(new->intr.cpu,
- &new->intr.affinity_mask);
- } else {
- new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
- }
+ err = ionic_alloc_qcq_interrupt(lif, new);
+ if (err)
+ goto err_out;
new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
GFP_KERNEL);
@@ -473,46 +528,95 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
if (err) {
netdev_err(lif->netdev, "Cannot initialize completion queue\n");
- goto err_out_free_irq;
+ goto err_out_free_cq_info;
}
- new->base = dma_alloc_coherent(dev, total_size, &new->base_pa,
- GFP_KERNEL);
- if (!new->base) {
- netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
- err = -ENOMEM;
- goto err_out_free_irq;
- }
-
- new->total_size = total_size;
+ if (flags & IONIC_QCQ_F_NOTIFYQ) {
+ int q_size, cq_size;
- q_base = new->base;
- q_base_pa = new->base_pa;
+ /* q & cq need to be contiguous in case of notifyq */
+ q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
+ cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
- cq_base = (void *)ALIGN((uintptr_t)q_base + q_size, PAGE_SIZE);
- cq_base_pa = ALIGN(q_base_pa + q_size, PAGE_SIZE);
+ new->q_size = PAGE_SIZE + q_size + cq_size;
+ new->q_base = dma_alloc_coherent(dev, new->q_size,
+ &new->q_base_pa, GFP_KERNEL);
+ if (!new->q_base) {
+ netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+ ionic_q_map(&new->q, q_base, q_base_pa);
+
+ cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
+ cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
+ ionic_cq_map(&new->cq, cq_base, cq_base_pa);
+ ionic_cq_bind(&new->cq, &new->q);
+ } else {
+ new->q_size = PAGE_SIZE + (num_descs * desc_size);
+ new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
+ GFP_KERNEL);
+ if (!new->q_base) {
+ netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_cq_info;
+ }
+ q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
+ q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
+ ionic_q_map(&new->q, q_base, q_base_pa);
+
+ new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
+ new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
+ GFP_KERNEL);
+ if (!new->cq_base) {
+ netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_q;
+ }
+ cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
+ cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
+ ionic_cq_map(&new->cq, cq_base, cq_base_pa);
+ ionic_cq_bind(&new->cq, &new->q);
+ }
if (flags & IONIC_QCQ_F_SG) {
- sg_base = (void *)ALIGN((uintptr_t)cq_base + cq_size,
- PAGE_SIZE);
- sg_base_pa = ALIGN(cq_base_pa + cq_size, PAGE_SIZE);
+ new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
+ new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
+ GFP_KERNEL);
+ if (!new->sg_base) {
+ netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
+ err = -ENOMEM;
+ goto err_out_free_cq;
+ }
+ sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
+ sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
}
- ionic_q_map(&new->q, q_base, q_base_pa);
- ionic_cq_map(&new->cq, cq_base, cq_base_pa);
- ionic_cq_bind(&new->cq, &new->q);
+ INIT_WORK(&new->dim.work, ionic_dim_work);
+ new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
*qcq = new;
return 0;
+err_out_free_cq:
+ dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
+err_out_free_q:
+ dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
+err_out_free_cq_info:
+ devm_kfree(dev, new->cq.info);
err_out_free_irq:
- if (flags & IONIC_QCQ_F_INTR)
+ if (flags & IONIC_QCQ_F_INTR) {
devm_free_irq(dev, new->intr.vector, &new->napi);
-err_out_free_intr:
- if (flags & IONIC_QCQ_F_INTR)
ionic_intr_free(lif->ionic, new->intr.index);
+ }
+err_out_free_q_info:
+ devm_kfree(dev, new->q.info);
+err_out_free_qcq:
+ devm_kfree(dev, new);
err_out:
dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
return err;
@@ -521,10 +625,8 @@ err_out:
static int ionic_qcqs_alloc(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
- unsigned int q_list_size;
unsigned int flags;
int err;
- int i;
flags = IONIC_QCQ_F_INTR;
err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
@@ -544,63 +646,50 @@ static int ionic_qcqs_alloc(struct ionic_lif *lif)
sizeof(union ionic_notifyq_comp),
0, lif->kern_pid, &lif->notifyqcq);
if (err)
- goto err_out_free_adminqcq;
+ goto err_out;
ionic_debugfs_add_qcq(lif, lif->notifyqcq);
/* Let the notifyq ride on the adminq interrupt */
ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
}
- q_list_size = sizeof(*lif->txqcqs) * lif->nxqs;
err = -ENOMEM;
- lif->txqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->txqcqs)
- goto err_out_free_notifyqcq;
- for (i = 0; i < lif->nxqs; i++) {
- lif->txqcqs[i].stats = devm_kzalloc(dev,
- sizeof(struct ionic_q_stats),
- GFP_KERNEL);
- if (!lif->txqcqs[i].stats)
- goto err_out_free_tx_stats;
- }
-
- lif->rxqcqs = devm_kzalloc(dev, q_list_size, GFP_KERNEL);
+ goto err_out;
+ lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
if (!lif->rxqcqs)
- goto err_out_free_tx_stats;
- for (i = 0; i < lif->nxqs; i++) {
- lif->rxqcqs[i].stats = devm_kzalloc(dev,
- sizeof(struct ionic_q_stats),
- GFP_KERNEL);
- if (!lif->rxqcqs[i].stats)
- goto err_out_free_rx_stats;
- }
+ goto err_out;
- return 0;
+ lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_tx_stats), GFP_KERNEL);
+ if (!lif->txqstats)
+ goto err_out;
+ lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_rx_stats), GFP_KERNEL);
+ if (!lif->rxqstats)
+ goto err_out;
-err_out_free_rx_stats:
- for (i = 0; i < lif->nxqs; i++)
- if (lif->rxqcqs[i].stats)
- devm_kfree(dev, lif->rxqcqs[i].stats);
- devm_kfree(dev, lif->rxqcqs);
- lif->rxqcqs = NULL;
-err_out_free_tx_stats:
- for (i = 0; i < lif->nxqs; i++)
- if (lif->txqcqs[i].stats)
- devm_kfree(dev, lif->txqcqs[i].stats);
- devm_kfree(dev, lif->txqcqs);
- lif->txqcqs = NULL;
-err_out_free_notifyqcq:
- if (lif->notifyqcq) {
- ionic_qcq_free(lif, lif->notifyqcq);
- lif->notifyqcq = NULL;
- }
-err_out_free_adminqcq:
- ionic_qcq_free(lif, lif->adminqcq);
- lif->adminqcq = NULL;
+ return 0;
+err_out:
+ ionic_qcqs_free(lif);
return err;
}
+static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
+{
+ qcq->q.tail_idx = 0;
+ qcq->q.head_idx = 0;
+ qcq->cq.tail_idx = 0;
+ qcq->cq.done_color = 1;
+ memset(qcq->q_base, 0, qcq->q_size);
+ memset(qcq->cq_base, 0, qcq->cq_size);
+ memset(qcq->sg_base, 0, qcq->sg_size);
+}
+
static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
struct device *dev = lif->ionic->dev;
@@ -626,10 +715,10 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
unsigned int intr_index;
int err;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
+ if (qcq->flags & IONIC_QCQ_F_INTR)
intr_index = qcq->intr.index;
else
- intr_index = lif->rxqcqs[q->index].qcq->intr.index;
+ intr_index = lif->rxqcqs[q->index]->intr.index;
ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
@@ -640,9 +729,7 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
- q->tail = q->info;
- q->head = q->tail;
- cq->tail = cq->info;
+ ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
@@ -697,9 +784,7 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
- q->tail = q->info;
- q->head = q->tail;
- cq->tail = cq->info;
+ ionic_qcq_sanitize(qcq);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
@@ -751,7 +836,7 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
switch (le16_to_cpu(comp->event.ecode)) {
case IONIC_EVENT_LINK_CHANGE:
- ionic_link_status_check_request(lif);
+ ionic_link_status_check_request(lif, false);
break;
case IONIC_EVENT_RESET:
work = kzalloc(sizeof(*work), GFP_ATOMIC);
@@ -771,21 +856,6 @@ static bool ionic_notifyq_service(struct ionic_cq *cq,
return true;
}
-static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
-{
- struct ionic_dev *idev = &lif->ionic->idev;
- struct ionic_cq *cq = &lif->notifyqcq->cq;
- u32 work_done;
-
- work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
- NULL, NULL);
- if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, IONIC_INTR_CRED_RESET_COALESCE);
-
- return work_done;
-}
-
static bool ionic_adminq_service(struct ionic_cq *cq,
struct ionic_cq_info *cq_info)
{
@@ -801,15 +871,36 @@ static bool ionic_adminq_service(struct ionic_cq *cq,
static int ionic_adminq_napi(struct napi_struct *napi, int budget)
{
+ struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
struct ionic_lif *lif = napi_to_cq(napi)->lif;
+ struct ionic_dev *idev = &lif->ionic->idev;
+ unsigned int flags = 0;
int n_work = 0;
int a_work = 0;
+ int work_done;
- if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
- n_work = ionic_notifyq_clean(lif, budget);
- a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
+ if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
+ n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
+ ionic_notifyq_service, NULL, NULL);
+
+ if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
+ a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
+ ionic_adminq_service, NULL, NULL);
+
+ work_done = max(n_work, a_work);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ flags |= IONIC_INTR_CRED_UNMASK;
+ lif->adminqcq->cq.bound_intr->rearm_count++;
+ }
+
+ if (work_done || flags) {
+ flags |= IONIC_INTR_CRED_RESET_COALESCE;
+ ionic_intr_credits(idev->intr_ctrl,
+ intr->index,
+ n_work + a_work, flags);
+ }
- return max(n_work, a_work);
+ return work_done;
}
void ionic_get_stats64(struct net_device *netdev,
@@ -928,9 +1019,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return 0;
}
-static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
+static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
+ bool can_sleep)
{
- struct ionic *ionic = lif->ionic;
struct ionic_deferred_work *work;
unsigned int nmfilters;
unsigned int nufilters;
@@ -940,8 +1031,8 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
* here before checking the need for deferral so that we
* can return an overflow error to the stack.
*/
- nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
- nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
+ nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
+ nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
lif->nmcast++;
@@ -957,7 +1048,7 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
lif->nucast--;
}
- if (in_interrupt()) {
+ if (!can_sleep) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "%s OOM\n", __func__);
@@ -983,12 +1074,22 @@ static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, true);
+ return ionic_lif_addr(netdev_priv(netdev), addr, true, true);
+}
+
+static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
+{
+ return ionic_lif_addr(netdev_priv(netdev), addr, true, false);
}
static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
{
- return ionic_lif_addr(netdev_priv(netdev), addr, false);
+ return ionic_lif_addr(netdev_priv(netdev), addr, false, true);
+}
+
+static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
+{
+ return ionic_lif_addr(netdev_priv(netdev), addr, false, false);
}
static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
@@ -1028,11 +1129,12 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
lif->rx_mode = rx_mode;
}
-static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
+static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode,
+ bool from_ndo)
{
struct ionic_deferred_work *work;
- if (in_interrupt()) {
+ if (from_ndo) {
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work) {
netdev_err(lif->netdev, "%s OOM\n", __func__);
@@ -1047,15 +1149,21 @@ static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
}
}
-static void ionic_set_rx_mode(struct net_device *netdev)
+static void ionic_dev_uc_sync(struct net_device *netdev, bool from_ndo)
+{
+ if (from_ndo)
+ __dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+ else
+ __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
+}
+
+static void ionic_dev_mc_sync(struct net_device *netdev, bool from_ndo)
+{
+	if (from_ndo)
+		__dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
+	else
+		__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
+}
+
+static void ionic_set_rx_mode(struct net_device *netdev, bool from_ndo)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_identity *ident;
unsigned int nfilters;
unsigned int rx_mode;
- ident = &lif->ionic->ident;
-
rx_mode = IONIC_RX_MODE_F_UNICAST;
rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
@@ -1069,8 +1177,8 @@ static void ionic_set_rx_mode(struct net_device *netdev)
* we remove our overflow flag and check the netdev flags
* to see if we can disable NIC PROMISC
*/
- __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
- nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
+ ionic_dev_uc_sync(netdev, from_ndo);
+ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
if (netdev_uc_count(netdev) + 1 > nfilters) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
lif->uc_overflow = true;
@@ -1081,8 +1189,8 @@ static void ionic_set_rx_mode(struct net_device *netdev)
}
/* same for multicast */
- __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
- nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
+	ionic_dev_mc_sync(netdev, from_ndo);
+ nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
if (netdev_mc_count(netdev) > nfilters) {
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
lif->mc_overflow = true;
@@ -1093,7 +1201,12 @@ static void ionic_set_rx_mode(struct net_device *netdev)
}
if (lif->rx_mode != rx_mode)
- _ionic_lif_rx_mode(lif, rx_mode);
+ _ionic_lif_rx_mode(lif, rx_mode, from_ndo);
+}
+
+static void ionic_ndo_set_rx_mode(struct net_device *netdev)
+{
+ ionic_set_rx_mode(netdev, true);
}
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
@@ -1315,6 +1428,35 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa)
return ionic_addr_add(netdev, mac);
}
+static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
+{
+ /* Stop and clean the queues before reconfiguration */
+ mutex_lock(&lif->queue_lock);
+ netif_device_detach(lif->netdev);
+ ionic_stop_queues(lif);
+ ionic_txrx_deinit(lif);
+}
+
+static int ionic_start_queues_reconfig(struct ionic_lif *lif)
+{
+ int err;
+
+ /* Re-init the queues after reconfiguration */
+
+ /* The only way txrx_init can fail here is if communication
+ * with FW is suddenly broken. There's not much we can do
+ * at this point - error messages have already been printed,
+ * so we can continue on and the user can eventually do a
+ * DOWN and UP to try to reset and clear the issue.
+ */
+ err = ionic_txrx_init(lif);
+ mutex_unlock(&lif->queue_lock);
+ ionic_link_status_check_request(lif, true);
+ netif_device_attach(lif->netdev);
+
+ return err;
+}
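Note the deliberately asymmetric locking in this pair: ionic_stop_queues_reconfig() takes lif->queue_lock and ionic_start_queues_reconfig() drops it, so every stop must be matched with a start even on an error path, as the MTU-change and Tx-timeout callers below demonstrate.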
+
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1334,9 +1476,12 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
netdev->mtu = new_mtu;
- err = ionic_reset_queues(lif, NULL, NULL);
+ /* if we're not running, nothing more to do */
+ if (!netif_running(netdev))
+ return 0;
- return err;
+ ionic_stop_queues_reconfig(lif);
+ return ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout_work(struct work_struct *ws)
@@ -1345,9 +1490,14 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
netdev_info(lif->netdev, "Tx Timeout recovery\n");
- rtnl_lock();
- ionic_reset_queues(lif, NULL, NULL);
- rtnl_unlock();
+ /* if we were stopped before this scheduled job was launched,
+ * don't bother the queues as they are already stopped.
+ */
+ if (!netif_running(lif->netdev))
+ return;
+
+ ionic_stop_queues_reconfig(lif);
+ ionic_start_queues_reconfig(lif);
}
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
@@ -1478,22 +1628,16 @@ static void ionic_lif_rss_deinit(struct ionic_lif *lif)
static void ionic_txrx_disable(struct ionic_lif *lif)
{
unsigned int i;
- int err;
+ int err = 0;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->txqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
- }
+ for (i = 0; i < lif->nxqs; i++)
+ err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
- }
+ for (i = 0; i < lif->nxqs; i++)
+ err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
}
}
@@ -1502,18 +1646,18 @@ static void ionic_txrx_deinit(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_tx_flush(&lif->txqcqs[i].qcq->cq);
- ionic_tx_empty(&lif->txqcqs[i].qcq->q);
+ for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
+ ionic_tx_flush(&lif->txqcqs[i]->cq);
+ ionic_tx_empty(&lif->txqcqs[i]->q);
}
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
- ionic_rx_flush(&lif->rxqcqs[i].qcq->cq);
- ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
+ for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
+ ionic_rx_flush(&lif->rxqcqs[i]->cq);
+ ionic_rx_empty(&lif->rxqcqs[i]->q);
}
}
lif->rx_mode = 0;
@@ -1524,16 +1668,18 @@ static void ionic_txrx_free(struct ionic_lif *lif)
unsigned int i;
if (lif->txqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->txqcqs[i].qcq);
- lif->txqcqs[i].qcq = NULL;
+ for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+ devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
+ lif->txqcqs[i] = NULL;
}
}
if (lif->rxqcqs) {
- for (i = 0; i < lif->nxqs; i++) {
- ionic_qcq_free(lif, lif->rxqcqs[i].qcq);
- lif->rxqcqs[i].qcq = NULL;
+ for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
+ lif->rxqcqs[i] = NULL;
}
}
}
@@ -1561,17 +1707,19 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_txq_desc),
sizeof(struct ionic_txq_comp),
sg_desc_sz,
- lif->kern_pid, &lif->txqcqs[i].qcq);
+ lif->kern_pid, &lif->txqcqs[i]);
if (err)
goto err_out;
- if (flags & IONIC_QCQ_F_INTR)
+ if (flags & IONIC_QCQ_F_INTR) {
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->txqcqs[i].qcq->intr.index,
+ lif->txqcqs[i]->intr.index,
lif->tx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
+ lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
+ }
- lif->txqcqs[i].qcq->stats = lif->txqcqs[i].stats;
- ionic_debugfs_add_qcq(lif, lif->txqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
}
flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
@@ -1581,20 +1729,21 @@ static int ionic_txrx_alloc(struct ionic_lif *lif)
sizeof(struct ionic_rxq_desc),
sizeof(struct ionic_rxq_comp),
sizeof(struct ionic_rxq_sg_desc),
- lif->kern_pid, &lif->rxqcqs[i].qcq);
+ lif->kern_pid, &lif->rxqcqs[i]);
if (err)
goto err_out;
ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
- lif->rxqcqs[i].qcq->intr.index,
+ lif->rxqcqs[i]->intr.index,
lif->rx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
+ lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;
if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
- ionic_link_qcq_interrupts(lif->rxqcqs[i].qcq,
- lif->txqcqs[i].qcq);
+ ionic_link_qcq_interrupts(lif->rxqcqs[i],
+ lif->txqcqs[i]);
- lif->rxqcqs[i].qcq->stats = lif->rxqcqs[i].stats;
- ionic_debugfs_add_qcq(lif, lif->rxqcqs[i].qcq);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
}
return 0;
@@ -1611,13 +1760,13 @@ static int ionic_txrx_init(struct ionic_lif *lif)
int err;
for (i = 0; i < lif->nxqs; i++) {
- err = ionic_lif_txq_init(lif, lif->txqcqs[i].qcq);
+ err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
if (err)
goto err_out;
- err = ionic_lif_rxq_init(lif, lif->rxqcqs[i].qcq);
+ err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
if (err) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
goto err_out;
}
}
@@ -1625,14 +1774,14 @@ static int ionic_txrx_init(struct ionic_lif *lif)
if (lif->netdev->features & NETIF_F_RXHASH)
ionic_lif_rss_init(lif);
- ionic_set_rx_mode(lif->netdev);
+ ionic_set_rx_mode(lif->netdev, false);
return 0;
err_out:
while (i--) {
- ionic_lif_qcq_deinit(lif, lif->txqcqs[i].qcq);
- ionic_lif_qcq_deinit(lif, lif->rxqcqs[i].qcq);
+ ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
+ ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
}
return err;
@@ -1640,18 +1789,24 @@ err_out:
static int ionic_txrx_enable(struct ionic_lif *lif)
{
+ int derr = 0;
int i, err;
for (i = 0; i < lif->nxqs; i++) {
- ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
- err = ionic_qcq_enable(lif->rxqcqs[i].qcq);
+ if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
+ dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
+ err = -ENXIO;
+ goto err_out;
+ }
+
+ ionic_rx_fill(&lif->rxqcqs[i]->q);
+ err = ionic_qcq_enable(lif->rxqcqs[i]);
if (err)
goto err_out;
- err = ionic_qcq_enable(lif->txqcqs[i].qcq);
+ err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
- if (err != -ETIMEDOUT)
- ionic_qcq_disable(lif->rxqcqs[i].qcq);
+ derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
goto err_out;
}
}
@@ -1660,12 +1815,8 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out:
while (i--) {
- err = ionic_qcq_disable(lif->txqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
- err = ionic_qcq_disable(lif->rxqcqs[i].qcq);
- if (err == -ETIMEDOUT)
- break;
+ derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
}
return err;
@@ -1688,7 +1839,7 @@ static int ionic_start_queues(struct ionic_lif *lif)
return 0;
}
-int ionic_open(struct net_device *netdev)
+static int ionic_open(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
int err;
@@ -1734,7 +1885,7 @@ static void ionic_stop_queues(struct ionic_lif *lif)
ionic_txrx_disable(lif);
}
-int ionic_stop(struct net_device *netdev)
+static int ionic_stop(struct net_device *netdev)
{
struct ionic_lif *lif = netdev_priv(netdev);
@@ -1998,7 +2149,7 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_stop = ionic_stop,
.ndo_start_xmit = ionic_start_xmit,
.ndo_get_stats64 = ionic_get_stats64,
- .ndo_set_rx_mode = ionic_set_rx_mode,
+ .ndo_set_rx_mode = ionic_ndo_set_rx_mode,
.ndo_set_features = ionic_set_features,
.ndo_set_mac_address = ionic_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
@@ -2016,35 +2167,227 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
-int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
-{
- bool running;
- int err = 0;
+static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
+{
+ /* only swapping the queues, not the napi, flags, or other stuff */
+ swap(a->q.num_descs, b->q.num_descs);
+ swap(a->q.base, b->q.base);
+ swap(a->q.base_pa, b->q.base_pa);
+ swap(a->q.info, b->q.info);
+ swap(a->q_base, b->q_base);
+ swap(a->q_base_pa, b->q_base_pa);
+ swap(a->q_size, b->q_size);
+
+ swap(a->q.sg_base, b->q.sg_base);
+ swap(a->q.sg_base_pa, b->q.sg_base_pa);
+ swap(a->sg_base, b->sg_base);
+ swap(a->sg_base_pa, b->sg_base_pa);
+ swap(a->sg_size, b->sg_size);
+
+ swap(a->cq.num_descs, b->cq.num_descs);
+ swap(a->cq.base, b->cq.base);
+ swap(a->cq.base_pa, b->cq.base_pa);
+ swap(a->cq.info, b->cq.info);
+ swap(a->cq_base, b->cq_base);
+ swap(a->cq_base_pa, b->cq_base_pa);
+ swap(a->cq_size, b->cq_size);
+}
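This swap is what lets ionic_reconfigure_queues() below build complete replacement queues while traffic is still flowing: only the descriptor rings, their DMA buffers, and the info arrays change hands, while napi state, interrupts, and flags stay with the original qcq shells that the rest of the driver holds pointers to.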
+
+int ionic_reconfigure_queues(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ struct ionic_qcq **tx_qcqs = NULL;
+ struct ionic_qcq **rx_qcqs = NULL;
+ unsigned int sg_desc_sz;
+ unsigned int flags;
+ int err = -ENOMEM;
+ unsigned int i;
- mutex_lock(&lif->queue_lock);
- running = netif_running(lif->netdev);
- if (running) {
- netif_device_detach(lif->netdev);
- err = ionic_stop(lif->netdev);
+ /* allocate temporary qcq arrays to hold new queue structs */
+ if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
+ tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
+ if (!tx_qcqs)
+ goto err_out;
+ }
+ if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
+ rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
+ sizeof(struct ionic_qcq *), GFP_KERNEL);
+ if (!rx_qcqs)
+ goto err_out;
+ }
+
+ /* allocate new desc_info and rings, but leave the interrupt setup
+ * until later so as to not mess with the still-running queues
+ */
+ if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
+ lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
+ sizeof(struct ionic_txq_sg_desc_v1))
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
+ else
+ sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
+
+ if (tx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
+ qparam->ntxq_descs,
+ sizeof(struct ionic_txq_desc),
+ sizeof(struct ionic_txq_comp),
+ sg_desc_sz,
+ lif->kern_pid, &tx_qcqs[i]);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ if (rx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
+ err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
+ qparam->nrxq_descs,
+ sizeof(struct ionic_rxq_desc),
+ sizeof(struct ionic_rxq_comp),
+ sizeof(struct ionic_rxq_sg_desc),
+ lif->kern_pid, &rx_qcqs[i]);
+ if (err)
+ goto err_out;
+ }
+ }
+
+ /* stop and clean the queues */
+ ionic_stop_queues_reconfig(lif);
+
+ if (qparam->nxqs != lif->nxqs) {
+ err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
if (err)
- goto reset_out;
+ goto err_out_reinit_unlock;
+ err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
+ if (err) {
+ netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
+ goto err_out_reinit_unlock;
+ }
}
- if (cb)
- cb(lif, arg);
+ /* swap new desc_info and rings, keeping existing interrupt config */
+ if (tx_qcqs) {
+ lif->ntxq_descs = qparam->ntxq_descs;
+ for (i = 0; i < qparam->nxqs; i++)
+ ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
+ }
- if (running) {
- err = ionic_open(lif->netdev);
- netif_device_attach(lif->netdev);
+ if (rx_qcqs) {
+ lif->nrxq_descs = qparam->nrxq_descs;
+ for (i = 0; i < qparam->nxqs; i++)
+ ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
}
-reset_out:
- mutex_unlock(&lif->queue_lock);
+ /* if we need to change the interrupt layout, this is the time */
+ if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
+ qparam->nxqs != lif->nxqs) {
+ if (qparam->intr_split) {
+ set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ } else {
+ clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+ lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
+ lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ }
+
+ /* clear existing interrupt assignments */
+ for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
+ ionic_qcq_intr_free(lif, lif->txqcqs[i]);
+ ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
+ }
+
+ /* re-assign the interrupts */
+ for (i = 0; i < qparam->nxqs; i++) {
+ lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
+ err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[i]->intr.index,
+ lif->rx_coalesce_hw);
+
+ if (qparam->intr_split) {
+ lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
+ err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->txqcqs[i]->intr.index,
+ lif->tx_coalesce_hw);
+ if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
+ lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
+ } else {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
+ }
+ }
+ }
+
+ /* now we can rework the debugfs mappings */
+ if (tx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ ionic_debugfs_del_qcq(lif->txqcqs[i]);
+ ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
+ }
+ }
+
+ if (rx_qcqs) {
+ for (i = 0; i < qparam->nxqs; i++) {
+ ionic_debugfs_del_qcq(lif->rxqcqs[i]);
+ ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
+ }
+ }
+
+ swap(lif->nxqs, qparam->nxqs);
+
+err_out_reinit_unlock:
+	/* re-init the queues, but don't lose an error code */
+ if (err)
+ ionic_start_queues_reconfig(lif);
+ else
+ err = ionic_start_queues_reconfig(lif);
+
+err_out:
+ /* free old allocs without cleaning intr */
+ for (i = 0; i < qparam->nxqs; i++) {
+ if (tx_qcqs && tx_qcqs[i]) {
+ tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, tx_qcqs[i]);
+ devm_kfree(lif->ionic->dev, tx_qcqs[i]);
+ tx_qcqs[i] = NULL;
+ }
+ if (rx_qcqs && rx_qcqs[i]) {
+ rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, rx_qcqs[i]);
+ devm_kfree(lif->ionic->dev, rx_qcqs[i]);
+ rx_qcqs[i] = NULL;
+ }
+ }
+
+ /* free q array */
+ if (rx_qcqs) {
+ devm_kfree(lif->ionic->dev, rx_qcqs);
+ rx_qcqs = NULL;
+ }
+ if (tx_qcqs) {
+ devm_kfree(lif->ionic->dev, tx_qcqs);
+ tx_qcqs = NULL;
+ }
+
+	/* clean the unused DMA and info allocations when the new set is
+	 * smaller than the full array, but leave the qcq shells in place
+	 */
+ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
+ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->txqcqs[i]);
+
+ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+ ionic_qcq_free(lif, lif->rxqcqs[i]);
+ }
return err;
}
-static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index)
+int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
union ionic_lif_identity *lid;
@@ -2055,7 +2398,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lid = kzalloc(sizeof(*lid), GFP_KERNEL);
if (!lid)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
netdev = alloc_etherdev_mqs(sizeof(*lif),
ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
@@ -2069,7 +2412,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif = netdev_priv(netdev);
lif->netdev = netdev;
- ionic->master_lif = lif;
+ ionic->lif = lif;
netdev->netdev_ops = &ionic_netdev_ops;
ionic_ethtool_set_ops(netdev);
@@ -2078,8 +2421,14 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->identity = lid;
lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
- ionic_lif_identify(ionic, lif->lif_type, lif->identity);
- lif->netdev->min_mtu = le32_to_cpu(lif->identity->eth.min_frame_size);
+ err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
+ if (err) {
+ dev_err(ionic->dev, "Cannot identify type %d: %d\n",
+ lif->lif_type, err);
+ goto err_out_free_netdev;
+ }
+ lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
+ le32_to_cpu(lif->identity->eth.min_frame_size));
lif->netdev->max_mtu =
le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
@@ -2087,7 +2436,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->nxqs = ionic->ntxqs_per_lif;
lif->ionic = ionic;
- lif->index = index;
+ lif->index = 0;
lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
@@ -2098,8 +2447,10 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
lif->rx_coalesce_usecs);
lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
lif->tx_coalesce_hw = lif->rx_coalesce_hw;
+ set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
+ set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
- snprintf(lif->name, sizeof(lif->name), "lif%u", index);
+ snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
spin_lock_init(&lif->adminq_lock);
@@ -2119,7 +2470,8 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
ionic_debugfs_add_lif(lif);
- /* allocate queues */
+ /* allocate control queues and txrx queue arrays */
+ ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out_free_lif_info;
@@ -2138,9 +2490,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
}
netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
- list_add_tail(&lif->list, &ionic->lifs);
-
- return lif;
+ return 0;
err_out_free_qcqs:
ionic_qcqs_free(lif);
@@ -2154,27 +2504,7 @@ err_out_free_netdev:
err_out_free_lid:
kfree(lid);
- return ERR_PTR(err);
-}
-
-int ionic_lifs_alloc(struct ionic *ionic)
-{
- struct ionic_lif *lif;
-
- INIT_LIST_HEAD(&ionic->lifs);
-
- /* only build the first lif, others are for later features */
- set_bit(0, ionic->lifbits);
-
- lif = ionic_lif_alloc(ionic, 0);
- if (IS_ERR_OR_NULL(lif)) {
- clear_bit(0, ionic->lifbits);
- return -ENOMEM;
- }
-
- ionic_lif_queue_identify(lif);
-
- return 0;
+ return err;
}
static void ionic_lif_reset(struct ionic_lif *lif)
@@ -2209,7 +2539,7 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_txrx_deinit(lif);
ionic_txrx_free(lif);
}
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(lif);
ionic_reset(ionic);
ionic_qcqs_free(lif);
@@ -2227,12 +2557,20 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
dev_info(ionic->dev, "FW Up: restarting LIFs\n");
ionic_init_devinfo(ionic);
- ionic_port_init(ionic);
+ err = ionic_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_identify(ionic);
+ if (err)
+ goto err_out;
+ err = ionic_port_init(ionic);
+ if (err)
+ goto err_out;
err = ionic_qcqs_alloc(lif);
if (err)
goto err_out;
- err = ionic_lifs_init(ionic);
+ err = ionic_lif_init(lif);
if (err)
goto err_qcqs_free;
@@ -2252,7 +2590,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
}
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
- ionic_link_status_check_request(lif);
+ ionic_link_status_check_request(lif, true);
netif_device_attach(lif->netdev);
dev_info(ionic->dev, "FW Up: LIFs restarted\n");
@@ -2261,14 +2599,14 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err_txrx_free:
ionic_txrx_free(lif);
err_lifs_deinit:
- ionic_lifs_deinit(ionic);
+ ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
-static void ionic_lif_free(struct ionic_lif *lif)
+void ionic_lif_free(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2297,23 +2635,10 @@ static void ionic_lif_free(struct ionic_lif *lif)
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
- list_del(&lif->list);
free_netdev(lif->netdev);
}
-void ionic_lifs_free(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
-
- ionic_lif_free(lif);
- }
-}
-
-static void ionic_lif_deinit(struct ionic_lif *lif)
+void ionic_lif_deinit(struct ionic_lif *lif)
{
if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
return;
@@ -2334,17 +2659,6 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_reset(lif);
}
-void ionic_lifs_deinit(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
- ionic_lif_deinit(lif);
- }
-}
-
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
struct device *dev = lif->ionic->dev;
@@ -2468,7 +2782,7 @@ static int ionic_station_set(struct ionic_lif *lif)
*/
if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
netdev->dev_addr))
- ionic_lif_addr(lif, netdev->dev_addr, true);
+ ionic_lif_addr(lif, netdev->dev_addr, true, true);
} else {
/* Update the netdev mac with the device's mac */
memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
@@ -2485,12 +2799,12 @@ static int ionic_station_set(struct ionic_lif *lif)
netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
netdev->dev_addr);
- ionic_lif_addr(lif, netdev->dev_addr, true);
+ ionic_lif_addr(lif, netdev->dev_addr, true, true);
return 0;
}
-static int ionic_lif_init(struct ionic_lif *lif)
+int ionic_lif_init(struct ionic_lif *lif)
{
struct ionic_dev *idev = &lif->ionic->idev;
struct device *dev = lif->ionic->dev;
@@ -2580,22 +2894,6 @@ err_out_free_dbid:
return err;
}
-int ionic_lifs_init(struct ionic *ionic)
-{
- struct list_head *cur, *tmp;
- struct ionic_lif *lif;
- int err;
-
- list_for_each_safe(cur, tmp, &ionic->lifs) {
- lif = list_entry(cur, struct ionic_lif, list);
- err = ionic_lif_init(lif);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
@@ -2644,45 +2942,41 @@ static int ionic_lif_notify(struct notifier_block *nb,
return NOTIFY_DONE;
}
-int ionic_lifs_register(struct ionic *ionic)
+int ionic_lif_register(struct ionic_lif *lif)
{
int err;
- INIT_WORK(&ionic->nb_work, ionic_lif_notify_work);
+ INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
- ionic->nb.notifier_call = ionic_lif_notify;
+ lif->ionic->nb.notifier_call = ionic_lif_notify;
- err = register_netdevice_notifier(&ionic->nb);
+ err = register_netdevice_notifier(&lif->ionic->nb);
if (err)
- ionic->nb.notifier_call = NULL;
+ lif->ionic->nb.notifier_call = NULL;
/* only register LIF0 for now */
- err = register_netdev(ionic->master_lif->netdev);
+ err = register_netdev(lif->netdev);
if (err) {
- dev_err(ionic->dev, "Cannot register net device, aborting\n");
+ dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
return err;
}
- ionic->master_lif->registered = true;
- ionic_lif_set_netdev_info(ionic->master_lif);
+ lif->registered = true;
+ ionic_lif_set_netdev_info(lif);
return 0;
}
-void ionic_lifs_unregister(struct ionic *ionic)
+void ionic_lif_unregister(struct ionic_lif *lif)
{
- if (ionic->nb.notifier_call) {
- unregister_netdevice_notifier(&ionic->nb);
- cancel_work_sync(&ionic->nb_work);
- ionic->nb.notifier_call = NULL;
+ if (lif->ionic->nb.notifier_call) {
+ unregister_netdevice_notifier(&lif->ionic->nb);
+ cancel_work_sync(&lif->ionic->nb_work);
+ lif->ionic->nb.notifier_call = NULL;
}
- /* There is only one lif ever registered in the
- * current model, so don't bother searching the
- * ionic->lif for candidates to unregister
- */
- if (ionic->master_lif &&
- ionic->master_lif->netdev->reg_state == NETREG_REGISTERED)
- unregister_netdev(ionic->master_lif->netdev);
+ if (lif->netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(lif->netdev);
+ lif->registered = false;
}
static void ionic_lif_queue_identify(struct ionic_lif *lif)
@@ -2801,7 +3095,7 @@ int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
return 0;
}
-int ionic_lifs_size(struct ionic *ionic)
+int ionic_lif_size(struct ionic *ionic)
{
struct ionic_identity *ident = &ionic->ident;
unsigned int nintrs, dev_nintrs;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 1ee3b14c8d50..0224dfd24b8a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -4,6 +4,7 @@
#ifndef _IONIC_LIF_H_
#define _IONIC_LIF_H_
+#include <linux/dim.h>
#include <linux/pci.h>
#include "ionic_rx_filter.h"
@@ -16,32 +17,32 @@
#define IONIC_TX_BUDGET_DEFAULT 256
struct ionic_tx_stats {
- u64 dma_map_err;
u64 pkts;
u64 bytes;
- u64 clean;
- u64 linearize;
u64 csum_none;
u64 csum;
- u64 crc32_csum;
u64 tso;
u64 tso_bytes;
u64 frags;
u64 vlan_inserted;
+ u64 clean;
+ u64 linearize;
+ u64 crc32_csum;
u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
+ u64 dma_map_err;
};
struct ionic_rx_stats {
- u64 dma_map_err;
- u64 alloc_err;
u64 pkts;
u64 bytes;
u64 csum_none;
u64 csum_complete;
- u64 csum_error;
u64 buffers_posted;
u64 dropped;
u64 vlan_stripped;
+ u64 csum_error;
+ u64 dma_map_err;
+ u64 alloc_err;
};
#define IONIC_QCQ_F_INITED BIT(0)
@@ -56,35 +57,29 @@ struct ionic_napi_stats {
u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
};
-struct ionic_q_stats {
- union {
- struct ionic_tx_stats tx;
- struct ionic_rx_stats rx;
- };
-};
-
struct ionic_qcq {
- void *base;
- dma_addr_t base_pa;
- unsigned int total_size;
+ void *q_base;
+ dma_addr_t q_base_pa;
+ u32 q_size;
+ void *cq_base;
+ dma_addr_t cq_base_pa;
+ u32 cq_size;
+ void *sg_base;
+ dma_addr_t sg_base_pa;
+ u32 sg_size;
+ struct dim dim;
struct ionic_queue q;
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
struct ionic_napi_stats napi_stats;
- struct ionic_q_stats *stats;
unsigned int flags;
struct dentry *dentry;
};
-struct ionic_qcqst {
- struct ionic_qcq *qcq;
- struct ionic_q_stats *stats;
-};
-
#define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
-#define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx)
-#define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx)
+#define q_to_tx_stats(q) (&(q)->lif->txqstats[(q)->index])
+#define q_to_rx_stats(q) (&(q)->lif->rxqstats[(q)->index])
#define napi_to_qcq(napi) container_of(napi, struct ionic_qcq, napi)
#define napi_to_cq(napi) (&napi_to_qcq(napi)->cq)
@@ -138,6 +133,8 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FW_RESET,
IONIC_LIF_F_SPLIT_INTR,
+ IONIC_LIF_F_TX_DIM_INTR,
+ IONIC_LIF_F_RX_DIM_INTR,
/* leave this as last */
IONIC_LIF_F_STATE_SIZE
@@ -170,8 +167,10 @@ struct ionic_lif {
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
- struct ionic_qcqst *txqcqs;
- struct ionic_qcqst *rxqcqs;
+ struct ionic_qcq **txqcqs;
+ struct ionic_tx_stats *txqstats;
+ struct ionic_qcq **rxqcqs;
+ struct ionic_rx_stats *rxqstats;
u64 last_eid;
unsigned int neqs;
unsigned int nxqs;
@@ -212,12 +211,21 @@ struct ionic_lif {
struct work_struct tx_timeout_work;
};
-#define lif_to_txqcq(lif, i) ((lif)->txqcqs[i].qcq)
-#define lif_to_rxqcq(lif, i) ((lif)->rxqcqs[i].qcq)
-#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
-#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
-#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
-#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
+struct ionic_queue_params {
+ unsigned int nxqs;
+ unsigned int ntxq_descs;
+ unsigned int nrxq_descs;
+ unsigned int intr_split;
+};
+
+static inline void ionic_init_queue_params(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam)
+{
+ qparam->nxqs = lif->nxqs;
+ qparam->ntxq_descs = lif->ntxq_descs;
+ qparam->nrxq_descs = lif->nrxq_descs;
+ qparam->intr_split = test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
+}
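
/* Editor's sketch (not part of the patch): a caller such as an ethtool
 * set_channels/set_ringparam handler is expected to snapshot the live
 * settings with ionic_init_queue_params(), change only what the user
 * asked for, and let ionic_reconfigure_queues() swap the new queues in
 * or roll back on failure.  The function name below is illustrative.
 */
static int example_set_ring_size(struct ionic_lif *lif, u32 ndescs)
{
	struct ionic_queue_params qparam;

	ionic_init_queue_params(lif, &qparam);
	qparam.ntxq_descs = ndescs;
	qparam.nrxq_descs = ndescs;

	return ionic_reconfigure_queues(lif, &qparam);
}
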
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
@@ -237,39 +245,38 @@ static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
-void ionic_link_status_check_request(struct ionic_lif *lif);
+void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
struct ionic_deferred_work *work);
-int ionic_lifs_alloc(struct ionic *ionic);
-void ionic_lifs_free(struct ionic *ionic);
-void ionic_lifs_deinit(struct ionic *ionic);
-int ionic_lifs_init(struct ionic *ionic);
-int ionic_lifs_register(struct ionic *ionic);
-void ionic_lifs_unregister(struct ionic *ionic);
+int ionic_lif_alloc(struct ionic *ionic);
+int ionic_lif_init(struct ionic_lif *lif);
+void ionic_lif_free(struct ionic_lif *lif);
+void ionic_lif_deinit(struct ionic_lif *lif);
+int ionic_lif_register(struct ionic_lif *lif);
+void ionic_lif_unregister(struct ionic_lif *lif);
int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
union ionic_lif_identity *lif_ident);
-int ionic_lifs_size(struct ionic *ionic);
+int ionic_lif_size(struct ionic *ionic);
int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
const u8 *key, const u32 *indir);
+int ionic_reconfigure_queues(struct ionic_lif *lif,
+ struct ionic_queue_params *qparam);
-int ionic_open(struct net_device *netdev);
-int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
-
-static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
- struct ionic_txq_desc *desc, bool dbell)
+static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
{
- u8 num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
- & IONIC_TXQ_DESC_NSGE_MASK);
+ struct ionic_txq_desc *desc = &q->txq[q->head_idx];
+ u8 num_sg_elems;
- qcq->q.dbell_count += dbell;
+ q->dbell_count += dbell;
+ num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
+ & IONIC_TXQ_DESC_NSGE_MASK);
if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
- qcq->stats->tx.sg_cntr[num_sg_elems]++;
+ q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
}
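
/* Editor's note: per-queue statistics now live in lif-owned arrays
 * (txqstats/rxqstats) indexed by queue number instead of inside each
 * qcq, so the counters survive the queue teardown and rebuild done by
 * ionic_reconfigure_queues().
 */
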
static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
@@ -284,10 +291,8 @@ static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
}
#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
-#define DEBUG_STATS_RX_BUFF_CNT(qcq) ((qcq)->stats->rx.buffers_posted++)
-#define DEBUG_STATS_INTR_REARM(intr) ((intr)->rearm_count++)
-#define DEBUG_STATS_TXQ_POST(qcq, txdesc, dbell) \
- debug_stats_txq_post(qcq, txdesc, dbell)
+#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
+#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
debug_stats_napi_poll(qcq, work_done)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index df5b9bcc3aba..ee0740881af3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -64,6 +64,8 @@ static const char *ionic_error_to_str(enum ionic_status_code code)
return "IONIC_RC_ERROR";
case IONIC_RC_ERDMA:
return "IONIC_RC_ERDMA";
+ case IONIC_RC_EBAD_FW:
+ return "IONIC_RC_EBAD_FW";
default:
return "IONIC_RC_UNKNOWN";
}
@@ -170,6 +172,10 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
return "IONIC_CMD_FW_DOWNLOAD";
case IONIC_CMD_FW_CONTROL:
return "IONIC_CMD_FW_CONTROL";
+ case IONIC_CMD_FW_DOWNLOAD_V1:
+ return "IONIC_CMD_FW_DOWNLOAD_V1";
+ case IONIC_CMD_FW_CONTROL_V1:
+ return "IONIC_CMD_FW_CONTROL_V1";
case IONIC_CMD_VF_GETATTR:
return "IONIC_CMD_VF_GETATTR";
case IONIC_CMD_VF_SETATTR:
@@ -181,15 +187,17 @@ static const char *ionic_opcode_to_str(enum ionic_cmd_opcode opcode)
static void ionic_adminq_flush(struct ionic_lif *lif)
{
- struct ionic_queue *adminq = &lif->adminqcq->q;
+ struct ionic_queue *q = &lif->adminqcq->q;
+ struct ionic_desc_info *desc_info;
spin_lock(&lif->adminq_lock);
- while (adminq->tail != adminq->head) {
- memset(adminq->tail->desc, 0, sizeof(union ionic_adminq_cmd));
- adminq->tail->cb = NULL;
- adminq->tail->cb_arg = NULL;
- adminq->tail = adminq->tail->next;
+ while (q->tail_idx != q->head_idx) {
+ desc_info = &q->info[q->tail_idx];
+ memset(desc_info->desc, 0, sizeof(union ionic_adminq_cmd));
+ desc_info->cb = NULL;
+ desc_info->cb_arg = NULL;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
}
spin_unlock(&lif->adminq_lock);
}
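
/* Editor's note: the linked-list tail/head pointers are replaced with
 * plain indices into q->info[].  Since num_descs is a power of two,
 * wrap-around is a mask rather than a modulo.  A minimal sketch of the
 * arithmetic the driver now open-codes (helper names are illustrative):
 */
static inline u16 ring_next(u16 idx, u16 num_descs)
{
	return (idx + 1) & (num_descs - 1);
}

static inline u16 ring_used(u16 head, u16 tail, u16 num_descs)
{
	return (head - tail) & (num_descs - 1);
}
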
@@ -245,18 +253,17 @@ static void ionic_adminq_cb(struct ionic_queue *q,
static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
{
- struct ionic_queue *adminq;
+ struct ionic_desc_info *desc_info;
+ struct ionic_queue *q;
int err = 0;
- WARN_ON(in_interrupt());
-
if (!lif->adminqcq)
return -EIO;
- adminq = &lif->adminqcq->q;
+ q = &lif->adminqcq->q;
spin_lock(&lif->adminq_lock);
- if (!ionic_q_has_space(adminq, 1)) {
+ if (!ionic_q_has_space(q, 1)) {
err = -ENOSPC;
goto err_out;
}
@@ -265,13 +272,14 @@ static int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
if (err)
goto err_out;
- memcpy(adminq->head->desc, &ctx->cmd, sizeof(ctx->cmd));
+ desc_info = &q->info[q->head_idx];
+ memcpy(desc_info->desc, &ctx->cmd, sizeof(ctx->cmd));
dev_dbg(&lif->netdev->dev, "post admin queue command:\n");
dynamic_hex_dump("cmd ", DUMP_PREFIX_OFFSET, 16, 1,
&ctx->cmd, sizeof(ctx->cmd), true);
- ionic_q_post(adminq, true, ionic_adminq_cb, ctx);
+ ionic_q_post(q, true, ionic_adminq_cb, ctx);
err_out:
spin_unlock(&lif->adminq_lock);
@@ -301,32 +309,6 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
return ionic_adminq_check_err(lif, ctx, (remaining == 0));
}
-int ionic_napi(struct napi_struct *napi, int budget, ionic_cq_cb cb,
- ionic_cq_done_cb done_cb, void *done_arg)
-{
- struct ionic_qcq *qcq = napi_to_qcq(napi);
- struct ionic_cq *cq = &qcq->cq;
- u32 work_done, flags = 0;
-
- work_done = ionic_cq_service(cq, budget, cb, done_cb, done_arg);
-
- if (work_done < budget && napi_complete_done(napi, work_done)) {
- flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
- }
-
- if (work_done || flags) {
- flags |= IONIC_INTR_CRED_RESET_COALESCE;
- ionic_intr_credits(cq->lif->ionic->idev.intr_ctrl,
- cq->bound_intr->index,
- work_done, flags);
- }
-
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
- return work_done;
-}
-
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
@@ -346,24 +328,27 @@ int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_seconds)
int done;
int err;
- WARN_ON(in_interrupt());
-
/* Wait for dev cmd to complete, retrying if we get EAGAIN,
* but don't wait any longer than max_seconds.
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
+ opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
start_time = jiffies;
do {
done = ionic_dev_cmd_done(idev);
if (done)
break;
- msleep(5);
- hb = ionic_heartbeat_check(ionic);
+ usleep_range(100, 200);
+
+ /* Don't check the heartbeat on FW_CONTROL commands as they are
+ * notorious for interrupting the firmware's heartbeat update.
+ */
+ if (opcode != IONIC_CMD_FW_CONTROL)
+ hb = ionic_heartbeat_check(ionic);
} while (!done && !hb && time_before(jiffies, max_wait));
duration = jiffies - start_time;
- opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
dev_dbg(ionic->dev, "DEVCMD %s (%d) done=%d took %ld secs (%ld jiffies)\n",
ionic_opcode_to_str(opcode), opcode,
done, duration / HZ, duration);
@@ -387,8 +372,9 @@ try_again:
err = ionic_dev_cmd_status(&ionic->idev);
if (err) {
- if (err == IONIC_RC_EAGAIN && !time_after(jiffies, max_wait)) {
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) retrying...\n",
+ if (err == IONIC_RC_EAGAIN &&
+ time_before(jiffies, (max_wait - HZ))) {
+ dev_dbg(ionic->dev, "DEV_CMD %s (%d), %s (%d) retrying...\n",
ionic_opcode_to_str(opcode), opcode,
ionic_error_to_str(err), err);
@@ -398,9 +384,10 @@ try_again:
goto try_again;
}
- dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
- ionic_opcode_to_str(opcode), opcode,
- ionic_error_to_str(err), err);
+ if (!(opcode == IONIC_CMD_FW_CONTROL && err == IONIC_RC_EAGAIN))
+ dev_err(ionic->dev, "DEV_CMD %s (%d) error, %s (%d) failed\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(err), err);
return ionic_error_to_errno(err);
}
@@ -444,17 +431,23 @@ int ionic_identify(struct ionic *ionic)
sz = min(sizeof(ident->dev), sizeof(idev->dev_cmd_regs->data));
memcpy_fromio(&ident->dev, &idev->dev_cmd_regs->data, sz);
}
-
mutex_unlock(&ionic->dev_cmd_lock);
- if (err)
- goto err_out_unmap;
+ if (err) {
+ dev_err(ionic->dev, "Cannot identify ionic: %dn", err);
+ goto err_out;
+ }
- ionic_debugfs_add_ident(ionic);
+ err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
+ &ionic->ident.lif);
+ if (err) {
+ dev_err(ionic->dev, "Cannot identify LIFs: %d\n", err);
+ goto err_out;
+ }
return 0;
-err_out_unmap:
+err_out:
return err;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index 2a1885da58a6..ff20a2ac4c2f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -179,36 +179,28 @@ static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
static void ionic_get_lif_stats(struct ionic_lif *lif,
struct ionic_lif_sw_stats *stats)
{
- struct ionic_tx_stats *tstats;
- struct ionic_rx_stats *rstats;
+ struct ionic_tx_stats *txstats;
+ struct ionic_rx_stats *rxstats;
struct rtnl_link_stats64 ns;
- struct ionic_qcq *txqcq;
- struct ionic_qcq *rxqcq;
int q_num;
memset(stats, 0, sizeof(*stats));
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txqcq = lif_to_txqcq(lif, q_num);
- if (txqcq && txqcq->stats) {
- tstats = &txqcq->stats->tx;
- stats->tx_packets += tstats->pkts;
- stats->tx_bytes += tstats->bytes;
- stats->tx_tso += tstats->tso;
- stats->tx_tso_bytes += tstats->tso_bytes;
- stats->tx_csum_none += tstats->csum_none;
- stats->tx_csum += tstats->csum;
- }
-
- rxqcq = lif_to_rxqcq(lif, q_num);
- if (rxqcq && rxqcq->stats) {
- rstats = &rxqcq->stats->rx;
- stats->rx_packets += rstats->pkts;
- stats->rx_bytes += rstats->bytes;
- stats->rx_csum_none += rstats->csum_none;
- stats->rx_csum_complete += rstats->csum_complete;
- stats->rx_csum_error += rstats->csum_error;
- }
+ txstats = &lif->txqstats[q_num];
+ stats->tx_packets += txstats->pkts;
+ stats->tx_bytes += txstats->bytes;
+ stats->tx_tso += txstats->tso;
+ stats->tx_tso_bytes += txstats->tso_bytes;
+ stats->tx_csum_none += txstats->csum_none;
+ stats->tx_csum += txstats->csum;
+
+ rxstats = &lif->rxqstats[q_num];
+ stats->rx_packets += rxstats->pkts;
+ stats->rx_bytes += rxstats->bytes;
+ stats->rx_csum_none += rxstats->csum_none;
+ stats->rx_csum_complete += rxstats->csum_complete;
+ stats->rx_csum_error += rxstats->csum_error;
}
ionic_get_stats64(lif->netdev, &ns);
@@ -371,7 +363,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- txstats = &lif_to_txstats(lif, q_num);
+ txstats = &lif->txqstats[q_num];
for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
**buf = IONIC_READ_STAT64(txstats,
@@ -381,7 +373,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- txqcq = lif_to_txqcq(lif, q_num);
+ txqcq = lif->txqcqs[q_num];
for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
**buf = IONIC_READ_STAT64(&txqcq->q,
&ionic_txq_stats_desc[i]);
@@ -405,7 +397,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
}
for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
- rxstats = &lif_to_rxstats(lif, q_num);
+ rxstats = &lif->rxqstats[q_num];
for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
**buf = IONIC_READ_STAT64(rxstats,
@@ -415,7 +407,7 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
if (test_bit(IONIC_LIF_F_UP, lif->state) &&
test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- rxqcq = lif_to_rxqcq(lif, q_num);
+ rxqcq = lif->rxqcqs[q_num];
for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
**buf = IONIC_READ_STAT64(&rxqcq->cq,
&ionic_dbg_cq_stats_desc[i]);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index def65fee27b5..169ac4f54640 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -22,7 +22,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
- DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
+ DEBUG_STATS_TXQ_POST(q, ring_dbell);
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
@@ -32,7 +32,7 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
- DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
+ DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -49,7 +49,7 @@ static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
struct sk_buff *skb;
netdev = lif->netdev;
- stats = q_to_rx_stats(q);
+ stats = &q->lif->rxqstats[q->index];
if (frags)
skb = napi_get_frags(&q_to_qcq(q)->napi);
@@ -235,14 +235,14 @@ static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
return false;
/* check for empty queue */
- if (q->tail->index == q->head->index)
+ if (q->tail_idx == q->head_idx)
return false;
- desc_info = q->tail;
- if (desc_info->index != le16_to_cpu(comp->comp_index))
+ if (q->tail_idx != le16_to_cpu(comp->comp_index))
return false;
- q->tail = desc_info->next;
+ desc_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
/* clean the related q entry, only one per qc completion */
ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
@@ -266,40 +266,49 @@ void ionic_rx_flush(struct ionic_cq *cq)
work_done, IONIC_INTR_CRED_RESET_COALESCE);
}
-static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
- dma_addr_t *dma_addr)
+static int ionic_rx_page_alloc(struct ionic_queue *q,
+ struct ionic_page_info *page_info)
{
struct ionic_lif *lif = q->lif;
struct ionic_rx_stats *stats;
struct net_device *netdev;
struct device *dev;
- struct page *page;
netdev = lif->netdev;
dev = lif->ionic->dev;
stats = q_to_rx_stats(q);
- page = alloc_page(GFP_ATOMIC);
- if (unlikely(!page)) {
- net_err_ratelimited("%s: Page alloc failed on %s!\n",
+
+ if (unlikely(!page_info)) {
+ net_err_ratelimited("%s: %s invalid page_info in alloc\n",
+ netdev->name, q->name);
+ return -EINVAL;
+ }
+
+ page_info->page = dev_alloc_page();
+ if (unlikely(!page_info->page)) {
+ net_err_ratelimited("%s: %s page alloc failed\n",
netdev->name, q->name);
stats->alloc_err++;
- return NULL;
+ return -ENOMEM;
}
- *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, *dma_addr))) {
- __free_page(page);
- net_err_ratelimited("%s: DMA single map failed on %s!\n",
+ page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
+ put_page(page_info->page);
+ page_info->dma_addr = 0;
+ page_info->page = NULL;
+ net_err_ratelimited("%s: %s dma map failed\n",
netdev->name, q->name);
stats->dma_map_err++;
- return NULL;
+ return -EIO;
}
- return page;
+ return 0;
}
-static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
- dma_addr_t dma_addr)
+static void ionic_rx_page_free(struct ionic_queue *q,
+ struct ionic_page_info *page_info)
{
struct ionic_lif *lif = q->lif;
struct net_device *netdev;
@@ -308,15 +317,23 @@ static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
netdev = lif->netdev;
dev = lif->ionic->dev;
- if (unlikely(!page)) {
- net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+ if (unlikely(!page_info)) {
+ net_err_ratelimited("%s: %s invalid page_info in free\n",
+ netdev->name, q->name);
+ return;
+ }
+
+ if (unlikely(!page_info->page)) {
+ net_err_ratelimited("%s: %s invalid page in free\n",
netdev->name, q->name);
return;
}
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
- __free_page(page);
+ put_page(page_info->page);
+ page_info->dma_addr = 0;
+ page_info->page = NULL;
}
void ionic_rx_fill(struct ionic_queue *q)
@@ -338,7 +355,7 @@ void ionic_rx_fill(struct ionic_queue *q)
for (i = ionic_q_space_avail(q); i; i--) {
remain_len = len;
- desc_info = q->head;
+ desc_info = &q->info[q->head_idx];
desc = desc_info->desc;
sg_desc = desc_info->sg_desc;
page_info = &desc_info->pages[0];
@@ -352,8 +369,7 @@ void ionic_rx_fill(struct ionic_queue *q)
desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
IONIC_RXQ_DESC_OPCODE_SIMPLE;
desc_info->npages = nfrags;
- page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
- if (unlikely(!page_info->page)) {
+ if (unlikely(ionic_rx_page_alloc(q, page_info))) {
desc->addr = 0;
desc->len = 0;
return;
@@ -370,8 +386,7 @@ void ionic_rx_fill(struct ionic_queue *q)
continue;
sg_elem = &sg_desc->elems[j];
- page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
- if (unlikely(!page_info->page)) {
+ if (unlikely(ionic_rx_page_alloc(q, page_info))) {
sg_elem->addr = 0;
sg_elem->len = 0;
return;
@@ -387,7 +402,7 @@ void ionic_rx_fill(struct ionic_queue *q)
}
ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
- q->dbval | q->head->index);
+ q->dbval | q->head_idx);
}
static void ionic_rx_fill_cb(void *arg)
@@ -397,28 +412,50 @@ static void ionic_rx_fill_cb(void *arg)
void ionic_rx_empty(struct ionic_queue *q)
{
- struct ionic_desc_info *cur;
+ struct ionic_desc_info *desc_info;
struct ionic_rxq_desc *desc;
unsigned int i;
+ u16 idx;
- for (cur = q->tail; cur != q->head; cur = cur->next) {
- desc = cur->desc;
+ idx = q->tail_idx;
+ while (idx != q->head_idx) {
+ desc_info = &q->info[idx];
+ desc = desc_info->desc;
desc->addr = 0;
desc->len = 0;
- for (i = 0; i < cur->npages; i++) {
- if (likely(cur->pages[i].page)) {
- ionic_rx_page_free(q, cur->pages[i].page,
- cur->pages[i].dma_addr);
- cur->pages[i].page = NULL;
- cur->pages[i].dma_addr = 0;
- }
- }
+ for (i = 0; i < desc_info->npages; i++)
+ ionic_rx_page_free(q, &desc_info->pages[i]);
- cur->cb_arg = NULL;
+ desc_info->cb_arg = NULL;
+ idx = (idx + 1) & (q->num_descs - 1);
}
}
+static void ionic_dim_update(struct ionic_qcq *qcq)
+{
+ struct dim_sample dim_sample;
+ struct ionic_lif *lif;
+ unsigned int qi;
+
+ if (!qcq->intr.dim_coal_hw)
+ return;
+
+ lif = qcq->q.lif;
+ qi = qcq->cq.bound_q->index;
+
+ ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
+ lif->rxqcqs[qi]->intr.index,
+ qcq->intr.dim_coal_hw);
+
+ dim_update_sample(qcq->cq.bound_intr->rearm_count,
+ lif->txqstats[qi].pkts,
+ lif->txqstats[qi].bytes,
+ &dim_sample);
+
+ net_dim(&qcq->dim, dim_sample);
+}
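
/* Editor's sketch: net_dim() above only selects a moderation profile
 * and schedules dim.work; applying the profile happens in a separate
 * work handler following the usual <linux/dim.h> pattern.  The handler
 * below is a hedged illustration, not necessarily the driver's own.
 */
static void example_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct dim_cq_moder moder;

	moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	/* translate moder.usec into device units and program the
	 * interrupt coalescing for the queue that owns this dim
	 */
	dim->state = DIM_START_MEASURE;
}
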
+
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
struct ionic_qcq *qcq = napi_to_qcq(napi);
@@ -435,8 +472,9 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
ionic_tx_service, NULL, NULL);
if (work_done < budget && napi_complete_done(napi, work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ cq->bound_intr->rearm_count++;
}
if (work_done || flags) {
@@ -470,8 +508,9 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill(cq->bound_q);
if (work_done < budget && napi_complete_done(napi, work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(cq->bound_intr);
+ cq->bound_intr->rearm_count++;
}
if (work_done || flags) {
@@ -500,7 +539,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
lif = rxcq->bound_q->lif;
idev = &lif->ionic->idev;
- txcq = &lif->txqcqs[qi].qcq->cq;
+ txcq = &lif->txqcqs[qi]->cq;
tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
ionic_tx_service, NULL, NULL);
@@ -511,8 +550,9 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
ionic_rx_fill_cb(rxcq->bound_q);
if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
+ ionic_dim_update(qcq);
flags |= IONIC_INTR_CRED_UNMASK;
- DEBUG_STATS_INTR_REARM(rxcq->bound_intr);
+ rxcq->bound_intr->rearm_count++;
}
if (rx_work_done || flags) {
@@ -615,6 +655,7 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
struct ionic_txq_comp *comp = cq_info->cq_desc;
struct ionic_queue *q = cq->bound_q;
struct ionic_desc_info *desc_info;
+ u16 index;
if (!color_match(comp->color, cq->done_color))
return false;
@@ -623,12 +664,13 @@ static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
* several q entries completed for each cq completion
*/
do {
- desc_info = q->tail;
- q->tail = desc_info->next;
- ionic_tx_clean(q, desc_info, cq->tail, desc_info->cb_arg);
+ desc_info = &q->info[q->tail_idx];
+ index = q->tail_idx;
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
+ ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- } while (desc_info->index != le16_to_cpu(comp->comp_index));
+ } while (index != le16_to_cpu(comp->comp_index));
return true;
}
@@ -648,16 +690,14 @@ void ionic_tx_flush(struct ionic_cq *cq)
void ionic_tx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
- int done = 0;
/* walk the not completed tx entries, if any */
- while (q->head != q->tail) {
- desc_info = q->tail;
- q->tail = desc_info->next;
+ while (q->head_idx != q->tail_idx) {
+ desc_info = &q->info[q->tail_idx];
+ q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- done++;
}
}
@@ -741,8 +781,8 @@ static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc
static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
struct ionic_txq_sg_elem **elem)
{
- struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
- struct ionic_txq_desc *desc = q->head->desc;
+ struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
*elem = sg_desc->elems;
return desc;
@@ -751,13 +791,13 @@ static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_desc_info *abort = q->head;
+ struct ionic_desc_info *rewind_desc_info;
struct device *dev = q->lif->ionic->dev;
- struct ionic_desc_info *rewind = abort;
struct ionic_txq_sg_elem *elem;
struct ionic_txq_desc *desc;
unsigned int frag_left = 0;
unsigned int offset = 0;
+ u16 abort = q->head_idx;
unsigned int len_left;
dma_addr_t desc_addr;
unsigned int hdrlen;
@@ -765,6 +805,7 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
unsigned int seglen;
u64 total_bytes = 0;
u64 total_pkts = 0;
+ u16 rewind = abort;
unsigned int left;
unsigned int len;
unsigned int mss;
@@ -909,19 +950,20 @@ static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
return 0;
err_out_abort:
- while (rewind->desc != q->head->desc) {
- ionic_tx_clean(q, rewind, NULL, NULL);
- rewind = rewind->next;
+ while (rewind != q->head_idx) {
+ rewind_desc_info = &q->info[rewind];
+ ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
+ rewind = (rewind + 1) & (q->num_descs - 1);
}
- q->head = abort;
+ q->head_idx = abort;
return -ENOMEM;
}
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
@@ -960,8 +1002,8 @@ static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
+ struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
- struct ionic_txq_desc *desc = q->head->desc;
struct device *dev = q->lif->ionic->dev;
dma_addr_t dma_addr;
bool has_vlan;
@@ -995,7 +1037,7 @@ static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
- struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc;
+ struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
unsigned int len_left = skb->len - skb_headlen(skb);
struct ionic_txq_sg_elem *elem = sg_desc->elems;
struct ionic_tx_stats *stats = q_to_tx_stats(q);
@@ -1104,9 +1146,9 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- if (unlikely(!lif_to_txqcq(lif, queue_index)))
+ if (unlikely(queue_index >= lif->nxqs))
queue_index = 0;
- q = lif_to_txq(lif, queue_index);
+ q = &lif->txqcqs[queue_index]->q;
ndescs = ionic_tx_descs_needed(q, skb);
if (ndescs < 0)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 8f743d80760b..4366c7a8de95 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -80,7 +80,7 @@ config QED
select CRC8
select NET_DEVLINK
help
- This enables the support for ...
+ This enables support for the Marvell FastLinQ adapter family.
config QED_LL2
bool
@@ -100,7 +100,8 @@ config QEDE
depends on QED
imply PTP_1588_CLOCK
help
- This enables the support for ...
+ This enables support for the Ethernet driver of the Marvell
+ FastLinQ adapter family.
config QED_RDMA
bool
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 86153660d245..e5c51256243a 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1189,9 +1189,6 @@ typedef struct {
#define NX_FORCE_FW_RESET 0xdeaddead
-/* Fw dump levels */
-static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
-
/* Flash read/write address */
#define NX_FW_DUMP_REG1 0x00130060
#define NX_FW_DUMP_REG2 0x001e0000
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index c3f50ddbe824..dd22cb056d03 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -814,6 +814,9 @@ netxen_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
return 0;
}
+/* Fw dump levels */
+static const u32 FW_DUMP_LEVELS[] = { 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
+
static int
netxen_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index f947b105cf14..8251755ec18c 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -9,6 +9,7 @@ qed-y := \
qed_dcbx.o \
qed_debug.o \
qed_dev.o \
+ qed_devlink.o \
qed_hw.o \
qed_init_fw_funcs.o \
qed_init_ops.o \
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index b2a7b53ee760..a20cb8a0c377 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -572,7 +572,7 @@ struct qed_hwfn {
struct qed_consq *p_consq;
/* Slow-Path definitions */
- struct tasklet_struct *sp_dpc;
+ struct tasklet_struct sp_dpc;
bool b_sp_dpc_enabled;
struct qed_ptt *p_main_ptt;
@@ -807,6 +807,7 @@ struct qed_dev {
struct qed_llh_info *p_llh_info;
/* Linux specific here */
+ struct qed_dev_info common_dev_info;
struct qede_dev *edev;
struct pci_dev *pdev;
u32 flags;
@@ -849,7 +850,6 @@ struct qed_dev {
u32 rdma_max_srq_sge;
u16 tunn_feature_mask;
- struct devlink *dl;
bool iwarp_cmt;
};
@@ -981,6 +981,7 @@ void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
+int qed_recovery_process(struct qed_dev *cdev);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
enum qed_hw_err_type err_type);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 3db181f3617a..d2f5855b2ea7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3973,6 +3973,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
struct qed_mcp_link_speed_params *ext_speed;
struct qed_mcp_link_capabilities *p_caps;
struct qed_mcp_link_params *link;
+ int i;
/* Read global nvm_cfg address */
nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
@@ -4299,6 +4300,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
__set_bit(QED_DEV_CAP_ROCE,
&p_hwfn->hw_info.device_capabilities);
+ /* Read device serial number information from shmem */
+ addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
+ offsetof(struct nvm_cfg1, glob) +
+ offsetof(struct nvm_cfg1_glob, serial_number);
+
+ for (i = 0; i < 4; i++)
+ p_hwfn->hw_info.part_num[i] = qed_rd(p_hwfn, p_ptt, addr + i * 4);
+
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
new file mode 100644
index 000000000000..cf7f4da68e69
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/qed/qed_if.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_devlink.h"
+
+enum qed_devlink_param_id {
+ QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+};
+
+struct qed_fw_fatal_ctx {
+ enum qed_hw_err_type err_type;
+};
+
+int qed_report_fatal_error(struct devlink *devlink, enum qed_hw_err_type err_type)
+{
+ struct qed_devlink *qdl = devlink_priv(devlink);
+ struct qed_fw_fatal_ctx fw_fatal_ctx = {
+ .err_type = err_type,
+ };
+
+ if (qdl->fw_reporter)
+ devlink_health_report(qdl->fw_reporter,
+ "Fatal error occurred", &fw_fatal_ctx);
+
+ return 0;
+}
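
/* Editor's sketch of the consumer side: a driver that detects a fatal
 * HW condition routes it here through the common ops.  Field and
 * callback names are assumptions based on the ops wiring later in this
 * patch:
 *
 *	if (edev->devlink)
 *		edev->ops->common->report_fatal_error(edev->devlink,
 *						      err_type);
 */
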
+
+static int
+qed_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_fw_fatal_ctx *fw_fatal_ctx = priv_ctx;
+ struct qed_dev *cdev = qdl->cdev;
+ u32 dbg_data_buf_size;
+ u8 *p_dbg_data_buf;
+ int err;
+
+ /* Having a context means this was a dump request after a fatal
+ * event, so enable extra debugging while gathering the dump,
+ * just in case
+ */
+ cdev->print_dbg_data = !!fw_fatal_ctx;
+
+ dbg_data_buf_size = qed_dbg_all_data_size(cdev);
+ p_dbg_data_buf = vzalloc(dbg_data_buf_size);
+ if (!p_dbg_data_buf) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for a debug data buffer\n");
+ return -ENOMEM;
+ }
+
+ err = qed_dbg_all_data(cdev, p_dbg_data_buf);
+ if (err) {
+ DP_NOTICE(cdev, "Failed to obtain debug data\n");
+ vfree(p_dbg_data_buf);
+ return err;
+ }
+
+ err = devlink_fmsg_binary_pair_put(fmsg, "dump_data",
+ p_dbg_data_buf, dbg_data_buf_size);
+
+ vfree(p_dbg_data_buf);
+
+ return err;
+}
+
+static int
+qed_fw_fatal_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qdl = devlink_health_reporter_priv(reporter);
+ struct qed_dev *cdev = qdl->cdev;
+
+ qed_recovery_process(cdev);
+
+ return 0;
+}
+
+static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = {
+ .name = "fw_fatal",
+ .recover = qed_fw_fatal_reporter_recover,
+ .dump = qed_fw_fatal_reporter_dump,
+};
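
/* Editor's note: once registered below, this reporter is driven from
 * userspace, e.g. "devlink health dump show <dev> reporter fw_fatal"
 * invokes .dump and "devlink health recover <dev> reporter fw_fatal"
 * invokes .recover, which feeds into qed_recovery_process().
 */
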
+
+#define QED_REPORTER_FW_GRACEFUL_PERIOD 1200000
+
+void qed_fw_reporters_create(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+
+ dl->fw_reporter = devlink_health_reporter_create(devlink, &qed_fw_fatal_reporter_ops,
+ QED_REPORTER_FW_GRACEFUL_PERIOD, dl);
+ if (IS_ERR(dl->fw_reporter)) {
+ DP_NOTICE(dl->cdev, "Failed to create fw reporter, err = %ld\n",
+ PTR_ERR(dl->fw_reporter));
+ dl->fw_reporter = NULL;
+ }
+}
+
+void qed_fw_reporters_destroy(struct devlink *devlink)
+{
+ struct qed_devlink *dl = devlink_priv(devlink);
+ struct devlink_health_reporter *rep;
+
+ rep = dl->fw_reporter;
+
+ if (!IS_ERR_OR_NULL(rep))
+ devlink_health_reporter_destroy(rep);
+}
+
+static int qed_dl_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ ctx->val.vbool = cdev->iwarp_cmt;
+
+ return 0;
+}
+
+static int qed_dl_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct qed_devlink *qed_dl = devlink_priv(dl);
+ struct qed_dev *cdev;
+
+ cdev = qed_dl->cdev;
+ cdev->iwarp_cmt = ctx->val.vbool;
+
+ return 0;
+}
+
+static const struct devlink_param qed_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ qed_dl_param_get, qed_dl_param_set, NULL),
+};
+
+static int qed_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct qed_devlink *qed_dl = devlink_priv(devlink);
+ struct qed_dev *cdev = qed_dl->cdev;
+ struct qed_dev_info *dev_info;
+ char buf[100];
+ int err;
+
+ dev_info = &cdev->common_dev_info;
+
+ err = devlink_info_driver_name_put(req, KBUILD_MODNAME);
+ if (err)
+ return err;
+
+ memcpy(buf, cdev->hwfns[0].hw_info.part_num, sizeof(cdev->hwfns[0].hw_info.part_num));
+ buf[sizeof(cdev->hwfns[0].hw_info.part_num)] = 0;
+
+ if (buf[0]) {
+ err = devlink_info_board_serial_number_put(req, buf);
+ if (err)
+ return err;
+ }
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_3),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_2),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_1),
+ GET_MFW_FIELD(dev_info->mfw_rev, QED_MFW_VERSION_0));
+
+ err = devlink_info_version_stored_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, buf);
+ if (err)
+ return err;
+
+ snprintf(buf, sizeof(buf), "%d.%d.%d.%d",
+ dev_info->fw_major,
+ dev_info->fw_minor,
+ dev_info->fw_rev,
+ dev_info->fw_eng);
+
+ return devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW_APP, buf);
+}
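
/* Editor's note: the attributes filled in above surface through
 * "devlink dev info <dev>" as the driver name, board.serial_number,
 * and the generic fw.mgmt (stored) / fw.app (running) version strings.
 */
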
+
+static const struct devlink_ops qed_dl_ops = {
+ .info_get = qed_devlink_info_get,
+};
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev)
+{
+ union devlink_param_value value;
+ struct qed_devlink *qdevlink;
+ struct devlink *dl;
+ int rc;
+
+ dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink));
+ if (!dl)
+ return ERR_PTR(-ENOMEM);
+
+ qdevlink = devlink_priv(dl);
+ qdevlink->cdev = cdev;
+
+ rc = devlink_register(dl, &cdev->pdev->dev);
+ if (rc)
+ goto err_free;
+
+ rc = devlink_params_register(dl, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+ if (rc)
+ goto err_unregister;
+
+ value.vbool = false;
+ devlink_param_driverinit_value_set(dl,
+ QED_DEVLINK_PARAM_ID_IWARP_CMT,
+ value);
+
+ devlink_params_publish(dl);
+ cdev->iwarp_cmt = false;
+
+ qed_fw_reporters_create(dl);
+
+ return dl;
+
+err_unregister:
+ devlink_unregister(dl);
+
+err_free:
+ devlink_free(dl);
+
+ return ERR_PTR(rc);
+}
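
/* Editor's sketch of the expected caller pattern (qede side; names are
 * assumptions): the API now returns ERR_PTR() instead of caching the
 * devlink pointer in cdev, so callers must IS_ERR-check it:
 *
 *	edev->devlink = edev->ops->common->devlink_register(cdev);
 *	if (IS_ERR(edev->devlink))
 *		edev->devlink = NULL;
 */
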
+
+void qed_devlink_unregister(struct devlink *devlink)
+{
+ if (!devlink)
+ return;
+
+ qed_fw_reporters_destroy(devlink);
+
+ devlink_params_unregister(devlink, qed_devlink_params,
+ ARRAY_SIZE(qed_devlink_params));
+
+ devlink_unregister(devlink);
+ devlink_free(devlink);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.h b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
new file mode 100644
index 000000000000..ccc7d1d1bfd4
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Marvell/Qlogic FastLinQ NIC driver
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ */
+#ifndef _QED_DEVLINK_H
+#define _QED_DEVLINK_H
+
+#include <linux/qed/qed_if.h>
+#include <net/devlink.h>
+
+struct devlink *qed_devlink_register(struct qed_dev *cdev);
+void qed_devlink_unregister(struct devlink *devlink);
+
+void qed_fw_reporters_create(struct devlink *devlink);
+void qed_fw_reporters_destroy(struct devlink *devlink);
+
+int qed_report_fatal_error(struct devlink *dl, enum qed_hw_err_type err_type);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f8c5a864812d..578935f643b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -1216,9 +1216,9 @@ static void qed_sb_ack_attn(struct qed_hwfn *p_hwfn,
barrier();
}
-void qed_int_sp_dpc(unsigned long hwfn_cookie)
+void qed_int_sp_dpc(struct tasklet_struct *t)
{
- struct qed_hwfn *p_hwfn = (struct qed_hwfn *)hwfn_cookie;
+ struct qed_hwfn *p_hwfn = from_tasklet(p_hwfn, t, sp_dpc);
struct qed_pi_info *pi_info = NULL;
struct qed_sb_attn_info *sb_attn;
struct qed_sb_info *sb_info;
@@ -2285,34 +2285,14 @@ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn)
static void qed_int_sp_dpc_setup(struct qed_hwfn *p_hwfn)
{
- tasklet_init(p_hwfn->sp_dpc,
- qed_int_sp_dpc, (unsigned long)p_hwfn);
+ tasklet_setup(&p_hwfn->sp_dpc, qed_int_sp_dpc);
p_hwfn->b_sp_dpc_enabled = true;
}
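
/* Editor's note: from_tasklet() used above is container_of() in
 * disguise; with the tasklet embedded in struct qed_hwfn, the callback
 * recovers its hwfn without the old unsigned-long cookie cast.  From
 * <linux/interrupt.h>:
 *
 *	#define from_tasklet(var, callback_tasklet, tasklet_fieldname) \
 *		container_of(callback_tasklet, typeof(*var), tasklet_fieldname)
 */
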
-static int qed_int_sp_dpc_alloc(struct qed_hwfn *p_hwfn)
-{
- p_hwfn->sp_dpc = kmalloc(sizeof(*p_hwfn->sp_dpc), GFP_KERNEL);
- if (!p_hwfn->sp_dpc)
- return -ENOMEM;
-
- return 0;
-}
-
-static void qed_int_sp_dpc_free(struct qed_hwfn *p_hwfn)
-{
- kfree(p_hwfn->sp_dpc);
- p_hwfn->sp_dpc = NULL;
-}
-
int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
int rc = 0;
- rc = qed_int_sp_dpc_alloc(p_hwfn);
- if (rc)
- return rc;
-
rc = qed_int_sp_sb_alloc(p_hwfn, p_ptt);
if (rc)
return rc;
@@ -2326,7 +2306,6 @@ void qed_int_free(struct qed_hwfn *p_hwfn)
{
qed_int_sp_sb_free(p_hwfn);
qed_int_sb_attn_free(p_hwfn);
- qed_int_sp_dpc_free(p_hwfn);
}
void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 86809d7bc2de..c5550e96bbe1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -140,7 +140,7 @@ int qed_int_sb_release(struct qed_hwfn *p_hwfn,
* @param p_hwfn - pointer to hwfn
*
*/
-void qed_int_sp_dpc(unsigned long hwfn_cookie);
+void qed_int_sp_dpc(struct tasklet_struct *t);
/**
* @brief qed_int_get_num_sbs - get the number of status
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 0452b728c527..49783f365079 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1185,7 +1185,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
.elem_size = sizeof(struct core_tx_bd),
};
struct qed_ll2_tx_packet *p_descq;
- u32 desc_size;
+ size_t desc_size;
u32 capacity;
int rc = 0;
@@ -1198,10 +1198,9 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
goto out;
capacity = qed_chain_get_capacity(&p_ll2_info->tx_queue.txq_chain);
- /* First element is part of the packet, rest are flexibly added */
- desc_size = (sizeof(*p_descq) +
- (p_ll2_info->input.tx_max_bds_per_packet - 1) *
- sizeof(p_descq->bds_set));
+ /* All bds_set elements are flexibly added. */
+ desc_size = struct_size(p_descq, bds_set,
+ p_ll2_info->input.tx_max_bds_per_packet);
p_descq = kcalloc(capacity, desc_size, GFP_KERNEL);
if (!p_descq) {
@@ -1524,7 +1523,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
- u32 desc_size;
+ size_t desc_size;
u8 qid;
p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1558,10 +1557,9 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
INIT_LIST_HEAD(&p_tx->sending_descq);
spin_lock_init(&p_tx->lock);
capacity = qed_chain_get_capacity(&p_tx->txq_chain);
- /* First element is part of the packet, rest are flexibly added */
- desc_size = (sizeof(*p_pkt) +
- (p_ll2_conn->input.tx_max_bds_per_packet - 1) *
- sizeof(p_pkt->bds_set));
+ /* All bds_set elements are flexibly added. */
+ desc_size = struct_size(p_pkt, bds_set,
+ p_ll2_conn->input.tx_max_bds_per_packet);
for (i = 0; i < capacity; i++) {
p_pkt = p_tx->descq_mem + desc_size * i;
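
/* Editor's note: with bds_set[] converted to a true flexible array in
 * the qed_ll2.h hunk below, struct_size() computes the allocation size
 * with overflow checking.  Conceptually,
 *
 *	struct_size(p_pkt, bds_set, n)
 *		== sizeof(*p_pkt) + n * sizeof(p_pkt->bds_set[0])
 *
 * except that it saturates to SIZE_MAX on overflow rather than
 * wrapping, which the old "(n - 1) * sizeof(...)" form could do.
 */
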
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 500d0c4f8077..df88d00053a2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -56,7 +56,7 @@ struct qed_ll2_tx_packet {
struct core_tx_bd *txq_bd;
dma_addr_t tx_frag;
u16 frag_len;
- } bds_set[1];
+ } bds_set[];
};
struct qed_ll2_rx_queue {
@@ -86,9 +86,6 @@ struct qed_ll2_tx_queue {
struct list_head active_descq;
struct list_head free_descq;
struct list_head sending_descq;
- void *descq_mem; /* memory for variable sized qed_ll2_tx_packet*/
- struct qed_ll2_tx_packet *cur_send_packet;
- struct qed_ll2_tx_packet cur_completing_packet;
u16 cur_completing_bd_idx;
void __iomem *doorbell_addr;
struct core_db_data db_msg;
@@ -96,6 +93,9 @@ struct qed_ll2_tx_queue {
u16 cur_send_frag_num;
u16 cur_completing_frag_num;
bool b_completing_packet;
+ void *descq_mem; /* memory for variable-sized qed_ll2_tx_packet */
+ struct qed_ll2_tx_packet *cur_send_packet;
+ struct qed_ll2_tx_packet cur_completing_packet;
};
struct qed_ll2_info {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 50e5eb22e60a..5bd58c65e163 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -39,6 +39,7 @@
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
+#include "qed_devlink.h"
#define QED_ROCE_QPS (8192)
#define QED_ROCE_DPIS (8)
@@ -480,6 +481,7 @@ int qed_fill_dev_info(struct qed_dev *cdev,
}
dev_info->mtu = hw_info->mtu;
+ cdev->common_dev_info = *dev_info;
return 0;
}
@@ -512,107 +514,6 @@ static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
return 0;
}
-struct qed_devlink {
- struct qed_dev *cdev;
-};
-
-enum qed_devlink_param_id {
- QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
-};
-
-static int qed_dl_param_get(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- ctx->val.vbool = cdev->iwarp_cmt;
-
- return 0;
-}
-
-static int qed_dl_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct qed_devlink *qed_dl;
- struct qed_dev *cdev;
-
- qed_dl = devlink_priv(dl);
- cdev = qed_dl->cdev;
- cdev->iwarp_cmt = ctx->val.vbool;
-
- return 0;
-}
-
-static const struct devlink_param qed_devlink_params[] = {
- DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
- "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- qed_dl_param_get, qed_dl_param_set, NULL),
-};
-
-static const struct devlink_ops qed_dl_ops;
-
-static int qed_devlink_register(struct qed_dev *cdev)
-{
- union devlink_param_value value;
- struct qed_devlink *qed_dl;
- struct devlink *dl;
- int rc;
-
- dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
- if (!dl)
- return -ENOMEM;
-
- qed_dl = devlink_priv(dl);
-
- cdev->dl = dl;
- qed_dl->cdev = cdev;
-
- rc = devlink_register(dl, &cdev->pdev->dev);
- if (rc)
- goto err_free;
-
- rc = devlink_params_register(dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
- if (rc)
- goto err_unregister;
-
- value.vbool = false;
- devlink_param_driverinit_value_set(dl,
- QED_DEVLINK_PARAM_ID_IWARP_CMT,
- value);
-
- devlink_params_publish(dl);
- cdev->iwarp_cmt = false;
-
- return 0;
-
-err_unregister:
- devlink_unregister(dl);
-
-err_free:
- cdev->dl = NULL;
- devlink_free(dl);
-
- return rc;
-}
-
-static void qed_devlink_unregister(struct qed_dev *cdev)
-{
- if (!cdev->dl)
- return;
-
- devlink_params_unregister(cdev->dl, qed_devlink_params,
- ARRAY_SIZE(qed_devlink_params));
-
- devlink_unregister(cdev->dl);
- devlink_free(cdev->dl);
-}
-
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
struct qed_probe_params *params)
@@ -641,12 +542,6 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
}
DP_INFO(cdev, "PCI init completed successfully\n");
- rc = qed_devlink_register(cdev);
- if (rc) {
- DP_INFO(cdev, "Failed to register devlink.\n");
- goto err2;
- }
-
rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
if (rc) {
DP_ERR(cdev, "hw prepare failed\n");
@@ -676,8 +571,6 @@ static void qed_remove(struct qed_dev *cdev)
qed_set_power_state(cdev, PCI_D3hot);
- qed_devlink_unregister(cdev);
-
qed_free_cdev(cdev);
}
@@ -843,7 +736,7 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
/* Slowpath interrupt */
if (unlikely(status & 0x1)) {
- tasklet_schedule(hwfn->sp_dpc);
+ tasklet_schedule(&hwfn->sp_dpc);
status &= ~0x1;
rc = IRQ_HANDLED;
}
@@ -889,7 +782,7 @@ int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
id, cdev->pdev->bus->number,
PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
rc = request_irq(cdev->int_params.msix_table[id].vector,
- qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+ qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
} else {
unsigned long flags = 0;
@@ -921,8 +814,8 @@ static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
* enable function makes this sequence a flush-like operation.
*/
if (p_hwfn->b_sp_dpc_enabled) {
- tasklet_disable(p_hwfn->sp_dpc);
- tasklet_enable(p_hwfn->sp_dpc);
+ tasklet_disable(&p_hwfn->sp_dpc);
+ tasklet_enable(&p_hwfn->sp_dpc);
}
}
@@ -951,7 +844,7 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
break;
synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
- cdev->hwfns[i].sp_dpc);
+ &cdev->hwfns[i].sp_dpc);
}
} else {
if (QED_LEADING_HWFN(cdev)->b_int_requested)
@@ -970,11 +863,11 @@ static int qed_nic_stop(struct qed_dev *cdev)
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (p_hwfn->b_sp_dpc_enabled) {
- tasklet_disable(p_hwfn->sp_dpc);
+ tasklet_disable(&p_hwfn->sp_dpc);
p_hwfn->b_sp_dpc_enabled = false;
DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
"Disabled sp tasklet [hwfn %d] at %p\n",
- i, p_hwfn->sp_dpc);
+ i, &p_hwfn->sp_dpc);
}
}
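
The sp_dpc hunks above reflect qed embedding the slowpath tasklet directly in the hwfn instead of allocating it separately, which is why every call site now takes its address with "&". A minimal sketch of the embedded-tasklet pattern, using hypothetical demo_* names rather than the real qed structures:

	#include <linux/interrupt.h>

	struct demo_hwfn {
		struct tasklet_struct sp_dpc;	/* embedded, no longer allocated separately */
	};

	/* the callback receives the tasklet; recover the container with from_tasklet() */
	static void demo_sp_dpc(struct tasklet_struct *t)
	{
		struct demo_hwfn *hwfn = from_tasklet(hwfn, t, sp_dpc);

		(void)hwfn;	/* ... slowpath work on hwfn ... */
	}

	static void demo_init(struct demo_hwfn *hwfn)
	{
		tasklet_setup(&hwfn->sp_dpc, demo_sp_dpc);
	}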
@@ -2926,7 +2819,7 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
-static int qed_recovery_process(struct qed_dev *cdev)
+int qed_recovery_process(struct qed_dev *cdev)
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt;
@@ -3114,6 +3007,9 @@ const struct qed_common_ops qed_common_ops_pass = {
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
+ .devlink_register = qed_devlink_register,
+ .devlink_unregister = qed_devlink_unregister,
+ .report_fatal_error = qed_report_fatal_error,
.dbg_all_data = &qed_dbg_all_data,
.dbg_all_data_size = &qed_dbg_all_data_size,
.chain_alloc = &qed_chain_alloc,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index a4bcde522cdf..d3136556a1e9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -1151,7 +1151,6 @@ qed_rdma_destroy_cq(void *rdma_cxt,
DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", in_params->icid);
p_ramrod_res =
- (struct rdma_destroy_cq_output_params *)
dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct rdma_destroy_cq_output_params),
&ramrod_res_phys, GFP_KERNEL);
@@ -1463,14 +1462,14 @@ static int qed_rdma_modify_qp(void *rdma_cxt,
switch (qp->qp_type) {
case QED_RDMA_QP_TYPE_XRC_INI:
- qp->has_req = 1;
+ qp->has_req = true;
break;
case QED_RDMA_QP_TYPE_XRC_TGT:
- qp->has_resp = 1;
+ qp->has_resp = true;
break;
default:
- qp->has_req = 1;
- qp->has_resp = 1;
+ qp->has_req = true;
+ qp->has_resp = true;
}
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 803c1fcca8ad..3efc5899f656 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -172,6 +172,7 @@ struct qede_dev {
struct qed_dev *cdev;
struct net_device *ndev;
struct pci_dev *pdev;
+ struct devlink *devlink;
u32 dp_module;
u8 dp_level;
@@ -263,6 +264,7 @@ struct qede_dev {
struct bpf_prog *xdp_prog;
+ enum qed_hw_err_type last_err_type;
unsigned long err_flags;
#define QEDE_ERR_IS_HANDLED 31
#define QEDE_ERR_ATTN_CLR_EN 0
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9e1f41ba766c..05e3a3b60269 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1170,10 +1170,23 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
rc = -ENOMEM;
goto err2;
}
+
+ edev->devlink = qed_ops->common->devlink_register(cdev);
+ if (IS_ERR(edev->devlink)) {
+ DP_NOTICE(edev, "Cannot register devlink\n");
+ edev->devlink = NULL;
+ /* Go on, we can live without devlink */
+ }
} else {
struct net_device *ndev = pci_get_drvdata(pdev);
edev = netdev_priv(ndev);
+
+ if (edev->devlink) {
+ struct qed_devlink *qdl = devlink_priv(edev->devlink);
+
+ qdl->cdev = cdev;
+ }
edev->cdev = cdev;
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
@@ -1225,7 +1238,10 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
err4:
qede_rdma_dev_remove(edev, (mode == QEDE_PROBE_RECOVERY));
err3:
- free_netdev(edev->ndev);
+ if (mode != QEDE_PROBE_RECOVERY)
+ free_netdev(edev->ndev);
+ else
+ edev->cdev = NULL;
err2:
qed_ops->common->slowpath_stop(cdev);
err1:
@@ -1296,6 +1312,11 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
qed_ops->common->slowpath_stop(cdev);
if (system_state == SYSTEM_POWER_OFF)
return;
+
+ if (mode != QEDE_REMOVE_RECOVERY && edev->devlink) {
+ qed_ops->common->devlink_unregister(edev->devlink);
+ edev->devlink = NULL;
+ }
qed_ops->common->remove(cdev);
edev->cdev = NULL;
@@ -2454,7 +2475,8 @@ static int qede_close(struct net_device *ndev)
qede_unload(edev, QEDE_UNLOAD_NORMAL, false);
- edev->ops->common->update_drv_state(edev->cdev, false);
+ if (edev->cdev)
+ edev->ops->common->update_drv_state(edev->cdev, false);
return 0;
}
@@ -2576,19 +2598,12 @@ static void qede_atomic_hw_err_handler(struct qede_dev *edev)
static void qede_generic_hw_err_handler(struct qede_dev *edev)
{
- struct qed_dev *cdev = edev->cdev;
-
DP_NOTICE(edev,
"Generic sleepable HW error handling started - err_flags 0x%lx\n",
edev->err_flags);
- /* Trigger a recovery process.
- * This is placed in the sleep requiring section just to make
- * sure it is the last one, and that all the other operations
- * were completed.
- */
- if (test_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags))
- edev->ops->common->recovery_process(cdev);
+ if (edev->devlink)
+ edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type);
clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags);
@@ -2642,6 +2657,7 @@ static void qede_schedule_hw_err_handler(void *dev,
return;
}
+ edev->last_err_type = err_type;
qede_set_hw_err_flags(edev, err_type);
qede_atomic_hw_err_handler(edev);
set_bit(QEDE_SP_HW_ERR, &edev->sp_flags);
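
With recovery now funnelled through report_fatal_error(), the qed side (in the new qed_devlink.c, which is not part of this hunk) is expected to hand the error to a devlink health reporter. A hedged sketch of that flow; the fw_reporter field is an assumption, not the verified qed layout:

	#include <net/devlink.h>

	static int demo_report_fatal_error(struct devlink *devlink,
					   enum qed_hw_err_type err_type)
	{
		struct qed_devlink *qdl = devlink_priv(devlink);

		/* fw_reporter is assumed to have been created at registration
		 * time via devlink_health_reporter_create(); reporting runs
		 * the reporter's dump and, optionally, recover callbacks.
		 */
		return devlink_health_report(qdl->fw_reporter, "Fatal HW error",
					     &err_type);
	}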
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 31ad3a5cd128..d8882d0b6b49 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -657,11 +657,10 @@ int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
{
void __iomem *addr;
- u32 val;
struct qlcnic_hardware_context *ahw = adapter->ahw;
addr = ahw->pci_base0 + QLC_83XX_SEM_UNLOCK_FUNC(ahw->pci_func);
- val = readl(addr);
+ readl(addr);
}
void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
@@ -3812,7 +3811,6 @@ static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
qlcnic_cancel_idc_work(adapter);
@@ -3823,11 +3821,7 @@ static int qlcnic_83xx_shutdown(struct pci_dev *pdev)
qlcnic_83xx_disable_mbx_intr(adapter);
cancel_delayed_work_sync(&adapter->idc_aen_work);
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- return 0;
+ return pci_save_state(pdev);
}
static int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 1166b98d8bb2..8543bf3c3484 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -292,6 +292,7 @@ static void emac_tx_timeout(struct net_device *netdev, unsigned int txqueue)
/**
* emac_update_hw_stats - read the EMAC stat registers
+ * @adpt: pointer to adapter struct
*
* Reads the stats registers and writes the values to adpt->stats.
*
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 375a844cd27c..362b4f5c162c 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -167,7 +167,7 @@ static void qca_tty_wakeup(struct serdev_device *serdev)
schedule_work(&qca->tx_work);
}
-static struct serdev_device_ops qca_serdev_ops = {
+static const struct serdev_device_ops qca_serdev_ops = {
.receive_buf = qca_tty_receive,
.write_wakeup = qca_tty_wakeup,
};
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e291e6ac40cb..4e44313b7651 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1239,7 +1239,7 @@ static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- int rc, i;
+ int i;
netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
cpr8(Cmd), cpr16(CpCmd),
@@ -1260,7 +1260,7 @@ static void cp_tx_timeout(struct net_device *dev, unsigned int txqueue)
cp_stop_hw(cp);
cp_clean_rings(cp);
- rc = cp_init_rings(cp);
+ cp_init_rings(cp);
cp_start_hw(cp);
__cp_set_rx_mode(dev);
cpw16_f(IntrMask, cp_norx_intr_mask);
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 227139d42227..1e5a453dea14 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -978,7 +978,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
pr_info("OQO Model 2 detected. Forcing PIO\n");
- use_io = 1;
+ use_io = true;
}
dev = rtl8139_init_board (pdev);
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 11e6962a18e4..7d366b0362cb 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -617,7 +617,6 @@ struct rtl8169_private {
struct work_struct work;
} wk;
- unsigned irq_enabled:1;
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
dma_addr_t counters_phys_addr;
@@ -701,6 +700,27 @@ static bool rtl_supports_eee(struct rtl8169_private *tp)
tp->mac_version != RTL_GIGA_MAC_VER_39;
}
+static void rtl_get_priv_stats(struct rtl8169_stats *stats,
+ u64 *pkts, u64 *bytes)
+{
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ *pkts = stats->packets;
+ *bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+}
+
+static void rtl_inc_priv_stats(struct rtl8169_stats *stats,
+ u64 pkts, u64 bytes)
+{
+ u64_stats_update_begin(&stats->syncp);
+ stats->packets += pkts;
+ stats->bytes += bytes;
+ u64_stats_update_end(&stats->syncp);
+}
+
static void rtl_read_mac_from_reg(struct rtl8169_private *tp, u8 *mac, int reg)
{
int i;
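
rtl_get_priv_stats() and rtl_inc_priv_stats() above factor out the standard u64_stats_sync idiom. For reference, a self-contained sketch of that reader/writer pattern (demo_stats is illustrative, not the r8169 struct):

	#include <linux/u64_stats_sync.h>

	struct demo_stats {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;	/* initialise with u64_stats_init() */
	};

	/* writer side: a single writer per instance, e.g. the NAPI context */
	static void demo_stats_add(struct demo_stats *s, u64 pkts, u64 bytes)
	{
		u64_stats_update_begin(&s->syncp);
		s->packets += pkts;
		s->bytes += bytes;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side: retry if an update raced with us (matters on 32-bit) */
	static void demo_stats_read(struct demo_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			*pkts = s->packets;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));
	}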
@@ -1280,12 +1300,10 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
RTL_W32(tp, IntrMask_8125, 0);
else
RTL_W16(tp, IntrMask, 0);
- tp->irq_enabled = 0;
}
static void rtl_irq_enable(struct rtl8169_private *tp)
{
- tp->irq_enabled = 1;
if (rtl_is_8125(tp))
RTL_W32(tp, IntrMask_8125, tp->irq_mask);
else
@@ -4399,10 +4417,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
if (tp->dirty_tx != dirty_tx) {
netdev_completed_queue(dev, pkts_compl, bytes_compl);
- u64_stats_update_begin(&tp->tx_stats.syncp);
- tp->tx_stats.packets += pkts_compl;
- tp->tx_stats.bytes += bytes_compl;
- u64_stats_update_end(&tp->tx_stats.syncp);
+ rtl_inc_priv_stats(&tp->tx_stats, pkts_compl, bytes_compl);
tp->dirty_tx = dirty_tx;
/* Sync with rtl8169_start_xmit:
@@ -4524,11 +4539,7 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget
napi_gro_receive(&tp->napi, skb);
- u64_stats_update_begin(&tp->rx_stats.syncp);
- tp->rx_stats.packets++;
- tp->rx_stats.bytes += pkt_size;
- u64_stats_update_end(&tp->rx_stats.syncp);
-
+ rtl_inc_priv_stats(&tp->rx_stats, 1, pkt_size);
release_descriptor:
rtl8169_mark_to_asic(desc);
}
@@ -4544,8 +4555,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
struct rtl8169_private *tp = dev_instance;
u32 status = rtl_get_events(tp);
- if (!tp->irq_enabled || (status & 0xffff) == 0xffff ||
- !(status & tp->irq_mask))
+ if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
return IRQ_NONE;
if (unlikely(status & SYSErr)) {
@@ -4599,10 +4609,8 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
rtl_tx(dev, tp, budget);
- if (work_done < budget) {
- napi_complete_done(napi, work_done);
+ if (work_done < budget && napi_complete_done(napi, work_done))
rtl_irq_enable(tp);
- }
return work_done;
}
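
The rtl8169_poll() change above leans on the return value of napi_complete_done(): it is false when NAPI stayed scheduled (for instance because busy polling took over), in which case the device interrupt must remain masked. A minimal sketch of the idiom; the demo_* helpers and struct are hypothetical:

	struct demo_priv {
		struct napi_struct napi;
		/* ... */
	};

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		struct demo_priv *priv = container_of(napi, struct demo_priv, napi);
		int work_done = demo_rx(priv, budget);	/* hypothetical RX handler */

		/* only unmask the interrupt if NAPI really completed */
		if (work_done < budget && napi_complete_done(napi, work_done))
			demo_irq_enable(priv);		/* hypothetical */

		return work_done;
	}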
@@ -4778,23 +4786,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct rtl8169_private *tp = netdev_priv(dev);
struct pci_dev *pdev = tp->pci_dev;
struct rtl8169_counters *counters = tp->counters;
- unsigned int start;
pm_runtime_get_noresume(&pdev->dev);
netdev_stats_to_stats64(stats, &dev->stats);
- do {
- start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
- stats->rx_packets = tp->rx_stats.packets;
- stats->rx_bytes = tp->rx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start));
-
- do {
- start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp);
- stats->tx_packets = tp->tx_stats.packets;
- stats->tx_bytes = tp->tx_stats.bytes;
- } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start));
+ rtl_get_priv_stats(&tp->rx_stats, &stats->rx_packets, &stats->rx_bytes);
+ rtl_get_priv_stats(&tp->tx_stats, &stats->tx_packets, &stats->tx_bytes);
/*
* Fetch additional counter values missing in stats collected by driver
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 9f88b5db4f89..7453b17a37a2 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1036,7 +1036,10 @@ struct ravb_private {
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
unsigned wol_enabled:1;
- int num_tx_desc; /* TX descriptors per packet */
+ unsigned rxcidm:1; /* RX Clock Internal Delay Mode */
+ unsigned txcidm:1; /* TX Clock Internal Delay Mode */
+ unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */
+ int num_tx_desc; /* TX descriptors per packet */
};
static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg)
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 99f7aae102ce..9c4df4ede011 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -162,7 +162,7 @@ static int ravb_get_mdio_data(struct mdiobb_ctrl *ctrl)
}
/* MDIO bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = ravb_set_mdc,
.set_mdio_dir = ravb_set_mdio_dir,
@@ -1034,11 +1034,8 @@ static int ravb_phy_init(struct net_device *ndev)
pn = of_node_get(np);
}
- iface = priv->phy_interface;
- if (priv->chip_id != RCAR_GEN2 && phy_interface_mode_is_rgmii(iface)) {
- /* ravb_set_delay_mode() takes care of internal delay mode */
- iface = PHY_INTERFACE_MODE_RGMII;
- }
+ iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII
+ : priv->phy_interface;
phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0, iface);
of_node_put(pn);
if (!phydev) {
@@ -1989,23 +1986,53 @@ static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = {
};
/* Set tx and rx clock internal delay modes */
-static void ravb_set_delay_mode(struct net_device *ndev)
+static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
- int set = 0;
+ bool explicit_delay = false;
+ u32 delay;
+
+ if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 1800, according to DT bindings */
+ priv->rxcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) {
+ /* Valid values are 0 and 2000, according to DT bindings */
+ priv->txcidm = !!delay;
+ explicit_delay = true;
+ }
+ if (explicit_delay)
+ return;
+
+ /* Fall back to legacy rgmii-*id behavior */
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
- priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
- set |= APSR_DM_RDM;
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) {
+ priv->rxcidm = 1;
+ priv->rgmii_override = 1;
+ }
if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) {
if (!WARN(soc_device_match(ravb_delay_mode_quirk_match),
"phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. Please update device tree",
- phy_modes(priv->phy_interface)))
- set |= APSR_DM_TDM;
+ phy_modes(priv->phy_interface))) {
+ priv->txcidm = 1;
+ priv->rgmii_override = 1;
+ }
}
+}
+
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 set = 0;
+ if (priv->rxcidm)
+ set |= APSR_DM_RDM;
+ if (priv->txcidm)
+ set |= APSR_DM_TDM;
ravb_modify(ndev, APSR, APSR_DM, set);
}
@@ -2138,8 +2165,10 @@ static int ravb_probe(struct platform_device *pdev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
- if (priv->chip_id != RCAR_GEN2)
+ if (priv->chip_id != RCAR_GEN2) {
+ ravb_parse_delay_mode(np, ndev);
ravb_set_delay_mode(ndev);
+ }
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
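
ravb_parse_delay_mode() above hinges on of_property_read_u32() returning 0 on success, so "!of_property_read_u32(...)" reads as "property present". A small sketch of that pattern, assuming the binding only allows 0 or the fixed hardware delay:

	#include <linux/of.h>

	/* returns true when the DT property exists; *enable reflects a
	 * non-zero delay value
	 */
	static bool demo_read_delay_ps(struct device_node *np, const char *prop,
				       bool *enable)
	{
		u32 ps;

		if (of_property_read_u32(np, prop, &ps))
			return false;	/* absent: fall back to rgmii-*id */

		*enable = ps != 0;
		return true;
	}

Called once each for "rx-internal-delay-ps" and "tx-internal-delay-ps"; only when both are absent does the driver derive the delays from the legacy RGMII phy-mode, as the fallback branch above shows.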
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index f45331ed90b0..c63304632935 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -45,6 +45,15 @@
#define SH_ETH_OFFSET_DEFAULTS \
[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
+/* use some intentionally tricky logic here to initialize the whole struct to
+ * 0xffff, but then override certain fields, requiring us to indicate that we
+ * "know" that there are overrides in this structure, and we'll need to disable
+ * that warning from W=1 builds. GCC has supported this option since 4.2.X, but
+ * the macros available to do this are only defined for GCC 8 and later.
+ */
+__diag_push();
+__diag_ignore(GCC, 8, "-Woverride-init",
+ "logic to initialize all and then override some is OK");
static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
SH_ETH_OFFSET_DEFAULTS,
@@ -332,6 +341,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRH0] = 0x0100,
};
+__diag_pop();
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
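
The __diag_push()/__diag_ignore()/__diag_pop() trio above scopes the warning suppression to just the two register-offset tables. A minimal sketch of the same default-then-override pattern, assuming a toolchain where the __diag macros are defined (GCC 8+):

	__diag_push();
	__diag_ignore(GCC, 8, "-Woverride-init",
		      "default-fill the table, then override a few entries");

	/* every slot defaults to -1; two slots are deliberately overridden */
	static const int demo_reg_offset[16] = {
		[0 ... 15] = -1,
		[3] = 0x100,
		[7] = 0x104,
	};

	__diag_pop();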
@@ -1202,7 +1212,7 @@ static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
}
/* mdio bus control struct */
-static struct mdiobb_ops bb_ops = {
+static const struct mdiobb_ops bb_ops = {
.owner = THIS_MODULE,
.set_mdc = sh_mdc_ctrl,
.set_mdio_dir = sh_mmd_ctrl,
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 9cc31f7e0df1..dd0bc7f0aaee 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -200,9 +200,9 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
buf = alloc + offset;
expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
- dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
+ dma_handle = dma_map_single(&pdev->dev, buf, ROCKER_TEST_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, dma_handle)) {
err = -EIO;
goto free_alloc;
}
@@ -234,8 +234,8 @@ static int rocker_dma_test_offset(const struct rocker *rocker,
goto unmap;
unmap:
- pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
- PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_single(&pdev->dev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
free_alloc:
kfree(alloc);
@@ -441,9 +441,9 @@ static int rocker_dma_ring_create(const struct rocker *rocker,
if (!info->desc_info)
return -ENOMEM;
- info->desc = pci_alloc_consistent(rocker->pdev,
- info->size * sizeof(*info->desc),
- &info->mapaddr);
+ info->desc = dma_alloc_coherent(&rocker->pdev->dev,
+ info->size * sizeof(*info->desc),
+ &info->mapaddr, GFP_KERNEL);
if (!info->desc) {
kfree(info->desc_info);
return -ENOMEM;
@@ -465,9 +465,9 @@ static void rocker_dma_ring_destroy(const struct rocker *rocker,
{
rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
- pci_free_consistent(rocker->pdev,
- info->size * sizeof(struct rocker_desc),
- info->desc, info->mapaddr);
+ dma_free_coherent(&rocker->pdev->dev,
+ info->size * sizeof(struct rocker_desc), info->desc,
+ info->mapaddr);
kfree(info->desc_info);
}
@@ -506,8 +506,9 @@ static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
goto rollback;
}
- dma_handle = pci_map_single(pdev, buf, buf_size, direction);
- if (pci_dma_mapping_error(pdev, dma_handle)) {
+ dma_handle = dma_map_single(&pdev->dev, buf, buf_size,
+ direction);
+ if (dma_mapping_error(&pdev->dev, dma_handle)) {
kfree(buf);
err = -EIO;
goto rollback;
@@ -526,7 +527,8 @@ rollback:
for (i--; i >= 0; i--) {
const struct rocker_desc_info *desc_info = &info->desc_info[i];
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
kfree(desc_info->data);
}
@@ -546,7 +548,8 @@ static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
desc->buf_addr = 0;
desc->buf_size = 0;
- pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
+ dma_unmap_single(&pdev->dev,
+ dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
kfree(desc_info->data);
}
@@ -615,7 +618,7 @@ static int rocker_dma_rings_init(struct rocker *rocker)
spin_lock_init(&rocker->cmd_ring_lock);
err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
+ DMA_BIDIRECTIONAL, PAGE_SIZE);
if (err) {
dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
goto err_dma_cmd_ring_bufs_alloc;
@@ -636,7 +639,7 @@ static int rocker_dma_rings_init(struct rocker *rocker)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
- PCI_DMA_FROMDEVICE, PAGE_SIZE);
+ DMA_FROM_DEVICE, PAGE_SIZE);
if (err) {
dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
goto err_dma_event_ring_bufs_alloc;
@@ -650,7 +653,7 @@ err_dma_event_ring_create:
rocker_dma_cmd_ring_waits_free(rocker);
err_dma_cmd_ring_waits_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
err_dma_cmd_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
return err;
@@ -659,11 +662,11 @@ err_dma_cmd_ring_bufs_alloc:
static void rocker_dma_rings_fini(struct rocker *rocker)
{
rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker->event_ring);
rocker_dma_cmd_ring_waits_free(rocker);
rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
@@ -675,9 +678,9 @@ static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
- dma_handle = pci_map_single(pdev, skb->data, buf_len,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, dma_handle))
+ dma_handle = dma_map_single(&pdev->dev, skb->data, buf_len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma_handle))
return -EIO;
if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
goto tlv_put_failure;
@@ -686,7 +689,7 @@ static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
return 0;
tlv_put_failure:
- pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_FROM_DEVICE);
desc_info->tlv_size = 0;
return -EMSGSIZE;
}
@@ -734,7 +737,7 @@ static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
return;
dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
- pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, len, DMA_FROM_DEVICE);
}
static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
@@ -796,7 +799,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE,
+ DMA_TO_DEVICE,
ROCKER_DMA_TX_DESC_SIZE);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
@@ -813,7 +816,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
}
err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
ROCKER_DMA_RX_DESC_SIZE);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
@@ -831,12 +834,12 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
err_dma_rx_ring_skbs_alloc:
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
err_dma_rx_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
err_dma_rx_ring_create:
rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
err_dma_tx_ring_bufs_alloc:
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
return err;
@@ -848,10 +851,10 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
rocker_dma_rx_ring_skbs_free(rocker_port);
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
@@ -1858,7 +1861,7 @@ static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
continue;
dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
- pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, len, DMA_TO_DEVICE);
}
}
@@ -1871,8 +1874,8 @@ static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
dma_addr_t dma_handle;
struct rocker_tlv *frag;
- dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
+ dma_handle = dma_map_single(&pdev->dev, buf, buf_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&pdev->dev, dma_handle))) {
if (net_ratelimit())
netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
return -EIO;
@@ -1892,7 +1895,7 @@ static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
nest_cancel:
rocker_tlv_nest_cancel(desc_info, frag);
unmap_frag:
- pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
+ dma_unmap_single(&pdev->dev, dma_handle, buf_len, DMA_TO_DEVICE);
return -EMSGSIZE;
}
@@ -2905,17 +2908,17 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_pci_request_regions;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (!err) {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
- dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
goto err_pci_set_dma_mask;
}
} else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
+ dev_err(&pdev->dev, "dma_set_mask failed\n");
goto err_pci_set_dma_mask;
}
}
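
The rocker conversion above mechanically swaps the deprecated pci_* DMA wrappers for the generic dma_* API against &pdev->dev. A hedged sketch of the resulting idiom; note dma_set_mask_and_coherent() can even fold the driver's two mask calls into one:

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int demo_dma_setup(struct pci_dev *pdev, void *buf, size_t len,
				  dma_addr_t *handle)
	{
		/* prefer 64-bit addressing, fall back to 32-bit */
		int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

		if (err)
			err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;

		*handle = dma_map_single(&pdev->dev, buf, len, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&pdev->dev, *handle))
			return -EIO;	/* the handle must not be used */

		return 0;
	}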
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 2cc8184b7e6b..971f1e54b652 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -97,7 +97,7 @@ void sxgbe_disable_eee_mode(struct sxgbe_priv_data * const priv)
/**
* sxgbe_eee_ctrl_timer
- * @arg : data hook
+ * @t: timer_list entry embedded in the driver private data
* Description:
* If there is no data transfer and if we are not in LPI state,
* then the MAC transmitter can be moved to LPI state.
@@ -255,7 +255,7 @@ static void sxgbe_adjust_link(struct net_device *dev)
/**
* sxgbe_init_phy - PHY initialization
- * @dev: net device structure
+ * @ndev: net device structure
* Description: it initializes the driver's PHY state, and attaches the PHY
* to the mac driver.
* Return value:
@@ -364,8 +364,11 @@ static int sxgbe_init_rx_buffers(struct net_device *dev,
/**
* sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
* @dev: net device structure
+ * @p: descriptor pointer
+ * @i: descriptor index
+ * @dma_buf_sz: DMA buffer size
* @rx_ring: ring to be freed
- * @rx_rsize: ring size
+ *
* Description: this function frees the DMA RX buffers
*/
static void sxgbe_free_rx_buffers(struct net_device *dev,
@@ -383,6 +386,7 @@ static void sxgbe_free_rx_buffers(struct net_device *dev,
/**
* init_tx_ring - init the TX descriptor ring
* @dev: net device structure
+ * @queue_no: queue number
* @tx_ring: ring to be initialised
* @tx_rsize: ring size
* Description: this function initializes the DMA TX descriptor
@@ -449,6 +453,7 @@ static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
/**
* init_rx_ring - init the RX descriptor ring
* @dev: net device structure
+ * @queue_no: queue number
* @rx_ring: ring to be initialised
* @rx_rsize: ring size
* Description: this function initializes the DMA RX descriptor
@@ -548,7 +553,7 @@ static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
- * @dev: net device structure
+ * @netd: net device structure
* Description: this function initializes the DMA RX/TX descriptors
* and allocates the socket buffers. It supports the chained and ring
* modes.
@@ -724,7 +729,7 @@ static void sxgbe_mtl_operation_mode(struct sxgbe_priv_data *priv)
/**
* sxgbe_tx_queue_clean:
- * @priv: driver private structure
+ * @tqueue: queue pointer
* Description: it reclaims resources after transmission completes.
*/
static void sxgbe_tx_queue_clean(struct sxgbe_tx_queue *tqueue)
@@ -807,6 +812,7 @@ static void sxgbe_tx_all_clean(struct sxgbe_priv_data * const priv)
/**
* sxgbe_restart_tx_queue: irq tx error management function
* @priv: driver private structure
+ * @queue_num: queue number
* Description: it cleans the descriptors and restarts the transmission
* in case of errors.
*/
@@ -1567,6 +1573,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
/**
* sxgbe_tx_timeout
* @dev : Pointer to net device structure
+ * @txqueue: index of the hanging queue
* Description: this function is called when a packet transmission fails to
* complete within a reasonable time. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
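
The sxgbe hunks above are all kernel-doc repairs: each parameter needs a matching @name: line, in declaration order, or W=1 builds emit warnings. The expected shape, shown on a hypothetical function:

	/**
	 * demo_tx_timeout - handle a hung transmit queue
	 * @dev: net device structure
	 * @txqueue: index of the hanging queue
	 *
	 * Description: called when a transmission fails to complete in time.
	 */
	static void demo_tx_timeout(struct net_device *dev, unsigned int txqueue)
	{
		netdev_err(dev, "queue %u hung\n", txqueue);
	}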
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 4b0b2cf026a5..da6886dcac37 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -601,10 +601,14 @@ static int efx_ef10_probe(struct efx_nic *efx)
efx_ef10_read_licensed_features(efx);
/* We can have one VI for each vi_stride-byte region.
- * However, until we use TX option descriptors we need two TX queues
- * per channel.
+ * However, until we use TX option descriptors we need up to four
+ * TX queues per channel for different checksumming combinations.
*/
- efx->tx_queues_per_channel = 2;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ efx->tx_queues_per_channel = 4;
+ else
+ efx->tx_queues_per_channel = 2;
efx->max_vis = efx_ef10_mem_map_size(efx) / efx->vi_stride;
if (!efx->max_vis) {
netif_err(efx, drv, efx->net_dev, "error determining max VIs\n");
@@ -1300,6 +1304,7 @@ static void efx_ef10_fini_nic(struct efx_nic *efx)
static int efx_ef10_init_nic(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ netdev_features_t hw_enc_features = 0;
int rc;
if (nic_data->must_check_datapath_caps) {
@@ -1344,6 +1349,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
nic_data->must_restore_piobufs = false;
}
+ /* add encapsulated checksum offload features */
+ if (efx_has_cap(efx, VXLAN_NVGRE) && !efx_ef10_is_vf(efx))
+ hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* add encapsulated TSO features */
+ if (efx_has_cap(efx, TX_TSO_V2_ENCAP)) {
+ netdev_features_t encap_tso_features;
+
+ encap_tso_features = NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM;
+
+ hw_enc_features |= encap_tso_features | NETIF_F_TSO;
+ efx->net_dev->features |= encap_tso_features;
+ }
+ efx->net_dev->hw_enc_features = hw_enc_features;
+
/* don't fail init if RSS setup doesn't work */
rc = efx->type->rx_push_rss_config(efx, false,
efx->rss_context.rx_indir_table, NULL);
@@ -1851,18 +1871,9 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
spin_unlock_bh(&efx->stats_lock);
- if (in_interrupt()) {
- /* If in atomic context, cannot update stats. Just update the
- * software stats and return so the caller can continue.
- */
- spin_lock_bh(&efx->stats_lock);
- efx_update_sw_stats(efx, stats);
- return 0;
- }
-
efx_ef10_get_stat_mask(efx, mask);
- rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+ rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_KERNEL);
if (rc) {
spin_lock_bh(&efx->stats_lock);
return rc;
@@ -1918,6 +1929,18 @@ static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
+static size_t efx_ef10_update_stats_atomic_vf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ /* In atomic context, cannot update HW stats. Just update the
+ * software stats and return so the caller can continue.
+ */
+ efx_update_sw_stats(efx, nic_data->stats);
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
{
struct efx_nic *efx = channel->efx;
@@ -2146,6 +2169,9 @@ static int efx_ef10_irq_test_generate(struct efx_nic *efx)
static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
{
+ /* low two bits of label are what we want for type */
+ BUILD_BUG_ON((EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM) != 3);
+ tx_queue->type = tx_queue->label & 3;
return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
(tx_queue->ptr_mask + 1) *
sizeof(efx_qword_t),
@@ -2168,15 +2194,15 @@ static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
/* Add Firmware-Assisted TSO v2 option descriptors to a queue.
*/
-static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
- struct sk_buff *skb,
- bool *data_mapped)
+int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped)
{
struct efx_tx_buffer *buffer;
+ u16 inner_ipv4_id = 0;
+ u16 outer_ipv4_id = 0;
struct tcphdr *tcp;
struct iphdr *ip;
-
- u16 ipv4_id;
+ u16 ip_tot_len;
u32 seqnum;
u32 mss;
@@ -2189,21 +2215,43 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
return -EINVAL;
}
- ip = ip_hdr(skb);
+ if (skb->encapsulation) {
+ if (!tx_queue->tso_encap)
+ return -EINVAL;
+ ip = ip_hdr(skb);
+ if (ip->version == 4)
+ outer_ipv4_id = ntohs(ip->id);
+
+ ip = inner_ip_hdr(skb);
+ tcp = inner_tcp_hdr(skb);
+ } else {
+ ip = ip_hdr(skb);
+ tcp = tcp_hdr(skb);
+ }
+
+ /* 8000-series EF10 hardware requires that IP Total Length be
+ * greater than or equal to the value it will have in each segment
+ * (which is at most mss + 208 + TCP header length), but also less
+ * than (0x10000 - inner_network_header). Otherwise the TCP
+ * checksum calculation will be broken for encapsulated packets.
+ * We fill in ip->tot_len with 0xff30, which should satisfy the
+ * first requirement unless the MSS is ridiculously large (which
+ * should be impossible as the driver max MTU is 9216); it is
+ * guaranteed to satisfy the second as we only attempt TSO if
+ * inner_network_header <= 208.
+ */
+ ip_tot_len = -EFX_TSO2_MAX_HDRLEN;
+ EFX_WARN_ON_ONCE_PARANOID(mss + EFX_TSO2_MAX_HDRLEN +
+ (tcp->doff << 2u) > ip_tot_len);
+
if (ip->version == 4) {
- /* Modify IPv4 header if needed. */
- ip->tot_len = 0;
+ ip->tot_len = htons(ip_tot_len);
ip->check = 0;
- ipv4_id = ntohs(ip->id);
+ inner_ipv4_id = ntohs(ip->id);
} else {
- /* Modify IPv6 header if needed. */
- struct ipv6hdr *ipv6 = ipv6_hdr(skb);
-
- ipv6->payload_len = 0;
- ipv4_id = 0;
+ ((struct ipv6hdr *)ip)->payload_len = htons(ip_tot_len);
}
- tcp = tcp_hdr(skb);
seqnum = ntohl(tcp->seq);
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
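
The ip_tot_len assignment above relies on unsigned 16-bit wraparound: negating a small constant modulo 2^16 yields 0x10000 minus that constant. A one-line sketch of the arithmetic, assuming EFX_TSO2_MAX_HDRLEN is 208 as the comment implies:

	static u16 demo_tso2_tot_len(void)
	{
		/* -208 modulo 2^16 is 0x10000 - 208 = 0xff30 */
		return (u16)-208;
	}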
@@ -2216,7 +2264,7 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2A,
- ESF_DZ_TX_TSO_IP_ID, ipv4_id,
+ ESF_DZ_TX_TSO_IP_ID, inner_ipv4_id,
ESF_DZ_TX_TSO_TCP_SEQNO, seqnum
);
++tx_queue->insert_count;
@@ -2226,11 +2274,12 @@ static int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue,
buffer->flags = EFX_TX_BUF_OPTION;
buffer->len = 0;
buffer->unmap_len = 0;
- EFX_POPULATE_QWORD_4(buffer->option,
+ EFX_POPULATE_QWORD_5(buffer->option,
ESF_DZ_TX_DESC_IS_OPT, 1,
ESF_DZ_TX_OPTION_TYPE, ESE_DZ_TX_OPTION_DESC_TSO,
ESF_DZ_TX_TSO_OPTION_TYPE,
ESE_DZ_TX_TSO_OPTION_DESC_FATSO2B,
+ ESF_DZ_TX_TSO_OUTER_IPID, outer_ipv4_id,
ESF_DZ_TX_TSO_TCP_MSS, mss
);
++tx_queue->insert_count;
@@ -2254,11 +2303,11 @@ static u32 efx_ef10_tso_versions(struct efx_nic *efx)
static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
{
- bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+ bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
+ bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
struct efx_ef10_nic_data *nic_data;
- bool tso_v2 = false;
efx_qword_t *txd;
int rc;
@@ -2281,15 +2330,18 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
* TSOv2 cannot be used with Hardware timestamping, and is never needed
* for XDP tx.
*/
- if (csum_offload && (nic_data->datapath_caps2 &
- (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
- !tx_queue->timestamping && !tx_queue->xdp_tx) {
- tso_v2 = true;
- netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
- channel->channel);
+ if (efx_has_cap(efx, TX_TSO_V2)) {
+ if ((csum_offload || inner_csum) &&
+ !tx_queue->timestamping && !tx_queue->xdp_tx) {
+ tx_queue->tso_version = 2;
+ netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
+ channel->channel);
+ }
+ } else if (efx_has_cap(efx, TX_TSO)) {
+ tx_queue->tso_version = 1;
}
- rc = efx_mcdi_tx_init(tx_queue, tso_v2);
+ rc = efx_mcdi_tx_init(tx_queue);
if (rc)
goto fail;
@@ -2302,22 +2354,19 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
tx_queue->insert_count = 1;
txd = efx_tx_desc(tx_queue, 0);
- EFX_POPULATE_QWORD_5(*txd,
+ EFX_POPULATE_QWORD_7(*txd,
ESF_DZ_TX_DESC_IS_OPT, true,
ESF_DZ_TX_OPTION_TYPE,
ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
- ESF_DZ_TX_OPTION_IP_CSUM, csum_offload,
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload && tx_queue->tso_version != 2,
+ ESF_DZ_TX_OPTION_INNER_UDP_TCP_CSUM, inner_csum,
+ ESF_DZ_TX_OPTION_INNER_IP_CSUM, inner_csum && tx_queue->tso_version != 2,
ESF_DZ_TX_TIMESTAMP, tx_queue->timestamping);
tx_queue->write_count = 1;
- if (tso_v2) {
- tx_queue->handle_tso = efx_ef10_tx_tso_desc;
- tx_queue->tso_version = 2;
- } else if (nic_data->datapath_caps &
- (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN)) {
- tx_queue->tso_version = 1;
- }
+ if (tx_queue->tso_version == 2 && efx_has_cap(efx, TX_TSO_V2_ENCAP))
+ tx_queue->tso_encap = true;
wmb();
efx_ef10_push_tx_desc(tx_queue, txd);
@@ -2367,7 +2416,7 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
unsigned int write_ptr;
efx_qword_t *txd;
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
if (unlikely(tx_queue->write_count == tx_queue->insert_count))
return;
@@ -2880,7 +2929,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
/* Get the transmit queue */
tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
tx_queue = efx_channel_get_tx_queue(channel,
- tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
if (!tx_queue->timestamping) {
/* Transmit completion */
@@ -3952,10 +4001,10 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
.update_stats = efx_ef10_update_stats_vf,
+ .update_stats_atomic = efx_ef10_update_stats_atomic_vf,
.start_stats = efx_port_dummy_op_void,
.pull_stats = efx_port_dummy_op_void,
.stop_stats = efx_port_dummy_op_void,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
.reconfigure_mac = efx_ef10_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
@@ -4066,7 +4115,6 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = efx_ef10_push_irq_moderation,
.reconfigure_mac = efx_ef10_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index 729c425d0f78..835c838b7dfa 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -17,8 +17,49 @@
#include "ef100_ethtool.h"
#include "mcdi_functions.h"
+/* This is the maximum number of descriptors per ring supported by the QDMA */
+#define EFX_EF100_MAX_DMAQ_SIZE 16384UL
+
+static void ef100_ethtool_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *ring)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ ring->rx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
+ ring->tx_max_pending = EFX_EF100_MAX_DMAQ_SIZE;
+ ring->rx_pending = efx->rxq_entries;
+ ring->tx_pending = efx->txq_entries;
+}
+
/* Ethtool options available
*/
const struct ethtool_ops ef100_ethtool_ops = {
.get_drvinfo = efx_ethtool_get_drvinfo,
+ .get_msglevel = efx_ethtool_get_msglevel,
+ .set_msglevel = efx_ethtool_set_msglevel,
+ .get_pauseparam = efx_ethtool_get_pauseparam,
+ .set_pauseparam = efx_ethtool_set_pauseparam,
+ .get_sset_count = efx_ethtool_get_sset_count,
+ .self_test = efx_ethtool_self_test,
+ .get_strings = efx_ethtool_get_strings,
+ .get_link_ksettings = efx_ethtool_get_link_ksettings,
+ .set_link_ksettings = efx_ethtool_set_link_ksettings,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ef100_ethtool_get_ringparam,
+ .get_fecparam = efx_ethtool_get_fecparam,
+ .set_fecparam = efx_ethtool_set_fecparam,
+ .get_ethtool_stats = efx_ethtool_get_stats,
+ .get_rxnfc = efx_ethtool_get_rxnfc,
+ .set_rxnfc = efx_ethtool_set_rxnfc,
+ .reset = efx_ethtool_reset,
+
+ .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
+ .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .get_rxfh = efx_ethtool_get_rxfh,
+ .set_rxfh = efx_ethtool_set_rxfh,
+ .get_rxfh_context = efx_ethtool_get_rxfh_context,
+ .set_rxfh_context = efx_ethtool_set_rxfh_context,
+
+ .get_module_info = efx_ethtool_get_module_info,
+ .get_module_eeprom = efx_ethtool_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c
index 63c311ba28b9..67fe44db6b61 100644
--- a/drivers/net/ethernet/sfc/ef100_netdev.c
+++ b/drivers/net/ethernet/sfc/ef100_netdev.c
@@ -217,9 +217,13 @@ static const struct net_device_ops ef100_netdev_ops = {
.ndo_open = ef100_net_open,
.ndo_stop = ef100_net_stop,
.ndo_start_xmit = ef100_hard_start_xmit,
+ .ndo_tx_timeout = efx_watchdog,
.ndo_get_stats64 = efx_net_stats,
+ .ndo_change_mtu = efx_change_mtu,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode, /* Lookout */
+ .ndo_set_features = efx_set_features,
.ndo_get_phys_port_id = efx_get_phys_port_id,
.ndo_get_phys_port_name = efx_get_phys_port_name,
#ifdef CONFIG_RFS_ACCEL
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 19fe86b3b316..3148fe770356 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -428,24 +428,12 @@ static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
__clear_bit(reset_type, &efx->reset_pending);
rc = dev_open(efx->net_dev, NULL);
} else if (reset_type == RESET_TYPE_ALL) {
- /* A RESET_TYPE_ALL will cause filters to be removed, so we remove filters
- * and reprobe after reset to avoid removing filters twice
- */
- down_write(&efx->filter_sem);
- ef100_filter_table_down(efx);
- up_write(&efx->filter_sem);
rc = efx_mcdi_reset(efx, reset_type);
if (rc)
return rc;
netif_device_attach(efx->net_dev);
- down_write(&efx->filter_sem);
- rc = ef100_filter_table_up(efx);
- up_write(&efx->filter_sem);
- if (rc)
- return rc;
-
rc = dev_open(efx->net_dev, NULL);
} else {
rc = 1; /* Leave the device closed */
@@ -696,7 +684,7 @@ static unsigned int ef100_check_caps(const struct efx_nic *efx,
/* NIC level access functions
*/
#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM | \
- NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | \
+ NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX)
@@ -769,6 +757,7 @@ const struct efx_nic_type ef100_pf_nic_type = {
.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,
.reconfigure_mac = ef100_reconfigure_mac,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
.test_nvram = efx_new_mcdi_nvram_test_all,
.describe_stats = ef100_describe_stats,
.start_stats = efx_mcdi_mac_start_stats,
@@ -1172,6 +1161,10 @@ static int ef100_probe_main(struct efx_nic *efx)
rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc)
goto fail;
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ goto fail;
rc = efx_get_pf_index(efx, &nic_data->pf_index);
if (rc)
@@ -1207,10 +1200,6 @@ static int ef100_probe_main(struct efx_nic *efx)
if (rc)
goto fail;
- rc = efx_init_channels(efx);
- if (rc)
- goto fail;
-
down_write(&efx->filter_sem);
rc = ef100_filter_table_probe(efx);
up_write(&efx->filter_sem);
diff --git a/drivers/net/ethernet/sfc/ef100_tx.c b/drivers/net/ethernet/sfc/ef100_tx.c
index a09546e43408..a90e5a9d2a37 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.c
+++ b/drivers/net/ethernet/sfc/ef100_tx.c
@@ -27,7 +27,6 @@ int ef100_tx_probe(struct efx_tx_queue *tx_queue)
(tx_queue->ptr_mask + 2) *
sizeof(efx_oword_t),
GFP_KERNEL);
- return 0;
}
void ef100_tx_init(struct efx_tx_queue *tx_queue)
@@ -38,7 +37,14 @@ void ef100_tx_init(struct efx_tx_queue *tx_queue)
tx_queue->channel->channel -
tx_queue->efx->tx_channel_offset);
- if (efx_mcdi_tx_init(tx_queue, false))
+ /* This value is purely informational; as EF100 never passes through
+ * the switch statement in tx.c:__efx_enqueue_skb(), that switch does
+ * not handle case 3. EF100's TSOv3 descriptors are generated by
+ * ef100_make_tso_desc().
+ * Meanwhile, all efx_mcdi_tx_init() cares about is that it's not 2.
+ */
+ tx_queue->tso_version = 3;
+ if (efx_mcdi_tx_init(tx_queue))
netdev_WARN(tx_queue->efx->net_dev,
"failed to initialise TXQ %d\n", tx_queue->queue);
}
@@ -117,11 +123,13 @@ static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int in
return NULL;
}
-void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
+static void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
{
unsigned int write_ptr;
efx_dword_t reg;
+ tx_queue->xmit_pending = false;
+
if (unlikely(tx_queue->notify_count == tx_queue->write_count))
return;
@@ -131,7 +139,6 @@ void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue)
efx_writed_page(tx_queue->efx, &reg,
ER_GZ_TX_RING_DOORBELL, tx_queue->queue);
tx_queue->notify_count = tx_queue->write_count;
- tx_queue->xmit_more_available = false;
}
static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue)
@@ -359,28 +366,31 @@ int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
goto err;
ef100_tx_make_descriptors(tx_queue, skb, segments);
- fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level > efx->txq_stop_thresh) {
+ struct efx_tx_queue *txq2;
+
netif_tx_stop_queue(tx_queue->core_txq);
/* Re-read after a memory barrier in case we've raced with
* the completion path. Otherwise there's a danger we'll never
* restart the queue if all completions have just happened.
*/
smp_mb();
- fill_level = efx_channel_tx_fill_level(tx_queue->channel);
+ efx_for_each_channel_tx_queue(txq2, tx_queue->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
+ fill_level = efx_channel_tx_old_fill_level(tx_queue->channel);
if (fill_level < efx->txq_stop_thresh)
netif_tx_start_queue(tx_queue->core_txq);
}
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more))
- tx_queue->xmit_more_available = false; /* push doorbell */
- else if (tx_queue->write_count - tx_queue->notify_count > 255)
- /* Ensure we never push more than 256 packets at once */
- tx_queue->xmit_more_available = false; /* push */
- else
- tx_queue->xmit_more_available = true; /* don't push yet */
+ tx_queue->xmit_pending = true;
- if (!tx_queue->xmit_more_available)
+ /* If xmit_more then we don't need to push the doorbell, unless there
+ * are 256 descriptors already queued in which case we have to push to
+ * ensure we never push more than 256 at once.
+ */
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) ||
+ tx_queue->write_count - tx_queue->notify_count > 255)
ef100_tx_push_buffers(tx_queue);
if (segments) {
@@ -399,10 +409,10 @@ err:
/* If we're not expecting another transmit and we had something to push
* on this queue then we need to push here to get the previous packets
- * out. We only enter this branch from before the 'Update BQL' section
- * above, so xmit_more_available still refers to the old state.
+ * out. We only enter this branch from before the xmit_more handling
+ * above, so xmit_pending still refers to the old state.
*/
- if (tx_queue->xmit_more_available && !xmit_more)
+ if (tx_queue->xmit_pending && !xmit_more)
ef100_tx_push_buffers(tx_queue);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/ef100_tx.h b/drivers/net/ethernet/sfc/ef100_tx.h
index fa23e430bdd7..ddc4b98fa6db 100644
--- a/drivers/net/ethernet/sfc/ef100_tx.h
+++ b/drivers/net/ethernet/sfc/ef100_tx.h
@@ -17,7 +17,6 @@
int ef100_tx_probe(struct efx_tx_queue *tx_queue);
void ef100_tx_init(struct efx_tx_queue *tx_queue);
void ef100_tx_write(struct efx_tx_queue *tx_queue);
-void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue);
unsigned int ef100_tx_max_skb_descs(struct efx_nic *efx);
void ef100_ev_tx(struct efx_channel *channel, const efx_qword_t *p_event);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e06fa89f2d72..718308076341 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -33,7 +33,7 @@
#include "selftest.h"
#include "sriov.h"
-#include "mcdi.h"
+#include "mcdi_port_common.h"
#include "mcdi_pcol.h"
#include "workarounds.h"
@@ -149,23 +149,17 @@ static int efx_init_port(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail1;
-
efx->port_initialized = true;
/* Ensure the PHY advertises the correct flow control settings */
- rc = efx->phy_op->reconfigure(efx);
+ rc = efx_mcdi_port_reconfigure(efx);
if (rc && rc != -EPERM)
- goto fail2;
+ goto fail;
mutex_unlock(&efx->mac_lock);
return 0;
-fail2:
- efx->phy_op->fini(efx);
-fail1:
+fail:
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -177,7 +171,6 @@ static void efx_fini_port(struct efx_nic *efx)
if (!efx->port_initialized)
return;
- efx->phy_op->fini(efx);
efx->port_initialized = false;
efx->link_state.up = false;
@@ -603,6 +596,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
+ .ndo_features_check = efx_features_check,
.ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
@@ -1229,7 +1223,7 @@ static int efx_pm_thaw(struct device *dev)
goto fail;
mutex_lock(&efx->mac_lock);
- efx->phy_op->reconfigure(efx);
+ efx_mcdi_port_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
efx_start_all(efx);
@@ -1336,7 +1330,7 @@ static int __init efx_init_module(void)
{
int rc;
- printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+ printk(KERN_INFO "Solarflare NET driver\n");
rc = register_netdevice_notifier(&efx_netdev_notifier);
if (rc)
@@ -1398,4 +1392,3 @@ MODULE_AUTHOR("Solarflare Communications and "
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
-MODULE_VERSION(EFX_DRIVER_VERSION);
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index dd4f30ea48a8..a4a626e9cd9a 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -151,7 +151,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
*/
n_xdp_tx = num_possible_cpus();
- n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+ n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_MAX_TXQ_PER_CHANNEL);
vec_count = pci_msix_vec_count(efx->pci_dev);
if (vec_count < 0)
@@ -179,7 +179,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
efx->xdp_tx_queue_count = 0;
} else {
efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+ efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
@@ -505,8 +505,7 @@ static void efx_filter_rfs_expire(struct work_struct *data)
#endif
/* Allocate and initialise a channel structure. */
-struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
+static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
struct efx_rx_queue *rx_queue;
struct efx_tx_queue *tx_queue;
@@ -521,7 +520,7 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
channel->channel = i;
channel->type = &efx_default_channel_type;
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
tx_queue = &channel->tx_queue[j];
tx_queue->efx = efx;
tx_queue->queue = -1;
@@ -545,7 +544,7 @@ int efx_init_channels(struct efx_nic *efx)
unsigned int i;
for (i = 0; i < EFX_MAX_CHANNELS; i++) {
- efx->channel[i] = efx_alloc_channel(efx, i, NULL);
+ efx->channel[i] = efx_alloc_channel(efx, i);
if (!efx->channel[i])
return -ENOMEM;
efx->msi_context[i].efx = efx;
@@ -595,7 +594,7 @@ struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
channel->napi_str.state = 0;
memset(&channel->eventq, 0, sizeof(channel->eventq));
- for (j = 0; j < EFX_TXQ_TYPES; j++) {
+ for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
tx_queue = &channel->tx_queue[j];
if (tx_queue->channel)
tx_queue->channel = channel;
@@ -895,7 +894,7 @@ int efx_set_channels(struct efx_nic *efx)
xdp_queue_number, tx_queue->queue);
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
- * not dividing evenly by EFX_TXQ_TYPES.
+ * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
diff --git a/drivers/net/ethernet/sfc/efx_channels.h b/drivers/net/ethernet/sfc/efx_channels.h
index 2d71dc9a33dd..d77ec1f77fb1 100644
--- a/drivers/net/ethernet/sfc/efx_channels.h
+++ b/drivers/net/ethernet/sfc/efx_channels.h
@@ -31,8 +31,6 @@ void efx_stop_eventq(struct efx_channel *channel);
void efx_fini_eventq(struct efx_channel *channel);
void efx_remove_eventq(struct efx_channel *channel);
-struct efx_channel *
-efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel);
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len);
void efx_set_channel_names(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index dfc6032e75f4..72a3f0e09f52 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -11,6 +11,7 @@
#include "net_driver.h"
#include <linux/module.h>
#include <linux/netdevice.h>
+#include <net/gre.h>
#include "efx_common.h"
#include "efx_channels.h"
#include "efx.h"
@@ -19,6 +20,7 @@
#include "rx_common.h"
#include "tx_common.h"
#include "nic.h"
+#include "mcdi_port_common.h"
#include "io.h"
#include "mcdi_pcol.h"
@@ -544,7 +546,7 @@ void efx_start_all(struct efx_nic *efx)
* to poll now because we could have missed a change
*/
mutex_lock(&efx->mac_lock);
- if (efx->phy_op->poll(efx))
+ if (efx_mcdi_phy_poll(efx))
efx_link_status_changed(efx);
mutex_unlock(&efx->mac_lock);
@@ -600,7 +602,7 @@ void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats)
struct efx_nic *efx = netdev_priv(net_dev);
spin_lock_bh(&efx->stats_lock);
- efx->type->update_stats(efx, NULL, stats);
+ efx_nic_update_stats_atomic(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
}
@@ -714,9 +716,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
mutex_lock(&efx->rss_lock);
- if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
- method != RESET_TYPE_DATAPATH)
- efx->phy_op->fini(efx);
efx->type->fini(efx);
}
@@ -759,10 +758,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
method != RESET_TYPE_DATAPATH) {
- rc = efx->phy_op->init(efx);
- if (rc)
- goto fail;
- rc = efx->phy_op->reconfigure(efx);
+ rc = efx_mcdi_port_reconfigure(efx);
if (rc && rc != -EPERM)
netif_err(efx, drv, efx->net_dev,
"could not restore PHY settings\n");
@@ -959,7 +955,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
/**************************************************************************
*
- * Dummy PHY/MAC operations
+ * Dummy NIC operations
*
* Can be used for some unimplemented operations
* Needed so all function pointers are valid and do not have to be tested
@@ -972,18 +968,6 @@ int efx_port_dummy_op_int(struct efx_nic *efx)
}
void efx_port_dummy_op_void(struct efx_nic *efx) {}
-static bool efx_port_dummy_op_poll(struct efx_nic *efx)
-{
- return false;
-}
-
-static const struct efx_phy_operations efx_dummy_phy_operations = {
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_port_dummy_op_int,
- .poll = efx_port_dummy_op_poll,
- .fini = efx_port_dummy_op_void,
-};
-
/**************************************************************************
*
* Data housekeeping
@@ -1037,7 +1021,6 @@ int efx_init_struct(struct efx_nic *efx,
efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE,
sizeof(*efx->rps_hash_table), GFP_KERNEL);
#endif
- efx->phy_op = &efx_dummy_phy_operations;
efx->mdio.dev = net_dev;
INIT_WORK(&efx->mac_work, efx_mac_work);
init_waitqueue_head(&efx->flush_wq);
@@ -1104,17 +1087,7 @@ int efx_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
pci_set_master(pci_dev);
- /* Set the PCI DMA mask. Try all possibilities from our
- * genuine mask down to 32 bits, because some architectures
- * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
- * masks event though they reject 46 bit masks.
- */
- while (dma_mask > 0x7fffffffUL) {
- rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
- if (rc == 0)
- break;
- dma_mask >>= 1;
- }
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, dma_mask);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"could not find a suitable DMA mask\n");
@@ -1315,6 +1288,89 @@ const struct pci_error_handlers efx_err_handlers = {
.resume = efx_io_resume,
};
+/* Determine whether the NIC will be able to handle TX offloads for a given
+ * encapsulated packet.
+ */
+static bool efx_can_encap_offloads(struct efx_nic *efx, struct sk_buff *skb)
+{
+ struct gre_base_hdr *greh;
+ __be16 dst_port;
+ u8 ipproto;
+
+ /* Does the NIC support encap offloads?
+ * If not, we should never get here, because we shouldn't have
+ * advertised encap offload feature flags in the first place.
+ */
+ if (WARN_ON_ONCE(!efx->type->udp_tnl_has_port))
+ return false;
+
+ /* Determine encapsulation protocol in use */
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ ipproto = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ /* If there are extension headers, this will cause us to
+ * think we can't offload something that we maybe could have.
+ */
+ ipproto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ /* Not IP, so can't offload it */
+ return false;
+ }
+ switch (ipproto) {
+ case IPPROTO_GRE:
+ /* We support NVGRE but not IP over GRE or random gretaps.
+ * Specifically, the NIC will accept GRE as encapsulated if
+ * the inner protocol is Ethernet, but only handle it
+ * correctly if the GRE header is 8 bytes long. Moreover,
+ * it will not update the Checksum or Sequence Number fields
+ * if they are present. (The Routing Present flag,
+ * GRE_ROUTING, cannot be set else the header would be more
+ * than 8 bytes long; so we don't have to worry about it.)
+ */
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER)
+ return false;
+ if (ntohs(skb->inner_protocol) != ETH_P_TEB)
+ return false;
+ if (skb_inner_mac_header(skb) - skb_transport_header(skb) != 8)
+ return false;
+ greh = (struct gre_base_hdr *)skb_transport_header(skb);
+ return !(greh->flags & (GRE_CSUM | GRE_SEQ));
+ case IPPROTO_UDP:
+ /* If the port is registered for a UDP tunnel, we assume the
+ * packet is for that tunnel, and the NIC will handle it as
+ * such. If not, the NIC won't know what to do with it.
+ */
+ dst_port = udp_hdr(skb)->dest;
+ return efx->type->udp_tnl_has_port(efx, dst_port);
+ default:
+ return false;
+ }
+}
+
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ struct efx_nic *efx = netdev_priv(dev);
+
+ if (skb->encapsulation) {
+ if (features & NETIF_F_GSO_MASK)
+ /* Hardware can only do TSO with at most 208 bytes
+ * of headers.
+ */
+ if (skb_inner_transport_offset(skb) >
+ EFX_TSO2_MAX_HDRLEN)
+ features &= ~(NETIF_F_GSO_MASK);
+ if (features & (NETIF_F_GSO_MASK | NETIF_F_CSUM_MASK))
+ if (!efx_can_encap_offloads(efx, skb))
+ features &= ~(NETIF_F_GSO_MASK |
+ NETIF_F_CSUM_MASK);
+ }
+ return features;
+}
+
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid)
{
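
The 8-byte test in the GRE branch above corresponds to the one header layout
the hardware handles, which is also the layout NVGRE uses: the 4-byte GRE base
header plus a 4-byte key, with no checksum or sequence fields. A standalone
restatement of that check (the helper name is hypothetical):

	#include <net/gre.h>

	static bool example_is_nvgre_header(const struct gre_base_hdr *greh,
					    size_t hdrlen)
	{
		/* 4-byte base header + 4-byte key field and nothing else */
		return hdrlen == sizeof(*greh) + 4 &&
		       !(greh->flags & (GRE_CSUM | GRE_SEQ));
	}
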
diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h
index 4056f68f04e5..65513fd0cf6c 100644
--- a/drivers/net/ethernet/sfc/efx_common.h
+++ b/drivers/net/ethernet/sfc/efx_common.h
@@ -105,6 +105,9 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu);
extern const struct pci_error_handlers efx_err_handlers;
+netdev_features_t efx_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features);
+
int efx_get_phys_port_id(struct net_device *net_dev,
struct netdev_phys_item_id *ppid);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 4ffda7782f68..12a91c559aa2 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -50,8 +50,7 @@ static int efx_ethtool_phys_id(struct net_device *net_dev,
return 1; /* cycle on/off once per second */
}
- efx->type->set_id_led(efx, mode);
- return 0;
+ return efx_mcdi_set_id_led(efx, mode);
}
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 05ac87807929..bf1443539a1a 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -15,6 +15,7 @@
#include "selftest.h"
#include "rx_common.h"
#include "ethtool_common.h"
+#include "mcdi_port_common.h"
struct efx_sw_stat_desc {
const char *name;
@@ -105,7 +106,6 @@ void efx_ethtool_get_drvinfo(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
efx_mcdi_print_fwver(efx, info->fw_version,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
@@ -221,7 +221,7 @@ int efx_ethtool_set_pauseparam(struct net_device *net_dev,
efx_link_set_wanted_fc(efx, wanted_fc);
if (efx->link_advertising[0] != old_adv ||
(efx->wanted_fc ^ old_fc) & EFX_FC_AUTO) {
- rc = efx->phy_op->reconfigure(efx);
+ rc = efx_mcdi_port_reconfigure(efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"Unable to advertise requested flow "
@@ -372,20 +372,15 @@ int efx_ethtool_fill_self_tests(struct efx_nic *efx,
efx_fill_test(n++, strings, data, &tests->registers,
"core", 0, "registers", NULL);
- if (efx->phy_op->run_tests != NULL) {
- EFX_WARN_ON_PARANOID(efx->phy_op->test_name == NULL);
+ for (i = 0; true; ++i) {
+ const char *name;
- for (i = 0; true; ++i) {
- const char *name;
-
- EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
- name = efx->phy_op->test_name(efx, i);
- if (name == NULL)
- break;
+ EFX_WARN_ON_PARANOID(i >= EFX_MAX_PHY_TESTS);
+ name = efx_mcdi_phy_test_name(efx, i);
+ if (name == NULL)
+ break;
- efx_fill_test(n++, strings, data, &tests->phy_ext[i],
- "phy", 0, name, NULL);
- }
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i], "phy", 0, name, NULL);
}
/* Loopback tests */
@@ -412,7 +407,7 @@ static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
snprintf(strings, ETH_GSTRING_LEN,
"tx-%u.tx_packets",
channel->tx_queue[0].queue /
- EFX_TXQ_TYPES);
+ EFX_MAX_TXQ_PER_CHANNEL);
strings += ETH_GSTRING_LEN;
}
@@ -571,7 +566,7 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
u32 supported;
mutex_lock(&efx->mac_lock);
- efx->phy_op->get_link_ksettings(efx, cmd);
+ efx_mcdi_phy_get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
@@ -607,7 +602,7 @@ int efx_ethtool_set_link_ksettings(struct net_device *net_dev,
}
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_link_ksettings(efx, cmd);
+ rc = efx_mcdi_phy_set_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -618,10 +613,8 @@ int efx_ethtool_get_fecparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (!efx->phy_op || !efx->phy_op->get_fecparam)
- return -EOPNOTSUPP;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->get_fecparam(efx, fecparam);
+ rc = efx_mcdi_phy_get_fecparam(efx, fecparam);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -633,10 +626,8 @@ int efx_ethtool_set_fecparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- if (!efx->phy_op || !efx->phy_op->get_fecparam)
- return -EOPNOTSUPP;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->set_fecparam(efx, fecparam);
+ rc = efx_mcdi_phy_set_fecparam(efx, fecparam);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -1332,11 +1323,8 @@ int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int ret;
- if (!efx->phy_op || !efx->phy_op->get_module_eeprom)
- return -EOPNOTSUPP;
-
mutex_lock(&efx->mac_lock);
- ret = efx->phy_op->get_module_eeprom(efx, ee, data);
+ ret = efx_mcdi_phy_get_module_eeprom(efx, ee, data);
mutex_unlock(&efx->mac_lock);
return ret;
@@ -1348,11 +1336,8 @@ int efx_ethtool_get_module_info(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int ret;
- if (!efx->phy_op || !efx->phy_op->get_module_info)
- return -EOPNOTSUPP;
-
mutex_lock(&efx->mac_lock);
- ret = efx->phy_op->get_module_info(efx, modinfo);
+ ret = efx_mcdi_phy_get_module_info(efx, modinfo);
mutex_unlock(&efx->mac_lock);
return ret;
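
With the phy_op hooks gone, the self-test list is enumerated directly from the
MCDI layer using a NULL-terminated name sequence, as the rewritten loop in
efx_ethtool_fill_self_tests() shows. The shape of that contract, sketched
standalone (the bound and names are taken from efx_mcdi_phy_test_name()):

	static const char *example_test_name(unsigned int index)
	{
		static const char *const names[] = { "bist", "cable" };

		/* NULL tells the caller it has walked past the last test */
		return index < 2 ? names[index] : NULL;
	}
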
diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c
index fa1ade856b10..2c91792cec01 100644
--- a/drivers/net/ethernet/sfc/falcon/farch.c
+++ b/drivers/net/ethernet/sfc/falcon/farch.c
@@ -870,17 +870,12 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
{
struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
struct ef4_nic *efx = rx_queue->efx;
- bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
+ bool rx_ev_pause_frm;
- rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
@@ -893,10 +888,6 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
- /* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
- rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
- rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
/* Count errors that are not in MAC stats. Ignore expected
* checksum errors during self-test. */
@@ -916,6 +907,13 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
* to a FIFO overflow.
*/
#ifdef DEBUG
+ {
+ /* Every error apart from tobe_disc and pause_frm */
+
+ bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
if (rx_ev_other_err && net_ratelimit()) {
netif_dbg(efx, rx_err, efx->net_dev,
" RX queue %d unexpected RX event "
@@ -932,6 +930,7 @@ static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
+ }
#endif
/* The frame must be discarded if any of these are true. */
@@ -1643,15 +1642,11 @@ void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
*/
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
{
- unsigned vi_count, buftbl_min;
+ unsigned vi_count;
/* Account for the buffer table entries backing the datapath channels
* and the descriptor caches for those channels.
*/
- buftbl_min = ((efx->n_rx_channels * EF4_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EF4_TXQ_TYPES * EF4_MAX_DMAQ_SIZE +
- efx->n_channels * EF4_MAX_EVQ_SIZE)
- * sizeof(ef4_qword_t) / EF4_BUF_SIZE);
vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);
efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
@@ -2532,7 +2527,6 @@ int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
enum ef4_farch_filter_table_id table_id;
struct ef4_farch_filter_table *table;
unsigned int filter_idx;
- struct ef4_farch_filter_spec *spec;
int rc;
table_id = ef4_farch_filter_id_table_id(filter_id);
@@ -2543,7 +2537,6 @@ int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
filter_idx = ef4_farch_filter_id_index(filter_id);
if (filter_idx >= table->size)
return -ENOENT;
- spec = &table->spec[filter_idx];
spin_lock_bh(&efx->filter_lock);
rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
diff --git a/drivers/net/ethernet/sfc/falcon/rx.c b/drivers/net/ethernet/sfc/falcon/rx.c
index 05ea3523890a..966f13e7475d 100644
--- a/drivers/net/ethernet/sfc/falcon/rx.c
+++ b/drivers/net/ethernet/sfc/falcon/rx.c
@@ -140,6 +140,7 @@ static struct page *ef4_reuse_page(struct ef4_rx_queue *rx_queue)
* ef4_init_rx_buffers - create EF4_RX_BATCH page-based RX buffers
*
* @rx_queue: Efx RX queue
+ * @atomic: control memory allocation flags
*
* This allocates a batch of pages, maps them for DMA, and populates
* struct ef4_rx_buffers for each one. Return a negative error code or
@@ -316,6 +317,7 @@ static void ef4_discard_rx_packet(struct ef4_channel *channel,
* This will aim to fill the RX descriptor queue up to
* @rx_queue->@max_fill. If there is insufficient atomic
* memory to do so, a slow fill will be scheduled.
+ * @atomic: control memory allocation flags
*
* The caller must provide serialisation (none is used here). In practise,
* this means this function must run from the NAPI handler, or be called
diff --git a/drivers/net/ethernet/sfc/falcon/selftest.c b/drivers/net/ethernet/sfc/falcon/selftest.c
index 147677c7c72f..6a454ac6f876 100644
--- a/drivers/net/ethernet/sfc/falcon/selftest.c
+++ b/drivers/net/ethernet/sfc/falcon/selftest.c
@@ -65,7 +65,7 @@ static const char *const ef4_interrupt_mode_names[] = {
STRING_TABLE_LOOKUP(efx->interrupt_mode, ef4_interrupt_mode)
/**
- * ef4_loopback_state - persistent state during a loopback selftest
+ * struct ef4_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in ef4_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
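
The one-word fix above matters because scripts/kernel-doc infers the kind of
item being documented from the name line: without the "struct " prefix it
assumes the comment documents a function and warns about a missing prototype.
The expected shape, sketched on a hypothetical type:

	/**
	 * struct example_state - persistent state for an example selftest
	 * @flush: drop all packets when set
	 */
	struct example_state {
		bool flush;
	};
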
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 4002f9a3ae90..d75cf5ff5686 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -320,7 +320,7 @@ void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
unsigned write_ptr;
unsigned old_write_count = tx_queue->write_count;
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
if (unlikely(tx_queue->write_count == tx_queue->insert_count))
return;
@@ -372,6 +372,8 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
struct efx_nic *efx = tx_queue->efx;
unsigned entries;
+ tx_queue->type = ((tx_queue->label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
+ ((tx_queue->label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
entries = tx_queue->ptr_mask + 1;
return efx_alloc_special_buffer(efx, &tx_queue->txd,
entries * sizeof(efx_qword_t));
@@ -379,7 +381,7 @@ int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
{
- int csum = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+ int csum = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
struct efx_nic *efx = tx_queue->efx;
efx_oword_t reg;
@@ -409,10 +411,12 @@ void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
EFX_POPULATE_OWORD_1(reg,
FRF_BZ_TX_PACE,
- (tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
+ (tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
FFE_BZ_TX_PACE_OFF :
FFE_BZ_TX_PACE_RESERVED);
efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL, tx_queue->queue);
+
+ tx_queue->tso_version = 1;
}
static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
@@ -832,13 +836,13 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
/* Rewrite the FIFO write pointer */
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ channel, tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
netif_tx_lock(efx->net_dev);
efx_farch_notify_tx_desc(tx_queue);
@@ -863,13 +867,8 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
bool rx_ev_frm_trunc, rx_ev_tobe_disc;
bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
@@ -918,6 +917,8 @@ static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
rx_ev_pause_frm ? " [PAUSE]" : "");
}
+#else
+ (void) rx_ev_other_err;
#endif
if (efx->net_dev->features & NETIF_F_RXALL)
@@ -1083,9 +1084,9 @@ efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
int qid;
qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
- tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
- qid % EFX_TXQ_TYPES);
+ if (qid < EFX_MAX_TXQ_PER_CHANNEL * (efx->n_tx_channels + efx->n_extra_tx_channels)) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_MAX_TXQ_PER_CHANNEL,
+ qid % EFX_MAX_TXQ_PER_CHANNEL);
if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
efx_farch_magic_event(tx_queue->channel,
EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
@@ -1678,10 +1679,10 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
* and the descriptor caches for those channels.
*/
buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- total_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_DMAQ_SIZE +
efx->n_channels * EFX_MAX_EVQ_SIZE)
* sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, total_tx_channels * EFX_TXQ_TYPES);
+ vi_count = max(efx->n_channels, total_tx_channels * EFX_MAX_TXQ_PER_CHANNEL);
#ifdef CONFIG_SFC_SRIOV
if (efx->type->sriov_wanted) {
@@ -2592,7 +2593,6 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
enum efx_farch_filter_table_id table_id;
struct efx_farch_filter_table *table;
unsigned int filter_idx;
- struct efx_farch_filter_spec *spec;
int rc;
table_id = efx_farch_filter_id_table_id(filter_id);
@@ -2604,7 +2604,6 @@ int efx_farch_filter_remove_safe(struct efx_nic *efx,
if (filter_idx >= table->size)
return -ENOENT;
down_write(&state->lock);
- spec = &table->spec[filter_idx];
rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
up_write(&state->lock);
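
The new decode in efx_farch_tx_probe() recovers a queue's type from the two
label bits it was created with. Enumerated for clarity as a standalone sketch
(the flag values are assumptions for illustration, mirroring the bit tests in
the hunk above):

	#define EFX_TXQ_TYPE_OUTER_CSUM	1	/* illustrative values */
	#define EFX_TXQ_TYPE_HIGHPRI	2

	static unsigned int example_label_to_type(unsigned int label)
	{
		/* label 0..3 -> none, OUTER_CSUM, HIGHPRI, OUTER_CSUM|HIGHPRI */
		return ((label & 1) ? EFX_TXQ_TYPE_OUTER_CSUM : 0) |
		       ((label & 2) ? EFX_TXQ_TYPE_HIGHPRI : 0);
	}
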
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 5467819aef6e..be6bfd6b7ec7 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1868,10 +1868,9 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
return efx_mcdi_exit_assertion(efx);
}
-void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
- int rc;
BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
BUILD_BUG_ON(EFX_LED_ON != MC_CMD_LED_ON);
@@ -1881,8 +1880,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
MCDI_SET_DWORD(inbuf, SET_ID_LED_IN_STATE, mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ return efx_mcdi_rpc(efx, MC_CMD_SET_ID_LED, inbuf, sizeof(inbuf), NULL, 0, NULL);
}
static int efx_mcdi_reset_func(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 658cf345420d..69c2924a147c 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -190,6 +190,7 @@ void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
* memory strictly 32 bits at a time, so add any necessary padding.
*/
+#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4)
#define _MCDI_DECLARE_BUF(_name, _len) \
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
#define MCDI_DECLARE_BUF(_name, _len) \
@@ -348,14 +349,13 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
int efx_new_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_nvram_test_all(struct efx_nic *efx);
int efx_mcdi_handle_assertion(struct efx_nic *efx);
-void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
+int efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
int *id_out);
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
int efx_mcdi_flush_rxqs(struct efx_nic *efx);
-int efx_mcdi_port_reconfigure(struct efx_nic *efx);
void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
void efx_mcdi_mac_start_stats(struct efx_nic *efx);
void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
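
The buffer declared by _MCDI_DECLARE_BUF above is an array of 32-bit dwords,
so odd byte lengths round up to the next dword boundary, satisfying the
alignment rule the comment describes. Illustrated with an assumed 10-byte
payload and a stand-in element type:

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
	typedef unsigned int efx_dword_t;	/* stand-in for the driver's union */

	static efx_dword_t example_buf[DIV_ROUND_UP(10, 4)];	/* 3 dwords = 12 bytes */
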
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.c b/drivers/net/ethernet/sfc/mcdi_functions.c
index d8a3af86ef78..d3e6d8239f5c 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.c
+++ b/drivers/net/ethernet/sfc/mcdi_functions.c
@@ -160,11 +160,12 @@ fail:
outbuf, outlen, rc);
}
-int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
EFX_BUF_SIZE));
- bool csum_offload = tx_queue->label & EFX_TXQ_TYPE_OFFLOAD;
+ bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
+ bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
struct efx_channel *channel = tx_queue->channel;
struct efx_nic *efx = tx_queue->efx;
@@ -194,22 +195,31 @@ int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2)
inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
do {
- MCDI_POPULATE_DWORD_4(inbuf, INIT_TXQ_IN_FLAGS,
+ bool tso_v2 = tx_queue->tso_version == 2;
+
+ /* TSOv2 implies IP header checksum offload for TSO frames,
+ * so we can safely disable IP header checksum offload for
+ * everything else. If we don't have TSOv2, then we have to
+ * enable IP header checksum offload, which is strictly
+ * incorrect but better than breaking TSO.
+ */
+ MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_IN_FLAGS,
/* This flag was removed from mcdi_pcol.h for
* the non-_EXT version of INIT_TXQ. However,
* firmware still honours it.
*/
INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
- INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !(csum_offload && tso_v2),
INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
- INIT_TXQ_EXT_IN_FLAG_TIMESTAMP,
- tx_queue->timestamping);
+ INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, tx_queue->timestamping,
+ INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN, inner_csum && !tso_v2,
+ INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN, inner_csum);
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
NULL, 0, NULL);
if (rc == -ENOSPC && tso_v2) {
/* Retry without TSOv2 if we're short on contexts. */
- tso_v2 = false;
+ tx_queue->tso_version = 0;
netif_warn(efx, probe, efx->net_dev,
"TSOv2 context not available to segment in "
"hardware. TCP performance may be reduced.\n"
diff --git a/drivers/net/ethernet/sfc/mcdi_functions.h b/drivers/net/ethernet/sfc/mcdi_functions.h
index 687be8b00cd8..b0e2f53a0d9b 100644
--- a/drivers/net/ethernet/sfc/mcdi_functions.h
+++ b/drivers/net/ethernet/sfc/mcdi_functions.h
@@ -19,7 +19,7 @@ int efx_mcdi_ev_probe(struct efx_channel *channel);
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2);
void efx_mcdi_ev_remove(struct efx_channel *channel);
void efx_mcdi_ev_fini(struct efx_channel *channel);
-int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue, bool tso_v2);
+int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue);
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue);
void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue);
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue);
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 98eeb404f68d..94c6a345c0b1 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -70,592 +70,6 @@ static int efx_mcdi_mdio_write(struct net_device *net_dev,
return 0;
}
-static int efx_mcdi_phy_probe(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_data;
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
- u32 caps;
- int rc;
-
- /* Initialise and populate phy_data */
- phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
- if (phy_data == NULL)
- return -ENOMEM;
-
- rc = efx_mcdi_get_phy_cfg(efx, phy_data);
- if (rc != 0)
- goto fail;
-
- /* Read initial link advertisement */
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- goto fail;
-
- /* Fill out nic state */
- efx->phy_data = phy_data;
- efx->phy_type = phy_data->type;
-
- efx->mdio_bus = phy_data->channel;
- efx->mdio.prtad = phy_data->port;
- efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
- efx->mdio.mode_support = 0;
- if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
- if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
- efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-
- caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
- if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
- mcdi_to_ethtool_linkset(phy_data->media, caps,
- efx->link_advertising);
- else
- phy_data->forced_cap = caps;
-
- /* Assert that we can map efx -> mcdi loopback modes */
- BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
- BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
- BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
- BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
- BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
- BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
- BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
- BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
- BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
- BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
- BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
- BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
- BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
- BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
- BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
- BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
- BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
- BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
- BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
- BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
- BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
- BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
- BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
- BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
- BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
- BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
- BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
-
- rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
- if (rc != 0)
- goto fail;
- /* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
- * but by convention we don't */
- efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
-
- /* Set the initial link mode */
- efx_mcdi_phy_decode_link(
- efx, &efx->link_state,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
- MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
-
- /* Record the initial FEC configuration (or nearest approximation
- * representable in the ethtool configuration space)
- */
- efx->fec_config = mcdi_fec_caps_to_ethtool(caps,
- efx->link_state.speed == 25000 ||
- efx->link_state.speed == 50000);
-
- /* Default to Autonegotiated flow control if the PHY supports it */
- efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
- if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
- efx->wanted_fc |= EFX_FC_AUTO;
- efx_link_set_wanted_fc(efx, efx->wanted_fc);
-
- return 0;
-
-fail:
- kfree(phy_data);
- return rc;
-}
-
-static void efx_mcdi_phy_remove(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_data = efx->phy_data;
-
- efx->phy_data = NULL;
- kfree(phy_data);
-}
-
-static void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
- struct ethtool_link_ksettings *cmd)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
- int rc;
-
- cmd->base.speed = efx->link_state.speed;
- cmd->base.duplex = efx->link_state.fd;
- cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
- cmd->base.phy_address = phy_cfg->port;
- cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
- cmd->base.mdio_support = (efx->mdio.mode_support &
- (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
-
- mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
- cmd->link_modes.supported);
- memcpy(cmd->link_modes.advertising, efx->link_advertising,
- sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), NULL);
- if (rc)
- return;
- mcdi_to_ethtool_linkset(phy_cfg->media,
- MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
- cmd->link_modes.lp_advertising);
-}
-
-static int
-efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
- const struct ethtool_link_ksettings *cmd)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps;
- int rc;
-
- if (cmd->base.autoneg) {
- caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
- } else if (cmd->base.duplex) {
- switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
- case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
- case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
- case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
- case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
- case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
- default: return -EINVAL;
- }
- } else {
- switch (cmd->base.speed) {
- case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
- case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
- case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
- default: return -EINVAL;
- }
- }
-
- caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);
-
- rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
- efx->loopback_mode, 0);
- if (rc)
- return rc;
-
- if (cmd->base.autoneg) {
- efx_link_set_advertising(efx, cmd->link_modes.advertising);
- phy_cfg->forced_cap = 0;
- } else {
- efx_link_clear_advertising(efx);
- phy_cfg->forced_cap = caps;
- }
- return 0;
-}
-
-static int efx_mcdi_phy_set_fecparam(struct efx_nic *efx,
- const struct ethtool_fecparam *fec)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 caps;
- int rc;
-
- /* Work out what efx_mcdi_phy_set_link_ksettings() would produce from
- * saved advertising bits
- */
- if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising))
- caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) |
- 1 << MC_CMD_PHY_CAP_AN_LBN);
- else
- caps = phy_cfg->forced_cap;
-
- caps |= ethtool_fec_caps_to_mcdi(fec->fec);
- rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
- efx->loopback_mode, 0);
- if (rc)
- return rc;
-
- /* Record the new FEC setting for subsequent set_link calls */
- efx->fec_config = fec->fec;
- return 0;
-}
-
-static const char *const mcdi_sft9001_cable_diag_names[] = {
- "cable.pairA.length",
- "cable.pairB.length",
- "cable.pairC.length",
- "cable.pairD.length",
- "cable.pairA.status",
- "cable.pairB.status",
- "cable.pairC.status",
- "cable.pairD.status",
-};
-
-static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
- int *results)
-{
- unsigned int retry, i, count = 0;
- size_t outlen;
- u32 status;
- MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
- u8 *ptr;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
- MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
- inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
- if (rc)
- goto out;
-
- /* Wait up to 10s for BIST to finish */
- for (retry = 0; retry < 100; ++retry) {
- BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
- rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto out;
-
- status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
- if (status != MC_CMD_POLL_BIST_RUNNING)
- goto finished;
-
- msleep(100);
- }
-
- rc = -ETIMEDOUT;
- goto out;
-
-finished:
- results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
-
- /* SFT9001 specific cable diagnostics output */
- if (efx->phy_type == PHY_TYPE_SFT9001B &&
- (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
- bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
- ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
- if (status == MC_CMD_POLL_BIST_PASSED &&
- outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
- for (i = 0; i < 8; i++) {
- results[count + i] =
- EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
- EFX_DWORD_0);
- }
- }
- count += 8;
- }
- rc = count;
-
-out:
- return rc;
-}
-
-static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
- unsigned flags)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u32 mode;
- int rc;
-
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
- rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
- if (rc < 0)
- return rc;
-
- results += rc;
- }
-
- /* If we support both LONG and SHORT, then run each in response to
- * break or not. Otherwise, run the one we support */
- mode = 0;
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
- if ((flags & ETH_TEST_FL_OFFLINE) &&
- (phy_cfg->flags &
- (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
- mode = MC_CMD_PHY_BIST_CABLE_LONG;
- else
- mode = MC_CMD_PHY_BIST_CABLE_SHORT;
- } else if (phy_cfg->flags &
- (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
- mode = MC_CMD_PHY_BIST_CABLE_LONG;
-
- if (mode != 0) {
- rc = efx_mcdi_bist(efx, mode, results);
- if (rc < 0)
- return rc;
- results += rc;
- }
-
- return 0;
-}
-
-static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
- unsigned int index)
-{
- struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
-
- if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
- if (index == 0)
- return "bist";
- --index;
- }
-
- if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
- (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
- if (index == 0)
- return "cable";
- --index;
-
- if (efx->phy_type == PHY_TYPE_SFT9001B) {
- if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
- return mcdi_sft9001_cable_diag_names[index];
- index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
- }
- }
-
- return NULL;
-}
-
-#define SFP_PAGE_SIZE 128
-#define SFF_DIAG_TYPE_OFFSET 92
-#define SFF_DIAG_ADDR_CHANGE BIT(2)
-#define SFF_8079_NUM_PAGES 2
-#define SFF_8472_NUM_PAGES 4
-#define SFF_8436_NUM_PAGES 5
-#define SFF_DMT_LEVEL_OFFSET 94
-
-/** efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
- * @efx: NIC context
- * @page: EEPROM page number
- * @data: Destination data pointer
- * @offset: Offset in page to copy from in to data
- * @space: Space available in data
- *
- * Return:
- * >=0 - amount of data copied
- * <0 - error
- */
-static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
- unsigned int page,
- u8 *data, ssize_t offset,
- ssize_t space)
-{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
- MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
- size_t outlen;
- unsigned int payload_len;
- unsigned int to_copy;
- int rc;
-
- if (offset > SFP_PAGE_SIZE)
- return -EINVAL;
-
- to_copy = min(space, SFP_PAGE_SIZE - offset);
-
- MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
- rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
- inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf),
- &outlen);
-
- if (rc)
- return rc;
-
- if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
- SFP_PAGE_SIZE))
- return -EIO;
-
- payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
- if (payload_len != SFP_PAGE_SIZE)
- return -EIO;
-
- memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
- to_copy);
-
- return to_copy;
-}
-
-static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx,
- unsigned int page,
- u8 byte)
-{
- int rc;
- u8 data;
-
- rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
- if (rc == 1)
- return data;
-
- return rc;
-}
-
-static int efx_mcdi_phy_diag_type(struct efx_nic *efx)
-{
- /* Page zero of the EEPROM includes the diagnostic type at byte 92. */
- return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
- SFF_DIAG_TYPE_OFFSET);
-}
-
-static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx)
-{
- /* Page zero of the EEPROM includes the DMT level at byte 94. */
- return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
- SFF_DMT_LEVEL_OFFSET);
-}
-
-static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
-{
- struct efx_mcdi_phy_data *phy_data = efx->phy_data;
-
- if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS)
- return phy_data->media;
-
- /* A QSFP+ NIC may actually have an SFP+ module attached.
- * The ID is page 0, byte 0.
- */
- switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
- case 0x3:
- return MC_CMD_MEDIA_SFP_PLUS;
- case 0xc:
- case 0xd:
- return MC_CMD_MEDIA_QSFP_PLUS;
- default:
- return 0;
- }
-}
-
-static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
- struct ethtool_eeprom *ee, u8 *data)
-{
- int rc;
- ssize_t space_remaining = ee->len;
- unsigned int page_off;
- bool ignore_missing;
- int num_pages;
- int page;
-
- switch (efx_mcdi_phy_module_type(efx)) {
- case MC_CMD_MEDIA_SFP_PLUS:
- num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
- SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES;
- page = 0;
- ignore_missing = false;
- break;
- case MC_CMD_MEDIA_QSFP_PLUS:
- num_pages = SFF_8436_NUM_PAGES;
- page = -1; /* We obtain the lower page by asking for -1. */
- ignore_missing = true; /* Ignore missing pages after page 0. */
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- page_off = ee->offset % SFP_PAGE_SIZE;
- page += ee->offset / SFP_PAGE_SIZE;
-
- while (space_remaining && (page < num_pages)) {
- rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
- data, page_off,
- space_remaining);
-
- if (rc > 0) {
- space_remaining -= rc;
- data += rc;
- page_off = 0;
- page++;
- } else if (rc == 0) {
- space_remaining = 0;
- } else if (ignore_missing && (page > 0)) {
- int intended_size = SFP_PAGE_SIZE - page_off;
-
- space_remaining -= intended_size;
- if (space_remaining < 0) {
- space_remaining = 0;
- } else {
- memset(data, 0, intended_size);
- data += intended_size;
- page_off = 0;
- page++;
- rc = 0;
- }
- } else {
- return rc;
- }
- }
-
- return 0;
-}
-
-static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
- struct ethtool_modinfo *modinfo)
-{
- int sff_8472_level;
- int diag_type;
-
- switch (efx_mcdi_phy_module_type(efx)) {
- case MC_CMD_MEDIA_SFP_PLUS:
- sff_8472_level = efx_mcdi_phy_sff_8472_level(efx);
-
- /* If we can't read the diagnostics level we have none. */
- if (sff_8472_level < 0)
- return -EOPNOTSUPP;
-
- /* Check if this module requires the (unsupported) address
- * change operation.
- */
- diag_type = efx_mcdi_phy_diag_type(efx);
-
- if ((sff_8472_level == 0) ||
- (diag_type & SFF_DIAG_ADDR_CHANGE)) {
- modinfo->type = ETH_MODULE_SFF_8079;
- modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
- } else {
- modinfo->type = ETH_MODULE_SFF_8472;
- modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
- }
- break;
-
- case MC_CMD_MEDIA_QSFP_PLUS:
- modinfo->type = ETH_MODULE_SFF_8436;
- modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
- break;
-
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static const struct efx_phy_operations efx_mcdi_phy_ops = {
- .probe = efx_mcdi_phy_probe,
- .init = efx_port_dummy_op_int,
- .reconfigure = efx_mcdi_port_reconfigure,
- .poll = efx_mcdi_phy_poll,
- .fini = efx_port_dummy_op_void,
- .remove = efx_mcdi_phy_remove,
- .get_link_ksettings = efx_mcdi_phy_get_link_ksettings,
- .set_link_ksettings = efx_mcdi_phy_set_link_ksettings,
- .get_fecparam = efx_mcdi_phy_get_fecparam,
- .set_fecparam = efx_mcdi_phy_set_fecparam,
- .test_alive = efx_mcdi_phy_test_alive,
- .run_tests = efx_mcdi_phy_run_tests,
- .test_name = efx_mcdi_phy_test_name,
- .get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
- .get_module_info = efx_mcdi_phy_get_module_info,
-};
-
u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data = efx->phy_data;
@@ -683,16 +97,13 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
{
int rc;
- /* Hook in PHY operations table */
- efx->phy_op = &efx_mcdi_phy_ops;
-
/* Set up MDIO structure for PHY */
efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
efx->mdio.mdio_read = efx_mcdi_mdio_read;
efx->mdio.mdio_write = efx_mcdi_mdio_write;
/* Fill out MDIO structure, loopback modes, and initial link state */
- rc = efx->phy_op->probe(efx);
+ rc = efx_mcdi_phy_probe(efx);
if (rc != 0)
return rc;
@@ -701,6 +112,6 @@ int efx_mcdi_port_probe(struct efx_nic *efx)
void efx_mcdi_port_remove(struct efx_nic *efx)
{
- efx->phy_op->remove(efx);
+ efx_mcdi_phy_remove(efx);
efx_mcdi_mac_fini_stats(efx);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 714d7f937212..4bd3ef8f3384 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -308,7 +308,7 @@ void efx_mcdi_phy_decode_link(struct efx_nic *efx,
* Both RS and BASER (whether AUTO or not) means use FEC if cable and link
* partner support it, preferring RS to BASER.
*/
-u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
+u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap)
{
u32 ret = 0;
@@ -316,17 +316,21 @@ u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap)
return 0;
if (ethtool_cap & ETHTOOL_FEC_AUTO)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_RS_FEC_LBN);
- if (ethtool_cap & ETHTOOL_FEC_RS)
+ ret |= ((1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_RS_FEC_LBN)) & supported_cap;
+ if (ethtool_cap & ETHTOOL_FEC_RS &&
+ supported_cap & (1 << MC_CMD_PHY_CAP_RS_FEC_LBN))
ret |= (1 << MC_CMD_PHY_CAP_RS_FEC_LBN) |
(1 << MC_CMD_PHY_CAP_RS_FEC_REQUESTED_LBN);
- if (ethtool_cap & ETHTOOL_FEC_BASER)
- ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
- (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN) |
- (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ if (ethtool_cap & ETHTOOL_FEC_BASER) {
+ if (supported_cap & (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN))
+ ret |= (1 << MC_CMD_PHY_CAP_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_BASER_FEC_REQUESTED_LBN);
+ if (supported_cap & (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN))
+ ret |= (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_LBN) |
+ (1 << MC_CMD_PHY_CAP_25G_BASER_FEC_REQUESTED_LBN);
+ }
return ret;
}
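
The effect of masking with supported_cap, worked through for a hypothetical
PHY that reports only RS-FEC: ETHTOOL_FEC_AUTO now resolves to the single
supported bit instead of also advertising BASER modes the link cannot do.

	static u32 example_fec_auto_caps(void)
	{
		u32 supported = 1 << MC_CMD_PHY_CAP_RS_FEC_LBN;	/* hypothetical PHY */

		/* Only the RS bit survives the mask; both BASER bits drop out */
		return ethtool_fec_caps_to_mcdi(supported, ETHTOOL_FEC_AUTO);
	}
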
@@ -404,6 +408,196 @@ bool efx_mcdi_phy_poll(struct efx_nic *efx)
return !efx_link_state_equal(&efx->link_state, &old_state);
}
+int efx_mcdi_phy_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ u32 caps;
+ int rc;
+
+ /* Initialise and populate phy_data */
+ phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+ if (phy_data == NULL)
+ return -ENOMEM;
+
+ rc = efx_mcdi_get_phy_cfg(efx, phy_data);
+ if (rc != 0)
+ goto fail;
+
+ /* Read initial link advertisement */
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ goto fail;
+
+ /* Fill out nic state */
+ efx->phy_data = phy_data;
+ efx->phy_type = phy_data->type;
+
+ efx->mdio_bus = phy_data->channel;
+ efx->mdio.prtad = phy_data->port;
+ efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+ efx->mdio.mode_support = 0;
+ if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
+ if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+ efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+ caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+ if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ mcdi_to_ethtool_linkset(phy_data->media, caps,
+ efx->link_advertising);
+ else
+ phy_data->forced_cap = caps;
+
+ /* Assert that we can map efx -> mcdi loopback modes */
+ BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
+ BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
+ BUILD_BUG_ON(LOOPBACK_GMAC != MC_CMD_LOOPBACK_GMAC);
+ BUILD_BUG_ON(LOOPBACK_XGMII != MC_CMD_LOOPBACK_XGMII);
+ BUILD_BUG_ON(LOOPBACK_XGXS != MC_CMD_LOOPBACK_XGXS);
+ BUILD_BUG_ON(LOOPBACK_XAUI != MC_CMD_LOOPBACK_XAUI);
+ BUILD_BUG_ON(LOOPBACK_GMII != MC_CMD_LOOPBACK_GMII);
+ BUILD_BUG_ON(LOOPBACK_SGMII != MC_CMD_LOOPBACK_SGMII);
+ BUILD_BUG_ON(LOOPBACK_XGBR != MC_CMD_LOOPBACK_XGBR);
+ BUILD_BUG_ON(LOOPBACK_XFI != MC_CMD_LOOPBACK_XFI);
+ BUILD_BUG_ON(LOOPBACK_XAUI_FAR != MC_CMD_LOOPBACK_XAUI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_FAR != MC_CMD_LOOPBACK_GMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_SGMII_FAR != MC_CMD_LOOPBACK_SGMII_FAR);
+ BUILD_BUG_ON(LOOPBACK_XFI_FAR != MC_CMD_LOOPBACK_XFI_FAR);
+ BUILD_BUG_ON(LOOPBACK_GPHY != MC_CMD_LOOPBACK_GPHY);
+ BUILD_BUG_ON(LOOPBACK_PHYXS != MC_CMD_LOOPBACK_PHYXS);
+ BUILD_BUG_ON(LOOPBACK_PCS != MC_CMD_LOOPBACK_PCS);
+ BUILD_BUG_ON(LOOPBACK_PMAPMD != MC_CMD_LOOPBACK_PMAPMD);
+ BUILD_BUG_ON(LOOPBACK_XPORT != MC_CMD_LOOPBACK_XPORT);
+ BUILD_BUG_ON(LOOPBACK_XGMII_WS != MC_CMD_LOOPBACK_XGMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS != MC_CMD_LOOPBACK_XAUI_WS);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_FAR != MC_CMD_LOOPBACK_XAUI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_XAUI_WS_NEAR != MC_CMD_LOOPBACK_XAUI_WS_NEAR);
+ BUILD_BUG_ON(LOOPBACK_GMII_WS != MC_CMD_LOOPBACK_GMII_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS != MC_CMD_LOOPBACK_XFI_WS);
+ BUILD_BUG_ON(LOOPBACK_XFI_WS_FAR != MC_CMD_LOOPBACK_XFI_WS_FAR);
+ BUILD_BUG_ON(LOOPBACK_PHYXS_WS != MC_CMD_LOOPBACK_PHYXS_WS);
+
+ rc = efx_mcdi_loopback_modes(efx, &efx->loopback_modes);
+ if (rc != 0)
+ goto fail;
+ /* The MC indicates that LOOPBACK_NONE is a valid loopback mode,
+ * but by convention we don't
+ */
+ efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ /* Record the initial FEC configuration (or nearest approximation
+ * representable in the ethtool configuration space)
+ */
+ efx->fec_config = mcdi_fec_caps_to_ethtool(caps,
+ efx->link_state.speed == 25000 ||
+ efx->link_state.speed == 50000);
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+ efx_link_set_wanted_fc(efx, efx->wanted_fc);
+
+ return 0;
+
+fail:
+ kfree(phy_data);
+ return rc;
+}
+
+void efx_mcdi_phy_remove(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ efx->phy_data = NULL;
+ kfree(phy_data);
+}
+
+void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ int rc;
+
+ cmd->base.speed = efx->link_state.speed;
+ cmd->base.duplex = efx->link_state.fd;
+ cmd->base.port = mcdi_to_ethtool_media(phy_cfg->media);
+ cmd->base.phy_address = phy_cfg->port;
+ cmd->base.autoneg = !!(efx->link_advertising[0] & ADVERTISED_Autoneg);
+ cmd->base.mdio_support = (efx->mdio.mode_support &
+ (MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22));
+
+ mcdi_to_ethtool_linkset(phy_cfg->media, phy_cfg->supported_cap,
+ cmd->link_modes.supported);
+ memcpy(cmd->link_modes.advertising, efx->link_advertising,
+ sizeof(__ETHTOOL_DECLARE_LINK_MODE_MASK()));
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return;
+ mcdi_to_ethtool_linkset(phy_cfg->media,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LP_CAP),
+ cmd->link_modes.lp_advertising);
+}
+
+int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ if (cmd->base.autoneg) {
+ caps = (ethtool_linkset_to_mcdi_cap(cmd->link_modes.advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ } else if (cmd->base.duplex) {
+ switch (cmd->base.speed) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10FDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100FDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000FDX_LBN; break;
+ case 10000: caps = 1 << MC_CMD_PHY_CAP_10000FDX_LBN; break;
+ case 40000: caps = 1 << MC_CMD_PHY_CAP_40000FDX_LBN; break;
+ case 100000: caps = 1 << MC_CMD_PHY_CAP_100000FDX_LBN; break;
+ case 25000: caps = 1 << MC_CMD_PHY_CAP_25000FDX_LBN; break;
+ case 50000: caps = 1 << MC_CMD_PHY_CAP_50000FDX_LBN; break;
+ default: return -EINVAL;
+ }
+ } else {
+ switch (cmd->base.speed) {
+ case 10: caps = 1 << MC_CMD_PHY_CAP_10HDX_LBN; break;
+ case 100: caps = 1 << MC_CMD_PHY_CAP_100HDX_LBN; break;
+ case 1000: caps = 1 << MC_CMD_PHY_CAP_1000HDX_LBN; break;
+ default: return -EINVAL;
+ }
+ }
+
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
+
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ if (cmd->base.autoneg) {
+ efx_link_set_advertising(efx, cmd->link_modes.advertising);
+ phy_cfg->forced_cap = 0;
+ } else {
+ efx_link_clear_advertising(efx);
+ phy_cfg->forced_cap = caps;
+ }
+ return 0;
+}
+
int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_V2_LEN);
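
The forced-speed path in efx_mcdi_phy_set_link_ksettings() above maps each
ethtool speed/duplex pair to exactly one MCDI capability bit and rejects
anything else, so a request for, say, 2500 full duplex falls through to the
-EINVAL default. A worked sketch of the 10G full-duplex case (not a driver
entry point):

	static u32 example_forced_10g_caps(void)
	{
		/* autoneg off, speed 10000, full duplex */
		return 1 << MC_CMD_PHY_CAP_10000FDX_LBN;
		/* efx_mcdi_set_link() then programs this, with the FEC bits
		 * OR'd in from efx->fec_config as shown above.
		 */
	}
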
@@ -455,6 +649,50 @@ int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec)
return 0;
}
+/* Basic validation to ensure that the caps we are going to attempt to set are
+ * in fact supported by the adapter. Note that 'no FEC' is always supported.
+ */
+static int ethtool_fec_supported(u32 supported_cap, u32 ethtool_cap)
+{
+ if (ethtool_cap & ETHTOOL_FEC_OFF)
+ return 0;
+
+ if (ethtool_cap &&
+ !ethtool_fec_caps_to_mcdi(supported_cap, ethtool_cap))
+ return -EINVAL;
+ return 0;
+}
+
+int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 caps;
+ int rc;
+
+ rc = ethtool_fec_supported(phy_cfg->supported_cap, fec->fec);
+ if (rc)
+ return rc;
+
+ /* Work out what efx_mcdi_phy_set_link_ksettings() would produce from
+ * saved advertising bits
+ */
+ if (test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, efx->link_advertising))
+ caps = (ethtool_linkset_to_mcdi_cap(efx->link_advertising) |
+ 1 << MC_CMD_PHY_CAP_AN_LBN);
+ else
+ caps = phy_cfg->forced_cap;
+
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, fec->fec);
+ rc = efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
+ efx->loopback_mode, 0);
+ if (rc)
+ return rc;
+
+ /* Record the new FEC setting for subsequent set_link calls */
+ efx->fec_config = fec->fec;
+ return 0;
+}
+
int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
@@ -483,12 +721,357 @@ int efx_mcdi_port_reconfigure(struct efx_nic *efx)
ethtool_linkset_to_mcdi_cap(efx->link_advertising) :
phy_cfg->forced_cap);
- caps |= ethtool_fec_caps_to_mcdi(efx->fec_config);
+ caps |= ethtool_fec_caps_to_mcdi(phy_cfg->supported_cap, efx->fec_config);
return efx_mcdi_set_link(efx, caps, efx_get_mcdi_phy_flags(efx),
efx->loopback_mode, 0);
}
+static const char *const mcdi_sft9001_cable_diag_names[] = {
+ "cable.pairA.length",
+ "cable.pairB.length",
+ "cable.pairC.length",
+ "cable.pairD.length",
+ "cable.pairA.status",
+ "cable.pairB.status",
+ "cable.pairC.status",
+ "cable.pairD.status",
+};
+
+static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
+ int *results)
+{
+ unsigned int retry, i, count = 0;
+ size_t outlen;
+ u32 status;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+ inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
+ if (rc)
+ goto out;
+
+ /* Wait up to 10s for BIST to finish */
+ for (retry = 0; retry < 100; ++retry) {
+ BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto out;
+
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
+ if (status != MC_CMD_POLL_BIST_RUNNING)
+ goto finished;
+
+ msleep(100);
+ }
+
+ rc = -ETIMEDOUT;
+ goto out;
+
+finished:
+ results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
+
+ /* SFT9001 specific cable diagnostics output */
+ if (efx->phy_type == PHY_TYPE_SFT9001B &&
+ (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
+ bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ if (status == MC_CMD_POLL_BIST_PASSED &&
+ outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
+ for (i = 0; i < 8; i++) {
+ results[count + i] =
+ EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
+ EFX_DWORD_0);
+ }
+ }
+ count += 8;
+ }
+ rc = count;
+
+out:
+ return rc;
+}
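+/* Hedged note (not part of this patch): the poll loop above bounds BIST
+ * completion at 100 iterations x 100 ms = 10 s. On success rc is the number
+ * of results written: one pass/fail word, plus 8 SFT9001 cable-diagnostic
+ * values when applicable, which callers use to advance their results pointer.
+ */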
+
+int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+ u32 mode;
+ int rc;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
+ if (rc < 0)
+ return rc;
+
+ results += rc;
+ }
+
+ /* If we support both LONG and SHORT, run LONG only for offline
+ * (link-disrupting) tests and SHORT otherwise; if only one is
+ * supported, run that one.
+ */
+ mode = 0;
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN)) {
+ if ((flags & ETH_TEST_FL_OFFLINE) &&
+ (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN)))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+ else
+ mode = MC_CMD_PHY_BIST_CABLE_SHORT;
+ } else if (phy_cfg->flags &
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))
+ mode = MC_CMD_PHY_BIST_CABLE_LONG;
+
+ if (mode != 0) {
+ rc = efx_mcdi_bist(efx, mode, results);
+ if (rc < 0)
+ return rc;
+ results += rc;
+ }
+
+ return 0;
+}
+
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
+{
+ struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
+
+ if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_LBN)) {
+ if (index == 0)
+ return "bist";
+ --index;
+ }
+
+ if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_SHORT_LBN) |
+ (1 << MC_CMD_GET_PHY_CFG_OUT_BIST_CABLE_LONG_LBN))) {
+ if (index == 0)
+ return "cable";
+ --index;
+
+ if (efx->phy_type == PHY_TYPE_SFT9001B) {
+ if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
+ return mcdi_sft9001_cable_diag_names[index];
+ index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
+ }
+ }
+
+ return NULL;
+}
+
+#define SFP_PAGE_SIZE 128
+#define SFF_DIAG_TYPE_OFFSET 92
+#define SFF_DIAG_ADDR_CHANGE BIT(2)
+#define SFF_8079_NUM_PAGES 2
+#define SFF_8472_NUM_PAGES 4
+#define SFF_8436_NUM_PAGES 5
+#define SFF_DMT_LEVEL_OFFSET 94
+
+/**
+ * efx_mcdi_phy_get_module_eeprom_page() - Get a single page of module eeprom
+ * @efx: NIC context
+ * @page: EEPROM page number
+ * @data: Destination data pointer
+ * @offset: Offset within the page to start copying from
+ * @space: Space available in @data
+ *
+ * Return:
+ * >=0 - amount of data copied
+ * <0 - error
+ */
+static int efx_mcdi_phy_get_module_eeprom_page(struct efx_nic *efx,
+ unsigned int page,
+ u8 *data, ssize_t offset,
+ ssize_t space)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
+ unsigned int payload_len;
+ unsigned int to_copy;
+ size_t outlen;
+ int rc;
+
+ if (offset > SFP_PAGE_SIZE)
+ return -EINVAL;
+
+ to_copy = min(space, SFP_PAGE_SIZE - offset);
+
+ MCDI_SET_DWORD(inbuf, GET_PHY_MEDIA_INFO_IN_PAGE, page);
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_GET_PHY_MEDIA_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+
+ if (rc)
+ return rc;
+
+ if (outlen < (MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST +
+ SFP_PAGE_SIZE))
+ return -EIO;
+
+ payload_len = MCDI_DWORD(outbuf, GET_PHY_MEDIA_INFO_OUT_DATALEN);
+ if (payload_len != SFP_PAGE_SIZE)
+ return -EIO;
+
+ memcpy(data, MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
+ to_copy);
+
+ return to_copy;
+}
+
+static int efx_mcdi_phy_get_module_eeprom_byte(struct efx_nic *efx,
+ unsigned int page,
+ u8 byte)
+{
+ u8 data;
+ int rc;
+
+ rc = efx_mcdi_phy_get_module_eeprom_page(efx, page, &data, byte, 1);
+ if (rc == 1)
+ return data;
+
+ return rc;
+}
+
+static int efx_mcdi_phy_diag_type(struct efx_nic *efx)
+{
+ /* Page zero of the EEPROM includes the diagnostic type at byte 92. */
+ return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+ SFF_DIAG_TYPE_OFFSET);
+}
+
+static int efx_mcdi_phy_sff_8472_level(struct efx_nic *efx)
+{
+ /* Page zero of the EEPROM includes the DMT level at byte 94. */
+ return efx_mcdi_phy_get_module_eeprom_byte(efx, 0,
+ SFF_DMT_LEVEL_OFFSET);
+}
+
+static u32 efx_mcdi_phy_module_type(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ if (phy_data->media != MC_CMD_MEDIA_QSFP_PLUS)
+ return phy_data->media;
+
+ /* A QSFP+ NIC may actually have an SFP+ module attached.
+ * The ID is page 0, byte 0.
+ */
+ switch (efx_mcdi_phy_get_module_eeprom_byte(efx, 0, 0)) {
+ case 0x3:
+ return MC_CMD_MEDIA_SFP_PLUS;
+ case 0xc:
+ case 0xd:
+ return MC_CMD_MEDIA_QSFP_PLUS;
+ default:
+ return 0;
+ }
+}
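+/* Hedged note (not part of this patch): 0x03, 0x0c and 0x0d are SFF-8024
+ * identifier codes for SFP/SFP+, QSFP and QSFP+ respectively, read from
+ * byte 0 of page 0; anything else is treated as an unknown module type.
+ */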
+
+int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data)
+{
+ int rc;
+ ssize_t space_remaining = ee->len;
+ unsigned int page_off;
+ bool ignore_missing;
+ int num_pages;
+ int page;
+
+ switch (efx_mcdi_phy_module_type(efx)) {
+ case MC_CMD_MEDIA_SFP_PLUS:
+ num_pages = efx_mcdi_phy_sff_8472_level(efx) > 0 ?
+ SFF_8472_NUM_PAGES : SFF_8079_NUM_PAGES;
+ page = 0;
+ ignore_missing = false;
+ break;
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ num_pages = SFF_8436_NUM_PAGES;
+ page = -1; /* We obtain the lower page by asking for -1. */
+ ignore_missing = true; /* Ignore missing pages after page 0. */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ page_off = ee->offset % SFP_PAGE_SIZE;
+ page += ee->offset / SFP_PAGE_SIZE;
+
+ while (space_remaining && (page < num_pages)) {
+ rc = efx_mcdi_phy_get_module_eeprom_page(efx, page,
+ data, page_off,
+ space_remaining);
+
+ if (rc > 0) {
+ space_remaining -= rc;
+ data += rc;
+ page_off = 0;
+ page++;
+ } else if (rc == 0) {
+ space_remaining = 0;
+ } else if (ignore_missing && (page > 0)) {
+ int intended_size = SFP_PAGE_SIZE - page_off;
+
+ space_remaining -= intended_size;
+ if (space_remaining < 0) {
+ space_remaining = 0;
+ } else {
+ memset(data, 0, intended_size);
+ data += intended_size;
+ page_off = 0;
+ page++;
+ rc = 0;
+ }
+ } else {
+ return rc;
+ }
+ }
+
+ return 0;
+}
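+/* Illustrative sketch (not part of this patch) of the page addressing above,
+ * assuming 128-byte pages: a flat ethtool offset splits into an in-page
+ * offset and a page advance from the media-specific starting page. E.g. for
+ * QSFP+ (start page -1), offset 300 reads page 1 at in-page offset 44:
+ *
+ *	page_off = ee->offset % SFP_PAGE_SIZE;	// 300 % 128 == 44
+ *	page    += ee->offset / SFP_PAGE_SIZE;	// -1 + 2    == 1
+ */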
+
+int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo)
+{
+ int sff_8472_level;
+ int diag_type;
+
+ switch (efx_mcdi_phy_module_type(efx)) {
+ case MC_CMD_MEDIA_SFP_PLUS:
+ sff_8472_level = efx_mcdi_phy_sff_8472_level(efx);
+
+ /* If we can't read the diagnostics level we have none. */
+ if (sff_8472_level < 0)
+ return -EOPNOTSUPP;
+
+ /* Check if this module requires the (unsupported) address
+ * change operation.
+ */
+ diag_type = efx_mcdi_phy_diag_type(efx);
+
+ if (sff_8472_level == 0 ||
+ (diag_type & SFF_DIAG_ADDR_CHANGE)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+
+ case MC_CMD_MEDIA_QSFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static unsigned int efx_calc_mac_mtu(struct efx_nic *efx)
{
return EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.h b/drivers/net/ethernet/sfc/mcdi_port_common.h
index 9dbeee83266f..ed31690e591c 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.h
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.h
@@ -41,13 +41,22 @@ u8 mcdi_to_ethtool_media(u32 media);
void efx_mcdi_phy_decode_link(struct efx_nic *efx,
struct efx_link_state *link_state,
u32 speed, u32 flags, u32 fcntl);
-u32 ethtool_fec_caps_to_mcdi(u32 ethtool_cap);
+u32 ethtool_fec_caps_to_mcdi(u32 supported_cap, u32 ethtool_cap);
u32 mcdi_fec_caps_to_ethtool(u32 caps, bool is_25g);
void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
bool efx_mcdi_phy_poll(struct efx_nic *efx);
-int efx_mcdi_phy_get_fecparam(struct efx_nic *efx,
- struct ethtool_fecparam *fec);
+int efx_mcdi_phy_probe(struct efx_nic *efx);
+void efx_mcdi_phy_remove(struct efx_nic *efx);
+void efx_mcdi_phy_get_link_ksettings(struct efx_nic *efx, struct ethtool_link_ksettings *cmd);
+int efx_mcdi_phy_set_link_ksettings(struct efx_nic *efx, const struct ethtool_link_ksettings *cmd);
+int efx_mcdi_phy_get_fecparam(struct efx_nic *efx, struct ethtool_fecparam *fec);
+int efx_mcdi_phy_set_fecparam(struct efx_nic *efx, const struct ethtool_fecparam *fec);
int efx_mcdi_phy_test_alive(struct efx_nic *efx);
+int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results, unsigned int flags);
+const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index);
+int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx, struct ethtool_eeprom *ee, u8 *data);
+int efx_mcdi_phy_get_module_info(struct efx_nic *efx, struct ethtool_modinfo *modinfo);
int efx_mcdi_set_mac(struct efx_nic *efx);
int efx_mcdi_set_mtu(struct efx_nic *efx);
int efx_mcdi_mac_init_stats(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 062462a13847..9f7dfdf708cf 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -38,8 +38,6 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "4.1"
-
#ifdef DEBUG
#define EFX_WARN_ON_ONCE_PARANOID(x) WARN_ON_ONCE(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
@@ -65,10 +63,13 @@
* queues. */
#define EFX_MAX_TX_TC 2
#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
-#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
-#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
-#define EFX_TXQ_TYPES 4
-#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+#define EFX_TXQ_TYPE_OUTER_CSUM 1 /* Outer checksum offload */
+#define EFX_TXQ_TYPE_INNER_CSUM 2 /* Inner checksum offload */
+#define EFX_TXQ_TYPE_HIGHPRI 4 /* High-priority (for TC) */
+#define EFX_TXQ_TYPES 8
+/* HIGHPRI is Siena-only, and INNER_CSUM is EF10, so no need for both */
+#define EFX_MAX_TXQ_PER_CHANNEL 4
+#define EFX_MAX_TX_QUEUES (EFX_MAX_TXQ_PER_CHANNEL * EFX_MAX_CHANNELS)
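+/* Hedged example (not part of this patch): queue types combine as bitmasks,
+ * e.g. a high-priority queue with outer-checksum offload has type
+ * (EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_HIGHPRI) == 5 and is looked up via
+ * channel->tx_queue_by_type[5]; only up to EFX_MAX_TXQ_PER_CHANNEL of the
+ * EFX_TXQ_TYPES combinations are actually instantiated per channel.
+ */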
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)
@@ -76,6 +77,9 @@
/* Minimum MTU, from RFC791 (IP) */
#define EFX_MIN_MTU 68
+/* Maximum total header length for TSOv2 */
+#define EFX_TSO2_MAX_HDRLEN 208
+
/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
* and should be a multiple of the cache line size.
*/
@@ -192,7 +196,9 @@ struct efx_tx_buffer {
* @queue: DMA queue number
* @label: Label for TX completion events.
* Is our index within @channel->tx_queue array.
+ * @type: configuration type of this TX queue. A bitmask of %EFX_TXQ_TYPE_* flags.
* @tso_version: Version of TSO in use for this queue.
+ * @tso_encap: Is encapsulated TSO supported? Supported in TSOv2 on 8000 series.
* @channel: The associated channel
* @core_txq: The networking core TX queue structure
* @buffer: The software buffer ring
@@ -206,8 +212,6 @@ struct efx_tx_buffer {
* @initialised: Has hardware queue been initialised?
* @timestamping: Is timestamping enabled for this channel?
* @xdp_tx: Is this an XDP tx queue?
- * @handle_tso: TSO xmit preparation handler. Sets up the TSO metadata and
- * may also map tx data, depending on the nature of the TSO implementation.
* @read_count: Current read pointer.
* This is the number of buffers that have been removed from both rings.
* @old_write_count: The value of @write_count when last checked.
@@ -244,7 +248,7 @@ struct efx_tx_buffer {
* @tso_fallbacks: Number of times TSO fallback used
* @pushes: Number of times the TX push feature has been used
* @pio_packets: Number of times the TX PIO feature has been used
- * @xmit_more_available: Are any packets waiting to be pushed to the NIC
+ * @xmit_pending: Are any packets waiting to be pushed to the NIC
* @cb_packets: Number of times the TX copybreak feature has been used
* @notify_count: Count of notified descriptors to the NIC
* @empty_read_count: If the completion path has seen the queue as empty
@@ -256,7 +260,9 @@ struct efx_tx_queue {
struct efx_nic *efx ____cacheline_aligned_in_smp;
unsigned int queue;
unsigned int label;
+ unsigned int type;
unsigned int tso_version;
+ bool tso_encap;
struct efx_channel *channel;
struct netdev_queue *core_txq;
struct efx_tx_buffer *buffer;
@@ -269,9 +275,6 @@ struct efx_tx_queue {
bool timestamping;
bool xdp_tx;
- /* Function pointers used in the fast path. */
- int (*handle_tso)(struct efx_tx_queue*, struct sk_buff*, bool *);
-
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
@@ -292,7 +295,7 @@ struct efx_tx_queue {
unsigned int tso_fallbacks;
unsigned int pushes;
unsigned int pio_packets;
- bool xmit_more_available;
+ bool xmit_pending;
unsigned int cb_packets;
unsigned int notify_count;
/* Statistics to supplement MAC stats */
@@ -455,7 +458,7 @@ enum efx_sync_events_state {
* were checked for expiry
* @rfs_expire_index: next accelerated RFS filter ID to check for expiry
* @n_rfs_succeeded: number of successful accelerated RFS filter insertions
- * @n_rfs_failed; number of failed accelerated RFS filter insertions
+ * @n_rfs_failed: number of failed accelerated RFS filter insertions
* @filter_work: Work item for efx_filter_rfs_expire()
* @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
* indexed by filter ID
@@ -481,6 +484,7 @@ enum efx_sync_events_state {
* @rx_list: list of SKBs from current RX, awaiting processing
* @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel
+ * @tx_queue_by_type: pointers into @tx_queue, or %NULL, indexed by txq type
* @sync_events_state: Current state of sync events on this channel
* @sync_timestamp_major: Major part of the last ptp sync event
* @sync_timestamp_minor: Minor part of the last ptp sync event
@@ -542,7 +546,8 @@ struct efx_channel {
struct list_head *rx_list;
struct efx_rx_queue rx_queue;
- struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
+ struct efx_tx_queue tx_queue[EFX_MAX_TXQ_PER_CHANNEL];
+ struct efx_tx_queue *tx_queue_by_type[EFX_TXQ_TYPES];
enum efx_sync_events_state sync_events_state;
u32 sync_timestamp_major;
@@ -658,51 +663,6 @@ static inline bool efx_link_state_equal(const struct efx_link_state *left,
}
/**
- * struct efx_phy_operations - Efx PHY operations table
- * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
- * efx->loopback_modes.
- * @init: Initialise PHY
- * @fini: Shut down PHY
- * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
- * @poll: Update @link_state and report whether it changed.
- * Serialised by the mac_lock.
- * @get_link_ksettings: Get ethtool settings. Serialised by the mac_lock.
- * @set_link_ksettings: Set ethtool settings. Serialised by the mac_lock.
- * @get_fecparam: Get Forward Error Correction settings. Serialised by mac_lock.
- * @set_fecparam: Set Forward Error Correction settings. Serialised by mac_lock.
- * @set_npage_adv: Set abilities advertised in (Extended) Next Page
- * (only needed where AN bit is set in mmds)
- * @test_alive: Test that PHY is 'alive' (online)
- * @test_name: Get the name of a PHY-specific test/result
- * @run_tests: Run tests and record results as appropriate (offline).
- * Flags are the ethtool tests flags.
- */
-struct efx_phy_operations {
- int (*probe) (struct efx_nic *efx);
- int (*init) (struct efx_nic *efx);
- void (*fini) (struct efx_nic *efx);
- void (*remove) (struct efx_nic *efx);
- int (*reconfigure) (struct efx_nic *efx);
- bool (*poll) (struct efx_nic *efx);
- void (*get_link_ksettings)(struct efx_nic *efx,
- struct ethtool_link_ksettings *cmd);
- int (*set_link_ksettings)(struct efx_nic *efx,
- const struct ethtool_link_ksettings *cmd);
- int (*get_fecparam)(struct efx_nic *efx, struct ethtool_fecparam *fec);
- int (*set_fecparam)(struct efx_nic *efx,
- const struct ethtool_fecparam *fec);
- void (*set_npage_adv) (struct efx_nic *efx, u32);
- int (*test_alive) (struct efx_nic *efx);
- const char *(*test_name) (struct efx_nic *efx, unsigned int index);
- int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
- int (*get_module_eeprom) (struct efx_nic *efx,
- struct ethtool_eeprom *ee,
- u8 *data);
- int (*get_module_info) (struct efx_nic *efx,
- struct ethtool_modinfo *modinfo);
-};
-
-/**
* enum efx_phy_mode - PHY operating mode flags
* @PHY_MODE_NORMAL: on and should pass traffic
* @PHY_MODE_TX_DISABLED: on with TX disabled
@@ -920,7 +880,6 @@ struct efx_async_filter_insertion {
* field of %MC_CMD_GET_CAPABILITIES_V4 response, or %MC_CMD_MAC_NSTATS)
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
- * @phy_op: PHY interface
* @phy_data: PHY private data (including PHY-specific stats)
* @mdio: PHY MDIO interface
* @mdio_bus: PHY MDIO bus ID (only used by Siena)
@@ -1094,7 +1053,6 @@ struct efx_nic {
bool rx_nodesc_drops_prev_state;
unsigned int phy_type;
- const struct efx_phy_operations *phy_op;
void *phy_data;
struct mdio_if_info mdio;
unsigned int mdio_bus;
@@ -1214,10 +1172,12 @@ struct efx_udp_tunnel {
* @describe_stats: Describe statistics for ethtool
* @update_stats: Update statistics not provided by event handling.
* Either argument may be %NULL.
+ * @update_stats_atomic: Update statistics while in atomic context, if that
+ * is more limiting than @update_stats. Otherwise, leave %NULL and
+ * driver core will call @update_stats.
* @start_stats: Start the regular fetching of statistics
* @pull_stats: Pull stats from the NIC and wait until they arrive.
* @stop_stats: Stop the regular fetching of statistics
- * @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
* @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
@@ -1250,7 +1210,7 @@ struct efx_udp_tunnel {
* a pointer to the &struct efx_msi_context for the channel.
* @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
* is a pointer to the &struct efx_nic.
- * @tx_probe: Allocate resources for TX queue
+ * @tx_probe: Allocate resources for TX queue (and select TXQ type)
* @tx_init: Initialise TX queue on the NIC
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
@@ -1359,10 +1319,11 @@ struct efx_nic_type {
size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
struct rtnl_link_stats64 *core_stats);
+ size_t (*update_stats_atomic)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
void (*pull_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
- void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
int (*reconfigure_port)(struct efx_nic *efx);
void (*prepare_enable_fc_tx)(struct efx_nic *efx);
@@ -1546,14 +1507,6 @@ efx_get_tx_channel(struct efx_nic *efx, unsigned int index)
return efx->channel[efx->tx_channel_offset + index];
}
-static inline struct efx_tx_queue *
-efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
-{
- EFX_WARN_ON_ONCE_PARANOID(index >= efx->n_tx_channels ||
- type >= efx->tx_queues_per_channel);
- return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
-}
-
static inline struct efx_channel *
efx_get_xdp_channel(struct efx_nic *efx, unsigned int index)
{
@@ -1580,10 +1533,18 @@ static inline unsigned int efx_channel_num_tx_queues(struct efx_channel *channel
}
static inline struct efx_tx_queue *
-efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
+efx_channel_get_tx_queue(struct efx_channel *channel, unsigned int type)
+{
+ EFX_WARN_ON_ONCE_PARANOID(type >= EFX_TXQ_TYPES);
+ return channel->tx_queue_by_type[type];
+}
+
+static inline struct efx_tx_queue *
+efx_get_tx_queue(struct efx_nic *efx, unsigned int index, unsigned int type)
{
- EFX_WARN_ON_ONCE_PARANOID(type >= efx_channel_num_tx_queues(channel));
- return &channel->tx_queue[type];
+ struct efx_channel *channel = efx_get_tx_channel(efx, index);
+
+ return efx_channel_get_tx_queue(channel, type);
}
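/* Hedged note (not part of this patch): unlike the old array indexing, the
 * by-type lookup can return NULL when no queue of the requested type was
 * probed, which is why efx_hard_start_xmit() now checks for it.
 */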
/* Iterate over all TX queues belonging to a channel */
@@ -1683,10 +1644,6 @@ efx_channel_tx_fill_level(struct efx_channel *channel)
struct efx_tx_queue *tx_queue;
unsigned int fill_level = 0;
- /* This function is currently only used by EF100, which maybe
- * could do something simpler and just compute the fill level
- * of the single TXQ that's really in use.
- */
efx_for_each_channel_tx_queue(tx_queue, channel)
fill_level = max(fill_level,
tx_queue->insert_count - tx_queue->read_count);
@@ -1694,6 +1651,20 @@ efx_channel_tx_fill_level(struct efx_channel *channel)
return fill_level;
}
+/* Conservative approximation of efx_channel_tx_fill_level using cached value */
+static inline unsigned int
+efx_channel_tx_old_fill_level(struct efx_channel *channel)
+{
+ struct efx_tx_queue *tx_queue;
+ unsigned int fill_level = 0;
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ fill_level = max(fill_level,
+ tx_queue->insert_count - tx_queue->old_read_count);
+
+ return fill_level;
+}
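+/* Hedged note (not part of this patch): reading old_read_count instead of
+ * read_count avoids sharing a cache line with the completion path, at the
+ * cost of over-estimating the fill level until old_read_count is refreshed,
+ * hence the "conservative approximation" above.
+ */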
+
/* Get all supported features.
* If a feature is not fixed, it is present in hw_features.
* If a feature is fixed, it does not present in hw_features, but
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 724e2776b585..5c2fe3ce3f4d 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -297,6 +297,10 @@ struct efx_ef10_nic_data {
u64 licensed_features;
};
+/* TSOv2 */
+int efx_ef10_tx_tso_desc(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
+ bool *data_mapped);
+
int efx_init_sriov(void);
void efx_fini_sriov(void);
diff --git a/drivers/net/ethernet/sfc/nic_common.h b/drivers/net/ethernet/sfc/nic_common.h
index 974107354087..b9cafe9cd568 100644
--- a/drivers/net/ethernet/sfc/nic_common.h
+++ b/drivers/net/ethernet/sfc/nic_common.h
@@ -65,8 +65,7 @@ efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
/* Report whether this TX queue would be empty for the given write_count.
* May return false negative.
*/
-static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
- unsigned int write_count)
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);
@@ -76,41 +75,6 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
-/* Report whether the NIC considers this TX queue empty, using
- * packet_write_count (the write count recorded for the last completable
- * doorbell push). May return false negative. EF10 only, which is OK
- * because only EF10 supports PIO.
- */
-static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
-{
- EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
- return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
-}
-
-/* Get partner of a TX queue, seen as part of the same net core queue */
-/* XXX is this a thing on EF100? */
-static inline struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue)
-{
- if (tx_queue->label & EFX_TXQ_TYPE_OFFLOAD)
- return tx_queue - EFX_TXQ_TYPE_OFFLOAD;
- else
- return tx_queue + EFX_TXQ_TYPE_OFFLOAD;
-}
-
-/* Decide whether we can use TX PIO, ie. write packet data directly into
- * a buffer on the device. This can reduce latency at the expense of
- * throughput, so we only do this if both hardware and software TX rings
- * are empty. This also ensures that only one packet at a time can be
- * using the PIO buffer.
- */
-static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
-{
- struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
-
- return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
- efx_nic_tx_is_empty(partner);
-}
-
int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
bool *data_mapped);
@@ -125,7 +89,7 @@ int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
unsigned int write_count)
{
- bool was_empty = __efx_nic_tx_is_empty(tx_queue, write_count);
+ bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);
tx_queue->empty_read_count = 0;
return was_empty && tx_queue->write_count - write_count == 1;
@@ -280,6 +244,13 @@ void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
const unsigned long *mask, u64 *stats,
const void *dma_buf, bool accumulate);
void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
+static inline size_t efx_nic_update_stats_atomic(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx->type->update_stats_atomic)
+ return efx->type->update_stats_atomic(efx, full_stats, core_stats);
+ return efx->type->update_stats(efx, full_stats, core_stats);
+}
#define EFX_MAX_FLUSH_TIME 5000
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index bea4725a4499..a39c5143b386 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -43,6 +43,7 @@
#include "mcdi_pcol.h"
#include "io.h"
#include "farch_regs.h"
+#include "tx.h"
#include "nic.h" /* indirectly includes ptp.h */
/* Maximum number of events expected to make up a PTP event */
@@ -172,9 +173,11 @@ struct efx_ptp_match {
/**
* struct efx_ptp_event_rx - A PTP receive event (from MC)
+ * @link: list of events
* @seq0: First part of (PTP) UUID
* @seq1: Second part of (PTP) UUID and sequence number
* @hwtimestamp: Event timestamp
+ * @expiry: Time at which the packet arrived
*/
struct efx_ptp_event_rx {
struct list_head link;
@@ -222,11 +225,13 @@ struct efx_ptp_timeset {
* reset (disable, enable).
* @rxfilter_event: Receive filter when operating
* @rxfilter_general: Receive filter when operating
+ * @rxfilter_installed: Receive filter installed
* @config: Current timestamp configuration
* @enabled: PTP operation enabled
* @mode: Mode in which PTP operating (PTP version)
* @ns_to_nic_time: Function to convert from scalar nanoseconds to NIC time
* @nic_to_kernel_time: Function to convert from NIC to kernel time
+ * @nic_time: NIC time representation details
* @nic_time.minor_max: Wrap point for NIC minor times
* @nic_time.sync_event_diff_min: Minimum acceptable difference between time
* in packet prefix and last MCDI time sync event i.e. how much earlier than
@@ -238,6 +243,7 @@ struct efx_ptp_timeset {
* field in MCDI time sync event.
* @min_synchronisation_ns: Minimum acceptable corrected sync window
* @capabilities: Capabilities flags from the NIC
+ * @ts_corrections: Timestamp correction details
* @ts_corrections.ptp_tx: Required driver correction of PTP packet transmit
* timestamps
* @ts_corrections.ptp_rx: Required driver correction of PTP packet receive
@@ -325,7 +331,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- _MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
+ efx_dword_t txbuf[MCDI_TX_BUF_LEN(MC_CMD_PTP_IN_TRANSMIT_LENMAX)];
unsigned int good_syncs;
unsigned int fast_syncs;
@@ -1082,10 +1088,10 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
static void efx_ptp_xmit_skb_queue(struct efx_nic *efx, struct sk_buff *skb)
{
struct efx_ptp_data *ptp_data = efx->ptp_data;
+ u8 type = efx_tx_csum_type_skb(skb);
struct efx_tx_queue *tx_queue;
- u8 type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
- tx_queue = &ptp_data->channel->tx_queue[type];
+ tx_queue = efx_channel_get_tx_queue(ptp_data->channel, type);
if (tx_queue && tx_queue->timestamping) {
efx_enqueue_skb(tx_queue, skb);
} else {
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index e71d6d37a317..3c5227afd497 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -21,6 +21,7 @@
#include "efx_common.h"
#include "efx_channels.h"
#include "nic.h"
+#include "mcdi_port_common.h"
#include "selftest.h"
#include "workarounds.h"
@@ -67,7 +68,7 @@ static const char *const efx_interrupt_mode_names[] = {
STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_interrupt_mode)
/**
- * efx_loopback_state - persistent state during a loopback selftest
+ * struct efx_loopback_state - persistent state during a loopback selftest
* @flush: Drop all packets in efx_loopback_rx_packet
* @packet_count: Number of packets being used in this test
* @skbs: An array of skbs transmitted
@@ -99,10 +100,8 @@ static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- if (efx->phy_op->test_alive) {
- rc = efx->phy_op->test_alive(efx);
- tests->phy_alive = rc ? -1 : 1;
- }
+ rc = efx_mcdi_phy_test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
return rc;
}
@@ -257,11 +256,8 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
{
int rc;
- if (!efx->phy_op->run_tests)
- return 0;
-
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
+ rc = efx_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
if (rc == -EPERM)
rc = 0;
@@ -660,8 +656,8 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
/* Test all enabled types of TX queue */
efx_for_each_channel_tx_queue(tx_queue, channel) {
- state->offload_csum = (tx_queue->label &
- EFX_TXQ_TYPE_OFFLOAD);
+ state->offload_csum = (tx_queue->type &
+ EFX_TXQ_TYPE_OUTER_CSUM);
rc = efx_test_loopback(tx_queue,
&tests->loopback[mode]);
if (rc)
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index ca88ebb4f6b1..a23f085bf298 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -15,8 +15,8 @@
*/
struct efx_loopback_self_tests {
- int tx_sent[EFX_TXQ_TYPES];
- int tx_done[EFX_TXQ_TYPES];
+ int tx_sent[EFX_MAX_TXQ_PER_CHANNEL];
+ int tx_done[EFX_MAX_TXQ_PER_CHANNEL];
int rx_good;
int rx_bad;
};
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a7ea630bb5e6..16347a6d0c47 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -994,7 +994,6 @@ const struct efx_nic_type siena_a0_nic_type = {
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
- .set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
.reconfigure_mac = siena_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 727201d5eb24..1665529a7271 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -59,13 +59,12 @@ u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
{
- /* We need to consider both queues that the net core sees as one */
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(txq1);
+ /* We need to consider all queues that the net core sees as one */
struct efx_nic *efx = txq1->efx;
+ struct efx_tx_queue *txq2;
unsigned int fill_level;
- fill_level = max(txq1->insert_count - txq1->old_read_count,
- txq2->insert_count - txq2->old_read_count);
+ fill_level = efx_channel_tx_old_fill_level(txq1->channel);
if (likely(fill_level < efx->txq_stop_thresh))
return;
@@ -85,11 +84,10 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
*/
netif_tx_stop_queue(txq1->core_txq);
smp_mb();
- txq1->old_read_count = READ_ONCE(txq1->read_count);
- txq2->old_read_count = READ_ONCE(txq2->read_count);
+ efx_for_each_channel_tx_queue(txq2, txq1->channel)
+ txq2->old_read_count = READ_ONCE(txq2->read_count);
- fill_level = max(txq1->insert_count - txq1->old_read_count,
- txq2->insert_count - txq2->old_read_count);
+ fill_level = efx_channel_tx_old_fill_level(txq1->channel);
EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
if (likely(fill_level < efx->txq_stop_thresh)) {
smp_mb();
@@ -266,8 +264,45 @@ static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue,
++tx_queue->insert_count;
return 0;
}
+
+/* Decide whether we can use TX PIO, ie. write packet data directly into
+ * a buffer on the device. This can reduce latency at the expense of
+ * throughput, so we only do this if both hardware and software TX rings
+ * are empty, including all queues for the channel. This also ensures that
+ * only one packet at a time can be using the PIO buffer. If the xmit_more
+ * flag is set then we don't use this - there'll be another packet along
+ * shortly and we want to hold off the doorbell.
+ */
+static bool efx_tx_may_pio(struct efx_tx_queue *tx_queue)
+{
+ struct efx_channel *channel = tx_queue->channel;
+
+ if (!tx_queue->piobuf)
+ return false;
+
+ EFX_WARN_ON_ONCE_PARANOID(!channel->efx->type->option_descriptors);
+
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ if (!efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count))
+ return false;
+
+ return true;
+}
#endif /* EFX_USE_PIO */
+/* Send any pending traffic for a channel. xmit_more is shared across all
+ * queues for a channel, so we must check all of them.
+ */
+static void efx_tx_send_pending(struct efx_channel *channel)
+{
+ struct efx_tx_queue *q;
+
+ efx_for_each_channel_tx_queue(q, channel) {
+ if (q->xmit_pending)
+ efx_nic_push_buffers(q);
+ }
+}
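+/* Hedged note (not part of this patch): the net core's xmit_more hint is
+ * per software queue, but one channel may back that queue with several
+ * hardware queues, so xmit_pending is tracked per queue and flushed for
+ * the whole channel once a doorbell write is due.
+ */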
+
/*
* Add a socket buffer to a TX queue
*
@@ -303,8 +338,18 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
* size limit.
*/
if (segments) {
- EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso);
- rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped);
+ switch (tx_queue->tso_version) {
+ case 1:
+ rc = efx_enqueue_skb_tso(tx_queue, skb, &data_mapped);
+ break;
+ case 2:
+ rc = efx_ef10_tx_tso_desc(tx_queue, skb, &data_mapped);
+ break;
+ case 0: /* No TSO on this queue, SW fallback needed */
+ default:
+ rc = -EINVAL;
+ break;
+ }
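+ /* tso_version is chosen by efx_nic_init_tx() from NIC/queue
+ * capabilities; version 0, or an skb the selected TSO engine
+ * rejects with -EINVAL, drops through to the GSO software
+ * fallback below.
+ */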
if (rc == -EINVAL) {
rc = efx_tx_tso_fallback(tx_queue, skb);
tx_queue->tso_fallbacks++;
@@ -315,7 +360,7 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
goto err;
#ifdef EFX_USE_PIO
} else if (skb_len <= efx_piobuf_size && !xmit_more &&
- efx_nic_may_tx_pio(tx_queue)) {
+ efx_tx_may_pio(tx_queue)) {
/* Use PIO for short packets with an empty queue. */
if (efx_enqueue_skb_pio(tx_queue, skb))
goto err;
@@ -336,21 +381,11 @@ netdev_tx_t __efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb
efx_tx_maybe_stop_queue(tx_queue);
- /* Pass off to hardware */
- if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
+ tx_queue->xmit_pending = true;
- /* There could be packets left on the partner queue if
- * xmit_more was set. If we do not push those they
- * could be left for a long time and cause a netdev watchdog.
- */
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
-
- efx_nic_push_buffers(tx_queue);
- } else {
- tx_queue->xmit_more_available = xmit_more;
- }
+ /* Pass off to hardware */
+ if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
+ efx_tx_send_pending(tx_queue->channel);
if (segments) {
tx_queue->tso_bursts++;
@@ -371,14 +406,8 @@ err:
* on this queue or a partner queue then we need to push here to get the
* previous packets out.
*/
- if (!xmit_more) {
- struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);
-
- if (txq2->xmit_more_available)
- efx_nic_push_buffers(txq2);
-
- efx_nic_push_buffers(tx_queue);
- }
+ if (!xmit_more)
+ efx_tx_send_pending(tx_queue->channel);
return NETDEV_TX_OK;
}
@@ -472,13 +501,10 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
}
/* Initiate a packet transmission. We use one channel per CPU
- * (sharing when we have more CPUs than channels). On Falcon, the TX
- * completion events will be directed back to the CPU that transmitted
- * the packet, which should be cache-efficient.
+ * (sharing when we have more CPUs than channels).
*
* Context: non-blocking.
- * Note that returning anything other than NETDEV_TX_OK will cause the
- * OS to free the skb.
+ * Should always return NETDEV_TX_OK and consume the skb.
*/
netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
struct net_device *net_dev)
@@ -489,19 +515,39 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
+ index = skb_get_queue_mapping(skb);
+ type = efx_tx_csum_type_skb(skb);
+ if (index >= efx->n_tx_channels) {
+ index -= efx->n_tx_channels;
+ type |= EFX_TXQ_TYPE_HIGHPRI;
+ }
+
/* PTP "event" packet */
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
unlikely(efx_ptp_is_ptp_tx(efx, skb))) {
+ /* There may be existing transmits on the channel that are
+ * waiting for this packet to trigger the doorbell write.
+ * We need to send the packets at this point.
+ */
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
return efx_ptp_tx(efx, skb);
}
- index = skb_get_queue_mapping(skb);
- type = skb->ip_summed == CHECKSUM_PARTIAL ? EFX_TXQ_TYPE_OFFLOAD : 0;
- if (index >= efx->n_tx_channels) {
- index -= efx->n_tx_channels;
- type |= EFX_TXQ_TYPE_HIGHPRI;
- }
tx_queue = efx_get_tx_queue(efx, index, type);
+ if (WARN_ON_ONCE(!tx_queue)) {
+ /* We don't have a TXQ of the right type.
+ * This should never happen, as we don't advertise offload
+ * features unless we can support them.
+ */
+ dev_kfree_skb_any(skb);
+ /* If we're not expecting another transmit and we had something to
+ * push on this channel then we need to push here to get the
+ * previous packets out. tx_queue is NULL here, so look up the
+ * channel by index instead.
+ */
+ if (!netdev_xmit_more())
+ efx_tx_send_pending(efx_get_tx_channel(efx, index));
+ return NETDEV_TX_OK;
+ }
return __efx_enqueue_skb(tx_queue, skb);
}
@@ -552,7 +598,7 @@ void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
tx_queue->core_txq =
netdev_get_tx_queue(efx->net_dev,
tx_queue->channel->channel +
- ((tx_queue->label & EFX_TXQ_TYPE_HIGHPRI) ?
+ ((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
efx->n_tx_channels : 0));
}
diff --git a/drivers/net/ethernet/sfc/tx.h b/drivers/net/ethernet/sfc/tx.h
index a3cf06c5570d..f2c4d2f89919 100644
--- a/drivers/net/ethernet/sfc/tx.h
+++ b/drivers/net/ethernet/sfc/tx.h
@@ -18,4 +18,30 @@ unsigned int efx_tx_limit_len(struct efx_tx_queue *tx_queue,
u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue,
struct efx_tx_buffer *buffer, size_t len);
+/* What TXQ type will satisfy the checksum offloads required for this skb? */
+static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0; /* no checksum offload */
+
+ if (skb->encapsulation &&
+ skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
+ /* we only advertise features for IPv4 and IPv6 checksums on
+ * encapsulated packets, so if the checksum is for the inner
+ * packet, it must be one of them; no further checking required.
+ */
+
+ /* Do we also need to offload the outer header checksum? */
+ if (skb_shinfo(skb)->gso_segs > 1 &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
+ return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
+ return EFX_TXQ_TYPE_INNER_CSUM;
+ }
+
+ /* similarly, we only advertise features for IPv4 and IPv6 checksums,
+ * so it must be one of them. No need for further checks.
+ */
+ return EFX_TXQ_TYPE_OUTER_CSUM;
+}
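+/* Hedged summary (not part of this patch) of the mapping above:
+ * no CHECKSUM_PARTIAL -> 0; inner (encapsulated) checksum -> INNER_CSUM,
+ * additionally OUTER_CSUM when a non-GSO_PARTIAL tunnel TSO must also
+ * offload the outer UDP checksum; otherwise -> OUTER_CSUM.
+ */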
#endif /* EFX_TX_H */
diff --git a/drivers/net/ethernet/sfc/tx_common.c b/drivers/net/ethernet/sfc/tx_common.c
index 793e234819a8..d530cde2b864 100644
--- a/drivers/net/ethernet/sfc/tx_common.c
+++ b/drivers/net/ethernet/sfc/tx_common.c
@@ -47,11 +47,12 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
goto fail1;
}
- /* Allocate hardware ring */
+ /* Allocate hardware ring, determine TXQ type */
rc = efx_nic_probe_tx(tx_queue);
if (rc)
goto fail2;
+ tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
return 0;
fail2:
@@ -78,18 +79,14 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) &&
tx_queue->channel == efx_ptp_channel(efx));
tx_queue->completed_timestamp_major = 0;
tx_queue->completed_timestamp_minor = 0;
tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
-
- /* Set up default function pointers. These may get replaced by
- * efx_nic_init_tx() based off NIC/queue capabilities.
- */
- tx_queue->handle_tso = efx_enqueue_skb_tso;
+ tx_queue->tso_version = 0;
/* Set up TX descriptor ring */
efx_nic_init_tx(tx_queue);
@@ -116,7 +113,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
++tx_queue->read_count;
}
- tx_queue->xmit_more_available = false;
+ tx_queue->xmit_pending = false;
netdev_tx_reset_queue(tx_queue->core_txq);
}
@@ -141,6 +138,7 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
kfree(tx_queue->buffer);
tx_queue->buffer = NULL;
+ tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
}
void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
@@ -242,7 +240,6 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
struct efx_nic *efx = tx_queue->efx;
- struct efx_tx_queue *txq2;
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
@@ -261,9 +258,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
likely(efx->port_enabled) &&
likely(netif_device_present(efx->net_dev))) {
- txq2 = efx_tx_queue_partner(tx_queue);
- fill_level = max(tx_queue->insert_count - tx_queue->read_count,
- txq2->insert_count - txq2->read_count);
+ fill_level = efx_channel_tx_fill_level(tx_queue->channel);
if (fill_level <= efx->txq_wake_thresh)
netif_tx_wake_queue(tx_queue->core_txq);
}
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index f94078f8ebe5..1fd08a04bd4e 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -301,6 +301,7 @@ struct sc92031_priv {
/* for dev->get_stats */
long rx_value;
+ struct net_device *ndev;
};
/* I don't know which registers can be safely read; however, I can guess
@@ -829,10 +830,10 @@ static void _sc92031_link_tasklet(struct net_device *dev)
}
}
-static void sc92031_tasklet(unsigned long data)
+static void sc92031_tasklet(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct sc92031_priv *priv = netdev_priv(dev);
+ struct sc92031_priv *priv = from_tasklet(priv, t, tasklet);
+ struct net_device *dev = priv->ndev;
void __iomem *port_base = priv->port_base;
u32 intr_status, intr_mask;
@@ -993,15 +994,15 @@ static int sc92031_open(struct net_device *dev)
struct sc92031_priv *priv = netdev_priv(dev);
struct pci_dev *pdev = priv->pdev;
- priv->rx_ring = pci_alloc_consistent(pdev, RX_BUF_LEN,
- &priv->rx_ring_dma_addr);
+ priv->rx_ring = dma_alloc_coherent(&pdev->dev, RX_BUF_LEN,
+ &priv->rx_ring_dma_addr, GFP_KERNEL);
if (unlikely(!priv->rx_ring)) {
err = -ENOMEM;
goto out_alloc_rx_ring;
}
- priv->tx_bufs = pci_alloc_consistent(pdev, TX_BUF_TOT_LEN,
- &priv->tx_bufs_dma_addr);
+ priv->tx_bufs = dma_alloc_coherent(&pdev->dev, TX_BUF_TOT_LEN,
+ &priv->tx_bufs_dma_addr, GFP_KERNEL);
if (unlikely(!priv->tx_bufs)) {
err = -ENOMEM;
goto out_alloc_tx_bufs;
@@ -1031,11 +1032,11 @@ static int sc92031_open(struct net_device *dev)
return 0;
out_request_irq:
- pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
- priv->tx_bufs_dma_addr);
+ dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
out_alloc_tx_bufs:
- pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
- priv->rx_ring_dma_addr);
+ dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
out_alloc_rx_ring:
return err;
}
@@ -1058,10 +1059,10 @@ static int sc92031_stop(struct net_device *dev)
spin_unlock_bh(&priv->lock);
free_irq(pdev->irq, dev);
- pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
- priv->tx_bufs_dma_addr);
- pci_free_consistent(pdev, RX_BUF_LEN, priv->rx_ring,
- priv->rx_ring_dma_addr);
+ dma_free_coherent(&pdev->dev, TX_BUF_TOT_LEN, priv->tx_bufs,
+ priv->tx_bufs_dma_addr);
+ dma_free_coherent(&pdev->dev, RX_BUF_LEN, priv->rx_ring,
+ priv->rx_ring_dma_addr);
return 0;
}
@@ -1108,7 +1109,7 @@ static void sc92031_poll_controller(struct net_device *dev)
disable_irq(irq);
if (sc92031_interrupt(irq, dev) != IRQ_NONE)
- sc92031_tasklet((unsigned long)dev);
+ sc92031_tasklet(&priv->tasklet);
enable_irq(irq);
}
#endif
@@ -1407,11 +1408,11 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (unlikely(err < 0))
goto out_set_dma_mask;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (unlikely(err < 0))
goto out_set_dma_mask;
@@ -1443,10 +1444,11 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev->ethtool_ops = &sc92031_ethtool_ops;
priv = netdev_priv(dev);
+ priv->ndev = dev;
spin_lock_init(&priv->lock);
priv->port_base = port_base;
priv->pdev = pdev;
- tasklet_init(&priv->tasklet, sc92031_tasklet, (unsigned long)dev);
+ tasklet_setup(&priv->tasklet, sc92031_tasklet);
/* Fudge tasklet count so the call to sc92031_enable_interrupts at
* sc92031_open will work correctly */
tasklet_disable_nosync(&priv->tasklet);
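A minimal standalone sketch (not from this patch) of the tasklet API conversion used above: tasklet_setup() registers the handler, and from_tasklet() (container_of() underneath) recovers the enclosing private struct, replacing the old unsigned long data argument. The struct and function names here are illustrative only.

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct sketch_priv {
	struct tasklet_struct tasklet;
	struct net_device *ndev;	/* stored at probe time, as above */
};

static void sketch_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet(var, callback_tasklet, tasklet_fieldname) */
	struct sketch_priv *priv = from_tasklet(priv, t, tasklet);

	netif_wake_queue(priv->ndev);	/* act on the recovered state */
}

/* at init time: tasklet_setup(&priv->tasklet, sketch_tasklet); */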
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cfa460c7db23..620c26f71be8 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -789,10 +789,9 @@ static u16 sis900_default_phy(struct net_device * net_dev)
static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *phy)
{
u16 cap;
- u16 status;
- status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
- status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ mdio_read(net_dev, phy->phy_addr, MII_STATUS);
+ mdio_read(net_dev, phy->phy_addr, MII_STATUS);
cap = MII_NWAY_CSMA_CD |
((phy->status & MII_STAT_CAN_TX_FDX)? MII_NWAY_TX_FDX:0) |
@@ -1302,7 +1301,7 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
/**
* sis900_timer - sis900 timer routine
- * @data: pointer to sis900 net device
+ * @t: timer list containing a pointer to sis900 net device
*
* On each timer ticks we check two things,
* link status (ON/OFF) and link mode (10/100/Full/Half)
@@ -1536,6 +1535,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
/**
* sis900_tx_timeout - sis900 transmit timeout routine
* @net_dev: the net device to transmit
+ * @txqueue: index of hanging queue
*
* print transmit timeout status
* disable interrupts and do some tasks
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index d950b312c418..51cd7dca91cd 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -374,13 +374,15 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ep->mii.phy_id_mask = 0x1f;
ep->mii.reg_num_mask = 0x1f;
- ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_iounmap;
ep->tx_ring = ring_space;
ep->tx_ring_dma = ring_dma;
- ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
if (!ring_space)
goto err_out_unmap_tx;
ep->rx_ring = ring_space;
@@ -493,9 +495,11 @@ out:
return ret;
err_out_unmap_rx:
- pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
+ ep->rx_ring_dma);
err_out_unmap_tx:
- pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
+ ep->tx_ring_dma);
err_out_iounmap:
pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
@@ -918,8 +922,10 @@ static void epic_init_ring(struct net_device *dev)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* 16 byte align the IP header. */
- ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
- skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
ep->rx_ring[i].rxstatus = DescOwn;
}
ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -955,8 +961,9 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
entry = ep->cur_tx % TX_RING_SIZE;
ep->tx_skbuff[entry] = skb;
- ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data, skb->len,
+ DMA_TO_DEVICE);
if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
ctrl_word = 0x100000; /* No interrupt */
} else if (free_count == TX_QUEUE_LEN/2) {
@@ -1036,8 +1043,9 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
/* Free the original skb. */
skb = ep->tx_skbuff[entry];
- pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&ep->pci_dev->dev,
+ ep->tx_ring[entry].bufaddr, skb->len,
+ DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
}
@@ -1178,20 +1186,21 @@ static int epic_rx(struct net_device *dev, int budget)
if (pkt_len < rx_copybreak &&
(skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
skb_reserve(skb, 2); /* 16 byte align the IP header */
- pci_dma_sync_single_for_cpu(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
} else {
- pci_unmap_single(ep->pci_dev,
- ep->rx_ring[entry].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&ep->pci_dev->dev,
+ ep->rx_ring[entry].bufaddr,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put(skb = ep->rx_skbuff[entry], pkt_len);
ep->rx_skbuff[entry] = NULL;
}
@@ -1213,8 +1222,10 @@ static int epic_rx(struct net_device *dev, int budget)
if (skb == NULL)
break;
skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
- ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
- skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
+ skb->data,
+ ep->rx_buf_sz,
+ DMA_FROM_DEVICE);
work_done++;
}
/* AV: shouldn't we add a barrier here? */
@@ -1294,8 +1305,8 @@ static int epic_close(struct net_device *dev)
ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
ep->rx_ring[i].buflength = 0;
if (skb) {
- pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
- ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
+ ep->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
}
ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
@@ -1305,8 +1316,8 @@ static int epic_close(struct net_device *dev)
ep->tx_skbuff[i] = NULL;
if (!skb)
continue;
- pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb(skb);
}
@@ -1502,8 +1513,10 @@ static void epic_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct epic_private *ep = netdev_priv(dev);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
+ ep->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
+ ep->rx_ring_dma);
unregister_netdev(dev);
pci_iounmap(pdev, ep->ioaddr);
pci_release_regions(pdev);
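For reference, a hedged summary (not part of the patch) of the legacy PCI DMA wrappers being replaced in the epic100 hunks above and the smsc9420 hunks below, assuming the generic API from <linux/dma-mapping.h>:

/* pci_alloc_consistent(pdev, sz, &h)       -> dma_alloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL)
 * pci_free_consistent(pdev, sz, p, h)      -> dma_free_coherent(&pdev->dev, sz, p, h)
 * pci_map_single(pdev, p, sz, PCI_DMA_*)   -> dma_map_single(&pdev->dev, p, sz, DMA_*)
 * pci_unmap_single(pdev, a, sz, PCI_DMA_*) -> dma_unmap_single(&pdev->dev, a, sz, DMA_*)
 * pci_dma_sync_single_for_cpu/_device(...) -> dma_sync_single_for_cpu/_device(&pdev->dev, ...)
 * pci_dma_mapping_error(pdev, addr)        -> dma_mapping_error(&pdev->dev, addr)
 * pci_set_dma_mask(pdev, mask)             -> dma_set_mask(&pdev->dev, mask)
 * pci_set_consistent_dma_mask(pdev, mask)  -> dma_set_coherent_mask(&pdev->dev, mask)
 */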
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 1c4fea9c3ec4..f6b73afd1879 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -535,10 +535,10 @@ static inline void smc_rcv(struct net_device *dev)
/*
* This is called to actually send a packet to the chip.
*/
-static void smc_hardware_send_pkt(unsigned long data)
+static void smc_hardware_send_pkt(struct tasklet_struct *t)
{
- struct net_device *dev = (struct net_device *)data;
- struct smc_local *lp = netdev_priv(dev);
+ struct smc_local *lp = from_tasklet(lp, t, tx_task);
+ struct net_device *dev = lp->dev;
void __iomem *ioaddr = lp->base;
struct sk_buff *skb;
unsigned int packet_no, len;
@@ -688,7 +688,7 @@ smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Allocation succeeded: push packet to the chip's own memory
* immediately.
*/
- smc_hardware_send_pkt((unsigned long)dev);
+ smc_hardware_send_pkt(&lp->tx_task);
}
return NETDEV_TX_OK;
@@ -1036,7 +1036,6 @@ static void smc_phy_configure(struct work_struct *work)
int phyaddr = lp->mii.phy_id;
int my_phy_caps; /* My PHY capabilities */
int my_ad_caps; /* My Advertised capabilities */
- int status;
DBG(3, dev, "smc_program_phy()\n");
@@ -1110,7 +1109,7 @@ static void smc_phy_configure(struct work_struct *work)
* auto-negotiation is restarted, sometimes it isn't ready and
* the link does not come up.
*/
- status = smc_phy_read(dev, phyaddr, MII_ADVERTISE);
+ smc_phy_read(dev, phyaddr, MII_ADVERTISE);
DBG(2, dev, "phy caps=%x\n", my_phy_caps);
DBG(2, dev, "phy advertised caps=%x\n", my_ad_caps);
@@ -1965,7 +1964,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
dev->netdev_ops = &smc_netdev_ops;
dev->ethtool_ops = &smc_ethtool_ops;
- tasklet_init(&lp->tx_task, smc_hardware_send_pkt, (unsigned long)dev);
+ tasklet_setup(&lp->tx_task, smc_hardware_send_pkt);
INIT_WORK(&lp->phy_configure, smc_phy_configure);
lp->dev = dev;
lp->mii.phy_id_mask = 0x1f;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index fc168f85e7af..823d9a7184fe 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1196,9 +1196,8 @@ smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
SMSC_WARN(pdata, hw, "Timed out waiting for "
"RX FFWD to finish, RX_DP_CTRL: 0x%08X", val);
} else {
- unsigned int temp;
while (pktwords--)
- temp = smsc911x_reg_read(pdata, RX_DATA_FIFO);
+ smsc911x_reg_read(pdata, RX_DATA_FIFO);
}
}
@@ -2055,7 +2054,6 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
u8 address, u8 data)
{
u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
- u32 temp;
int ret;
SMSC_TRACE(pdata, drv, "address 0x%x, data 0x%x", address, data);
@@ -2066,7 +2064,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
/* Workaround for hardware read-after-write restriction */
- temp = smsc911x_reg_read(pdata, BYTE_TEST);
+ smsc911x_reg_read(pdata, BYTE_TEST);
ret = smsc911x_eeprom_send_cmd(pdata, op);
}
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 42bef04d65ba..c1dab009415d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -497,8 +497,9 @@ static void smsc9420_free_tx_ring(struct smsc9420_pdata *pd)
if (skb) {
BUG_ON(!pd->tx_buffers[i].mapping);
- pci_unmap_single(pd->pdev, pd->tx_buffers[i].mapping,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->tx_buffers[i].mapping, skb->len,
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
@@ -530,8 +531,9 @@ static void smsc9420_free_rx_ring(struct smsc9420_pdata *pd)
dev_kfree_skb_any(pd->rx_buffers[i].skb);
if (pd->rx_buffers[i].mapping)
- pci_unmap_single(pd->pdev, pd->rx_buffers[i].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->rx_buffers[i].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
pd->rx_ring[i].status = 0;
pd->rx_ring[i].length = 0;
@@ -749,8 +751,8 @@ static void smsc9420_rx_handoff(struct smsc9420_pdata *pd, const int index,
dev->stats.rx_packets++;
dev->stats.rx_bytes += packet_length;
- pci_unmap_single(pd->pdev, pd->rx_buffers[index].mapping,
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pd->pdev->dev, pd->rx_buffers[index].mapping,
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
pd->rx_buffers[index].mapping = 0;
skb = pd->rx_buffers[index].skb;
@@ -782,9 +784,9 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
if (unlikely(!skb))
return -ENOMEM;
- mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
- PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ mapping = dma_map_single(&pd->pdev->dev, skb_tail_pointer(skb),
+ PKT_BUF_SZ, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pd->pdev->dev, mapping)) {
dev_kfree_skb_any(skb);
netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
return -ENOMEM;
@@ -901,8 +903,10 @@ static void smsc9420_complete_tx(struct net_device *dev)
BUG_ON(!pd->tx_buffers[index].skb);
BUG_ON(!pd->tx_buffers[index].mapping);
- pci_unmap_single(pd->pdev, pd->tx_buffers[index].mapping,
- pd->tx_buffers[index].skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pd->pdev->dev,
+ pd->tx_buffers[index].mapping,
+ pd->tx_buffers[index].skb->len,
+ DMA_TO_DEVICE);
pd->tx_buffers[index].mapping = 0;
dev_kfree_skb_any(pd->tx_buffers[index].skb);
@@ -932,9 +936,9 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
BUG_ON(pd->tx_buffers[index].skb);
BUG_ON(pd->tx_buffers[index].mapping);
- mapping = pci_map_single(pd->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pd->pdev, mapping)) {
+ mapping = dma_map_single(&pd->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&pd->pdev->dev, mapping)) {
netif_warn(pd, tx_err, pd->dev,
"pci_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
@@ -1522,7 +1526,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_free_netdev_2;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
netdev_err(dev, "No usable DMA configuration, aborting\n");
goto out_free_regions_3;
}
@@ -1540,10 +1544,9 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pd = netdev_priv(dev);
/* pci descriptors are created in the PCI consistent area */
- pd->rx_ring = pci_alloc_consistent(pdev,
- sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE +
- sizeof(struct smsc9420_dma_desc) * TX_RING_SIZE,
- &pd->rx_dma_addr);
+ pd->rx_ring = dma_alloc_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ &pd->rx_dma_addr, GFP_KERNEL);
if (!pd->rx_ring)
goto out_free_io_4;
@@ -1599,8 +1602,9 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_free_dmadesc_5:
- pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
- (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ pd->rx_ring, pd->rx_dma_addr);
out_free_io_4:
iounmap(virt_addr - LAN9420_CPSR_ENDIAN_OFFSET);
out_free_regions_3:
@@ -1632,8 +1636,9 @@ static void smsc9420_remove(struct pci_dev *pdev)
BUG_ON(!pd->tx_ring);
BUG_ON(!pd->rx_ring);
- pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
- (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct smsc9420_dma_desc) * (RX_RING_SIZE + TX_RING_SIZE),
+ pd->rx_ring, pd->rx_dma_addr);
iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
pci_release_regions(pdev);
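
The smsc9420 conversion is a mechanical migration from the deprecated pci_* DMA wrappers to the generic DMA API: every call gains an explicit &pdev->dev, PCI_DMA_{TO,FROM}DEVICE becomes DMA_{TO,FROM}_DEVICE, and allocations state their GFP context. A sketch of the correspondence (map_tx_buf is a hypothetical helper, not part of the driver):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* pci_set_dma_mask(pdev, m)       -> dma_set_mask(&pdev->dev, m)
     * pci_alloc_consistent(pdev, ...) -> dma_alloc_coherent(&pdev->dev, ..., GFP_KERNEL)
     * pci_map_single(pdev, p, n, PCI_DMA_TODEVICE)
     *                                 -> dma_map_single(&pdev->dev, p, n, DMA_TO_DEVICE)
     * pci_dma_mapping_error(pdev, a)  -> dma_mapping_error(&pdev->dev, a) */
    static int map_tx_buf(struct pci_dev *pdev, void *buf, size_t len,
                          dma_addr_t *handle)
    {
            *handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(&pdev->dev, *handle))
                    return -ENOMEM;       /* caller drops the packet */
            return 0;
    }
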
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 81b554dd7221..501b9c7aba56 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1585,7 +1585,7 @@ static int ave_probe(struct platform_device *pdev)
if (IS_ERR(base))
return PTR_ERR(base);
- ndev = alloc_etherdev(sizeof(struct ave_private));
+ ndev = devm_alloc_etherdev(dev, sizeof(struct ave_private));
if (!ndev) {
dev_err(dev, "can't allocate ethernet device\n");
return -ENOMEM;
@@ -1632,7 +1632,7 @@ static int ave_probe(struct platform_device *pdev)
}
ret = dma_set_mask(dev, dma_mask);
if (ret)
- goto out_free_netdev;
+ return ret;
priv->tx.ndesc = AVE_NR_TXDESC;
priv->rx.ndesc = AVE_NR_RXDESC;
@@ -1645,10 +1645,8 @@ static int ave_probe(struct platform_device *pdev)
if (!name)
break;
priv->clk[i] = devm_clk_get(dev, name);
- if (IS_ERR(priv->clk[i])) {
- ret = PTR_ERR(priv->clk[i]);
- goto out_free_netdev;
- }
+ if (IS_ERR(priv->clk[i]))
+ return PTR_ERR(priv->clk[i]);
priv->nclks++;
}
@@ -1657,10 +1655,8 @@ static int ave_probe(struct platform_device *pdev)
if (!name)
break;
priv->rst[i] = devm_reset_control_get_shared(dev, name);
- if (IS_ERR(priv->rst[i])) {
- ret = PTR_ERR(priv->rst[i]);
- goto out_free_netdev;
- }
+ if (IS_ERR(priv->rst[i]))
+ return PTR_ERR(priv->rst[i]);
priv->nrsts++;
}
@@ -1669,26 +1665,23 @@ static int ave_probe(struct platform_device *pdev)
1, 0, &args);
if (ret) {
dev_err(dev, "can't get syscon-phy-mode property\n");
- goto out_free_netdev;
+ return ret;
}
priv->regmap = syscon_node_to_regmap(args.np);
of_node_put(args.np);
if (IS_ERR(priv->regmap)) {
dev_err(dev, "can't map syscon-phy-mode\n");
- ret = PTR_ERR(priv->regmap);
- goto out_free_netdev;
+ return PTR_ERR(priv->regmap);
}
ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
if (ret) {
dev_err(dev, "invalid phy-mode setting\n");
- goto out_free_netdev;
+ return ret;
}
priv->mdio = devm_mdiobus_alloc(dev);
- if (!priv->mdio) {
- ret = -ENOMEM;
- goto out_free_netdev;
- }
+ if (!priv->mdio)
+ return -ENOMEM;
priv->mdio->priv = ndev;
priv->mdio->parent = dev;
priv->mdio->read = ave_mdiobus_read;
@@ -1725,8 +1718,6 @@ static int ave_probe(struct platform_device *pdev)
out_del_napi:
netif_napi_del(&priv->napi_rx);
netif_napi_del(&priv->napi_tx);
-out_free_netdev:
- free_netdev(ndev);
return ret;
}
@@ -1739,7 +1730,6 @@ static int ave_remove(struct platform_device *pdev)
unregister_netdev(ndev);
netif_napi_del(&priv->napi_rx);
netif_napi_del(&priv->napi_tx);
- free_netdev(ndev);
return 0;
}
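
The switch to devm_alloc_etherdev() in sni_ave.c is what lets every out_free_netdev label and both free_netdev() calls go away: the net_device is now released automatically when the underlying struct device is unbound or probe fails. The resulting probe shape, as a sketch (foo_priv and foo_probe are hypothetical):

    #include <linux/etherdevice.h>
    #include <linux/platform_device.h>

    struct foo_priv { int placeholder; };  /* hypothetical private area */

    static int foo_probe(struct platform_device *pdev)
    {
            struct net_device *ndev;

            ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct foo_priv));
            if (!ndev)
                    return -ENOMEM;

            /* error paths below may simply 'return ret;' since the devm
             * core frees ndev on probe failure and at driver unbind */
            return register_netdev(ndev);
    }

Note that unregister_netdev() is still required in remove(); only the free_netdev() becomes implicit.
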
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 9a47c5aec91a..53f14c5a9e02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -3,7 +3,7 @@ config STMMAC_ETH
tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
- select MDIO_XPCS
+ select PCS_XPCS
select PAGE_POOL
select PHYLINK
select CRC32
@@ -209,6 +209,16 @@ config DWMAC_IMX8
device driver. This driver is used for i.MX8 series like
iMX8MP/iMX8DXL GMAC ethernet controller.
+config DWMAC_INTEL_PLAT
+ tristate "Intel dwmac support"
+ depends on OF && COMMON_CLK
+ depends on STMMAC_ETH
+ help
+ Support for Ethernet controllers on Intel SoCs.
+
+ This selects the Intel platform specific glue layer support for
+ the stmmac device driver. This driver is used for the Intel Keem Bay
+ SoC.
endif
config DWMAC_INTEL
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 295615ab36a7..24e6145d4eae 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
+obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
stmmac-platform-objs:= stmmac_platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 52971f5293aa..d2cdc02d9f94 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -46,7 +46,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
while (len != 0) {
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
desc = tx_q->dma_tx + entry;
if (len > bmax) {
@@ -137,7 +137,7 @@ static void refill_desc3(void *priv_ptr, struct dma_desc *p)
*/
p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
(((rx_q->dirty_rx) + 1) %
- DMA_RX_SIZE) *
+ priv->dma_rx_size) *
sizeof(struct dma_desc)));
}
@@ -154,7 +154,8 @@ static void clean_desc3(void *priv_ptr, struct dma_desc *p)
* to keep explicit chaining in the descriptor.
*/
p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
- ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
+ ((tx_q->dirty_tx + 1) %
+ priv->dma_tx_size))
* sizeof(struct dma_desc)));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 127f75862962..df7de50497a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -15,7 +15,7 @@
#include <linux/netdevice.h>
#include <linux/stmmac.h>
#include <linux/phy.h>
-#include <linux/mdio-xpcs.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/module.h>
#if IS_ENABLED(CONFIG_VLAN_8021Q)
#define STMMAC_VLAN_TAG_USED
@@ -42,9 +42,16 @@
#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
-/* These need to be power of two, and >= 4 */
-#define DMA_TX_SIZE 512
-#define DMA_RX_SIZE 512
+/* TX and RX descriptor ring lengths; these must be a power of two.
+ * A TX ring shorter than 64 entries may cause "transmit queue timed out";
+ * an RX ring shorter than 64 entries may cause inconsistent Rx chain errors.
+ */
+#define DMA_MIN_TX_SIZE 64
+#define DMA_MAX_TX_SIZE 1024
+#define DMA_DEFAULT_TX_SIZE 512
+#define DMA_MIN_RX_SIZE 64
+#define DMA_MAX_RX_SIZE 1024
+#define DMA_DEFAULT_RX_SIZE 512
#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
#undef FRAME_FILTER_DEBUG
@@ -474,6 +481,8 @@ struct mac_device_info {
unsigned int num_vlan;
u32 vlan_filter[32];
unsigned int promisc;
+ bool vlan_fail_q_en;
+ u8 vlan_fail_q;
};
struct stmmac_rx_routing {
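
The DMA_{MIN,MAX,DEFAULT}_{TX,RX}_SIZE bounds replace the fixed 512-entry rings with runtime-configurable lengths, and the power-of-two requirement is load-bearing: STMMAC_GET_ENTRY() advances a ring index with a mask, not a modulo. A standalone illustration of why a non-power-of-two length corrupts the walk:

    #include <assert.h>

    #define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))

    int main(void)
    {
            assert(STMMAC_GET_ENTRY(511, 512) == 0); /* clean wrap-around */
            assert(STMMAC_GET_ENTRY(0, 512) == 1);
            /* with size 500 the "mask" is 499 (0b111110011), so index 3
             * jumps to 0 instead of 4, which is why the new ethtool
             * set_ringparam handler insists on is_power_of_2() */
            assert(STMMAC_GET_ENTRY(3, 500) == 0);
            return 0;
    }
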
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
index 3c5df5eeed6c..efef5476a577 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -129,8 +129,7 @@ static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct imx_priv_data *dwmac = priv;
- if (dwmac->clk_tx)
- clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_tx);
clk_disable_unprepare(dwmac->clk_mem);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
new file mode 100644
index 000000000000..f61cb997a8f6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Intel DWMAC platform driver
+ *
+ * Copyright(C) 2020 Intel Corporation
+ */
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "dwmac4.h"
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+struct intel_dwmac {
+ struct device *dev;
+ struct clk *tx_clk;
+ const struct intel_dwmac_data *data;
+};
+
+struct intel_dwmac_data {
+ void (*fix_mac_speed)(void *priv, unsigned int speed);
+ unsigned long ptp_ref_clk_rate;
+ unsigned long tx_clk_rate;
+ bool tx_clk_en;
+};
+
+static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct intel_dwmac *dwmac = priv;
+ unsigned long rate;
+ int ret;
+
+ rate = clk_get_rate(dwmac->tx_clk);
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "Invalid speed\n");
+ break;
+ }
+
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret)
+ dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
+}
+
+static const struct intel_dwmac_data kmb_data = {
+ .fix_mac_speed = kmb_eth_fix_mac_speed,
+ .ptp_ref_clk_rate = 200000000,
+ .tx_clk_rate = 125000000,
+ .tx_clk_en = true,
+};
+
+static const struct of_device_id intel_eth_plat_match[] = {
+ { .compatible = "intel,keembay-dwmac", .data = &kmb_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, intel_eth_plat_match);
+
+static int intel_eth_plat_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct of_device_id *match;
+ struct intel_dwmac *dwmac;
+ unsigned long rate;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->dev = &pdev->dev;
+ dwmac->tx_clk = NULL;
+
+ match = of_match_device(intel_eth_plat_match, &pdev->dev);
+ if (match && match->data) {
+ dwmac->data = (const struct intel_dwmac_data *)match->data;
+
+ if (dwmac->data->fix_mac_speed)
+ plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
+
+ /* Enable TX clock */
+ if (dwmac->data->tx_clk_en) {
+ dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(dwmac->tx_clk)) {
+ ret = PTR_ERR(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+
+ clk_prepare_enable(dwmac->tx_clk);
+
+ /* Check and configure TX clock rate */
+ rate = clk_get_rate(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_rate &&
+ rate != dwmac->data->tx_clk_rate) {
+ rate = dwmac->data->tx_clk_rate;
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set tx_clk\n");
+ clk_disable_unprepare(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+ }
+ }
+
+ /* Check and configure PTP ref clock rate */
+ rate = clk_get_rate(plat_dat->clk_ptp_ref);
+ if (dwmac->data->ptp_ref_clk_rate &&
+ rate != dwmac->data->ptp_ref_clk_rate) {
+ rate = dwmac->data->ptp_ref_clk_rate;
+ ret = clk_set_rate(plat_dat->clk_ptp_ref, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set clk_ptp_ref\n");
+ clk_disable_unprepare(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+ }
+ }
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
+
+ if (plat_dat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret) {
+ clk_disable_unprepare(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int intel_eth_plat_remove(struct platform_device *pdev)
+{
+ struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+ clk_disable_unprepare(dwmac->tx_clk);
+
+ return ret;
+}
+
+static struct platform_driver intel_eth_plat_driver = {
+ .probe = intel_eth_plat_probe,
+ .remove = intel_eth_plat_remove,
+ .driver = {
+ .name = "intel-eth-plat",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = intel_eth_plat_match,
+ },
+};
+module_platform_driver(intel_eth_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel DWMAC platform driver");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 9e6d60e75f85..b6e5e3e36b63 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/dmi.h>
#include "dwmac-intel.h"
+#include "dwmac4.h"
#include "stmmac.h"
struct intel_priv_data {
@@ -295,6 +296,7 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
plat->axi->axi_blen[2] = 16;
plat->ptp_max_adj = plat->clk_ptp_rate;
+ plat->eee_usecs_rate = plat->clk_ptp_rate;
/* Set system clock */
plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
@@ -321,6 +323,11 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Set the maxmtu to a default of JUMBO_LEN */
plat->maxmtu = JUMBO_LEN;
+ plat->vlan_fail_q_en = true;
+
+ /* Use the last Rx queue */
+ plat->vlan_fail_q = plat->rx_queues_to_use - 1;
+
return 0;
}
@@ -618,6 +625,13 @@ static int intel_eth_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
+ if (plat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
if (ret < 0)
return ret;
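
Both the PCI glue and the new platform glue program GMAC_1US_TIC_COUNTER so the MAC can derive a 1 us time base for its EEE/LPI timers from the PTP reference clock; the register holds cycles-per-microsecond minus one. The arithmetic, restated as a sketch (gmac_1us_tic is a hypothetical helper):

    #include <linux/types.h>

    static inline u32 gmac_1us_tic(unsigned long clk_rate_hz)
    {
            /* the Keem Bay 200 MHz ptp_ref clock gives 200 cycles per
             * microsecond, i.e. a register value of 199 */
            return (clk_rate_hz / 1000000) - 1;
    }
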
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 2d5573b3dee1..6ef30252bfe0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/**
- * dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
+ * DOC: dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
*
* Copyright (C) 2014 Chen-Zhi (Roger Chen)
*
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index 61f3249bd724..592b043f9676 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -76,6 +76,7 @@
#define GMAC_PACKET_FILTER_HPF BIT(10)
#define GMAC_PACKET_FILTER_VTFE BIT(16)
#define GMAC_PACKET_FILTER_IPFE BIT(20)
+#define GMAC_PACKET_FILTER_RA BIT(31)
#define GMAC_MAX_PERFECT_ADDRESSES 128
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index ecd834e0e121..002791b77356 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -618,7 +618,18 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
value &= ~GMAC_PACKET_FILTER_PM;
value &= ~GMAC_PACKET_FILTER_PR;
if (dev->flags & IFF_PROMISC) {
- value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
+ /* VLAN Tag Filter Fail Packets Queuing */
+ if (hw->vlan_fail_q_en) {
+ value = readl(ioaddr + GMAC_RXQ_CTRL4);
+ value &= ~GMAC_RXQCTRL_VFFQ_MASK;
+ value |= GMAC_RXQCTRL_VFFQE |
+ (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
+ writel(value, ioaddr + GMAC_RXQ_CTRL4);
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
+ } else {
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
+ }
+
} else if ((dev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(dev) > hw->multicast_filter_bins)) {
/* Pass all multi */
@@ -680,7 +691,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
writel(value, ioaddr + GMAC_PACKET_FILTER);
- if (dev->flags & IFF_PROMISC) {
+ if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
if (!hw->promisc) {
hw->promisc = 1;
dwmac4_vlan_promisc_enable(dev, hw);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index eff82065a501..c6540b003b43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -494,10 +494,9 @@ static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
}
-static int dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
*len = le32_to_cpu(p->des2) & RDES2_HL;
- return 0;
}
static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
index 3e8faa96b4d4..56b0762c1276 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -92,6 +92,12 @@
#define TCEIE BIT(0)
#define DMA_ECC_INT_STATUS 0x00001088
+/* EQoS version 5.xx VLAN Tag Filter Fail Packets Queuing */
+#define GMAC_RXQ_CTRL4 0x00000094
+#define GMAC_RXQCTRL_VFFQ_MASK GENMASK(19, 17)
+#define GMAC_RXQCTRL_VFFQ_SHIFT 17
+#define GMAC_RXQCTRL_VFFQE BIT(16)
+
int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp);
int dwmac5_safety_feat_irq_status(struct net_device *ndev,
void __iomem *ioaddr, unsigned int asp,
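
The three GMAC_RXQCTRL_VFFQ* fields are consumed by the dwmac4_set_filter() hunk above: in promiscuous mode with vlan_fail_q_en set, frames that fail the VLAN filter are steered to a designated RX queue (combined with receive-all) instead of relying on PCF. The read-modify-write in isolation, as a sketch (route_vlan_fail is a hypothetical helper; the constants come from the diff):

    #include <linux/bits.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define GMAC_RXQ_CTRL4          0x00000094
    #define GMAC_RXQCTRL_VFFQ_MASK  GENMASK(19, 17)
    #define GMAC_RXQCTRL_VFFQ_SHIFT 17
    #define GMAC_RXQCTRL_VFFQE      BIT(16)

    /* route VLAN-filter-fail frames to RX queue 'q' */
    static void route_vlan_fail(void __iomem *ioaddr, u8 q)
    {
            u32 val = readl(ioaddr + GMAC_RXQ_CTRL4);

            val &= ~GMAC_RXQCTRL_VFFQ_MASK;
            val |= GMAC_RXQCTRL_VFFQE | (q << GMAC_RXQCTRL_VFFQ_SHIFT);
            writel(val, ioaddr + GMAC_RXQ_CTRL4);
    }
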
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index c3d654cfa9ef..0aaf19ab5672 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -286,11 +286,10 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
return -EINVAL;
}
-static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
{
if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
*len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
- return 0;
}
static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index ffe2d63389b8..e2dca9b6e992 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -90,7 +90,7 @@ struct stmmac_desc_ops {
/* RSS */
int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
enum pkt_hash_types *type);
- int (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+ void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr);
void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
@@ -150,7 +150,7 @@ struct stmmac_desc_ops {
#define stmmac_get_rx_hash(__priv, __args...) \
stmmac_do_callback(__priv, desc, get_rx_hash, __args)
#define stmmac_get_rx_header_len(__priv, __args...) \
- stmmac_do_callback(__priv, desc, get_rx_header_len, __args)
+ stmmac_do_void_callback(__priv, desc, get_rx_header_len, __args)
#define stmmac_set_desc_sec_addr(__priv, __args...) \
stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
#define stmmac_set_desc_sarc(__priv, __args...) \
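
Since get_rx_header_len() can no longer fail, its dispatch switches from stmmac_do_callback (which propagates the callback's int return) to stmmac_do_void_callback (which only reports whether the op exists). Roughly, as a sketch of the wrapper's shape rather than the verbatim header:

    #define stmmac_do_void_callback(__priv, __module, __cname, __args...) \
    ({ \
            int __result = -EINVAL; \
            if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
                    (__priv)->hw->__module->__cname(__args); \
                    __result = 0; \
            } \
            __result; \
    })
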
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 14bd5e7b9875..8ad900949dc8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -51,7 +51,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
STMMAC_RING_MODE, 0, false, skb->len);
tx_q->tx_skbuff[entry] = NULL;
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
if (priv->extend_desc)
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 545696971f65..727e68dfaf1c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -171,9 +171,11 @@ struct stmmac_priv {
/* RX Queue */
struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
/* TX Queue */
struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
/* Generic channel for NAPI */
struct stmmac_channel channel[STMMAC_CH_MAX];
@@ -266,6 +268,8 @@ int stmmac_dvr_probe(struct device *device,
struct stmmac_resources *res);
void stmmac_disable_eee_mode(struct stmmac_priv *priv);
bool stmmac_eee_init(struct stmmac_priv *priv);
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 814879f91f76..9e54f953634b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -440,6 +440,33 @@ static int stmmac_nway_reset(struct net_device *dev)
return phylink_ethtool_nway_reset(priv->phylink);
}
+static void stmmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ ring->rx_max_pending = DMA_MAX_RX_SIZE;
+ ring->tx_max_pending = DMA_MAX_TX_SIZE;
+ ring->rx_pending = priv->dma_rx_size;
+ ring->tx_pending = priv->dma_tx_size;
+}
+
+static int stmmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending < DMA_MIN_RX_SIZE ||
+ ring->rx_pending > DMA_MAX_RX_SIZE ||
+ !is_power_of_2(ring->rx_pending) ||
+ ring->tx_pending < DMA_MIN_TX_SIZE ||
+ ring->tx_pending > DMA_MAX_TX_SIZE ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ return stmmac_reinit_ringparam(netdev, ring->rx_pending,
+ ring->tx_pending);
+}
+
static void
stmmac_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
@@ -843,6 +870,30 @@ static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
priv->plat->rx_queues_to_use);
}
+static void stmmac_get_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ chan->rx_count = priv->plat->rx_queues_to_use;
+ chan->tx_count = priv->plat->tx_queues_to_use;
+ chan->max_rx = priv->dma_cap.number_rx_queues;
+ chan->max_tx = priv->dma_cap.number_tx_queues;
+}
+
+static int stmmac_set_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (chan->rx_count > priv->dma_cap.number_rx_queues ||
+ chan->tx_count > priv->dma_cap.number_tx_queues ||
+ !chan->rx_count || !chan->tx_count)
+ return -EINVAL;
+
+ return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count);
+}
+
static int stmmac_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -926,6 +977,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_regs_len = stmmac_ethtool_get_regs_len,
.get_link = ethtool_op_get_link,
.nway_reset = stmmac_nway_reset,
+ .get_ringparam = stmmac_get_ringparam,
+ .set_ringparam = stmmac_set_ringparam,
.get_pauseparam = stmmac_get_pauseparam,
.set_pauseparam = stmmac_set_pauseparam,
.self_test = stmmac_selftest_run,
@@ -944,6 +997,8 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
.get_ts_info = stmmac_get_ts_info,
.get_coalesce = stmmac_get_coalesce,
.set_coalesce = stmmac_set_coalesce,
+ .get_channels = stmmac_get_channels,
+ .set_channels = stmmac_set_channels,
.get_tunable = stmmac_get_tunable,
.set_tunable = stmmac_set_tunable,
.get_link_ksettings = stmmac_ethtool_get_link_ksettings,
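
The new ethtool hooks make the ring length and queue count runtime-tunable (ethtool -G for rings, ethtool -L for channels). stmmac_set_ringparam() gates requests before the costly teardown and reinit; its acceptance test, restated as a sketch (ring_len_ok is a hypothetical helper):

    #include <linux/log2.h>
    #include <linux/types.h>

    static bool ring_len_ok(u32 pending, u32 min, u32 max)
    {
            /* within [64, 1024] for stmmac, and a power of two so the
             * STMMAC_GET_ENTRY() index mask stays valid */
            return pending >= min && pending <= max && is_power_of_2(pending);
    }

For example, "ethtool -G eth0 rx 256 tx 1024" passes, while "rx 1000" fails the power-of-two test and the handler returns -EINVAL.
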
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index b56b13d64ab4..220626a8d499 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -63,8 +63,8 @@ static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");
-#define STMMAC_TX_THRESH (DMA_TX_SIZE / 4)
-#define STMMAC_RX_THRESH (DMA_RX_SIZE / 4)
+#define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
static int flow_ctrl = FLOW_AUTO;
module_param(flow_ctrl, int, 0644);
@@ -176,32 +176,6 @@ static void stmmac_enable_all_queues(struct stmmac_priv *priv)
}
}
-/**
- * stmmac_stop_all_queues - Stop all queues
- * @priv: driver private structure
- */
-static void stmmac_stop_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
-/**
- * stmmac_start_all_queues - Start all queues
- * @priv: driver private structure
- */
-static void stmmac_start_all_queues(struct stmmac_priv *priv)
-{
- u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
- u32 queue;
-
- for (queue = 0; queue < tx_queues_cnt; queue++)
- netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
-}
-
static void stmmac_service_event_schedule(struct stmmac_priv *priv)
{
if (!test_bit(STMMAC_DOWN, &priv->state) &&
@@ -297,7 +271,7 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
if (tx_q->dirty_tx > tx_q->cur_tx)
avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
else
- avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
+ avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
return avail;
}
@@ -315,7 +289,7 @@ static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
if (rx_q->dirty_rx <= rx_q->cur_rx)
dirty = rx_q->cur_rx - rx_q->dirty_rx;
else
- dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
+ dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
return dirty;
}
@@ -360,7 +334,7 @@ void stmmac_disable_eee_mode(struct stmmac_priv *priv)
/**
* stmmac_eee_ctrl_timer - EEE TX SW timer.
- * @arg : data hook
+ * @t: timer_list struct containing private info
* Description:
* if there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
@@ -736,7 +710,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
* a proprietary structure used to pass information to the driver.
* Description:
* This function obtain the current hardware timestamping settings
- as requested.
+ * as requested.
*/
static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
@@ -789,14 +763,14 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
- if (priv->plat->clk_ptp_ref)
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
stmmac_ptp_unregister(priv);
}
/**
* stmmac_mac_flow_ctrl - Configure flow control in all queues
* @priv: driver private structure
+ * @duplex: duplex passed to the next function
* Description: It is used for configuring the flow control in all queues
*/
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
@@ -1150,7 +1124,7 @@ static void stmmac_display_rx_rings(struct stmmac_priv *priv)
head_rx = (void *)rx_q->dma_rx;
/* Display RX ring */
- stmmac_display_ring(priv, head_rx, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true);
}
}
@@ -1173,7 +1147,7 @@ static void stmmac_display_tx_rings(struct stmmac_priv *priv)
else
head_tx = (void *)tx_q->dma_tx;
- stmmac_display_ring(priv, head_tx, DMA_TX_SIZE, false);
+ stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false);
}
}
@@ -1217,16 +1191,16 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the RX descriptors */
- for (i = 0; i < DMA_RX_SIZE; i++)
+ for (i = 0; i < priv->dma_rx_size; i++)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1),
+ (i == priv->dma_rx_size - 1),
priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1),
+ (i == priv->dma_rx_size - 1),
priv->dma_buf_sz);
}
@@ -1243,8 +1217,8 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
int i;
/* Clear the TX descriptors */
- for (i = 0; i < DMA_TX_SIZE; i++) {
- int last = (i == (DMA_TX_SIZE - 1));
+ for (i = 0; i < priv->dma_tx_size; i++) {
+ int last = (i == (priv->dma_tx_size - 1));
struct dma_desc *p;
if (priv->extend_desc)
@@ -1398,7 +1372,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
stmmac_clear_rx_descriptors(priv, queue);
- for (i = 0; i < DMA_RX_SIZE; i++) {
+ for (i = 0; i < priv->dma_rx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
@@ -1413,16 +1387,18 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
}
rx_q->cur_rx = 0;
- rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+ rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size);
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, rx_q->dma_erx,
- rx_q->dma_rx_phy, DMA_RX_SIZE, 1);
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 1);
else
stmmac_mode_init(priv, rx_q->dma_rx,
- rx_q->dma_rx_phy, DMA_RX_SIZE, 0);
+ rx_q->dma_rx_phy,
+ priv->dma_rx_size, 0);
}
}
@@ -1436,7 +1412,7 @@ err_init_rx_buffers:
if (queue == 0)
break;
- i = DMA_RX_SIZE;
+ i = priv->dma_rx_size;
queue--;
}
@@ -1468,13 +1444,15 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc)
stmmac_mode_init(priv, tx_q->dma_etx,
- tx_q->dma_tx_phy, DMA_TX_SIZE, 1);
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 1);
else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
stmmac_mode_init(priv, tx_q->dma_tx,
- tx_q->dma_tx_phy, DMA_TX_SIZE, 0);
+ tx_q->dma_tx_phy,
+ priv->dma_tx_size, 0);
}
- for (i = 0; i < DMA_TX_SIZE; i++) {
+ for (i = 0; i < priv->dma_tx_size; i++) {
struct dma_desc *p;
if (priv->extend_desc)
p = &((tx_q->dma_etx + i)->basic);
@@ -1538,7 +1516,7 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_RX_SIZE; i++)
+ for (i = 0; i < priv->dma_rx_size; i++)
stmmac_free_rx_buffer(priv, queue, i);
}
@@ -1551,7 +1529,7 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
{
int i;
- for (i = 0; i < DMA_TX_SIZE; i++)
+ for (i = 0; i < priv->dma_tx_size; i++)
stmmac_free_tx_buffer(priv, queue, i);
}
@@ -1573,11 +1551,11 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
/* Free DMA regions of consistent memory previously allocated */
if (!priv->extend_desc)
- dma_free_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
rx_q->dma_rx, rx_q->dma_rx_phy);
else
- dma_free_coherent(priv->device, DMA_RX_SIZE *
+ dma_free_coherent(priv->device, priv->dma_rx_size *
sizeof(struct dma_extended_desc),
rx_q->dma_erx, rx_q->dma_rx_phy);
@@ -1616,7 +1594,7 @@ static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
addr = tx_q->dma_tx;
}
- size *= DMA_TX_SIZE;
+ size *= priv->dma_tx_size;
dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
@@ -1649,7 +1627,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
rx_q->priv_data = priv;
pp_params.flags = PP_FLAG_DMA_MAP;
- pp_params.pool_size = DMA_RX_SIZE;
+ pp_params.pool_size = priv->dma_rx_size;
num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
pp_params.order = ilog2(num_pages);
pp_params.nid = dev_to_node(priv->device);
@@ -1663,14 +1641,16 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
goto err_dma;
}
- rx_q->buf_pool = kcalloc(DMA_RX_SIZE, sizeof(*rx_q->buf_pool),
+ rx_q->buf_pool = kcalloc(priv->dma_rx_size,
+ sizeof(*rx_q->buf_pool),
GFP_KERNEL);
if (!rx_q->buf_pool)
goto err_dma;
if (priv->extend_desc) {
rx_q->dma_erx = dma_alloc_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_extended_desc),
+ priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
if (!rx_q->dma_erx)
@@ -1678,7 +1658,8 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
} else {
rx_q->dma_rx = dma_alloc_coherent(priv->device,
- DMA_RX_SIZE * sizeof(struct dma_desc),
+ priv->dma_rx_size *
+ sizeof(struct dma_desc),
&rx_q->dma_rx_phy,
GFP_KERNEL);
if (!rx_q->dma_rx)
@@ -1717,13 +1698,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
tx_q->queue_index = queue;
tx_q->priv_data = priv;
- tx_q->tx_skbuff_dma = kcalloc(DMA_TX_SIZE,
+ tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size,
sizeof(*tx_q->tx_skbuff_dma),
GFP_KERNEL);
if (!tx_q->tx_skbuff_dma)
goto err_dma;
- tx_q->tx_skbuff = kcalloc(DMA_TX_SIZE,
+ tx_q->tx_skbuff = kcalloc(priv->dma_tx_size,
sizeof(struct sk_buff *),
GFP_KERNEL);
if (!tx_q->tx_skbuff)
@@ -1736,7 +1717,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
else
size = sizeof(struct dma_desc);
- size *= DMA_TX_SIZE;
+ size *= priv->dma_tx_size;
addr = dma_alloc_coherent(priv->device, size,
&tx_q->dma_tx_phy, GFP_KERNEL);
@@ -1965,6 +1946,7 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
/**
* stmmac_tx_clean - to manage the transmission completion
* @priv: driver private structure
+ * @budget: napi budget limiting this function's packet handling
* @queue: TX queue index
* Description: it reclaims the transmit resources after transmission completes.
*/
@@ -2046,7 +2028,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
stmmac_release_tx_desc(priv, p, priv->mode);
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
}
tx_q->dirty_tx = entry;
@@ -2055,7 +2037,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
queue))) &&
- stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
netif_dbg(priv, tx_done, priv->dev,
"%s: restart transmit\n", __func__);
@@ -2328,7 +2310,8 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
rx_q->dma_rx_phy, chan);
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
+ (priv->dma_rx_size *
+ sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
rx_q->rx_tail_addr, chan);
}
@@ -2357,7 +2340,7 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
/**
* stmmac_tx_timer - mitigation sw timer for tx.
- * @data: data pointer
+ * @t: timer_list struct containing private info
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
*/
@@ -2412,12 +2395,12 @@ static void stmmac_set_rings_length(struct stmmac_priv *priv)
/* set TX ring length */
for (chan = 0; chan < tx_channels_count; chan++)
stmmac_set_tx_ring_len(priv, priv->ioaddr,
- (DMA_TX_SIZE - 1), chan);
+ (priv->dma_tx_size - 1), chan);
/* set RX ring length */
for (chan = 0; chan < rx_channels_count; chan++)
stmmac_set_rx_ring_len(priv, priv->ioaddr,
- (DMA_RX_SIZE - 1), chan);
+ (priv->dma_rx_size - 1), chan);
}
/**
@@ -2620,6 +2603,7 @@ static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
+ * @init_ptp: initialize PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -2740,6 +2724,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
}
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
/* Start the ball rolling... */
stmmac_start_all_dma(priv);
@@ -2797,6 +2785,11 @@ static int stmmac_open(struct net_device *dev)
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+ if (!priv->dma_tx_size)
+ priv->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!priv->dma_rx_size)
+ priv->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
/* Earlier check for TBS */
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
@@ -2868,7 +2861,7 @@ static int stmmac_open(struct net_device *dev)
}
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
+ netif_tx_start_all_queues(priv->dev);
return 0;
@@ -2911,8 +2904,6 @@ static int stmmac_release(struct net_device *dev)
phylink_stop(priv->phylink);
phylink_disconnect_phy(priv->phylink);
- stmmac_stop_all_queues(priv);
-
stmmac_disable_all_queues(priv);
for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
@@ -2968,7 +2959,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
return false;
stmmac_set_tx_owner(priv, p);
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
return true;
}
@@ -2977,7 +2968,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
* @priv: driver private structure
* @des: buffer start address
* @total_len: total length to fill in descriptors
- * @last_segmant: condition for the last descriptor
+ * @last_segment: condition for the last descriptor
* @queue: TX queue index
* Description:
* This function fills descriptor and request new descriptors according to
@@ -2996,7 +2987,8 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
while (tmp_len > 0) {
dma_addr_t curr_addr;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
if (tx_q->tbs & STMMAC_TBS_AVAIL)
@@ -3103,7 +3095,8 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
stmmac_set_mss(priv, mss_desc, mss);
tx_q->mss = mss;
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
@@ -3210,7 +3203,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size);
if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
@@ -3373,7 +3366,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int len = skb_frag_size(frag);
bool last_segment = (i == (nfrags - 1));
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
@@ -3441,7 +3434,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* ndo_start_xmit will fill this descriptor the next time it's
* called and stmmac_tx_clean may clean up to this descriptor.
*/
- entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size);
tx_q->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
@@ -3626,7 +3619,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
dma_wmb();
stmmac_set_rx_owner(priv, p, use_rx_wd);
- entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size);
}
rx_q->dirty_rx = entry;
rx_q->rx_tail_addr = rx_q->dma_rx_phy +
@@ -3638,15 +3631,15 @@ static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
struct dma_desc *p,
int status, unsigned int len)
{
- int ret, coe = priv->hw->rx_csum;
unsigned int plen = 0, hlen = 0;
+ int coe = priv->hw->rx_csum;
/* Not first descriptor, buffer is always zero */
if (priv->sph && len)
return 0;
/* First descriptor, get split header length */
- ret = stmmac_get_rx_header_len(priv, p, &hlen);
+ stmmac_get_rx_header_len(priv, p, &hlen);
if (priv->sph && hlen) {
priv->xstats.rx_split_hdr_pkt_n++;
return hlen;
@@ -3709,7 +3702,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
else
rx_head = (void *)rx_q->dma_rx;
- stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
+ stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true);
}
while (count < limit) {
unsigned int buf1_len = 0, buf2_len = 0;
@@ -3751,7 +3744,8 @@ read_again:
if (unlikely(status & dma_own))
break;
- rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_rx_size);
next_entry = rx_q->cur_rx;
if (priv->extend_desc)
@@ -3926,7 +3920,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
priv->xstats.napi_poll++;
- work_done = stmmac_tx_clean(priv, DMA_TX_SIZE, chan);
+ work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan);
work_done = min(work_done, budget);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -3943,6 +3937,7 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
/**
* stmmac_tx_timeout
* @dev : Pointer to net device structure
+ * @txqueue: the index of the hanging transmit queue
* Description: this function is called when a packet transmission fails to
* complete within a reasonable time. The driver will mark the error in the
* netdev structure and arrange for the device to be reset to a sane state
@@ -4319,11 +4314,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_erx,
- DMA_RX_SIZE, 1, seq);
+ priv->dma_rx_size, 1, seq);
} else {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)rx_q->dma_rx,
- DMA_RX_SIZE, 0, seq);
+ priv->dma_rx_size, 0, seq);
}
}
@@ -4335,11 +4330,11 @@ static int stmmac_rings_status_show(struct seq_file *seq, void *v)
if (priv->extend_desc) {
seq_printf(seq, "Extended descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_etx,
- DMA_TX_SIZE, 1, seq);
+ priv->dma_tx_size, 1, seq);
} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
seq_printf(seq, "Descriptor ring:\n");
sysfs_display_ring((void *)tx_q->dma_tx,
- DMA_TX_SIZE, 0, seq);
+ priv->dma_tx_size, 0, seq);
}
}
@@ -4725,6 +4720,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
if (priv->dma_cap.tsoen)
dev_info(priv->device, "TSO supported\n");
+ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+
/* Run HW quirks, if any */
if (priv->hwif_quirks) {
ret = priv->hwif_quirks(priv);
@@ -4747,6 +4745,87 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
return 0;
}
+static void stmmac_napi_add(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+ spin_lock_init(&ch->lock);
+
+ if (queue < priv->plat->rx_queues_to_use) {
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx,
+ NAPI_POLL_WEIGHT);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+ netif_tx_napi_add(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (queue < priv->plat->rx_queues_to_use)
+ netif_napi_del(&ch->rx_napi);
+ if (queue < priv->plat->tx_queues_to_use)
+ netif_napi_del(&ch->tx_napi);
+ }
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ stmmac_napi_del(dev);
+
+ priv->plat->rx_queues_to_use = rx_cnt;
+ priv->plat->tx_queues_to_use = tx_cnt;
+
+ stmmac_napi_add(dev);
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ priv->dma_rx_size = rx_size;
+ priv->dma_tx_size = tx_size;
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
+
/**
* stmmac_dvr_probe
* @device: device pointer
@@ -4763,7 +4841,7 @@ int stmmac_dvr_probe(struct device *device,
{
struct net_device *ndev = NULL;
struct stmmac_priv *priv;
- u32 queue, rxq, maxq;
+ u32 rxq;
int i, ret = 0;
ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
@@ -4827,10 +4905,6 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_ether_addr(priv);
- /* Configure real RX and TX queues */
- netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
- netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
-
ndev->netdev_ops = &stmmac_netdev_ops;
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -4928,25 +5002,7 @@ int stmmac_dvr_probe(struct device *device,
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
/* Setup channels NAPI */
- maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
-
- for (queue = 0; queue < maxq; queue++) {
- struct stmmac_channel *ch = &priv->channel[queue];
-
- spin_lock_init(&ch->lock);
- ch->priv_data = priv;
- ch->index = queue;
-
- if (queue < priv->plat->rx_queues_to_use) {
- netif_napi_add(ndev, &ch->rx_napi, stmmac_napi_poll_rx,
- NAPI_POLL_WEIGHT);
- }
- if (queue < priv->plat->tx_queues_to_use) {
- netif_tx_napi_add(ndev, &ch->tx_napi,
- stmmac_napi_poll_tx,
- NAPI_POLL_WEIGHT);
- }
- }
+ stmmac_napi_add(ndev);
mutex_init(&priv->lock);
@@ -5011,14 +5067,7 @@ error_phy_setup:
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
- for (queue = 0; queue < maxq; queue++) {
- struct stmmac_channel *ch = &priv->channel[queue];
-
- if (queue < priv->plat->rx_queues_to_use)
- netif_napi_del(&ch->rx_napi);
- if (queue < priv->plat->tx_queues_to_use)
- netif_napi_del(&ch->tx_napi);
- }
+ stmmac_napi_del(ndev);
error_hw_init:
destroy_workqueue(priv->wq);
@@ -5086,7 +5135,6 @@ int stmmac_suspend(struct device *dev)
mutex_lock(&priv->lock);
netif_device_detach(ndev);
- stmmac_stop_all_queues(priv);
stmmac_disable_all_queues(priv);
@@ -5115,8 +5163,7 @@ int stmmac_suspend(struct device *dev)
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
- if (priv->plat->clk_ptp_ref)
- clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
clk_disable_unprepare(priv->plat->pclk);
clk_disable_unprepare(priv->plat->stmmac_clk);
}
@@ -5129,7 +5176,7 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
* stmmac_reset_queues_param - reset queue parameters
- * @dev: device pointer
+ * @priv: driver private structure
*/
static void stmmac_reset_queues_param(struct stmmac_priv *priv)
{
@@ -5213,8 +5260,6 @@ int stmmac_resume(struct device *dev)
stmmac_enable_all_queues(priv);
- stmmac_start_all_queues(priv);
-
mutex_unlock(&priv->lock);
if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f32317fa75c8..af34a4cadbb0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -125,6 +125,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
/**
* stmmac_mtl_setup - parse DT parameters for multiple queues configuration
* @pdev: platform device
+ * @plat: stmmac platform data to fill out
*/
static int stmmac_mtl_setup(struct platform_device *pdev,
struct plat_stmmacenet_data *plat)
@@ -360,7 +361,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
/**
* stmmac_of_get_mac_mode - retrieves the interface of the MAC
- * @np - device-tree node
+ * @np: device-tree node
* Description:
* Similar to `of_get_phy_mode()`, this function will retrieve (from
* the device-tree) the interface mode on the MAC side. This assumes
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index bf195adee393..0462dcc93e53 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -796,7 +796,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
u32 tail;
tail = priv->rx_queue[i].dma_rx_phy +
- (DMA_RX_SIZE * sizeof(struct dma_desc));
+ (priv->dma_rx_size * sizeof(struct dma_desc));
stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
stmmac_start_rx(priv, priv->ioaddr, i);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b624e177ec71..9ff894ba8d3e 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -454,8 +454,8 @@ static int cas_page_free(struct cas *cp, cas_page_t *page)
#define RX_USED_ADD(x, y) ((x)->used += (y))
#define RX_USED_SET(x, y) ((x)->used = (y))
#else
-#define RX_USED_ADD(x, y)
-#define RX_USED_SET(x, y)
+#define RX_USED_ADD(x, y) do { } while (0)
+#define RX_USED_SET(x, y) do { } while (0)
#endif
/* local page allocation routines for the receive buffers. jumbo pages
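
The cassini change gives the compiled-out RX_USED_* macros a do { } while (0) body: a fully empty expansion leaves a bare semicolon behind in guarded uses such as "if (cond) RX_USED_ADD(page, len);", tripping -Wempty-body style warnings, whereas the do/while form is a complete statement that still requires its trailing semicolon and compiles to nothing. The pattern (the RX_COUNT_BUFFERS guard name is assumed from cassini's debug-count option):

    #ifdef RX_COUNT_BUFFERS
    #define RX_USED_ADD(x, y) ((x)->used += (y))
    #else
    #define RX_USED_ADD(x, y) do { } while (0) /* compiles to nothing */
    #endif
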
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 34fdbc6d6031..c646575e79d5 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -209,13 +209,13 @@ static void bigmac_clean_rings(struct bigmac *bp)
}
}
-static void bigmac_init_rings(struct bigmac *bp, int from_irq)
+static void bigmac_init_rings(struct bigmac *bp, bool non_blocking)
{
struct bmac_init_block *bb = bp->bmac_block;
int i;
gfp_t gfp_flags = GFP_KERNEL;
- if (from_irq || in_interrupt())
+ if (non_blocking)
gfp_flags = GFP_ATOMIC;
bp->rx_new = bp->rx_old = bp->tx_new = bp->tx_old = 0;
@@ -489,7 +489,7 @@ static void bigmac_tcvr_init(struct bigmac *bp)
}
}
-static int bigmac_init_hw(struct bigmac *, int);
+static int bigmac_init_hw(struct bigmac *, bool);
static int try_next_permutation(struct bigmac *bp, void __iomem *tregs)
{
@@ -549,7 +549,7 @@ static void bigmac_timer(struct timer_list *t)
if (ret == -1) {
printk(KERN_ERR "%s: Link down, cable problem?\n",
bp->dev->name);
- ret = bigmac_init_hw(bp, 0);
+ ret = bigmac_init_hw(bp, true);
if (ret) {
printk(KERN_ERR "%s: Error, cannot re-init the "
"BigMAC.\n", bp->dev->name);
@@ -617,7 +617,7 @@ static void bigmac_begin_auto_negotiation(struct bigmac *bp)
add_timer(&bp->bigmac_timer);
}
-static int bigmac_init_hw(struct bigmac *bp, int from_irq)
+static int bigmac_init_hw(struct bigmac *bp, bool non_blocking)
{
void __iomem *gregs = bp->gregs;
void __iomem *cregs = bp->creg;
@@ -635,7 +635,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq)
qec_init(bp);
/* Alloc and reset the tx/rx descriptor chains. */
- bigmac_init_rings(bp, from_irq);
+ bigmac_init_rings(bp, non_blocking);
/* Initialize the PHY. */
bigmac_tcvr_init(bp);
@@ -749,7 +749,7 @@ static void bigmac_is_medium_rare(struct bigmac *bp, u32 qec_status, u32 bmac_st
}
printk(" RESET\n");
- bigmac_init_hw(bp, 1);
+ bigmac_init_hw(bp, true);
}
/* BigMAC transmit complete service routines. */
@@ -921,7 +921,7 @@ static int bigmac_open(struct net_device *dev)
return ret;
}
timer_setup(&bp->bigmac_timer, bigmac_timer, 0);
- ret = bigmac_init_hw(bp, 0);
+ ret = bigmac_init_hw(bp, false);
if (ret)
free_irq(dev->irq, bp);
return ret;
@@ -945,7 +945,7 @@ static void bigmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct bigmac *bp = netdev_priv(dev);
- bigmac_init_hw(bp, 0);
+ bigmac_init_hw(bp, true);
netif_wake_queue(dev);
}
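
The sunbmac conversion is part of removing in_interrupt() from drivers: only the call site knows whether it may sleep, so the context is passed explicitly. bigmac_open() passes false (GFP_KERNEL), while the timer and tx_timeout paths pass true (GFP_ATOMIC). The idiom in isolation, as a sketch (ring_buf_alloc is a hypothetical helper):

    #include <linux/slab.h>

    static void *ring_buf_alloc(size_t size, bool non_blocking)
    {
            /* the caller declares its context instead of the callee
             * guessing with in_interrupt(), which is unreliable */
            return kmalloc(size, non_blocking ? GFP_ATOMIC : GFP_KERNEL);
    }
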
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 8deb943ca5de..58f142ee78a3 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2965,9 +2965,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* It is guaranteed that the returned buffer will be at least
* PAGE_SIZE aligned.
*/
- gp->init_block = (struct gem_init_block *)
- dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
- &gp->gblock_dvma, GFP_KERNEL);
+ gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block),
+ &gp->gblock_dvma, GFP_KERNEL);
if (!gp->init_block) {
pr_err("Cannot allocate init block, aborting\n");
err = -ENOMEM;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index eb1c6b03c329..df26cea45904 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -513,7 +513,7 @@ void xlgmac_get_all_hw_features(struct xlgmac_pdata *pdata)
void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
{
- char *str = NULL;
+ char __maybe_unused *str = NULL;
XLGMAC_PR("\n");
XLGMAC_PR("=====================================================\n");
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e28727297563..b8f4f419173f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -138,7 +138,10 @@ static void print_eth_id(struct net_device *ndev)
* @priv: NIC private structure
* @f: fifo to initialize
* @fsz_type: fifo size type: 0-4KB, 1-8KB, 2-16KB, 3-32KB
- * @reg_XXX: offsets of registers relative to base address
+ * @reg_CFG0: offset of the CFG0 register relative to the base address
+ * @reg_CFG1: offset of the CFG1 register relative to the base address
+ * @reg_RPTR: offset of the RPTR register relative to the base address
+ * @reg_WPTR: offset of the WPTR register relative to the base address
*
* 1K extra space is allocated at the end of the fifo to simplify
* processing of descriptors that wrap around the fifo's end
@@ -153,11 +156,11 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
u16 memsz = FIFO_SIZE * (1 << fsz_type);
memset(f, 0, sizeof(struct fifo));
- /* pci_alloc_consistent gives us 4k-aligned memory */
- f->va = pci_alloc_consistent(priv->pdev,
- memsz + FIFO_EXTRA_SPACE, &f->da);
+ /* dma_alloc_coherent gives us 4k-aligned memory */
+ f->va = dma_alloc_coherent(&priv->pdev->dev, memsz + FIFO_EXTRA_SPACE,
+ &f->da, GFP_ATOMIC);
if (!f->va) {
- pr_err("pci_alloc_consistent failed\n");
+ pr_err("dma_alloc_coherent failed\n");
RET(-ENOMEM);
}
f->reg_CFG0 = reg_CFG0;
@@ -183,8 +186,8 @@ static void bdx_fifo_free(struct bdx_priv *priv, struct fifo *f)
{
ENTER;
if (f->va) {
- pci_free_consistent(priv->pdev,
- f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
+ dma_free_coherent(&priv->pdev->dev,
+ f->memsz + FIFO_EXTRA_SPACE, f->va, f->da);
f->va = NULL;
}
RET();
@@ -558,7 +561,7 @@ static int bdx_reset(struct bdx_priv *priv)
/**
* bdx_close - Disables a network interface
- * @netdev: network interface device structure
+ * @ndev: network interface device structure
*
* Returns 0, this is not allowed to fail
*
@@ -585,7 +588,7 @@ static int bdx_close(struct net_device *ndev)
/**
* bdx_open - Called when a network interface is made active
- * @netdev: network interface device structure
+ * @ndev: network interface device structure
*
* Returns 0 on success, negative value on failure
*
@@ -698,7 +701,7 @@ static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
* __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid
* @ndev: network device
* @vid: VLAN vid
- * @op: add or kill operation
+ * @enable: enable (add) or disable (kill) the VLAN vid in the filter table
*
* Passes VLAN filter table to hardware
*/
@@ -729,6 +732,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
/**
* bdx_vlan_rx_add_vid - kernel hook for adding VLAN vid to hw filtering table
* @ndev: network device
+ * @proto: unused
* @vid: VLAN vid to add
*/
static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -740,6 +744,7 @@ static int bdx_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
/**
* bdx_vlan_rx_kill_vid - kernel hook for killing VLAN vid in hw filtering table
* @ndev: network device
+ * @proto: unused
* @vid: VLAN vid to kill
*/
static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -750,7 +755,7 @@ static int bdx_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
/**
* bdx_change_mtu - Change the Maximum Transfer Unit
- * @netdev: network interface device structure
+ * @ndev: network interface device structure
* @new_mtu: new value for maximum frame size
*
* Returns 0 on success, negative on failure
@@ -1033,9 +1038,8 @@ static void bdx_rx_free_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
for (i = 0; i < db->nelem; i++) {
dm = bdx_rxdb_addr_elem(db, i);
if (dm->dma) {
- pci_unmap_single(priv->pdev,
- dm->dma, f->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, dm->dma,
+ f->m.pktsz, DMA_FROM_DEVICE);
dev_kfree_skb(dm->skb);
}
}
@@ -1097,9 +1101,8 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
idx = bdx_rxdb_alloc_elem(db);
dm = bdx_rxdb_addr_elem(db, idx);
- dm->dma = pci_map_single(priv->pdev,
- skb->data, f->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dm->dma = dma_map_single(&priv->pdev->dev, skb->data,
+ f->m.pktsz, DMA_FROM_DEVICE);
dm->skb = skb;
rxfd = (struct rxf_desc *)(f->m.va + f->m.wptr);
rxfd->info = CPU_CHIP_SWAP32(0x10003); /* INFO=1 BC=3 */
@@ -1259,16 +1262,15 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
(skb2 = netdev_alloc_skb(priv->ndev, len + NET_IP_ALIGN))) {
skb_reserve(skb2, NET_IP_ALIGN);
/*skb_put(skb2, len); */
- pci_dma_sync_single_for_cpu(priv->pdev,
- dm->dma, rxf_fifo->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev, dm->dma,
+ rxf_fifo->m.pktsz,
+ DMA_FROM_DEVICE);
memcpy(skb2->data, skb->data, len);
bdx_recycle_skb(priv, rxdd);
skb = skb2;
} else {
- pci_unmap_single(priv->pdev,
- dm->dma, rxf_fifo->m.pktsz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, dm->dma,
+ rxf_fifo->m.pktsz, DMA_FROM_DEVICE);
bdx_rxdb_free_elem(db, rxdd->va_lo);
}
@@ -1478,8 +1480,8 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
int i;
db->wptr->len = skb_headlen(skb);
- db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
- db->wptr->len, PCI_DMA_TODEVICE);
+ db->wptr->addr.dma = dma_map_single(&priv->pdev->dev, skb->data,
+ db->wptr->len, DMA_TO_DEVICE);
pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
pbl->pa_lo = CPU_CHIP_SWAP32(L32_64(db->wptr->addr.dma));
pbl->pa_hi = CPU_CHIP_SWAP32(H32_64(db->wptr->addr.dma));
@@ -1716,8 +1718,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
BDX_ASSERT(db->rptr->len == 0);
do {
BDX_ASSERT(db->rptr->addr.dma == 0);
- pci_unmap_page(priv->pdev, db->rptr->addr.dma,
- db->rptr->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
bdx_tx_db_inc_rptr(db);
} while (db->rptr->len > 0);
tx_level -= db->rptr->len; /* '-' koz len is negative */
@@ -1756,6 +1758,8 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
/**
* bdx_tx_free_skbs - frees all skbs from TXD fifo.
+ * @priv: NIC private structure
+ *
* It gets called when OS stops this dev, eg upon "ifconfig down" or rmmod
*/
static void bdx_tx_free_skbs(struct bdx_priv *priv)
@@ -1765,8 +1769,8 @@ static void bdx_tx_free_skbs(struct bdx_priv *priv)
ENTER;
while (db->rptr != db->wptr) {
if (likely(db->rptr->len))
- pci_unmap_page(priv->pdev, db->rptr->addr.dma,
- db->rptr->len, PCI_DMA_TODEVICE);
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
else
dev_kfree_skb(db->rptr->addr.skb);
bdx_tx_db_inc_rptr(db);
@@ -1902,12 +1906,12 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) /* it triggers interrupt, dunno why. */
goto err_pci; /* it's not a problem though */
- if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
- !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
+ if (!(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) &&
+ !(err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
pci_using_dac = 1;
} else {
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
+ (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
pr_err("No usable DMA configuration, aborting\n");
goto err_dma;
}
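The tehuti hunks follow the mechanical migration from the deprecated pci_* DMA wrappers to the generic DMA API: the struct pci_dev argument becomes &pdev->dev, PCI_DMA_{TO,FROM}DEVICE becomes DMA_{TO,FROM}_DEVICE, pci_alloc_consistent() becomes dma_alloc_coherent() with an explicit gfp_t, and pci_set_dma_mask()/pci_set_consistent_dma_mask() become dma_set_mask()/dma_set_coherent_mask(). A condensed sketch of the streaming-mapping half (helper names are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int map_tx_buf(struct pci_dev *pdev, void *buf, size_t len,
                          dma_addr_t *dma)
    {
            /* was: pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE) */
            *dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
            return dma_mapping_error(&pdev->dev, *dma) ? -ENOMEM : 0;
    }

    static void unmap_tx_buf(struct pci_dev *pdev, dma_addr_t dma, size_t len)
    {
            /* was: pci_unmap_single(pdev, dma, len, PCI_DMA_TODEVICE) */
            dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
    }

The same substitution recurs below in the tlan and tc35815 diffs.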
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 496dafb25128..6e4d4f9e32e0 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -572,13 +572,14 @@ static int am65_cpsw_nway_reset(struct net_device *ndev)
static int am65_cpsw_get_regs_len(struct net_device *ndev)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- u32 i, regdump_len = 0;
+ u32 ale_entries, i, regdump_len = 0;
+ ale_entries = cpsw_ale_get_num_entries(common->ale);
for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
if (am65_cpsw_regdump[i].hdr.module_id ==
AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
regdump_len += sizeof(struct am65_cpsw_regdump_hdr);
- regdump_len += common->ale->params.ale_entries *
+ regdump_len += ale_entries *
ALE_ENTRY_WORDS * sizeof(u32);
continue;
}
@@ -592,10 +593,11 @@ static void am65_cpsw_get_regs(struct net_device *ndev,
struct ethtool_regs *regs, void *p)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
- u32 i, j, pos, *reg = p;
+ u32 ale_entries, i, j, pos, *reg = p;
/* update CPSW IP version */
regs->version = AM65_CPSW_REGDUMP_VER;
+ ale_entries = cpsw_ale_get_num_entries(common->ale);
pos = 0;
for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
@@ -603,7 +605,7 @@ static void am65_cpsw_get_regs(struct net_device *ndev,
if (am65_cpsw_regdump[i].hdr.module_id ==
AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
- u32 ale_tbl_len = common->ale->params.ale_entries *
+ u32 ale_tbl_len = ale_entries *
ALE_ENTRY_WORDS * sizeof(u32) +
sizeof(struct am65_cpsw_regdump_hdr);
reg[pos++] = ale_tbl_len;
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 9baf3f3da91e..501d676fd88b 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -5,6 +5,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
@@ -2038,6 +2039,7 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct am65_cpsw_common *common;
struct device_node *node;
struct resource *res;
+ struct clk *clk;
int ret, i;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
@@ -2086,6 +2088,16 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
if (!common->ports)
return -ENOMEM;
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "error getting fck clock %d\n", ret);
+ return ret;
+ }
+ common->bus_freq = clk_get_rate(clk);
+
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -2131,10 +2143,10 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
/* init common data */
ale_params.dev = dev;
ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
- ale_params.ale_entries = 0;
ale_params.ale_ports = common->port_num + 1;
ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
- ale_params.nu_switch_ale = true;
+ ale_params.dev_id = "am65x-cpsw2g";
+ ale_params.bus_freq = common->bus_freq;
common->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(common->ale)) {
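The clock lookup added to am65_cpsw_nuss_probe() uses the standard probe-deferral pattern: every error is fatal, but -EPROBE_DEFER is expected and deliberately not logged. On kernels that provide dev_err_probe() the same logic collapses to one call; a sketch under that assumption (helper name is illustrative):

    #include <linux/clk.h>
    #include <linux/device.h>

    static int get_bus_freq(struct device *dev, unsigned long *bus_freq)
    {
            struct clk *clk = devm_clk_get(dev, "fck");

            if (IS_ERR(clk))
                    /* dev_err_probe() stays quiet for -EPROBE_DEFER. */
                    return dev_err_probe(dev, PTR_ERR(clk),
                                         "error getting fck clock\n");

            *bus_freq = clk_get_rate(clk);
            return 0;
    }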
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 94f666ea0e53..993e1d4d3222 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -106,6 +106,7 @@ struct am65_cpsw_common {
u32 nuss_ver;
u32 cpsw_ver;
+ unsigned long bus_freq;
bool pf_p0_rx_ptype_rrobin;
struct am65_cpts *cpts;
int est_enabled;
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index c59a289e428c..75056c14b161 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -83,6 +83,8 @@ struct am65_cpts_regs {
#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17)
+
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
@@ -748,42 +750,23 @@ EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
- u8 *msgtype, *data = skb->data;
- unsigned int offset = 0;
- __be16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
if (ptp_class == PTP_CLASS_NONE)
return 0;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return 0;
- if (unlikely(ptp_class & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- *mtype_seqid = (*msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ *mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
- *mtype_seqid |= (ntohs(*seqid) & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+ *mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
return 1;
}
@@ -1005,7 +988,9 @@ struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
am65_cpts_set_add_val(cpts);
- am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN | AM65_CPTS_CONTROL_64MODE,
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
+ AM65_CPTS_CONTROL_64MODE |
+ AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
control);
am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 4a65edc5a375..9fd1f77190ad 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1278,12 +1278,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->channels = prop;
- if (of_property_read_u32(node, "ale_entries", &prop)) {
- dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
- return -EINVAL;
- }
- data->ale_entries = prop;
-
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
return -EINVAL;
@@ -1297,7 +1291,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
data->mac_control = prop;
if (of_property_read_bool(node, "dual_emac"))
- data->dual_emac = 1;
+ data->dual_emac = true;
/*
* Populate all the child nodes here...
@@ -1596,7 +1590,7 @@ static int cpsw_probe(struct platform_device *pdev)
soc = soc_device_match(cpsw_soc_devices);
if (soc)
- cpsw->quirk_irq = 1;
+ cpsw->quirk_irq = true;
data = &cpsw->data;
cpsw->slaves = devm_kcalloc(dev,
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 9ad872bfae3a..a6a455c32628 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -32,6 +32,7 @@
#define ALE_STATUS 0x04
#define ALE_CONTROL 0x08
#define ALE_PRESCALE 0x10
+#define ALE_AGING_TIMER 0x14
#define ALE_UNKNOWNVLAN 0x18
#define ALE_TABLE_CONTROL 0x20
#define ALE_TABLE 0x34
@@ -46,6 +47,46 @@
#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
+/* ALE_AGING_TIMER */
+#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+
+/**
+ * struct ale_entry_fld - The ALE tbl entry field description
+ * @start_bit: field start bit
+ * @num_bits: field bit length
+ * @flags: field flags
+ */
+struct ale_entry_fld {
+ u8 start_bit;
+ u8 num_bits;
+ u8 flags;
+};
+
+enum {
+ CPSW_ALE_F_STATUS_REG = BIT(0), /* Status register present */
+ CPSW_ALE_F_HW_AUTOAGING = BIT(1), /* HW auto aging */
+
+ CPSW_ALE_F_COUNT
+};
+
+/**
+ * struct cpsw_ale_dev_id - The ALE version/SoC specific configuration
+ * @dev_id: ALE version/SoC id
+ * @features: features supported by ALE
+ * @tbl_entries: number of ALE entries
+ * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @nu_switch_ale: whether this is a NetCP NU Switch ALE
+ * @vlan_entry_tbl: ALE VLAN entry field description table
+ */
+struct cpsw_ale_dev_id {
+ const char *dev_id;
+ u32 features;
+ u32 tbl_entries;
+ u32 major_ver_mask;
+ bool nu_switch_ale;
+ const struct ale_entry_fld *vlan_entry_tbl;
+};
+
#define ALE_TABLE_WRITE BIT(31)
#define ALE_TYPE_FREE 0
@@ -60,7 +101,6 @@
#define ALE_TABLE_SIZE_MULTIPLIER 1024
#define ALE_STATUS_SIZE_MASK 0x1f
-#define ALE_TABLE_SIZE_DEFAULT 64
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
@@ -106,6 +146,59 @@ static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \
cpsw_ale_set_field(ale_entry, start, bits, value); \
}
+enum {
+ ALE_ENT_VID_MEMBER_LIST = 0,
+ ALE_ENT_VID_UNREG_MCAST_MSK,
+ ALE_ENT_VID_REG_MCAST_MSK,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK,
+ ALE_ENT_VID_UNREG_MCAST_IDX,
+ ALE_ENT_VID_REG_MCAST_IDX,
+ ALE_ENT_VID_LAST,
+};
+
+#define ALE_FLD_ALLOWED BIT(0)
+#define ALE_FLD_SIZE_PORT_MASK_BITS BIT(1)
+#define ALE_FLD_SIZE_PORT_NUM_BITS BIT(2)
+
+#define ALE_ENTRY_FLD(id, start, bits) \
+[id] = { \
+ .start_bit = start, \
+ .num_bits = bits, \
+ .flags = ALE_FLD_ALLOWED, \
+}
+
+#define ALE_ENTRY_FLD_DYN_MSK_SIZE(id, start) \
+[id] = { \
+ .start_bit = start, \
+ .num_bits = 0, \
+ .flags = ALE_FLD_ALLOWED | \
+ ALE_FLD_SIZE_PORT_MASK_BITS, \
+}
+
+/* dm814x, am3/am4/am5, k2hk */
+static const struct ale_entry_fld vlan_entry_cpsw[ALE_ENT_VID_LAST] = {
+ ALE_ENTRY_FLD(ALE_ENT_VID_MEMBER_LIST, 0, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_UNREG_MCAST_MSK, 8, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_REG_MCAST_MSK, 16, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24, 3),
+};
+
+/* k2e/k2l, k3 am65/j721e cpsw2g */
+static const struct ale_entry_fld vlan_entry_nu[ALE_ENT_VID_LAST] = {
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_MEMBER_LIST, 0),
+ ALE_ENTRY_FLD(ALE_ENT_VID_UNREG_MCAST_IDX, 20, 3),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24),
+ ALE_ENTRY_FLD(ALE_ENT_VID_REG_MCAST_IDX, 44, 3),
+};
+
+/* K3 j721e/j7200 cpsw9g/5g, am64x cpsw3g */
+static const struct ale_entry_fld vlan_entry_k3_cpswxg[] = {
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_MEMBER_LIST, 0),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_UNREG_MCAST_MSK, 12),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_REG_MCAST_MSK, 36),
+};
+
DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(vlan_id, 48, 12)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
@@ -115,17 +208,76 @@ DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD1(port_num, 66)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
-DEFINE_ALE_FIELD1(vlan_untag_force, 24)
-DEFINE_ALE_FIELD1(vlan_reg_mcast, 16)
-DEFINE_ALE_FIELD1(vlan_unreg_mcast, 8)
-DEFINE_ALE_FIELD1(vlan_member_list, 0)
DEFINE_ALE_FIELD(mcast, 40, 1)
-/* ALE NetCP nu switch specific */
-DEFINE_ALE_FIELD(vlan_unreg_mcast_idx, 20, 3)
-DEFINE_ALE_FIELD(vlan_reg_mcast_idx, 44, 3)
#define NU_VLAN_UNREG_MCAST_IDX 1
+static int cpsw_ale_entry_get_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ const struct ale_entry_fld *entry_tbl,
+ int fld_id)
+{
+ const struct ale_entry_fld *entry_fld;
+ u32 bits;
+
+ if (!ale || !ale_entry)
+ return -EINVAL;
+
+ entry_fld = &entry_tbl[fld_id];
+ if (!(entry_fld->flags & ALE_FLD_ALLOWED)) {
+ dev_err(ale->params.dev, "get: wrong ale fld id %d\n", fld_id);
+ return -ENOENT;
+ }
+
+ bits = entry_fld->num_bits;
+ if (entry_fld->flags & ALE_FLD_SIZE_PORT_MASK_BITS)
+ bits = ale->port_mask_bits;
+
+ return cpsw_ale_get_field(ale_entry, entry_fld->start_bit, bits);
+}
+
+static void cpsw_ale_entry_set_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ const struct ale_entry_fld *entry_tbl,
+ int fld_id,
+ u32 value)
+{
+ const struct ale_entry_fld *entry_fld;
+ u32 bits;
+
+ if (!ale || !ale_entry)
+ return;
+
+ entry_fld = &entry_tbl[fld_id];
+ if (!(entry_fld->flags & ALE_FLD_ALLOWED)) {
+ dev_err(ale->params.dev, "set: wrong ale fld id %d\n", fld_id);
+ return;
+ }
+
+ bits = entry_fld->num_bits;
+ if (entry_fld->flags & ALE_FLD_SIZE_PORT_MASK_BITS)
+ bits = ale->port_mask_bits;
+
+ cpsw_ale_set_field(ale_entry, entry_fld->start_bit, bits, value);
+}
+
+static int cpsw_ale_vlan_get_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ int fld_id)
+{
+ return cpsw_ale_entry_get_fld(ale, ale_entry,
+ ale->vlan_entry_tbl, fld_id);
+}
+
+static void cpsw_ale_vlan_set_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ int fld_id,
+ u32 value)
+{
+ cpsw_ale_entry_set_fld(ale, ale_entry,
+ ale->vlan_entry_tbl, fld_id, value);
+}
+
/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
@@ -420,19 +572,22 @@ static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
int idx;
/* Set VLAN registered multicast flood mask */
- idx = cpsw_ale_get_vlan_reg_mcast_idx(ale_entry);
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_IDX);
writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
/* Set VLAN unregistered multicast flood mask */
- idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX);
writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
}
static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry,
u16 vid, int untag_mask)
{
- cpsw_ale_set_vlan_untag_force(ale_entry,
- untag_mask, ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK,
+ untag_mask);
if (untag_mask & ALE_PORT_HOST)
bitmap_set(ale->p0_untag_vid_mask, vid, 1);
else
@@ -454,17 +609,19 @@ int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
if (!ale->params.nu_switch_ale) {
- cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
- ale->vlan_field_bits);
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK, reg_mcast);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
} else {
- cpsw_ale_set_vlan_unreg_mcast_idx(ale_entry,
- NU_VLAN_UNREG_MCAST_IDX);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX,
+ NU_VLAN_UNREG_MCAST_IDX);
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
}
- cpsw_ale_set_vlan_member_list(ale_entry, port_mask,
- ale->vlan_field_bits);
+
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST, port_mask);
if (idx < 0)
idx = cpsw_ale_match_free(ale);
@@ -483,20 +640,20 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
int reg_mcast, unreg_mcast;
int members, untag;
- members = cpsw_ale_get_vlan_member_list(ale_entry,
- ale->vlan_field_bits);
+ members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
members &= ~port_mask;
if (!members) {
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
return;
}
- untag = cpsw_ale_get_vlan_untag_force(ale_entry,
- ale->vlan_field_bits);
- reg_mcast = cpsw_ale_get_vlan_reg_mcast(ale_entry,
- ale->vlan_field_bits);
- unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
+ untag = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK);
+ reg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK);
+ unreg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
untag &= members;
reg_mcast &= members;
unreg_mcast &= members;
@@ -504,16 +661,16 @@ static void cpsw_ale_del_vlan_modify(struct cpsw_ale *ale, u32 *ale_entry,
cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
if (!ale->params.nu_switch_ale) {
- cpsw_ale_set_vlan_reg_mcast(ale_entry, reg_mcast,
- ale->vlan_field_bits);
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK, reg_mcast);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
} else {
cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast,
unreg_mcast);
}
- cpsw_ale_set_vlan_member_list(ale_entry, members,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST, members);
}
int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
@@ -551,15 +708,15 @@ int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
if (idx >= 0)
cpsw_ale_read(ale, idx, ale_entry);
- vlan_members = cpsw_ale_get_vlan_member_list(ale_entry,
- ale->vlan_field_bits);
- reg_mcast_members = cpsw_ale_get_vlan_reg_mcast(ale_entry,
- ale->vlan_field_bits);
+ vlan_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
+ reg_mcast_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK);
unreg_mcast_members =
- cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
- untag_members = cpsw_ale_get_vlan_untag_force(ale_entry,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
+ untag_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK);
vlan_members |= port_mask;
untag_members = (untag_members & ~port_mask) | untag_mask;
@@ -592,14 +749,15 @@ void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
continue;
unreg_members =
- cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
if (add)
unreg_members |= unreg_mcast_mask;
else
unreg_members &= ~unreg_mcast_mask;
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_members,
- ale->vlan_field_bits);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK,
+ unreg_members);
cpsw_ale_write(ale, idx, ale_entry);
}
}
@@ -609,15 +767,15 @@ static void cpsw_ale_vlan_set_unreg_mcast(struct cpsw_ale *ale, u32 *ale_entry,
{
int unreg_mcast;
- unreg_mcast =
- cpsw_ale_get_vlan_unreg_mcast(ale_entry,
- ale->vlan_field_bits);
+ unreg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
if (allmulti)
unreg_mcast |= ALE_PORT_HOST;
else
unreg_mcast &= ~ALE_PORT_HOST;
- cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast,
- ale->vlan_field_bits);
+
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
}
static void
@@ -627,7 +785,8 @@ cpsw_ale_vlan_set_unreg_mcast_idx(struct cpsw_ale *ale, u32 *ale_entry,
int unreg_mcast;
int idx;
- idx = cpsw_ale_get_vlan_unreg_mcast_idx(ale_entry);
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX);
unreg_mcast = readl(ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
@@ -651,9 +810,9 @@ void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
type = cpsw_ale_get_entry_type(ale_entry);
if (type != ALE_TYPE_VLAN)
continue;
- vlan_members =
- cpsw_ale_get_vlan_member_list(ale_entry,
- ale->vlan_field_bits);
+
+ vlan_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
if (port != -1 && !(vlan_members & BIT(port)))
continue;
@@ -960,30 +1119,146 @@ static void cpsw_ale_timer(struct timer_list *t)
}
}
+static void cpsw_ale_hw_aging_timer_start(struct cpsw_ale *ale)
+{
+ u32 aging_timer;
+
+ aging_timer = ale->params.bus_freq / 1000000;
+ aging_timer *= ale->params.ale_ageout;
+
+ if (aging_timer & ~ALE_AGING_TIMER_MASK) {
+ aging_timer = ALE_AGING_TIMER_MASK;
+ dev_warn(ale->params.dev,
+ "ALE aging timer overflow, set to max\n");
+ }
+
+ writel(aging_timer, ale->params.ale_regs + ALE_AGING_TIMER);
+}
+
+static void cpsw_ale_hw_aging_timer_stop(struct cpsw_ale *ale)
+{
+ writel(0, ale->params.ale_regs + ALE_AGING_TIMER);
+}
+
+static void cpsw_ale_aging_start(struct cpsw_ale *ale)
+{
+ if (!ale->params.ale_ageout)
+ return;
+
+ if (ale->features & CPSW_ALE_F_HW_AUTOAGING) {
+ cpsw_ale_hw_aging_timer_start(ale);
+ return;
+ }
+
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+}
+
+static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
+{
+ if (!ale->params.ale_ageout)
+ return;
+
+ if (ale->features & CPSW_ALE_F_HW_AUTOAGING) {
+ cpsw_ale_hw_aging_timer_stop(ale);
+ return;
+ }
+
+ del_timer_sync(&ale->timer);
+}
+
void cpsw_ale_start(struct cpsw_ale *ale)
{
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
- timer_setup(&ale->timer, cpsw_ale_timer, 0);
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
+ cpsw_ale_aging_start(ale);
}
void cpsw_ale_stop(struct cpsw_ale *ale)
{
- del_timer_sync(&ale->timer);
+ cpsw_ale_aging_stop(ale);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
}
+static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
+ {
+ /* am3/4/5, dra7, dm814x, 66ak2hk-gbe */
+ .dev_id = "cpsw",
+ .tbl_entries = 1024,
+ .major_ver_mask = 0xff,
+ .vlan_entry_tbl = vlan_entry_cpsw,
+ },
+ {
+ /* 66ak2h_xgbe */
+ .dev_id = "66ak2h-xgbe",
+ .tbl_entries = 2048,
+ .major_ver_mask = 0xff,
+ .vlan_entry_tbl = vlan_entry_cpsw,
+ },
+ {
+ .dev_id = "66ak2el",
+ .features = CPSW_ALE_F_STATUS_REG,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "66ak2g",
+ .features = CPSW_ALE_F_STATUS_REG,
+ .tbl_entries = 64,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "am65x-cpsw2g",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .tbl_entries = 64,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "j721e-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ },
+ { },
+};
+
+static const struct
+cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
+ const char *dev_id)
+{
+ if (!dev_id)
+ return NULL;
+
+ while (id->dev_id) {
+ if (strcmp(dev_id, id->dev_id) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
{
+ const struct cpsw_ale_dev_id *ale_dev_id;
struct cpsw_ale *ale;
u32 rev, ale_entries;
+ ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
+ if (!ale_dev_id)
+ return ERR_PTR(-EINVAL);
+
+ params->ale_entries = ale_dev_id->tbl_entries;
+ params->major_ver_mask = ale_dev_id->major_ver_mask;
+ params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+
ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
if (!ale)
return ERR_PTR(-ENOMEM);
@@ -997,10 +1272,10 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ale->params = *params;
ale->ageout = ale->params.ale_ageout * HZ;
+ ale->features = ale_dev_id->features;
+ ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
- if (!ale->params.major_ver_mask)
- ale->params.major_ver_mask = 0xff;
ale->version =
(ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
ALE_VERSION_MINOR(rev);
@@ -1008,7 +1283,8 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
ALE_VERSION_MINOR(rev));
- if (!ale->params.ale_entries) {
+ if (ale->features & CPSW_ALE_F_STATUS_REG &&
+ !ale->params.ale_entries) {
ale_entries =
readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
ALE_STATUS_SIZE_MASK;
@@ -1017,16 +1293,12 @@ struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
* table which shows the size as a multiple of 1024 entries.
* For these, params.ale_entries will be set to zero. So
* read the register and update the value of ale_entries.
- * ALE table on NetCP lite, is much smaller and is indicated
- * by a value of zero in ALE_STATUS. So use a default value
- * of ALE_TABLE_SIZE_DEFAULT for this. Caller is expected
- * to set the value of ale_entries for all other versions
- * of ALE.
+ * Return an error if ale_entries is zero in ALE_STATUS.
*/
if (!ale_entries)
- ale_entries = ALE_TABLE_SIZE_DEFAULT;
- else
- ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
+ return ERR_PTR(-EINVAL);
+
+ ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
ale->params.ale_entries = ale_entries;
}
dev_info(ale->params.dev,
@@ -1079,3 +1351,8 @@ void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
data += ALE_ENTRY_WORDS;
}
}
+
+u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
+{
+ return ale ? ale->params.ale_entries : 0;
+}
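With the ID table above, cpsw_ale_create() derives the table size, version mask, NU-switch layout and feature flags from a device-ID string, so callers no longer hard-code entry counts. A caller then reduces to roughly the shape seen in the am65-cpsw probe hunk earlier:

    struct cpsw_ale_params ale_params = {};

    ale_params.dev        = dev;
    ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
    ale_params.ale_ports  = common->port_num + 1;
    ale_params.ale_regs   = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
    ale_params.dev_id     = "am65x-cpsw2g";   /* picks features and sizes */
    ale_params.bus_freq   = common->bus_freq; /* feeds the HW aging timer */

    common->ale = cpsw_ale_create(&ale_params);
    if (IS_ERR(common->ale))
            return PTR_ERR(common->ale);

Because "am65x-cpsw2g" carries CPSW_ALE_F_HW_AUTOAGING, cpsw_ale_aging_start() programs ALE_AGING_TIMER with bus_freq / 1000000 * ale_ageout (for example, 125 MHz and the 10 s default give 1250, well inside the 24-bit ALE_AGING_TIMER_MASK) instead of arming the software aging timer.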
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 6a3cb6898728..5e4a69662c5f 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -24,18 +24,24 @@ struct cpsw_ale_params {
* pass it from caller.
*/
u32 major_ver_mask;
+ const char *dev_id;
+ unsigned long bus_freq;
};
+struct ale_entry_fld;
+
struct cpsw_ale {
struct cpsw_ale_params params;
struct timer_list timer;
unsigned long ageout;
u32 version;
+ u32 features;
/* These bits are different on NetCP NU Switch ALE */
u32 port_mask_bits;
u32 port_num_bits;
u32 vlan_field_bits;
unsigned long *p0_untag_vid_mask;
+ const struct ale_entry_fld *vlan_entry_tbl;
};
enum cpsw_ale_control {
@@ -119,6 +125,7 @@ int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
int control, int value);
void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale);
static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
{
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index fa54efe3be63..4d02c5135611 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -339,7 +339,8 @@ int cpsw_get_regs_len(struct net_device *ndev)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
- return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+ return cpsw_ale_get_num_entries(cpsw->ale) *
+ ALE_ENTRY_WORDS * sizeof(u32);
}
void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 15672d0a4de6..f779d2e1b5c5 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1244,7 +1244,6 @@ static int cpsw_probe_dt(struct cpsw_common *cpsw)
data->active_slave = 0;
data->channels = CPSW_MAX_QUEUES;
- data->ale_entries = CPSW_ALE_NUM_ENTRIES;
data->dual_emac = true;
data->bd_ram_size = CPSW_BD_RAM_SIZE;
data->mac_control = 0;
@@ -1661,12 +1660,10 @@ static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
for (i = 0; i < cpsw->data.slaves; i++) {
struct cpsw_slave *slave = &cpsw->slaves[i];
struct net_device *sl_ndev = slave->ndev;
- struct cpsw_priv *priv;
if (!sl_ndev)
continue;
- priv = netdev_priv(sl_ndev);
if (switch_en)
vlan = cpsw->data.default_vlan;
else
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 482a1a451e43..51cc29f39038 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -500,8 +500,8 @@ int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
ale_params.dev = dev;
ale_params.ale_ageout = ale_ageout;
- ale_params.ale_entries = data->ale_entries;
ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
+ ale_params.dev_id = "cpsw";
cpsw->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(cpsw->ale)) {
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index bf4e179b4ca4..7b7f3596b20d 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -117,7 +117,6 @@ do { \
#define CPSW_MAX_QUEUES 8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */
-#define CPSW_ALE_NUM_ENTRIES 1024
#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
#define CPSW_FIFO_SHAPE_EN_SHIFT 16
#define CPSW_FIFO_RATE_EN_SHIFT 20
@@ -294,7 +293,6 @@ struct cpsw_platform_data {
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */
- u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 7c55d395de2c..d1fc7955d422 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -446,41 +446,22 @@ static const struct ptp_clock_info cpts_info = {
static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
- u8 *msgtype, *data = skb->data;
- unsigned int offset = 0;
- u16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
if (ptp_class == PTP_CLASS_NONE)
return 0;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return 0;
- if (unlikely(ptp_class & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
- seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- *mtype_seqid = (*msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
- *mtype_seqid |= (ntohs(*seqid) & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
+ *mtype_seqid = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
+ *mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
return 1;
}
@@ -528,6 +509,11 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
int ret;
u64 ns;
+ /* cpts_rx_timestamp() is called before eth_type_trans(), so
+ * skb MAC Hdr properties are not configured yet. Hence need to
+ * reset skb MAC header here
+ */
+ skb_reset_mac_header(skb);
ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
if (!ret)
return;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 6614fa3089b2..d2eab5cd1e0c 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -718,7 +718,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
most_chan->desc_num += desc_cnt;
}
-/**
+/*
* cpdma_chan_split_pool - Splits ctrl pool between all channels.
* Has to be called under ctlr lock
*/
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index de282531f68b..c7031e1960d4 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -671,7 +671,7 @@ static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr)
* emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
* @priv: The DaVinci EMAC private adapter structure
* @action: multicast operation to perform
- * mac_addr: mac address to set
+ * @mac_addr: mac address to set
*
* Set multicast addresses in EMAC adapter - internal function
*
@@ -977,6 +977,7 @@ fail_tx:
/**
* emac_dev_tx_timeout - EMAC Transmit timeout function
* @ndev: The DaVinci EMAC network adapter
+ * @txqueue: the index of the hung transmit queue
*
* Called when system detects that a skb timeout period has expired
* potentially due to a fault in the adapter in not being able to send
@@ -1209,7 +1210,7 @@ static int emac_hw_enable(struct emac_priv *priv)
/**
* emac_poll - EMAC NAPI Poll function
- * @ndev: The DaVinci EMAC network adapter
+ * @napi: pointer to the napi_struct containing the DaVinci EMAC network adapter
* @budget: Number of receive packets to process (as told by NAPI layer)
*
* NAPI Poll function implemented to process packets as per budget. We check
@@ -1227,7 +1228,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = priv->ndev;
struct device *emac_dev = &ndev->dev;
u32 status = 0;
- u32 num_tx_pkts = 0, num_rx_pkts = 0;
+ u32 num_rx_pkts = 0;
/* Check interrupt vectors and call packet processing */
status = emac_read(EMAC_MACINVECTOR);
@@ -1238,8 +1239,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
if (status & mask) {
- num_tx_pkts = cpdma_chan_process(priv->txchan,
- EMAC_DEF_TX_MAX_SERVICE);
+ cpdma_chan_process(priv->txchan, EMAC_DEF_TX_MAX_SERVICE);
} /* TX processing */
mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 28093923a7fb..33c1592d5381 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -51,7 +51,6 @@
#define GBE13_CPTS_OFFSET 0x500
#define GBE13_ALE_OFFSET 0x600
#define GBE13_HOST_PORT_NUM 0
-#define GBE13_NUM_ALE_ENTRIES 1024
/* 1G Ethernet NU SS defines */
#define GBENU_MODULE_NAME "netcp-gbenu"
@@ -101,7 +100,6 @@
#define XGBE10_ALE_OFFSET 0x700
#define XGBE10_HW_STATS_OFFSET 0x800
#define XGBE10_HOST_PORT_NUM 0
-#define XGBE10_NUM_ALE_ENTRIES 2048
#define GBE_TIMER_INTERVAL (HZ / 2)
@@ -711,7 +709,6 @@ struct gbe_priv {
struct netcp_device *netcp_device;
struct timer_list timer;
u32 num_slaves;
- u32 ale_entries;
u32 ale_ports;
bool enable_ale;
u8 max_num_slaves;
@@ -3309,7 +3306,6 @@ static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
- gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
/* Subsystem registers */
@@ -3433,7 +3429,6 @@ static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
gbe_dev->ale_ports = gbe_dev->max_num_ports;
gbe_dev->host_port = GBE13_HOST_PORT_NUM;
- gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
/* Subsystem registers */
@@ -3697,12 +3692,15 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
ale_params.dev = gbe_dev->dev;
ale_params.ale_regs = gbe_dev->ale_reg;
ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
- ale_params.ale_entries = gbe_dev->ale_entries;
ale_params.ale_ports = gbe_dev->ale_ports;
- if (IS_SS_ID_MU(gbe_dev)) {
- ale_params.major_ver_mask = 0x7;
- ale_params.nu_switch_ale = true;
- }
+ ale_params.dev_id = "cpsw";
+ if (IS_SS_ID_NU(gbe_dev))
+ ale_params.dev_id = "66ak2el";
+ else if (IS_SS_ID_2U(gbe_dev))
+ ale_params.dev_id = "66ak2g";
+ else if (IS_SS_ID_XGBE(gbe_dev))
+ ale_params.dev_id = "66ak2h-xgbe";
+
gbe_dev->ale = cpsw_ale_create(&ale_params);
if (IS_ERR(gbe_dev->ale)) {
dev_err(gbe_dev->dev, "error initializing ale engine\n");
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 76a342ea3797..267c080ee084 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -305,9 +305,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
if (priv->dma_storage) {
- pci_free_consistent(priv->pci_dev,
- priv->dma_size, priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
}
#ifdef CONFIG_PCI
@@ -482,7 +481,7 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
priv->adapter = &board_info[ent->driver_data];
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
pr_err("No suitable PCI mapping available\n");
goto err_out_free_dev;
@@ -584,8 +583,8 @@ static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
return 0;
err_out_uninit:
- pci_free_consistent(priv->pci_dev, priv->dma_size, priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
err_out_free_dev:
free_netdev(dev);
err_out_regions:
@@ -609,9 +608,9 @@ static void tlan_eisa_cleanup(void)
dev = tlan_eisa_devices;
priv = netdev_priv(dev);
if (priv->dma_storage) {
- pci_free_consistent(priv->pci_dev, priv->dma_size,
- priv->dma_storage,
- priv->dma_storage_dma);
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
}
release_region(dev->base_addr, 0x10);
unregister_netdev(dev);
@@ -654,7 +653,6 @@ module_exit(tlan_exit);
static void __init tlan_eisa_probe(void)
{
long ioaddr;
- int rc = -ENODEV;
int irq;
u16 device_id;
@@ -719,8 +717,7 @@ static void __init tlan_eisa_probe(void)
/* Setup the newly found eisa adapter */
- rc = tlan_probe1(NULL, ioaddr, irq,
- 12, NULL);
+ tlan_probe1(NULL, ioaddr, irq, 12, NULL);
continue;
out:
@@ -826,9 +823,8 @@ static int tlan_init(struct net_device *dev)
dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
* (sizeof(struct tlan_list));
- priv->dma_storage = pci_alloc_consistent(priv->pci_dev,
- dma_size,
- &priv->dma_storage_dma);
+ priv->dma_storage = dma_alloc_coherent(&priv->pci_dev->dev, dma_size,
+ &priv->dma_storage_dma, GFP_KERNEL);
priv->dma_size = dma_size;
if (priv->dma_storage == NULL) {
@@ -1069,9 +1065,9 @@ static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
tail_list->forward = 0;
- tail_list->buffer[0].address = pci_map_single(priv->pci_dev,
+ tail_list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data, txlen,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tlan_store_skb(tail_list, skb);
tail_list->frame_size = (u16) txlen;
@@ -1365,10 +1361,10 @@ static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
struct sk_buff *skb = tlan_get_skb(head_list);
ack++;
- pci_unmap_single(priv->pci_dev, head_list->buffer[0].address,
- max(skb->len,
- (unsigned int)TLAN_MIN_FRAME_SIZE),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ head_list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
@@ -1511,8 +1507,8 @@ static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
goto drop_and_reuse;
skb = tlan_get_skb(head_list);
- pci_unmap_single(priv->pci_dev, frame_dma,
- TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pci_dev->dev, frame_dma,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
skb_put(skb, frame_size);
dev->stats.rx_bytes += frame_size;
@@ -1521,8 +1517,8 @@ static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
netif_rx(skb);
head_list->buffer[0].address =
- pci_map_single(priv->pci_dev, new_skb->data,
- TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE);
+ dma_map_single(&priv->pci_dev->dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
tlan_store_skb(head_list, new_skb);
drop_and_reuse:
@@ -1923,10 +1919,10 @@ static void tlan_reset_lists(struct net_device *dev)
if (!skb)
break;
- list->buffer[0].address = pci_map_single(priv->pci_dev,
+ list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
skb->data,
TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
tlan_store_skb(list, skb);
list->buffer[1].count = 0;
list->buffer[1].address = 0;
@@ -1954,12 +1950,10 @@ static void tlan_free_lists(struct net_device *dev)
list = priv->tx_list + i;
skb = tlan_get_skb(list);
if (skb) {
- pci_unmap_single(
- priv->pci_dev,
- list->buffer[0].address,
- max(skb->len,
- (unsigned int)TLAN_MIN_FRAME_SIZE),
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pci_dev->dev,
+ list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
@@ -1970,10 +1964,9 @@ static void tlan_free_lists(struct net_device *dev)
list = priv->rx_list + i;
skb = tlan_get_skb(list);
if (skb) {
- pci_unmap_single(priv->pci_dev,
+ dma_unmap_single(&priv->pci_dev->dev,
list->buffer[0].address,
- TLAN_MAX_FRAME_SIZE,
- PCI_DMA_FROMDEVICE);
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
list->buffer[8].address = 0;
list->buffer[9].address = 0;
@@ -2511,7 +2504,7 @@ static void tlan_phy_power_down(struct net_device *dev)
}
/* Wait for 50 ms and powerup
- * This is abitrary. It is intended to make sure the
+ * This is arbitrary. It is intended to make sure the
* transceiver settles.
*/
tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 6bcda20ed7e7..7a6e5ff8e5d4 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -454,9 +454,9 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
skb = netdev_alloc_skb(dev, RX_BUF_SIZE);
if (!skb)
return NULL;
- *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(hwdev, *dma_handle)) {
+ *dma_handle = dma_map_single(&hwdev->dev, skb->data, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&hwdev->dev, *dma_handle)) {
dev_kfree_skb_any(skb);
return NULL;
}
@@ -466,8 +466,8 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_t dma_handle)
{
- pci_unmap_single(hwdev, dma_handle, RX_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&hwdev->dev, dma_handle, RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
@@ -876,9 +876,9 @@ tc35815_init_queues(struct net_device *dev)
sizeof(struct TxFD) * TX_FD_NUM >
PAGE_SIZE * FD_PAGE_NUM);
- lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
- PAGE_SIZE * FD_PAGE_NUM,
- &lp->fd_buf_dma);
+ lp->fd_buf = dma_alloc_coherent(&lp->pci_dev->dev,
+ PAGE_SIZE * FD_PAGE_NUM,
+ &lp->fd_buf_dma, GFP_ATOMIC);
if (!lp->fd_buf)
return -ENOMEM;
for (i = 0; i < RX_BUF_NUM; i++) {
@@ -892,10 +892,9 @@ tc35815_init_queues(struct net_device *dev)
lp->rx_skbs[i].skb_dma);
lp->rx_skbs[i].skb = NULL;
}
- pci_free_consistent(lp->pci_dev,
- PAGE_SIZE * FD_PAGE_NUM,
- lp->fd_buf,
- lp->fd_buf_dma);
+ dma_free_coherent(&lp->pci_dev->dev,
+ PAGE_SIZE * FD_PAGE_NUM,
+ lp->fd_buf, lp->fd_buf_dma);
lp->fd_buf = NULL;
return -ENOMEM;
}
@@ -990,7 +989,9 @@ tc35815_clear_queues(struct net_device *dev)
BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
if (skb) {
- pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_skbs[i].skb_dma, skb->len,
+ DMA_TO_DEVICE);
lp->tx_skbs[i].skb = NULL;
lp->tx_skbs[i].skb_dma = 0;
dev_kfree_skb_any(skb);
@@ -1022,7 +1023,9 @@ tc35815_free_queues(struct net_device *dev)
BUG_ON(lp->tx_skbs[i].skb != skb);
#endif
if (skb) {
- pci_unmap_single(lp->pci_dev, lp->tx_skbs[i].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_skbs[i].skb_dma,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
lp->tx_skbs[i].skb = NULL;
lp->tx_skbs[i].skb_dma = 0;
@@ -1044,8 +1047,8 @@ tc35815_free_queues(struct net_device *dev)
}
}
if (lp->fd_buf) {
- pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
- lp->fd_buf, lp->fd_buf_dma);
+ dma_free_coherent(&lp->pci_dev->dev, PAGE_SIZE * FD_PAGE_NUM,
+ lp->fd_buf, lp->fd_buf_dma);
lp->fd_buf = NULL;
}
}
@@ -1292,7 +1295,10 @@ tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
BUG_ON(lp->tx_skbs[lp->tfd_start].skb);
#endif
lp->tx_skbs[lp->tfd_start].skb = skb;
- lp->tx_skbs[lp->tfd_start].skb_dma = pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ lp->tx_skbs[lp->tfd_start].skb_dma = dma_map_single(&lp->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
/*add to ring */
txfd = &lp->tfd_base[lp->tfd_start];
@@ -1500,9 +1506,9 @@ tc35815_rx(struct net_device *dev, int limit)
skb = lp->rx_skbs[cur_bd].skb;
prefetch(skb->data);
lp->rx_skbs[cur_bd].skb = NULL;
- pci_unmap_single(lp->pci_dev,
+ dma_unmap_single(&lp->pci_dev->dev,
lp->rx_skbs[cur_bd].skb_dma,
- RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
if (!HAVE_DMA_RXALIGN(lp) && NET_IP_ALIGN != 0)
memmove(skb->data, skb->data - NET_IP_ALIGN,
pkt_len);
@@ -1756,7 +1762,9 @@ tc35815_txdone(struct net_device *dev)
#endif
if (skb) {
dev->stats.tx_bytes += skb->len;
- pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&lp->pci_dev->dev,
+ lp->tx_skbs[lp->tfd_end].skb_dma,
+ skb->len, DMA_TO_DEVICE);
lp->tx_skbs[lp->tfd_end].skb = NULL;
lp->tx_skbs[lp->tfd_end].skb_dma = 0;
dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 55b0ddab1776..73ca597ebd1b 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1504,7 +1504,7 @@ static void rhine_init_cam_filter(struct net_device *dev)
/**
* rhine_update_vcam - update VLAN CAM filters
- * @rp: rhine_private data of this Rhine
+ * @dev: network device of this Rhine
*
* Update VLAN CAM filters to match configuration change.
*/
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 6d2a31488a74..b65767f9e499 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -372,7 +372,7 @@ static const struct pci_device_id velocity_pci_id_table[] = {
MODULE_DEVICE_TABLE(pci, velocity_pci_id_table);
-/**
+/*
* Describe the OF device identifiers that we support in this
* device driver. Used for devicetree nodes.
*/
@@ -384,7 +384,7 @@ MODULE_DEVICE_TABLE(of, velocity_of_ids);
/**
* get_chip_name - identifier to name
- * @id: chip identifier
+ * @chip_id: chip identifier
*
* Given a chip identifier return a suitable description. Returns
* a pointer a static string valid while the driver is loaded.
@@ -748,7 +748,7 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
/**
* velocity_mii_write - write MII data
* @regs: velocity registers
- * @index: MII register index
+ * @mii_addr: MII register index
* @data: 16bit data for the MII register
*
* Perform a single write to an MII 16bit register. Returns zero
@@ -869,6 +869,7 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
/**
* velocity_set_media_mode - set media mode
+ * @vptr: velocity adapter
* @mii_status: old MII link state
*
* Check the media link state and configure the flow control
@@ -877,26 +878,13 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
*/
static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
{
- u32 curr_status;
struct mac_regs __iomem *regs = vptr->mac_regs;
vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
- curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);
/* Set mii link status */
set_mii_flow_control(vptr);
- /*
- Check if new status is consistent with current status
- if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE) ||
- (mii_status==curr_status)) {
- vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
- vptr->mii_status=check_connection_type(vptr->mac_regs);
- netdev_info(vptr->netdev, "Velocity link no change\n");
- return 0;
- }
- */
-
if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
@@ -1269,6 +1257,7 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
/**
* setup_queue_timers - Setup interrupt timers
+ * @vptr: velocity adapter
*
* Setup interrupt frequency during suppression (timeout if the frame
* count isn't filled).
@@ -1293,8 +1282,7 @@ static void setup_queue_timers(struct velocity_info *vptr)
/**
* setup_adaptive_interrupts - Setup interrupt suppression
- *
- * @vptr velocity adapter
+ * @vptr: velocity adapter
*
* The velocity is able to suppress interrupt during high interrupt load.
* This function turns on that feature.
@@ -1735,6 +1723,7 @@ err_free_dma_rings_0:
* velocity_free_tx_buf - free transmit buffer
* @vptr: velocity
* @tdinfo: buffer
+ * @td: transmit descriptor to free
*
* Release a transmit buffer. If the buffer was preallocated then
* recycle it, if not then unmap the buffer.
@@ -1909,7 +1898,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
/**
* tx_srv - transmit interrupt service
- * @vptr; Velocity
+ * @vptr: Velocity
*
* Scan the queues looking for transmitted packets that
* we can complete and clean up. Update any statistics as
@@ -2003,8 +1992,7 @@ static inline void velocity_rx_csum(struct rx_desc *rd, struct sk_buff *skb)
* velocity_rx_copy - in place Rx copy for small packets
* @rx_skb: network layer packet buffer candidate
* @pkt_size: received data size
- * @rd: receive packet descriptor
- * @dev: network device
+ * @vptr: velocity adapter
*
* Replace the current skb that is scheduled for Rx processing by a
* shorter, immediately allocated skb, if the received packet is small
@@ -2110,6 +2098,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
/**
* velocity_rx_srv - service RX interrupt
* @vptr: velocity
+ * @budget_left: remaining budget
*
* Walk the receive ring of the velocity adapter and remove
* any received packets from the receive queue. Hand the ring
@@ -2658,7 +2647,6 @@ static const struct net_device_ops velocity_netdev_ops = {
/**
* velocity_init_info - init private data
- * @pdev: PCI device
* @vptr: Velocity info
* @info: Board type
*
@@ -2677,7 +2665,6 @@ static void velocity_init_info(struct velocity_info *vptr,
/**
* velocity_get_pci_info - retrieve PCI info for device
* @vptr: velocity device
- * @pdev: PCI device it matches
*
* Retrieve the PCI configuration space data that interests us from
* the kernel PCI layer
@@ -2714,7 +2701,6 @@ static int velocity_get_pci_info(struct velocity_info *vptr)
/**
* velocity_get_platform_info - retrieve platform info for device
* @vptr: velocity device
- * @pdev: platform device it matches
*
* Retrieve the Platform configuration data that interests us
*/
@@ -2764,8 +2750,9 @@ static u32 velocity_get_link(struct net_device *dev)
/**
* velocity_probe - set up discovered velocity device
- * @pdev: PCI device
- * @ent: PCI device table entry that matched
+ * @dev: PCI device
+ * @info: board table entry that matched
+ * @irq: interrupt info
* @bustype: bus that device is connected to
*
* Configure a discovered adapter from scratch. Return a negative
@@ -2982,6 +2969,7 @@ static int velocity_platform_remove(struct platform_device *pdev)
#ifdef CONFIG_PM_SLEEP
/**
* wol_calc_crc - WOL CRC
+ * @size: size of the wake mask
* @pattern: data pattern
* @mask_pattern: mask
*
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9a15f14daa47..60c199fcb91e 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -106,7 +106,7 @@ static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout)
*/
#define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC)
-/**
+/*
* temac_indirect_busywait - Wait for current indirect register access
* to complete.
*/
@@ -121,7 +121,7 @@ int temac_indirect_busywait(struct temac_local *lp)
return 0;
}
-/**
+/*
* temac_indirect_in32 - Indirect register read access. This function
* must be called without lp->indirect_lock being held.
*/
@@ -136,7 +136,7 @@ u32 temac_indirect_in32(struct temac_local *lp, int reg)
return val;
}
-/**
+/*
* temac_indirect_in32_locked - Indirect register read access. This
* function must be called with lp->indirect_lock being held. Use
* this together with spin_lock_irqsave/spin_unlock_irqrestore to avoid
@@ -164,7 +164,7 @@ u32 temac_indirect_in32_locked(struct temac_local *lp, int reg)
return temac_ior(lp, XTE_LSW0_OFFSET);
}
-/**
+/*
* temac_indirect_out32 - Indirect register write access. This function
* must be called without lp->indirect_lock being held.
*/
@@ -177,7 +177,7 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
spin_unlock_irqrestore(lp->indirect_lock, flags);
}
-/**
+/*
* temac_indirect_out32_locked - Indirect register write access. This
* function must be called with lp->indirect_lock being held. Use
* this together with spin_lock_irqsave/spin_unlock_irqrestore to avoid
@@ -202,7 +202,7 @@ void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value)
WARN_ON(temac_indirect_busywait(lp));
}
-/**
+/*
* temac_dma_in32_* - Memory mapped DMA read, these functions expect a
* register input that is based on DCR word addresses which are then
* converted to memory mapped byte addresses. To be assigned to
@@ -218,7 +218,7 @@ static u32 temac_dma_in32_le(struct temac_local *lp, int reg)
return ioread32(lp->sdma_regs + (reg << 2));
}
-/**
+/*
* temac_dma_out32_* - Memory mapped DMA write, these functions expect
* a register input that is based on DCR word addresses which are then
* converted to memory mapped byte addresses. To be assigned to
@@ -240,7 +240,7 @@ static void temac_dma_out32_le(struct temac_local *lp, int reg, u32 value)
*/
#ifdef CONFIG_PPC_DCR
-/**
+/*
* temac_dma_dcr_in - DCR based DMA read
*/
static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
@@ -248,7 +248,7 @@ static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
return dcr_read(lp->sdma_dcrs, reg);
}
-/**
+/*
* temac_dma_dcr_out - DCR based DMA write
*/
static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
@@ -256,7 +256,7 @@ static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
dcr_write(lp->sdma_dcrs, reg, value);
}
-/**
+/*
* temac_dcr_setup - If the DMA is DCR based, then setup the address and
* I/O functions
*/
@@ -293,7 +293,7 @@ static int temac_dcr_setup(struct temac_local *lp, struct platform_device *op,
#endif
-/**
+/*
* temac_dma_bd_release - Release buffer descriptor rings
*/
static void temac_dma_bd_release(struct net_device *ndev)
@@ -323,7 +323,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
lp->tx_bd_v, lp->tx_bd_p);
}
-/**
+/*
* temac_dma_bd_init - Setup buffer descriptor rings
*/
static int temac_dma_bd_init(struct net_device *ndev)
@@ -593,7 +593,7 @@ static struct temac_option {
{}
};
-/**
+/*
* temac_setoptions
*/
static u32 temac_setoptions(struct net_device *ndev, u32 options)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fa5dc2993520..9aafd3ecdaa4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2038,8 +2038,7 @@ static int axienet_remove(struct platform_device *pdev)
axienet_mdio_teardown(lp);
- if (lp->clk)
- clk_disable_unprepare(lp->clk);
+ clk_disable_unprepare(lp->clk);
of_node_put(lp->phy_node);
lp->phy_node = NULL;
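Dropping the if (lp->clk) guard above is safe because the common clock framework ignores NULL (and error) clock pointers. A paraphrased sketch of the guard inside the clk core, for reference:

void clk_disable(struct clk *clk)
{
	if (IS_ERR_OR_NULL(clk))
		return;
	/* ... actually gate the clock ... */
}

clk_unprepare() starts with the same check, so an optional clock can be passed to clk_disable_unprepare() unconditionally.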
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 991857f6a83c..706fa619b703 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -122,7 +122,7 @@ struct s_rmt {
u_char timer1_exp ; /* flag : timer 1 expired */
u_char timer2_exp ; /* flag : timer 2 expired */
- u_char rm_pad1[1] ;
+ u_char rm_pad1;
} ;
/*
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 974a244f45ba..d07008a818df 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -217,7 +217,6 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
{
struct genevehdr *gnvh = geneve_hdr(skb);
struct metadata_dst *tun_dst = NULL;
- struct pcpu_sw_netstats *stats;
unsigned int len;
int err = 0;
void *oiph;
@@ -296,13 +295,9 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
len = skb->len;
err = gro_cells_receive(&geneve->gro_cells, skb);
- if (likely(err == NET_RX_SUCCESS)) {
- stats = this_cpu_ptr(geneve->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += len;
- u64_stats_update_end(&stats->syncp);
- }
+ if (likely(err == NET_RX_SUCCESS))
+ dev_sw_netstats_rx_add(geneve->dev, len);
+
return;
drop:
/* Consume bad packet */
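The open-coded per-CPU counter update is replaced by the dev_sw_netstats_rx_add() helper, which encapsulates the same sequence. Roughly, based on its netdevice.h definition:

static inline void dev_sw_netstats_rx_add(struct net_device *dev,
					  unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_bytes += len;
	tstats->rx_packets++;
	u64_stats_update_end(&tstats->syncp);
}

Any tunnel driver that keeps software RX stats in dev->tstats can use it as-is, as geneve does here and gtp does below.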
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8e47d0112e5d..030a1a5afe05 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -182,8 +182,6 @@ static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
unsigned int hdrlen, unsigned int role)
{
- struct pcpu_sw_netstats *stats;
-
if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
return 1;
@@ -204,11 +202,7 @@ static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
skb->dev = pctx->dev;
- stats = this_cpu_ptr(pctx->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(pctx->dev, skb->len);
netif_rx(skb);
return 0;
@@ -928,8 +922,8 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
-static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
- struct genl_info *info)
+static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
+ struct genl_info *info)
{
struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
@@ -956,12 +950,12 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (found) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
- return -EOPNOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
if (pctx && pctx_tid)
- return -EEXIST;
+ return ERR_PTR(-EEXIST);
if (!pctx)
pctx = pctx_tid;
@@ -974,13 +968,13 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
- return 0;
+ return pctx;
}
pctx = kmalloc(sizeof(*pctx), GFP_ATOMIC);
if (pctx == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
sock_hold(sk);
pctx->sk = sk;
@@ -1018,7 +1012,7 @@ static int gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
break;
}
- return 0;
+ return pctx;
}
static void pdp_context_free(struct rcu_head *head)
@@ -1036,9 +1030,12 @@ static void pdp_context_delete(struct pdp_ctx *pctx)
call_rcu(&pctx->rcu_head, pdp_context_free);
}
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation);
+
static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
{
unsigned int version;
+ struct pdp_ctx *pctx;
struct gtp_dev *gtp;
struct sock *sk;
int err;
@@ -1068,7 +1065,6 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
}
rtnl_lock();
- rcu_read_lock();
gtp = gtp_find_dev(sock_net(skb->sk), info->attrs);
if (!gtp) {
@@ -1088,10 +1084,15 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
goto out_unlock;
}
- err = gtp_pdp_add(gtp, sk, info);
+ pctx = gtp_pdp_add(gtp, sk, info);
+ if (IS_ERR(pctx)) {
+ err = PTR_ERR(pctx);
+ } else {
+ gtp_tunnel_notify(pctx, GTP_CMD_NEWPDP, GFP_KERNEL);
+ err = 0;
+ }
out_unlock:
- rcu_read_unlock();
rtnl_unlock();
return err;
}
@@ -1159,6 +1160,7 @@ static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
netdev_dbg(pctx->dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+ gtp_tunnel_notify(pctx, GTP_CMD_DELPDP, GFP_ATOMIC);
pdp_context_delete(pctx);
out_unlock:
@@ -1168,6 +1170,14 @@ out_unlock:
static struct genl_family gtp_genl_family;
+enum gtp_multicast_groups {
+ GTP_GENL_MCGRP,
+};
+
+static const struct genl_multicast_group gtp_genl_mcgrps[] = {
+ [GTP_GENL_MCGRP] = { .name = GTP_GENL_MCGRP_NAME },
+};
+
static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
int flags, u32 type, struct pdp_ctx *pctx)
{
@@ -1205,6 +1215,26 @@ nla_put_failure:
return -EMSGSIZE;
}
+static int gtp_tunnel_notify(struct pdp_ctx *pctx, u8 cmd, gfp_t allocation)
+{
+ struct sk_buff *msg;
+ int ret;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, allocation);
+ if (!msg)
+ return -ENOMEM;
+
+ ret = gtp_genl_fill_info(msg, 0, 0, 0, cmd, pctx);
+ if (ret < 0) {
+ nlmsg_free(msg);
+ return ret;
+ }
+
+ ret = genlmsg_multicast_netns(&gtp_genl_family, dev_net(pctx->dev), msg,
+ 0, GTP_GENL_MCGRP, GFP_ATOMIC);
+ return ret;
+}
+
static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
{
struct pdp_ctx *pctx = NULL;
@@ -1303,7 +1333,7 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_O_TEI] = { .type = NLA_U32, },
};
-static const struct genl_ops gtp_genl_ops[] = {
+static const struct genl_small_ops gtp_genl_ops[] = {
{
.cmd = GTP_CMD_NEWPDP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -1333,8 +1363,10 @@ static struct genl_family gtp_genl_family __ro_after_init = {
.policy = gtp_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = gtp_genl_ops,
- .n_ops = ARRAY_SIZE(gtp_genl_ops),
+ .small_ops = gtp_genl_ops,
+ .n_small_ops = ARRAY_SIZE(gtp_genl_ops),
+ .mcgrps = gtp_genl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(gtp_genl_mcgrps),
};
static int __net_init gtp_net_init(struct net *net)
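Returning the new context instead of a plain int lets gtp_genl_new_pdp() send a GTP_CMD_NEWPDP notification for the object it just created (hence the forward declaration of gtp_tunnel_notify() above); errors travel through the same pointer via the ERR_PTR idiom from <linux/err.h>. A minimal sketch of that calling convention, with hypothetical names:

struct foo *foo_add(struct foo_table *tbl)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_ATOMIC);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	return f;
}

	/* caller */
	f = foo_add(tbl);
	if (IS_ERR(f))
		return PTR_ERR(f);		/* decode back to -errno */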
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index a4b3fce69ecd..22010384c4a3 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -151,7 +151,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out;
}
- tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
+ tmpptr = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
rrpriv->tx_ring = tmpptr;
rrpriv->tx_ring_dma = ring_dma;
@@ -160,7 +161,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out;
}
- tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
+ tmpptr = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
+ GFP_KERNEL);
rrpriv->rx_ring = tmpptr;
rrpriv->rx_ring_dma = ring_dma;
@@ -169,7 +171,8 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out;
}
- tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
+ tmpptr = dma_alloc_coherent(&pdev->dev, EVT_RING_SIZE, &ring_dma,
+ GFP_KERNEL);
rrpriv->evt_ring = tmpptr;
rrpriv->evt_ring_dma = ring_dma;
@@ -198,14 +201,14 @@ static int rr_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
if (rrpriv->evt_ring)
- pci_free_consistent(pdev, EVT_RING_SIZE, rrpriv->evt_ring,
- rrpriv->evt_ring_dma);
+ dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rrpriv->evt_ring,
+ rrpriv->evt_ring_dma);
if (rrpriv->rx_ring)
- pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
- rrpriv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rrpriv->rx_ring,
+ rrpriv->rx_ring_dma);
if (rrpriv->tx_ring)
- pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
- rrpriv->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rrpriv->tx_ring,
+ rrpriv->tx_ring_dma);
if (rrpriv->regs)
pci_iounmap(pdev, rrpriv->regs);
if (pdev)
@@ -228,12 +231,12 @@ static void rr_remove_one(struct pci_dev *pdev)
}
unregister_netdev(dev);
- pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
- rr->evt_ring_dma);
- pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
- rr->rx_ring_dma);
- pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
- rr->tx_ring_dma);
+ dma_free_coherent(&pdev->dev, EVT_RING_SIZE, rr->evt_ring,
+ rr->evt_ring_dma);
+ dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, rr->rx_ring,
+ rr->rx_ring_dma);
+ dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, rr->tx_ring,
+ rr->tx_ring_dma);
pci_iounmap(pdev, rr->regs);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -648,8 +651,8 @@ static int rr_init1(struct net_device *dev)
goto error;
}
rrpriv->rx_skbuff[i] = skb;
- addr = pci_map_single(rrpriv->pci_dev, skb->data,
- dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ addr = dma_map_single(&rrpriv->pci_dev->dev, skb->data,
+ dev->mtu + HIPPI_HLEN, DMA_FROM_DEVICE);
/*
* Sanity test to see if we conflict with the DMA
* limitations of the Roadrunner.
@@ -699,10 +702,10 @@ static int rr_init1(struct net_device *dev)
struct sk_buff *skb = rrpriv->rx_skbuff[i];
if (skb) {
- pci_unmap_single(rrpriv->pci_dev,
+ dma_unmap_single(&rrpriv->pci_dev->dev,
rrpriv->rx_ring[i].addr.addrlo,
dev->mtu + HIPPI_HLEN,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
rrpriv->rx_ring[i].size = 0;
set_rraddr(&rrpriv->rx_ring[i].addr, 0);
dev_kfree_skb(skb);
@@ -953,18 +956,18 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
dev->stats.rx_dropped++;
goto defer;
} else {
- pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
- desc->addr.addrlo,
- pkt_len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ pkt_len,
+ DMA_FROM_DEVICE);
skb_put_data(skb, rx_skb->data,
pkt_len);
- pci_dma_sync_single_for_device(rrpriv->pci_dev,
- desc->addr.addrlo,
- pkt_len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ pkt_len,
+ DMA_FROM_DEVICE);
}
}else{
struct sk_buff *newskb;
@@ -974,16 +977,17 @@ static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
if (newskb){
dma_addr_t addr;
- pci_unmap_single(rrpriv->pci_dev,
- desc->addr.addrlo, dev->mtu +
- HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ dev->mtu + HIPPI_HLEN,
+ DMA_FROM_DEVICE);
skb = rx_skb;
skb_put(skb, pkt_len);
rrpriv->rx_skbuff[index] = newskb;
- addr = pci_map_single(rrpriv->pci_dev,
- newskb->data,
- dev->mtu + HIPPI_HLEN,
- PCI_DMA_FROMDEVICE);
+ addr = dma_map_single(&rrpriv->pci_dev->dev,
+ newskb->data,
+ dev->mtu + HIPPI_HLEN,
+ DMA_FROM_DEVICE);
set_rraddr(&desc->addr, addr);
} else {
printk("%s: Out of memory, deferring "
@@ -1068,9 +1072,9 @@ static irqreturn_t rr_interrupt(int irq, void *dev_id)
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- pci_unmap_single(rrpriv->pci_dev,
+ dma_unmap_single(&rrpriv->pci_dev->dev,
desc->addr.addrlo, skb->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
rrpriv->tx_skbuff[txcon] = NULL;
@@ -1110,8 +1114,9 @@ static inline void rr_raz_tx(struct rr_private *rrpriv,
if (skb) {
struct tx_desc *desc = &(rrpriv->tx_ring[i]);
- pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo, skb->len,
+ DMA_TO_DEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
@@ -1132,8 +1137,10 @@ static inline void rr_raz_rx(struct rr_private *rrpriv,
if (skb) {
struct rx_desc *desc = &(rrpriv->rx_ring[i]);
- pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
- dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rrpriv->pci_dev->dev,
+ desc->addr.addrlo,
+ dev->mtu + HIPPI_HLEN,
+ DMA_FROM_DEVICE);
desc->size = 0;
set_rraddr(&desc->addr, 0);
dev_kfree_skb(skb);
@@ -1188,17 +1195,17 @@ static int rr_open(struct net_device *dev)
goto error;
}
- rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
- 256 * sizeof(struct ring_ctrl),
- &dma_addr);
+ rrpriv->rx_ctrl = dma_alloc_coherent(&pdev->dev,
+ 256 * sizeof(struct ring_ctrl),
+ &dma_addr, GFP_KERNEL);
if (!rrpriv->rx_ctrl) {
ecode = -ENOMEM;
goto error;
}
rrpriv->rx_ctrl_dma = dma_addr;
- rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
- &dma_addr);
+ rrpriv->info = dma_alloc_coherent(&pdev->dev, sizeof(struct rr_info),
+ &dma_addr, GFP_KERNEL);
if (!rrpriv->info) {
ecode = -ENOMEM;
goto error;
@@ -1237,13 +1244,13 @@ static int rr_open(struct net_device *dev)
spin_unlock_irqrestore(&rrpriv->lock, flags);
if (rrpriv->info) {
- pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
- rrpriv->info_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct rr_info),
+ rrpriv->info, rrpriv->info_dma);
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
- pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
- rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
+ dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
+ rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}
@@ -1365,12 +1372,12 @@ static int rr_close(struct net_device *dev)
rr_raz_tx(rrpriv, dev);
rr_raz_rx(rrpriv, dev);
- pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
- rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
+ dma_free_coherent(&pdev->dev, 256 * sizeof(struct ring_ctrl),
+ rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
- pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
- rrpriv->info_dma);
+ dma_free_coherent(&pdev->dev, sizeof(struct rr_info), rrpriv->info,
+ rrpriv->info_dma);
rrpriv->info = NULL;
spin_unlock_irqrestore(&rrpriv->lock, flags);
@@ -1430,8 +1437,8 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
index = txctrl->pi;
rrpriv->tx_skbuff[index] = skb;
- set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
- rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
+ set_rraddr(&rrpriv->tx_ring[index].addr,
+ dma_map_single(&rrpriv->pci_dev->dev, skb->data, len + 8, DMA_TO_DEVICE));
rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
txctrl->pi = (index + 1) % TX_RING_ENTRIES;
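This conversion retires the legacy PCI DMA wrappers in favor of the generic DMA API. The calls map one to one, with &pdev->dev replacing the struct pci_dev and an explicit GFP flag on allocation; a sketch of the correspondence (pdev is the driver's struct pci_dev):

	/* coherent allocations: device pointer plus an explicit GFP flag */
	buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, size, buf, dma_handle);

	/* streaming mappings: direction constants rename mechanically */
	addr = dma_map_single(&pdev->dev, skb->data, len, DMA_FROM_DEVICE);
	dma_unmap_single(&pdev->dev, addr, len, DMA_FROM_DEVICE);

Note that pci_alloc_consistent() hard-coded GFP_ATOMIC; the probe and open paths converted here may sleep, so GFP_KERNEL is the better choice. PCI_DMA_TODEVICE and PCI_DMA_FROMDEVICE become DMA_TO_DEVICE and DMA_FROM_DEVICE.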
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index c20e7ef18bc9..c0bf7d78276e 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -583,7 +583,7 @@ static const struct nla_policy hwsim_genl_policy[MAC802154_HWSIM_ATTR_MAX + 1] =
};
/* Generic Netlink operations array */
-static const struct genl_ops hwsim_nl_ops[] = {
+static const struct genl_small_ops hwsim_nl_ops[] = {
{
.cmd = MAC802154_HWSIM_CMD_NEW_RADIO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -628,8 +628,8 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.maxattr = MAC802154_HWSIM_ATTR_MAX,
.policy = hwsim_genl_policy,
.module = THIS_MODULE,
- .ops = hwsim_nl_ops,
- .n_ops = ARRAY_SIZE(hwsim_nl_ops),
+ .small_ops = hwsim_nl_ops,
+ .n_small_ops = ARRAY_SIZE(hwsim_nl_ops),
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
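genl_small_ops is a trimmed variant of genl_ops for families that only need doit/dumpit handlers; registering through .small_ops/.n_small_ops saves the space of the unused per-op fields (such as start and done). The same conversion appears in gtp above and macsec below. A minimal registration sketch with hypothetical names:

static const struct genl_small_ops foo_genl_ops[] = {
	{
		.cmd = FOO_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = foo_genl_get,
	},
};

static struct genl_family foo_genl_family __ro_after_init = {
	.name = "foo",
	.version = 1,
	.maxattr = FOO_ATTR_MAX,
	.policy = foo_genl_policy,
	.module = THIS_MODULE,
	.small_ops = foo_genl_ops,
	.n_small_ops = ARRAY_SIZE(foo_genl_ops),
};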
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 0e63d35320aa..6bfac1efe037 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -254,8 +254,8 @@ static void gsi_irq_enable(struct gsi *gsi)
/* We don't use inter-EE channel or event interrupts */
val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
- val &= ~MSK_INTER_EE_CH_CTRL_FMASK;
- val &= ~MSK_INTER_EE_EV_CTRL_FMASK;
+ val &= ~INTER_EE_CH_CTRL_FMASK;
+ val &= ~INTER_EE_EV_CTRL_FMASK;
iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
val = GENMASK(gsi->channel_count - 1, 0);
@@ -271,7 +271,7 @@ static void gsi_irq_enable(struct gsi *gsi)
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
/* Never enable GSI_BREAK_POINT */
- val = GSI_CNTXT_GSI_IRQ_ALL & ~EN_BREAK_POINT_FMASK;
+ val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
@@ -1074,8 +1074,8 @@ static void gsi_isr_glob_ee(struct gsi *gsi)
val &= ~ERROR_INT_FMASK;
- if (val & EN_GP_INT1_FMASK) {
- val ^= EN_GP_INT1_FMASK;
+ if (val & GP_INT1_FMASK) {
+ val ^= GP_INT1_FMASK;
gsi_isr_gp_int1(gsi);
}
@@ -1600,7 +1600,7 @@ err_unwind_modem:
/* Compute which modem channels need to be deallocated */
mask ^= gsi->modem_channel_bitmap;
while (mask) {
- u32 channel_id = __fls(mask);
+ channel_id = __fls(mask);
mask ^= BIT(channel_id);
@@ -1628,7 +1628,7 @@ static void gsi_channel_teardown(struct gsi *gsi)
mutex_lock(&gsi->mutex);
while (mask) {
- u32 channel_id = __fls(mask);
+ channel_id = __fls(mask);
mask ^= BIT(channel_id);
@@ -1972,7 +1972,6 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
*/
init_dummy_netdev(&gsi->dummy_dev);
- /* Get the GSI IRQ and request for it to wake the system */
ret = platform_get_irq_byname(pdev, "gsi");
if (ret <= 0) {
dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
@@ -1987,31 +1986,26 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
}
gsi->irq = irq;
- ret = enable_irq_wake(gsi->irq);
- if (ret)
- dev_warn(dev, "error %d enabling gsi wake irq\n", ret);
- gsi->irq_wake_enabled = !ret;
-
/* Get GSI memory range and map it */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
if (!res) {
dev_err(dev, "DT error getting \"gsi\" memory property\n");
ret = -ENODEV;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
size = resource_size(res);
if (res->start > U32_MAX || size > U32_MAX - res->start) {
dev_err(dev, "DT memory resource \"gsi\" out of range\n");
ret = -EINVAL;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
gsi->virt = ioremap(res->start, size);
if (!gsi->virt) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
ret = -ENOMEM;
- goto err_disable_irq_wake;
+ goto err_free_irq;
}
ret = gsi_channel_init(gsi, prefetch, count, data, modem_alloc);
@@ -2025,9 +2019,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev, bool prefetch,
err_iounmap:
iounmap(gsi->virt);
-err_disable_irq_wake:
- if (gsi->irq_wake_enabled)
- (void)disable_irq_wake(gsi->irq);
+err_free_irq:
free_irq(gsi->irq, gsi);
return ret;
@@ -2038,8 +2030,6 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
- if (gsi->irq_wake_enabled)
- (void)disable_irq_wake(gsi->irq);
free_irq(gsi->irq, gsi);
iounmap(gsi->virt);
}
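Besides moving the wake-capable IRQ setup out of gsi.c (it reappears in ipa_interrupt.c below), the hunks above fix a local variable shadowing problem: the inner u32 declaration hid the function-scope channel_id of the same name, which -Wshadow builds flag. An illustrative sketch of the pitfall:

	u32 channel_id = gsi->channel_count;	/* outer variable */

	while (mask) {
		u32 channel_id = __fls(mask);	/* shadows the outer one */

		mask ^= BIT(channel_id);
	}
	/* the outer channel_id was never touched inside the loop */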
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 061312773df0..3f9f29d531c4 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -150,7 +150,6 @@ struct gsi {
struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt;
u32 irq;
- bool irq_wake_enabled;
u32 channel_count;
u32 evt_ring_count;
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index acc9e744c67d..8e0e9350c383 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -258,6 +258,11 @@
GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
(0x0001f080 + 0x4000 * (ee))
+#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
+ GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
+#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
+ (0x0001f088 + 0x4000 * (ee))
+/* The masks below are used for the TYPE_IRQ and TYPE_IRQ_MASK registers */
#define CH_CTRL_FMASK GENMASK(0, 0)
#define EV_CTRL_FMASK GENMASK(1, 1)
#define GLOB_EE_FMASK GENMASK(2, 2)
@@ -265,18 +270,6 @@
#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
#define GENERAL_FMASK GENMASK(6, 6)
-
-#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
- (0x0001f088 + 0x4000 * (ee))
-#define MSK_CH_CTRL_FMASK GENMASK(0, 0)
-#define MSK_EV_CTRL_FMASK GENMASK(1, 1)
-#define MSK_GLOB_EE_FMASK GENMASK(2, 2)
-#define MSK_IEOB_FMASK GENMASK(3, 3)
-#define MSK_INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
-#define MSK_INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
-#define MSK_GENERAL_FMASK GENMASK(6, 6)
#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
@@ -328,57 +321,39 @@
GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
(0x0001f100 + 0x4000 * (ee))
-#define ERROR_INT_FMASK GENMASK(0, 0)
-#define GP_INT1_FMASK GENMASK(1, 1)
-#define GP_INT2_FMASK GENMASK(2, 2)
-#define GP_INT3_FMASK GENMASK(3, 3)
-
#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
(0x0001f108 + 0x4000 * (ee))
-#define EN_ERROR_INT_FMASK GENMASK(0, 0)
-#define EN_GP_INT1_FMASK GENMASK(1, 1)
-#define EN_GP_INT2_FMASK GENMASK(2, 2)
-#define EN_GP_INT3_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
-
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
(0x0001f110 + 0x4000 * (ee))
-#define CLR_ERROR_INT_FMASK GENMASK(0, 0)
-#define CLR_GP_INT1_FMASK GENMASK(1, 1)
-#define CLR_GP_INT2_FMASK GENMASK(2, 2)
-#define CLR_GP_INT3_FMASK GENMASK(3, 3)
+/* The masks below are used for the global IRQ STTS, EN, and CLR registers */
+#define ERROR_INT_FMASK GENMASK(0, 0)
+#define GP_INT1_FMASK GENMASK(1, 1)
+#define GP_INT2_FMASK GENMASK(2, 2)
+#define GP_INT3_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
(0x0001f118 + 0x4000 * (ee))
-#define BREAK_POINT_FMASK GENMASK(0, 0)
-#define BUS_ERROR_FMASK GENMASK(1, 1)
-#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-
#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
(0x0001f120 + 0x4000 * (ee))
-#define EN_BREAK_POINT_FMASK GENMASK(0, 0)
-#define EN_BUS_ERROR_FMASK GENMASK(1, 1)
-#define EN_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define EN_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
-
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
(0x0001f128 + 0x4000 * (ee))
-#define CLR_BREAK_POINT_FMASK GENMASK(0, 0)
-#define CLR_BUS_ERROR_FMASK GENMASK(1, 1)
-#define CLR_CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
-#define CLR_MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+/* The masks below are used for the general IRQ STTS, EN, and CLR registers */
+#define BREAK_POINT_FMASK GENMASK(0, 0)
+#define BUS_ERROR_FMASK GENMASK(1, 1)
+#define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
+#define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
+#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)
#define GSI_CNTXT_INTSET_OFFSET \
GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
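The register reshuffle above works because, within each interrupt group, the STTS, EN, and CLR registers share one bit layout, so a single set of GENMASK()-based field masks serves all three (as the new comments state). A usage sketch under that assumption:

	/* GENMASK(h, l) sets bits l..h, e.g. GENMASK(3, 0) == 0xf */
	u32 pending = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

	if (pending & GP_INT1_FMASK)
		iowrite32(GP_INT1_FMASK,	/* same mask, CLR register */
			  gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);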
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index bdbfeed359db..43f5f5d93cb0 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -81,7 +81,6 @@ struct gsi_tre {
/* gsi_tre->flags mask values (in CPU byte order) */
#define TRE_FLAGS_CHAIN_FMASK GENMASK(0, 0)
-#define TRE_FLAGS_IEOB_FMASK GENMASK(8, 8)
#define TRE_FLAGS_IEOT_FMASK GENMASK(9, 9)
#define TRE_FLAGS_BEI_FMASK GENMASK(10, 10)
#define TRE_FLAGS_TYPE_FMASK GENMASK(23, 16)
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 55115cfb2972..6c2371084c55 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -10,7 +10,6 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_wakeup.h>
-#include <linux/notifier.h>
#include "ipa_version.h"
#include "gsi.h"
@@ -29,14 +28,24 @@ struct ipa_smp2p;
struct ipa_interrupt;
/**
+ * enum ipa_flag - IPA state flags
+ * @IPA_FLAG_RESUMED: Whether resume from suspend has been signaled
+ * @IPA_FLAG_COUNT: Number of defined IPA flags
+ */
+enum ipa_flag {
+ IPA_FLAG_RESUMED,
+ IPA_FLAG_COUNT, /* Last; not a flag */
+};
+
+/**
* struct ipa - IPA information
* @gsi: Embedded GSI structure
+ * @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
* @modem_rproc: Remoteproc handle for modem subsystem
* @smp2p: SMP2P information
* @clock: IPA clocking information
- * @suspend_ref: Whether clock reference preventing suspend taken
* @table_addr: DMA address of filter/route table content
* @table_virt: Virtual address of filter/route table content
* @interrupt: IPA Interrupt information
@@ -71,6 +80,7 @@ struct ipa_interrupt;
*/
struct ipa {
struct gsi gsi;
+ DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
struct rproc *modem_rproc;
@@ -78,7 +88,6 @@ struct ipa {
void *notifier;
struct ipa_smp2p *smp2p;
struct ipa_clock *clock;
- atomic_t suspend_ref;
dma_addr_t table_addr;
__le64 *table_virt;
@@ -105,8 +114,6 @@ struct ipa {
void *zero_virt;
size_t zero_size;
- struct wakeup_source *wakeup_source;
-
/* Bit masks indicating endpoint state */
u32 available; /* supported by hardware */
u32 filter_map;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 398f2e47043d..a2c0fde05819 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -4,7 +4,7 @@
* Copyright (C) 2018-2020 Linaro Ltd.
*/
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/mutex.h>
#include <linux/clk.h>
#include <linux/device.h>
@@ -51,7 +51,7 @@
* @config_path: Configuration space interconnect
*/
struct ipa_clock {
- atomic_t count;
+ refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
struct icc_path *memory_path;
@@ -195,14 +195,13 @@ static void ipa_clock_disable(struct ipa *ipa)
*/
bool ipa_clock_get_additional(struct ipa *ipa)
{
- return !!atomic_inc_not_zero(&ipa->clock->count);
+ return refcount_inc_not_zero(&ipa->clock->count);
}
/* Get an IPA clock reference. If the reference count is non-zero, it is
* incremented and return is immediate. Otherwise it is checked again
- * under protection of the mutex, and if appropriate the clock (and
- * interconnects) are enabled suspended endpoints (if any) are resumed
- * before returning.
+ * under protection of the mutex, and if appropriate the IPA clock
+ * is enabled.
*
* Incrementing the reference count is intentionally deferred until
* after the clock is running and endpoints are resumed.
@@ -229,28 +228,23 @@ void ipa_clock_get(struct ipa *ipa)
goto out_mutex_unlock;
}
- ipa_endpoint_resume(ipa);
-
- atomic_inc(&clock->count);
+ refcount_set(&clock->count, 1);
out_mutex_unlock:
mutex_unlock(&clock->mutex);
}
-/* Attempt to remove an IPA clock reference. If this represents the last
- * reference, suspend endpoints and disable the clock (and interconnects)
- * under protection of a mutex.
+/* Attempt to remove an IPA clock reference. If this represents the
+ * last reference, disable the IPA clock under protection of the mutex.
*/
void ipa_clock_put(struct ipa *ipa)
{
struct ipa_clock *clock = ipa->clock;
/* If this is not the last reference there's nothing more to do */
- if (!atomic_dec_and_mutex_lock(&clock->count, &clock->mutex))
+ if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
return;
- ipa_endpoint_suspend(ipa);
-
ipa_clock_disable(ipa);
mutex_unlock(&clock->mutex);
@@ -294,7 +288,7 @@ struct ipa_clock *ipa_clock_init(struct device *dev)
goto err_kfree;
mutex_init(&clock->mutex);
- atomic_set(&clock->count, 0);
+ refcount_set(&clock->count, 0);
return clock;
@@ -311,7 +305,7 @@ void ipa_clock_exit(struct ipa_clock *clock)
{
struct clk *clk = clock->core;
- WARN_ON(atomic_read(&clock->count) != 0);
+ WARN_ON(refcount_read(&clock->count) != 0);
mutex_destroy(&clock->mutex);
ipa_interconnect_exit(clock);
kfree(clock);
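Switching the clock reference count from atomic_t to refcount_t gains saturation and underflow checking, and refcount_dec_and_mutex_lock() keeps the drop-to-zero path atomic with respect to the mutex. A condensed sketch of the get/put pattern used above, assuming <linux/refcount.h> and <linux/mutex.h>; power_up()/power_down() are hypothetical stand-ins for enabling and disabling the clock:

struct obj {
	refcount_t count;
	struct mutex mutex;	/* protects the 0 <-> 1 transitions */
};

static void obj_get(struct obj *o)
{
	if (refcount_inc_not_zero(&o->count))
		return;				/* fast path: already up */

	mutex_lock(&o->mutex);
	if (refcount_inc_not_zero(&o->count))	/* re-check under the lock */
		goto out;
	power_up(o);
	refcount_set(&o->count, 1);		/* 0 -> 1 only under mutex */
out:
	mutex_unlock(&o->mutex);
}

static void obj_put(struct obj *o)
{
	if (!refcount_dec_and_mutex_lock(&o->count, &o->mutex))
		return;				/* not the last reference */
	power_down(o);
	mutex_unlock(&o->mutex);
}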
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index b7efd7c95e9c..b40b711cf4bd 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -42,11 +42,8 @@
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
IPA_STATUS_OPCODE_PACKET = 0x01,
- IPA_STATUS_OPCODE_NEW_FRAG_RULE = 0x02,
IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
- IPA_STATUS_OPCODE_LOG = 0x10,
- IPA_STATUS_OPCODE_DCMP = 0x20,
IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
};
@@ -54,13 +51,6 @@ enum ipa_status_opcode {
enum ipa_status_exception {
/* 0 means no exception */
IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
- IPA_STATUS_EXCEPTION_IPTYPE = 0x04,
- IPA_STATUS_EXCEPTION_PACKET_LENGTH = 0x08,
- IPA_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
- IPA_STATUS_EXCEPTION_SW_FILT = 0x20,
- /* The meaning of the next value depends on whether the IP version */
- IPA_STATUS_EXCEPTION_NAT = 0x40, /* IPv4 */
- IPA_STATUS_EXCEPTION_IPV6CT = IPA_STATUS_EXCEPTION_NAT,
};
/* Status element provided by hardware */
@@ -79,36 +69,9 @@ struct ipa_status {
};
/* Field masks for struct ipa_status structure fields */
-
-#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
-
#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
-
-#define IPA_STATUS_FLAGS1_FLT_LOCAL_FMASK GENMASK(0, 0)
-#define IPA_STATUS_FLAGS1_FLT_HASH_FMASK GENMASK(1, 1)
-#define IPA_STATUS_FLAGS1_FLT_GLOBAL_FMASK GENMASK(2, 2)
-#define IPA_STATUS_FLAGS1_FLT_RET_HDR_FMASK GENMASK(3, 3)
-#define IPA_STATUS_FLAGS1_FLT_RULE_ID_FMASK GENMASK(13, 4)
-#define IPA_STATUS_FLAGS1_RT_LOCAL_FMASK GENMASK(14, 14)
-#define IPA_STATUS_FLAGS1_RT_HASH_FMASK GENMASK(15, 15)
-#define IPA_STATUS_FLAGS1_UCP_FMASK GENMASK(16, 16)
-#define IPA_STATUS_FLAGS1_RT_TBL_IDX_FMASK GENMASK(21, 17)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
-#define IPA_STATUS_FLAGS2_NAT_HIT_FMASK GENMASK_ULL(0, 0)
-#define IPA_STATUS_FLAGS2_NAT_ENTRY_IDX_FMASK GENMASK_ULL(13, 1)
-#define IPA_STATUS_FLAGS2_NAT_TYPE_FMASK GENMASK_ULL(15, 14)
-#define IPA_STATUS_FLAGS2_TAG_INFO_FMASK GENMASK_ULL(63, 16)
-
-#define IPA_STATUS_FLAGS3_SEQ_NUM_FMASK GENMASK(7, 0)
-#define IPA_STATUS_FLAGS3_TOD_CTR_FMASK GENMASK(31, 8)
-
-#define IPA_STATUS_FLAGS4_HDR_LOCAL_FMASK GENMASK(0, 0)
-#define IPA_STATUS_FLAGS4_HDR_OFFSET_FMASK GENMASK(10, 1)
-#define IPA_STATUS_FLAGS4_FRAG_HIT_FMASK GENMASK(11, 11)
-#define IPA_STATUS_FLAGS4_FRAG_RULE_FMASK GENMASK(15, 12)
-#define IPA_STATUS_FLAGS4_HW_SPECIFIC_FMASK GENMASK(31, 16)
-
#ifdef IPA_VALIDATE
static void ipa_endpoint_validate_build(void)
@@ -1048,8 +1011,7 @@ static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
}
/* The format of a packet status element is the same for several status
- * types (opcodes). The NEW_FRAG_RULE, LOG, DCMP (decompression) types
- * aren't currently supported
+ * types (opcodes). Other types aren't currently supported.
*/
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
@@ -1086,7 +1048,7 @@ static bool ipa_status_drop_packet(const struct ipa_status *status)
{
u32 val;
- /* Deaggregation exceptions we drop; others we consume */
+ /* Deaggregation exceptions we drop; all other types we consume */
if (status->exception)
return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
@@ -1432,11 +1394,10 @@ void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
return;
- if (!endpoint->toward_ipa)
+ if (!endpoint->toward_ipa) {
ipa_endpoint_replenish_disable(endpoint);
-
- if (!endpoint->toward_ipa)
(void)ipa_endpoint_program_suspend(endpoint, true);
+ }
/* IPA v3.5.1 doesn't use channel stop for suspend */
stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
@@ -1471,6 +1432,9 @@ void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
void ipa_endpoint_suspend(struct ipa *ipa)
{
+ if (!ipa->setup_complete)
+ return;
+
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
@@ -1482,6 +1446,9 @@ void ipa_endpoint_suspend(struct ipa *ipa)
void ipa_endpoint_resume(struct ipa *ipa)
{
+ if (!ipa->setup_complete)
+ return;
+
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index 90353987c45f..cc1ea28f7bc2 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -237,8 +237,16 @@ struct ipa_interrupt *ipa_interrupt_setup(struct ipa *ipa)
goto err_kfree;
}
+ ret = enable_irq_wake(irq);
+ if (ret) {
+ dev_err(dev, "error %d enabling wakeup for \"ipa\" IRQ\n", ret);
+ goto err_free_irq;
+ }
+
return interrupt;
+err_free_irq:
+ free_irq(interrupt->irq, interrupt);
err_kfree:
kfree(interrupt);
@@ -248,6 +256,12 @@ err_kfree:
/* Tear down the IPA interrupt framework */
void ipa_interrupt_teardown(struct ipa_interrupt *interrupt)
{
+ struct device *dev = &interrupt->ipa->pdev->dev;
+ int ret;
+
+ ret = disable_irq_wake(interrupt->irq);
+ if (ret)
+ dev_err(dev, "error %d disabling \"ipa\" IRQ wakeup\n", ret);
free_irq(interrupt->irq, interrupt);
kfree(interrupt);
}
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 1fdfec41e442..cd4d993b0bbb 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -75,17 +75,19 @@
* @ipa: IPA pointer
* @irq_id: IPA interrupt type (unused)
*
- * When in suspended state, the IPA can trigger a resume by sending a SUSPEND
- * IPA interrupt.
+ * If an RX endpoint is in suspend state, and the IPA has a packet
+ * destined for that endpoint, the IPA generates a SUSPEND interrupt
+ * to inform the AP that it should resume the endpoint. If we get
+ * one of these interrupts we just resume everything.
*/
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
- /* Take a a single clock reference to prevent suspend. All
- * endpoints will be resumed as a result. This reference will
- * be dropped when we get a power management suspend request.
+ /* Just report the event, and let system resume handle the rest.
+ * More than one endpoint could signal this; if so, ignore
+ * all but the first.
*/
- if (!atomic_xchg(&ipa->suspend_ref, 1))
- ipa_clock_get(ipa);
+ if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
+ pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);
/* Acknowledge/clear the suspend interrupt on all endpoints */
ipa_interrupt_suspend_clear_all(ipa->interrupt);
@@ -106,6 +108,7 @@ int ipa_setup(struct ipa *ipa)
{
struct ipa_endpoint *exception_endpoint;
struct ipa_endpoint *command_endpoint;
+ struct device *dev = &ipa->pdev->dev;
int ret;
/* Setup for IPA v3.5.1 has some slight differences */
@@ -123,6 +126,10 @@ int ipa_setup(struct ipa *ipa)
ipa_uc_setup(ipa);
+ ret = device_init_wakeup(dev, true);
+ if (ret)
+ goto err_uc_teardown;
+
ipa_endpoint_setup(ipa);
/* We need to use the AP command TX endpoint to perform other
@@ -158,7 +165,7 @@ int ipa_setup(struct ipa *ipa)
ipa->setup_complete = true;
- dev_info(&ipa->pdev->dev, "IPA driver setup completed successfully\n");
+ dev_info(dev, "IPA driver setup completed successfully\n");
return 0;
@@ -173,6 +180,8 @@ err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
+ (void)device_init_wakeup(dev, false);
+err_uc_teardown:
ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt);
@@ -200,6 +209,7 @@ static void ipa_teardown(struct ipa *ipa)
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
+ (void)device_init_wakeup(&ipa->pdev->dev, false);
ipa_uc_teardown(ipa);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
ipa_interrupt_teardown(ipa->interrupt);
@@ -508,7 +518,6 @@ static int ipa_config(struct ipa *ipa, const struct ipa_data *data)
* is held after initialization completes, and won't get dropped
* unless/until a system suspend request arrives.
*/
- atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa);
ipa_hardware_config(ipa);
@@ -544,7 +553,6 @@ err_endpoint_deconfig:
err_hardware_deconfig:
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
return ret;
}
@@ -562,7 +570,6 @@ static void ipa_deconfig(struct ipa *ipa)
ipa_endpoint_deconfig(ipa);
ipa_hardware_deconfig(ipa);
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
}
static int ipa_firmware_load(struct device *dev)
@@ -709,7 +716,6 @@ static void ipa_validate_build(void)
*/
static int ipa_probe(struct platform_device *pdev)
{
- struct wakeup_source *wakeup_source;
struct device *dev = &pdev->dev;
const struct ipa_data *data;
struct ipa_clock *clock;
@@ -717,8 +723,8 @@ static int ipa_probe(struct platform_device *pdev)
bool modem_alloc;
bool modem_init;
struct ipa *ipa;
- phandle phandle;
bool prefetch;
+ phandle ph;
int ret;
ipa_validate_build();
@@ -730,13 +736,13 @@ static int ipa_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
/* We rely on remoteproc to tell us about modem state changes */
- phandle = of_property_read_phandle(dev->of_node, "modem-remoteproc");
- if (!phandle) {
+ ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
+ if (!ph) {
dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
return -EINVAL;
}
- rproc = rproc_get_by_phandle(phandle);
+ rproc = rproc_get_by_phandle(ph);
if (!rproc)
return -EPROBE_DEFER;
@@ -758,27 +764,17 @@ static int ipa_probe(struct platform_device *pdev)
goto err_clock_exit;
}
- /* Create a wakeup source. */
- wakeup_source = wakeup_source_register(dev, "ipa");
- if (!wakeup_source) {
- /* The most likely reason for failure is memory exhaustion */
- ret = -ENOMEM;
- goto err_clock_exit;
- }
-
/* Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
if (!ipa) {
ret = -ENOMEM;
- goto err_wakeup_source_unregister;
+ goto err_clock_exit;
}
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
ipa->modem_rproc = rproc;
ipa->clock = clock;
- atomic_set(&ipa->suspend_ref, 0);
- ipa->wakeup_source = wakeup_source;
ipa->version = data->version;
ret = ipa_reg_init(ipa);
@@ -857,8 +853,6 @@ err_reg_exit:
ipa_reg_exit(ipa);
err_kfree_ipa:
kfree(ipa);
-err_wakeup_source_unregister:
- wakeup_source_unregister(wakeup_source);
err_clock_exit:
ipa_clock_exit(clock);
err_rproc_put:
@@ -872,11 +866,8 @@ static int ipa_remove(struct platform_device *pdev)
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock;
- struct wakeup_source *wakeup_source;
int ret;
- wakeup_source = ipa->wakeup_source;
-
if (ipa->setup_complete) {
ret = ipa_modem_stop(ipa);
if (ret)
@@ -893,7 +884,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_mem_exit(ipa);
ipa_reg_exit(ipa);
kfree(ipa);
- wakeup_source_unregister(wakeup_source);
ipa_clock_exit(clock);
rproc_put(rproc);
@@ -907,13 +897,22 @@ static int ipa_remove(struct platform_device *pdev)
* Return: Always returns zero
*
* Called by the PM framework when a system suspend operation is invoked.
+ * Suspends endpoints and releases the clock reference held to keep
+ * the IPA clock running until this point.
*/
static int ipa_suspend(struct device *dev)
{
struct ipa *ipa = dev_get_drvdata(dev);
+ /* When a suspended RX endpoint has a packet ready to receive, we
+ * get an IPA SUSPEND interrupt. We trigger a system resume in
+ * that case, but only on the first such interrupt since suspend.
+ */
+ __clear_bit(IPA_FLAG_RESUMED, ipa->flags);
+
+ ipa_endpoint_suspend(ipa);
+
ipa_clock_put(ipa);
- atomic_set(&ipa->suspend_ref, 0);
return 0;
}
@@ -925,6 +924,8 @@ static int ipa_suspend(struct device *dev)
* Return: Always returns 0
*
* Called by the PM framework when a system resume operation is invoked.
+ * Takes an IPA clock reference to keep the clock running until suspend,
+ * and resumes endpoints.
*/
static int ipa_resume(struct device *dev)
{
@@ -933,9 +934,10 @@ static int ipa_resume(struct device *dev)
/* This clock reference will keep the IPA out of suspend
* until we get a power management suspend request.
*/
- atomic_set(&ipa->suspend_ref, 1);
ipa_clock_get(ipa);
+ ipa_endpoint_resume(ipa);
+
return 0;
}
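The driver now models SUSPEND interrupts as wakeup events instead of grabbing a clock reference from interrupt context: ipa_setup() registers the device as wakeup-capable, the handler reports the event, and the suspend callback re-arms the one-shot flag. A condensed sketch of the pattern, assuming device_init_wakeup() succeeded at setup time:

	/* interrupt handler: first SUSPEND interrupt since suspend wins */
	if (!test_and_set_bit(IPA_FLAG_RESUMED, ipa->flags))
		pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

	/* ->suspend() callback: re-arm for the next suspend cycle */
	__clear_bit(IPA_FLAG_RESUMED, ipa->flags);

Passing true for the "hard" argument aborts an in-progress system suspend, so a packet arriving during the suspend transition is not lost.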
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index eb4e39fa7d4b..e542598fd775 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -426,7 +426,7 @@ enum ipa_cs_offload_en {
IPA_CS_RSVD
};
-/** enum ipa_aggr_en - aggregation type field in ENDP_INIT_AGGR_N */
+/** enum ipa_aggr_en - aggregation enable field in ENDP_INIT_AGGR_N */
enum ipa_aggr_en {
IPA_BYPASS_AGGR = 0,
IPA_ENABLE_AGGR = 1,
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 1a0b04e0ab74..b382d47bc70d 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -144,7 +144,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
* should only receive responses from the microcontroller when it has
* sent it a request message.
*
- * We can drop the clock reference taken in ipa_uc_init() once we
+ * We can drop the clock reference taken in ipa_uc_setup() once we
* know the microcontroller has finished its initialization.
*/
switch (shared->response) {
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5bca94c99006..60b7d93bb834 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -684,6 +684,13 @@ static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
[IFLA_IPVLAN_FLAGS] = { .type = NLA_U16 },
};
+static struct net *ipvlan_get_link_net(const struct net_device *dev)
+{
+ struct ipvl_dev *ipvlan = netdev_priv(dev);
+
+ return dev_net(ipvlan->phy_dev);
+}
+
static struct rtnl_link_ops ipvlan_link_ops = {
.kind = "ipvlan",
.priv_size = sizeof(struct ipvl_dev),
@@ -691,6 +698,7 @@ static struct rtnl_link_ops ipvlan_link_ops = {
.setup = ipvlan_link_setup,
.newlink = ipvlan_link_new,
.dellink = ipvlan_link_delete,
+ .get_link_net = ipvlan_get_link_net,
};
int ipvlan_link_register(struct rtnl_link_ops *ops)
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 787ac2c8e74e..11ca5fa902a1 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1613,7 +1613,7 @@ static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
- [MACSEC_SA_ATTR_PN] = { .type = NLA_MIN_LEN, .len = 4 },
+ [MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
.len = MACSEC_KEYID_LEN, },
[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
@@ -3287,7 +3287,7 @@ done:
return skb->len;
}
-static const struct genl_ops macsec_genl_ops[] = {
+static const struct genl_small_ops macsec_genl_ops[] = {
{
.cmd = MACSEC_CMD_GET_TXSC,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -3363,8 +3363,8 @@ static struct genl_family macsec_fam __ro_after_init = {
.policy = macsec_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = macsec_genl_ops,
- .n_ops = ARRAY_SIZE(macsec_genl_ops),
+ .small_ops = macsec_genl_ops,
+ .n_small_ops = ARRAY_SIZE(macsec_genl_ops),
};
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
@@ -3647,30 +3647,10 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
static void macsec_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
- int cpu;
-
if (!dev->tstats)
return;
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats;
- struct pcpu_sw_netstats tmp;
- int start;
-
- stats = per_cpu_ptr(dev->tstats, cpu);
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- tmp.rx_packets = stats->rx_packets;
- tmp.rx_bytes = stats->rx_bytes;
- tmp.tx_packets = stats->tx_packets;
- tmp.tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-
- s->rx_packets += tmp.rx_packets;
- s->rx_bytes += tmp.rx_bytes;
- s->tx_packets += tmp.tx_packets;
- s->tx_bytes += tmp.tx_bytes;
- }
+ dev_fetch_sw_netstats(s, dev->tstats);
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
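dev_fetch_sw_netstats() folds the per-CPU pcpu_sw_netstats counters into an rtnl_link_stats64, replacing the open-coded for_each_possible_cpu() loop deleted above. A sketch of an ndo_get_stats64 implementation built on it (foo_get_stats64 is a hypothetical name):

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *s)
{
	dev_fetch_sw_netstats(s, dev->tstats);

	/* counters not kept in dev->tstats still need manual copies */
	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
}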
diff --git a/drivers/net/mdio/Kconfig b/drivers/net/mdio/Kconfig
new file mode 100644
index 000000000000..a10cc460d7cf
--- /dev/null
+++ b/drivers/net/mdio/Kconfig
@@ -0,0 +1,251 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# MDIO Layer Configuration
+#
+
+menuconfig MDIO_DEVICE
+ tristate "MDIO bus device drivers"
+ help
+ MDIO devices and driver infrastructure code.
+
+if MDIO_DEVICE
+
+config MDIO_BUS
+ tristate
+ default m if PHYLIB=m
+ default MDIO_DEVICE
+ help
+ This internal symbol is used for link time dependencies and it
+ reflects whether the mdio_bus/mdio_device code is built as a
+ loadable module or built-in.
+
+config OF_MDIO
+ def_tristate PHYLIB
+ depends on OF
+ depends on PHYLIB
+ select FIXED_PHY
+ help
+ OpenFirmware MDIO bus (Ethernet PHY) accessors
+
+if MDIO_BUS
+
+config MDIO_DEVRES
+ tristate
+
+config MDIO_SUN4I
+ tristate "Allwinner sun4i MDIO interface support"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the Allwinner SoCs that have an EMAC (A10,
+ A12, A10s, etc.)
+
+config MDIO_XGENE
+ tristate "APM X-Gene SoC MDIO bus controller"
+ depends on ARCH_XGENE || COMPILE_TEST
+ help
+ This module provides a driver for the MDIO busses found in the
+ APM X-Gene SoCs.
+
+config MDIO_ASPEED
+ tristate "ASPEED MDIO bus controller"
+ depends on ARCH_ASPEED || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM
+ help
+ This module provides a driver for the independent MDIO bus
+ controllers found in the ASPEED AST2600 SoC. This is a driver for the
+ third revision of the ASPEED MDIO register interface - the first two
+ revisions are the "old" and "new" interfaces found in the AST2400 and
+ AST2500, embedded in the MAC. For legacy reasons, the FTGMAC100 driver
+ continues to drive the embedded MDIO controller for the AST2400 and
+ AST2500 SoCs, so say N if AST2600 support is not required.
+
+config MDIO_BITBANG
+ tristate "Bitbanged MDIO buses"
+ help
+ This module implements the MDIO bus protocol in software,
+ for use by low level drivers that export the ability to
+ drive the relevant pins.
+
+ If in doubt, say N.
+
+config MDIO_BCM_IPROC
+ tristate "Broadcom iProc MDIO bus controller"
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on HAS_IOMEM && OF_MDIO
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for the MDIO busses found in the
+ Broadcom iProc SoCs.
+
+config MDIO_BCM_UNIMAC
+ tristate "Broadcom UniMAC MDIO bus controller"
+ depends on HAS_IOMEM
+ help
+ This module provides a driver for the Broadcom UniMAC MDIO busses.
+ This hardware can be found in the Broadcom GENET Ethernet MAC
+ controllers as well as some Broadcom Ethernet switches such as the
+ Starfighter 2 switches.
+
+config MDIO_CAVIUM
+ tristate
+
+config MDIO_GPIO
+ tristate "GPIO lib-based bitbanged MDIO buses"
+ depends on MDIO_BITBANG
+ depends on GPIOLIB || COMPILE_TEST
+ help
+ Supports GPIO lib-based MDIO busses.
+
+ To compile this driver as a module, choose M here: the module
+ will be called mdio-gpio.
+
+config MDIO_HISI_FEMAC
+ tristate "Hisilicon FEMAC MDIO bus controller"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This module provides a driver for the MDIO busses found in the
+ Hisilicon SoCs that have a Fast Ethernet MAC.
+
+config MDIO_I2C
+ tristate
+ depends on I2C
+ help
+ Support I2C based PHYs. This provides an MDIO bus bridged
+ to I2C to allow PHYs connected in I2C mode to be accessed
+ using the existing infrastructure.
+
+ This is a library module.
+
+config MDIO_MVUSB
+ tristate "Marvell USB to MDIO Adapter"
+ depends on USB
+ select MDIO_DEVRES
+ help
+ A USB to MDIO converter present on development boards for
+ Marvell's Link Street family of Ethernet switches.
+
+config MDIO_MSCC_MIIM
+ tristate "Microsemi MIIM interface support"
+ depends on HAS_IOMEM
+ select MDIO_DEVRES
+ help
+ This driver supports the MIIM (MDIO) interface found in the network
+ switches of the Microsemi SoCs; it is recommended to switch on
+ CONFIG_HIGH_RES_TIMERS.
+
+config MDIO_MOXART
+ tristate "MOXA ART MDIO interface support"
+ depends on ARCH_MOXART || COMPILE_TEST
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the MOXA ART SoC.
+
+config MDIO_OCTEON
+ tristate "Octeon and some ThunderX SOCs MDIO buses"
+ depends on (64BIT && OF_MDIO) || COMPILE_TEST
+ depends on HAS_IOMEM
+ select MDIO_CAVIUM
+ select MDIO_DEVRES
+ help
+ This module provides a driver for the Octeon and ThunderX MDIO
+ buses. It is required by the Octeon and ThunderX ethernet device
+ drivers on some systems.
+
+config MDIO_IPQ4019
+ tristate "Qualcomm IPQ4019 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ help
+ This driver supports the MDIO interface found in Qualcomm
+ IPQ40xx series SoCs.
+
+config MDIO_IPQ8064
+ tristate "Qualcomm IPQ8064 MDIO interface support"
+ depends on HAS_IOMEM && OF_MDIO
+ depends on MFD_SYSCON
+ help
+ This driver supports the MDIO interface found in the network
+ interface units of the IPQ8064 SoC.
+
+config MDIO_THUNDER
+ tristate "ThunderX SOCs MDIO buses"
+ depends on 64BIT
+ depends on PCI
+ select MDIO_CAVIUM
+ select MDIO_DEVRES
+ help
+ This driver supports the MDIO interfaces found on Cavium
+ ThunderX SoCs when the MDIO bus device appears as a PCI
+ device.
+
+comment "MDIO Multiplexers"
+
+config MDIO_BUS_MUX
+ tristate
+ depends on OF_MDIO
+ help
+ This module provides a driver framework for MDIO bus
+ multiplexers which connect one of several child MDIO busses
+ to a parent bus. Switching between child busses is done by
+ device specific drivers.
+
+config MDIO_BUS_MUX_MESON_G12A
+ tristate "Amlogic G12a based MDIO bus multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
+ select MDIO_BUS_MUX
+ default m if ARCH_MESON
+ help
+ This module provides a driver for the MDIO multiplexer/glue of
+ the Amlogic G12a SoC. The multiplexer connects either the external
+ or the internal MDIO bus to the parent bus.
+
+config MDIO_BUS_MUX_BCM_IPROC
+ tristate "Broadcom iProc based MDIO bus multiplexers"
+ depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ default ARCH_BCM_IPROC
+ help
+ This module provides a driver for MDIO bus multiplexers found in
+ iProc based Broadcom SoCs. The multiplexer connects one of several
+ child MDIO busses to a parent bus. Busses can be internal as well as
+ external, and the selection logic lies inside the multiplexer itself.
+
+config MDIO_BUS_MUX_GPIO
+ tristate "GPIO controlled MDIO bus multiplexers"
+ depends on OF_GPIO && OF_MDIO
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via GPIO lines. The multiplexer connects one of
+ several child MDIO busses to a parent bus. Child bus
+ selection is under the control of GPIO lines.
+
+config MDIO_BUS_MUX_MULTIPLEXER
+ tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
+ depends on OF_MDIO
+ select MULTIPLEXER
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for an MDIO bus multiplexer
+ that is controlled via the kernel multiplexer subsystem. The
+ bus multiplexer connects one of several child MDIO busses to
+ a parent bus. Child bus selection is under the control of
+ the kernel multiplexer subsystem.
+
+config MDIO_BUS_MUX_MMIOREG
+ tristate "MMIO device-controlled MDIO bus multiplexers"
+ depends on OF_MDIO && HAS_IOMEM
+ select MDIO_BUS_MUX
+ help
+ This module provides a driver for MDIO bus multiplexers that
+ are controlled via a simple memory-mapped device, like an FPGA.
+ The multiplexer connects one of several child MDIO busses to a
+ parent bus. Child bus selection is under the control of one of
+ the FPGA's registers.
+
+ Currently, only 8/16/32-bit registers are supported.
+
+endif
+endif
diff --git a/drivers/net/mdio/Makefile b/drivers/net/mdio/Makefile
new file mode 100644
index 000000000000..5c498dde463f
--- /dev/null
+++ b/drivers/net/mdio/Makefile
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Linux MDIO bus drivers
+
+obj-$(CONFIG_OF_MDIO) += of_mdio.o
+
+obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
+obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
+obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
+obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
+obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
+obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
+obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
+obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
+obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
+obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
+obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
+obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
+obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
+obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
+obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
+obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
+
+obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
+obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
+obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
+obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
+obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
+obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
diff --git a/drivers/net/phy/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index cad820568f75..cad820568f75 100644
--- a/drivers/net/phy/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/mdio/mdio-bcm-iproc.c
index 77fc970cdfde..77fc970cdfde 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-bcm-iproc.c
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
index fbd36891ee64..fbd36891ee64 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/mdio/mdio-bcm-unimac.c
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/mdio/mdio-bitbang.c
index 5136275c8e73..5136275c8e73 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/mdio/mdio-bitbang.c
diff --git a/drivers/net/phy/mdio-cavium.c b/drivers/net/mdio/mdio-cavium.c
index 1afd6fc1a351..1afd6fc1a351 100644
--- a/drivers/net/phy/mdio-cavium.c
+++ b/drivers/net/mdio/mdio-cavium.c
diff --git a/drivers/net/phy/mdio-cavium.h b/drivers/net/mdio/mdio-cavium.h
index a2245d436f5d..a2245d436f5d 100644
--- a/drivers/net/phy/mdio-cavium.h
+++ b/drivers/net/mdio/mdio-cavium.h
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 1b00235d7dc5..1b00235d7dc5 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
diff --git a/drivers/net/phy/mdio-hisi-femac.c b/drivers/net/mdio/mdio-hisi-femac.c
index f231c2fbb1de..f231c2fbb1de 100644
--- a/drivers/net/phy/mdio-hisi-femac.c
+++ b/drivers/net/mdio/mdio-hisi-femac.c
diff --git a/drivers/net/phy/mdio-i2c.c b/drivers/net/mdio/mdio-i2c.c
index 0746e2cc39ae..09200a70b315 100644
--- a/drivers/net/phy/mdio-i2c.c
+++ b/drivers/net/mdio/mdio-i2c.c
@@ -10,10 +10,9 @@
* of their settings.
*/
#include <linux/i2c.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/phy.h>
-#include "mdio-i2c.h"
-
/*
* I2C bus addresses 0x50 and 0x51 are normally an EEPROM, which is
* specified to be present in SFP modules. These correspond with PHY
diff --git a/drivers/net/phy/mdio-ipq4019.c b/drivers/net/mdio/mdio-ipq4019.c
index 1ce81ff2f41d..25c25ea6da66 100644
--- a/drivers/net/phy/mdio-ipq4019.c
+++ b/drivers/net/mdio/mdio-ipq4019.c
@@ -12,6 +12,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
+#define MDIO_MODE_REG 0x40
#define MDIO_ADDR_REG 0x44
#define MDIO_DATA_WRITE_REG 0x48
#define MDIO_DATA_READ_REG 0x4c
@@ -20,9 +21,15 @@
#define MDIO_CMD_ACCESS_START BIT(8)
#define MDIO_CMD_ACCESS_CODE_READ 0
#define MDIO_CMD_ACCESS_CODE_WRITE 1
+#define MDIO_CMD_ACCESS_CODE_C45_ADDR 0
+#define MDIO_CMD_ACCESS_CODE_C45_WRITE 1
+#define MDIO_CMD_ACCESS_CODE_C45_READ 2
-#define ipq4019_MDIO_TIMEOUT 10000
-#define ipq4019_MDIO_SLEEP 10
+/* 0 = Clause 22, 1 = Clause 45 */
+#define MDIO_MODE_C45 BIT(8)
+
+#define IPQ4019_MDIO_TIMEOUT 10000
+#define IPQ4019_MDIO_SLEEP 10
struct ipq4019_mdio_data {
void __iomem *membase;
@@ -35,25 +42,50 @@ static int ipq4019_mdio_wait_busy(struct mii_bus *bus)
return readl_poll_timeout(priv->membase + MDIO_CMD_REG, busy,
(busy & MDIO_CMD_ACCESS_BUSY) == 0,
- ipq4019_MDIO_SLEEP, ipq4019_MDIO_TIMEOUT);
+ IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT);
}
static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
unsigned int cmd;
- /* Reject clause 45 */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ /* Clause 45 support */
+ if (regnum & MII_ADDR_C45) {
+ unsigned int mmd = (regnum >> 16) & 0x1F;
+ unsigned int reg = regnum & 0xFFFF;
+
+ /* Enter Clause 45 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data |= MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+ } else {
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ;
+ }
/* issue read command */
writel(cmd, priv->membase + MDIO_CMD_REG);
@@ -62,6 +94,15 @@ static int ipq4019_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
+ if (regnum & MII_ADDR_C45) {
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ;
+
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+ }
+
/* Read and return data */
return readl(priv->membase + MDIO_DATA_READ_REG);
}
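For reference, the Clause 45 decode in the read path above follows phylib's packed register-number convention: bit 30 (MII_ADDR_C45) flags a Clause 45 access, bits 20:16 carry the MMD (device) address, and bits 15:0 the register. A minimal sketch of building such a regnum, assuming the MII_ADDR_C45 definition phylib provides:

#include <linux/phy.h>	/* MII_ADDR_C45 */
#include <linux/mdio.h>	/* MDIO_MMD_* / MDIO_AN_* register map */

/* Pack an MMD/register pair the way ipq4019_mdio_read() unpacks it. */
static inline int example_c45_regnum(unsigned int mmd, unsigned int reg)
{
	return MII_ADDR_C45 | ((mmd & 0x1f) << 16) | (reg & 0xffff);
}

/* e.g. reading the AN advertisement register of MMD 7 would be:
 * ipq4019_mdio_read(bus, phy_addr,
 *		     example_c45_regnum(MDIO_MMD_AN, MDIO_AN_ADVERTISE));
 */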
@@ -70,23 +111,57 @@ static int ipq4019_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct ipq4019_mdio_data *priv = bus->priv;
+ unsigned int data;
unsigned int cmd;
- /* Reject clause 45 */
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
if (ipq4019_mdio_wait_busy(bus))
return -ETIMEDOUT;
- /* issue the phy address and reg */
- writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ /* Clause 45 support */
+ if (regnum & MII_ADDR_C45) {
+ unsigned int mmd = (regnum >> 16) & 0x1F;
+ unsigned int reg = regnum & 0xFFFF;
+
+ /* Enter Clause 45 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data |= MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and mmd */
+ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG);
+
+ /* issue reg */
+ writel(reg, priv->membase + MDIO_DATA_WRITE_REG);
+
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR;
+
+ writel(cmd, priv->membase + MDIO_CMD_REG);
+
+ if (ipq4019_mdio_wait_busy(bus))
+ return -ETIMEDOUT;
+ } else {
+ /* Enter Clause 22 mode */
+ data = readl(priv->membase + MDIO_MODE_REG);
+
+ data &= ~MDIO_MODE_C45;
+
+ writel(data, priv->membase + MDIO_MODE_REG);
+
+ /* issue the phy address and reg */
+ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG);
+ }
/* issue write data */
writel(value, priv->membase + MDIO_DATA_WRITE_REG);
- cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
/* issue write command */
+ if (regnum & MII_ADDR_C45)
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE;
+ else
+ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE;
+
writel(cmd, priv->membase + MDIO_CMD_REG);
/* Wait write complete */
diff --git a/drivers/net/phy/mdio-ipq8064.c b/drivers/net/mdio/mdio-ipq8064.c
index 1bd18857e1c5..1bd18857e1c5 100644
--- a/drivers/net/phy/mdio-ipq8064.c
+++ b/drivers/net/mdio/mdio-ipq8064.c
diff --git a/drivers/net/phy/mdio-moxart.c b/drivers/net/mdio/mdio-moxart.c
index b72c6d185175..b72c6d185175 100644
--- a/drivers/net/phy/mdio-moxart.c
+++ b/drivers/net/mdio/mdio-moxart.c
diff --git a/drivers/net/phy/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index 11f583fd4611..11f583fd4611 100644
--- a/drivers/net/phy/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/mdio/mdio-mux-bcm-iproc.c
index 42fb5f166136..42fb5f166136 100644
--- a/drivers/net/phy/mdio-mux-bcm-iproc.c
+++ b/drivers/net/mdio/mdio-mux-bcm-iproc.c
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/mdio/mdio-mux-gpio.c
index 10a758fdc9e6..10a758fdc9e6 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/mdio/mdio-mux-gpio.c
diff --git a/drivers/net/phy/mdio-mux-meson-g12a.c b/drivers/net/mdio/mdio-mux-meson-g12a.c
index bf86c9c7a288..bf86c9c7a288 100644
--- a/drivers/net/phy/mdio-mux-meson-g12a.c
+++ b/drivers/net/mdio/mdio-mux-meson-g12a.c
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/mdio/mdio-mux-mmioreg.c
index d1a8780e24d8..d1a8780e24d8 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/mdio/mdio-mux-mmioreg.c
diff --git a/drivers/net/phy/mdio-mux-multiplexer.c b/drivers/net/mdio/mdio-mux-multiplexer.c
index d6564381aa3e..d6564381aa3e 100644
--- a/drivers/net/phy/mdio-mux-multiplexer.c
+++ b/drivers/net/mdio/mdio-mux-multiplexer.c
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/mdio/mdio-mux.c
index 6a1d3540210b..6a1d3540210b 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/mdio/mdio-mux.c
diff --git a/drivers/net/phy/mdio-mvusb.c b/drivers/net/mdio/mdio-mvusb.c
index d5eabddfdf51..d5eabddfdf51 100644
--- a/drivers/net/phy/mdio-mvusb.c
+++ b/drivers/net/mdio/mdio-mvusb.c
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/mdio/mdio-octeon.c
index d1e1009d51af..d1e1009d51af 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/mdio/mdio-octeon.c
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/mdio/mdio-sun4i.c
index f798de3276dc..f798de3276dc 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/mdio/mdio-sun4i.c
diff --git a/drivers/net/phy/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 3d7eda99d34e..3d7eda99d34e 100644
--- a/drivers/net/phy/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
diff --git a/drivers/net/phy/mdio-xgene.c b/drivers/net/mdio/mdio-xgene.c
index 34990eaa3298..461207cdf5d6 100644
--- a/drivers/net/phy/mdio-xgene.c
+++ b/drivers/net/mdio/mdio-xgene.c
@@ -11,6 +11,7 @@
#include <linux/efi.h>
#include <linux/if_vlan.h>
#include <linux/io.h>
+#include <linux/mdio/mdio-xgene.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
@@ -18,7 +19,6 @@
#include <linux/prefetch.h>
#include <linux/phy.h>
#include <net/ip.h>
-#include "mdio-xgene.h"
static bool xgene_mdio_status;
diff --git a/drivers/of/of_mdio.c b/drivers/net/mdio/of_mdio.c
index cb32d7ef4938..4daf94bb56a5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -338,6 +338,29 @@ unregister:
EXPORT_SYMBOL(of_mdiobus_register);
/**
+ * of_mdio_find_device - Given a device tree node, find the mdio_device
+ * @np: pointer to the mdio_device's device tree node
+ *
+ * If successful, returns a pointer to the mdio_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
+ * The caller should call put_device() on the mdio_device after its use
+ */
+struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ struct device *d;
+
+ if (!np)
+ return NULL;
+
+ d = bus_find_device_by_of_node(&mdio_bus_type, np);
+ if (!d)
+ return NULL;
+
+ return to_mdio_device(d);
+}
+EXPORT_SYMBOL(of_mdio_find_device);
+
+/**
* of_phy_find_device - Give a PHY node, find the phy_device
* @phy_np: Pointer to the phy's device tree node
*
@@ -346,19 +369,16 @@ EXPORT_SYMBOL(of_mdiobus_register);
*/
struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
- struct device *d;
struct mdio_device *mdiodev;
- if (!phy_np)
+ mdiodev = of_mdio_find_device(phy_np);
+ if (!mdiodev)
return NULL;
- d = bus_find_device_by_of_node(&mdio_bus_type, phy_np);
- if (d) {
- mdiodev = to_mdio_device(d);
- if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
- return to_phy_device(d);
- put_device(d);
- }
+ if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
+ return to_phy_device(&mdiodev->dev);
+
+ put_device(&mdiodev->dev);
return NULL;
}
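A hedged sketch of a consumer of the new of_mdio_find_device() helper, mirroring the refcount contract documented in its kerneldoc above; the device-node argument and the error policy are illustrative only:

#include <linux/of_mdio.h>
#include <linux/mdio.h>

static int example_lookup_mdio_dev(struct device_node *np)
{
	struct mdio_device *mdiodev;

	mdiodev = of_mdio_find_device(np);
	if (!mdiodev)
		return -EPROBE_DEFER;	/* the backing bus may probe later */

	/* ... use the mdio_device here ... */

	put_device(&mdiodev->dev);	/* drop the reference taken above */
	return 0;
}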
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
index 4dfb389dbfd8..ade086eed955 100644
--- a/drivers/net/netdevsim/Makefile
+++ b/drivers/net/netdevsim/Makefile
@@ -3,7 +3,7 @@
obj-$(CONFIG_NETDEVSIM) += netdevsim.o
netdevsim-objs := \
- netdev.o dev.o fib.o bus.o health.o udp_tunnels.o
+ netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
netdevsim-objs += \
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 32f339fedb21..d07061417675 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -40,7 +40,9 @@ static struct dentry *nsim_dev_ddir;
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
static int
-nsim_dev_take_snapshot(struct devlink *devlink, struct netlink_ext_ack *extack,
+nsim_dev_take_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
u8 **data)
{
void *dummy_data;
@@ -68,7 +70,7 @@ static ssize_t nsim_dev_take_snapshot_write(struct file *file,
devlink = priv_to_devlink(nsim_dev);
- err = nsim_dev_take_snapshot(devlink, NULL, &dummy_data);
+ err = nsim_dev_take_snapshot(devlink, NULL, NULL, &dummy_data);
if (err)
return err;
@@ -201,6 +203,8 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
return PTR_ERR(nsim_dev->ports_ddir);
debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
&nsim_dev->fw_update_status);
+ debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_overwrite_mask);
debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
&nsim_dev->max_macs);
debugfs_create_bool("test1", 0600, nsim_dev->ddir,
@@ -697,6 +701,7 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev);
static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action, enum devlink_reload_limit limit,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
@@ -713,7 +718,8 @@ static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
return 0;
}
-static int nsim_dev_reload_up(struct devlink *devlink,
+static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
@@ -726,6 +732,7 @@ static int nsim_dev_reload_up(struct devlink *devlink,
return -EINVAL;
}
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
return nsim_dev_reload_create(nsim_dev, extack);
}
@@ -740,24 +747,27 @@ static int nsim_dev_info_get(struct devlink *devlink,
#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
-static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
- const char *component,
+static int nsim_dev_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack)
{
struct nsim_dev *nsim_dev = devlink_priv(devlink);
int i;
+ if ((params->overwrite_mask & ~nsim_dev->fw_update_overwrite_mask) != 0)
+ return -EOPNOTSUPP;
+
if (nsim_dev->fw_update_status) {
devlink_flash_update_begin_notify(devlink);
devlink_flash_update_status_notify(devlink,
"Preparing to flash",
- component, 0, 0);
+ params->component, 0, 0);
}
for (i = 0; i < NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE; i++) {
if (nsim_dev->fw_update_status)
devlink_flash_update_status_notify(devlink, "Flashing",
- component,
+ params->component,
i * NSIM_DEV_FLASH_CHUNK_SIZE,
NSIM_DEV_FLASH_SIZE);
msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
@@ -765,11 +775,13 @@ static int nsim_dev_flash_update(struct devlink *devlink, const char *file_name,
if (nsim_dev->fw_update_status) {
devlink_flash_update_status_notify(devlink, "Flashing",
- component,
+ params->component,
NSIM_DEV_FLASH_SIZE,
NSIM_DEV_FLASH_SIZE);
+ devlink_flash_update_timeout_notify(devlink, "Flash select",
+ params->component, 81);
devlink_flash_update_status_notify(devlink, "Flashing done",
- component, 0, 0);
+ params->component, 0, 0);
devlink_flash_update_end_notify(devlink);
}
@@ -875,6 +887,9 @@ nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
}
static const struct devlink_ops nsim_dev_devlink_ops = {
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = nsim_dev_reload_down,
.reload_up = nsim_dev_reload_up,
.info_get = nsim_dev_info_get,
@@ -989,6 +1004,7 @@ static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
+ nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->fib_data = nsim_fib_create(devlink, extack);
if (IS_ERR(nsim_dev->fib_data))
@@ -1047,6 +1063,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
INIT_LIST_HEAD(&nsim_dev->port_list);
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
+ nsim_dev->fw_update_overwrite_mask = 0;
nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
spin_lock_init(&nsim_dev->fa_cookie_lock);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
new file mode 100644
index 000000000000..f1884d90a876
--- /dev/null
+++ b/drivers/net/netdevsim/ethtool.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+#include <linux/random.h>
+
+#include "netdevsim.h"
+
+static void
+nsim_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->ethtool.report_stats_rx)
+ pause_stats->rx_pause_frames = 1;
+ if (ns->ethtool.report_stats_tx)
+ pause_stats->tx_pause_frames = 2;
+}
+
+static void
+nsim_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ pause->autoneg = 0; /* We don't support ksettings, so can't pretend */
+ pause->rx_pause = ns->ethtool.rx;
+ pause->tx_pause = ns->ethtool.tx;
+}
+
+static int
+nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ ns->ethtool.rx = pause->rx_pause;
+ ns->ethtool.tx = pause->tx_pause;
+ return 0;
+}
+
+static const struct ethtool_ops nsim_ethtool_ops = {
+ .get_pause_stats = nsim_get_pause_stats,
+ .get_pauseparam = nsim_get_pauseparam,
+ .set_pauseparam = nsim_set_pauseparam,
+};
+
+void nsim_ethtool_init(struct netdevsim *ns)
+{
+ struct dentry *ethtool, *dir;
+
+ ns->netdev->ethtool_ops = &nsim_ethtool_ops;
+
+ ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
+
+ dir = debugfs_create_dir("pause", ethtool);
+ debugfs_create_bool("report_stats_rx", 0600, dir,
+ &ns->ethtool.report_stats_rx);
+ debugfs_create_bool("report_stats_tx", 0600, dir,
+ &ns->ethtool.report_stats_tx);
+}
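As a point of comparison, a hardware driver's ->get_pause_stats() would follow the same shape as the netdevsim stub above: the ethtool core pre-fills the structure with ETHTOOL_STAT_NOT_SET, so only counters the driver assigns get reported. A sketch with hypothetical private data and register helpers:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void example_get_pause_stats(struct net_device *dev,
				    struct ethtool_pause_stats *stats)
{
	struct example_priv *priv = netdev_priv(dev);	/* hypothetical */

	/* Fields left untouched keep ETHTOOL_STAT_NOT_SET and are
	 * omitted from the netlink reply.
	 */
	stats->rx_pause_frames = example_read_mib(priv, RX_PAUSE_CNT); /* hypothetical */
	stats->tx_pause_frames = example_read_mib(priv, TX_PAUSE_CNT); /* hypothetical */
}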
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 97cfb015a50b..7178468302c8 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -301,6 +301,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
dev->netdev_ops = &nsim_netdev_ops;
+ nsim_ethtool_init(ns);
err = nsim_udp_tunnels_info_create(nsim_dev, dev);
if (err)
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 284f7092241d..827fc80f50a0 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -20,6 +20,7 @@
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>
#include <net/devlink.h>
+#include <net/udp_tunnel.h>
#include <net/xdp.h>
#define DRV_NAME "netdevsim"
@@ -50,6 +51,13 @@ struct nsim_ipsec {
u32 ok;
};
+struct nsim_ethtool {
+ bool rx;
+ bool tx;
+ bool report_stats_rx;
+ bool report_stats_tx;
+};
+
struct netdevsim {
struct net_device *netdev;
struct nsim_dev *nsim_dev;
@@ -77,15 +85,20 @@ struct netdevsim {
struct {
u32 inject_error;
u32 sleep;
- u32 ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
+
+ struct nsim_ethtool ethtool;
};
struct netdevsim *
nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
void nsim_destroy(struct netdevsim *ns);
+void nsim_ethtool_init(struct netdevsim *ns);
+
void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev);
int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
struct net_device *dev);
@@ -185,6 +198,7 @@ struct nsim_dev {
struct list_head port_list;
struct mutex port_list_lock; /* protects port list */
bool fw_update_status;
+ u32 fw_update_overwrite_mask;
u32 max_macs;
bool test1;
bool dont_allow_reload;
@@ -197,9 +211,13 @@ struct nsim_dev {
bool fail_trap_policer_set;
bool fail_trap_policer_counter_get;
struct {
+ struct udp_tunnel_nic_shared utn_shared;
+ u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
bool sync_all;
bool open_only;
bool ipv4_only;
+ bool shared;
+ bool static_iana_vxlan;
u32 sleep;
} udp_ports;
};
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
index 22c06a76033c..6ab023acefd6 100644
--- a/drivers/net/netdevsim/udp_tunnels.c
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -22,11 +22,13 @@ nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
msleep(ns->udp_ports.sleep);
if (!ret) {
- if (ns->udp_ports.ports[table][entry])
+ if (ns->udp_ports.ports[table][entry]) {
+ WARN(1, "entry already in use\n");
ret = -EBUSY;
- else
+ } else {
ns->udp_ports.ports[table][entry] =
be16_to_cpu(ti->port) << 16 | ti->type;
+ }
}
netdev_info(dev, "set [%d, %d] type %d family %d port %d - %d\n",
@@ -50,10 +52,13 @@ nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
if (!ret) {
u32 val = be16_to_cpu(ti->port) << 16 | ti->type;
- if (val == ns->udp_ports.ports[table][entry])
+ if (val == ns->udp_ports.ports[table][entry]) {
ns->udp_ports.ports[table][entry] = 0;
- else
+ } else {
+ WARN(1, "entry not installed %x vs %x\n",
+ val, ns->udp_ports.ports[table][entry]);
ret = -ENOENT;
+ }
}
netdev_info(dev, "unset [%d, %d] type %d family %d port %d - %d\n",
@@ -107,7 +112,7 @@ nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
struct net_device *dev = file->private_data;
struct netdevsim *ns = netdev_priv(dev);
- memset(&ns->udp_ports.ports, 0, sizeof(ns->udp_ports.ports));
+ memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
rtnl_lock();
udp_tunnel_nic_reset_ntf(dev);
rtnl_unlock();
@@ -127,6 +132,17 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
struct netdevsim *ns = netdev_priv(dev);
struct udp_tunnel_nic_info *info;
+ if (nsim_dev->udp_ports.shared && nsim_dev->udp_ports.open_only) {
+ dev_err(&nsim_dev->nsim_bus_dev->dev,
+ "shared can't be used in conjunction with open_only\n");
+ return -EINVAL;
+ }
+
+ if (!nsim_dev->udp_ports.shared)
+ ns->udp_ports.ports = ns->udp_ports.__ports;
+ else
+ ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
+
debugfs_create_u32("udp_ports_inject_error", 0600,
ns->nsim_dev_port->ddir,
&ns->udp_ports.inject_error);
@@ -168,6 +184,10 @@ int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
if (nsim_dev->udp_ports.ipv4_only)
info->flags |= UDP_TUNNEL_NIC_INFO_IPV4_ONLY;
+ if (nsim_dev->udp_ports.shared)
+ info->shared = &nsim_dev->udp_ports.utn_shared;
+ if (nsim_dev->udp_ports.static_iana_vxlan)
+ info->flags |= UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
dev->udp_tunnel_nic_info = info;
return 0;
@@ -187,6 +207,10 @@ void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev)
&nsim_dev->udp_ports.open_only);
debugfs_create_bool("udp_ports_ipv4_only", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.ipv4_only);
+ debugfs_create_bool("udp_ports_shared", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.shared);
+ debugfs_create_bool("udp_ports_static_iana_vxlan", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.static_iana_vxlan);
debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir,
&nsim_dev->udp_ports.sleep);
}
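The netdevsim side of the shared-table support leans on a pointer-to-array: udp_ports.ports now has type u32 (*)[NSIM_UDP_TUNNEL_N_PORTS] and can alias either the per-netdev __ports or the device-level __ports, while every ports[table][entry] access stays syntactically unchanged. A standalone illustration of the idiom:

#include <stdio.h>

#define N_PORTS 4

static unsigned int private_ports[2][N_PORTS];
static unsigned int shared_ports[2][N_PORTS];

int main(void)
{
	int shared = 1;
	/* Same declaration shape as netdevsim's udp_ports.ports. */
	unsigned int (*ports)[N_PORTS] = shared ? shared_ports : private_ports;

	ports[1][2] = 4789;	/* lands in whichever table was selected */
	printf("shared_ports[1][2] = %u\n", shared_ports[1][2]);
	return 0;
}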
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
new file mode 100644
index 000000000000..074fb3f5db18
--- /dev/null
+++ b/drivers/net/pcs/Kconfig
@@ -0,0 +1,22 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# PCS Layer Configuration
+#
+
+menu "PCS device drivers"
+
+config PCS_XPCS
+ tristate "Synopsys DesignWare XPCS controller"
+ select MDIO_BUS
+ depends on MDIO_DEVICE
+ help
+ This module provides helper functions for Synopsys DesignWare XPCS
+ controllers.
+
+config PCS_LYNX
+ tristate
+ help
+ This module provides helpers to phylink for managing the Lynx PCS,
+ which is part of the Layerscape and QorIQ Ethernet SerDes.
+
+endmenu
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
new file mode 100644
index 000000000000..c23146755972
--- /dev/null
+++ b/drivers/net/pcs/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for Linux PCS drivers
+
+obj-$(CONFIG_PCS_XPCS) += pcs-xpcs.o
+obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
new file mode 100644
index 000000000000..62bb9272dcb2
--- /dev/null
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/* Copyright 2020 NXP
+ * Lynx PCS MDIO helpers
+ */
+
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+#include <linux/pcs-lynx.h>
+
+#define SGMII_CLOCK_PERIOD_NS 8 /* PCS is clocked at 125 MHz */
+#define LINK_TIMER_VAL(ns) ((u32)((ns) / SGMII_CLOCK_PERIOD_NS))
+
+#define SGMII_AN_LINK_TIMER_NS 1600000 /* defined by SGMII spec */
+
+#define LINK_TIMER_LO 0x12
+#define LINK_TIMER_HI 0x13
+#define IF_MODE 0x14
+#define IF_MODE_SGMII_EN BIT(0)
+#define IF_MODE_USE_SGMII_AN BIT(1)
+#define IF_MODE_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+#define IF_MODE_SPEED_MSK GENMASK(3, 2)
+#define IF_MODE_HALF_DUPLEX BIT(4)
+
+enum sgmii_speed {
+ SGMII_SPEED_10 = 0,
+ SGMII_SPEED_100 = 1,
+ SGMII_SPEED_1000 = 2,
+ SGMII_SPEED_2500 = 2,
+};
+
+#define phylink_pcs_to_lynx(pl_pcs) container_of((pl_pcs), struct lynx_pcs, pcs)
+
+static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int status, lpa;
+
+ status = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_BMSR);
+ if (status < 0)
+ return;
+
+ state->link = !!(status & MDIO_STAT1_LSTATUS);
+ state->an_complete = !!(status & MDIO_AN_STAT1_COMPLETE);
+ if (!state->link || !state->an_complete)
+ return;
+
+ lpa = mdiobus_c45_read(bus, addr, MDIO_MMD_VEND2, MII_LPA);
+ if (lpa < 0)
+ return;
+
+ phylink_decode_usxgmii_word(state, lpa);
+}
+
+static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
+ struct phylink_link_state *state)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ int bmsr, lpa;
+
+ bmsr = mdiobus_read(bus, addr, MII_BMSR);
+ lpa = mdiobus_read(bus, addr, MII_LPA);
+ if (bmsr < 0 || lpa < 0) {
+ state->link = false;
+ return;
+ }
+
+ state->link = !!(bmsr & BMSR_LSTATUS);
+ state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
+ if (!state->link)
+ return;
+
+ state->speed = SPEED_2500;
+ state->pause |= MLO_PAUSE_TX | MLO_PAUSE_RX;
+ state->duplex = DUPLEX_FULL;
+}
+
+static void lynx_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ phylink_mii_c22_pcs_get_state(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ lynx_pcs_get_state_2500basex(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ lynx_pcs_get_state_usxgmii(lynx->mdio, state);
+ break;
+ case PHY_INTERFACE_MODE_10GBASER:
+ phylink_mii_c45_pcs_get_state(lynx->mdio, state);
+ break;
+ default:
+ break;
+ }
+
+ dev_dbg(&lynx->mdio->dev,
+ "mode=%s/%s/%s link=%u an_enabled=%u an_complete=%u\n",
+ phy_modes(state->interface),
+ phy_speed_to_str(state->speed),
+ phy_duplex_to_str(state->duplex),
+ state->link, state->an_enabled, state->an_complete);
+}
+
+static int lynx_pcs_config_sgmii(struct mdio_device *pcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ u16 if_mode;
+ int err;
+
+ if_mode = IF_MODE_SGMII_EN;
+ if (mode == MLO_AN_INBAND) {
+ u32 link_timer;
+
+ if_mode |= IF_MODE_USE_SGMII_AN;
+
+ /* Adjust link timer for SGMII */
+ link_timer = LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS);
+ mdiobus_write(bus, addr, LINK_TIMER_LO, link_timer & 0xffff);
+ mdiobus_write(bus, addr, LINK_TIMER_HI, link_timer >> 16);
+ }
+ err = mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_SGMII_EN | IF_MODE_USE_SGMII_AN,
+ if_mode);
+ if (err)
+ return err;
+
+ return phylink_mii_c22_pcs_config(pcs, mode, PHY_INTERFACE_MODE_SGMII,
+ advertising);
+}
+
+static int lynx_pcs_config_usxgmii(struct mdio_device *pcs, unsigned int mode,
+ const unsigned long *advertising)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+
+ if (!phylink_autoneg_inband(mode)) {
+ dev_err(&pcs->dev, "USXGMII only supports in-band AN for now\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Configure device ability for the USXGMII Replicator */
+ return mdiobus_c45_write(bus, addr, MDIO_MMD_VEND2, MII_ADVERTISE,
+ MDIO_USXGMII_10G | MDIO_USXGMII_LINK |
+ MDIO_USXGMII_FULL_DUPLEX |
+ ADVERTISE_SGMII | ADVERTISE_LPACK);
+}
+
+static int lynx_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t ifmode,
+ const unsigned long *advertising,
+ bool permit)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (ifmode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ return lynx_pcs_config_sgmii(lynx->mdio, mode, advertising);
+ case PHY_INTERFACE_MODE_2500BASEX:
+ if (phylink_autoneg_inband(mode)) {
+ dev_err(&lynx->mdio->dev,
+ "AN not supported on 3.125GHz SerDes lane\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ return lynx_pcs_config_usxgmii(lynx->mdio, mode, advertising);
+ case PHY_INTERFACE_MODE_10GBASER:
+ /* Nothing to do here for 10GBASER */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void lynx_pcs_link_up_sgmii(struct mdio_device *pcs, unsigned int mode,
+ int speed, int duplex)
+{
+ struct mii_bus *bus = pcs->bus;
+ u16 if_mode = 0, sgmii_speed;
+ int addr = pcs->addr;
+
+ /* The PCS needs to be configured manually only
+ * when not operating in in-band mode
+ */
+ if (mode == MLO_AN_INBAND)
+ return;
+
+ if (duplex == DUPLEX_HALF)
+ if_mode |= IF_MODE_HALF_DUPLEX;
+
+ switch (speed) {
+ case SPEED_1000:
+ sgmii_speed = SGMII_SPEED_1000;
+ break;
+ case SPEED_100:
+ sgmii_speed = SGMII_SPEED_100;
+ break;
+ case SPEED_10:
+ sgmii_speed = SGMII_SPEED_10;
+ break;
+ case SPEED_UNKNOWN:
+ /* Silently don't do anything */
+ return;
+ default:
+ dev_err(&pcs->dev, "Invalid PCS speed %d\n", speed);
+ return;
+ }
+ if_mode |= IF_MODE_SPEED(sgmii_speed);
+
+ mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
+ if_mode);
+}
+
+/* 2500Base-X is SerDes protocol 7 on Felix and 6 on ENETC. It is a SerDes lane
+ * clocked at 3.125 GHz which encodes symbols with 8b/10b and does not have
+ * auto-negotiation of any link parameters. Electrically it is compatible with
+ * a single lane of XAUI.
+ * The hardware reference manual wants to call this mode SGMII, but it isn't
+ * really SGMII, since the fundamental features of SGMII:
+ * - Downgrading the link speed by duplicating symbols
+ * - Auto-negotiation
+ * are not there.
+ * The speed is configured at 1000 in the IF_MODE register because the clock
+ * frequency is actually given by a PLL configured in the Reset Configuration
+ * Word (RCW).
+ * Since there is no difference between fixed-speed SGMII w/o AN and 802.3z w/o
+ * AN, we call this PHY interface type 2500Base-X. In case the PHY negotiates a
+ * lower link speed on the line side, the system-side interface remains fixed
+ * at 2500 Mbps and we do rate adaptation through pause frames.
+ */
+static void lynx_pcs_link_up_2500basex(struct mdio_device *pcs,
+ unsigned int mode,
+ int speed, int duplex)
+{
+ struct mii_bus *bus = pcs->bus;
+ int addr = pcs->addr;
+ u16 if_mode = 0;
+
+ if (mode == MLO_AN_INBAND) {
+ dev_err(&pcs->dev, "AN not supported for 2500BaseX\n");
+ return;
+ }
+
+ if (duplex == DUPLEX_HALF)
+ if_mode |= IF_MODE_HALF_DUPLEX;
+ if_mode |= IF_MODE_SPEED(SGMII_SPEED_2500);
+
+ mdiobus_modify(bus, addr, IF_MODE,
+ IF_MODE_HALF_DUPLEX | IF_MODE_SPEED_MSK,
+ if_mode);
+}
+
+static void lynx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex)
+{
+ struct lynx_pcs *lynx = phylink_pcs_to_lynx(pcs);
+
+ switch (interface) {
+ case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
+ lynx_pcs_link_up_sgmii(lynx->mdio, mode, speed, duplex);
+ break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ lynx_pcs_link_up_2500basex(lynx->mdio, mode, speed, duplex);
+ break;
+ case PHY_INTERFACE_MODE_USXGMII:
+ /* At the moment, only in-band AN is supported for USXGMII
+ * so nothing to do in link_up
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct phylink_pcs_ops lynx_pcs_phylink_ops = {
+ .pcs_get_state = lynx_pcs_get_state,
+ .pcs_config = lynx_pcs_config,
+ .pcs_link_up = lynx_pcs_link_up,
+};
+
+struct lynx_pcs *lynx_pcs_create(struct mdio_device *mdio)
+{
+ struct lynx_pcs *lynx_pcs;
+
+ lynx_pcs = kzalloc(sizeof(*lynx_pcs), GFP_KERNEL);
+ if (!lynx_pcs)
+ return NULL;
+
+ lynx_pcs->mdio = mdio;
+ lynx_pcs->pcs.ops = &lynx_pcs_phylink_ops;
+ lynx_pcs->pcs.poll = true;
+
+ return lynx_pcs;
+}
+EXPORT_SYMBOL(lynx_pcs_create);
+
+void lynx_pcs_destroy(struct lynx_pcs *pcs)
+{
+ kfree(pcs);
+}
+EXPORT_SYMBOL(lynx_pcs_destroy);
+
+MODULE_LICENSE("Dual BSD/GPL");
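Two notes on the new PCS library. First, the SGMII link-timer arithmetic works out as LINK_TIMER_VAL(SGMII_AN_LINK_TIMER_NS) = 1600000 / 8 = 200000 = 0x30d40, so LINK_TIMER_LO receives 0x0d40 and LINK_TIMER_HI receives 0x3. Second, a hedged sketch of how a MAC driver might adopt the helpers, assuming phylink_set_pcs() as the attach point and hypothetical driver fields:

#include <linux/pcs-lynx.h>
#include <linux/phylink.h>

static int example_attach_lynx_pcs(struct example_priv *priv,	/* hypothetical */
				   struct mdio_device *pcs_mdio)
{
	struct lynx_pcs *pcs;

	pcs = lynx_pcs_create(pcs_mdio);
	if (!pcs)
		return -ENOMEM;

	priv->pcs = pcs;	/* kept for lynx_pcs_destroy() at teardown */
	phylink_set_pcs(priv->phylink, &pcs->pcs);
	return 0;
}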
diff --git a/drivers/net/phy/mdio-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 0d66a8ba7eb6..1aa9903d602e 100644
--- a/drivers/net/phy/mdio-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -7,8 +7,8 @@
*/
#include <linux/delay.h>
+#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
-#include <linux/mdio-xpcs.h>
#include <linux/phylink.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1c5a10b672fc..698bea312adc 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -3,247 +3,6 @@
# PHY Layer Configuration
#
-menuconfig MDIO_DEVICE
- tristate "MDIO bus device drivers"
- help
- MDIO devices and driver infrastructure code.
-
-if MDIO_DEVICE
-
-config MDIO_BUS
- tristate
- default m if PHYLIB=m
- default MDIO_DEVICE
- help
- This internal symbol is used for link time dependencies and it
- reflects whether the mdio_bus/mdio_device code is built as a
- loadable module or built-in.
-
-if MDIO_BUS
-
-config MDIO_DEVRES
- tristate
-
-config MDIO_ASPEED
- tristate "ASPEED MDIO bus controller"
- depends on ARCH_ASPEED || COMPILE_TEST
- depends on OF_MDIO && HAS_IOMEM
- help
- This module provides a driver for the independent MDIO bus
- controllers found in the ASPEED AST2600 SoC. This is a driver for the
- third revision of the ASPEED MDIO register interface - the first two
- revisions are the "old" and "new" interfaces found in the AST2400 and
- AST2500, embedded in the MAC. For legacy reasons, FTGMAC100 driver
- continues to drive the embedded MDIO controller for the AST2400 and
- AST2500 SoCs, so say N if AST2600 support is not required.
-
-config MDIO_BCM_IPROC
- tristate "Broadcom iProc MDIO bus controller"
- depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on HAS_IOMEM && OF_MDIO
- default ARCH_BCM_IPROC
- help
- This module provides a driver for the MDIO busses found in the
- Broadcom iProc SoC's.
-
-config MDIO_BCM_UNIMAC
- tristate "Broadcom UniMAC MDIO bus controller"
- depends on HAS_IOMEM
- help
- This module provides a driver for the Broadcom UniMAC MDIO busses.
- This hardware can be found in the Broadcom GENET Ethernet MAC
- controllers as well as some Broadcom Ethernet switches such as the
- Starfighter 2 switches.
-
-config MDIO_BITBANG
- tristate "Bitbanged MDIO buses"
- help
- This module implements the MDIO bus protocol in software,
- for use by low level drivers that export the ability to
- drive the relevant pins.
-
- If in doubt, say N.
-
-config MDIO_BUS_MUX
- tristate
- depends on OF_MDIO
- help
- This module provides a driver framework for MDIO bus
- multiplexers which connect one of several child MDIO busses
- to a parent bus. Switching between child busses is done by
- device specific drivers.
-
-config MDIO_BUS_MUX_BCM_IPROC
- tristate "Broadcom iProc based MDIO bus multiplexers"
- depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST)
- select MDIO_BUS_MUX
- default ARCH_BCM_IPROC
- help
- This module provides a driver for MDIO bus multiplexers found in
- iProc based Broadcom SoCs. This multiplexer connects one of several
- child MDIO bus to a parent bus. Buses could be internal as well as
- external and selection logic lies inside the same multiplexer.
-
-config MDIO_BUS_MUX_GPIO
- tristate "GPIO controlled MDIO bus multiplexers"
- depends on OF_GPIO && OF_MDIO
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via GPIO lines. The multiplexer connects one of
- several child MDIO busses to a parent bus. Child bus
- selection is under the control of GPIO lines.
-
-config MDIO_BUS_MUX_MESON_G12A
- tristate "Amlogic G12a based MDIO bus multiplexer"
- depends on ARCH_MESON || COMPILE_TEST
- depends on OF_MDIO && HAS_IOMEM && COMMON_CLK
- select MDIO_BUS_MUX
- default m if ARCH_MESON
- help
- This module provides a driver for the MDIO multiplexer/glue of
- the amlogic g12a SoC. The multiplexers connects either the external
- or the internal MDIO bus to the parent bus.
-
-config MDIO_BUS_MUX_MMIOREG
- tristate "MMIO device-controlled MDIO bus multiplexers"
- depends on OF_MDIO && HAS_IOMEM
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexers that
- are controlled via a simple memory-mapped device, like an FPGA.
- The multiplexer connects one of several child MDIO busses to a
- parent bus. Child bus selection is under the control of one of
- the FPGA's registers.
-
- Currently, only 8/16/32 bits registers are supported.
-
-config MDIO_BUS_MUX_MULTIPLEXER
- tristate "MDIO bus multiplexer using kernel multiplexer subsystem"
- depends on OF_MDIO
- select MULTIPLEXER
- select MDIO_BUS_MUX
- help
- This module provides a driver for MDIO bus multiplexer
- that is controlled via the kernel multiplexer subsystem. The
- bus multiplexer connects one of several child MDIO busses to
- a parent bus. Child bus selection is under the control of
- the kernel multiplexer subsystem.
-
-config MDIO_CAVIUM
- tristate
-
-config MDIO_GPIO
- tristate "GPIO lib-based bitbanged MDIO buses"
- depends on MDIO_BITBANG
- depends on GPIOLIB || COMPILE_TEST
- help
- Supports GPIO lib-based MDIO busses.
-
- To compile this driver as a module, choose M here: the module
- will be called mdio-gpio.
-
-config MDIO_HISI_FEMAC
- tristate "Hisilicon FEMAC MDIO bus controller"
- depends on HAS_IOMEM && OF_MDIO
- help
- This module provides a driver for the MDIO busses found in the
- Hisilicon SoC that have an Fast Ethernet MAC.
-
-config MDIO_I2C
- tristate
- depends on I2C
- help
- Support I2C based PHYs. This provides a MDIO bus bridged
- to I2C to allow PHYs connected in I2C mode to be accessed
- using the existing infrastructure.
-
- This is library mode.
-
-config MDIO_IPQ4019
- tristate "Qualcomm IPQ4019 MDIO interface support"
- depends on HAS_IOMEM && OF_MDIO
- help
- This driver supports the MDIO interface found in Qualcomm
- IPQ40xx series Soc-s.
-
-config MDIO_IPQ8064
- tristate "Qualcomm IPQ8064 MDIO interface support"
- depends on HAS_IOMEM && OF_MDIO
- depends on MFD_SYSCON
- help
- This driver supports the MDIO interface found in the network
- interface units of the IPQ8064 SoC
-
-config MDIO_MOXART
- tristate "MOXA ART MDIO interface support"
- depends on ARCH_MOXART || COMPILE_TEST
- help
- This driver supports the MDIO interface found in the network
- interface units of the MOXA ART SoC
-
-config MDIO_MSCC_MIIM
- tristate "Microsemi MIIM interface support"
- depends on HAS_IOMEM
- select MDIO_DEVRES
- help
- This driver supports the MIIM (MDIO) interface found in the network
- switches of the Microsemi SoCs; it is recommended to switch on
- CONFIG_HIGH_RES_TIMERS
-
-config MDIO_MVUSB
- tristate "Marvell USB to MDIO Adapter"
- depends on USB
- select MDIO_DEVRES
- help
- A USB to MDIO converter present on development boards for
- Marvell's Link Street family of Ethernet switches.
-
-config MDIO_OCTEON
- tristate "Octeon and some ThunderX SOCs MDIO buses"
- depends on (64BIT && OF_MDIO) || COMPILE_TEST
- depends on HAS_IOMEM
- select MDIO_CAVIUM
- help
- This module provides a driver for the Octeon and ThunderX MDIO
- buses. It is required by the Octeon and ThunderX ethernet device
- drivers on some systems.
-
-config MDIO_SUN4I
- tristate "Allwinner sun4i MDIO interface support"
- depends on ARCH_SUNXI || COMPILE_TEST
- help
- This driver supports the MDIO interface found in the network
- interface units of the Allwinner SoC that have an EMAC (A10,
- A12, A10s, etc.)
-
-config MDIO_THUNDER
- tristate "ThunderX SOCs MDIO buses"
- depends on 64BIT
- depends on PCI
- select MDIO_CAVIUM
- select MDIO_DEVRES
- help
- This driver supports the MDIO interfaces found on Cavium
- ThunderX SoCs when the MDIO bus device appears as a PCI
- device.
-
-config MDIO_XGENE
- tristate "APM X-Gene SoC MDIO bus controller"
- depends on ARCH_XGENE || COMPILE_TEST
- help
- This module provides a driver for the MDIO busses found in the
- APM X-Gene SoC's.
-
-config MDIO_XPCS
- tristate "Synopsys DesignWare XPCS controller"
- help
- This module provides helper functions for Synopsys DesignWare XPCS
- controllers.
-
-endif
-endif
-
config PHYLINK
tristate
depends on NETDEVICES
@@ -286,7 +45,15 @@ config LED_TRIGGER_PHY
for any speed known to the PHY.
-comment "MII PHY device drivers"
+config FIXED_PHY
+ tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
+ depends on PHYLIB
+ select SWPHY
+ help
+ Adds the platform "fixed" MDIO Bus to cover the boards that use
+ PHYs that are not connected to the real MDIO bus.
+
+ Currently tested with mpc866ads and mpc8349e-mitx.
config SFP
tristate "SFP cage support"
@@ -294,6 +61,19 @@ config SFP
depends on HWMON || HWMON=n
select MDIO_I2C
+comment "MII PHY device drivers"
+
+config AMD_PHY
+ tristate "AMD PHYs"
+ help
+ Currently supports the am79c874
+
+config MESON_GXL_PHY
+ tristate "Amlogic Meson GXL Internal PHY"
+ depends on ARCH_MESON || COMPILE_TEST
+ help
+ Currently has a driver for the Amlogic Meson GXL Internal PHY
+
config ADIN_PHY
tristate "Analog Devices Industrial Ethernet PHYs"
help
@@ -303,11 +83,6 @@ config ADIN_PHY
- ADIN1300 - Robust,Industrial, Low Latency 10/100/1000 Gigabit
Ethernet PHY
-config AMD_PHY
- tristate "AMD PHYs"
- help
- Currently supports the am79c874
-
config AQUANTIA_PHY
tristate "Aquantia PHYs"
help
@@ -319,6 +94,24 @@ config AX88796B_PHY
Currently supports the Asix Electronics PHY found in the X-Surf 100
AX88796B package.
+config BROADCOM_PHY
+ tristate "Broadcom 54XX PHYs"
+ select BCM_NET_PHYLIB
+ help
+ Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
+ BCM5481, BCM54810 and BCM5482 PHYs.
+
+config BCM54140_PHY
+ tristate "Broadcom BCM54140 PHY"
+ depends on PHYLIB
+ depends on HWMON || HWMON=n
+ select BCM_NET_PHYLIB
+ help
+ Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
+
+ This driver also supports the hardware monitoring of this PHY and
+ exposes voltage and temperature sensors.
+
config BCM63XX_PHY
tristate "Broadcom 63xx SOCs internal PHY"
depends on BCM63XX || COMPILE_TEST
@@ -333,6 +126,12 @@ config BCM7XXX_PHY
Currently supports the BCM7366, BCM7439, BCM7445, and
40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
+config BCM84881_PHY
+ tristate "Broadcom BCM84881 PHY"
+ depends on PHYLIB
+ help
+ Support the Broadcom BCM84881 PHY.
+
config BCM87XX_PHY
tristate "Broadcom BCM8706 and BCM8727 PHYs"
help
@@ -354,30 +153,6 @@ config BCM_CYGNUS_PHY
config BCM_NET_PHYLIB
tristate
-config BROADCOM_PHY
- tristate "Broadcom PHYs"
- select BCM_NET_PHYLIB
- help
- Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464,
- BCM5481, BCM54810 and BCM5482 PHYs.
-
-config BCM54140_PHY
- tristate "Broadcom BCM54140 PHY"
- depends on PHYLIB
- depends on HWMON || HWMON=n
- select BCM_NET_PHYLIB
- help
- Support the Broadcom BCM54140 Quad SGMII/QSGMII PHY.
-
- This driver also supports the hardware monitoring of this PHY and
- exposes voltage and temperature sensors.
-
-config BCM84881_PHY
- tristate "Broadcom BCM84881 PHY"
- depends on PHYLIB
- help
- Support the Broadcom BCM84881 PHY.
-
config CICADA_PHY
tristate "Cicada PHYs"
help
@@ -393,48 +168,16 @@ config DAVICOM_PHY
help
Currently supports dm9161e and dm9131
-config DP83822_PHY
- tristate "Texas Instruments DP83822/825/826 PHYs"
- help
- Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
- DP83826C and DP83826NC PHYs.
-
-config DP83TC811_PHY
- tristate "Texas Instruments DP83TC811 PHY"
- help
- Supports the DP83TC811 PHY.
-
-config DP83848_PHY
- tristate "Texas Instruments DP83848 PHY"
- help
- Supports the DP83848 PHY.
-
-config DP83867_PHY
- tristate "Texas Instruments DP83867 Gigabit PHY"
- help
- Currently supports the DP83867 PHY.
-
-config DP83869_PHY
- tristate "Texas Instruments DP83869 Gigabit PHY"
- help
- Currently supports the DP83869 PHY. This PHY supports copper and
- fiber connections.
-
-config FIXED_PHY
- tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs"
- depends on PHYLIB
- select SWPHY
- help
- Adds the platform "fixed" MDIO Bus to cover the boards that use
- PHYs that are not connected to the real MDIO bus.
-
- Currently tested with mpc866ads and mpc8349e-mitx.
-
config ICPLUS_PHY
tristate "ICPlus PHYs"
help
Currently supports the IP175C and IP1001 PHYs.
+config LXT_PHY
+ tristate "Intel LXT PHYs"
+ help
+ Currently supports the lxt970, lxt971
+
config INTEL_XWAY_PHY
tristate "Intel XWAY PHYs"
help
@@ -448,27 +191,16 @@ config LSI_ET1011C_PHY
help
Supports the LSI ET1011C PHY.
-config LXT_PHY
- tristate "Intel LXT PHYs"
- help
- Currently supports the lxt970, lxt971
-
config MARVELL_PHY
- tristate "Marvell PHYs"
+ tristate "Marvell Alaska PHYs"
help
- Currently has a driver for the 88E1011S
+ Currently has a driver for the 88E1XXX
config MARVELL_10G_PHY
tristate "Marvell Alaska 10Gbit PHYs"
help
Support for the Marvell Alaska MV88X3310 and compatible PHYs.
-config MESON_GXL_PHY
- tristate "Amlogic Meson GXL Internal PHY"
- depends on ARCH_MESON || COMPILE_TEST
- help
- Currently has a driver for the Amlogic Meson GXL Internal PHY
-
config MICREL_PHY
tristate "Micrel PHYs"
help
@@ -519,12 +251,12 @@ config REALTEK_PHY
Supports the Realtek 821x PHY.
config RENESAS_PHY
- tristate "Driver for Renesas PHYs"
+ tristate "Renesas PHYs"
help
Supports the Renesas PHYs uPD60620 and uPD60620A.
config ROCKCHIP_PHY
- tristate "Driver for Rockchip Ethernet PHYs"
+ tristate "Rockchip Ethernet PHYs"
help
Currently supports the integrated Ethernet PHY.
@@ -543,6 +275,33 @@ config TERANETICS_PHY
help
Currently supports the Teranetics TN2020
+config DP83822_PHY
+ tristate "Texas Instruments DP83822/825/826 PHYs"
+ help
+ Supports the DP83822, DP83825I, DP83825CM, DP83825CS, DP83825S,
+ DP83826C and DP83826NC PHYs.
+
+config DP83TC811_PHY
+ tristate "Texas Instruments DP83TC811 PHY"
+ help
+ Supports the DP83TC811 PHY.
+
+config DP83848_PHY
+ tristate "Texas Instruments DP83848 PHY"
+ help
+ Supports the DP83848 PHY.
+
+config DP83867_PHY
+ tristate "Texas Instruments DP83867 Gigabit PHY"
+ help
+ Currently supports the DP83867 PHY.
+
+config DP83869_PHY
+ tristate "Texas Instruments DP83869 Gigabit PHY"
+ help
+ Currently supports the DP83869 PHY. This PHY supports copper and
+ fiber connections.
+
config VITESSE_PHY
tristate "Vitesse PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index d84bab489a53..a13e402074cf 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-# Makefile for Linux PHY drivers and MDIO bus drivers
+# Makefile for Linux PHY drivers
libphy-y := phy.o phy-c45.o phy-core.o phy_device.o \
linkmode.o
@@ -24,31 +24,6 @@ libphy-$(CONFIG_LED_TRIGGER_PHY) += phy_led_triggers.o
obj-$(CONFIG_PHYLINK) += phylink.o
obj-$(CONFIG_PHYLIB) += libphy.o
-obj-$(CONFIG_MDIO_ASPEED) += mdio-aspeed.o
-obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o
-obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o
-obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
-obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
-obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
-obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
-obj-$(CONFIG_MDIO_BUS_MUX_MESON_G12A) += mdio-mux-meson-g12a.o
-obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
-obj-$(CONFIG_MDIO_BUS_MUX_MULTIPLEXER) += mdio-mux-multiplexer.o
-obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
-obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
-obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
-obj-$(CONFIG_MDIO_I2C) += mdio-i2c.o
-obj-$(CONFIG_MDIO_IPQ4019) += mdio-ipq4019.o
-obj-$(CONFIG_MDIO_IPQ8064) += mdio-ipq8064.o
-obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
-obj-$(CONFIG_MDIO_MSCC_MIIM) += mdio-mscc-miim.o
-obj-$(CONFIG_MDIO_MVUSB) += mdio-mvusb.o
-obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
-obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o
-obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o
-obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o
-obj-$(CONFIG_MDIO_XPCS) += mdio-xpcs.o
-
obj-$(CONFIG_NETWORK_PHY_TIMESTAMPING) += mii_timestamper.o
obj-$(CONFIG_SFP) += sfp.o
@@ -62,32 +37,32 @@ ifdef CONFIG_HWMON
aquantia-objs += aquantia_hwmon.o
endif
obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o
-obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
obj-$(CONFIG_AT803X_PHY) += at803x.o
+obj-$(CONFIG_AX88796B_PHY) += ax88796b.o
+obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
+obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
-obj-$(CONFIG_BCM54140_PHY) += bcm54140.o
-obj-$(CONFIG_BCM84881_PHY) += bcm84881.o
obj-$(CONFIG_CICADA_PHY) += cicada.o
obj-$(CONFIG_CORTINA_PHY) += cortina.o
obj-$(CONFIG_DAVICOM_PHY) += davicom.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_DP83822_PHY) += dp83822.o
-obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_DP83848_PHY) += dp83848.o
obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_DP83869_PHY) += dp83869.o
+obj-$(CONFIG_DP83TC811_PHY) += dp83tc811.o
obj-$(CONFIG_FIXED_PHY) += fixed_phy.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o
obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o
obj-$(CONFIG_LXT_PHY) += lxt.o
-obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MARVELL_10G_PHY) += marvell10g.o
+obj-$(CONFIG_MARVELL_PHY) += marvell.o
obj-$(CONFIG_MESON_GXL_PHY) += meson-gxl.o
obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 101651b2de54..ed601a7e46a0 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -343,7 +343,7 @@ static int at803x_rgmii_reg_get_voltage_sel(struct regulator_dev *rdev)
return (val & AT803X_DEBUG_RGMII_1V8) ? 1 : 0;
}
-static struct regulator_ops vddio_regulator_ops = {
+static const struct regulator_ops vddio_regulator_ops = {
.list_voltage = regulator_list_voltage_table,
.set_voltage_sel = at803x_rgmii_reg_set_voltage_sel,
.get_voltage_sel = at803x_rgmii_reg_get_voltage_sel,
@@ -364,7 +364,7 @@ static const struct regulator_desc vddio_desc = {
.owner = THIS_MODULE,
};
-static struct regulator_ops vddh_regulator_ops = {
+static const struct regulator_ops vddh_regulator_ops = {
};
static const struct regulator_desc vddh_desc = {
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 692048d86ab1..15812001b3ff 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -11,6 +11,7 @@
#include "bcm-phy-lib.h"
#include <linux/bitops.h>
#include <linux/brcmphy.h>
+#include <linux/clk.h>
#include <linux/mdio.h>
/* Broadcom BCM7xxx internal PHY registers */
@@ -39,6 +40,7 @@
struct bcm7xxx_phy_priv {
u64 *stats;
+ struct clk *clk;
};
static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
@@ -521,6 +523,7 @@ static void bcm7xxx_28nm_get_phy_stats(struct phy_device *phydev,
static int bcm7xxx_28nm_probe(struct phy_device *phydev)
{
struct bcm7xxx_phy_priv *priv;
+ int ret = 0;
priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -534,7 +537,30 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
if (!priv->stats)
return -ENOMEM;
- return 0;
+ priv->clk = devm_clk_get_optional(&phydev->mdio.dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ /* Dummy read to a register to workaround an issue upon reset where the
+ * internal inverter may not allow the first MDIO transaction to pass
+ * the MDIO management controller and make us return 0xffff for such
+ * reads. This is needed to ensure that any subsequent reads to the
+ * PHY will succeed.
+ */
+ phy_read(phydev, MII_BMSR);
+
+ return ret;
+}
+
+static void bcm7xxx_28nm_remove(struct phy_device *phydev)
+{
+ struct bcm7xxx_phy_priv *priv = phydev->priv;
+
+ clk_disable_unprepare(priv->clk);
}
#define BCM7XXX_28NM_GPHY(_oui, _name) \
@@ -552,6 +578,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.get_strings = bcm_phy_get_strings, \
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
}
#define BCM7XXX_28NM_EPHY(_oui, _name) \
@@ -567,6 +594,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
.get_strings = bcm_phy_get_strings, \
.get_stats = bcm7xxx_28nm_get_phy_stats, \
.probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
}
#define BCM7XXX_40NM_EPHY(_oui, _name) \
@@ -583,6 +611,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
}
static struct phy_driver bcm7xxx_driver[] = {
+ BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -603,6 +632,7 @@ static struct phy_driver bcm7xxx_driver[] = {
};
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+ { PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
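For context, a minimal sketch of the optional-clock pattern the bcm7xxx probe adopts above, assuming only the standard clk API (the function name is hypothetical). devm_clk_get_optional() returns NULL rather than an error when no clock is described, and the clk_* helpers treat a NULL clk as a no-op, so one code path serves devices with and without a reference clock:

#include <linux/clk.h>
#include <linux/phy.h>

static int example_phy_probe_clk(struct phy_device *phydev)
{
	struct clk *clk;

	/* NULL (not an error) when the DT node has no "clocks" property */
	clk = devm_clk_get_optional(&phydev->mdio.dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* no-op for a NULL clk; must be balanced in .remove */
	return clk_prepare_enable(clk);
}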
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 79e67f2fe00a..f2caccaf4408 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -798,51 +798,32 @@ static int decode_evnt(struct dp83640_private *dp83640,
return parsed;
}
-#define DP83640_PACKET_HASH_OFFSET 20
#define DP83640_PACKET_HASH_LEN 10
static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
{
- unsigned int offset = 0;
- u8 *msgtype, *data = skb_mac_header(skb);
- __be16 *seqid;
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
u16 hash;
/* check sequenceID, messageType, 12 bit hash of offset 20-29 */
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- }
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
- return 0;
+ msgtype = ptp_get_msgtype(hdr, type);
- if (unlikely(type & PTP_CLASS_V1))
- msgtype = data + offset + OFF_PTP_CONTROL;
- else
- msgtype = data + offset;
- if (rxts->msgtype != (*msgtype & 0xf))
+ if (rxts->msgtype != (msgtype & 0xf))
return 0;
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
- if (rxts->seqid != ntohs(*seqid))
+ seqid = be16_to_cpu(hdr->sequence_id);
+ if (rxts->seqid != seqid)
return 0;
hash = ether_crc(DP83640_PACKET_HASH_LEN,
- data + offset + DP83640_PACKET_HASH_OFFSET) >> 20;
+ (unsigned char *)&hdr->source_port_identity) >> 20;
if (rxts->hash != hash)
return 0;
@@ -982,35 +963,16 @@ static void decode_status_frame(struct dp83640_private *dp83640,
static int is_sync(struct sk_buff *skb, int type)
{
- u8 *data = skb->data, *msgtype;
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (type & PTP_CLASS_V1)
- offset += OFF_PTP_CONTROL;
+ struct ptp_header *hdr;
+ u8 msgtype;
- if (skb->len < offset + 1)
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
return 0;
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, type);
- return (*msgtype & 0xf) == 0;
+ return (msgtype & 0xf) == 0;
}
static void dp83640_free_clocks(void)
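The two rewrites above replace open-coded VLAN/IPv4/IPv6/L2 offset arithmetic with the common PTP header helpers from <linux/ptp_classify.h>. A minimal sketch of the resulting idiom (the function name is hypothetical):

#include <linux/ptp_classify.h>

static bool example_skb_is_ptp_sync(struct sk_buff *skb)
{
	unsigned int type = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (type == PTP_CLASS_NONE)
		return false;

	hdr = ptp_parse_header(skb, type);	/* NULL if truncated/invalid */
	if (!hdr)
		return false;

	/* Message type 0 is SYNC for both PTPv1 and PTPv2 */
	return (ptp_get_msgtype(hdr, type) & 0xf) == 0;
}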
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index 37643c468e19..c162c9551bd1 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -23,16 +23,31 @@
#define DP83822_DEVADDR 0x1f
+#define MII_DP83822_CTRL_2 0x0a
+#define MII_DP83822_PHYSTS 0x10
#define MII_DP83822_PHYSCR 0x11
#define MII_DP83822_MISR1 0x12
#define MII_DP83822_MISR2 0x13
+#define MII_DP83822_FCSCR 0x14
#define MII_DP83822_RCSR 0x17
#define MII_DP83822_RESET_CTRL 0x1f
#define MII_DP83822_GENCFG 0x465
+#define MII_DP83822_SOR1 0x467
+
+/* GENCFG */
+#define DP83822_SIG_DET_LOW BIT(0)
+
+/* Control Register 2 bits */
+#define DP83822_FX_ENABLE BIT(14)
#define DP83822_HW_RESET BIT(15)
#define DP83822_SW_RESET BIT(14)
+/* PHY STS bits */
+#define DP83822_PHYSTS_DUPLEX BIT(2)
+#define DP83822_PHYSTS_10 BIT(1)
+#define DP83822_PHYSTS_LINK BIT(0)
+
/* PHYSCR Register Fields */
#define DP83822_PHYSCR_INT_OE BIT(0) /* Interrupt Output Enable */
#define DP83822_PHYSCR_INTEN BIT(1) /* Interrupt Enable */
@@ -83,6 +98,27 @@
#define DP83822_RX_CLK_SHIFT BIT(12)
#define DP83822_TX_CLK_SHIFT BIT(11)
+/* SOR1 mode */
+#define DP83822_STRAP_MODE1 0
+#define DP83822_STRAP_MODE2 BIT(0)
+#define DP83822_STRAP_MODE3 BIT(1)
+#define DP83822_STRAP_MODE4 GENMASK(1, 0)
+
+#define DP83822_COL_STRAP_MASK GENMASK(11, 10)
+#define DP83822_COL_SHIFT 10
+#define DP83822_RX_ER_STR_MASK GENMASK(9, 8)
+#define DP83822_RX_ER_SHIFT 8
+
+#define MII_DP83822_FIBER_ADVERTISE (ADVERTISED_TP | ADVERTISED_MII | \
+ ADVERTISED_FIBRE | \
+ ADVERTISED_Pause | ADVERTISED_Asym_Pause)
+
+struct dp83822_private {
+ bool fx_signal_det_low;
+ int fx_enabled;
+ u16 fx_sd_enable;
+};
+
static int dp83822_ack_interrupt(struct phy_device *phydev)
{
int err;
@@ -197,6 +233,7 @@ static void dp83822_get_wol(struct phy_device *phydev,
static int dp83822_config_intr(struct phy_device *phydev)
{
+ struct dp83822_private *dp83822 = phydev->priv;
int misr_status;
int physcr_status;
int err;
@@ -208,13 +245,16 @@ static int dp83822_config_intr(struct phy_device *phydev)
misr_status |= (DP83822_RX_ERR_HF_INT_EN |
DP83822_FALSE_CARRIER_HF_INT_EN |
- DP83822_ANEG_COMPLETE_INT_EN |
- DP83822_DUP_MODE_CHANGE_INT_EN |
- DP83822_SPEED_CHANGED_INT_EN |
DP83822_LINK_STAT_INT_EN |
DP83822_ENERGY_DET_INT_EN |
DP83822_LINK_QUAL_INT_EN);
+ if (!dp83822->fx_enabled)
+ misr_status |= DP83822_ANEG_COMPLETE_INT_EN |
+ DP83822_DUP_MODE_CHANGE_INT_EN |
+ DP83822_SPEED_CHANGED_INT_EN;
+
err = phy_write(phydev, MII_DP83822_MISR1, misr_status);
if (err < 0)
return err;
@@ -224,14 +264,16 @@ static int dp83822_config_intr(struct phy_device *phydev)
return misr_status;
misr_status |= (DP83822_JABBER_DET_INT_EN |
- DP83822_WOL_PKT_INT_EN |
DP83822_SLEEP_MODE_INT_EN |
- DP83822_MDI_XOVER_INT_EN |
DP83822_LB_FIFO_INT_EN |
DP83822_PAGE_RX_INT_EN |
- DP83822_ANEG_ERR_INT_EN |
DP83822_EEE_ERROR_CHANGE_INT_EN);
+ if (!dp83822->fx_enabled)
+ misr_status |= DP83822_MDI_XOVER_INT_EN |
+ DP83822_ANEG_ERR_INT_EN |
+ DP83822_WOL_PKT_INT_EN;
+
err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
if (err < 0)
return err;
@@ -270,13 +312,60 @@ static int dp8382x_disable_wol(struct phy_device *phydev)
MII_DP83822_WOL_CFG, value);
}
+static int dp83822_read_status(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int status = phy_read(phydev, MII_DP83822_PHYSTS);
+ int ctrl2;
+ int ret;
+
+ if (status < 0)
+ return status;
+
+ if (dp83822->fx_enabled) {
+ if (status & DP83822_PHYSTS_LINK) {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ } else {
+ ctrl2 = phy_read(phydev, MII_DP83822_CTRL_2);
+ if (ctrl2 < 0)
+ return ctrl2;
+
+ if (!(ctrl2 & DP83822_FX_ENABLE)) {
+ ret = phy_write(phydev, MII_DP83822_CTRL_2,
+ DP83822_FX_ENABLE | ctrl2);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ }
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (status & DP83822_PHYSTS_DUPLEX)
+ phydev->duplex = DUPLEX_FULL;
+ else
+ phydev->duplex = DUPLEX_HALF;
+
+ if (status & DP83822_PHYSTS_10)
+ phydev->speed = SPEED_10;
+ else
+ phydev->speed = SPEED_100;
+
+ return 0;
+}
+
static int dp83822_config_init(struct phy_device *phydev)
{
+ struct dp83822_private *dp83822 = phydev->priv;
struct device *dev = &phydev->mdio.dev;
int rgmii_delay;
s32 rx_int_delay;
s32 tx_int_delay;
int err = 0;
+ int bmcr;
if (phy_interface_is_rgmii(phydev)) {
rx_int_delay = phy_get_internal_delay(phydev, dev, NULL, 0,
@@ -302,6 +391,61 @@ static int dp83822_config_init(struct phy_device *phydev)
}
}
+ if (dp83822->fx_enabled) {
+ err = phy_modify(phydev, MII_DP83822_CTRL_2,
+ DP83822_FX_ENABLE, DP83822_FX_ENABLE);
+ if (err < 0)
+ return err;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->advertising);
+
+ /* Auto neg is not supported in fiber mode */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ if (bmcr & BMCR_ANENABLE) {
+ err = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (err < 0)
+ return err;
+ }
+ phydev->autoneg = AUTONEG_DISABLE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising);
+
+ /* Setup fiber advertisement */
+ err = phy_modify_changed(phydev, MII_ADVERTISE,
+ MII_DP83822_FIBER_ADVERTISE,
+ MII_DP83822_FIBER_ADVERTISE);
+ if (err < 0)
+ return err;
+
+ if (dp83822->fx_signal_det_low) {
+ err = phy_set_bits_mmd(phydev, DP83822_DEVADDR,
+ MII_DP83822_GENCFG,
+ DP83822_SIG_DET_LOW);
+ if (err)
+ return err;
+ }
+ }
return dp8382x_disable_wol(phydev);
}
@@ -314,13 +458,85 @@ static int dp83822_phy_reset(struct phy_device *phydev)
{
int err;
- err = phy_write(phydev, MII_DP83822_RESET_CTRL, DP83822_HW_RESET);
+ err = phy_write(phydev, MII_DP83822_RESET_CTRL, DP83822_SW_RESET);
if (err < 0)
return err;
return phydev->drv->config_init(phydev);
}
+#ifdef CONFIG_OF_MDIO
+static int dp83822_of_init(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+
+ /* Signal detection for the PHY is only enabled if both the FX_EN and
+ * SD_EN pins are strapped. Signal detection can only be enabled if
+ * FX_EN is strapped; otherwise it is disabled for the PHY.
+ */
+ if (dp83822->fx_enabled && dp83822->fx_sd_enable)
+ dp83822->fx_signal_det_low = device_property_present(dev,
+ "ti,link-loss-low");
+ if (!dp83822->fx_enabled)
+ dp83822->fx_enabled = device_property_present(dev,
+ "ti,fiber-mode");
+
+ return 0;
+}
+#else
+static int dp83822_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83822_read_straps(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int fx_enabled, fx_sd_enable;
+ int val;
+
+ val = phy_read_mmd(phydev, DP83822_DEVADDR, MII_DP83822_SOR1);
+ if (val < 0)
+ return val;
+
+ fx_enabled = (val & DP83822_COL_STRAP_MASK) >> DP83822_COL_SHIFT;
+ if (fx_enabled == DP83822_STRAP_MODE2 ||
+ fx_enabled == DP83822_STRAP_MODE3)
+ dp83822->fx_enabled = 1;
+
+ if (dp83822->fx_enabled) {
+ fx_sd_enable = (val & DP83822_RX_ER_STR_MASK) >> DP83822_RX_ER_SHIFT;
+ if (fx_sd_enable == DP83822_STRAP_MODE3 ||
+ fx_sd_enable == DP83822_STRAP_MODE4)
+ dp83822->fx_sd_enable = 1;
+ }
+
+ return 0;
+}
+
+static int dp83822_probe(struct phy_device *phydev)
+{
+ struct dp83822_private *dp83822;
+ int ret;
+
+ dp83822 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83822),
+ GFP_KERNEL);
+ if (!dp83822)
+ return -ENOMEM;
+
+ phydev->priv = dp83822;
+
+ ret = dp83822_read_straps(phydev);
+ if (ret)
+ return ret;
+
+ dp83822_of_init(phydev);
+
+ return 0;
+}
+
static int dp83822_suspend(struct phy_device *phydev)
{
int value;
@@ -352,8 +568,10 @@ static int dp83822_resume(struct phy_device *phydev)
PHY_ID_MATCH_MODEL(_id), \
.name = (_name), \
/* PHY_BASIC_FEATURES */ \
+ .probe = dp83822_probe, \
.soft_reset = dp83822_phy_reset, \
.config_init = dp83822_config_init, \
+ .read_status = dp83822_read_status, \
.get_wol = dp83822_get_wol, \
.set_wol = dp83822_set_wol, \
.ack_interrupt = dp83822_ack_interrupt, \
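A condensed sketch of the linkmode bookkeeping the fiber path above performs, using names from this patch (the helper name is hypothetical); 100BASE-FX has no autonegotiation, so the autoneg bit is dropped from both masks before advertising is clamped to supported:

static void example_setup_100basefx(struct phy_device *phydev)
{
	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
			 phydev->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
			 phydev->supported);

	/* 100BASE-FX is fixed-configuration: no autoneg */
	phydev->autoneg = AUTONEG_DISABLE;
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
			   phydev->supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
			   phydev->advertising);

	/* Never advertise more than the PHY supports */
	linkmode_and(phydev->advertising, phydev->advertising,
		     phydev->supported);
}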
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index cd7032628a28..69d3eacc2b96 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -1,6 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/*
- * Driver for the Texas Instruments DP83867 PHY
+/* Driver for the Texas Instruments DP83867 PHY
*
* Copyright (C) 2015 Texas Instruments Inc.
*/
@@ -113,7 +112,6 @@
#define DP83867_RGMII_RX_CLK_DELAY_SHIFT 0
#define DP83867_RGMII_RX_CLK_DELAY_INV (DP83867_RGMII_RX_CLK_DELAY_MAX + 1)
-
/* IO_MUX_CFG bits */
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MASK 0x1f
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
@@ -384,22 +382,22 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
DP83867_DOWNSHIFT_EN);
switch (cnt) {
- case DP83867_DOWNSHIFT_1_COUNT:
- count = DP83867_DOWNSHIFT_1_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_2_COUNT:
- count = DP83867_DOWNSHIFT_2_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_4_COUNT:
- count = DP83867_DOWNSHIFT_4_COUNT_VAL;
- break;
- case DP83867_DOWNSHIFT_8_COUNT:
- count = DP83867_DOWNSHIFT_8_COUNT_VAL;
- break;
- default:
- phydev_err(phydev,
- "Downshift count must be 1, 2, 4 or 8\n");
- return -EINVAL;
+ case DP83867_DOWNSHIFT_1_COUNT:
+ count = DP83867_DOWNSHIFT_1_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_2_COUNT:
+ count = DP83867_DOWNSHIFT_2_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_4_COUNT:
+ count = DP83867_DOWNSHIFT_4_COUNT_VAL;
+ break;
+ case DP83867_DOWNSHIFT_8_COUNT:
+ count = DP83867_DOWNSHIFT_8_COUNT_VAL;
+ break;
+ default:
+ phydev_err(phydev,
+ "Downshift count must be 1, 2, 4 or 8\n");
+ return -EINVAL;
}
val = DP83867_DOWNSHIFT_EN;
@@ -411,7 +409,7 @@ static int dp83867_set_downshift(struct phy_device *phydev, u8 cnt)
}
static int dp83867_get_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, void *data)
+ struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
@@ -422,7 +420,7 @@ static int dp83867_get_tunable(struct phy_device *phydev,
}
static int dp83867_set_tunable(struct phy_device *phydev,
- struct ethtool_tunable *tuna, const void *data)
+ struct ethtool_tunable *tuna, const void *data)
{
switch (tuna->id) {
case ETHTOOL_PHY_DOWNSHIFT:
@@ -524,11 +522,10 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->io_impedance = -1; /* leave at default */
dp83867->rxctrl_strap_quirk = of_property_read_bool(of_node,
- "ti,dp83867-rxctrl-strap-quirk");
+ "ti,dp83867-rxctrl-strap-quirk");
dp83867->sgmii_ref_clk_en = of_property_read_bool(of_node,
- "ti,sgmii-ref-clock-output-enable");
-
+ "ti,sgmii-ref-clock-output-enable");
dp83867->rx_id_delay = DP83867_RGMII_RX_CLK_DELAY_INV;
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 6b98d74b5102..cf6dec7b7d8e 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -4,12 +4,14 @@
*/
#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/delay.h>
+#include <linux/bitfield.h>
#include <dt-bindings/net/ti-dp83869.h>
@@ -19,6 +21,7 @@
#define MII_DP83869_PHYCTRL 0x10
#define MII_DP83869_MICR 0x12
#define MII_DP83869_ISR 0x13
+#define DP83869_CFG2 0x14
#define DP83869_CTRL 0x1f
#define DP83869_CFG4 0x1e
@@ -27,6 +30,13 @@
#define DP83869_RGMIICTL 0x0032
#define DP83869_STRAP_STS1 0x006e
#define DP83869_RGMIIDCTL 0x0086
+#define DP83869_RXFCFG 0x0134
+#define DP83869_RXFPMD1 0x0136
+#define DP83869_RXFPMD2 0x0137
+#define DP83869_RXFPMD3 0x0138
+#define DP83869_RXFSOP1 0x0139
+#define DP83869_RXFSOP2 0x013A
+#define DP83869_RXFSOP3 0x013B
#define DP83869_IO_MUX_CFG 0x0170
#define DP83869_OP_MODE 0x01df
#define DP83869_FX_CTRL 0x0c00
@@ -52,6 +62,10 @@
BMCR_FULLDPLX | \
BMCR_SPEED1000)
+#define MII_DP83869_FIBER_ADVERTISE (ADVERTISED_FIBRE | \
+ ADVERTISED_Pause | \
+ ADVERTISED_Asym_Pause)
+
/* This is the same bit mask as the BMCR so re-use the BMCR default */
#define DP83869_FX_CTRL_DEFAULT MII_DP83869_BMCR_DEFAULT
@@ -100,6 +114,26 @@
#define DP83869_OP_MODE_MII BIT(5)
#define DP83869_SGMII_RGMII_BRIDGE BIT(6)
+/* RXFCFG bits */
+#define DP83869_WOL_MAGIC_EN BIT(0)
+#define DP83869_WOL_PATTERN_EN BIT(1)
+#define DP83869_WOL_BCAST_EN BIT(2)
+#define DP83869_WOL_UCAST_EN BIT(4)
+#define DP83869_WOL_SEC_EN BIT(5)
+#define DP83869_WOL_ENH_MAC BIT(7)
+
+/* CFG2 bits */
+#define DP83869_DOWNSHIFT_EN (BIT(8) | BIT(9))
+#define DP83869_DOWNSHIFT_ATTEMPT_MASK (BIT(10) | BIT(11))
+#define DP83869_DOWNSHIFT_1_COUNT_VAL 0
+#define DP83869_DOWNSHIFT_2_COUNT_VAL 1
+#define DP83869_DOWNSHIFT_4_COUNT_VAL 2
+#define DP83869_DOWNSHIFT_8_COUNT_VAL 3
+#define DP83869_DOWNSHIFT_1_COUNT 1
+#define DP83869_DOWNSHIFT_2_COUNT 2
+#define DP83869_DOWNSHIFT_4_COUNT 4
+#define DP83869_DOWNSHIFT_8_COUNT 8
+
enum {
DP83869_PORT_MIRRORING_KEEP,
DP83869_PORT_MIRRORING_EN,
@@ -118,6 +152,28 @@ struct dp83869_private {
int mode;
};
+static int dp83869_read_status(struct phy_device *phydev)
+{
+ struct dp83869_private *dp83869 = phydev->priv;
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported)) {
+ if (phydev->link) {
+ if (dp83869->mode == DP83869_RGMII_100_BASE)
+ phydev->speed = SPEED_100;
+ } else {
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ }
+ }
+
+ return 0;
+}
+
static int dp83869_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83869_ISR);
@@ -151,6 +207,256 @@ static int dp83869_config_intr(struct phy_device *phydev)
return phy_write(phydev, MII_DP83869_MICR, micr_status);
}
+static int dp83869_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct net_device *ndev = phydev->attached_dev;
+ int val_rxcfg, val_micr;
+ u8 *mac;
+ int ret;
+
+ val_rxcfg = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG);
+ if (val_rxcfg < 0)
+ return val_rxcfg;
+
+ val_micr = phy_read(phydev, MII_DP83869_MICR);
+ if (val_micr < 0)
+ return val_micr;
+
+ if (wol->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_UCAST |
+ WAKE_BCAST)) {
+ val_rxcfg |= DP83869_WOL_ENH_MAC;
+ val_micr |= MII_DP83869_MICR_WOL_INT_EN;
+
+ if (wol->wolopts & WAKE_MAGIC ||
+ wol->wolopts & WAKE_MAGICSECURE) {
+ mac = (u8 *)ndev->dev_addr;
+
+ if (!is_valid_ether_addr(mac))
+ return -EINVAL;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFPMD1,
+ mac[1] << 8 | mac[0]);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFPMD2,
+ mac[3] << 8 | mac[2]);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFPMD3,
+ mac[5] << 8 | mac[4]);
+ if (ret)
+ return ret;
+
+ val_rxcfg |= DP83869_WOL_MAGIC_EN;
+ } else {
+ val_rxcfg &= ~DP83869_WOL_MAGIC_EN;
+ }
+
+ if (wol->wolopts & WAKE_MAGICSECURE) {
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP1,
+ (wol->sopass[1] << 8) | wol->sopass[0]);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP2,
+ (wol->sopass[3] << 8) | wol->sopass[2]);
+ if (ret)
+ return ret;
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP3,
+ (wol->sopass[5] << 8) | wol->sopass[4]);
+ if (ret)
+ return ret;
+
+ val_rxcfg |= DP83869_WOL_SEC_EN;
+ } else {
+ val_rxcfg &= ~DP83869_WOL_SEC_EN;
+ }
+
+ if (wol->wolopts & WAKE_UCAST)
+ val_rxcfg |= DP83869_WOL_UCAST_EN;
+ else
+ val_rxcfg &= ~DP83869_WOL_UCAST_EN;
+
+ if (wol->wolopts & WAKE_BCAST)
+ val_rxcfg |= DP83869_WOL_BCAST_EN;
+ else
+ val_rxcfg &= ~DP83869_WOL_BCAST_EN;
+ } else {
+ val_rxcfg &= ~DP83869_WOL_ENH_MAC;
+ val_micr &= ~MII_DP83869_MICR_WOL_INT_EN;
+ }
+
+ ret = phy_write_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG, val_rxcfg);
+ if (ret)
+ return ret;
+
+ return phy_write(phydev, MII_DP83869_MICR, val_micr);
+}
+
+static void dp83869_get_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ int value, sopass_val;
+
+ wol->supported = (WAKE_UCAST | WAKE_BCAST | WAKE_MAGIC |
+ WAKE_MAGICSECURE);
+ wol->wolopts = 0;
+
+ value = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG);
+ if (value < 0) {
+ phydev_err(phydev, "Failed to read RX CFG\n");
+ return;
+ }
+
+ if (value & DP83869_WOL_UCAST_EN)
+ wol->wolopts |= WAKE_UCAST;
+
+ if (value & DP83869_WOL_BCAST_EN)
+ wol->wolopts |= WAKE_BCAST;
+
+ if (value & DP83869_WOL_MAGIC_EN)
+ wol->wolopts |= WAKE_MAGIC;
+
+ if (value & DP83869_WOL_SEC_EN) {
+ sopass_val = phy_read_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP1);
+ if (sopass_val < 0) {
+ phydev_err(phydev, "Failed to read RX SOP 1\n");
+ return;
+ }
+
+ wol->sopass[0] = (sopass_val & 0xff);
+ wol->sopass[1] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP2);
+ if (sopass_val < 0) {
+ phydev_err(phydev, "Failed to read RX SOP 2\n");
+ return;
+ }
+
+ wol->sopass[2] = (sopass_val & 0xff);
+ wol->sopass[3] = (sopass_val >> 8);
+
+ sopass_val = phy_read_mmd(phydev, DP83869_DEVADDR,
+ DP83869_RXFSOP3);
+ if (sopass_val < 0) {
+ phydev_err(phydev, "Failed to read RX SOP 3\n");
+ return;
+ }
+
+ wol->sopass[4] = (sopass_val & 0xff);
+ wol->sopass[5] = (sopass_val >> 8);
+
+ wol->wolopts |= WAKE_MAGICSECURE;
+ }
+
+ if (!(value & DP83869_WOL_ENH_MAC))
+ wol->wolopts = 0;
+}
+
+static int dp83869_get_downshift(struct phy_device *phydev, u8 *data)
+{
+ int val, cnt, enable, count;
+
+ val = phy_read(phydev, DP83869_CFG2);
+ if (val < 0)
+ return val;
+
+ enable = FIELD_GET(DP83869_DOWNSHIFT_EN, val);
+ cnt = FIELD_GET(DP83869_DOWNSHIFT_ATTEMPT_MASK, val);
+
+ switch (cnt) {
+ case DP83869_DOWNSHIFT_1_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_1_COUNT;
+ break;
+ case DP83869_DOWNSHIFT_2_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_2_COUNT;
+ break;
+ case DP83869_DOWNSHIFT_4_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_4_COUNT;
+ break;
+ case DP83869_DOWNSHIFT_8_COUNT_VAL:
+ count = DP83869_DOWNSHIFT_8_COUNT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *data = enable ? count : DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int dp83869_set_downshift(struct phy_device *phydev, u8 cnt)
+{
+ int val, count;
+
+ if (cnt > DP83869_DOWNSHIFT_8_COUNT)
+ return -EINVAL;
+
+ if (!cnt)
+ return phy_clear_bits(phydev, DP83869_CFG2,
+ DP83869_DOWNSHIFT_EN);
+
+ switch (cnt) {
+ case DP83869_DOWNSHIFT_1_COUNT:
+ count = DP83869_DOWNSHIFT_1_COUNT_VAL;
+ break;
+ case DP83869_DOWNSHIFT_2_COUNT:
+ count = DP83869_DOWNSHIFT_2_COUNT_VAL;
+ break;
+ case DP83869_DOWNSHIFT_4_COUNT:
+ count = DP83869_DOWNSHIFT_4_COUNT_VAL;
+ break;
+ case DP83869_DOWNSHIFT_8_COUNT:
+ count = DP83869_DOWNSHIFT_8_COUNT_VAL;
+ break;
+ default:
+ phydev_err(phydev,
+ "Downshift count must be 1, 2, 4 or 8\n");
+ return -EINVAL;
+ }
+
+ val = DP83869_DOWNSHIFT_EN;
+ val |= FIELD_PREP(DP83869_DOWNSHIFT_ATTEMPT_MASK, count);
+
+ return phy_modify(phydev, DP83869_CFG2,
+ DP83869_DOWNSHIFT_EN | DP83869_DOWNSHIFT_ATTEMPT_MASK,
+ val);
+}
+
+static int dp83869_get_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83869_get_downshift(phydev, data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int dp83869_set_tunable(struct phy_device *phydev,
+ struct ethtool_tunable *tuna, const void *data)
+{
+ switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return dp83869_set_downshift(phydev, *(const u8 *)data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int dp83869_config_port_mirroring(struct phy_device *phydev)
{
struct dp83869_private *dp83869 = phydev->priv;
@@ -295,6 +601,51 @@ static int dp83869_configure_rgmii(struct phy_device *phydev,
return ret;
}
+static int dp83869_configure_fiber(struct phy_device *phydev,
+ struct dp83869_private *dp83869)
+{
+ int bmcr;
+ int ret;
+
+ /* Only allow advertising what this PHY supports */
+ linkmode_and(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->advertising);
+
+ if (dp83869->mode == DP83869_RGMII_1000_BASE) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ phydev->supported);
+ } else {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Full_BIT,
+ phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseFX_Half_BIT,
+ phydev->supported);
+
+ /* Auto neg is not supported in 100base FX mode */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ phydev->autoneg = AUTONEG_DISABLE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->advertising);
+
+ if (bmcr & BMCR_ANENABLE) {
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Update advertising from supported */
+ linkmode_or(phydev->advertising, phydev->advertising,
+ phydev->supported);
+
+ return 0;
+}
+
static int dp83869_configure_mode(struct phy_device *phydev,
struct dp83869_private *dp83869)
{
@@ -384,6 +735,7 @@ static int dp83869_configure_mode(struct phy_device *phydev,
break;
case DP83869_RGMII_1000_BASE:
case DP83869_RGMII_100_BASE:
+ ret = dp83869_configure_fiber(phydev, dp83869);
break;
default:
return -EINVAL;
@@ -397,6 +749,12 @@ static int dp83869_config_init(struct phy_device *phydev)
struct dp83869_private *dp83869 = phydev->priv;
int ret, val;
+ /* Force speed optimization for the PHY even if it is strapped */
+ ret = phy_modify(phydev, DP83869_CFG2, DP83869_DOWNSHIFT_EN,
+ DP83869_DOWNSHIFT_EN);
+ if (ret)
+ return ret;
+
ret = dp83869_configure_mode(phydev, dp83869);
if (ret)
return ret;
@@ -494,6 +852,13 @@ static struct phy_driver dp83869_driver[] = {
/* IRQ related */
.ack_interrupt = dp83869_ack_interrupt,
.config_intr = dp83869_config_intr,
+ .read_status = dp83869_read_status,
+
+ .get_tunable = dp83869_get_tunable,
+ .set_tunable = dp83869_set_tunable,
+
+ .get_wol = dp83869_get_wol,
+ .set_wol = dp83869_set_wol,
.suspend = genphy_suspend,
.resume = genphy_resume,
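For illustration, the Wake-on-LAN MAC programming above amounts to three little-endian 16-bit writes into consecutive pattern-match registers. A hedged sketch using the register names this patch defines (RXFPMD1..3 are consecutive MMD addresses; the helper name is hypothetical):

static int example_wol_set_mac(struct phy_device *phydev, const u8 *mac)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		/* mac[1]<<8|mac[0], mac[3]<<8|mac[2], mac[5]<<8|mac[4] */
		ret = phy_write_mmd(phydev, DP83869_DEVADDR,
				    DP83869_RXFPMD1 + i,
				    mac[2 * i + 1] << 8 | mac[2 * i]);
		if (ret)
			return ret;
	}

	return 0;
}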
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index bb86ac0bd092..5aec673a0120 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1598,21 +1598,15 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
static void m88e1318_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int oldpage, ret = 0;
+ int ret;
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- oldpage = phy_select_page(phydev, MII_MARVELL_WOL_PAGE);
- if (oldpage < 0)
- goto error;
-
- ret = __phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
- if (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
+ ret = phy_read_paged(phydev, MII_MARVELL_WOL_PAGE,
+ MII_88E1318S_PHY_WOL_CTRL);
+ if (ret >= 0 && ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
wol->wolopts |= WAKE_MAGIC;
-
-error:
- phy_restore_page(phydev, oldpage, ret);
}
static int m88e1318_set_wol(struct phy_device *phydev,
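The rewrite above collapses the select-page/read/restore-page dance into phy_read_paged(), which takes the MDIO lock, switches the page, reads, and restores the previous page in one call. A sketch of the idiom (function name hypothetical):

static bool example_wol_magic_enabled(struct phy_device *phydev)
{
	int ret = phy_read_paged(phydev, MII_MARVELL_WOL_PAGE,
				 MII_88E1318S_PHY_WOL_CTRL);

	/* A negative value is an MDIO error, not register contents */
	return ret >= 0 &&
	       (ret & MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE);
}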
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 0af20faad69d..757e950fb745 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -825,9 +825,6 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
@@ -850,9 +847,6 @@ int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
int retval;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock(&bus->mdio_lock);
retval = __mdiobus_read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
@@ -879,9 +873,6 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
@@ -905,9 +896,6 @@ int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
{
int err;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock(&bus->mdio_lock);
err = __mdiobus_write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
@@ -929,9 +917,6 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
{
int err;
- if (WARN_ON_ONCE(in_interrupt()))
- return -EINVAL;
-
mutex_lock(&bus->mdio_lock);
err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
mutex_unlock(&bus->mdio_lock);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3fe552675dd2..a7f74b3b97af 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1315,6 +1315,19 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_LAN8814,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip INDY Gigabit Quad PHY",
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
+ .read_status = ksz9031_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
@@ -1387,6 +1400,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index 1d4c012194e9..6cf9b798b710 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -958,7 +958,7 @@ static int vsc8584_macsec_del_txsa(struct macsec_context *ctx)
return 0;
}
-static struct macsec_ops vsc8584_macsec_ops = {
+static const struct macsec_ops vsc8584_macsec_ops = {
.mdo_dev_open = vsc8584_macsec_dev_open,
.mdo_dev_stop = vsc8584_macsec_dev_stop,
.mdo_add_secy = vsc8584_macsec_add_secy,
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index ff8e14b01eeb..8d333d3084ed 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -6,9 +6,14 @@
#include <linux/phy.h>
#include <linux/of.h>
+/**
+ * phy_speed_to_str - Return a string representing the PHY link speed
+ *
+ * @speed: Speed of the link
+ */
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 90,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 92,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -52,6 +57,11 @@ const char *phy_speed_to_str(int speed)
}
EXPORT_SYMBOL_GPL(phy_speed_to_str);
+/**
+ * phy_duplex_to_str - Return string describing the duplex
+ *
+ * @duplex: Duplex setting to describe
+ */
const char *phy_duplex_to_str(unsigned int duplex)
{
if (duplex == DUPLEX_HALF)
@@ -160,6 +170,8 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 100, FULL, 100baseT_Full ),
PHY_SETTING( 100, FULL, 100baseT1_Full ),
PHY_SETTING( 100, HALF, 100baseT_Half ),
+ PHY_SETTING( 100, HALF, 100baseFX_Half ),
+ PHY_SETTING( 100, FULL, 100baseFX_Full ),
/* 10M */
PHY_SETTING( 10, FULL, 10baseT_Full ),
PHY_SETTING( 10, HALF, 10baseT_Half ),
@@ -250,6 +262,16 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
return __set_linkmode_max_speed(max_speed, phydev->supported);
}
+/**
+ * phy_set_max_speed - Set the maximum speed the PHY should support
+ *
+ * @phydev: The phy_device struct
+ * @max_speed: Maximum speed
+ *
+ * The PHY might be more capable than the MAC. For example a Fast Ethernet
+ * is connected to a 1G PHY. This function allows the MAC to indicate its
+ * maximum speed, and so limit what the PHY will advertise.
+ */
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed)
{
int err;
@@ -306,6 +328,16 @@ void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
+/**
+ * phy_resolve_aneg_pause - Determine pause autoneg results
+ *
+ * @phydev: The phy_device struct
+ *
+ * Once autoneg has completed the local pause settings can be
+ * resolved. Determine if pause and asymmetric pause should be used
+ * by the MAC.
+ */
void phy_resolve_aneg_pause(struct phy_device *phydev)
{
if (phydev->duplex == DUPLEX_FULL) {
@@ -319,7 +351,7 @@ void phy_resolve_aneg_pause(struct phy_device *phydev)
EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
/**
- * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
+ * phy_resolve_aneg_linkmode - resolve the advertisements into PHY settings
* @phydev: The phy_device struct
*
* Resolve our and the link partner advertisements into their corresponding
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 8947d58f2a25..35525a671400 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -456,7 +456,16 @@ int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_do_ioctl);
-/* same as phy_do_ioctl, but ensures that net_device is running */
+/**
+ * phy_do_ioctl_running - generic ndo_do_ioctl implementation but test first
+ *
+ * @dev: the net_device struct
+ * @ifr: &struct ifreq for socket ioctl's
+ * @cmd: ioctl cmd to execute
+ *
+ * Same as phy_do_ioctl, but ensures that net_device is running before
+ * handling the ioctl.
+ */
int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
{
if (!netif_running(dev))
@@ -466,6 +475,12 @@ int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd)
}
EXPORT_SYMBOL(phy_do_ioctl_running);
+/**
+ * phy_queue_state_machine - Trigger the state machine to run soon
+ *
+ * @phydev: the phy_device struct
+ * @jiffies: Run the state machine after these jiffies
+ */
void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
{
mod_delayed_work(system_power_efficient_wq, &phydev->state_queue,
@@ -473,6 +488,11 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies)
}
EXPORT_SYMBOL(phy_queue_state_machine);
+/**
+ * phy_trigger_machine - Trigger the state machine to run now
+ *
+ * @phydev: the phy_device struct
+ */
static void phy_trigger_machine(struct phy_device *phydev)
{
phy_queue_state_machine(phydev, 0);
@@ -489,6 +509,12 @@ static void phy_abort_cable_test(struct phy_device *phydev)
phydev_err(phydev, "Error while aborting cable test");
}
+/**
+ * phy_ethtool_get_strings - Get the statistic counter names
+ *
+ * @phydev: the phy_device struct
+ * @data: Where to put the strings
+ */
int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
{
if (!phydev->drv)
@@ -502,6 +528,11 @@ int phy_ethtool_get_strings(struct phy_device *phydev, u8 *data)
}
EXPORT_SYMBOL(phy_ethtool_get_strings);
+/**
+ * phy_ethtool_get_sset_count - Get the number of statistic counters
+ *
+ * @phydev: the phy_device struct
+ */
int phy_ethtool_get_sset_count(struct phy_device *phydev)
{
int ret;
@@ -523,6 +554,13 @@ int phy_ethtool_get_sset_count(struct phy_device *phydev)
}
EXPORT_SYMBOL(phy_ethtool_get_sset_count);
+/**
+ * phy_ethtool_get_stats - Get the statistic counters
+ *
+ * @phydev: the phy_device struct
+ * @stats: What counters to get
+ * @data: Where to store the counters
+ */
int phy_ethtool_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
@@ -537,6 +575,12 @@ int phy_ethtool_get_stats(struct phy_device *phydev,
}
EXPORT_SYMBOL(phy_ethtool_get_stats);
+/**
+ * phy_start_cable_test - Start a cable test
+ *
+ * @phydev: the phy_device struct
+ * @extack: extack for reporting useful error messages
+ */
int phy_start_cable_test(struct phy_device *phydev,
struct netlink_ext_ack *extack)
{
@@ -600,6 +644,13 @@ out:
}
EXPORT_SYMBOL(phy_start_cable_test);
+/**
+ * phy_start_cable_test_tdr - Start a raw TDR cable test
+ *
+ * @phydev: the phy_device struct
+ * @extack: extack for reporting useful error messages
+ * @config: Configuration of the test to run
+ */
int phy_start_cable_test_tdr(struct phy_device *phydev,
struct netlink_ext_ack *extack,
const struct phy_tdr_config *config)
@@ -1363,6 +1414,12 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
}
EXPORT_SYMBOL(phy_ethtool_set_eee);
+/**
+ * phy_ethtool_set_wol - Configure Wake On LAN
+ *
+ * @phydev: target phy_device struct
+ * @wol: Configuration requested
+ */
int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
if (phydev->drv && phydev->drv->set_wol)
@@ -1372,6 +1429,12 @@ int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
}
EXPORT_SYMBOL(phy_ethtool_set_wol);
+/**
+ * phy_ethtool_get_wol - Get the current Wake On LAN configuration
+ *
+ * @phydev: target phy_device struct
+ * @wol: Store the current configuration here
+ */
void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
{
if (phydev->drv && phydev->drv->get_wol)
@@ -1405,6 +1468,10 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
}
EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
+/**
+ * phy_ethtool_nway_reset - Restart auto negotiation
+ * @ndev: Network device to restart autoneg for
+ */
int phy_ethtool_nway_reset(struct net_device *ndev)
{
struct phy_device *phydev = ndev->phydev;
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 32f4e8ec96cf..fe2296fdda19 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -535,8 +535,10 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
if (pl->pcs_ops)
pl->pcs_ops->pcs_get_state(pl->pcs, state);
- else
+ else if (pl->mac_ops->mac_pcs_get_state)
pl->mac_ops->mac_pcs_get_state(pl->config, state);
+ else
+ state->link = 0;
}
/* The fixed state is... fixed except for the link state,
@@ -2319,6 +2321,49 @@ static void phylink_decode_sgmii_word(struct phylink_link_state *state,
}
/**
+ * phylink_decode_usxgmii_word() - decode the USXGMII word from a MAC PCS
+ * @state: a pointer to a struct phylink_link_state.
+ * @lpa: a 16 bit value which stores the USXGMII auto-negotiation word
+ *
+ * Helper for MAC PCS supporting the USXGMII protocol and the auto-negotiation
+ * code word. Decode the USXGMII code word and populate the corresponding fields
+ * (speed, duplex) into the phylink_link_state structure.
+ */
+void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ uint16_t lpa)
+{
+ switch (lpa & MDIO_USXGMII_SPD_MASK) {
+ case MDIO_USXGMII_10:
+ state->speed = SPEED_10;
+ break;
+ case MDIO_USXGMII_100:
+ state->speed = SPEED_100;
+ break;
+ case MDIO_USXGMII_1000:
+ state->speed = SPEED_1000;
+ break;
+ case MDIO_USXGMII_2500:
+ state->speed = SPEED_2500;
+ break;
+ case MDIO_USXGMII_5000:
+ state->speed = SPEED_5000;
+ break;
+ case MDIO_USXGMII_10G:
+ state->speed = SPEED_10000;
+ break;
+ default:
+ state->link = false;
+ return;
+ }
+
+ if (lpa & MDIO_USXGMII_FULL_DUPLEX)
+ state->duplex = DUPLEX_FULL;
+ else
+ state->duplex = DUPLEX_HALF;
+}
+EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
+
+/**
* phylink_mii_c22_pcs_get_state() - read the MAC PCS state
* @pcs: a pointer to a &struct mdio_device.
* @state: a pointer to a &struct phylink_link_state.
@@ -2361,6 +2406,7 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
break;
case PHY_INTERFACE_MODE_SGMII:
+ case PHY_INTERFACE_MODE_QSGMII:
phylink_decode_sgmii_word(state, lpa);
break;
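A sketch of how a MAC PCS driver might consume the helper exported above: read the 16-bit link-partner word from its own registers, then let phylink fill in speed and duplex. The register choice below is hypothetical; where the USXGMII word lives is PCS-specific:

static void example_pcs_get_state(struct mdio_device *pcs,
				  struct phylink_link_state *state)
{
	int lpa = mdiobus_read(pcs->bus, pcs->addr, MII_LPA);

	if (lpa < 0) {
		state->link = false;
		return;
	}

	state->link = true;
	phylink_decode_usxgmii_word(state, lpa);
}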
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 0f0960971800..fb1db713b7fb 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -26,11 +26,16 @@
#define RTL821x_EXT_PAGE_SELECT 0x1e
#define RTL821x_PAGE_SELECT 0x1f
+#define RTL8211F_PHYCR1 0x18
#define RTL8211F_INSR 0x1d
#define RTL8211F_TX_DELAY BIT(8)
#define RTL8211F_RX_DELAY BIT(3)
+#define RTL8211F_ALDPS_PLL_OFF BIT(1)
+#define RTL8211F_ALDPS_ENABLE BIT(2)
+#define RTL8211F_ALDPS_XTAL_OFF BIT(12)
+
#define RTL8211E_CTRL_DELAY BIT(13)
#define RTL8211E_TX_DELAY BIT(12)
#define RTL8211E_RX_DELAY BIT(11)
@@ -177,8 +182,12 @@ static int rtl8211f_config_init(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
u16 val_txdly, val_rxdly;
+ u16 val;
int ret;
+ val = RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_XTAL_OFF;
+ phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1, val, val);
+
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
val_txdly = 0;
@@ -401,7 +410,7 @@ static int rtlgen_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
-static int rtl8125_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
+static int rtl822x_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
{
int ret = rtlgen_read_mmd(phydev, devnum, regnum);
@@ -425,7 +434,7 @@ static int rtl8125_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
return ret;
}
-static int rtl8125_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
+static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
u16 val)
{
int ret = rtlgen_write_mmd(phydev, devnum, regnum, val);
@@ -442,7 +451,7 @@ static int rtl8125_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
-static int rtl8125_get_features(struct phy_device *phydev)
+static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
@@ -460,7 +469,7 @@ static int rtl8125_get_features(struct phy_device *phydev)
return genphy_read_abilities(phydev);
}
-static int rtl8125_config_aneg(struct phy_device *phydev)
+static int rtl822x_config_aneg(struct phy_device *phydev)
{
int ret = 0;
@@ -480,7 +489,7 @@ static int rtl8125_config_aneg(struct phy_device *phydev)
return __genphy_config_aneg(phydev, ret);
}
-static int rtl8125_read_status(struct phy_device *phydev)
+static int rtl822x_read_status(struct phy_device *phydev)
{
int ret;
@@ -522,7 +531,7 @@ static int rtlgen_match_phy_device(struct phy_device *phydev)
!rtlgen_supports_2_5gbps(phydev);
}
-static int rtl8125_match_phy_device(struct phy_device *phydev)
+static int rtl8226_match_phy_device(struct phy_device *phydev)
{
return phydev->phy_id == RTL_GENERIC_PHYID &&
rtlgen_supports_2_5gbps(phydev);
@@ -627,29 +636,29 @@ static struct phy_driver realtek_drvs[] = {
.read_mmd = rtlgen_read_mmd,
.write_mmd = rtlgen_write_mmd,
}, {
- .name = "RTL8125 2.5Gbps internal",
- .match_phy_device = rtl8125_match_phy_device,
- .get_features = rtl8125_get_features,
- .config_aneg = rtl8125_config_aneg,
- .read_status = rtl8125_read_status,
+ .name = "RTL8226 2.5Gbps PHY",
+ .match_phy_device = rtl8226_match_phy_device,
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
- .read_mmd = rtl8125_read_mmd,
- .write_mmd = rtl8125_write_mmd,
+ .read_mmd = rtl822x_read_mmd,
+ .write_mmd = rtl822x_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc840),
- .name = "RTL8125B 2.5Gbps internal",
- .get_features = rtl8125_get_features,
- .config_aneg = rtl8125_config_aneg,
- .read_status = rtl8125_read_status,
+ .name = "RTL8226B_RTL8221B 2.5Gbps PHY",
+ .get_features = rtl822x_get_features,
+ .config_aneg = rtl822x_config_aneg,
+ .read_status = rtl822x_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
- .read_mmd = rtl8125_read_mmd,
- .write_mmd = rtl8125_write_mmd,
+ .read_mmd = rtl822x_read_mmd,
+ .write_mmd = rtl822x_write_mmd,
}, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index cf83314c8591..1d18c10e8f82 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -7,6 +7,7 @@
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
+#include <linux/mdio/mdio-i2c.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
@@ -16,7 +17,6 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
-#include "mdio-i2c.h"
#include "sfp.h"
#include "swphy.h"
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 74568ae16125..ec97669be5c2 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -12,6 +12,7 @@
*
*/
+#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
@@ -21,6 +22,17 @@
#include <linux/netdevice.h>
#include <linux/smscphy.h>
+/* Vendor-specific PHY Definitions */
+/* EDPD NLP / crossover time configuration */
+#define PHY_EDPD_CONFIG 16
+#define PHY_EDPD_CONFIG_EXT_CROSSOVER_ 0x0001
+
+/* Control/Status Indication Register */
+#define SPECIAL_CTRL_STS 27
+#define SPECIAL_CTRL_STS_OVRRD_AMDIX_ 0x8000
+#define SPECIAL_CTRL_STS_AMDIX_ENABLE_ 0x4000
+#define SPECIAL_CTRL_STS_AMDIX_STATE_ 0x2000
+
struct smsc_hw_stat {
const char *string;
u8 reg;
@@ -33,14 +45,22 @@ static struct smsc_hw_stat smsc_hw_stats[] = {
struct smsc_phy_priv {
bool energy_enable;
+ struct clk *refclk;
};
static int smsc_phy_config_intr(struct phy_device *phydev)
{
- int rc = phy_write (phydev, MII_LAN83C185_IM,
- ((PHY_INTERRUPT_ENABLED == phydev->interrupts)
- ? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS
- : 0));
+ struct smsc_phy_priv *priv = phydev->priv;
+ u16 intmask = 0;
+ int rc;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ intmask = MII_LAN83C185_ISF_INT4 | MII_LAN83C185_ISF_INT6;
+ if (priv->energy_enable)
+ intmask |= MII_LAN83C185_ISF_INT7;
+ }
+
+ rc = phy_write(phydev, MII_LAN83C185_IM, intmask);
return rc < 0 ? rc : 0;
}
@@ -55,19 +75,21 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
struct smsc_phy_priv *priv = phydev->priv;
+ int rc;
- int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (!priv->energy_enable)
+ return 0;
+
+ rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0)
return rc;
- if (priv->energy_enable) {
- /* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
- rc | MII_LAN83C185_EDPWRDOWN);
- if (rc < 0)
- return rc;
- }
+ /* Enable energy detect mode for these SMSC transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
return smsc_phy_ack_interrupt(phydev);
}
@@ -96,6 +118,54 @@ static int lan911x_config_init(struct phy_device *phydev)
return smsc_phy_ack_interrupt(phydev);
}
+static int lan87xx_config_aneg(struct phy_device *phydev)
+{
+ int rc;
+ int val;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_;
+ break;
+ case ETH_TP_MDI_X:
+ val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = SPECIAL_CTRL_STS_AMDIX_ENABLE_;
+ break;
+ default:
+ return genphy_config_aneg(phydev);
+ }
+
+ rc = phy_read(phydev, SPECIAL_CTRL_STS);
+ if (rc < 0)
+ return rc;
+
+ rc &= ~(SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
+ SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
+ SPECIAL_CTRL_STS_AMDIX_STATE_);
+ rc |= val;
+ phy_write(phydev, SPECIAL_CTRL_STS, rc);
+
+ phydev->mdix = phydev->mdix_ctrl;
+ return genphy_config_aneg(phydev);
+}
+
+static int lan87xx_config_aneg_ext(struct phy_device *phydev)
+{
+ int rc;
+
+ /* Extend Manual AutoMDIX timer */
+ rc = phy_read(phydev, PHY_EDPD_CONFIG);
+ if (rc < 0)
+ return rc;
+
+ rc |= PHY_EDPD_CONFIG_EXT_CROSSOVER_;
+ phy_write(phydev, PHY_EDPD_CONFIG, rc);
+ return lan87xx_config_aneg(phydev);
+}
+
/*
* The LAN87xx suffers from rare absence of the ENERGYON-bit when Ethernet cable
* plugs in while LAN87xx is in Energy Detect Power-Down mode. This leads to
@@ -185,11 +255,20 @@ static void smsc_get_stats(struct phy_device *phydev,
data[i] = smsc_get_stat(phydev, i);
}
+static void smsc_phy_remove(struct phy_device *phydev)
+{
+ struct smsc_phy_priv *priv = phydev->priv;
+
+ clk_disable_unprepare(priv->refclk);
+ clk_put(priv->refclk);
+}
+
static int smsc_phy_probe(struct phy_device *phydev)
{
struct device *dev = &phydev->mdio.dev;
struct device_node *of_node = dev->of_node;
struct smsc_phy_priv *priv;
+ int ret;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -202,6 +281,19 @@ static int smsc_phy_probe(struct phy_device *phydev)
phydev->priv = priv;
+ /* Make the clock optional to preserve DTB backward compatibility. */
+ priv->refclk = clk_get_optional(dev, NULL);
+ if (IS_ERR(priv->refclk))
+ return dev_err_probe(dev, PTR_ERR(priv->refclk),
+ "Failed to request clock\n");
+
+ ret = clk_prepare_enable(priv->refclk);
+ if (ret)
+ return ret;
+
+ ret = clk_set_rate(priv->refclk, 50 * 1000 * 1000);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -250,6 +342,9 @@ static struct phy_driver smsc_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
+ /* This covers internal PHY (phy_id: 0x0007C0C3) for
+ * LAN9500 (PID: 0x9500), LAN9514 (PID: 0xec00), LAN9505 (PID: 0x9505)
+ */
.phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8700",
@@ -262,6 +357,7 @@ static struct phy_driver smsc_phy_driver[] = {
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
+ .config_aneg = lan87xx_config_aneg,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
@@ -293,19 +389,23 @@ static struct phy_driver smsc_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
}, {
+ /* This covers internal PHY (phy_id: 0x0007C0F0) for
+ * LAN9500A (PID: 0x9E00), LAN9505A (PID: 0x9E01)
+ */
.phy_id = 0x0007c0f0, /* OUI=0x00800f, Model#=0x0f */
.phy_id_mask = 0xfffffff0,
.name = "SMSC LAN8710/LAN8720",
/* PHY_BASIC_FEATURES */
- .flags = PHY_RST_AFTER_CLK_EN,
.probe = smsc_phy_probe,
+ .remove = smsc_phy_remove,
/* basic functions */
.read_status = lan87xx_read_status,
.config_init = smsc_phy_config_init,
.soft_reset = smsc_phy_reset,
+ .config_aneg = lan87xx_config_aneg_ext,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
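The new lan87xx_config_aneg() maps the generic phydev->mdix_ctrl request onto the vendor auto-MDIX override bits; once merged, the mode becomes selectable from userspace with `ethtool -s <dev> mdix auto|on|off`. A sketch of the mapping, factored as a helper with a hypothetical name:

static u16 example_mdix_bits(u8 mdix_ctrl)
{
	switch (mdix_ctrl) {
	case ETH_TP_MDI:	/* force straight-through */
		return SPECIAL_CTRL_STS_OVRRD_AMDIX_;
	case ETH_TP_MDI_X:	/* force crossover */
		return SPECIAL_CTRL_STS_OVRRD_AMDIX_ |
		       SPECIAL_CTRL_STS_AMDIX_STATE_;
	default:		/* ETH_TP_MDI_AUTO */
		return SPECIAL_CTRL_STS_AMDIX_ENABLE_;
	}
}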
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 7475cef17cf7..ca49c1ad3efc 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -300,7 +300,7 @@ static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
struct device *dev;
struct ks8995_switch *ks8995;
- dev = container_of(kobj, struct device, kobj);
+ dev = kobj_to_dev(kobj);
ks8995 = dev_get_drvdata(dev);
return ks8995_read(ks8995, buf, off, count);
@@ -312,7 +312,7 @@ static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
struct device *dev;
struct ks8995_switch *ks8995;
- dev = container_of(kobj, struct device, kobj);
+ dev = kobj_to_dev(kobj);
ks8995 = dev_get_drvdata(dev);
return ks8995_write(ks8995, buf, off, count);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bcc4a4c011f1..07f1f3933927 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2796,7 +2796,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
return err;
}
-static const struct genl_ops team_nl_ops[] = {
+static const struct genl_small_ops team_nl_ops[] = {
{
.cmd = TEAM_CMD_NOOP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -2833,8 +2833,8 @@ static struct genl_family team_nl_family __ro_after_init = {
.policy = team_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = team_nl_ops,
- .n_ops = ARRAY_SIZE(team_nl_ops),
+ .small_ops = team_nl_ops,
+ .n_small_ops = ARRAY_SIZE(team_nl_ops),
.mcgrps = team_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(team_nl_mcgrps),
};
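The team driver migration above follows the generic-netlink split into "small ops": op arrays without per-op policies register via .small_ops/.n_small_ops, and the policy stays family-wide. A minimal sketch with hypothetical names:

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_small_ops example_nl_ops[] = {
	{
		.cmd = 1,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = example_doit,
	},
};

static struct genl_family example_nl_family __ro_after_init = {
	.name = "example",
	.version = 1,
	.module = THIS_MODULE,
	.small_ops = example_nl_ops,
	.n_small_ops = ARRAY_SIZE(example_nl_ops),
};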
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7959b5c2d11f..be69d272052f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -219,24 +219,6 @@ struct veth {
__be16 h_vlan_TCI;
};
-bool tun_is_xdp_frame(void *ptr)
-{
- return (unsigned long)ptr & TUN_XDP_FLAG;
-}
-EXPORT_SYMBOL(tun_is_xdp_frame);
-
-void *tun_xdp_to_ptr(void *ptr)
-{
- return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
-}
-EXPORT_SYMBOL(tun_xdp_to_ptr);
-
-void *tun_ptr_to_xdp(void *ptr)
-{
- return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
-}
-EXPORT_SYMBOL(tun_ptr_to_xdp);
-
static int tun_napi_receive(struct napi_struct *napi, int budget)
{
struct tun_file *tfile = container_of(napi, struct tun_file, napi);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index c7bcfca7d70b..b46993d5f997 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -346,6 +346,8 @@ config USB_NET_SMSC75XX
config USB_NET_SMSC95XX
tristate "SMSC LAN95XX based USB 2.0 10/100 ethernet devices"
depends on USB_USBNET
+ select PHYLIB
+ select SMSC_PHY
select BITREVERSE
select CRC16
select CRC32
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 32b08b18e120..ca89d8258dd3 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -40,6 +40,11 @@ enum cx82310_status {
#define CX82310_MTU 1514
#define CMD_EP 0x01
+struct cx82310_priv {
+ struct work_struct reenable_work;
+ struct usbnet *dev;
+};
+
/*
* execute control command
* - optionally send some data (command parameters)
@@ -66,8 +71,8 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
CMD_PACKET_SIZE, &actual_len, CMD_TIMEOUT);
if (ret < 0) {
if (cmd != CMD_GET_LINK_STATUS)
- dev_err(&dev->udev->dev, "send command %#x: error %d\n",
- cmd, ret);
+ netdev_err(dev->net, "send command %#x: error %d\n",
+ cmd, ret);
goto end;
}
@@ -79,30 +84,27 @@ static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply,
CMD_TIMEOUT);
if (ret < 0) {
if (cmd != CMD_GET_LINK_STATUS)
- dev_err(&dev->udev->dev,
- "reply receive error %d\n",
- ret);
+ netdev_err(dev->net, "reply receive error %d\n",
+ ret);
goto end;
}
if (actual_len > 0)
break;
}
if (actual_len == 0) {
- dev_err(&dev->udev->dev, "no reply to command %#x\n",
- cmd);
+ netdev_err(dev->net, "no reply to command %#x\n", cmd);
ret = -EIO;
goto end;
}
if (buf[0] != cmd) {
- dev_err(&dev->udev->dev,
- "got reply to command %#x, expected: %#x\n",
- buf[0], cmd);
+ netdev_err(dev->net, "got reply to command %#x, expected: %#x\n",
+ buf[0], cmd);
ret = -EIO;
goto end;
}
if (buf[1] != STATUS_SUCCESS) {
- dev_err(&dev->udev->dev, "command %#x failed: %#x\n",
- cmd, buf[1]);
+ netdev_err(dev->net, "command %#x failed: %#x\n", cmd,
+ buf[1]);
ret = -EIO;
goto end;
}
@@ -115,6 +117,23 @@ end:
return ret;
}
+static int cx82310_enable_ethernet(struct usbnet *dev)
+{
+ int ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
+
+ if (ret)
+ netdev_err(dev->net, "unable to enable ethernet mode: %d\n",
+ ret);
+ return ret;
+}
+
+static void cx82310_reenable_work(struct work_struct *work)
+{
+ struct cx82310_priv *priv = container_of(work, struct cx82310_priv,
+ reenable_work);
+ cx82310_enable_ethernet(priv->dev);
+}
+
#define partial_len data[0] /* length of partial packet data */
#define partial_rem data[1] /* remaining (missing) data length */
#define partial_data data[2] /* partial packet data */
@@ -126,6 +145,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
struct usb_device *udev = dev->udev;
u8 link[3];
int timeout = 50;
+ struct cx82310_priv *priv;
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -152,6 +172,15 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
if (!dev->partial_data)
return -ENOMEM;
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ ret = -ENOMEM;
+ goto err_partial;
+ }
+ dev->driver_priv = priv;
+ INIT_WORK(&priv->reenable_work, cx82310_reenable_work);
+ priv->dev = dev;
+
/* wait for firmware to become ready (indicated by the link being up) */
while (--timeout) {
ret = cx82310_cmd(dev, CMD_GET_LINK_STATUS, true, NULL, 0,
@@ -162,24 +191,20 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
msleep(500);
}
if (!timeout) {
- dev_err(&udev->dev, "firmware not ready in time\n");
+ netdev_err(dev->net, "firmware not ready in time\n");
ret = -ETIMEDOUT;
goto err;
}
/* enable ethernet mode (?) */
- ret = cx82310_cmd(dev, CMD_ETHERNET_MODE, true, "\x01", 1, NULL, 0);
- if (ret) {
- dev_err(&udev->dev, "unable to enable ethernet mode: %d\n",
- ret);
+ if (cx82310_enable_ethernet(dev))
goto err;
- }
/* get the MAC address */
ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
dev->net->dev_addr, ETH_ALEN);
if (ret) {
- dev_err(&udev->dev, "unable to read MAC address: %d\n", ret);
+ netdev_err(dev->net, "unable to read MAC address: %d\n", ret);
goto err;
}
@@ -190,13 +215,19 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
return 0;
err:
+ kfree(dev->driver_priv);
+err_partial:
kfree((void *)dev->partial_data);
return ret;
}
static void cx82310_unbind(struct usbnet *dev, struct usb_interface *intf)
{
+ struct cx82310_priv *priv = dev->driver_priv;
+
kfree((void *)dev->partial_data);
+ cancel_work_sync(&priv->reenable_work);
+ kfree(dev->driver_priv);
}
/*
@@ -211,6 +242,7 @@ static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
int len;
struct sk_buff *skb2;
+ struct cx82310_priv *priv = dev->driver_priv;
/*
* If the last skb ended with an incomplete packet, this skb contains
@@ -245,9 +277,11 @@ static int cx82310_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
}
- if (len > CX82310_MTU) {
- dev_err(&dev->udev->dev, "RX packet too long: %d B\n",
- len);
+ if (len == 0xffff) {
+ netdev_info(dev->net, "router was rebooted, re-enabling ethernet mode");
+ schedule_work(&priv->reenable_work);
+ } else if (len > CX82310_MTU) {
+ netdev_err(dev->net, "RX packet too long: %d B\n", len);
return 0;
}
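The rx_fixup path above runs in softirq context, where the blocking control transfer in cx82310_enable_ethernet() cannot be issued directly, so the re-enable is deferred to a work item. A minimal sketch of that deferral pattern, with hypothetical names (my_priv, my_slow_reconfigure) standing in for the driver's own:

#include <linux/workqueue.h>

struct my_priv {
	struct work_struct reenable_work;
	struct usbnet *dev;
};

static void my_reenable_fn(struct work_struct *work)
{
	/* Recover the enclosing structure from the embedded work item. */
	struct my_priv *priv = container_of(work, struct my_priv,
					    reenable_work);

	my_slow_reconfigure(priv->dev);	/* may sleep: runs from a kworker */
}

/* bind:           */ INIT_WORK(&priv->reenable_work, my_reenable_fn);
/* atomic context: */ schedule_work(&priv->reenable_work);
/* unbind:         */ cancel_work_sync(&priv->reenable_work);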
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index ed01dc964c99..144c686b4333 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -103,10 +103,6 @@ static int kaweth_probe(
const struct usb_device_id *id /* from id_table */
);
static void kaweth_disconnect(struct usb_interface *intf);
-static int kaweth_internal_control_msg(struct usb_device *usb_dev,
- unsigned int pipe,
- struct usb_ctrlrequest *cmd, void *data,
- int len, int timeout);
static int kaweth_suspend(struct usb_interface *intf, pm_message_t message);
static int kaweth_resume(struct usb_interface *intf);
@@ -236,65 +232,17 @@ struct kaweth_device
};
/****************************************************************
- * kaweth_control
- ****************************************************************/
-static int kaweth_control(struct kaweth_device *kaweth,
- unsigned int pipe,
- __u8 request,
- __u8 requesttype,
- __u16 value,
- __u16 index,
- void *data,
- __u16 size,
- int timeout)
-{
- struct usb_ctrlrequest *dr;
- int retval;
-
- if(in_interrupt()) {
- netdev_dbg(kaweth->net, "in_interrupt()\n");
- return -EBUSY;
- }
-
- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
- if (!dr)
- return -ENOMEM;
-
- dr->bRequestType = requesttype;
- dr->bRequest = request;
- dr->wValue = cpu_to_le16(value);
- dr->wIndex = cpu_to_le16(index);
- dr->wLength = cpu_to_le16(size);
-
- retval = kaweth_internal_control_msg(kaweth->dev,
- pipe,
- dr,
- data,
- size,
- timeout);
-
- kfree(dr);
- return retval;
-}
-
-/****************************************************************
* kaweth_read_configuration
****************************************************************/
static int kaweth_read_configuration(struct kaweth_device *kaweth)
{
- int retval;
-
- retval = kaweth_control(kaweth,
- usb_rcvctrlpipe(kaweth->dev, 0),
+ return usb_control_msg(kaweth->dev, usb_rcvctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_GET_ETHERNET_DESC,
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
- 0,
- 0,
- (void *)&kaweth->configuration,
+ 0, 0,
+ &kaweth->configuration,
sizeof(kaweth->configuration),
KAWETH_CONTROL_TIMEOUT);
-
- return retval;
}
/****************************************************************
@@ -302,21 +250,14 @@ static int kaweth_read_configuration(struct kaweth_device *kaweth)
****************************************************************/
static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
{
- int retval;
-
netdev_dbg(kaweth->net, "Setting URB size to %d\n", (unsigned)urb_size);
- retval = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_URB_SIZE,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- urb_size,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- return retval;
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_URB_SIZE,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ urb_size, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -324,21 +265,14 @@ static int kaweth_set_urb_size(struct kaweth_device *kaweth, __u16 urb_size)
****************************************************************/
static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
{
- int retval;
-
netdev_dbg(kaweth->net, "Set SOFS wait to %d\n", (unsigned)sofs_wait);
- retval = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_SOFS_WAIT,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- sofs_wait,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- return retval;
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_SOFS_WAIT,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ sofs_wait, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -347,22 +281,15 @@ static int kaweth_set_sofs_wait(struct kaweth_device *kaweth, __u16 sofs_wait)
static int kaweth_set_receive_filter(struct kaweth_device *kaweth,
__u16 receive_filter)
{
- int retval;
-
netdev_dbg(kaweth->net, "Set receive filter to %d\n",
(unsigned)receive_filter);
- retval = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_PACKET_FILTER,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- receive_filter,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- return retval;
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_PACKET_FILTER,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ receive_filter, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -407,14 +334,11 @@ static int kaweth_download_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf, kaweth);
netdev_dbg(kaweth->net, "Firmware length: %d\n", data_len);
- return kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
KAWETH_COMMAND_SCAN,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- 0,
- 0,
- (void *)kaweth->firmware_buf,
- data_len,
+ 0, 0,
+ kaweth->firmware_buf, data_len,
KAWETH_CONTROL_TIMEOUT);
}
@@ -433,15 +357,12 @@ static int kaweth_trigger_firmware(struct kaweth_device *kaweth,
kaweth->firmware_buf[6] = 0x00;
kaweth->firmware_buf[7] = 0x00;
- return kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SCAN,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- 0,
- 0,
- (void *)kaweth->firmware_buf,
- 8,
- KAWETH_CONTROL_TIMEOUT);
+ return usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SCAN,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ 0, 0,
+ (void *)kaweth->firmware_buf, 8,
+ KAWETH_CONTROL_TIMEOUT);
}
/****************************************************************
@@ -564,7 +485,8 @@ static int kaweth_resubmit_rx_urb(struct kaweth_device *kaweth,
return result;
}
-static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth);
+static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth,
+ bool may_sleep);
/****************************************************************
* kaweth_usb_receive
@@ -694,7 +616,7 @@ static int kaweth_open(struct net_device *net)
netif_start_queue(net);
- kaweth_async_set_rx_mode(kaweth);
+ kaweth_async_set_rx_mode(kaweth, true);
return 0;
err_out:
@@ -782,7 +704,7 @@ static netdev_tx_t kaweth_start_xmit(struct sk_buff *skb,
spin_lock_irq(&kaweth->device_lock);
- kaweth_async_set_rx_mode(kaweth);
+ kaweth_async_set_rx_mode(kaweth, false);
netif_stop_queue(net);
if (IS_BLOCKED(kaweth->status)) {
goto skip;
@@ -859,36 +781,31 @@ static void kaweth_set_rx_mode(struct net_device *net)
/****************************************************************
* kaweth_async_set_rx_mode
****************************************************************/
-static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth)
+static void kaweth_async_set_rx_mode(struct kaweth_device *kaweth,
+ bool may_sleep)
{
- int result;
+ int ret;
__u16 packet_filter_bitmap = kaweth->packet_filter_bitmap;
kaweth->packet_filter_bitmap = 0;
if (packet_filter_bitmap == 0)
return;
- if (in_interrupt())
+ if (!may_sleep)
return;
- result = kaweth_control(kaweth,
- usb_sndctrlpipe(kaweth->dev, 0),
- KAWETH_COMMAND_SET_PACKET_FILTER,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- packet_filter_bitmap,
- 0,
- (void *)&kaweth->scratch,
- 0,
- KAWETH_CONTROL_TIMEOUT);
-
- if(result < 0) {
+ ret = usb_control_msg(kaweth->dev, usb_sndctrlpipe(kaweth->dev, 0),
+ KAWETH_COMMAND_SET_PACKET_FILTER,
+ USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
+ packet_filter_bitmap, 0,
+ &kaweth->scratch, 0,
+ KAWETH_CONTROL_TIMEOUT);
+ if (ret < 0)
dev_err(&kaweth->intf->dev, "Failed to set Rx mode: %d\n",
- result);
- }
- else {
+ ret);
+ else
netdev_dbg(kaweth->net, "Set Rx mode to %d\n",
packet_filter_bitmap);
- }
}
/****************************************************************
@@ -1196,88 +1113,4 @@ static void kaweth_disconnect(struct usb_interface *intf)
}
-// FIXME this completion stuff is a modified clone of
-// an OLD version of some stuff in usb.c ...
-struct usb_api_data {
- wait_queue_head_t wqh;
- int done;
-};
-
-/*-------------------------------------------------------------------*
- * completion handler for compatibility wrappers (sync control/bulk) *
- *-------------------------------------------------------------------*/
-static void usb_api_blocking_completion(struct urb *urb)
-{
- struct usb_api_data *awd = (struct usb_api_data *)urb->context;
-
- awd->done=1;
- wake_up(&awd->wqh);
-}
-
-/*-------------------------------------------------------------------*
- * COMPATIBILITY STUFF *
- *-------------------------------------------------------------------*/
-
-// Starts urb and waits for completion or timeout
-static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
-{
- struct usb_api_data awd;
- int status;
-
- init_waitqueue_head(&awd.wqh);
- awd.done = 0;
-
- urb->context = &awd;
- status = usb_submit_urb(urb, GFP_ATOMIC);
- if (status) {
- // something went wrong
- usb_free_urb(urb);
- return status;
- }
-
- if (!wait_event_timeout(awd.wqh, awd.done, timeout)) {
- // timeout
- dev_warn(&urb->dev->dev, "usb_control/bulk_msg: timeout\n");
- usb_kill_urb(urb); // remove urb safely
- status = -ETIMEDOUT;
- }
- else {
- status = urb->status;
- }
-
- if (actual_length) {
- *actual_length = urb->actual_length;
- }
-
- usb_free_urb(urb);
- return status;
-}
-
-/*-------------------------------------------------------------------*/
-// returns status (negative) or length (positive)
-static int kaweth_internal_control_msg(struct usb_device *usb_dev,
- unsigned int pipe,
- struct usb_ctrlrequest *cmd, void *data,
- int len, int timeout)
-{
- struct urb *urb;
- int retv;
- int length = 0; /* shut up GCC */
-
- urb = usb_alloc_urb(0, GFP_ATOMIC);
- if (!urb)
- return -ENOMEM;
-
- usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char*)cmd, data,
- len, usb_api_blocking_completion, NULL);
-
- retv = usb_start_wait_urb(urb, timeout, &length);
- if (retv < 0) {
- return retv;
- }
- else {
- return length;
- }
-}
-
module_usb_driver(kaweth_driver);
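The removed kaweth_internal_control_msg()/usb_start_wait_urb() pair reimplemented what usb_control_msg() already provides: build the setup packet, submit the URB and sleep until completion or timeout, returning a negative errno or the number of bytes transferred. A sketch of the call as the driver now uses it, with a hypothetical vendor request:

#include <linux/usb.h>

static int example_vendor_read(struct usb_device *udev, void *buf, u16 len)
{
	return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			       0x01,	/* hypothetical bRequest */
			       USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
			       0, 0,	/* wValue, wIndex */
			       buf, len, 5000 /* timeout in ms */);
}

Since usb_control_msg() sleeps, it must never be called from atomic context; that is why the in_interrupt() check in kaweth_async_set_rx_mode() became an explicit may_sleep argument supplied by the callers, which know whether they hold a spinlock.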
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 1f04f1720426..b0c0c9dd6a02 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -113,7 +113,6 @@ nc_register_read(struct usbnet *dev, u8 regnum, u16 *retval_ptr)
return nc_vendor_read(dev, REQUEST_REGISTER, regnum, retval_ptr);
}
-// no retval ... can become async, usable in_interrupt()
static void
nc_vendor_write(struct usbnet *dev, u8 req, u8 regnum, u16 value)
{
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 5ca1356b8656..a322f51873d0 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -126,31 +126,9 @@ static void qmimux_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *stats)
{
struct qmimux_priv *priv = netdev_priv(net);
- unsigned int start;
- int cpu;
netdev_stats_to_stats64(stats, &net->stats);
-
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats64;
- u64 rx_packets, rx_bytes;
- u64 tx_packets, tx_bytes;
-
- stats64 = per_cpu_ptr(priv->stats64, cpu);
-
- do {
- start = u64_stats_fetch_begin_irq(&stats64->syncp);
- rx_packets = stats64->rx_packets;
- rx_bytes = stats64->rx_bytes;
- tx_packets = stats64->tx_packets;
- tx_bytes = stats64->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, priv->stats64);
}
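dev_fetch_sw_netstats() centralizes the per-cpu fold that qmimux_get_stats64() (and usbnet_get_stats64() further down) used to open-code. The resulting ndo_get_stats64 shape, sketched with a hypothetical private struct holding a struct pcpu_sw_netstats __percpu *stats64:

static void example_get_stats64(struct net_device *net,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(net);

	/* Slow-path counters kept in net->stats ... */
	netdev_stats_to_stats64(stats, &net->stats);
	/* ... plus the per-cpu fast-path counters, read under the
	 * u64_stats seqcount exactly as the removed loop did by hand. */
	dev_fetch_sw_netstats(stats, priv->stats64);
}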
static const struct net_device_ops qmimux_netdev_ops = {
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 9556d431885f..8689835a5214 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -757,13 +757,14 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
- const u8 *mac_addr;
-
/* maybe the boot loader passed the MAC address in devicetree */
- mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(dev->net->dev_addr, mac_addr);
- return;
+ if (!eth_platform_get_mac_address(&dev->udev->dev,
+ dev->net->dev_addr)) {
+ if (is_valid_ether_addr(dev->net->dev_addr)) {
+ /* device tree values are valid, so use them */
+ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
+ return;
+ }
}
/* try reading mac address from EEPROM */
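eth_platform_get_mac_address() folds the old of_get_mac_address() lookup into a more general one (platform data first, then device tree), returning 0 when it filled the buffer. A sketch of the fallback chain it slots into inside a void init helper; example_read_eeprom_mac is a hypothetical stand-in for the driver's EEPROM read:

u8 addr[ETH_ALEN];

/* firmware (platform data / device tree) first */
if (!eth_platform_get_mac_address(&udev->dev, addr) &&
    is_valid_ether_addr(addr)) {
	ether_addr_copy(net->dev_addr, addr);
	return;
}
/* then the adapter's EEPROM */
if (!example_read_eeprom_mac(net->dev_addr))
	return;
/* last resort: a random locally administered address */
eth_hw_addr_random(net);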
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index bb4ccbda031a..ea0d5f04dc3a 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -18,10 +18,12 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
#include <linux/of_net.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
-#define SMSC_DRIVER_VERSION "1.0.6"
+#define SMSC_DRIVER_VERSION "2.0.0"
#define HS_USB_PKT_SIZE (512)
#define FS_USB_PKT_SIZE (64)
#define DEFAULT_HS_BURST_CAP_SIZE (16 * 1024 + 5 * HS_USB_PKT_SIZE)
@@ -49,10 +51,7 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
-#define CARRIER_CHECK_DELAY (2 * HZ)
-
struct smsc95xx_priv {
- u32 chip_id;
u32 mac_cr;
u32 hash_hi;
u32 hash_lo;
@@ -60,10 +59,8 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
- u8 mdix_ctrl;
- bool link_ok;
- struct delayed_work carrier_check;
- struct usbnet *dev;
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
};
static bool turbo_mode = true;
@@ -173,10 +170,14 @@ static int __must_check __smsc95xx_phy_wait_not_busy(struct usbnet *dev,
return -EIO;
}
-static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
+static u32 mii_address_cmd(int phy_id, int idx, u16 op)
+{
+ return (phy_id & 0x1f) << 11 | (idx & 0x1f) << 6 | op;
+}
+
+static int __smsc95xx_mdio_read(struct usbnet *dev, int phy_id, int idx,
int in_pm)
{
- struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
int ret;
@@ -185,14 +186,12 @@ static int __smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx,
/* confirm MII not busy */
ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
+ netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
/* set the address, index & direction (read from PHY) */
- phy_id &= dev->mii.phy_id_mask;
- idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_READ_ | MII_BUSY_;
+ addr = mii_address_cmd(phy_id, idx, MII_READ_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
netdev_warn(dev->net, "Error writing MII_ADDR\n");
@@ -218,10 +217,9 @@ done:
return ret;
}
-static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
+static void __smsc95xx_mdio_write(struct usbnet *dev, int phy_id,
int idx, int regval, int in_pm)
{
- struct usbnet *dev = netdev_priv(netdev);
u32 val, addr;
int ret;
@@ -230,7 +228,7 @@ static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
/* confirm MII not busy */
ret = __smsc95xx_phy_wait_not_busy(dev, in_pm);
if (ret < 0) {
- netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
+ netdev_warn(dev->net, "%s: MII is busy\n", __func__);
goto done;
}
@@ -242,9 +240,7 @@ static void __smsc95xx_mdio_write(struct net_device *netdev, int phy_id,
}
/* set the address, index & direction (write to PHY) */
- phy_id &= dev->mii.phy_id_mask;
- idx &= dev->mii.reg_num_mask;
- addr = (phy_id << 11) | (idx << 6) | MII_WRITE_ | MII_BUSY_;
+ addr = mii_address_cmd(phy_id, idx, MII_WRITE_ | MII_BUSY_);
ret = __smsc95xx_write_reg(dev, MII_ADDR, addr, in_pm);
if (ret < 0) {
netdev_warn(dev->net, "Error writing MII_ADDR\n");
@@ -261,27 +257,34 @@ done:
mutex_unlock(&dev->phy_mutex);
}
-static int smsc95xx_mdio_read_nopm(struct net_device *netdev, int phy_id,
- int idx)
+static int smsc95xx_mdio_read_nopm(struct usbnet *dev, int idx)
{
- return __smsc95xx_mdio_read(netdev, phy_id, idx, 1);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ return __smsc95xx_mdio_read(dev, pdata->phydev->mdio.addr, idx, 1);
}
-static void smsc95xx_mdio_write_nopm(struct net_device *netdev, int phy_id,
- int idx, int regval)
+static void smsc95xx_mdio_write_nopm(struct usbnet *dev, int idx, int regval)
{
- __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 1);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ __smsc95xx_mdio_write(dev, pdata->phydev->mdio.addr, idx, regval, 1);
}
-static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
+static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
- return __smsc95xx_mdio_read(netdev, phy_id, idx, 0);
+ struct usbnet *dev = bus->priv;
+
+ return __smsc95xx_mdio_read(dev, phy_id, idx, 0);
}
-static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
- int regval)
+static int smsc95xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
+ u16 regval)
{
- __smsc95xx_mdio_write(netdev, phy_id, idx, regval, 0);
+ struct usbnet *dev = bus->priv;
+
+ __smsc95xx_mdio_write(dev, phy_id, idx, regval, 0);
+ return 0;
}
static int __must_check smsc95xx_wait_eeprom(struct usbnet *dev)
@@ -455,7 +458,7 @@ static unsigned int smsc95xx_hash(char addr[ETH_ALEN])
static void smsc95xx_set_multicast(struct net_device *netdev)
{
struct usbnet *dev = netdev_priv(netdev);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
@@ -511,22 +514,23 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
netdev_warn(dev->net, "failed to initiate async write to MAC_CR\n");
}
-static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
- u16 lcladv, u16 rmtadv)
+static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev)
{
u32 flow = 0, afc_cfg;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+ bool tx_pause, rx_pause;
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
if (ret < 0)
return ret;
- if (duplex == DUPLEX_FULL) {
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+ if (pdata->phydev->duplex == DUPLEX_FULL) {
+ phy_get_pause(pdata->phydev, &tx_pause, &rx_pause);
- if (cap & FLOW_CTRL_RX)
+ if (rx_pause)
flow = 0xFFFF0002;
- if (cap & FLOW_CTRL_TX) {
+ if (tx_pause) {
afc_cfg |= 0xF;
flow |= 0xFFFF0000;
} else {
@@ -534,8 +538,8 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
}
netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
- cap & FLOW_CTRL_RX ? "enabled" : "disabled",
- cap & FLOW_CTRL_TX ? "enabled" : "disabled");
+ rx_pause ? "enabled" : "disabled",
+ tx_pause ? "enabled" : "disabled");
} else {
netif_dbg(dev, link, dev->net, "half duplex\n");
afc_cfg |= 0xF;
@@ -550,33 +554,16 @@ static int smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
static int smsc95xx_link_reset(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- struct mii_if_info *mii = &dev->mii;
- struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
- u16 lcladv, rmtadv;
int ret;
- /* clear interrupt status */
- ret = smsc95xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC);
- if (ret < 0)
- return ret;
-
ret = smsc95xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
if (ret < 0)
return ret;
- mii_check_media(mii, 1, 1);
- mii_ethtool_gset(&dev->mii, &ecmd);
- lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
- rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
-
- netif_dbg(dev, link, dev->net,
- "speed: %u duplex: %d lcladv: %04x rmtadv: %04x\n",
- ethtool_cmd_speed(&ecmd), ecmd.duplex, lcladv, rmtadv);
-
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
- if (ecmd.duplex != DUPLEX_FULL) {
+ if (pdata->phydev->duplex != DUPLEX_FULL) {
pdata->mac_cr &= ~MAC_CR_FDPX_;
pdata->mac_cr |= MAC_CR_RCVOWN_;
} else {
@@ -589,7 +576,7 @@ static int smsc95xx_link_reset(struct usbnet *dev)
if (ret < 0)
return ret;
- ret = smsc95xx_phy_update_flowcontrol(dev, ecmd.duplex, lcladv, rmtadv);
+ ret = smsc95xx_phy_update_flowcontrol(dev);
if (ret < 0)
netdev_warn(dev->net, "Error updating PHY flow control\n");
@@ -616,44 +603,6 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata);
}
-static void set_carrier(struct usbnet *dev, bool link)
-{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- if (pdata->link_ok == link)
- return;
-
- pdata->link_ok = link;
-
- if (link)
- usbnet_link_change(dev, 1, 0);
- else
- usbnet_link_change(dev, 0, 0);
-}
-
-static void check_carrier(struct work_struct *work)
-{
- struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
- carrier_check.work);
- struct usbnet *dev = pdata->dev;
- int ret;
-
- if (pdata->suspend_flags != 0)
- return;
-
- ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read MII_BMSR\n");
- return;
- }
- if (ret & BMSR_LSTATUS)
- set_carrier(dev, 1);
- else
- set_carrier(dev, 0);
-
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
-}
-
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -747,7 +696,7 @@ static void smsc95xx_ethtool_get_wol(struct net_device *net,
struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
wolinfo->supported = SUPPORTED_WAKE;
wolinfo->wolopts = pdata->wolopts;
@@ -757,7 +706,7 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
struct ethtool_wolinfo *wolinfo)
{
struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
if (wolinfo->wolopts & ~SUPPORTED_WAKE)
@@ -772,108 +721,15 @@ static int smsc95xx_ethtool_set_wol(struct net_device *net,
return ret;
}
-static int get_mdix_status(struct net_device *net)
+static u32 smsc95xx_get_link(struct net_device *net)
{
- struct usbnet *dev = netdev_priv(net);
- u32 val;
- int buf;
-
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, SPECIAL_CTRL_STS);
- if (buf & SPECIAL_CTRL_STS_OVRRD_AMDIX_) {
- if (buf & SPECIAL_CTRL_STS_AMDIX_ENABLE_)
- return ETH_TP_MDI_AUTO;
- else if (buf & SPECIAL_CTRL_STS_AMDIX_STATE_)
- return ETH_TP_MDI_X;
- } else {
- buf = smsc95xx_read_reg(dev, STRAP_STATUS, &val);
- if (val & STRAP_STATUS_AMDIX_EN_)
- return ETH_TP_MDI_AUTO;
- }
-
- return ETH_TP_MDI;
-}
-
-static void set_mdix_status(struct net_device *net, __u8 mdix_ctrl)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int buf;
-
- if ((pdata->chip_id == ID_REV_CHIP_ID_9500A_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_9530_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_89530_) ||
- (pdata->chip_id == ID_REV_CHIP_ID_9730_)) {
- /* Extend Manual AutoMDIX timer for 9500A/9500Ai */
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- PHY_EDPD_CONFIG);
- buf |= PHY_EDPD_CONFIG_EXT_CROSSOVER_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- PHY_EDPD_CONFIG, buf);
- }
-
- if (mdix_ctrl == ETH_TP_MDI) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- } else if (mdix_ctrl == ETH_TP_MDI_X) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf |= SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- buf |= SPECIAL_CTRL_STS_AMDIX_STATE_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- } else if (mdix_ctrl == ETH_TP_MDI_AUTO) {
- buf = smsc95xx_mdio_read(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS);
- buf &= ~SPECIAL_CTRL_STS_OVRRD_AMDIX_;
- buf &= ~(SPECIAL_CTRL_STS_AMDIX_ENABLE_ |
- SPECIAL_CTRL_STS_AMDIX_STATE_);
- buf |= SPECIAL_CTRL_STS_AMDIX_ENABLE_;
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id,
- SPECIAL_CTRL_STS, buf);
- }
- pdata->mdix_ctrl = mdix_ctrl;
-}
-
-static int smsc95xx_get_link_ksettings(struct net_device *net,
- struct ethtool_link_ksettings *cmd)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int retval;
-
- retval = usbnet_get_link_ksettings(net, cmd);
-
- cmd->base.eth_tp_mdix = pdata->mdix_ctrl;
- cmd->base.eth_tp_mdix_ctrl = pdata->mdix_ctrl;
-
- return retval;
-}
-
-static int smsc95xx_set_link_ksettings(struct net_device *net,
- const struct ethtool_link_ksettings *cmd)
-{
- struct usbnet *dev = netdev_priv(net);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- int retval;
-
- if (pdata->mdix_ctrl != cmd->base.eth_tp_mdix_ctrl)
- set_mdix_status(net, cmd->base.eth_tp_mdix_ctrl);
-
- retval = usbnet_set_link_ksettings(net, cmd);
-
- return retval;
+ phy_read_status(net->phydev);
+ return net->phydev->link;
}
static const struct ethtool_ops smsc95xx_ethtool_ops = {
- .get_link = usbnet_get_link,
- .nway_reset = usbnet_nway_reset,
+ .get_link = smsc95xx_get_link,
+ .nway_reset = phy_ethtool_nway_reset,
.get_drvinfo = usbnet_get_drvinfo,
.get_msglevel = usbnet_get_msglevel,
.set_msglevel = usbnet_set_msglevel,
@@ -884,30 +740,29 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = {
.get_regs = smsc95xx_ethtool_getregs,
.get_wol = smsc95xx_ethtool_get_wol,
.set_wol = smsc95xx_ethtool_set_wol,
- .get_link_ksettings = smsc95xx_get_link_ksettings,
- .set_link_ksettings = smsc95xx_set_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_ts_info = ethtool_op_get_ts_info,
};
static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
- struct usbnet *dev = netdev_priv(netdev);
-
if (!netif_running(netdev))
return -EINVAL;
- return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+ return phy_mii_ioctl(netdev->phydev, rq, cmd);
}
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
- const u8 *mac_addr;
-
/* maybe the boot loader passed the MAC address in devicetree */
- mac_addr = of_get_mac_address(dev->udev->dev.of_node);
- if (!IS_ERR(mac_addr)) {
- ether_addr_copy(dev->net->dev_addr, mac_addr);
- return;
+ if (!eth_platform_get_mac_address(&dev->udev->dev,
+ dev->net->dev_addr)) {
+ if (is_valid_ether_addr(dev->net->dev_addr)) {
+ /* device tree values are valid, so use them */
+ netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
+ return;
+ }
}
/* try reading mac address from EEPROM */
@@ -942,7 +797,7 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
/* starts the TX path */
static int smsc95xx_start_tx_path(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
int ret;
@@ -962,7 +817,7 @@ static int smsc95xx_start_tx_path(struct usbnet *dev)
/* Starts the Receive path */
static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
unsigned long flags;
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
@@ -972,54 +827,9 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
return __smsc95xx_write_reg(dev, MAC_CR, pdata->mac_cr, in_pm);
}
-static int smsc95xx_phy_initialize(struct usbnet *dev)
-{
- int bmcr, ret, timeout = 0;
-
- /* Initialize MII structure */
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = smsc95xx_mdio_read;
- dev->mii.mdio_write = smsc95xx_mdio_write;
- dev->mii.phy_id_mask = 0x1f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = SMSC95XX_INTERNAL_PHY_ID;
-
- /* reset phy and wait for reset to complete */
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
-
- do {
- msleep(10);
- bmcr = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR);
- timeout++;
- } while ((bmcr & BMCR_RESET) && (timeout < 100));
-
- if (timeout >= 100) {
- netdev_warn(dev->net, "timeout on PHY Reset");
- return -EIO;
- }
-
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
- ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
-
- /* read to clear */
- ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to read PHY_INT_SRC during init\n");
- return ret;
- }
-
- smsc95xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK,
- PHY_INT_MASK_DEFAULT_);
- mii_nway_restart(&dev->mii);
-
- netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
- return 0;
-}
-
static int smsc95xx_reset(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 read_buf, write_buf, burst_cap;
int ret = 0, timeout;
@@ -1198,12 +1008,6 @@ static int smsc95xx_reset(struct usbnet *dev)
smsc95xx_set_multicast(dev->net);
- ret = smsc95xx_phy_initialize(dev);
- if (ret < 0) {
- netdev_warn(dev->net, "Failed to init PHY\n");
- return ret;
- }
-
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
if (ret < 0)
return ret;
@@ -1247,7 +1051,8 @@ static const struct net_device_ops smsc95xx_netdev_ops = {
static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
{
- struct smsc95xx_priv *pdata = NULL;
+ struct smsc95xx_priv *pdata;
+ bool is_internal_phy;
u32 val;
int ret;
@@ -1259,13 +1064,12 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
return ret;
}
- dev->data[0] = (unsigned long)kzalloc(sizeof(struct smsc95xx_priv),
- GFP_KERNEL);
-
- pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
+ dev->driver_priv = pdata;
+
spin_lock_init(&pdata->mac_cr_lock);
/* LAN95xx devices do not alter the computed checksum of 0 to 0xffff.
@@ -1290,15 +1094,50 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto free_pdata;
+ pdata->mdiobus = mdiobus_alloc();
+ if (!pdata->mdiobus) {
+ ret = -ENOMEM;
+ goto free_pdata;
+ }
+
+ ret = smsc95xx_read_reg(dev, HW_CFG, &val);
+ if (ret < 0)
+ goto free_mdio;
+
+ is_internal_phy = !(val & HW_CFG_PSEL_);
+ if (is_internal_phy)
+ pdata->mdiobus->phy_mask = ~(1u << SMSC95XX_INTERNAL_PHY_ID);
+
+ pdata->mdiobus->priv = dev;
+ pdata->mdiobus->read = smsc95xx_mdiobus_read;
+ pdata->mdiobus->write = smsc95xx_mdiobus_write;
+ pdata->mdiobus->name = "smsc95xx-mdiobus";
+ pdata->mdiobus->parent = &dev->udev->dev;
+
+ snprintf(pdata->mdiobus->id, ARRAY_SIZE(pdata->mdiobus->id),
+ "usb-%03d:%03d", dev->udev->bus->busnum, dev->udev->devnum);
+
+ ret = mdiobus_register(pdata->mdiobus);
+ if (ret) {
+ netdev_err(dev->net, "Could not register MDIO bus\n");
+ goto free_mdio;
+ }
+
+ pdata->phydev = phy_find_first(pdata->mdiobus);
+ if (!pdata->phydev) {
+ netdev_err(dev->net, "no PHY found\n");
+ ret = -ENODEV;
+ goto unregister_mdio;
+ }
+
+ pdata->phydev->is_internal = is_internal_phy;
+
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
if (ret < 0)
- goto free_pdata;
+ goto unregister_mdio;
val >>= 16;
- pdata->chip_id = val;
- pdata->mdix_ctrl = get_mdix_status(dev->net);
-
if ((val == ID_REV_CHIP_ID_9500A_) || (val == ID_REV_CHIP_ID_9530_) ||
(val == ID_REV_CHIP_ID_89530_) || (val == ID_REV_CHIP_ID_9730_))
pdata->features = (FEATURE_8_WAKEUP_FILTERS |
@@ -1314,12 +1153,13 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->min_mtu = ETH_MIN_MTU;
dev->net->max_mtu = ETH_DATA_LEN;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ return 0;
- pdata->dev = dev;
- INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+unregister_mdio:
+ mdiobus_unregister(pdata->mdiobus);
- return 0;
+free_mdio:
+ mdiobus_free(pdata->mdiobus);
free_pdata:
kfree(pdata);
@@ -1328,15 +1168,47 @@ free_pdata:
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
-
- if (pdata) {
- cancel_delayed_work_sync(&pdata->carrier_check);
- netif_dbg(dev, ifdown, dev->net, "free pdata\n");
- kfree(pdata);
- pdata = NULL;
- dev->data[0] = 0;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+
+ mdiobus_unregister(pdata->mdiobus);
+ mdiobus_free(pdata->mdiobus);
+ netif_dbg(dev, ifdown, dev->net, "free pdata\n");
+ kfree(pdata);
+}
+
+static void smsc95xx_handle_link_change(struct net_device *net)
+{
+ phy_print_status(net->phydev);
+}
+
+static int smsc95xx_start_phy(struct usbnet *dev)
+{
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+ struct net_device *net = dev->net;
+ int ret;
+
+ ret = smsc95xx_reset(dev);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_connect_direct(net, pdata->phydev,
+ &smsc95xx_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ netdev_err(net, "can't attach PHY to %s\n", pdata->mdiobus->id);
+ return ret;
}
+
+ phy_attached_info(net->phydev);
+ phy_start(net->phydev);
+ return 0;
+}
+
+static int smsc95xx_disconnect_phy(struct usbnet *dev)
+{
+ phy_stop(dev->net->phydev);
+ phy_disconnect(dev->net->phydev);
+ return 0;
}
static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
@@ -1347,39 +1219,37 @@ static u32 smsc_crc(const u8 *buffer, size_t len, int filter)
static int smsc95xx_enable_phy_wakeup_interrupts(struct usbnet *dev, u16 mask)
{
- struct mii_if_info *mii = &dev->mii;
int ret;
netdev_dbg(dev->net, "enabling PHY wakeup interrupts\n");
/* read to clear */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_SRC);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_SRC);
if (ret < 0)
return ret;
/* enable interrupt source */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_INT_MASK);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_INT_MASK);
if (ret < 0)
return ret;
ret |= mask;
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_INT_MASK, ret);
+ smsc95xx_mdio_write_nopm(dev, PHY_INT_MASK, ret);
return 0;
}
static int smsc95xx_link_ok_nopm(struct usbnet *dev)
{
- struct mii_if_info *mii = &dev->mii;
int ret;
/* first, a dummy read, needed to latch some MII phys */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
if (ret < 0)
return ret;
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, MII_BMSR);
+ ret = smsc95xx_mdio_read_nopm(dev, MII_BMSR);
if (ret < 0)
return ret;
@@ -1388,7 +1258,7 @@ static int smsc95xx_link_ok_nopm(struct usbnet *dev)
static int smsc95xx_enter_suspend0(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1427,8 +1297,7 @@ static int smsc95xx_enter_suspend0(struct usbnet *dev)
static int smsc95xx_enter_suspend1(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
- struct mii_if_info *mii = &dev->mii;
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1436,17 +1305,17 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
* compatibility with non-standard link partners
*/
if (pdata->features & FEATURE_PHY_NLP_CROSSOVER)
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_EDPD_CONFIG,
- PHY_EDPD_CONFIG_DEFAULT);
+ smsc95xx_mdio_write_nopm(dev, PHY_EDPD_CONFIG,
+ PHY_EDPD_CONFIG_DEFAULT);
/* enable energy detect power-down mode */
- ret = smsc95xx_mdio_read_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS);
+ ret = smsc95xx_mdio_read_nopm(dev, PHY_MODE_CTRL_STS);
if (ret < 0)
return ret;
ret |= MODE_CTRL_STS_EDPWRDOWN_;
- smsc95xx_mdio_write_nopm(dev->net, mii->phy_id, PHY_MODE_CTRL_STS, ret);
+ smsc95xx_mdio_write_nopm(dev, PHY_MODE_CTRL_STS, ret);
/* enter SUSPEND1 mode */
ret = smsc95xx_read_reg_nopm(dev, PM_CTRL, &val);
@@ -1475,7 +1344,7 @@ static int smsc95xx_enter_suspend1(struct usbnet *dev)
static int smsc95xx_enter_suspend2(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1497,7 +1366,7 @@ static int smsc95xx_enter_suspend2(struct usbnet *dev)
static int smsc95xx_enter_suspend3(struct usbnet *dev)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val;
int ret;
@@ -1536,7 +1405,7 @@ static int smsc95xx_enter_suspend3(struct usbnet *dev)
static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
int ret;
if (!netif_running(dev->net)) {
@@ -1584,7 +1453,7 @@ static int smsc95xx_autosuspend(struct usbnet *dev, u32 link_up)
static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usbnet *dev = usb_get_intfdata(intf);
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
u32 val, link_up;
int ret;
@@ -1594,8 +1463,6 @@ static int smsc95xx_suspend(struct usb_interface *intf, pm_message_t message)
return ret;
}
- cancel_delayed_work_sync(&pdata->carrier_check);
-
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
@@ -1839,10 +1706,6 @@ done:
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
- if (ret)
- schedule_delayed_work(&pdata->carrier_check,
- CARRIER_CHECK_DELAY);
-
return ret;
}
@@ -1855,14 +1718,13 @@ static int smsc95xx_resume(struct usb_interface *intf)
u32 val;
BUG_ON(!dev);
- pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ pdata = dev->driver_priv;
suspend_flags = pdata->suspend_flags;
netdev_dbg(dev->net, "resume suspend_flags=0x%02x\n", suspend_flags);
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
- schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
@@ -1893,6 +1755,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
if (ret < 0)
netdev_warn(dev->net, "usbnet_resume error\n");
+ phy_init_hw(pdata->phydev);
return ret;
}
@@ -2075,7 +1938,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
static int smsc95xx_manage_power(struct usbnet *dev, int on)
{
- struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+ struct smsc95xx_priv *pdata = dev->driver_priv;
dev->intf->needs_remote_wakeup = on;
@@ -2098,7 +1961,8 @@ static const struct driver_info smsc95xx_info = {
.bind = smsc95xx_bind,
.unbind = smsc95xx_unbind,
.link_reset = smsc95xx_link_reset,
- .reset = smsc95xx_reset,
+ .reset = smsc95xx_start_phy,
+ .stop = smsc95xx_disconnect_phy,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
.status = smsc95xx_status,
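The smsc95xx conversion replaces the old mii_if_info glue and hand-rolled carrier polling with a registered MDIO bus plus an attached phylib PHY. A condensed sketch of that bring-up sequence, error handling omitted, using the names from the hunks above:

bus = mdiobus_alloc();
bus->name = "smsc95xx-mdiobus";
bus->read = smsc95xx_mdiobus_read;	/* int (*)(struct mii_bus *, int, int) */
bus->write = smsc95xx_mdiobus_write;	/* int (*)(struct mii_bus *, int, int, u16) */
bus->priv = dev;
snprintf(bus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
	 dev->udev->bus->busnum, dev->udev->devnum);
mdiobus_register(bus);			/* probes the non-masked addresses */

phydev = phy_find_first(bus);		/* first PHY found on the bus */
phy_connect_direct(net, phydev, smsc95xx_handle_link_change,
		   PHY_INTERFACE_MODE_MII);
phy_start(phydev);			/* phylib state machine now owns the link */

Once attached, phylib supplies phy_ethtool_get/set_link_ksettings, phy_ethtool_nway_reset and phy_mii_ioctl, which is why the driver-local ksettings, ioctl and MDI-X code could be deleted wholesale.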
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2b2a841cd938..6062dc27870e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -34,9 +34,6 @@
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
-#define DRIVER_VERSION "22-Aug-2005"
-
-
/*-------------------------------------------------------------------------*/
/*
@@ -597,7 +594,7 @@ static void rx_complete (struct urb *urb)
case -EPIPE:
dev->net->stats.rx_errors++;
usbnet_defer_kevent (dev, EVENT_RX_HALT);
- // FALLTHROUGH
+ fallthrough;
/* software-driven interface shutdown */
case -ECONNRESET: /* async unlink */
@@ -986,31 +983,9 @@ EXPORT_SYMBOL_GPL(usbnet_set_link_ksettings);
void usbnet_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
{
struct usbnet *dev = netdev_priv(net);
- unsigned int start;
- int cpu;
netdev_stats_to_stats64(stats, &net->stats);
-
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats64;
- u64 rx_packets, rx_bytes;
- u64 tx_packets, tx_bytes;
-
- stats64 = per_cpu_ptr(dev->stats64, cpu);
-
- do {
- start = u64_stats_fetch_begin_irq(&stats64->syncp);
- rx_packets = stats64->rx_packets;
- rx_bytes = stats64->rx_bytes;
- tx_packets = stats64->tx_packets;
- tx_bytes = stats64->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, dev->stats64);
}
EXPORT_SYMBOL_GPL(usbnet_get_stats64);
@@ -1047,7 +1022,6 @@ void usbnet_get_drvinfo (struct net_device *net, struct ethtool_drvinfo *info)
struct usbnet *dev = netdev_priv(net);
strlcpy (info->driver, dev->driver_name, sizeof info->driver);
- strlcpy (info->version, DRIVER_VERSION, sizeof info->version);
strlcpy (info->fw_version, dev->driver_info->description,
sizeof info->fw_version);
usb_make_path (dev->udev, info->bus_info, sizeof info->bus_info);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index a475f48d43c4..8c737668008a 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -234,14 +234,14 @@ static bool veth_is_xdp_frame(void *ptr)
return (unsigned long)ptr & VETH_XDP_FLAG;
}
-static void *veth_ptr_to_xdp(void *ptr)
+static struct xdp_frame *veth_ptr_to_xdp(void *ptr)
{
return (void *)((unsigned long)ptr & ~VETH_XDP_FLAG);
}
-static void *veth_xdp_to_ptr(void *ptr)
+static void *veth_xdp_to_ptr(struct xdp_frame *xdp)
{
- return (void *)((unsigned long)ptr | VETH_XDP_FLAG);
+ return (void *)((unsigned long)xdp | VETH_XDP_FLAG);
}
static void veth_ptr_free(void *ptr)
@@ -420,6 +420,14 @@ static int veth_select_rxq(struct net_device *dev)
return smp_processor_id() % dev->real_num_rx_queues;
}
+static struct net_device *veth_peer_dev(struct net_device *dev)
+{
+ struct veth_priv *priv = netdev_priv(dev);
+
+ /* Callers must be in an RCU read-side critical section. */
+ return rcu_dereference(priv->peer);
+}
+
static int veth_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames,
u32 flags, bool ndo_xmit)
@@ -897,14 +905,13 @@ static void veth_napi_del(struct net_device *dev)
struct veth_rq *rq = &priv->rq[i];
napi_disable(&rq->xdp_napi);
- napi_hash_del(&rq->xdp_napi);
+ __netif_napi_del(&rq->xdp_napi);
}
synchronize_net();
for (i = 0; i < dev->real_num_rx_queues; i++) {
struct veth_rq *rq = &priv->rq[i];
- netif_napi_del(&rq->xdp_napi);
rq->rx_notify_masked = false;
ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free);
}
@@ -1225,6 +1232,7 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_set_rx_headroom = veth_set_rx_headroom,
.ndo_bpf = veth_xdp,
.ndo_xdp_xmit = veth_ndo_xdp_xmit,
+ .ndo_get_peer_dev = veth_peer_dev,
};
#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
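veth's ptr_ring carries two kinds of object, sk_buffs queued by the peer and xdp_frames queued via ndo_xdp_xmit, distinguished by setting the low bit of the pointer (valid because both allocations are at least word aligned). The retyped helpers above make that scheme explicit; in isolation, with VETH_XDP_FLAG being 1UL, it looks like this:

static inline void *tag_xdp_frame(struct xdp_frame *frame)
{
	return (void *)((unsigned long)frame | 1UL);	/* mark as XDP */
}

static inline bool ptr_is_xdp(void *ptr)
{
	return (unsigned long)ptr & 1UL;
}

static inline struct xdp_frame *untag_xdp_frame(void *ptr)
{
	return (struct xdp_frame *)((unsigned long)ptr & ~1UL);
}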
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 668685c09e65..d2d2c4a53cf2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -68,6 +68,8 @@ static const unsigned long guest_offloads[] = {
(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
(1ULL << VIRTIO_NET_F_GUEST_UFO))
+#define GUEST_OFFLOAD_CSUM_MASK (1ULL << VIRTIO_NET_F_GUEST_CSUM)
+
struct virtnet_stat_desc {
char desc[ETH_GSTRING_LEN];
size_t offset;
@@ -2522,29 +2524,48 @@ static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
return 0;
}
+static netdev_features_t virtnet_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ /* If Rx checksum is disabled, LRO should also be disabled. */
+ if (!(features & NETIF_F_RXCSUM))
+ features &= ~NETIF_F_LRO;
+
+ return features;
+}
+
static int virtnet_set_features(struct net_device *dev,
netdev_features_t features)
{
struct virtnet_info *vi = netdev_priv(dev);
- u64 offloads;
+ u64 offloads = vi->guest_offloads;
int err;
- if ((dev->features ^ features) & NETIF_F_LRO) {
- if (vi->xdp_queue_pairs)
- return -EBUSY;
+ /* Don't allow configuration while XDP is active. */
+ if (vi->xdp_queue_pairs)
+ return -EBUSY;
+ if ((dev->features ^ features) & NETIF_F_LRO) {
if (features & NETIF_F_LRO)
- offloads = vi->guest_offloads_capable;
+ offloads |= GUEST_OFFLOAD_LRO_MASK &
+ vi->guest_offloads_capable;
else
- offloads = vi->guest_offloads_capable &
- ~GUEST_OFFLOAD_LRO_MASK;
+ offloads &= ~GUEST_OFFLOAD_LRO_MASK;
+ }
- err = virtnet_set_guest_offloads(vi, offloads);
- if (err)
- return err;
- vi->guest_offloads = offloads;
+ if ((dev->features ^ features) & NETIF_F_RXCSUM) {
+ if (features & NETIF_F_RXCSUM)
+ offloads |= GUEST_OFFLOAD_CSUM_MASK &
+ vi->guest_offloads_capable;
+ else
+ offloads &= ~GUEST_OFFLOAD_CSUM_MASK;
}
+ err = virtnet_set_guest_offloads(vi, offloads);
+ if (err)
+ return err;
+
+ vi->guest_offloads = offloads;
return 0;
}
@@ -2563,6 +2584,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
.ndo_set_features = virtnet_set_features,
+ .ndo_fix_features = virtnet_fix_features,
};
static void virtnet_config_changed_work(struct work_struct *work)
@@ -2610,12 +2632,11 @@ static void virtnet_free_queues(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- napi_hash_del(&vi->rq[i].napi);
- netif_napi_del(&vi->rq[i].napi);
- netif_napi_del(&vi->sq[i].napi);
+ __netif_napi_del(&vi->rq[i].napi);
+ __netif_napi_del(&vi->sq[i].napi);
}
- /* We called napi_hash_del() before netif_napi_del(),
+ /* We called __netif_napi_del(), so
* we need to respect an RCU grace period before freeing vi->rq
*/
synchronize_net();
@@ -3014,8 +3035,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_LRO;
- if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
+ dev->hw_features |= NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_LRO;
+ }
dev->vlan_features = dev->features;
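ndo_fix_features is the right hook for a dependency like this: the core calls it from netdev_update_features() before ndo_set_features, so the driver can silently drop LRO whenever RXCSUM is being disabled and set_features only ever sees a self-consistent mask. The generic shape of such a hook:

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* Receive checksum validation is a prerequisite for LRO. */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_LRO;
	return features;
}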
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b9fefe27e3e8..1a557aeba32b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -190,8 +190,9 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
-/* Find VXLAN socket based on network namespace, address family and UDP port
- * and enabled unshareable flags.
+/* Find VXLAN socket based on network namespace, address family, UDP port,
+ * enabled unshareable flags and socket device binding (see l3mdev with
+ * non-default VRF).
*/
static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family,
__be16 port, u32 flags, int ifindex)
@@ -1825,7 +1826,6 @@ static bool vxlan_ecn_decapsulate(struct vxlan_sock *vs, void *oiph,
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
{
- struct pcpu_sw_netstats *stats;
struct vxlan_dev *vxlan;
struct vxlan_sock *vs;
struct vxlanhdr unparsed;
@@ -1875,6 +1875,10 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
!net_eq(vxlan->net, dev_net(vxlan->dev))))
goto drop;
+ if (vs->flags & VXLAN_F_REMCSUM_RX)
+ if (unlikely(!vxlan_remcsum(&unparsed, skb, vs->flags)))
+ goto drop;
+
if (vxlan_collect_metadata(vs)) {
struct metadata_dst *tun_dst;
@@ -1891,9 +1895,6 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- if (vs->flags & VXLAN_F_REMCSUM_RX)
- if (!vxlan_remcsum(&unparsed, skb, vs->flags))
- goto drop;
if (vs->flags & VXLAN_F_GBP)
vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
/* Note that GBP and GPE can never be active together. This is
@@ -1938,12 +1939,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- stats = this_cpu_ptr(vxlan->dev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
-
+ dev_sw_netstats_rx_add(vxlan->dev, skb->len);
gro_cells_receive(&vxlan->gro_cells, skb);
rcu_read_unlock();
@@ -3890,7 +3886,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
}
err = rtnl_configure_link(dev, NULL);
- if (err)
+ if (err < 0)
goto unlink;
if (f) {
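dev_sw_netstats_rx_add() wraps the u64_stats sequence that vxlan_rcv() open-coded; roughly, it expands to:

static inline void example_sw_netstats_rx_add(struct net_device *dev,
					      unsigned int len)
{
	struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);	/* seqcount for 32-bit readers */
	tstats->rx_packets++;
	tstats->rx_bytes += len;
	u64_stats_update_end(&tstats->syncp);
}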
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 9edd94679283..dca97cd7c4e7 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -1295,3 +1295,4 @@ static struct platform_driver ucc_hdlc_driver = {
module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index d6cfd51613ed..409e5a7ad8e2 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -271,65 +271,62 @@ static inline struct net_device **get_dev_p(struct pvc_device *pvc,
}
-static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
+static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
- u16 head_len;
- struct sk_buff *skb = *skb_p;
-
- switch (skb->protocol) {
- case cpu_to_be16(NLPID_CCITT_ANSI_LMI):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_CCITT_ANSI_LMI;
- break;
-
- case cpu_to_be16(NLPID_CISCO_LMI):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_CISCO_LMI;
- break;
-
- case cpu_to_be16(ETH_P_IP):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_IP;
- break;
-
- case cpu_to_be16(ETH_P_IPV6):
- head_len = 4;
- skb_push(skb, head_len);
- skb->data[3] = NLPID_IPV6;
- break;
-
- case cpu_to_be16(ETH_P_802_3):
- head_len = 10;
- if (skb_headroom(skb) < head_len) {
- struct sk_buff *skb2 = skb_realloc_headroom(skb,
- head_len);
- if (!skb2)
- return -ENOBUFS;
- dev_kfree_skb(skb);
- skb = *skb_p = skb2;
+ if (!skb->dev) { /* Control packets */
+ switch (dlci) {
+ case LMI_CCITT_ANSI_DLCI:
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_CCITT_ANSI_LMI;
+ break;
+
+ case LMI_CISCO_DLCI:
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_CISCO_LMI;
+ break;
+
+ default:
+ return -EINVAL;
}
- skb_push(skb, head_len);
+
+ } else if (skb->dev->type == ARPHRD_DLCI) {
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_IP;
+ break;
+
+ case htons(ETH_P_IPV6):
+ skb_push(skb, 4);
+ skb->data[3] = NLPID_IPV6;
+ break;
+
+ default:
+ skb_push(skb, 10);
+ skb->data[3] = FR_PAD;
+ skb->data[4] = NLPID_SNAP;
+ /* OUI 00-00-00 indicates an Ethertype follows */
+ skb->data[5] = 0x00;
+ skb->data[6] = 0x00;
+ skb->data[7] = 0x00;
+ /* This should be an Ethertype: */
+ *(__be16 *)(skb->data + 8) = skb->protocol;
+ }
+
+ } else if (skb->dev->type == ARPHRD_ETHER) {
+ skb_push(skb, 10);
skb->data[3] = FR_PAD;
skb->data[4] = NLPID_SNAP;
- skb->data[5] = FR_PAD;
+ /* OUI 00-80-C2 stands for the 802.1 organization */
+ skb->data[5] = 0x00;
skb->data[6] = 0x80;
skb->data[7] = 0xC2;
+ /* PID 00-07 stands for Ethernet frames without FCS */
skb->data[8] = 0x00;
- skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
- break;
+ skb->data[9] = 0x07;
- default:
- head_len = 10;
- skb_push(skb, head_len);
- skb->data[3] = FR_PAD;
- skb->data[4] = NLPID_SNAP;
- skb->data[5] = FR_PAD;
- skb->data[6] = FR_PAD;
- skb->data[7] = FR_PAD;
- *(__be16*)(skb->data + 8) = skb->protocol;
+ } else {
+ return -EINVAL;
}
dlci_to_q922(skb->data, dlci);
@@ -410,38 +407,49 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
- if (pvc->state.active) {
- if (dev->type == ARPHRD_ETHER) {
- int pad = ETH_ZLEN - skb->len;
- if (pad > 0) { /* Pad the frame with zeros */
- int len = skb->len;
- if (skb_tailroom(skb) < pad)
- if (pskb_expand_head(skb, 0, pad,
- GFP_ATOMIC)) {
- dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
- skb_put(skb, pad);
- memset(skb->data + len, 0, pad);
- }
- skb->protocol = cpu_to_be16(ETH_P_802_3);
- }
- if (!fr_hard_header(&skb, pvc->dlci)) {
- dev->stats.tx_bytes += skb->len;
- dev->stats.tx_packets++;
- if (pvc->state.fecn) /* TX Congestion counter */
- dev->stats.tx_compressed++;
- skb->dev = pvc->frad;
- skb->protocol = htons(ETH_P_HDLC);
- skb_reset_network_header(skb);
- dev_queue_xmit(skb);
- return NETDEV_TX_OK;
+ if (!pvc->state.active)
+ goto drop;
+
+ if (dev->type == ARPHRD_ETHER) {
+ int pad = ETH_ZLEN - skb->len;
+
+ if (pad > 0) { /* Pad the frame with zeros */
+ if (__skb_pad(skb, pad, false))
+ goto drop;
+ skb_put(skb, pad);
}
}
+ /* We already requested the needed header space via dev->needed_headroom,
+ * so this is only a safeguard in case the upper layer did not take
+ * dev->needed_headroom into account.
+ */
+ if (skb_headroom(skb) < 10) {
+ struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);
+
+ if (!skb2)
+ goto drop;
+ dev_kfree_skb(skb);
+ skb = skb2;
+ }
+
+ skb->dev = dev;
+ if (fr_hard_header(skb, pvc->dlci))
+ goto drop;
+
+ dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_packets++;
+ if (pvc->state.fecn) /* TX Congestion counter */
+ dev->stats.tx_compressed++;
+ skb->dev = pvc->frad;
+ skb->protocol = htons(ETH_P_HDLC);
+ skb_reset_network_header(skb);
+ dev_queue_xmit(skb);
+ return NETDEV_TX_OK;
+
+drop:
dev->stats.tx_dropped++;
- dev_kfree_skb(skb);
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -494,11 +502,9 @@ static void fr_lmi_send(struct net_device *dev, int fullrep)
memset(skb->data, 0, len);
skb_reserve(skb, 4);
if (lmi == LMI_CISCO) {
- skb->protocol = cpu_to_be16(NLPID_CISCO_LMI);
- fr_hard_header(&skb, LMI_CISCO_DLCI);
+ fr_hard_header(skb, LMI_CISCO_DLCI);
} else {
- skb->protocol = cpu_to_be16(NLPID_CCITT_ANSI_LMI);
- fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
+ fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
}
data = skb_tail_pointer(skb);
data[i++] = LMI_CALLREF;
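pvc_xmit() now guarantees the 10 bytes of headroom that the largest fr_hard_header() variant needs before the header is built, instead of reallocating inside the header routine. That defensive pattern in isolation, with HDR_LEN standing in for the worst-case header size:

if (skb_headroom(skb) < HDR_LEN) {
	struct sk_buff *nskb = skb_realloc_headroom(skb, HDR_LEN);

	if (!nskb) {
		kfree_skb(skb);		/* drop; count it as tx_dropped */
		return NETDEV_TX_OK;
	}
	consume_skb(skb);		/* old head replaced, not dropped */
	skb = nskb;
}
skb_push(skb, HDR_LEN);			/* guaranteed to fit now */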
diff --git a/drivers/net/wan/lmc/lmc_debug.c b/drivers/net/wan/lmc/lmc_debug.c
index f999db788506..2b6051bda3fb 100644
--- a/drivers/net/wan/lmc/lmc_debug.c
+++ b/drivers/net/wan/lmc/lmc_debug.c
@@ -62,22 +62,4 @@ void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3)
}
#endif /* DEBUG */
-void lmc_trace(struct net_device *dev, char *msg){
-#ifdef LMC_TRACE
- unsigned long j = jiffies + 3; /* Wait for 50 ms */
-
- if(in_interrupt()){
- printk("%s: * %s\n", dev->name, msg);
-// while(time_before(jiffies, j+10))
-// ;
- }
- else {
- printk("%s: %s\n", dev->name, msg);
- while(time_before(jiffies, j))
- schedule();
- }
-#endif
-}
-
-
/* --------------------------- end if_lmc_linux.c ------------------------ */
diff --git a/drivers/net/wan/lmc/lmc_debug.h b/drivers/net/wan/lmc/lmc_debug.h
index 820adcae5d67..cfae9eddf003 100644
--- a/drivers/net/wan/lmc/lmc_debug.h
+++ b/drivers/net/wan/lmc/lmc_debug.h
@@ -48,6 +48,5 @@ extern u32 lmcEventLogBuf[LMC_EVENTLOGSIZE * LMC_EVENTLOGARGS];
void lmcConsoleLog(char *type, unsigned char *ucData, int iLen);
void lmcEventLog(u32 EventNum, u32 arg2, u32 arg3);
-void lmc_trace(struct net_device *dev, char *msg);
#endif
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 842def21e089..36600b0a0ab0 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -113,8 +113,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
u16 regVal;
unsigned long flags;
- lmc_trace(dev, "lmc_ioctl in");
-
/*
* Most functions mess with the structure
* Disable interrupts while we do the polling
@@ -619,8 +617,6 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
break;
}
- lmc_trace(dev, "lmc_ioctl out");
-
return ret;
}
@@ -634,8 +630,6 @@ static void lmc_watchdog(struct timer_list *t) /*fold00*/
u32 ticks;
unsigned long flags;
- lmc_trace(dev, "lmc_watchdog in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
if(sc->check != 0xBEAFCAFE){
@@ -782,9 +776,6 @@ kick_timer:
add_timer (&sc->timer);
spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- lmc_trace(dev, "lmc_watchdog out");
-
}
static int lmc_attach(struct net_device *dev, unsigned short encoding,
@@ -813,8 +804,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int err;
static int cards_found;
- /* lmc_trace(dev, "lmc_init_one in"); */
-
err = pcim_enable_device(pdev);
if (err) {
printk(KERN_ERR "lmc: pci enable failed: %d\n", err);
@@ -955,7 +944,6 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
sc->lmc_ok = 0;
sc->last_link_status = 0;
- lmc_trace(dev, "lmc_init_one out");
return 0;
}
@@ -981,8 +969,6 @@ static int lmc_open(struct net_device *dev)
lmc_softc_t *sc = dev_to_sc(dev);
int err;
- lmc_trace(dev, "lmc_open in");
-
lmc_led_on(sc, LMC_DS3_LED0);
lmc_dec_reset(sc);
@@ -992,17 +978,14 @@ static int lmc_open(struct net_device *dev)
LMC_EVENT_LOG(LMC_EVENT_RESET2, lmc_mii_readreg(sc, 0, 16),
lmc_mii_readreg(sc, 0, 17));
- if (sc->lmc_ok){
- lmc_trace(dev, "lmc_open lmc_ok out");
+ if (sc->lmc_ok)
return 0;
- }
lmc_softreset (sc);
/* Since we have to use PCI bus, this should work on x86,alpha,ppc */
if (request_irq (dev->irq, lmc_interrupt, IRQF_SHARED, dev->name, dev)){
printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
- lmc_trace(dev, "lmc_open irq failed out");
return -EAGAIN;
}
sc->got_irq = 1;
@@ -1078,8 +1061,6 @@ static int lmc_open(struct net_device *dev)
sc->timer.expires = jiffies + HZ;
add_timer (&sc->timer);
- lmc_trace(dev, "lmc_open out");
-
return 0;
}
@@ -1091,8 +1072,6 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
{
lmc_softc_t *sc = dev_to_sc(dev);
- lmc_trace(dev, "lmc_running_reset in");
-
/* stop interrupts */
/* Clear the interrupt mask */
LMC_CSR_WRITE (sc, csr_intr, 0x00000000);
@@ -1114,8 +1093,6 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
-
- lmc_trace(dev, "lmc_running_reset_out");
}
@@ -1128,16 +1105,12 @@ static int lmc_close(struct net_device *dev)
/* not calling release_region() as we should */
lmc_softc_t *sc = dev_to_sc(dev);
- lmc_trace(dev, "lmc_close in");
-
sc->lmc_ok = 0;
sc->lmc_media->set_link_status (sc, 0);
del_timer (&sc->timer);
lmc_proto_close(sc);
lmc_ifdown (dev);
- lmc_trace(dev, "lmc_close out");
-
return 0;
}
@@ -1149,8 +1122,6 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
u32 csr6;
int i;
- lmc_trace(dev, "lmc_ifdown in");
-
/* Don't let anything else go on right now */
// dev->start = 0;
netif_stop_queue(dev);
@@ -1200,8 +1171,6 @@ static int lmc_ifdown (struct net_device *dev) /*fold00*/
netif_wake_queue(dev);
sc->extra_stats.tx_tbusy0++;
- lmc_trace(dev, "lmc_ifdown out");
-
return 0;
}
@@ -1220,8 +1189,6 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
int max_work = LMC_RXDESCS;
int handled = 0;
- lmc_trace(dev, "lmc_interrupt in");
-
spin_lock(&sc->lmc_lock);
/*
@@ -1264,12 +1231,10 @@ static irqreturn_t lmc_interrupt (int irq, void *dev_instance) /*fold00*/
lmc_running_reset (dev);
break;
}
-
- if (csr & TULIP_STS_RXINTR){
- lmc_trace(dev, "rx interrupt");
+
+ if (csr & TULIP_STS_RXINTR)
lmc_rx (dev);
-
- }
+
if (csr & (TULIP_STS_TXINTR | TULIP_STS_TXNOBUF | TULIP_STS_TXSTOPPED)) {
int n_compl = 0 ;
@@ -1389,7 +1354,6 @@ lmc_int_fail_out:
spin_unlock(&sc->lmc_lock);
- lmc_trace(dev, "lmc_interrupt out");
return IRQ_RETVAL(handled);
}
@@ -1401,8 +1365,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
int entry;
unsigned long flags;
- lmc_trace(dev, "lmc_start_xmit in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
/* normal path, tbusy known to be zero */
@@ -1477,7 +1439,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
spin_unlock_irqrestore(&sc->lmc_lock, flags);
- lmc_trace(dev, "lmc_start_xmit_out");
return NETDEV_TX_OK;
}
@@ -1493,8 +1454,6 @@ static int lmc_rx(struct net_device *dev)
struct sk_buff *skb, *nsb;
u16 len;
- lmc_trace(dev, "lmc_rx in");
-
lmc_led_on(sc, LMC_DS3_LED3);
rxIntLoopCnt = 0; /* debug -baz */
@@ -1673,9 +1632,6 @@ static int lmc_rx(struct net_device *dev)
lmc_led_off(sc, LMC_DS3_LED3);
skip_out_of_mem:
-
- lmc_trace(dev, "lmc_rx out");
-
return 0;
}
@@ -1684,16 +1640,12 @@ static struct net_device_stats *lmc_get_stats(struct net_device *dev)
lmc_softc_t *sc = dev_to_sc(dev);
unsigned long flags;
- lmc_trace(dev, "lmc_get_stats in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
sc->lmc_device->stats.rx_missed_errors += LMC_CSR_READ(sc, csr_missed_frames) & 0xffff;
spin_unlock_irqrestore(&sc->lmc_lock, flags);
- lmc_trace(dev, "lmc_get_stats out");
-
return &sc->lmc_device->stats;
}
@@ -1712,12 +1664,8 @@ unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned reg
int command = (0xf6 << 10) | (devaddr << 5) | regno;
int retval = 0;
- lmc_trace(sc->lmc_device, "lmc_mii_readreg in");
-
LMC_MII_SYNC (sc);
- lmc_trace(sc->lmc_device, "lmc_mii_readreg: done sync");
-
for (i = 15; i >= 0; i--)
{
int dataval = (command & (1 << i)) ? 0x20000 : 0;
@@ -1730,8 +1678,6 @@ unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned reg
/* __SLOW_DOWN_IO; */
}
- lmc_trace(sc->lmc_device, "lmc_mii_readreg: done1");
-
for (i = 19; i > 0; i--)
{
LMC_CSR_WRITE (sc, csr_9, 0x40000);
@@ -1743,8 +1689,6 @@ unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned reg
/* __SLOW_DOWN_IO; */
}
- lmc_trace(sc->lmc_device, "lmc_mii_readreg out");
-
return (retval >> 1) & 0xffff;
}
@@ -1753,8 +1697,6 @@ void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno,
int i = 32;
int command = (0x5002 << 16) | (devaddr << 23) | (regno << 18) | data;
- lmc_trace(sc->lmc_device, "lmc_mii_writereg in");
-
LMC_MII_SYNC (sc);
i = 31;
@@ -1787,16 +1729,12 @@ void lmc_mii_writereg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno,
/* __SLOW_DOWN_IO; */
i--;
}
-
- lmc_trace(sc->lmc_device, "lmc_mii_writereg out");
}
static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
{
int i;
- lmc_trace(sc->lmc_device, "lmc_softreset in");
-
/* Initialize the receive rings and buffers. */
sc->lmc_txfull = 0;
sc->lmc_next_rx = 0;
@@ -1871,55 +1809,40 @@ static void lmc_softreset (lmc_softc_t * const sc) /*fold00*/
}
sc->lmc_txring[i - 1].buffer2 = virt_to_bus (&sc->lmc_txring[0]);
LMC_CSR_WRITE (sc, csr_txlist, virt_to_bus (sc->lmc_txring));
-
- lmc_trace(sc->lmc_device, "lmc_softreset out");
}
void lmc_gpio_mkinput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_gpio_mkinput in");
sc->lmc_gpio_io &= ~bits;
LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
- lmc_trace(sc->lmc_device, "lmc_gpio_mkinput out");
}
void lmc_gpio_mkoutput(lmc_softc_t * const sc, u32 bits) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput in");
sc->lmc_gpio_io |= bits;
LMC_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET | (sc->lmc_gpio_io));
- lmc_trace(sc->lmc_device, "lmc_gpio_mkoutput out");
}
void lmc_led_on(lmc_softc_t * const sc, u32 led) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_led_on in");
- if((~sc->lmc_miireg16) & led){ /* Already on! */
- lmc_trace(sc->lmc_device, "lmc_led_on aon out");
+ if ((~sc->lmc_miireg16) & led) /* Already on! */
return;
- }
-
+
sc->lmc_miireg16 &= ~led;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
- lmc_trace(sc->lmc_device, "lmc_led_on out");
}
void lmc_led_off(lmc_softc_t * const sc, u32 led) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_led_off in");
- if(sc->lmc_miireg16 & led){ /* Already set don't do anything */
- lmc_trace(sc->lmc_device, "lmc_led_off aoff out");
+ if (sc->lmc_miireg16 & led) /* Already set don't do anything */
return;
- }
-
+
sc->lmc_miireg16 |= led;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
- lmc_trace(sc->lmc_device, "lmc_led_off out");
}
static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
{
- lmc_trace(sc->lmc_device, "lmc_reset in");
sc->lmc_miireg16 |= LMC_MII16_FIFO_RESET;
lmc_mii_writereg(sc, 0, 16, sc->lmc_miireg16);
@@ -1955,13 +1878,11 @@ static void lmc_reset(lmc_softc_t * const sc) /*fold00*/
sc->lmc_media->init(sc);
sc->extra_stats.resetCount++;
- lmc_trace(sc->lmc_device, "lmc_reset out");
}
static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
{
u32 val;
- lmc_trace(sc->lmc_device, "lmc_dec_reset in");
/*
* disable all interrupts
@@ -2017,14 +1938,11 @@ static void lmc_dec_reset(lmc_softc_t * const sc) /*fold00*/
val = LMC_CSR_READ(sc, csr_sia_general);
val |= (TULIP_WATCHDOG_TXDISABLE | TULIP_WATCHDOG_RXDISABLE);
LMC_CSR_WRITE(sc, csr_sia_general, val);
-
- lmc_trace(sc->lmc_device, "lmc_dec_reset out");
}
static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00*/
size_t csr_size)
{
- lmc_trace(sc->lmc_device, "lmc_initcsrs in");
sc->lmc_csrs.csr_busmode = csr_base + 0 * csr_size;
sc->lmc_csrs.csr_txpoll = csr_base + 1 * csr_size;
sc->lmc_csrs.csr_rxpoll = csr_base + 2 * csr_size;
@@ -2041,7 +1959,6 @@ static void lmc_initcsrs(lmc_softc_t * const sc, lmc_csrptr_t csr_base, /*fold00
sc->lmc_csrs.csr_13 = csr_base + 13 * csr_size;
sc->lmc_csrs.csr_14 = csr_base + 14 * csr_size;
sc->lmc_csrs.csr_15 = csr_base + 15 * csr_size;
- lmc_trace(sc->lmc_device, "lmc_initcsrs out");
}
static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
@@ -2050,8 +1967,6 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
u32 csr6;
unsigned long flags;
- lmc_trace(dev, "lmc_driver_timeout in");
-
spin_lock_irqsave(&sc->lmc_lock, flags);
printk("%s: Xmitter busy|\n", dev->name);
@@ -2094,8 +2009,4 @@ static void lmc_driver_timeout(struct net_device *dev, unsigned int txqueue)
bug_out:
spin_unlock_irqrestore(&sc->lmc_lock, flags);
-
- lmc_trace(dev, "lmc_driver_timeout out");
-
-
}
diff --git a/drivers/net/wan/lmc/lmc_media.c b/drivers/net/wan/lmc/lmc_media.c
index 23ca4a62d6f5..ec1ac7b1f3fd 100644
--- a/drivers/net/wan/lmc/lmc_media.c
+++ b/drivers/net/wan/lmc/lmc_media.c
@@ -1026,7 +1026,6 @@ lmc_t1_get_link_status (lmc_softc_t * const sc)
* led3 red = Loss of Signal (LOS) or out of frame (OOF)
* conditions detected on T3 receive signal
*/
- lmc_trace(sc->lmc_device, "lmc_t1_get_link_status in");
lmc_led_on(sc, LMC_DS3_LED2);
lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM1_STATUS);
@@ -1120,9 +1119,6 @@ lmc_t1_get_link_status (lmc_softc_t * const sc)
lmc_mii_writereg (sc, 0, 17, T1FRAMER_ALARM2_STATUS);
sc->lmc_xinfo.t1_alarm2_status = lmc_mii_readreg (sc, 0, 18);
-
- lmc_trace(sc->lmc_device, "lmc_t1_get_link_status out");
-
return ret;
}
diff --git a/drivers/net/wan/lmc/lmc_proto.c b/drivers/net/wan/lmc/lmc_proto.c
index a58301dd0c1f..e8b0b902b424 100644
--- a/drivers/net/wan/lmc/lmc_proto.c
+++ b/drivers/net/wan/lmc/lmc_proto.c
@@ -47,7 +47,6 @@
// attach
void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
{
- lmc_trace(sc->lmc_device, "lmc_proto_attach in");
if (sc->if_type == LMC_NET) {
struct net_device *dev = sc->lmc_device;
/*
@@ -57,12 +56,10 @@ void lmc_proto_attach(lmc_softc_t *sc) /*FOLD00*/
dev->hard_header_len = 0;
dev->addr_len = 0;
}
- lmc_trace(sc->lmc_device, "lmc_proto_attach out");
}
int lmc_proto_ioctl(lmc_softc_t *sc, struct ifreq *ifr, int cmd)
{
- lmc_trace(sc->lmc_device, "lmc_proto_ioctl");
if (sc->if_type == LMC_PPP)
return hdlc_ioctl(sc->lmc_device, ifr, cmd);
return -EOPNOTSUPP;
@@ -72,32 +69,23 @@ int lmc_proto_open(lmc_softc_t *sc)
{
int ret = 0;
- lmc_trace(sc->lmc_device, "lmc_proto_open in");
-
if (sc->if_type == LMC_PPP) {
ret = hdlc_open(sc->lmc_device);
if (ret < 0)
printk(KERN_WARNING "%s: HDLC open failed: %d\n",
sc->name, ret);
}
-
- lmc_trace(sc->lmc_device, "lmc_proto_open out");
return ret;
}
void lmc_proto_close(lmc_softc_t *sc)
{
- lmc_trace(sc->lmc_device, "lmc_proto_close in");
-
if (sc->if_type == LMC_PPP)
hdlc_close(sc->lmc_device);
-
- lmc_trace(sc->lmc_device, "lmc_proto_close out");
}
__be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
{
- lmc_trace(sc->lmc_device, "lmc_proto_type in");
switch(sc->if_type){
case LMC_PPP:
return hdlc_type_trans(skb, sc->lmc_device);
@@ -113,13 +101,10 @@ __be16 lmc_proto_type(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
return htons(ETH_P_802_2);
break;
}
- lmc_trace(sc->lmc_device, "lmc_proto_tye out");
-
}
void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
{
- lmc_trace(sc->lmc_device, "lmc_proto_netif in");
switch(sc->if_type){
case LMC_PPP:
case LMC_NET:
@@ -129,5 +114,4 @@ void lmc_proto_netif(lmc_softc_t *sc, struct sk_buff *skb) /*FOLD00*/
case LMC_RAW:
break;
}
- lmc_trace(sc->lmc_device, "lmc_proto_netif out");
}
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 40c04ea1200a..2fde439543fb 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -260,11 +260,12 @@ static int __init sbni_init(struct net_device *dev)
return sbni_isa_probe( dev );
/* otherwise we have to search for our adapter */
- if( io[ num ] != -1 )
- dev->base_addr = io[ num ],
+ if( io[ num ] != -1 ) {
+ dev->base_addr = io[ num ];
dev->irq = irq[ num ];
- else if( scandone || io[ 0 ] != -1 )
+ } else if( scandone || io[ 0 ] != -1 ) {
return -ENODEV;
+ }
/* if io[ num ] contains a non-zero address, then it is on the ISA bus */
if( dev->base_addr )
@@ -399,12 +400,13 @@ sbni_probe1( struct net_device *dev, unsigned long ioaddr, int irq )
nl->maxframe = DEFAULT_FRAME_LEN;
nl->csr1.rate = baud[ num ];
- if( (nl->cur_rxl_index = rxl[ num ]) == -1 )
+ if( (nl->cur_rxl_index = rxl[ num ]) == -1 ) {
/* autotune rxl */
- nl->cur_rxl_index = DEF_RXL,
+ nl->cur_rxl_index = DEF_RXL;
nl->delta_rxl = DEF_RXL_DELTA;
- else
+ } else {
nl->delta_rxl = 0;
+ }
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
if( inb( ioaddr + CSR0 ) & 0x01 )
nl->state |= FL_SLOW_MODE;
@@ -512,13 +514,15 @@ sbni_interrupt( int irq, void *dev_id )
do {
repeat = 0;
- if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) )
- handle_channel( dev ),
+ if( inb( dev->base_addr + CSR0 ) & (RC_RDY | TR_RDY) ) {
+ handle_channel( dev );
repeat = 1;
+ }
if( nl->second && /* second channel present */
- (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) )
- handle_channel( nl->second ),
+ (inb( nl->second->base_addr+CSR0 ) & (RC_RDY | TR_RDY)) ) {
+ handle_channel( nl->second );
repeat = 1;
+ }
} while( repeat );
if( nl->second )
@@ -610,11 +614,12 @@ recv_frame( struct net_device *dev )
nl->state |= FL_PREV_OK;
if( framelen > 4 )
nl->in_stats.all_rx_number++;
- } else
- nl->state &= ~FL_PREV_OK,
- change_level( dev ),
- nl->in_stats.all_rx_number++,
+ } else {
+ nl->state &= ~FL_PREV_OK;
+ change_level( dev );
+ nl->in_stats.all_rx_number++;
nl->in_stats.bad_rx_number++;
+ }
return !frame_ok || framelen > 4;
}
@@ -689,9 +694,10 @@ download_data( struct net_device *dev, u32 *crc_p )
*crc_p = calc_crc32( *crc_p, skb->data + nl->outpos, len );
/* if the packet is too short, write some more bytes to pad it */
- for( len = nl->framelen - len; len--; )
- outb( 0, dev->base_addr + DAT ),
+ for( len = nl->framelen - len; len--; ) {
+ outb( 0, dev->base_addr + DAT );
*crc_p = CRC32( 0, *crc_p );
+ }
}
@@ -703,9 +709,10 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
int frame_ok;
- if( is_first )
- nl->wait_frameno = frameno,
+ if( is_first ) {
+ nl->wait_frameno = frameno;
nl->inppos = 0;
+ }
if( nl->wait_frameno == frameno ) {
@@ -717,33 +724,35 @@ upload_data( struct net_device *dev, unsigned framelen, unsigned frameno,
* an error occurred... drop the entire packet
*/
else if( (frame_ok = skip_tail( dev->base_addr, framelen, crc ))
- != 0 )
- nl->wait_frameno = 0,
- nl->inppos = 0,
+ != 0 ) {
+ nl->wait_frameno = 0;
+ nl->inppos = 0;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++,
+ nl->master->stats.rx_errors++;
nl->master->stats.rx_missed_errors++;
#else
- dev->stats.rx_errors++,
+ dev->stats.rx_errors++;
dev->stats.rx_missed_errors++;
#endif
+ }
/* now skip all frames until is_first != 0 */
} else
frame_ok = skip_tail( dev->base_addr, framelen, crc );
- if( is_first && !frame_ok )
+ if( is_first && !frame_ok ) {
/*
* Frame has been broken, but we had already stored
* is_first... Drop entire packet.
*/
- nl->wait_frameno = 0,
+ nl->wait_frameno = 0;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.rx_errors++,
+ nl->master->stats.rx_errors++;
nl->master->stats.rx_crc_errors++;
#else
- dev->stats.rx_errors++,
+ dev->stats.rx_errors++;
dev->stats.rx_crc_errors++;
#endif
+ }
return frame_ok;
}
@@ -782,17 +791,18 @@ interpret_ack( struct net_device *dev, unsigned ack )
if( nl->state & FL_WAIT_ACK ) {
nl->outpos += nl->framelen;
- if( --nl->tx_frameno )
+ if( --nl->tx_frameno ) {
nl->framelen = min_t(unsigned int,
nl->maxframe,
nl->tx_buf_p->len - nl->outpos);
- else
- send_complete( dev ),
+ } else {
+ send_complete( dev );
#ifdef CONFIG_SBNI_MULTILINE
netif_wake_queue( nl->master );
#else
netif_wake_queue( dev );
#endif
+ }
}
}
@@ -872,16 +882,17 @@ drop_xmit_queue( struct net_device *dev )
{
struct net_local *nl = netdev_priv(dev);
- if( nl->tx_buf_p )
- dev_kfree_skb_any( nl->tx_buf_p ),
- nl->tx_buf_p = NULL,
+ if( nl->tx_buf_p ) {
+ dev_kfree_skb_any( nl->tx_buf_p );
+ nl->tx_buf_p = NULL;
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->stats.tx_errors++,
+ nl->master->stats.tx_errors++;
nl->master->stats.tx_carrier_errors++;
#else
- dev->stats.tx_errors++,
+ dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
#endif
+ }
nl->tx_frameno = 0;
nl->framelen = 0;
@@ -1327,12 +1338,13 @@ sbni_ioctl( struct net_device *dev, struct ifreq *ifr, int cmd )
spin_lock( &nl->lock );
flags = *(struct sbni_flags*) &ifr->ifr_ifru;
- if( flags.fixed_rxl )
- nl->delta_rxl = 0,
+ if( flags.fixed_rxl ) {
+ nl->delta_rxl = 0;
nl->cur_rxl_index = flags.rxl;
- else
- nl->delta_rxl = DEF_RXL_DELTA,
+ } else {
+ nl->delta_rxl = DEF_RXL_DELTA;
nl->cur_rxl_index = DEF_RXL;
+ }
nl->csr1.rxl = rxl_tab[ nl->cur_rxl_index ];
nl->csr1.rate = flags.rate;
@@ -1526,13 +1538,16 @@ sbni_setup( char *p )
(*dest[ parm ])[ n ] = simple_strtol( p, &p, 0 );
if( !*p || *p == ')' )
return 1;
- if( *p == ';' )
- ++p, ++n, parm = 0;
- else if( *p++ != ',' )
+ if( *p == ';' ) {
+ ++p;
+ ++n;
+ parm = 0;
+ } else if( *p++ != ',' ) {
break;
- else
+ } else {
if( ++parm >= 5 )
break;
+ }
}
bad_param:
pr_err("Error in sbni kernel parameter!\n");
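
The sbni.c hunks above all repair the same pattern: multi-statement branches chained with the comma operator instead of braces. A small illustration of why the comma form is fragile (do_first/do_second are placeholder helpers):

static void example(int cond)
{
	/* Old style: one comma expression, so both calls are guarded --
	 * but only while each part stays an expression, and a later
	 * edit that turns the trailing comma into a semicolon silently
	 * drops do_second() out of the branch.
	 */
	if (cond)
		do_first(),
		do_second();

	/* Braced form: the block guards exactly what it contains. */
	if (cond) {
		do_first();
		do_second();
	}
}
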
diff --git a/drivers/net/wan/slic_ds26522.c b/drivers/net/wan/slic_ds26522.c
index 29053bec694e..8e3b1c717c10 100644
--- a/drivers/net/wan/slic_ds26522.c
+++ b/drivers/net/wan/slic_ds26522.c
@@ -22,8 +22,6 @@
#include <linux/io.h>
#include "slic_ds26522.h"
-#define DRV_NAME "ds26522"
-
#define SLIC_TRANS_LEN 1
#define SLIC_TWO_LEN 2
#define SLIC_THREE_LEN 3
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index c418767a890a..54b1a5aee82d 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -202,8 +202,7 @@ static void x25_asy_bump(struct x25_asy *sl)
return;
}
skb_put_data(skb, sl->rbuff, count);
- skb->protocol = x25_type_trans(skb, sl->dev);
- err = lapb_data_received(skb->dev, skb);
+ err = lapb_data_received(sl->dev, skb);
if (err != LAPB_OK) {
kfree_skb(skb);
printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
@@ -243,8 +242,6 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
sl->xleft = count - actual;
sl->xhead = sl->xbuff + actual;
- /* VSV */
- clear_bit(SLF_OUTWAIT, &sl->flags); /* reset outfill flag */
}
/*
diff --git a/drivers/net/wan/x25_asy.h b/drivers/net/wan/x25_asy.h
index eb4a4216ee94..87798287c9ca 100644
--- a/drivers/net/wan/x25_asy.h
+++ b/drivers/net/wan/x25_asy.h
@@ -35,7 +35,6 @@ struct x25_asy {
#define SLF_INUSE 0 /* Channel in use */
#define SLF_ESCAPE 1 /* ESC received */
#define SLF_ERROR 2 /* Parity, etc. error */
-#define SLF_OUTWAIT 4 /* Waiting for output */
};
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 9afed3b133d3..8df98757d901 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -656,8 +656,6 @@ void i2400m_msg_to_dev_cancel_wait(struct i2400m *i2400m, int code)
*
* @i2400m: device descriptor
*
- * @msg_skb: an skb *
- *
* @buf: pointer to the buffer containing the message to be sent; it
* has to start with a &struct i2400M_l3l4_hdr, followed by the
* payload. Once this function returns, the
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index 20a4f3c0a0a1..d0f3b6d7f408 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -22,8 +22,8 @@ static struct genl_family genl_family;
static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
[WGDEVICE_A_IFINDEX] = { .type = NLA_U32 },
[WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
- [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
- [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
+ [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
+ [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
[WGDEVICE_A_FLAGS] = { .type = NLA_U32 },
[WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 },
[WGDEVICE_A_FWMARK] = { .type = NLA_U32 },
@@ -31,12 +31,12 @@ static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = {
};
static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
- [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN },
- [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN },
+ [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN),
+ [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN),
[WGPEER_A_FLAGS] = { .type = NLA_U32 },
- [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) },
+ [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 },
- [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) },
+ [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)),
[WGPEER_A_RX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_TX_BYTES] = { .type = NLA_U64 },
[WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED },
@@ -45,7 +45,7 @@ static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = {
static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = {
[WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 },
- [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) },
+ [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)),
[WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 }
};
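
The wireguard policy tables above move from open-coded { .type = NLA_EXACT_LEN, .len = ... } initializers to the NLA_POLICY_EXACT_LEN()/NLA_POLICY_MIN_LEN() helpers from <net/netlink.h>. A minimal sketch of a policy table using them (the EXAMPLE_A_* attributes are illustrative):

#include <net/netlink.h>

enum {
	EXAMPLE_A_UNSPEC,
	EXAMPLE_A_KEY,		/* fixed-size binary blob */
	EXAMPLE_A_ADDR,		/* variable length with a floor */
	__EXAMPLE_A_MAX,
};
#define EXAMPLE_A_MAX (__EXAMPLE_A_MAX - 1)

static const struct nla_policy example_policy[EXAMPLE_A_MAX + 1] = {
	/* The macros expand to the matching validation type and
	 * length pair, so the table stays declarative.
	 */
	[EXAMPLE_A_KEY]  = NLA_POLICY_EXACT_LEN(32),
	[EXAMPLE_A_ADDR] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)),
};
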
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 22f9f2f8af10..5cf2045fadef 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -324,8 +324,8 @@ static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */
- pci_unmap_single(priv->pdev, info->mapping,
- info->skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pdev->dev, info->mapping,
+ info->skb->len, DMA_TO_DEVICE);
ieee80211_tx_info_clear_status(txi);
@@ -382,35 +382,34 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
} else if (pktlen < RX_COPY_BREAK) {
skb = dev_alloc_skb(pktlen);
if (skb) {
- pci_dma_sync_single_for_cpu(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- pktlen, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ pktlen,
+ DMA_FROM_DEVICE);
skb_put_data(skb,
skb_tail_pointer(priv->rx_buffers[entry].skb),
pktlen);
- pci_dma_sync_single_for_device(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ RX_PKT_SIZE,
+ DMA_FROM_DEVICE);
}
} else {
newskb = dev_alloc_skb(RX_PKT_SIZE);
if (newskb) {
skb = priv->rx_buffers[entry].skb;
skb_put(skb, pktlen);
- pci_unmap_single(
- priv->pdev,
- priv->rx_buffers[entry].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping,
+ RX_PKT_SIZE, DMA_FROM_DEVICE);
priv->rx_buffers[entry].skb = newskb;
priv->rx_buffers[entry].mapping =
- pci_map_single(priv->pdev,
+ dma_map_single(&priv->pdev->dev,
skb_tail_pointer(newskb),
RX_PKT_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(priv->pdev,
- priv->rx_buffers[entry].mapping)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev,
+ priv->rx_buffers[entry].mapping)) {
priv->rx_buffers[entry].skb = NULL;
dev_kfree_skb(newskb);
skb = NULL;
@@ -1449,11 +1448,11 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
if (rx_info->skb == NULL)
break;
- rx_info->mapping = pci_map_single(priv->pdev,
+ rx_info->mapping = dma_map_single(&priv->pdev->dev,
skb_tail_pointer(rx_info->skb),
RX_PKT_SIZE,
- PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, rx_info->mapping)) {
dev_kfree_skb(rx_info->skb);
rx_info->skb = NULL;
break;
@@ -1490,10 +1489,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
if (!priv->rx_buffers[i].skb)
continue;
- pci_unmap_single(
- priv->pdev,
- priv->rx_buffers[i].mapping,
- RX_PKT_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev,
+ priv->rx_buffers[i].mapping, RX_PKT_SIZE,
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[i].skb);
}
@@ -1502,10 +1500,9 @@ static void adm8211_free_rings(struct ieee80211_hw *dev)
if (!priv->tx_buffers[i].skb)
continue;
- pci_unmap_single(priv->pdev,
+ dma_unmap_single(&priv->pdev->dev,
priv->tx_buffers[i].mapping,
- priv->tx_buffers[i].skb->len,
- PCI_DMA_TODEVICE);
+ priv->tx_buffers[i].skb->len, DMA_TO_DEVICE);
dev_kfree_skb(priv->tx_buffers[i].skb);
}
@@ -1632,9 +1629,9 @@ static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
unsigned int entry;
u32 flag;
- mapping = pci_map_single(priv->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(priv->pdev, mapping))
+ mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&priv->pdev->dev, mapping))
return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
@@ -1745,8 +1742,8 @@ static int adm8211_alloc_rings(struct ieee80211_hw *dev)
/* Allocate TX/RX descriptors */
ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size +
sizeof(struct adm8211_desc) * priv->tx_ring_size;
- priv->rx_ring = pci_alloc_consistent(priv->pdev, ring_size,
- &priv->rx_ring_dma);
+ priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev, ring_size,
+ &priv->rx_ring_dma, GFP_KERNEL);
if (!priv->rx_ring) {
kfree(priv->rx_buffers);
@@ -1818,8 +1815,8 @@ static int adm8211_probe(struct pci_dev *pdev,
return err; /* someone else grabbed it? don't disable it */
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
- pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
printk(KERN_ERR "%s (adm8211): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
@@ -1929,10 +1926,10 @@ static int adm8211_probe(struct pci_dev *pdev,
kfree(priv->eeprom);
err_free_desc:
- pci_free_consistent(pdev,
- sizeof(struct adm8211_desc) * priv->rx_ring_size +
- sizeof(struct adm8211_desc) * priv->tx_ring_size,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct adm8211_desc) * priv->rx_ring_size +
+ sizeof(struct adm8211_desc) * priv->tx_ring_size,
+ priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
err_iounmap:
@@ -1962,10 +1959,10 @@ static void adm8211_remove(struct pci_dev *pdev)
priv = dev->priv;
- pci_free_consistent(pdev,
- sizeof(struct adm8211_desc) * priv->rx_ring_size +
- sizeof(struct adm8211_desc) * priv->tx_ring_size,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&pdev->dev,
+ sizeof(struct adm8211_desc) * priv->rx_ring_size +
+ sizeof(struct adm8211_desc) * priv->tx_ring_size,
+ priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
kfree(priv->eeprom);
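
The adm8211 conversion above is the mechanical migration from the deprecated pci_* DMA wrappers to the generic DMA API; each call keeps its semantics and gains an explicit struct device. The correspondence, sketched on a single streaming mapping (pdev and skb assumed in scope):

/* pci_map_single(pdev, ...)      -> dma_map_single(&pdev->dev, ...)
 * pci_unmap_single(pdev, ...)    -> dma_unmap_single(&pdev->dev, ...)
 * PCI_DMA_TODEVICE/_FROMDEVICE   -> DMA_TO_DEVICE/DMA_FROM_DEVICE
 * pci_alloc_consistent(...)      -> dma_alloc_coherent(..., gfp)
 * pci_set_dma_mask(pdev, m)      -> dma_set_mask(&pdev->dev, m)
 */
dma_addr_t mapping;

mapping = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, mapping))
	return -ENOMEM;

/* ... the device owns the buffer until the transfer completes ... */

dma_unmap_single(&pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
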
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 5b6db6e66f65..4481ed375f55 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -12,18 +12,11 @@
void ath10k_bmi_start(struct ath10k *ar)
{
- int ret;
-
ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi start\n");
ar->bmi.done_sent = false;
-
- /* Enable hardware clock to speed up firmware download */
- if (ar->hw_params.hw_ops->enable_pll_clk) {
- ret = ar->hw_params.hw_ops->enable_pll_clk(ar);
- ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi enable pll ret %d\n", ret);
- }
}
+EXPORT_SYMBOL(ath10k_bmi_start);
int ath10k_bmi_done(struct ath10k *ar)
{
@@ -197,6 +190,7 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
return 0;
}
+EXPORT_SYMBOL(ath10k_bmi_read_memory);
int ath10k_bmi_write_soc_reg(struct ath10k *ar, u32 address, u32 reg_val)
{
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 294fbc1e89ab..c45c814fd122 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1299,29 +1299,24 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
struct ath10k_hw_ce_host_wm_regs *wm_regs = ar->hw_ce_regs->wm_regs;
u32 ctrl_addr = ce_state->ctrl_addr;
- spin_lock_bh(&ce->ce_lock);
-
- /* Clear the copy-complete interrupts that will be handled here. */
+ /*
+ * Clear before handling
+ *
+ * Misc CE interrupts are not being handled, but still need
+ * to be cleared.
+ *
+ * NOTE: When the last copy engine interrupt is cleared the
+ * hardware will go to sleep. Once this happens any access to
+ * the CE registers can cause a hardware fault.
+ */
ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
- wm_regs->cc_mask);
-
- spin_unlock_bh(&ce->ce_lock);
+ wm_regs->cc_mask | wm_regs->wm_mask);
if (ce_state->recv_cb)
ce_state->recv_cb(ce_state);
if (ce_state->send_cb)
ce_state->send_cb(ce_state);
-
- spin_lock_bh(&ce->ce_lock);
-
- /*
- * Misc CE interrupts are not being handled, but still need
- * to be cleared.
- */
- ath10k_ce_engine_int_status_clear(ar, ctrl_addr, wm_regs->wm_mask);
-
- spin_unlock_bh(&ce->ce_lock);
}
EXPORT_SYMBOL(ath10k_ce_per_engine_service);
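
The reordering above matters because, as the new comment notes, clearing the last pending copy-engine interrupt can let the hardware power down, after which further CE register access may fault. The safe shape is one combined clear before any callback work (all names below are illustrative, not the driver's):

static void example_service(u32 ctrl_addr)
{
	/* One write clears both the copy-complete bits we handle and
	 * the misc watermark bits we merely acknowledge. After this
	 * point the engine may sleep, so the rest of the path touches
	 * no CE registers.
	 */
	engine_int_status_clear(ctrl_addr, CC_MASK | WM_MASK);

	run_recv_callbacks();
	run_send_callbacks();
}
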
@@ -1372,45 +1367,55 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
-int ath10k_ce_disable_interrupts(struct ath10k *ar)
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
struct ath10k_ce_pipe *ce_state;
u32 ctrl_addr;
- int ce_id;
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- ce_state = &ce->ce_states[ce_id];
- if (ce_state->attr_flags & CE_ATTR_POLL)
- continue;
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
- ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
- ath10k_ce_error_intr_disable(ar, ctrl_addr);
- ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
- }
+ ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
+ ath10k_ce_error_intr_disable(ar, ctrl_addr);
+ ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
+}
+EXPORT_SYMBOL(ath10k_ce_disable_interrupt);
- return 0;
+void ath10k_ce_disable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_disable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_disable_interrupts);
-void ath10k_ce_enable_interrupts(struct ath10k *ar)
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- int ce_id;
struct ath10k_ce_pipe *ce_state;
+ ce_state = &ce->ce_states[ce_id];
+ if (ce_state->attr_flags & CE_ATTR_POLL)
+ return;
+
+ ath10k_ce_per_engine_handler_adjust(ce_state);
+}
+EXPORT_SYMBOL(ath10k_ce_enable_interrupt);
+
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
+{
+ int ce_id;
+
/* Enable interrupts for copy engines that
* are not using polling mode.
*/
- for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
- ce_state = &ce->ce_states[ce_id];
- if (ce_state->attr_flags & CE_ATTR_POLL)
- continue;
-
- ath10k_ce_per_engine_handler_adjust(ce_state);
- }
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_enable_interrupt(ar, ce_id);
}
EXPORT_SYMBOL(ath10k_ce_enable_interrupts);
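
Splitting the enable/disable loops into per-engine helpers lets a bus backend mask and service a single copy engine without disturbing the others; one plausible caller shape (the usage is illustrative, though the helpers themselves are real and exported above):

ath10k_ce_disable_interrupt(ar, ce_id);	/* mask just this engine */
ath10k_ce_per_engine_service(ar, ce_id);
ath10k_ce_enable_interrupt(ar, ce_id);	/* and unmask it again */
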
@@ -1555,7 +1560,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc_64) +
+ (nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 75df79d43120..666ce384a1d8 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -255,10 +255,13 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
/*==================CE Interrupt Handlers====================*/
void ath10k_ce_per_engine_service_any(struct ath10k *ar);
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
-int ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_disable_interrupt(struct ath10k *ar, int ce_id);
+void ath10k_ce_disable_interrupts(struct ath10k *ar);
+void ath10k_ce_enable_interrupt(struct ath10k *ar, int ce_id);
void ath10k_ce_enable_interrupts(struct ath10k *ar);
void ath10k_ce_dump_registers(struct ath10k *ar,
struct ath10k_fw_crash_data *crash_data);
+
void ath10k_ce_alloc_rri(struct ath10k *ar);
void ath10k_ce_free_rri(struct ath10k *ar);
@@ -369,18 +372,14 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
(((x) & CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_MASK) >> \
CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB)
#define CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS 0x0000
-#define CE_INTERRUPT_SUMMARY (GENMASK(CE_COUNT_MAX - 1, 0))
static inline u32 ath10k_ce_interrupt_summary(struct ath10k *ar)
{
struct ath10k_ce *ce = ath10k_ce_priv(ar);
- if (!ar->hw_params.per_ce_irq)
- return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
- ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
- CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
- else
- return CE_INTERRUPT_SUMMARY;
+ return CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_GET(
+ ce->bus_ops->read32((ar), CE_WRAPPER_BASE_ADDRESS +
+ CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
}
/* Host software's Copy Engine configuration. */
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 340ce327ac14..d73ad60b571c 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -119,7 +119,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -155,7 +154,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -220,7 +218,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -255,7 +252,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -290,7 +286,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -328,12 +323,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
.fw_diag_ce_download = true,
.tx_stats_over_pktlog = false,
+ .supports_peer_stats_info = true,
},
{
.id = QCA99X0_HW_2_0_DEV_VERSION,
@@ -369,7 +364,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -417,7 +411,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -462,7 +455,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -497,7 +489,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -534,7 +525,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -603,7 +593,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = 0x20,
.target_64bit = false,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL,
- .per_ce_irq = false,
.shadow_reg_support = false,
.rri_on_ddr = false,
.hw_filter_reset_required = true,
@@ -631,7 +620,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.num_wds_entries = TARGET_HL_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
.rx_ring_fill_level = HTT_RX_RING_FILL_LEVEL_DUAL_MAC,
- .per_ce_irq = true,
.shadow_reg_support = true,
.rri_on_ddr = true,
.hw_filter_reset_required = false,
@@ -740,6 +728,16 @@ static int ath10k_init_sdio(struct ath10k *ar, enum ath10k_firmware_mode mode)
if (ret)
return ret;
+ ret = ath10k_bmi_read32(ar, hi_option_flag2, &param);
+ if (ret)
+ return ret;
+
+ param |= HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST;
+
+ ret = ath10k_bmi_write32(ar, hi_option_flag2, param);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -1024,7 +1022,7 @@ static int ath10k_core_check_smbios(struct ath10k *ar)
return 0;
}
-static int ath10k_core_check_dt(struct ath10k *ar)
+int ath10k_core_check_dt(struct ath10k *ar)
{
struct device_node *node;
const char *variant = NULL;
@@ -1045,6 +1043,7 @@ static int ath10k_core_check_dt(struct ath10k *ar)
return 0;
}
+EXPORT_SYMBOL(ath10k_core_check_dt);
static int ath10k_download_fw(struct ath10k *ar)
{
@@ -1439,10 +1438,17 @@ static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
}
if (ar->id.qmi_ids_valid) {
- scnprintf(name, name_len,
- "bus=%s,qmi-board-id=%x",
- ath10k_bus_str(ar->hif.bus),
- ar->id.qmi_board_id);
+ if (with_variant && ar->id.bdf_ext[0] != '\0')
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id, ar->id.qmi_chip_id,
+ variant);
+ else
+ scnprintf(name, name_len,
+ "bus=%s,qmi-board-id=%x",
+ ath10k_bus_str(ar->hif.bus),
+ ar->id.qmi_board_id);
goto out;
}
@@ -2320,7 +2326,7 @@ static void ath10k_core_restart(struct work_struct *work)
break;
case ATH10K_STATE_RESTARTED:
ar->state = ATH10K_STATE_WEDGED;
- /* fall through */
+ fallthrough;
case ATH10K_STATE_WEDGED:
ath10k_warn(ar, "device is wedged, will not restart\n");
break;
@@ -2614,6 +2620,13 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
ar->running_fw->fw_file.fw_features)) {
ath10k_bmi_start(ar);
+ /* Enable hardware clock to speed up firmware download */
+ if (ar->hw_params.hw_ops->enable_pll_clk) {
+ status = ar->hw_params.hw_ops->enable_pll_clk(ar);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot enable pll ret %d\n",
+ status);
+ }
+
if (ath10k_init_configure_target(ar)) {
status = -EINVAL;
goto err;
@@ -2797,6 +2810,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
val |= WMI_10_4_REPORT_AIRTIME;
+ if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map))
+ val |= WMI_10_4_EXT_PEER_TID_CONFIGS_SUPPORT;
+
status = ath10k_mac_ext_resource_config(ar, val);
if (status) {
ath10k_err(ar,
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 5c18f6c20462..b50ab9e229dc 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -82,6 +82,8 @@
/* Default Airtime weight multiplier (tuned for multi-client performance) */
#define ATH10K_AIRTIME_WEIGHT_MULTIPLIER 4
+#define ATH10K_MAX_RETRY_COUNT 30
+
struct ath10k;
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
@@ -109,6 +111,7 @@ enum ath10k_skb_flags {
ATH10K_SKB_F_MGMT = BIT(3),
ATH10K_SKB_F_QOS = BIT(4),
ATH10K_SKB_F_RAW_TX = BIT(5),
+ ATH10K_SKB_F_NOACK_TID = BIT(6),
};
struct ath10k_skb_cb {
@@ -509,6 +512,8 @@ struct ath10k_htt_tx_stats {
u64 ack_fails;
};
+#define ATH10K_TID_MAX 8
+
struct ath10k_sta {
struct ath10k_vif *arvif;
@@ -542,6 +547,13 @@ struct ath10k_sta {
#endif
/* Protected with ar->data_lock */
u32 peer_ps_state;
+ struct work_struct tid_config_wk;
+ int noack[ATH10K_TID_MAX];
+ int retry_long[ATH10K_TID_MAX];
+ int ampdu[ATH10K_TID_MAX];
+ u8 rate_ctrl[ATH10K_TID_MAX];
+ u32 rate_code[ATH10K_TID_MAX];
+ int rtscts[ATH10K_TID_MAX];
};
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
@@ -614,6 +626,14 @@ struct ath10k_vif {
/* For setting VHT peer fixed rate, protected by conf_mutex */
int vht_num_rates;
u8 vht_pfr;
+ u32 tid_conf_changed[ATH10K_TID_MAX];
+ int noack[ATH10K_TID_MAX];
+ int retry_long[ATH10K_TID_MAX];
+ int ampdu[ATH10K_TID_MAX];
+ u8 rate_ctrl[ATH10K_TID_MAX];
+ u32 rate_code[ATH10K_TID_MAX];
+ int rtscts[ATH10K_TID_MAX];
+ u32 tids_rst;
};
struct ath10k_vif_iter {
@@ -1056,6 +1076,7 @@ struct ath10k {
bool bmi_ids_valid;
bool qmi_ids_valid;
u32 qmi_board_id;
+ u32 qmi_chip_id;
u8 bmi_board_id;
u8 bmi_eboard_id;
u8 bmi_chip_id;
@@ -1295,6 +1316,7 @@ int ath10k_core_register(struct ath10k *ar,
const struct ath10k_bus_params *bus_params);
void ath10k_core_unregister(struct ath10k *ar);
int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type);
+int ath10k_core_check_dt(struct ath10k *ar);
void ath10k_core_free_board_files(struct ath10k *ar);
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 2a4498067024..7eb72290a925 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -270,6 +270,277 @@ static const struct ath10k_mem_section qca6174_hw21_register_sections[] = {
{0x80010, 0x80020},
};
+static const struct ath10k_mem_section qca6174_hw30_sdio_register_sections[] = {
+ {0x800, 0x810},
+ {0x820, 0x82C},
+ {0x830, 0x8F4},
+ {0x90C, 0x91C},
+ {0xA14, 0xA18},
+ {0xA84, 0xA94},
+ {0xAA8, 0xAD4},
+ {0xADC, 0xB40},
+ {0x1000, 0x10A4},
+ {0x10BC, 0x111C},
+ {0x1134, 0x1138},
+ {0x1144, 0x114C},
+ {0x1150, 0x115C},
+ {0x1160, 0x1178},
+ {0x1240, 0x1260},
+ {0x2000, 0x207C},
+ {0x3000, 0x3014},
+ {0x4000, 0x4014},
+ {0x5000, 0x5124},
+ {0x6000, 0x6040},
+ {0x6080, 0x60CC},
+ {0x6100, 0x611C},
+ {0x6140, 0x61D8},
+ {0x6200, 0x6238},
+ {0x6240, 0x628C},
+ {0x62C0, 0x62EC},
+ {0x6380, 0x63E8},
+ {0x6400, 0x6440},
+ {0x6480, 0x64CC},
+ {0x6500, 0x651C},
+ {0x6540, 0x6580},
+ {0x6600, 0x6638},
+ {0x6640, 0x668C},
+ {0x66C0, 0x66EC},
+ {0x6780, 0x67E8},
+ {0x7080, 0x708C},
+ {0x70C0, 0x70C8},
+ {0x7400, 0x741C},
+ {0x7440, 0x7454},
+ {0x7800, 0x7818},
+ {0x8010, 0x8060},
+ {0x8080, 0x8084},
+ {0x80A0, 0x80A4},
+ {0x80C0, 0x80C4},
+ {0x80E0, 0x80ec},
+ {0x8110, 0x8128},
+ {0x9000, 0x9004},
+ {0xF000, 0xF0E0},
+ {0xF140, 0xF190},
+ {0xF250, 0xF25C},
+ {0xF260, 0xF268},
+ {0xF26C, 0xF2A8},
+ {0x10008, 0x1000C},
+ {0x10014, 0x10018},
+ {0x1001C, 0x10020},
+ {0x10024, 0x10028},
+ {0x10030, 0x10034},
+ {0x10040, 0x10054},
+ {0x10058, 0x1007C},
+ {0x10080, 0x100C4},
+ {0x100C8, 0x10114},
+ {0x1012C, 0x10130},
+ {0x10138, 0x10144},
+ {0x10200, 0x10220},
+ {0x10230, 0x10250},
+ {0x10260, 0x10280},
+ {0x10290, 0x102B0},
+ {0x102C0, 0x102DC},
+ {0x102E0, 0x102F4},
+ {0x102FC, 0x1037C},
+ {0x10380, 0x10390},
+ {0x10800, 0x10828},
+ {0x10840, 0x10844},
+ {0x10880, 0x10884},
+ {0x108C0, 0x108E8},
+ {0x10900, 0x10928},
+ {0x10940, 0x10944},
+ {0x10980, 0x10984},
+ {0x109C0, 0x109E8},
+ {0x10A00, 0x10A28},
+ {0x10A40, 0x10A50},
+ {0x11000, 0x11028},
+ {0x11030, 0x11034},
+ {0x11038, 0x11068},
+ {0x11070, 0x11074},
+ {0x11078, 0x110A8},
+ {0x110B0, 0x110B4},
+ {0x110B8, 0x110E8},
+ {0x110F0, 0x110F4},
+ {0x110F8, 0x11128},
+ {0x11138, 0x11144},
+ {0x11178, 0x11180},
+ {0x111B8, 0x111C0},
+ {0x111F8, 0x11200},
+ {0x11238, 0x1123C},
+ {0x11270, 0x11274},
+ {0x11278, 0x1127C},
+ {0x112B0, 0x112B4},
+ {0x112B8, 0x112BC},
+ {0x112F0, 0x112F4},
+ {0x112F8, 0x112FC},
+ {0x11338, 0x1133C},
+ {0x11378, 0x1137C},
+ {0x113B8, 0x113BC},
+ {0x113F8, 0x113FC},
+ {0x11438, 0x11440},
+ {0x11478, 0x11480},
+ {0x114B8, 0x114BC},
+ {0x114F8, 0x114FC},
+ {0x11538, 0x1153C},
+ {0x11578, 0x1157C},
+ {0x115B8, 0x115BC},
+ {0x115F8, 0x115FC},
+ {0x11638, 0x1163C},
+ {0x11678, 0x1167C},
+ {0x116B8, 0x116BC},
+ {0x116F8, 0x116FC},
+ {0x11738, 0x1173C},
+ {0x11778, 0x1177C},
+ {0x117B8, 0x117BC},
+ {0x117F8, 0x117FC},
+ {0x17000, 0x1701C},
+ {0x17020, 0x170AC},
+ {0x18000, 0x18050},
+ {0x18054, 0x18074},
+ {0x18080, 0x180D4},
+ {0x180DC, 0x18104},
+ {0x18108, 0x1813C},
+ {0x18144, 0x18148},
+ {0x18168, 0x18174},
+ {0x18178, 0x18180},
+ {0x181C8, 0x181E0},
+ {0x181E4, 0x181E8},
+ {0x181EC, 0x1820C},
+ {0x1825C, 0x18280},
+ {0x18284, 0x18290},
+ {0x18294, 0x182A0},
+ {0x18300, 0x18304},
+ {0x18314, 0x18320},
+ {0x18328, 0x18350},
+ {0x1835C, 0x1836C},
+ {0x18370, 0x18390},
+ {0x18398, 0x183AC},
+ {0x183BC, 0x183D8},
+ {0x183DC, 0x183F4},
+ {0x18400, 0x186F4},
+ {0x186F8, 0x1871C},
+ {0x18720, 0x18790},
+ {0x19800, 0x19830},
+ {0x19834, 0x19840},
+ {0x19880, 0x1989C},
+ {0x198A4, 0x198B0},
+ {0x198BC, 0x19900},
+ {0x19C00, 0x19C88},
+ {0x19D00, 0x19D20},
+ {0x19E00, 0x19E7C},
+ {0x19E80, 0x19E94},
+ {0x19E98, 0x19EAC},
+ {0x19EB0, 0x19EBC},
+ {0x19F70, 0x19F74},
+ {0x19F80, 0x19F8C},
+ {0x19FA0, 0x19FB4},
+ {0x19FC0, 0x19FD8},
+ {0x1A000, 0x1A200},
+ {0x1A204, 0x1A210},
+ {0x1A228, 0x1A22C},
+ {0x1A230, 0x1A248},
+ {0x1A250, 0x1A270},
+ {0x1A280, 0x1A290},
+ {0x1A2A0, 0x1A2A4},
+ {0x1A2C0, 0x1A2EC},
+ {0x1A300, 0x1A3BC},
+ {0x1A3F0, 0x1A3F4},
+ {0x1A3F8, 0x1A434},
+ {0x1A438, 0x1A444},
+ {0x1A448, 0x1A468},
+ {0x1A580, 0x1A58C},
+ {0x1A644, 0x1A654},
+ {0x1A670, 0x1A698},
+ {0x1A6AC, 0x1A6B0},
+ {0x1A6D0, 0x1A6D4},
+ {0x1A6EC, 0x1A70C},
+ {0x1A710, 0x1A738},
+ {0x1A7C0, 0x1A7D0},
+ {0x1A7D4, 0x1A7D8},
+ {0x1A7DC, 0x1A7E4},
+ {0x1A7F0, 0x1A7F8},
+ {0x1A888, 0x1A89C},
+ {0x1A8A8, 0x1A8AC},
+ {0x1A8C0, 0x1A8DC},
+ {0x1A8F0, 0x1A8FC},
+ {0x1AE04, 0x1AE08},
+ {0x1AE18, 0x1AE24},
+ {0x1AF80, 0x1AF8C},
+ {0x1AFA0, 0x1AFB4},
+ {0x1B000, 0x1B200},
+ {0x1B284, 0x1B288},
+ {0x1B2D0, 0x1B2D8},
+ {0x1B2DC, 0x1B2EC},
+ {0x1B300, 0x1B340},
+ {0x1B374, 0x1B378},
+ {0x1B380, 0x1B384},
+ {0x1B388, 0x1B38C},
+ {0x1B404, 0x1B408},
+ {0x1B420, 0x1B428},
+ {0x1B440, 0x1B444},
+ {0x1B448, 0x1B44C},
+ {0x1B450, 0x1B458},
+ {0x1B45C, 0x1B468},
+ {0x1B584, 0x1B58C},
+ {0x1B68C, 0x1B690},
+ {0x1B6AC, 0x1B6B0},
+ {0x1B7F0, 0x1B7F8},
+ {0x1C800, 0x1CC00},
+ {0x1CE00, 0x1CE04},
+ {0x1CF80, 0x1CF84},
+ {0x1D200, 0x1D800},
+ {0x1E000, 0x20014},
+ {0x20100, 0x20124},
+ {0x21400, 0x217A8},
+ {0x21800, 0x21BA8},
+ {0x21C00, 0x21FA8},
+ {0x22000, 0x223A8},
+ {0x22400, 0x227A8},
+ {0x22800, 0x22BA8},
+ {0x22C00, 0x22FA8},
+ {0x23000, 0x233A8},
+ {0x24000, 0x24034},
+
+ /* EFUSE0,1,2 are disabled here
+ * because their state may be reset
+ *
+ * {0x24800, 0x24804},
+ * {0x25000, 0x25004},
+ * {0x25800, 0x25804},
+ */
+
+ {0x26000, 0x26064},
+ {0x27000, 0x27024},
+ {0x34000, 0x3400C},
+ {0x34400, 0x3445C},
+ {0x34800, 0x3485C},
+ {0x34C00, 0x34C5C},
+ {0x35000, 0x3505C},
+ {0x35400, 0x3545C},
+ {0x35800, 0x3585C},
+ {0x35C00, 0x35C5C},
+ {0x36000, 0x3605C},
+ {0x38000, 0x38064},
+ {0x38070, 0x380E0},
+ {0x3A000, 0x3A074},
+
+ /* The DBI window is skipped here; it can only be accessed when PCIe
+ * is active (not in reset) and CORE_CTRL_PCIE_LTSSM_EN = 0 &&
+ * PCIE_CTRL_APP_LTSSM_ENALBE = 0.
+ * {0x3C000 , 0x3C004},
+ */
+
+ {0x40000, 0x400A4},
+
+ /* The SI register is skipped here
+ * because it would cause a bus hang.
+ *
+ * {0x50000, 0x50018},
+ */
+
+ {0x80000, 0x8000C},
+ {0x80010, 0x80020},
+};
+
static const struct ath10k_mem_section qca6174_hw30_register_sections[] = {
{0x800, 0x810},
{0x820, 0x82C},
@@ -602,6 +873,59 @@ static const struct ath10k_mem_region qca6174_hw21_mem_regions[] = {
},
};
+static const struct ath10k_mem_region qca6174_hw30_sdio_mem_regions[] = {
+ {
+ .type = ATH10K_MEM_REGION_TYPE_DRAM,
+ .start = 0x400000,
+ .len = 0xa8000,
+ .name = "DRAM",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_AXI,
+ .start = 0xa0000,
+ .len = 0x18000,
+ .name = "AXI",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM1,
+ .start = 0x00980000,
+ .len = 0x00080000,
+ .name = "IRAM1",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_IRAM2,
+ .start = 0x00a00000,
+ .len = 0x00040000,
+ .name = "IRAM2",
+ .section_table = {
+ .sections = NULL,
+ .size = 0,
+ },
+ },
+ {
+ .type = ATH10K_MEM_REGION_TYPE_REG,
+ .start = 0x800,
+ .len = 0x80020 - 0x800,
+ .name = "REG_TOTAL",
+ .section_table = {
+ .sections = qca6174_hw30_sdio_register_sections,
+ .size = ARRAY_SIZE(qca6174_hw30_sdio_register_sections),
+ },
+ },
+};
+
static const struct ath10k_mem_region qca6174_hw30_mem_regions[] = {
{
.type = ATH10K_MEM_REGION_TYPE_DRAM,
@@ -968,6 +1292,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_0_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -976,6 +1301,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_1_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -984,6 +1310,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_1_3_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw10_mem_regions,
.size = ARRAY_SIZE(qca6174_hw10_mem_regions),
@@ -992,6 +1319,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_2_1_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw21_mem_regions,
.size = ARRAY_SIZE(qca6174_hw21_mem_regions),
@@ -1000,6 +1328,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_3_0_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -1008,14 +1337,25 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA6174_HW_3_2_VERSION,
.hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
},
},
{
+ .hw_id = QCA6174_HW_3_2_VERSION,
+ .hw_rev = ATH10K_HW_QCA6174,
+ .bus = ATH10K_BUS_SDIO,
+ .region_table = {
+ .regions = qca6174_hw30_sdio_mem_regions,
+ .size = ARRAY_SIZE(qca6174_hw30_sdio_mem_regions),
+ },
+ },
+ {
.hw_id = QCA9377_HW_1_1_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9377,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca6174_hw30_mem_regions,
.size = ARRAY_SIZE(qca6174_hw30_mem_regions),
@@ -1024,6 +1364,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA988X_HW_2_0_VERSION,
.hw_rev = ATH10K_HW_QCA988X,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca988x_hw20_mem_regions,
.size = ARRAY_SIZE(qca988x_hw20_mem_regions),
@@ -1032,6 +1373,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA9984_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9984,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -1040,6 +1382,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA9888_HW_2_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA9888,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca9984_hw10_mem_regions,
.size = ARRAY_SIZE(qca9984_hw10_mem_regions),
@@ -1048,6 +1391,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA99X0_HW_2_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA99X0,
+ .bus = ATH10K_BUS_PCI,
.region_table = {
.regions = qca99x0_hw20_mem_regions,
.size = ARRAY_SIZE(qca99x0_hw20_mem_regions),
@@ -1056,6 +1400,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = QCA4019_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_QCA4019,
+ .bus = ATH10K_BUS_AHB,
.region_table = {
.regions = qca4019_hw10_mem_regions,
.size = ARRAY_SIZE(qca4019_hw10_mem_regions),
@@ -1064,6 +1409,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = {
{
.hw_id = WCN3990_HW_1_0_DEV_VERSION,
.hw_rev = ATH10K_HW_WCN3990,
+ .bus = ATH10K_BUS_SNOC,
.region_table = {
.regions = wcn399x_hw10_mem_regions,
.size = ARRAY_SIZE(wcn399x_hw10_mem_regions),
@@ -1111,7 +1457,8 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k
for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) {
if (ar->target_version == hw_mem_layouts[i].hw_id &&
- ar->hw_rev == hw_mem_layouts[i].hw_rev)
+ ar->hw_rev == hw_mem_layouts[i].hw_rev &&
+ hw_mem_layouts[i].bus == ar->hif.bus)
return &hw_mem_layouts[i];
}
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index e760ce1a5f1e..42404e246e0e 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -156,6 +156,7 @@ struct ath10k_mem_region {
struct ath10k_hw_mem_layout {
u32 hw_id;
u32 hw_rev;
+ enum ath10k_bus bus;
struct {
const struct ath10k_mem_region *regions;
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index d787cbead56a..5c1af2021883 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -142,6 +142,14 @@ static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
+
+ if (idx < 0 || idx >= htt->rx_ring.size) {
+ ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
+ idx &= htt->rx_ring.size_mask;
+ ret = -ENOMEM;
+ goto fail;
+ }
+
while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
if (!skb) {
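The new check guards against a corrupted alloc index read back from DMA-shared memory: the ring size is a power of two, so masking with size_mask always yields an in-range index even if firmware wrote garbage, while the error return still aborts the fill. The sanitizing pattern in isolation (illustrative, not driver code):

    /* size must be a power of two */
    static unsigned int sanitize_ring_idx(unsigned int idx, unsigned int size)
    {
            if (idx >= size)
                    idx &= size - 1; /* clamp a corrupted value */
            return idx;
    }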
@@ -941,6 +949,7 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
+ u32 stbc, nsts_su;
info1 = __le32_to_cpu(rxd->ppdu_start.info1);
info2 = __le32_to_cpu(rxd->ppdu_start.info2);
@@ -985,11 +994,16 @@ static void ath10k_htt_rx_h_rates(struct ath10k *ar,
*/
bw = info2 & 3;
sgi = info3 & 1;
+ stbc = (info2 >> 3) & 1;
group_id = (info2 >> 4) & 0x3F;
if (GROUP_ID_IS_SU_MIMO(group_id)) {
mcs = (info3 >> 4) & 0x0F;
- nss = ((info2 >> 10) & 0x07) + 1;
+ nsts_su = ((info2 >> 10) & 0x07);
+ if (stbc)
+ nss = (nsts_su >> 2) + 1;
+ else
+ nss = (nsts_su + 1);
} else {
/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
* so it's impossible to decode MCS. Also since
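For SU VHT PPDUs the preamble advertises space-time streams (NSTS) rather than spatial streams, and STBC doubles the space-time stream count, which is why the decode above branches on the STBC bit. A hedged restatement as a standalone helper; the bit positions mirror the rx descriptor fields used above, and the mapping is exactly the driver's, not a general VHT formula:

    #include <stdint.h>

    static uint8_t vht_su_nss(uint32_t info2)
    {
            uint32_t stbc = (info2 >> 3) & 1;
            uint32_t nsts_su = (info2 >> 10) & 0x07;

            return stbc ? (nsts_su >> 2) + 1 : nsts_su + 1;
    }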
@@ -3017,7 +3031,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
- /* fall through */
+ fallthrough;
default:
/* Should not happen. */
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
@@ -3575,12 +3589,14 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar,
}
if (ar->htt.disable_tx_comp) {
- arsta->tx_retries += peer_stats->retry_pkts;
arsta->tx_failed += peer_stats->failed_pkts;
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d tx failed %d\n",
- arsta->tx_retries, arsta->tx_failed);
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
+ arsta->tx_failed);
}
+ arsta->tx_retries += peer_stats->retry_pkts;
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
+
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index bbe869575855..1fc0a312ab58 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -1314,7 +1314,7 @@ static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txm
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* fall through */
+ fallthrough;
case ATH10K_HW_TXRX_ETHERNET:
flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
break;
@@ -1460,7 +1460,7 @@ static int ath10k_htt_tx_32(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* fall through */
+ fallthrough;
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_32;
@@ -1662,7 +1662,7 @@ static int ath10k_htt_tx_64(struct ath10k_htt *htt,
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
- /* fall through */
+ fallthrough;
case ATH10K_HW_TXRX_ETHERNET:
if (ar->hw_params.continuous_frag_desc) {
ext_desc_t = htt->frag_desc.vaddr_desc_64;
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index f16edcb9f326..c6ded21f5ed6 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -593,9 +593,6 @@ struct ath10k_hw_params {
/* Target rx ring fill level */
u32 rx_ring_fill_level;
- /* target supporting per ce IRQ */
- bool per_ce_irq;
-
/* target supporting shadow register for ce write */
bool shadow_reg_support;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 3c0c33a9f30c..2e3eb5bbe49c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -2019,8 +2019,8 @@ static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
if (!arvif->is_up)
return;
- if (!ieee80211_csa_is_complete(vif)) {
- ieee80211_csa_update_counter(vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(vif)) {
+ ieee80211_beacon_update_cntdwn(vif);
ret = ath10k_mac_setup_bcn_tmpl(arvif);
if (ret)
@@ -2468,17 +2468,17 @@ ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
idx_limit = -1;
switch (idx_limit) {
- case 0: /* fall through */
- case 1: /* fall through */
- case 2: /* fall through */
- case 3: /* fall through */
- case 4: /* fall through */
- case 5: /* fall through */
- case 6: /* fall through */
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
default:
/* see ath10k_mac_can_set_bitrate_mask() */
WARN_ON(1);
- /* fall through */
+ fallthrough;
case -1:
mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
break;
@@ -3013,6 +3013,69 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
cancel_delayed_work_sync(&arvif->connection_loss_work);
}
+static int ath10k_new_peer_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_vif *arvif)
+{
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ bool config_apply;
+ int ret, i;
+
+ for (i = 0; i < ATH10K_TID_MAX; i++) {
+ config_apply = false;
+ if (arvif->retry_long[i] || arvif->ampdu[i] ||
+ arvif->rate_ctrl[i] || arvif->rtscts[i]) {
+ config_apply = true;
+ arg.tid = i;
+ arg.vdev_id = arvif->vdev_id;
+ arg.retry_count = arvif->retry_long[i];
+ arg.aggr_control = arvif->ampdu[i];
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ arg.rcode_flags = arvif->rate_code[i];
+
+ if (arvif->rtscts[i])
+ arg.ext_tid_cfg_bitmap =
+ WMI_EXT_TID_RTS_CTS_CONFIG;
+ else
+ arg.ext_tid_cfg_bitmap = 0;
+
+ arg.rtscts_ctrl = arvif->rtscts[i];
+ }
+
+ if (arvif->noack[i]) {
+ arg.ack_policy = arvif->noack[i];
+ arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
+ arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ config_apply = true;
+ }
+
+ /* Assign the default value (-1) to a newly connected station.
+ * This marks station-specific tid configurations that have not
+ * been set for the station.
+ */
+ arsta->retry_long[i] = -1;
+ arsta->noack[i] = -1;
+ arsta->ampdu[i] = -1;
+
+ if (!config_apply)
+ continue;
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret) {
+ ath10k_warn(ar, "failed to set per tid retry/aggr config for sta %pM: %d\n",
+ sta->addr, ret);
+ return ret;
+ }
+
+ memset(&arg, 0, sizeof(arg));
+ }
+
+ return 0;
+}
+
static int ath10k_station_assoc(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3078,7 +3141,10 @@ static int ath10k_station_assoc(struct ath10k *ar,
}
}
- return ret;
+ if (!test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map))
+ return ret;
+
+ return ath10k_new_peer_tid_config(ar, sta, arvif);
}
static int ath10k_station_disassoc(struct ath10k *ar,
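ath10k_new_peer_tid_config() pushes any vif-wide TID configuration down to a newly associated station, then primes the per-station slots with -1 as the "not configured" sentinel; later code falls back from the station value to the vif value. The fallback rule in miniature (field names simplified):

    #define TID_UNSET (-1)

    static int effective_tid_value(int sta_val, int vif_val)
    {
            return sta_val != TID_UNSET ? sta_val : vif_val;
    }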
@@ -3626,7 +3692,10 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool is_data = ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_data_qos(hdr->frame_control);
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_sta *arsta;
+ u8 tid, *qos_ctl;
+ bool noack = false;
cb->flags = 0;
if (!ath10k_tx_h_use_hwcrypto(vif, skb))
@@ -3635,8 +3704,27 @@ static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
if (ieee80211_is_mgmt(hdr->frame_control))
cb->flags |= ATH10K_SKB_F_MGMT;
- if (ieee80211_is_data_qos(hdr->frame_control))
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
cb->flags |= ATH10K_SKB_F_QOS;
+ qos_ctl = ieee80211_get_qos_ctl(hdr);
+ tid = (*qos_ctl) & IEEE80211_QOS_CTL_TID_MASK;
+
+ if (arvif->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
+ noack = true;
+
+ if (sta) {
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
+ noack = true;
+
+ if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_ACK)
+ noack = false;
+ }
+
+ if (noack)
+ cb->flags |= ATH10K_SKB_F_NOACK_TID;
+ }
/* Data frames encrypted in software will be posted to firmware
* with tx encap mode set to RAW. Ex: Multicast traffic generated
@@ -4238,7 +4326,7 @@ void __ath10k_scan_finish(struct ath10k *ar)
} else if (ar->scan.roc_notify) {
ieee80211_remain_on_channel_expired(ar->hw);
}
- /* fall through */
+ fallthrough;
case ATH10K_SCAN_STARTING:
ar->scan.state = ATH10K_SCAN_IDLE;
ar->scan_channel = NULL;
@@ -6597,6 +6685,581 @@ out:
return ret;
}
+struct ath10k_mac_iter_tid_conf_data {
+ struct ieee80211_vif *curr_vif;
+ struct ath10k *ar;
+ bool reset_config;
+};
+
+static bool
+ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ int *vht_num_rates)
+{
+ int num_rates = 0;
+ int i, tmp;
+
+ num_rates += hweight32(mask->control[band].legacy);
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
+ num_rates += hweight8(mask->control[band].ht_mcs[i]);
+
+ *vht_num_rates = 0;
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ tmp = hweight16(mask->control[band].vht_mcs[i]);
+ num_rates += tmp;
+ *vht_num_rates += tmp;
+ }
+
+ return num_rates == 1;
+}
+
+static int
+ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask,
+ u8 *rate, u8 *nss, bool vht_only)
+{
+ int rate_idx;
+ int i;
+ u16 bitrate;
+ u8 preamble;
+ u8 hw_rate;
+
+ if (vht_only)
+ goto next;
+
+ if (hweight32(mask->control[band].legacy) == 1) {
+ rate_idx = ffs(mask->control[band].legacy) - 1;
+
+ if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
+ rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
+
+ hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
+ bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
+
+ if (ath10k_mac_bitrate_is_cck(bitrate))
+ preamble = WMI_RATE_PREAMBLE_CCK;
+ else
+ preamble = WMI_RATE_PREAMBLE_OFDM;
+
+ *nss = 1;
+ *rate = preamble << 6 |
+ (*nss - 1) << 4 |
+ hw_rate << 0;
+
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
+ if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_HT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].ht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+next:
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
+ if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
+ *nss = i + 1;
+ *rate = WMI_RATE_PREAMBLE_VHT << 6 |
+ (*nss - 1) << 4 |
+ (ffs(mask->control[band].vht_mcs[i]) - 1);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
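The single-rate helper above packs a WMI rate code with the preamble in bits [7:6], nss - 1 in bits [5:4] and the hardware rate or MCS index in bits [3:0]. A worked example; the preamble value 2 for HT is an assumption based on the usual WMI enum order:

    unsigned char preamble = 2; /* assumed WMI_RATE_PREAMBLE_HT */
    unsigned char nss = 1;
    unsigned char mcs = 5;
    unsigned char rate_code = preamble << 6 | (nss - 1) << 4 | mcs; /* 0x85 */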
+static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ u32 rate_ctrl_flag, u8 nss)
+{
+ if (nss > sta->rx_nss) {
+ ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n",
+ nss, sta->rx_nss);
+ return -EINVAL;
+ }
+
+ if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
+ if (!sta->vht_cap.vht_supported) {
+ ath10k_warn(ar, "Invalid VHT rate for sta %pM\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
+ if (!sta->ht_cap.ht_supported || sta->vht_cap.vht_supported) {
+ ath10k_warn(ar, "Invalid HT rate for sta %pM\n",
+ sta->addr);
+ return -EINVAL;
+ }
+ } else {
+ if (sta->ht_cap.ht_supported || sta->vht_cap.vht_supported)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ath10k_mac_tid_bitrate_config(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 *rate_ctrl_flag, u8 *rate_ctrl,
+ enum nl80211_tx_rate_setting txrate_type,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 nss, rate;
+ int vht_num_rates, ret;
+
+ if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
+ return -EINVAL;
+
+ if (txrate_type == NL80211_TX_RATE_AUTOMATIC) {
+ *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ *rate_ctrl_flag = 0;
+ return 0;
+ }
+
+ band = def.chan->band;
+
+ if (!ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
+ &vht_num_rates)) {
+ return -EINVAL;
+ }
+
+ ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
+ &rate, &nss, false);
+ if (ret) {
+ ath10k_warn(ar, "failed to get single rate: %d\n",
+ ret);
+ return ret;
+ }
+
+ *rate_ctrl_flag = rate;
+
+ if (sta && ath10k_mac_validate_rate_mask(ar, sta, *rate_ctrl_flag, nss))
+ return -EINVAL;
+
+ if (txrate_type == NL80211_TX_RATE_FIXED)
+ *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE;
+ else if (txrate_type == NL80211_TX_RATE_LIMITED &&
+ (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map)))
+ *rate_ctrl = WMI_PEER_TID_CONFIG_RATE_UPPER_CAP;
+ else
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif, u32 changed,
+ struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_sta *arsta;
+ int ret;
+
+ if (sta) {
+ if (!sta->wme)
+ return -ENOTSUPP;
+
+ arsta = (struct ath10k_sta *)sta->drv_priv;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if ((arsta->retry_long[arg->tid] > 0 ||
+ arsta->rate_code[arg->tid] > 0 ||
+ arsta->ampdu[arg->tid] ==
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
+ arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+ arg->ack_policy = 0;
+ arg->aggr_control = 0;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg->aggr_control = 0;
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+ }
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+ }
+
+ ether_addr_copy(arg->peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg);
+ if (ret)
+ return ret;
+
+ /* Store the configured parameters on success */
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ arsta->noack[arg->tid] = arg->ack_policy;
+ arg->ack_policy = 0;
+ arg->aggr_control = 0;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ arsta->retry_long[arg->tid] = arg->retry_count;
+ arg->retry_count = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ arsta->ampdu[arg->tid] = arg->aggr_control;
+ arg->aggr_control = 0;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ arsta->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ arg->rate_ctrl = 0;
+ arg->rcode_flags = 0;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ arsta->rtscts[arg->tid] = arg->rtscts_ctrl;
+ arg->ext_tid_cfg_bitmap = 0;
+ }
+ } else {
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if ((arvif->retry_long[arg->tid] ||
+ arvif->rate_code[arg->tid] ||
+ arvif->ampdu[arg->tid] ==
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
+ arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
+ } else {
+ arvif->noack[arg->tid] = arg->ack_policy;
+ arvif->ampdu[arg->tid] = arg->aggr_control;
+ arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
+ else
+ arvif->retry_long[arg->tid] = arg->retry_count;
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
+ changed &= ~BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
+ else
+ arvif->ampdu[arg->tid] = arg->aggr_control;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
+ changed &= ~(BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE));
+ } else {
+ arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
+ arvif->rate_code[arg->tid] = arg->rcode_flags;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ arvif->rtscts[arg->tid] = arg->rtscts_ctrl;
+ arg->ext_tid_cfg_bitmap = 0;
+ }
+
+ if (changed)
+ arvif->tid_conf_changed[arg->tid] |= changed;
+ }
+
+ return 0;
+}
+
+static int
+ath10k_mac_parse_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif,
+ struct cfg80211_tid_cfg *tid_conf,
+ struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ u32 changed = tid_conf->mask;
+ int ret = 0, i = 0;
+
+ if (!changed)
+ return -EINVAL;
+
+ while (i < ATH10K_TID_MAX) {
+ if (!(tid_conf->tids & BIT(i))) {
+ i++;
+ continue;
+ }
+
+ arg->tid = i;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE) {
+ arg->ack_policy = WMI_PEER_TID_CONFIG_NOACK;
+ arg->rate_ctrl =
+ WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
+ arg->aggr_control =
+ WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ } else {
+ arg->ack_policy =
+ WMI_PEER_TID_CONFIG_ACK;
+ arg->rate_ctrl =
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ arg->aggr_control =
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG))
+ arg->retry_count = tid_conf->retry_long;
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE)
+ arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ else
+ arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ ret = ath10k_mac_tid_bitrate_config(ar, vif, sta,
+ &arg->rcode_flags,
+ &arg->rate_ctrl,
+ tid_conf->txrate_type,
+ &tid_conf->txrate_mask);
+ if (ret) {
+ ath10k_warn(ar, "failed to configure bitrate mask %d\n",
+ ret);
+ arg->rcode_flags = 0;
+ arg->rate_ctrl = 0;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ if (tid_conf->rtscts)
+ arg->rtscts_ctrl = tid_conf->rtscts;
+
+ arg->ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
+ }
+
+ ret = ath10k_mac_set_tid_config(ar, sta, vif, changed, arg);
+ if (ret)
+ return ret;
+ i++;
+ }
+
+ return ret;
+}
+
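tid_conf->tids is a bitmask, so one nl80211 request (reached through iw's set tidconf subcommand, where available) can target several TIDs with the same attributes; the while loop above simply visits each set bit. The walk, reduced to a standalone sketch:

    #define TID_MAX 8 /* illustrative; the driver uses ATH10K_TID_MAX */

    static void for_each_set_tid(unsigned int tids, void (*apply)(int tid))
    {
            int i;

            for (i = 0; i < TID_MAX; i++)
                    if (tids & (1u << i))
                            apply(i);
    }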
+static int ath10k_mac_reset_tid_config(struct ath10k *ar,
+ struct ieee80211_sta *sta,
+ struct ath10k_vif *arvif,
+ u8 tids)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct wmi_per_peer_per_tid_cfg_arg arg;
+ int ret = 0, i = 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ while (i < ATH10K_TID_MAX) {
+ if (!(tids & BIT(i))) {
+ i++;
+ continue;
+ }
+
+ arg.tid = i;
+ arg.ack_policy = WMI_PEER_TID_CONFIG_ACK;
+ arg.retry_count = ATH10K_MAX_RETRY_COUNT;
+ arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
+ arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
+ arg.rtscts_ctrl = WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE;
+ arg.ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret)
+ return ret;
+
+ if (!arvif->tids_rst) {
+ arsta->retry_long[i] = -1;
+ arsta->noack[i] = -1;
+ arsta->ampdu[i] = -1;
+ arsta->rate_code[i] = -1;
+ arsta->rate_ctrl[i] = 0;
+ arsta->rtscts[i] = -1;
+ } else {
+ arvif->retry_long[i] = 0;
+ arvif->noack[i] = 0;
+ arvif->ampdu[i] = 0;
+ arvif->rate_code[i] = 0;
+ arvif->rate_ctrl[i] = 0;
+ arvif->rtscts[i] = 0;
+ }
+
+ i++;
+ }
+
+ return ret;
+}
+
+static void ath10k_sta_tid_cfg_wk(struct work_struct *wk)
+{
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ struct ieee80211_sta *sta;
+ struct ath10k_sta *arsta;
+ struct ath10k_vif *arvif;
+ struct ath10k *ar;
+ bool config_apply;
+ int ret, i;
+ u32 changed;
+ u8 nss;
+
+ arsta = container_of(wk, struct ath10k_sta, tid_config_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->tids_rst) {
+ ret = ath10k_mac_reset_tid_config(ar, sta, arvif,
+ arvif->tids_rst);
+ goto exit;
+ }
+
+ ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
+
+ for (i = 0; i < ATH10K_TID_MAX; i++) {
+ config_apply = false;
+ changed = arvif->tid_conf_changed[i];
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
+ if (arsta->noack[i] != -1) {
+ arg.ack_policy = 0;
+ } else {
+ config_apply = true;
+ arg.ack_policy = arvif->noack[i];
+ arg.aggr_control = arvif->ampdu[i];
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
+ if (arsta->retry_long[i] != -1 ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.retry_count = 0;
+ } else {
+ arg.retry_count = arvif->retry_long[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
+ if (arsta->ampdu[i] != -1 ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.aggr_control = 0;
+ } else {
+ arg.aggr_control = arvif->ampdu[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
+ nss = ATH10K_HW_NSS(arvif->rate_code[i]);
+ ret = ath10k_mac_validate_rate_mask(ar, sta,
+ arvif->rate_code[i],
+ nss);
+ if (ret &&
+ arvif->rate_ctrl[i] > WMI_TID_CONFIG_RATE_CONTROL_AUTO) {
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ }
+
+ if (arsta->rate_ctrl[i] >
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO ||
+ arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
+ arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ } else {
+ arg.rate_ctrl = arvif->rate_ctrl[i];
+ arg.rcode_flags = arvif->rate_code[i];
+ config_apply = true;
+ }
+ }
+
+ if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
+ if (arsta->rtscts[i]) {
+ arg.rtscts_ctrl = 0;
+ arg.ext_tid_cfg_bitmap = 0;
+ } else {
+ arg.rtscts_ctrl = arvif->rtscts[i] - 1;
+ arg.ext_tid_cfg_bitmap =
+ WMI_EXT_TID_RTS_CTS_CONFIG;
+ config_apply = true;
+ }
+ }
+
+ arg.tid = i;
+
+ if (config_apply) {
+ ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
+ if (ret)
+ ath10k_warn(ar, "failed to set per tid config for sta %pM: %d\n",
+ sta->addr, ret);
+ }
+
+ arg.ack_policy = 0;
+ arg.retry_count = 0;
+ arg.aggr_control = 0;
+ arg.rate_ctrl = 0;
+ arg.rcode_flags = 0;
+ }
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_mac_vif_stations_tid_conf(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data *iter_data = data;
+ struct ieee80211_vif *sta_vif = arsta->arvif->vif;
+
+ if (sta_vif != iter_data->curr_vif || !sta->wme)
+ return;
+
+ ieee80211_queue_work(iter_data->ar->hw, &arsta->tid_config_wk);
+}
+
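The work item exists because ieee80211_iterate_stations_atomic() runs its callback in atomic context while the WMI send sleeps; the iterator may therefore only queue work, and the sleeping configuration runs later under conf_mutex, as in the worker above. The deferral pattern against the mac80211 API (a sketch, error handling omitted):

    #include <net/mac80211.h>

    static void tid_conf_iter(void *data, struct ieee80211_sta *sta)
    {
            struct ieee80211_hw *hw = data;
            struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;

            /* atomic context: no sleeping, just defer to the workqueue */
            ieee80211_queue_work(hw, &arsta->tid_config_wk);
    }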
static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -6616,6 +7279,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
arsta->arvif = arvif;
arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+ INIT_WORK(&arsta->tid_config_wk, ath10k_sta_tid_cfg_wk);
for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
ath10k_mac_txq_init(sta->txq[i]);
@@ -6623,8 +7287,10 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST))
+ new_state == IEEE80211_STA_NOTEXIST)) {
cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->tid_config_wk);
+ }
mutex_lock(&ar->conf_mutex);
@@ -7033,8 +7699,6 @@ exit:
return ret;
}
-#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
-
static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
@@ -7278,7 +7942,7 @@ ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
struct ieee80211_channel *channel)
{
int ret;
- enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
lockdep_assert_held(&ar->conf_mutex);
@@ -7347,30 +8011,6 @@ exit:
}
static bool
-ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
- enum nl80211_band band,
- const struct cfg80211_bitrate_mask *mask,
- int *vht_num_rates)
-{
- int num_rates = 0;
- int i, tmp;
-
- num_rates += hweight32(mask->control[band].legacy);
-
- for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
- num_rates += hweight8(mask->control[band].ht_mcs[i]);
-
- *vht_num_rates = 0;
- for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
- tmp = hweight16(mask->control[band].vht_mcs[i]);
- num_rates += tmp;
- *vht_num_rates += tmp;
- }
-
- return num_rates == 1;
-}
-
-static bool
ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
@@ -7419,69 +8059,6 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
return true;
}
-static int
-ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
- enum nl80211_band band,
- const struct cfg80211_bitrate_mask *mask,
- u8 *rate, u8 *nss, bool vht_only)
-{
- int rate_idx;
- int i;
- u16 bitrate;
- u8 preamble;
- u8 hw_rate;
-
- if (vht_only)
- goto next;
-
- if (hweight32(mask->control[band].legacy) == 1) {
- rate_idx = ffs(mask->control[band].legacy) - 1;
-
- if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
- rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
-
- hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
- bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
-
- if (ath10k_mac_bitrate_is_cck(bitrate))
- preamble = WMI_RATE_PREAMBLE_CCK;
- else
- preamble = WMI_RATE_PREAMBLE_OFDM;
-
- *nss = 1;
- *rate = preamble << 6 |
- (*nss - 1) << 4 |
- hw_rate << 0;
-
- return 0;
- }
-
- for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
- if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
- *nss = i + 1;
- *rate = WMI_RATE_PREAMBLE_HT << 6 |
- (*nss - 1) << 4 |
- (ffs(mask->control[band].ht_mcs[i]) - 1);
-
- return 0;
- }
- }
-
-next:
- for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
- if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
- *nss = i + 1;
- *rate = WMI_RATE_PREAMBLE_VHT << 6 |
- (*nss - 1) << 4 |
- (ffs(mask->control[band].vht_mcs[i]) - 1);
-
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
u8 rate, u8 nss, u8 sgi, u8 ldpc)
{
@@ -8363,19 +8940,32 @@ static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8
u8 *flags, u8 *bw)
{
struct ath10k_index_ht_data_rate_type *mcs_rate;
+ u8 index;
+ size_t len_nss1 = ARRAY_SIZE(supported_ht_mcs_rate_nss1);
+ size_t len_nss2 = ARRAY_SIZE(supported_ht_mcs_rate_nss2);
+
+ if (mcs >= (len_nss1 + len_nss2)) {
+ ath10k_warn(ar, "not supported mcs %d in current rate table", mcs);
+ return;
+ }
mcs_rate = (struct ath10k_index_ht_data_rate_type *)
((nss == 1) ? &supported_ht_mcs_rate_nss1 :
&supported_ht_mcs_rate_nss2);
- if (rate == mcs_rate[mcs].supported_rate[0]) {
+ if (mcs >= len_nss1)
+ index = mcs - len_nss1;
+ else
+ index = mcs;
+
+ if (rate == mcs_rate[index].supported_rate[0]) {
*bw = RATE_INFO_BW_20;
- } else if (rate == mcs_rate[mcs].supported_rate[1]) {
+ } else if (rate == mcs_rate[index].supported_rate[1]) {
*bw |= RATE_INFO_BW_40;
- } else if (rate == mcs_rate[mcs].supported_rate[2]) {
+ } else if (rate == mcs_rate[index].supported_rate[2]) {
*bw |= RATE_INFO_BW_20;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
- } else if (rate == mcs_rate[mcs].supported_rate[3]) {
+ } else if (rate == mcs_rate[index].supported_rate[3]) {
*bw |= RATE_INFO_BW_40;
*flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
@@ -8436,6 +9026,9 @@ static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
u8 flags = 0, bw = 0;
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac parse rate code 0x%x bitrate %d kbps\n",
+ rate_code, bitrate_kbps);
+
if (preamble == WMI_RATE_PREAMBLE_HT)
mode = ATH10K_PHY_MODE_HT;
else if (preamble == WMI_RATE_PREAMBLE_VHT)
@@ -8528,29 +9121,99 @@ static void ath10k_sta_statistics(struct ieee80211_hw *hw,
sinfo->rx_duration = arsta->rx_duration;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
- if (!arsta->txrate.legacy && !arsta->txrate.nss)
- return;
-
- if (arsta->txrate.legacy) {
- sinfo->txrate.legacy = arsta->txrate.legacy;
- } else {
- sinfo->txrate.mcs = arsta->txrate.mcs;
- sinfo->txrate.nss = arsta->txrate.nss;
- sinfo->txrate.bw = arsta->txrate.bw;
+ if (arsta->txrate.legacy || arsta->txrate.nss) {
+ if (arsta->txrate.legacy) {
+ sinfo->txrate.legacy = arsta->txrate.legacy;
+ } else {
+ sinfo->txrate.mcs = arsta->txrate.mcs;
+ sinfo->txrate.nss = arsta->txrate.nss;
+ sinfo->txrate.bw = arsta->txrate.bw;
+ }
+ sinfo->txrate.flags = arsta->txrate.flags;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
- sinfo->txrate.flags = arsta->txrate.flags;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
if (ar->htt.disable_tx_comp) {
- sinfo->tx_retries = arsta->tx_retries;
- sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
sinfo->tx_failed = arsta->tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
}
+ sinfo->tx_retries = arsta->tx_retries;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
+
ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
}
+static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct cfg80211_tid_config *tid_config)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data data = {};
+ struct wmi_per_peer_per_tid_cfg_arg arg = {};
+ int ret, i;
+
+ mutex_lock(&ar->conf_mutex);
+ arg.vdev_id = arvif->vdev_id;
+
+ arvif->tids_rst = 0;
+ memset(arvif->tid_conf_changed, 0, sizeof(arvif->tid_conf_changed));
+
+ for (i = 0; i < tid_config->n_tid_conf; i++) {
+ ret = ath10k_mac_parse_tid_config(ar, sta, vif,
+ &tid_config->tid_conf[i],
+ &arg);
+ if (ret)
+ goto exit;
+ }
+
+ ret = 0;
+
+ if (sta)
+ goto exit;
+ arvif->tids_rst = 0;
+ data.curr_vif = vif;
+ data.ar = ar;
+
+ ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
+ &data);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath10k_mac_op_reset_tid_config(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u8 tids)
+{
+ struct ath10k_vif *arvif = (void *)vif->drv_priv;
+ struct ath10k_mac_iter_tid_conf_data data = {};
+ struct ath10k *ar = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (sta) {
+ arvif->tids_rst = 0;
+ ret = ath10k_mac_reset_tid_config(ar, sta, arvif, tids);
+ goto exit;
+ }
+
+ arvif->tids_rst = tids;
+ data.curr_vif = vif;
+ data.ar = ar;
+ ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
+ &data);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_mac_op_tx,
.wake_tx_queue = ath10k_mac_op_wake_tx_queue,
@@ -8594,6 +9257,8 @@ static const struct ieee80211_ops ath10k_ops = {
.switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
.sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
.sta_statistics = ath10k_sta_statistics,
+ .set_tid_config = ath10k_mac_op_set_tid_config,
+ .reset_tid_config = ath10k_mac_op_reset_tid_config,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
@@ -9264,6 +9929,28 @@ int ath10k_mac_register(struct ath10k *ar)
if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map))
wiphy_ext_feature_set(ar->hw->wiphy,
NL80211_EXT_FEATURE_STA_TX_PWR);
+
+ if (test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) {
+ ar->hw->wiphy->tid_config_support.vif |=
+ BIT(NL80211_TID_CONFIG_ATTR_NOACK) |
+ BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT) |
+ BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG) |
+ BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
+ BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE);
+
+ if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+ ar->wmi.svc_map)) {
+ ar->hw->wiphy->tid_config_support.vif |=
+ BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL);
+ }
+
+ ar->hw->wiphy->tid_config_support.peer =
+ ar->hw->wiphy->tid_config_support.vif;
+ ar->hw->wiphy->max_data_retry_count = ATH10K_MAX_RETRY_COUNT;
+ } else {
+ ar->ops->set_tid_config = NULL;
+ }
/*
* on LL hardware queues are managed entirely by the FW
* so we only advertise to mac we can do the queues thing
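Clearing ar->ops->set_tid_config at register time only works because ath10k hands mac80211 a per-device copy of its ieee80211_ops rather than the shared static table; with a shared const table the assignment could not compile. A hedged sketch of the allocation-time copy this relies on (the kmemdup step is assumed here, it is not part of this hunk):

    struct ieee80211_ops *ops;
    struct ieee80211_hw *hw;

    ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
    if (!ops)
            return NULL;

    hw = ieee80211_alloc_hw(priv_size, ops); /* ops now mutable per device */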
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index cfde7791291a..36426efdb2ea 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2184,7 +2184,7 @@ err_req:
if (ret == 0 && resp_len) {
*resp_len = min(*resp_len, xfer.resp_len);
- memcpy(resp, tresp, xfer.resp_len);
+ memcpy(resp, tresp, *resp_len);
}
err_dma:
kfree(treq);
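This one-line change closes a buffer overflow: *resp_len was already clamped with min(), so the copy must use the clamped value rather than the raw xfer.resp_len reported by the target. The general rule as a tiny helper:

    #include <string.h>

    static size_t copy_clamped(void *dst, size_t dst_len,
                               const void *src, size_t src_len)
    {
            size_t n = dst_len < src_len ? dst_len : src_len;

            memcpy(dst, src, n); /* never trust the source's length alone */
            return n;
    }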
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 5468a41e928e..ae6b1f402adf 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -576,6 +576,8 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi)
if (resp->chip_info_valid) {
qmi->chip_info.chip_id = resp->chip_info.chip_id;
qmi->chip_info.chip_family = resp->chip_info.chip_family;
+ } else {
+ qmi->chip_info.chip_id = 0xFF;
}
if (resp->board_info_valid)
@@ -817,12 +819,18 @@ err_setup_msa:
static int ath10k_qmi_fetch_board_file(struct ath10k_qmi *qmi)
{
struct ath10k *ar = qmi->ar;
+ int ret;
ar->hif.bus = ATH10K_BUS_SNOC;
ar->id.qmi_ids_valid = true;
ar->id.qmi_board_id = qmi->board_info.board_id;
+ ar->id.qmi_chip_id = qmi->chip_info.chip_id;
ar->hw_params.fw.dir = WCN3990_HW_1_0_FW_DIR;
+ ret = ath10k_core_check_dt(ar);
+ if (ret)
+ ath10k_dbg(ar, ATH10K_DBG_QMI, "DT bdf variant name not set.\n");
+
return ath10k_core_fetch_board_file(qmi->ar, ATH10K_BD_IE_BOARD);
}
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 63f882c690bf..81ddaafb6721 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -23,6 +23,9 @@
#include "targaddrs.h"
#include "trace.h"
#include "sdio.h"
+#include "coredump.h"
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar);
#define ATH10K_SDIO_VSG_BUF_SIZE (64 * 1024)
@@ -557,6 +560,10 @@ static int ath10k_sdio_mbox_rx_alloc(struct ath10k *ar,
le16_to_cpu(htc_hdr->len),
ATH10K_HTC_MBOX_MAX_PAYLOAD_LENGTH);
ret = -ENOMEM;
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ ath10k_warn(ar, "exceeds length, start recovery\n");
+
goto err;
}
@@ -912,10 +919,9 @@ static int ath10k_sdio_mbox_proc_cpu_intr(struct ath10k *ar)
out:
mutex_unlock(&irq_data->mtx);
- if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK) {
- ath10k_err(ar, "firmware crashed!\n");
- queue_work(ar->workqueue, &ar->restart_work);
- }
+ if (cpu_int_status & MBOX_CPU_STATUS_ENABLE_ASSERT_MASK)
+ ath10k_sdio_fw_crashed_dump(ar);
+
return ret;
}
@@ -2181,6 +2187,323 @@ static int ath10k_sdio_napi_poll(struct napi_struct *ctx, int budget)
return done;
}
+static int ath10k_sdio_read_host_interest_value(struct ath10k *ar,
+ u32 item_offset,
+ u32 *val)
+{
+ u32 addr;
+ int ret;
+
+ addr = host_interest_item_address(item_offset);
+
+ ret = ath10k_sdio_diag_read32(ar, addr, val);
+
+ if (ret)
+ ath10k_warn(ar, "unable to read host interest offset %d value\n",
+ item_offset);
+
+ return ret;
+}
+
+static int ath10k_sdio_read_mem(struct ath10k *ar, u32 address, void *buf,
+ u32 buf_len)
+{
+ u32 val;
+ int i, ret;
+
+ for (i = 0; i < buf_len; i += 4) {
+ ret = ath10k_sdio_diag_read32(ar, address + i, &val);
+ if (ret) {
+ ath10k_warn(ar, "unable to read mem %d value\n", address + i);
+ break;
+ }
+ memcpy(buf + i, &val, 4);
+ }
+
+ return ret;
+}
+
+static bool ath10k_sdio_is_fast_dump_supported(struct ath10k *ar)
+{
+ u32 param;
+
+ ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_option_flag2), &param);
+
+ ath10k_dbg(ar, ATH10K_DBG_SDIO, "sdio hi_option_flag2 %x\n", param);
+
+ return param & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW;
+}
+
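The host-interest reads address a well-known structure in target memory: HI_ITEM() is an offsetof() into struct host_interest and the diag read targets a fixed base plus that offset. A standalone sketch with an illustrative base and a trimmed stand-in struct (both are assumptions, not the real layout):

    #include <stddef.h>
    #include <stdint.h>

    #define HI_BASE 0x00400800u /* illustrative base address */

    struct hi_example {
            uint32_t hi_app_start;
            uint32_t hi_failure_state;
            uint32_t hi_option_flag2;
    };

    #define HI_ITEM_EX(field) offsetof(struct hi_example, field)

    static uint32_t hi_addr(size_t item_offset)
    {
            return HI_BASE + (uint32_t)item_offset;
    }
    /* hi_addr(HI_ITEM_EX(hi_option_flag2)) -> address of the flag word */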
+static void ath10k_sdio_dump_registers(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
+ int i, ret;
+ u32 reg_dump_area;
+
+ ret = ath10k_sdio_read_host_interest_value(ar, HI_ITEM(hi_failure_state),
+ &reg_dump_area);
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump area: %d\n", ret);
+ return;
+ }
+
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+ else
+ ret = ath10k_sdio_read_mem(ar, reg_dump_area, reg_dump_values,
+ sizeof(reg_dump_values));
+
+ if (ret) {
+ ath10k_warn(ar, "failed to read firmware dump value: %d\n", ret);
+ return;
+ }
+
+ ath10k_err(ar, "firmware register dump:\n");
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i += 4)
+ ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ i,
+ reg_dump_values[i],
+ reg_dump_values[i + 1],
+ reg_dump_values[i + 2],
+ reg_dump_values[i + 3]);
+
+ if (!crash_data)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(reg_dump_values); i++)
+ crash_data->registers[i] = __cpu_to_le32(reg_dump_values[i]);
+}
+
+static int ath10k_sdio_dump_memory_section(struct ath10k *ar,
+ const struct ath10k_mem_region *mem_region,
+ u8 *buf, size_t buf_len)
+{
+ const struct ath10k_mem_section *cur_section, *next_section;
+ unsigned int count, section_size, skip_size;
+ int ret, i, j;
+
+ if (!mem_region || !buf)
+ return 0;
+
+ cur_section = &mem_region->section_table.sections[0];
+
+ if (mem_region->start > cur_section->start) {
+ ath10k_warn(ar, "incorrect memdump region 0x%x with section start address 0x%x.\n",
+ mem_region->start, cur_section->start);
+ return 0;
+ }
+
+ skip_size = cur_section->start - mem_region->start;
+
+ /* fill the gap between the region start address and the
+ * start of the first section
+ */
+ for (i = 0; i < skip_size; i++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count = 0;
+
+ for (i = 0; cur_section; i++) {
+ section_size = cur_section->end - cur_section->start;
+
+ if (section_size <= 0) {
+ ath10k_warn(ar, "incorrect ramdump format with start address 0x%x and stop address 0x%x\n",
+ cur_section->start,
+ cur_section->end);
+ break;
+ }
+
+ if ((i + 1) == mem_region->section_table.size) {
+ /* last section */
+ next_section = NULL;
+ skip_size = 0;
+ } else {
+ next_section = cur_section + 1;
+
+ if (cur_section->end > next_section->start) {
+ ath10k_warn(ar, "next ramdump section 0x%x is smaller than current end address 0x%x\n",
+ next_section->start,
+ cur_section->end);
+ break;
+ }
+
+ skip_size = next_section->start - cur_section->end;
+ }
+
+ if (buf_len < (skip_size + section_size)) {
+ ath10k_warn(ar, "ramdump buffer is too small: %zu\n", buf_len);
+ break;
+ }
+
+ buf_len -= skip_size + section_size;
+
+ /* read section to dest memory */
+ ret = ath10k_sdio_read_mem(ar, cur_section->start,
+ buf, section_size);
+ if (ret) {
+ ath10k_warn(ar, "failed to read ramdump from section 0x%x: %d\n",
+ cur_section->start, ret);
+ break;
+ }
+
+ buf += section_size;
+ count += section_size;
+
+ /* fill in the gap between this section and the next */
+ for (j = 0; j < skip_size; j++) {
+ *buf = ATH10K_MAGIC_NOT_COPIED;
+ buf++;
+ }
+
+ count += skip_size;
+
+ if (!next_section)
+ /* this was the last section */
+ break;
+
+ cur_section = next_section;
+ }
+
+ return count;
+}
+
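The section walk assumes a sorted, non-overlapping section table; bytes between consecutive sections are filled with a marker so offsets inside the dump keep corresponding to target addresses. The layout logic alone, with an illustrative marker value:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define MARKER 0xaa /* stands in for ATH10K_MAGIC_NOT_COPIED */

    struct section { uint32_t start, end; };

    static uint8_t *lay_out(uint8_t *buf, const struct section *sec, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    /* the real code copies sec[i] from the target here */
                    buf += sec[i].end - sec[i].start;
                    if (i + 1 < n) {
                            memset(buf, MARKER, sec[i + 1].start - sec[i].end);
                            buf += sec[i + 1].start - sec[i].end;
                    }
            }
            return buf;
    }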
+/* if an error happened returns < 0, otherwise the length */
+static int ath10k_sdio_dump_memory_generic(struct ath10k *ar,
+ const struct ath10k_mem_region *current_region,
+ u8 *buf,
+ bool fast_dump)
+{
+ int ret;
+
+ if (current_region->section_table.size > 0)
+ /* Copy each section individually. */
+ return ath10k_sdio_dump_memory_section(ar,
+ current_region,
+ buf,
+ current_region->len);
+
+ /* No individual memory sections defined so we can
+ * copy the entire memory region.
+ */
+ if (fast_dump)
+ ret = ath10k_bmi_read_memory(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+ else
+ ret = ath10k_sdio_read_mem(ar,
+ current_region->start,
+ buf,
+ current_region->len);
+
+ if (ret) {
+ ath10k_warn(ar, "failed to copy ramdump region %s: %d\n",
+ current_region->name, ret);
+ return ret;
+ }
+
+ return current_region->len;
+}
+
+static void ath10k_sdio_dump_memory(struct ath10k *ar,
+ struct ath10k_fw_crash_data *crash_data,
+ bool fast_dump)
+{
+ const struct ath10k_hw_mem_layout *mem_layout;
+ const struct ath10k_mem_region *current_region;
+ struct ath10k_dump_ram_data_hdr *hdr;
+ u32 count;
+ size_t buf_len;
+ int ret, i;
+ u8 *buf;
+
+ if (!crash_data)
+ return;
+
+ mem_layout = ath10k_coredump_get_mem_layout(ar);
+ if (!mem_layout)
+ return;
+
+ current_region = &mem_layout->region_table.regions[0];
+
+ buf = crash_data->ramdump_buf;
+ buf_len = crash_data->ramdump_buf_len;
+
+ memset(buf, 0, buf_len);
+
+ for (i = 0; i < mem_layout->region_table.size; i++) {
+ count = 0;
+
+ if (current_region->len > buf_len) {
+ ath10k_warn(ar, "memory region %s size %d is larger that remaining ramdump buffer size %zu\n",
+ current_region->name,
+ current_region->len,
+ buf_len);
+ break;
+ }
+
+ /* Reserve space for the header. */
+ hdr = (void *)buf;
+ buf += sizeof(*hdr);
+ buf_len -= sizeof(*hdr);
+
+ ret = ath10k_sdio_dump_memory_generic(ar, current_region, buf,
+ fast_dump);
+ if (ret >= 0)
+ count = ret;
+
+ hdr->region_type = cpu_to_le32(current_region->type);
+ hdr->start = cpu_to_le32(current_region->start);
+ hdr->length = cpu_to_le32(count);
+
+ if (count == 0)
+ /* Note: the header remains, just with zero length. */
+ break;
+
+ buf += count;
+ buf_len -= count;
+
+ current_region++;
+ }
+}
+
+void ath10k_sdio_fw_crashed_dump(struct ath10k *ar)
+{
+ struct ath10k_fw_crash_data *crash_data;
+ char guid[UUID_STRING_LEN + 1];
+ bool fast_dump;
+
+ fast_dump = ath10k_sdio_is_fast_dump_supported(ar);
+
+ if (fast_dump)
+ ath10k_bmi_start(ar);
+
+ ar->stats.fw_crash_counter++;
+
+ ath10k_sdio_disable_intrs(ar);
+
+ crash_data = ath10k_coredump_new(ar);
+
+ if (crash_data)
+ scnprintf(guid, sizeof(guid), "%pUl", &crash_data->guid);
+ else
+ scnprintf(guid, sizeof(guid), "n/a");
+
+ ath10k_err(ar, "firmware crashed! (guid %s)\n", guid);
+ ath10k_print_driver_info(ar);
+ ath10k_sdio_dump_registers(ar, crash_data, fast_dump);
+ ath10k_sdio_dump_memory(ar, crash_data, fast_dump);
+
+ ath10k_sdio_enable_intrs(ar);
+
+ queue_work(ar->workqueue, &ar->restart_work);
+}
+
static int ath10k_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 354d49b1cd45..fd41f25456dc 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -45,6 +46,7 @@ static const char * const ath10k_regulators[] = {
"vdd-1.8-xo",
"vdd-1.3-rfa",
"vdd-3.3-ch0",
+ "vdd-3.3-ch1",
};
static const char * const ath10k_clocks[] = {
@@ -923,6 +925,7 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
{
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
napi_enable(&ar->napi);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@@ -1158,7 +1161,9 @@ static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
return IRQ_HANDLED;
}
- ath10k_snoc_irq_disable(ar);
+ ath10k_ce_disable_interrupt(ar, ce_id);
+ set_bit(ce_id, ar_snoc->pending_ce_irqs);
+
napi_schedule(&ar->napi);
return IRQ_HANDLED;
@@ -1167,20 +1172,25 @@ static irqreturn_t ath10k_snoc_per_engine_handler(int irq, void *arg)
static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
{
struct ath10k *ar = container_of(ctx, struct ath10k, napi);
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
int done = 0;
+ int ce_id;
if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) {
napi_complete(ctx);
return done;
}
- ath10k_ce_per_engine_service_any(ar);
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ if (test_and_clear_bit(ce_id, ar_snoc->pending_ce_irqs)) {
+ ath10k_ce_per_engine_service(ar, ce_id);
+ ath10k_ce_enable_interrupt(ar, ce_id);
+ }
+
done = ath10k_htt_txrx_compl_task(ar, budget);
- if (done < budget) {
+ if (done < budget)
napi_complete(ctx);
- ath10k_snoc_irq_enable(ar);
- }
return done;
}
@@ -1772,9 +1782,18 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
return 0;
}
+static void ath10k_snoc_shutdown(struct platform_device *pdev)
+{
+ struct ath10k *ar = platform_get_drvdata(pdev);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc shutdown\n");
+ ath10k_snoc_remove(pdev);
+}
+
static struct platform_driver ath10k_snoc_driver = {
.probe = ath10k_snoc_probe,
.remove = ath10k_snoc_remove,
+ .shutdown = ath10k_snoc_shutdown,
.driver = {
.name = "ath10k_snoc",
.of_match_table = ath10k_snoc_dt_match,
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index a3dd06f6ac62..5095d1893681 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -78,6 +78,7 @@ struct ath10k_snoc {
unsigned long flags;
bool xo_cal_supported;
u32 xo_cal_data;
+ DECLARE_BITMAP(pending_ce_irqs, CE_COUNT_MAX);
};
static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index dff6c8ac9dba..ec556bb88d65 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -334,6 +334,17 @@ struct host_interest {
#define HI_ACS_FLAGS_SDIO_REDUCE_TX_COMPL_FW_ACK (1 << 17)
/*
+ * If both SDIO_CRASH_DUMP_ENHANCEMENT_HOST and SDIO_CRASH_DUMP_ENHANCEMENT_FW
+ * flags are set, then crashdump upload will be done using the BMI host/target
+ * communication channel.
+ */
+/* HOST to support using BMI dump FW memory when hit assert */
+#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_HOST 0x400
+
+/* FW to support using BMI dump FW memory when hit assert */
+#define HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW 0x800
+
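Both bits live in hi_option_flag2: the HOST bit is set by the host to announce it can drive a BMI-based dump, the FW bit is set by firmware to announce it can serve one. The SDIO coredump code keys its fast-dump decision on the FW bit, roughly (assuming this header is in scope):

    static int use_bmi_fast_dump(unsigned int hi_option_flag2)
    {
            return (hi_option_flag2 & HI_OPTION_SDIO_CRASH_DUMP_ENHANCEMENT_FW) != 0;
    }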
+/*
* CONSOLE FLAGS
*
* Bit Range Meaning
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index f46b9083bbf1..aefe1f7f906c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -50,6 +50,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
struct ath10k_skb_cb *skb_cb;
struct ath10k_txq *artxq;
struct sk_buff *msdu;
+ u8 flags;
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u status %d\n",
@@ -78,6 +79,7 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
artxq->num_fw_queued--;
}
+ flags = skb_cb->flags;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
@@ -101,18 +103,21 @@ int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
+ !(flags & ATH10K_SKB_F_NOACK_TID))
info->flags |= IEEE80211_TX_STAT_ACK;
if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
info->flags &= ~IEEE80211_TX_STAT_ACK;
if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
- (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (flags & ATH10K_SKB_F_NOACK_TID)))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
- if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ if ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
+ (flags & ATH10K_SKB_F_NOACK_TID))
info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
else
info->flags &= ~IEEE80211_TX_STAT_ACK;
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 0dd484f85082..aa57d807491c 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -224,6 +224,8 @@ struct wmi_ops {
struct sk_buff *(*gen_bb_timing)
(struct ath10k *ar,
const struct wmi_bb_timing_cfg_arg *arg);
+ struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg);
};
@@ -1656,4 +1658,21 @@ ath10k_wmi_pdev_bb_timing(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->set_bb_timing_cmdid);
}
+
+static inline int
+ath10k_wmi_set_per_peer_per_tid_cfg(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_per_peer_per_tid_cfg)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_per_peer_per_tid_cfg(ar, arg);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->per_peer_per_tid_config_cmdid);
+}
#endif
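This follows the usual wmi-ops shape: feature-check the op, generate a firmware-specific skb, send it on the mapped command id, so callers stay firmware-agnostic. Caller side in miniature (values illustrative; identifiers as used elsewhere in this patch):

    struct wmi_per_peer_per_tid_cfg_arg arg = {};

    arg.vdev_id = arvif->vdev_id;
    arg.tid = 4;
    arg.ack_policy = WMI_PEER_TID_CONFIG_NOACK;
    ether_addr_copy(arg.peer_macaddr.addr, sta->addr);

    ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
    if (ret == -EOPNOTSUPP)
            ; /* this firmware interface does not implement the op */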
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index e77b97ca5c7f..b39c9b78b32b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -1614,6 +1614,8 @@ wmi_tlv_svc_map(const __le32 *in, unsigned long *out, size_t len)
WMI_SERVICE_MESH_11S, len);
SVCMAP(WMI_TLV_SERVICE_SYNC_DELETE_CMDS,
WMI_SERVICE_SYNC_DELETE_CMDS, len);
+ SVCMAP(WMI_TLV_SERVICE_PEER_STATS_INFO,
+ WMI_SERVICE_PEER_STATS, len);
}
static inline void
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index a81a1ab2de19..1fa7107a5051 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -740,6 +740,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.tdls_peer_update_cmdid = WMI_10_4_TDLS_PEER_UPDATE_CMDID,
.tdls_set_offchan_mode_cmdid = WMI_10_4_TDLS_SET_OFFCHAN_MODE_CMDID,
.radar_found_cmdid = WMI_10_4_RADAR_FOUND_CMDID,
+ .per_peer_per_tid_config_cmdid = WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
};
static struct wmi_peer_param_map wmi_peer_param_map = {
@@ -3878,7 +3879,7 @@ void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
* actual channel switch is done
*/
if (arvif->vif->csa_active &&
- ieee80211_csa_is_complete(arvif->vif)) {
+ ieee80211_beacon_cntdwn_is_complete(arvif->vif)) {
ieee80211_csa_finish(arvif->vif);
continue;
}
@@ -6551,7 +6552,7 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
- u32 len, val;
+ u32 val;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS);
@@ -6603,10 +6604,8 @@ static struct sk_buff *ath10k_wmi_op_gen_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
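struct_size() replaces the open-coded length computation removed above. For a trailing flexible array it evaluates to the same sum, conceptually:

    len = sizeof(*cmd) + n * sizeof(cmd->mem_chunks.items[0]);

but through the kernel's overflow-checked helpers, which return SIZE_MAX when the multiply or add would wrap, so an absurd chunk count produces a guaranteed allocation failure instead of a short buffer.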
@@ -6624,7 +6623,7 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd_10x *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
- u32 len, val;
+ u32 val;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
@@ -6668,10 +6667,8 @@ static struct sk_buff *ath10k_wmi_10_1_op_gen_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -6689,7 +6686,7 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd_10_2 *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
- u32 len, val, features;
+ u32 val, features;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
@@ -6741,10 +6738,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -6776,7 +6771,6 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
struct wmi_init_cmd_10_4 *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10_4 config = {};
- u32 len;
config.num_vdevs = __cpu_to_le32(ar->max_num_vdevs);
config.num_peers = __cpu_to_le32(ar->max_num_peers);
@@ -6838,10 +6832,8 @@ static struct sk_buff *ath10k_wmi_10_4_op_gen_init(struct ath10k *ar)
config.iphdr_pad_config = __cpu_to_le32(TARGET_10_4_IPHDR_PAD_CONFIG);
config.qwrap_config = __cpu_to_le32(TARGET_10_4_QWRAP_CONFIG);
- len = sizeof(*cmd) +
- (sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
-
- buf = ath10k_wmi_alloc_skb(ar, len);
+ buf = ath10k_wmi_alloc_skb(ar, struct_size(cmd, mem_chunks.items,
+ ar->wmi.num_mem_chunks));
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -7549,12 +7541,9 @@ ath10k_wmi_op_gen_scan_chan_list(struct ath10k *ar,
struct sk_buff *skb;
struct wmi_channel_arg *ch;
struct wmi_channel *ci;
- int len;
int i;
- len = sizeof(*cmd) + arg->n_channels * sizeof(struct wmi_channel);
-
- skb = ath10k_wmi_alloc_skb(ar, len);
+ skb = ath10k_wmi_alloc_skb(ar, struct_size(cmd, chan_info, arg->n_channels));
if (!skb)
return ERR_PTR(-EINVAL);
@@ -9005,6 +8994,39 @@ ath10k_wmi_10_4_gen_radar_found(struct ath10k *ar,
}
static struct sk_buff *
+ath10k_wmi_10_4_gen_per_peer_per_tid_cfg(struct ath10k *ar,
+ const struct wmi_per_peer_per_tid_cfg_arg *arg)
+{
+ struct wmi_peer_per_tid_cfg_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ memset(skb->data, 0, sizeof(*cmd));
+
+ cmd = (struct wmi_peer_per_tid_cfg_cmd *)skb->data;
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ ether_addr_copy(cmd->peer_macaddr.addr, arg->peer_macaddr.addr);
+ cmd->tid = cpu_to_le32(arg->tid);
+ cmd->ack_policy = cpu_to_le32(arg->ack_policy);
+ cmd->aggr_control = cpu_to_le32(arg->aggr_control);
+ cmd->rate_control = cpu_to_le32(arg->rate_ctrl);
+ cmd->retry_count = cpu_to_le32(arg->retry_count);
+ cmd->rcode_flags = cpu_to_le32(arg->rcode_flags);
+ cmd->ext_tid_cfg_bitmap = cpu_to_le32(arg->ext_tid_cfg_bitmap);
+ cmd->rtscts_ctrl = cpu_to_le32(arg->rtscts_ctrl);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi noack tid %d vdev id %d ack_policy %d aggr %u rate_ctrl %u rcflag %u retry_count %d rtscts %d ext_tid_cfg_bitmap %d mac_addr %pM\n",
+ arg->tid, arg->vdev_id, arg->ack_policy, arg->aggr_control,
+ arg->rate_ctrl, arg->rcode_flags, arg->retry_count,
+ arg->rtscts_ctrl, arg->ext_tid_cfg_bitmap, arg->peer_macaddr.addr);
+ return skb;
+}
+
+static struct sk_buff *
ath10k_wmi_op_gen_echo(struct ath10k *ar, u32 value)
{
struct wmi_echo_cmd *cmd;
@@ -9413,6 +9435,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_get_tpc_table_cmdid =
ath10k_wmi_10_4_op_gen_pdev_get_tpc_table_cmdid,
.gen_radar_found = ath10k_wmi_10_4_gen_radar_found,
+ .gen_per_peer_per_tid_cfg = ath10k_wmi_10_4_gen_per_peer_per_tid_cfg,
/* shared with 10.2 */
.pull_echo_ev = ath10k_wmi_op_pull_echo_ev,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 511144b36231..4898e19b0af6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -203,6 +203,8 @@ enum wmi_service {
WMI_SERVICE_SYNC_DELETE_CMDS,
WMI_SERVICE_TX_PWR_PER_PEER,
WMI_SERVICE_SUPPORT_EXTEND_ADDRESS,
+ WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
/* Remember to add the new value to wmi_service_name()! */
@@ -503,6 +505,8 @@ static inline char *wmi_service_name(enum wmi_service service_id)
SVCSTR(WMI_SERVICE_SYNC_DELETE_CMDS);
SVCSTR(WMI_SERVICE_TX_PWR_PER_PEER);
SVCSTR(WMI_SERVICE_SUPPORT_EXTEND_ADDRESS);
+ SVCSTR(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT);
+ SVCSTR(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT);
case WMI_SERVICE_MAX:
return NULL;
@@ -834,6 +838,10 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_TX_PWR_PER_PEER, len);
SVCMAP(WMI_10_4_SERVICE_RESET_CHIP,
WMI_SERVICE_RESET_CHIP, len);
+ SVCMAP(WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT,
+ WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, len);
+ SVCMAP(WMI_10_4_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
+	       WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT, len);
}
#undef SVCMAP
@@ -1036,6 +1044,7 @@ struct wmi_cmd_map {
u32 tdls_set_offchan_mode_cmdid;
u32 radar_found_cmdid;
u32 set_bb_timing_cmdid;
+ u32 per_peer_per_tid_config_cmdid;
};
/*
@@ -1877,6 +1886,8 @@ enum wmi_10_4_cmd_id {
WMI_10_4_PDEV_SET_BRIDGE_MACADDR_CMDID,
WMI_10_4_ATF_GROUP_WMM_AC_CONFIG_REQUEST_CMDID,
WMI_10_4_RADAR_FOUND_CMDID,
+ WMI_10_4_PEER_CFR_CAPTURE_CMDID,
+ WMI_10_4_PER_PEER_PER_TID_CONFIG_CMDID,
WMI_10_4_PDEV_UTF_CMDID = WMI_10_4_END_CMDID - 1,
};
@@ -7220,6 +7231,71 @@ struct wmi_tdls_peer_event {
__le32 vdev_id;
} __packed;
+enum wmi_tid_aggr_control_conf {
+ WMI_TID_CONFIG_AGGR_CONTROL_IGNORE,
+ WMI_TID_CONFIG_AGGR_CONTROL_ENABLE,
+ WMI_TID_CONFIG_AGGR_CONTROL_DISABLE,
+};
+
+enum wmi_noack_tid_conf {
+ WMI_NOACK_TID_CONFIG_IGNORE_ACK_POLICY,
+ WMI_PEER_TID_CONFIG_ACK,
+ WMI_PEER_TID_CONFIG_NOACK,
+};
+
+enum wmi_tid_rate_ctrl_conf {
+ WMI_TID_CONFIG_RATE_CONTROL_IGNORE,
+ WMI_TID_CONFIG_RATE_CONTROL_AUTO,
+ WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE,
+ WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE,
+ WMI_PEER_TID_CONFIG_RATE_UPPER_CAP,
+};
+
+enum wmi_tid_rtscts_control_conf {
+ WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE,
+ WMI_TID_CONFIG_RTSCTS_CONTROL_DISABLE,
+};
+
+enum wmi_ext_tid_config_map {
+ WMI_EXT_TID_RTS_CTS_CONFIG = BIT(0),
+};
+
+struct wmi_per_peer_per_tid_cfg_arg {
+ u32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ u32 tid;
+ enum wmi_noack_tid_conf ack_policy;
+ enum wmi_tid_aggr_control_conf aggr_control;
+ u8 rate_ctrl;
+ u32 retry_count;
+ u32 rcode_flags;
+ u32 ext_tid_cfg_bitmap;
+ u32 rtscts_ctrl;
+};
+
+struct wmi_peer_per_tid_cfg_cmd {
+ __le32 vdev_id;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 tid;
+
+ /* see enum wmi_noack_tid_conf */
+ __le32 ack_policy;
+
+ /* see enum wmi_tid_aggr_control_conf */
+ __le32 aggr_control;
+
+ /* see enum wmi_tid_rate_ctrl_conf */
+ __le32 rate_control;
+ __le32 rcode_flags;
+ __le32 retry_count;
+
+ /* See enum wmi_ext_tid_config_map */
+ __le32 ext_tid_cfg_bitmap;
+
+ /* see enum wmi_tid_rtscts_control_conf */
+ __le32 rtscts_ctrl;
+} __packed;
+
enum wmi_txbf_conf {
WMI_TXBF_CONF_UNSUPPORTED,
WMI_TXBF_CONF_BEFORE_ASSOC,
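The new wmi_peer_per_tid_cfg_cmd keeps the usual WMI split: the *_arg structure stays in host byte order inside the driver, while the __packed command uses __le32 fields that the gen function earlier in this patch fills with cpu_to_le32(). A minimal sketch of that host-order/wire-order split, with hypothetical bar_* names:

#include <linux/types.h>
#include <asm/byteorder.h>

struct bar_arg {		/* driver-internal, host byte order */
	u32 vdev_id;
	u32 tid;
};

struct bar_cmd {		/* wire format, always little-endian */
	__le32 vdev_id;
	__le32 tid;
} __packed;

static void bar_fill_cmd(struct bar_cmd *cmd, const struct bar_arg *arg)
{
	/* cpu_to_le32() is a no-op on little-endian hosts and a byte
	 * swap on big-endian ones, so firmware always sees LE words.
	 */
	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
	cmd->tid = cpu_to_le32(arg->tid);
}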
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 8c26adddd034..7d65c115669f 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -275,7 +275,7 @@ static int ath10k_vif_wow_set_wakeups(struct ath10k_vif *arvif,
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_IBSS:
__set_bit(WOW_BEACON_EVENT, &wow_mask);
- /* fall through */
+ fallthrough;
case WMI_VDEV_TYPE_AP:
__set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
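The wow.c change is part of the tree-wide conversion from "/* fall through */" comments to the fallthrough; pseudo-keyword from <linux/compiler_attributes.h>, which compilers can actually verify under -Wimplicit-fallthrough. A minimal sketch of the idiom; the hypothetical wake_mask_for_type() is for illustration only:

#include <linux/compiler_attributes.h>	/* fallthrough */

static int wake_mask_for_type(int vdev_type)
{
	int mask = 0;

	switch (vdev_type) {
	case 0:			/* e.g. IBSS: beacon wakeups ... */
		mask |= 0x1;
		fallthrough;	/* ... plus everything an AP gets */
	case 1:			/* e.g. AP */
		mask |= 0x2;
		break;
	default:
		break;
	}

	return mask;
}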
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 88a97356f0a1..ad5cc6cac05b 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -2,9 +2,7 @@
config ATH11K
tristate "Qualcomm Technologies 802.11ax chipset support"
depends on MAC80211 && HAS_DMA
- depends on REMOTEPROC
depends on CRYPTO_MICHAEL_MIC
- depends on ARCH_QCOM || COMPILE_TEST
select ATH_COMMON
select QCOM_QMI_HELPERS
help
@@ -13,6 +11,22 @@ config ATH11K
If you choose to build a module, it'll be called ath11k.
+config ATH11K_AHB
+ tristate "Atheros ath11k AHB support"
+ depends on ATH11K
+ depends on REMOTEPROC
+ help
+	  This module adds support for the AHB bus.
+
+config ATH11K_PCI
+ tristate "Atheros ath11k PCI support"
+ depends on ATH11K && PCI
+ select MHI_BUS
+ select QRTR
+ select QRTR_MHI
+ help
+	  This module adds support for the PCIe bus.
+
config ATH11K_DEBUG
bool "QCA ath11k debugging"
depends on ATH11K
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 104186373c9e..c41d87bd025a 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -4,7 +4,6 @@ ath11k-y += core.o \
hal.o \
hal_tx.o \
hal_rx.o \
- ahb.o \
wmi.o \
mac.o \
reg.o \
@@ -16,13 +15,20 @@ ath11k-y += core.o \
debug.o \
ce.o \
peer.o \
- dbring.o
+ dbring.o \
+ hw.o
-ath11k-$(CONFIG_ATH11K_DEBUGFS) += debug_htt_stats.o debugfs_sta.o
+ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath11k-$(CONFIG_ATH11K_TRACING) += trace.o
ath11k-$(CONFIG_THERMAL) += thermal.o
ath11k-$(CONFIG_ATH11K_SPECTRAL) += spectral.o
+obj-$(CONFIG_ATH11K_AHB) += ath11k_ahb.o
+ath11k_ahb-y += ahb.o
+
+obj-$(CONFIG_ATH11K_PCI) += ath11k_pci.o
+ath11k_pci-y += mhi.o pci.o
+
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 30092841ac46..430723c64adc 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -20,248 +20,19 @@ static const struct of_device_id ath11k_ahb_of_match[] = {
{ .compatible = "qcom,ipq8074-wifi",
.data = (void *)ATH11K_HW_IPQ8074,
},
+ { .compatible = "qcom,ipq6018-wifi",
+ .data = (void *)ATH11K_HW_IPQ6018_HW10,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match);
-/* Target firmware's Copy Engine configuration. */
-static const struct ce_pipe_config target_ce_config_wlan[] = {
- /* CE0: host->target HTC control and raw streams */
- {
- .pipenum = __cpu_to_le32(0),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE1: target->host HTT + HTC control */
- {
- .pipenum = __cpu_to_le32(1),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE2: target->host WMI */
- {
- .pipenum = __cpu_to_le32(2),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE3: host->target WMI */
- {
- .pipenum = __cpu_to_le32(3),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE4: host->target HTT */
- {
- .pipenum = __cpu_to_le32(4),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(256),
- .nbytes_max = __cpu_to_le32(256),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE5: target->host Pktlog */
- {
- .pipenum = __cpu_to_le32(5),
- .pipedir = __cpu_to_le32(PIPEDIR_IN),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(0),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE6: Reserved for target autonomous hif_memcpy */
- {
- .pipenum = __cpu_to_le32(6),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(65535),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE7 used only by Host */
- {
- .pipenum = __cpu_to_le32(7),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE8 target->host used only by IPA */
- {
- .pipenum = __cpu_to_le32(8),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(65535),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE9 host->target HTT */
- {
- .pipenum = __cpu_to_le32(9),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT),
- .nentries = __cpu_to_le32(32),
- .nbytes_max = __cpu_to_le32(2048),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE10 target->host HTT */
- {
- .pipenum = __cpu_to_le32(10),
- .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
- .nentries = __cpu_to_le32(0),
- .nbytes_max = __cpu_to_le32(0),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-
- /* CE11 Not used */
- {
- .pipenum = __cpu_to_le32(0),
- .pipedir = __cpu_to_le32(0),
- .nentries = __cpu_to_le32(0),
- .nbytes_max = __cpu_to_le32(0),
- .flags = __cpu_to_le32(CE_ATTR_FLAGS),
- .reserved = __cpu_to_le32(0),
- },
-};
-
-/* Map from service/endpoint to Copy Engine.
- * This table is derived from the CE_PCI TABLE, above.
- * It is passed to the Target at startup for use by firmware.
- */
-static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(3),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(7),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(9),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(2),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(0),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(1),
- },
- { /* not used */
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(0),
- },
- { /* not used */
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(1),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
- .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
- .pipenum = __cpu_to_le32(4),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(1),
- },
- {
- .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
- .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
- .pipenum = __cpu_to_le32(5),
- },
-
- /* (Additions here) */
-
- { /* terminator entry */ }
+static const struct ath11k_bus_params ath11k_ahb_bus_params = {
+ .mhi_support = false,
+ .m3_fw_support = false,
+ .fixed_bdf_addr = true,
+ .fixed_mem_region = true,
};
#define ATH11K_IRQ_CE0_OFFSET 4
@@ -321,78 +92,6 @@ static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
"tcl2host-status-ring",
};
-#define ATH11K_TX_RING_MASK_0 0x1
-#define ATH11K_TX_RING_MASK_1 0x2
-#define ATH11K_TX_RING_MASK_2 0x4
-
-#define ATH11K_RX_RING_MASK_0 0x1
-#define ATH11K_RX_RING_MASK_1 0x2
-#define ATH11K_RX_RING_MASK_2 0x4
-#define ATH11K_RX_RING_MASK_3 0x8
-
-#define ATH11K_RX_ERR_RING_MASK_0 0x1
-
-#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
-
-#define ATH11K_REO_STATUS_RING_MASK_0 0x1
-
-#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
-#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
-#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
-
-#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
-#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
-#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
-
-#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
-#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
-#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
-
-const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_TX_RING_MASK_0,
- ATH11K_TX_RING_MASK_1,
- ATH11K_TX_RING_MASK_2,
-};
-
-const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- 0, 0, 0, 0,
- ATH11K_RX_MON_STATUS_RING_MASK_0,
- ATH11K_RX_MON_STATUS_RING_MASK_1,
- ATH11K_RX_MON_STATUS_RING_MASK_2,
-};
-
-const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- 0, 0, 0, 0, 0, 0, 0,
- ATH11K_RX_RING_MASK_0,
- ATH11K_RX_RING_MASK_1,
- ATH11K_RX_RING_MASK_2,
- ATH11K_RX_RING_MASK_3,
-};
-
-const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_RX_ERR_RING_MASK_0,
-};
-
-const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_RX_WBM_REL_RING_MASK_0,
-};
-
-const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_REO_STATUS_RING_MASK_0,
-};
-
-const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_RXDMA2HOST_RING_MASK_0,
- ATH11K_RXDMA2HOST_RING_MASK_1,
- ATH11K_RXDMA2HOST_RING_MASK_2,
-};
-
-const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX] = {
- ATH11K_HOST2RXDMA_RING_MASK_0,
- ATH11K_HOST2RXDMA_RING_MASK_1,
- ATH11K_HOST2RXDMA_RING_MASK_2,
-};
-
/* enum ext_irq_num - irq numbers that can be used by external modules
* like datapath
*/
@@ -449,10 +148,10 @@ static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
tasklet_kill(&ce_pipe->intr_tq);
@@ -509,7 +208,7 @@ static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_pipe_config *ce_config;
- ce_config = &target_ce_config_wlan[ce_id];
+ ce_config = &ab->hw_params.target_ce_config[ce_id];
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
ath11k_ahb_setbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
@@ -524,7 +223,7 @@ static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
const struct ce_pipe_config *ce_config;
- ce_config = &target_ce_config_wlan[ce_id];
+ ce_config = &ab->hw_params.target_ce_config[ce_id];
if (__le32_to_cpu(ce_config->pipedir) & PIPEDIR_OUT)
ath11k_ahb_clearbit32(ab, ce_id, CE_HOST_IE_ADDRESS);
@@ -540,8 +239,8 @@ static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab)
int i;
int irq_idx;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
@@ -568,8 +267,8 @@ static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_ahb_ce_irq_enable(ab, i);
}
@@ -579,8 +278,8 @@ static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_ahb_ce_irq_disable(ab, i);
}
@@ -624,9 +323,10 @@ static void ath11k_ahb_stop(struct ath11k_base *ab)
static int ath11k_ahb_power_up(struct ath11k_base *ab)
{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
int ret;
- ret = rproc_boot(ab->tgt_rproc);
+ ret = rproc_boot(ab_ahb->tgt_rproc);
if (ret)
ath11k_err(ab, "failed to boot the remote processor Q6\n");
@@ -635,17 +335,20 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
static void ath11k_ahb_power_down(struct ath11k_base *ab)
{
- rproc_shutdown(ab->tgt_rproc);
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+
+ rproc_shutdown(ab_ahb->tgt_rproc);
}
static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab)
{
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
- cfg->tgt_ce_len = ARRAY_SIZE(target_ce_config_wlan) - 1;
- cfg->tgt_ce = target_ce_config_wlan;
- cfg->svc_to_ce_map_len = ARRAY_SIZE(target_service_to_ce_map_wlan);
- cfg->svc_to_ce_map = target_service_to_ce_map_wlan;
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
+ ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074;
}
static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
@@ -665,8 +368,8 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab)
int irq_idx;
int i;
- for (i = 0; i < CE_COUNT; i++) {
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
@@ -675,9 +378,9 @@ static void ath11k_ahb_free_irq(struct ath11k_base *ab)
ath11k_ahb_free_ext_irq(ab);
}
-static void ath11k_ahb_ce_tasklet(unsigned long data)
+static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t)
{
- struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
+ struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
@@ -734,6 +437,7 @@ static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg)
static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
{
+ struct ath11k_hw_params *hw = &ab->hw_params;
int i, j;
int irq;
int ret;
@@ -749,45 +453,45 @@ static int ath11k_ahb_ext_irq_config(struct ath11k_base *ab)
ath11k_ahb_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
- if (ath11k_tx_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->tx[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
wbm2host_tx_completions_ring1 - j;
}
- if (ath11k_rx_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->rx[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
reo2host_destination_ring1 - j;
}
- if (ath11k_rx_err_ring_mask[i] & BIT(j))
+ if (ab->hw_params.ring_mask->rx_err[i] & BIT(j))
irq_grp->irqs[num_irq++] = reo2host_exception;
- if (ath11k_rx_wbm_rel_ring_mask[i] & BIT(j))
+ if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j))
irq_grp->irqs[num_irq++] = wbm2host_rx_release;
- if (ath11k_reo_status_ring_mask[i] & BIT(j))
+ if (ab->hw_params.ring_mask->reo_status[i] & BIT(j))
irq_grp->irqs[num_irq++] = reo2host_status;
- if (j < MAX_RADIOS) {
- if (ath11k_rxdma2host_ring_mask[i] & BIT(j)) {
+ if (j < ab->hw_params.max_radios) {
+ if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
- rxdma2host_destination_ring_mac1
- - ath11k_core_get_hw_mac_id(ab, j);
+ rxdma2host_destination_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
}
- if (ath11k_host2rxdma_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
- host2rxdma_host_buf_ring_mac1
- - ath11k_core_get_hw_mac_id(ab, j);
+ host2rxdma_host_buf_ring_mac1 -
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
}
- if (rx_mon_status_ring_mask[i] & BIT(j)) {
+ if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) {
irq_grp->irqs[num_irq++] =
ppdu_end_interrupts_mac1 -
- ath11k_core_get_hw_mac_id(ab, j);
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
irq_grp->irqs[num_irq++] =
rxdma2host_monitor_status_ring_mac1 -
- ath11k_core_get_hw_mac_id(ab, j);
+ ath11k_hw_get_mac_from_pdev_id(hw, j);
}
}
}
@@ -819,16 +523,15 @@ static int ath11k_ahb_config_irq(struct ath11k_base *ab)
int ret;
/* Configure CE irqs */
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
irq_idx = ATH11K_IRQ_CE0_OFFSET + i;
- tasklet_init(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet,
- (unsigned long)ce_pipe);
+ tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet);
irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]);
ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler,
IRQF_TRIGGER_RISING, irq_name[irq_idx],
@@ -852,8 +555,8 @@ static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id
bool ul_set = false, dl_set = false;
int i;
- for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
- entry = &target_service_to_ce_map_wlan[i];
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
if (__le32_to_cpu(entry->service_id) != service_id)
continue;
@@ -900,6 +603,28 @@ static const struct ath11k_hif_ops ath11k_ahb_hif_ops = {
.power_up = ath11k_ahb_power_up,
};
+static int ath11k_core_get_rproc(struct ath11k_base *ab)
+{
+ struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
+ struct device *dev = ab->dev;
+ struct rproc *prproc;
+ phandle rproc_phandle;
+
+ if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
+ ath11k_err(ab, "failed to get q6_rproc handle\n");
+ return -ENOENT;
+ }
+
+ prproc = rproc_get_by_phandle(rproc_phandle);
+ if (!prproc) {
+ ath11k_err(ab, "failed to get rproc\n");
+ return -EINVAL;
+ }
+ ab_ahb->tgt_rproc = prproc;
+
+ return 0;
+}
+
static int ath11k_ahb_probe(struct platform_device *pdev)
{
struct ath11k_base *ab;
@@ -926,7 +651,9 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
return ret;
}
- ab = ath11k_core_alloc(&pdev->dev, 0, ATH11K_BUS_AHB);
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb),
+ ATH11K_BUS_AHB,
+ &ath11k_ahb_bus_params);
if (!ab) {
dev_err(&pdev->dev, "failed to allocate ath11k base\n");
return -ENOMEM;
@@ -939,6 +666,10 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
ab->mem_len = resource_size(mem_res);
platform_set_drvdata(pdev, ab);
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_core_free;
+
ret = ath11k_hal_srng_init(ab);
if (ret)
goto err_core_free;
@@ -951,9 +682,9 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
ath11k_ahb_init_qmi_ce_config(ab);
- ret = ath11k_ahb_config_irq(ab);
+ ret = ath11k_core_get_rproc(ab);
if (ret) {
- ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ ath11k_err(ab, "failed to get rproc: %d\n", ret);
goto err_ce_free;
}
@@ -963,6 +694,12 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
goto err_ce_free;
}
+ ret = ath11k_ahb_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to configure irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
return 0;
err_ce_free:
@@ -981,12 +718,16 @@ err_core_free:
static int ath11k_ahb_remove(struct platform_device *pdev)
{
struct ath11k_base *ab = platform_get_drvdata(pdev);
+ unsigned long left;
reinit_completion(&ab->driver_recovery);
- if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags))
- wait_for_completion_timeout(&ab->driver_recovery,
- ATH11K_AHB_RECOVERY_TIMEOUT);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) {
+ left = wait_for_completion_timeout(&ab->driver_recovery,
+ ATH11K_AHB_RECOVERY_TIMEOUT);
+ if (!left)
+ ath11k_warn(ab, "failed to receive recovery response completion\n");
+ }
set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
cancel_work_sync(&ab->restart_work);
@@ -1023,5 +764,5 @@ static void ath11k_ahb_exit(void)
}
module_exit(ath11k_ahb_exit);
-MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax wireless chip");
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");
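Besides moving to per-hw_params tables, the ahb.c hunks complete the tasklet API conversion: tasklet_init() with an "unsigned long" cast becomes tasklet_setup(), and the callback recovers its container through from_tasklet(), a container_of() wrapper. A minimal sketch with a hypothetical my_pipe structure:

#include <linux/interrupt.h>
#include <linux/printk.h>

struct my_pipe {
	int pipe_num;
	struct tasklet_struct intr_tq;
};

static void my_pipe_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet(var, t, member) is container_of() for tasklets:
	 * it maps &pipe->intr_tq back to the enclosing struct my_pipe,
	 * replacing the old (unsigned long)data cast.
	 */
	struct my_pipe *pipe = from_tasklet(pipe, t, intr_tq);

	pr_debug("servicing pipe %d\n", pipe->pipe_num);
}

static void my_pipe_irq_setup(struct my_pipe *pipe)
{
	tasklet_setup(&pipe->intr_tq, my_pipe_tasklet);
}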
diff --git a/drivers/net/wireless/ath/ath11k/ahb.h b/drivers/net/wireless/ath/ath11k/ahb.h
index 6c7b26ac6545..51e6e4a5f686 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.h
+++ b/drivers/net/wireless/ath/ath11k/ahb.h
@@ -10,4 +10,12 @@
#define ATH11K_AHB_RECOVERY_TIMEOUT (3 * HZ)
struct ath11k_base;
+struct ath11k_ahb {
+ struct rproc *tgt_rproc;
+};
+
+static inline struct ath11k_ahb *ath11k_ahb_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_ahb *)ab->drv_priv;
+}
#endif
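ath11k_ahb_priv() follows the common bus-private-tail pattern: ath11k_core_alloc() is now called with sizeof(struct ath11k_ahb), that storage lands in the aligned drv_priv[] flexible array at the end of ath11k_base (see the core.h hunk below), and the helper simply casts the tail back. A minimal sketch of the layout, assuming hypothetical my_* names:

#include <linux/slab.h>
#include <linux/types.h>

struct my_base {
	int id;
	/* must be last: bus-private data lives in this tail */
	u8 drv_priv[] __aligned(sizeof(void *));
};

struct my_ahb {
	void *tgt_rproc;	/* stand-in for struct rproc * */
};

static inline struct my_ahb *my_ahb_priv(struct my_base *b)
{
	return (struct my_ahb *)b->drv_priv;
}

static struct my_base *my_base_alloc(size_t priv_size)
{
	/* one allocation covers the core struct plus priv_size bytes
	 * of bus-private storage directly behind it
	 */
	return kzalloc(sizeof(struct my_base) + priv_size, GFP_KERNEL);
}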
diff --git a/drivers/net/wireless/ath/ath11k/ce.c b/drivers/net/wireless/ath/ath11k/ce.c
index cdd40c8fc867..9d730f8ac816 100644
--- a/drivers/net/wireless/ath/ath11k/ce.c
+++ b/drivers/net/wireless/ath/ath11k/ce.c
@@ -5,8 +5,9 @@
#include "dp_rx.h"
#include "debug.h"
+#include "hif.h"
-static const struct ce_attr host_ce_config_wlan[] = {
+const struct ce_attr ath11k_host_ce_config_ipq8074[] = {
/* CE0: host->target HTC control and raw streams */
{
.flags = CE_ATTR_FLAGS,
@@ -108,6 +109,104 @@ static const struct ce_attr host_ce_config_wlan[] = {
},
};
+const struct ce_attr ath11k_host_ce_config_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 16,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_htc_rx_completion_handler,
+ },
+
+ /* CE3: host->target WMI (mac0) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 2048,
+ .src_sz_max = 256,
+ .dest_nentries = 0,
+ },
+
+ /* CE5: target->host pktlog */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 2048,
+ .dest_nentries = 512,
+ .recv_cb = ath11k_dp_htt_htc_t2h_msg_handler,
+ },
+
+ /* CE6: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+
+ /* CE7: host->target WMI (mac1) */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 32,
+ .src_sz_max = 2048,
+ .dest_nentries = 0,
+ },
+
+ /* CE8: target autonomous hif_memcpy */
+ {
+ .flags = CE_ATTR_FLAGS,
+ .src_nentries = 0,
+ .src_sz_max = 0,
+ .dest_nentries = 0,
+ },
+};
+
+static bool ath11k_ce_need_shadow_fix(int ce_id)
+{
+	/* only CE4 needs the shadow workaround */
+ if (ce_id == 4)
+ return true;
+ return false;
+}
+
+static void ath11k_ce_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++)
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+}
+
static int ath11k_ce_rx_buf_enqueue_pipe(struct ath11k_ce_pipe *pipe,
struct sk_buff *skb, dma_addr_t paddr)
{
@@ -352,6 +451,31 @@ static void ath11k_ce_send_done_cb(struct ath11k_ce_pipe *pipe)
}
}
+static void ath11k_ce_srng_msi_ring_params_setup(struct ath11k_base *ab, u32 ce_id,
+ struct hal_srng_params *ring_params)
+{
+ u32 msi_data_start;
+ u32 msi_data_count;
+ u32 msi_irq_start;
+ u32 addr_lo;
+ u32 addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "CE",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+
+ if (ret)
+ return;
+
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (ce_id % msi_data_count) + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
static int ath11k_ce_init_ring(struct ath11k_base *ab,
struct ath11k_ce_ring *ce_ring,
int ce_id, enum hal_ring_type type)
@@ -363,21 +487,24 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab,
params.ring_base_vaddr = ce_ring->base_addr_owner_space;
params.num_entries = ce_ring->nentries;
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
+ ath11k_ce_srng_msi_ring_params_setup(ab, ce_id, &params);
+
switch (type) {
case HAL_CE_SRC:
- if (!(CE_ATTR_DIS_INTR & host_ce_config_wlan[ce_id].flags))
+ if (!(CE_ATTR_DIS_INTR & ab->hw_params.host_ce_config[ce_id].flags))
params.intr_batch_cntr_thres_entries = 1;
break;
case HAL_CE_DST:
- params.max_buffer_len = host_ce_config_wlan[ce_id].src_sz_max;
- if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ params.max_buffer_len = ab->hw_params.host_ce_config[ce_id].src_sz_max;
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_timer_thres_us = 1024;
params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
params.low_threshold = ce_ring->nentries - 3;
}
break;
case HAL_CE_DST_STATUS:
- if (!(host_ce_config_wlan[ce_id].flags & CE_ATTR_DIS_INTR)) {
+ if (!(ab->hw_params.host_ce_config[ce_id].flags & CE_ATTR_DIS_INTR)) {
params.intr_batch_cntr_thres_entries = 1;
params.intr_timer_thres_us = 0x1000;
}
@@ -395,8 +522,15 @@ static int ath11k_ce_init_ring(struct ath11k_base *ab,
ret, ce_id);
return ret;
}
+
ce_ring->hal_ring_id = ret;
+ if (ab->hw_params.supports_shadow_regs &&
+ ath11k_ce_need_shadow_fix(ce_id))
+ ath11k_dp_shadow_init_timer(ab, &ab->ce.hp_timer[ce_id],
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ ce_ring->hal_ring_id);
+
return 0;
}
@@ -440,7 +574,7 @@ ath11k_ce_alloc_ring(struct ath11k_base *ab, int nentries, int desc_sz)
static int ath11k_ce_alloc_pipe(struct ath11k_base *ab, int ce_id)
{
struct ath11k_ce_pipe *pipe = &ab->ce.ce_pipe[ce_id];
- const struct ce_attr *attr = &host_ce_config_wlan[ce_id];
+ const struct ce_attr *attr = &ab->hw_params.host_ce_config[ce_id];
struct ath11k_ce_ring *ring;
int nentries;
int desc_sz;
@@ -494,6 +628,7 @@ void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id)
if ((pipe->attr_flags & CE_ATTR_DIS_INTR) && pipe->send_cb)
pipe->send_cb(pipe);
}
+EXPORT_SYMBOL(ath11k_ce_per_engine_service);
int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
u16 transfer_id)
@@ -568,6 +703,9 @@ int ath11k_ce_send(struct ath11k_base *ab, struct sk_buff *skb, u8 pipe_id,
ath11k_hal_srng_access_end(ab, srng);
+ if (ath11k_ce_need_shadow_fix(pipe_id))
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->ce.hp_timer[pipe_id]);
+
spin_unlock_bh(&srng->lock);
spin_unlock_bh(&ab->ce.ce_lock);
@@ -604,12 +742,57 @@ static void ath11k_ce_rx_pipe_cleanup(struct ath11k_ce_pipe *pipe)
}
}
+static void ath11k_ce_shadow_config(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ab->hw_params.host_ce_config[i].src_nentries)
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_SRC, i);
+
+ if (ab->hw_params.host_ce_config[i].dest_nentries) {
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST, i);
+
+ ath11k_hal_srng_update_shadow_config(ab,
+ HAL_CE_DST_STATUS, i);
+ }
+ }
+}
+
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+
+ /* shadow is already configured */
+ if (*shadow_cfg_len)
+ return;
+
+	/* shadow isn't configured yet, configure now.
+	 * non-CE srngs are configured first, then
+	 * all CE srngs.
+	 */
+ ath11k_hal_srng_shadow_config(ab);
+ ath11k_ce_shadow_config(ab);
+
+ /* get the shadow configuration */
+ ath11k_hal_srng_get_shadow_config(ab, shadow_cfg, shadow_cfg_len);
+}
+EXPORT_SYMBOL(ath11k_ce_get_shadow_config);
+
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
{
struct ath11k_ce_pipe *pipe;
int pipe_num;
- for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
+ ath11k_ce_stop_shadow_timers(ab);
+
+ for (pipe_num = 0; pipe_num < ab->hw_params.ce_count; pipe_num++) {
pipe = &ab->ce.ce_pipe[pipe_num];
ath11k_ce_rx_pipe_cleanup(pipe);
@@ -619,6 +802,7 @@ void ath11k_ce_cleanup_pipes(struct ath11k_base *ab)
/* NOTE: Should we also clean up tx buffer in all pipes? */
}
}
+EXPORT_SYMBOL(ath11k_ce_cleanup_pipes);
void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
{
@@ -626,7 +810,7 @@ void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
int i;
int ret;
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
ret = ath11k_ce_rx_post_pipe(pipe);
if (ret) {
@@ -642,6 +826,7 @@ void ath11k_ce_rx_post_buf(struct ath11k_base *ab)
}
}
}
+EXPORT_SYMBOL(ath11k_ce_rx_post_buf);
void ath11k_ce_rx_replenish_retry(struct timer_list *t)
{
@@ -656,7 +841,10 @@ int ath11k_ce_init_pipes(struct ath11k_base *ab)
int i;
int ret;
- for (i = 0; i < CE_COUNT; i++) {
+ ath11k_ce_get_shadow_config(ab, &ab->qmi.ce_cfg.shadow_reg_v2,
+ &ab->qmi.ce_cfg.shadow_reg_v2_len);
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
if (pipe->src_ring) {
@@ -714,9 +902,12 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
int desc_sz;
int i;
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
pipe = &ab->ce.ce_pipe[i];
+ if (ath11k_ce_need_shadow_fix(i))
+ ath11k_dp_shadow_stop_timer(ab, &ab->ce.hp_timer[i]);
+
if (pipe->src_ring) {
desc_sz = ath11k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
dma_free_coherent(ab->dev,
@@ -752,6 +943,7 @@ void ath11k_ce_free_pipes(struct ath11k_base *ab)
}
}
}
+EXPORT_SYMBOL(ath11k_ce_free_pipes);
int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
{
@@ -762,8 +954,8 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
spin_lock_init(&ab->ce.ce_lock);
- for (i = 0; i < CE_COUNT; i++) {
- attr = &host_ce_config_wlan[i];
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ attr = &ab->hw_params.host_ce_config[i];
pipe = &ab->ce.ce_pipe[i];
pipe->pipe_num = i;
pipe->ab = ab;
@@ -779,6 +971,7 @@ int ath11k_ce_alloc_pipes(struct ath11k_base *ab)
return 0;
}
+EXPORT_SYMBOL(ath11k_ce_alloc_pipes);
/* For Big Endian Host, Copy Engine byte_swap is enabled
* When Copy Engine does byte_swap, need to byte swap again for the
@@ -799,10 +992,11 @@ void ath11k_ce_byte_swap(void *mem, u32 len)
}
}
-int ath11k_ce_get_attr_flags(int ce_id)
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id)
{
- if (ce_id >= CE_COUNT)
+ if (ce_id >= ab->hw_params.ce_count)
return -EINVAL;
- return host_ce_config_wlan[ce_id].flags;
+ return ab->hw_params.host_ce_config[ce_id].flags;
}
+EXPORT_SYMBOL(ath11k_ce_get_attr_flags);
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 688f357e6eaf..269b599ac0b0 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -6,7 +6,7 @@
#ifndef ATH11K_CE_H
#define ATH11K_CE_H
-#define CE_COUNT 12
+#define CE_COUNT_MAX 12
/* Byte swap data words */
#define CE_ATTR_BYTE_SWAP_DATA 2
@@ -165,11 +165,15 @@ struct ath11k_ce_pipe {
};
struct ath11k_ce {
- struct ath11k_ce_pipe ce_pipe[CE_COUNT];
+ struct ath11k_ce_pipe ce_pipe[CE_COUNT_MAX];
/* Protects rings of all ce pipes */
spinlock_t ce_lock;
+ struct ath11k_hp_update_timer hp_timer[CE_COUNT_MAX];
};
+extern const struct ce_attr ath11k_host_ce_config_ipq8074[];
+extern const struct ce_attr ath11k_host_ce_config_qca6390[];
+
void ath11k_ce_cleanup_pipes(struct ath11k_base *ab);
void ath11k_ce_rx_replenish_retry(struct timer_list *t);
void ath11k_ce_per_engine_service(struct ath11k_base *ab, u16 ce_id);
@@ -179,6 +183,11 @@ void ath11k_ce_rx_post_buf(struct ath11k_base *ab);
int ath11k_ce_init_pipes(struct ath11k_base *ab);
int ath11k_ce_alloc_pipes(struct ath11k_base *ab);
void ath11k_ce_free_pipes(struct ath11k_base *ab);
-int ath11k_ce_get_attr_flags(int ce_id);
+int ath11k_ce_get_attr_flags(struct ath11k_base *ab, int ce_id);
void ath11k_ce_poll_send_completed(struct ath11k_base *ab, u8 pipe_id);
+int ath11k_ce_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe);
+int ath11k_ce_attr_attach(struct ath11k_base *ab);
+void ath11k_ce_get_shadow_config(struct ath11k_base *ab,
+ u32 **shadow_cfg, u32 *shadow_cfg_len);
#endif
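The CE_COUNT -> CE_COUNT_MAX rename captures the new split between the compile-time array bound and the per-chip runtime count: arrays stay sized for the largest supported chip, while every loop now iterates ab->hw_params.ce_count. A minimal sketch of the bound-versus-count idiom, hypothetical names throughout:

#define MY_CE_COUNT_MAX	12	/* bound: largest supported chip */

struct my_hw_params {
	int ce_count;		/* runtime: this chip's pipe count */
};

struct my_ce_pipe {
	int pipe_num;
};

struct my_dev {
	struct my_hw_params hw_params;
	struct my_ce_pipe pipes[MY_CE_COUNT_MAX];	/* statically sized */
};

static void my_dev_init_pipes(struct my_dev *dev)
{
	int i;

	/* only the pipes this chip actually has are touched */
	for (i = 0; i < dev->hw_params.ce_count; i++)
		dev->pipes[i].pipe_num = i;
}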
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 905cd8beaf28..ebd6886a8c18 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -14,43 +14,139 @@
#include "hif.h"
unsigned int ath11k_debug_mask;
+EXPORT_SYMBOL(ath11k_debug_mask);
module_param_named(debug_mask, ath11k_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
-static const struct ath11k_hw_params ath11k_hw_params = {
- .name = "ipq8074",
- .fw = {
- .dir = IPQ8074_FW_DIR,
- .board_size = IPQ8074_MAX_BOARD_DATA_SZ,
- .cal_size = IPQ8074_MAX_CAL_DATA_SZ,
+static unsigned int ath11k_crypto_mode;
+module_param_named(crypto_mode, ath11k_crypto_mode, uint, 0644);
+MODULE_PARM_DESC(crypto_mode, "crypto mode: 0-hardware, 1-software");
+
+/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
+unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
+module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+ "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
+static const struct ath11k_hw_params ath11k_hw_params[] = {
+ {
+ .hw_rev = ATH11K_HW_IPQ8074,
+ .name = "ipq8074 hw2.0",
+ .fw = {
+ .dir = "IPQ8074/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &ipq8074_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
+ .svc_to_ce_map_len = 21,
+ .single_pdev_only = false,
+ .needs_band_to_mac = true,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .tcl_0_only = false,
+ .spectral_fft_sz = 2,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ },
+ {
+ .hw_rev = ATH11K_HW_IPQ6018_HW10,
+ .name = "ipq6018 hw1.0",
+ .fw = {
+ .dir = "IPQ6018/hw1.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 2,
+ .bdf_addr = 0x4ABC0000,
+ .hw_ops = &ipq6018_ops,
+ .ring_mask = &ath11k_hw_ring_mask_ipq8074,
+ .internal_sleep_clock = false,
+ .regs = &ipq8074_regs,
+ .host_ce_config = ath11k_host_ce_config_ipq8074,
+ .ce_count = 12,
+ .target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
+ .target_ce_count = 11,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
+ .svc_to_ce_map_len = 19,
+ .single_pdev_only = false,
+ .needs_band_to_mac = true,
+ .rxdma1_enable = true,
+ .num_rxmda_per_pdev = 1,
+ .rx_mac_buf_ring = false,
+ .vdev_start_delay = false,
+ .htt_peer_map_v2 = true,
+ .tcl_0_only = false,
+ .spectral_fft_sz = 4,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_MESH_POINT),
+ .supports_monitor = true,
+ .supports_shadow_regs = false,
+ .idle_ps = false,
+ },
+ {
+ .name = "qca6390 hw2.0",
+ .hw_rev = ATH11K_HW_QCA6390_HW20,
+ .fw = {
+ .dir = "QCA6390/hw2.0",
+ .board_size = 256 * 1024,
+ .cal_size = 256 * 1024,
+ },
+ .max_radios = 3,
+ .bdf_addr = 0x4B0C0000,
+ .hw_ops = &qca6390_ops,
+ .ring_mask = &ath11k_hw_ring_mask_qca6390,
+ .internal_sleep_clock = true,
+ .regs = &qca6390_regs,
+ .host_ce_config = ath11k_host_ce_config_qca6390,
+ .ce_count = 9,
+ .target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+ .target_ce_count = 9,
+ .svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+ .svc_to_ce_map_len = 14,
+ .single_pdev_only = true,
+ .needs_band_to_mac = false,
+ .rxdma1_enable = false,
+ .num_rxmda_per_pdev = 2,
+ .rx_mac_buf_ring = true,
+ .vdev_start_delay = true,
+ .htt_peer_map_v2 = false,
+ .tcl_0_only = true,
+ .spectral_fft_sz = 0,
+
+ .interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .supports_monitor = false,
+ .supports_shadow_regs = true,
+ .idle_ps = true,
},
};
-/* Map from pdev index to hw mac index */
-u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx)
-{
- switch (pdev_idx) {
- case 0:
- return 0;
- case 1:
- return 2;
- case 2:
- return 1;
- default:
- ath11k_warn(ab, "Invalid pdev idx %d\n", pdev_idx);
- return ATH11K_INVALID_HW_MAC_ID;
- }
-}
-EXPORT_SYMBOL(ath11k_core_get_hw_mac_id);
-
static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
size_t name_len)
{
- /* Note: bus is fixed to ahb. When other bus type supported,
- * make it to dynamic.
- */
scnprintf(name, name_len,
- "bus=ahb,qmi-chip-id=%d,qmi-board-id=%d",
+ "bus=%s,qmi-chip-id=%d,qmi-board-id=%d",
+ ath11k_bus_str(ab->hif.bus),
ab->qmi.target.chip_id,
ab->qmi.target.board_id);
@@ -59,29 +155,24 @@ static int ath11k_core_create_board_name(struct ath11k_base *ab, char *name,
return 0;
}
-static const struct firmware *ath11k_fetch_fw_file(struct ath11k_base *ab,
- const char *dir,
- const char *file)
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *file)
{
- char filename[100];
const struct firmware *fw;
+ char path[100];
int ret;
if (file == NULL)
return ERR_PTR(-ENOENT);
- if (dir == NULL)
- dir = ".";
-
- snprintf(filename, sizeof(filename), "%s/%s", dir, file);
- ret = firmware_request_nowarn(&fw, filename, ab->dev);
- ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot fw request '%s': %d\n",
- filename, ret);
+ ath11k_core_create_firmware_path(ab, file, path, sizeof(path));
+ ret = firmware_request_nowarn(&fw, path, ab->dev);
if (ret)
return ERR_PTR(ret);
- ath11k_warn(ab, "Downloading BDF: %s, size: %zu\n",
- filename, fw->size);
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot firmware request %s size %zu\n",
+ path, fw->size);
return fw;
}
@@ -181,26 +272,30 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
{
size_t len, magic_len;
const u8 *data;
- char *filename = ATH11K_BOARD_API2_FILE;
+ char *filename, filepath[100];
size_t ie_len;
struct ath11k_fw_ie *hdr;
int ret, ie_id;
+ filename = ATH11K_BOARD_API2_FILE;
+
if (!bd->fw)
- bd->fw = ath11k_fetch_fw_file(ab,
- ab->hw_params.fw.dir,
- filename);
+ bd->fw = ath11k_core_firmware_request(ab, filename);
+
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
data = bd->fw->data;
len = bd->fw->size;
+ ath11k_core_create_firmware_path(ab, filename,
+ filepath, sizeof(filepath));
+
/* magic has extra null byte padded */
magic_len = strlen(ATH11K_BOARD_MAGIC) + 1;
if (len < magic_len) {
- ath11k_err(ab, "failed to find magic value in %s/%s, file too short: %zu\n",
- ab->hw_params.fw.dir, filename, len);
+ ath11k_err(ab, "failed to find magic value in %s, file too short: %zu\n",
+ filepath, len);
ret = -EINVAL;
goto err;
}
@@ -214,8 +309,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
/* magic is padded to 4 bytes */
magic_len = ALIGN(magic_len, 4);
if (len < magic_len) {
- ath11k_err(ab, "failed: %s/%s too small to contain board data, len: %zu\n",
- ab->hw_params.fw.dir, filename, len);
+ ath11k_err(ab, "failed: %s too small to contain board data, len: %zu\n",
+ filepath, len);
ret = -EINVAL;
goto err;
}
@@ -263,8 +358,8 @@ static int ath11k_core_fetch_board_data_api_n(struct ath11k_base *ab,
out:
if (!bd->data || !bd->len) {
ath11k_err(ab,
- "failed to fetch board data for %s from %s/%s\n",
- boardname, ab->hw_params.fw.dir, filename);
+ "failed to fetch board data for %s from %s\n",
+ boardname, filepath);
ret = -ENODATA;
goto err;
}
@@ -279,9 +374,7 @@ err:
static int ath11k_core_fetch_board_data_api_1(struct ath11k_base *ab,
struct ath11k_board_data *bd)
{
- bd->fw = ath11k_fetch_fw_file(ab,
- ab->hw_params.fw.dir,
- ATH11K_DEFAULT_BOARD_FILE);
+ bd->fw = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_BOARD_FILE);
if (IS_ERR(bd->fw))
return PTR_ERR(bd->fw);
@@ -325,6 +418,7 @@ static void ath11k_core_stop(struct ath11k_base *ab)
{
if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath11k_qmi_firmware_stop(ab);
+
ath11k_hif_stop(ab);
ath11k_wmi_detach(ab);
ath11k_dp_pdev_reo_cleanup(ab);
@@ -342,7 +436,7 @@ static int ath11k_core_soc_create(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_debug_soc_create(ab);
+ ret = ath11k_debugfs_soc_create(ab);
if (ret) {
ath11k_err(ab, "failed to create ath11k debugfs\n");
goto err_qmi_deinit;
@@ -357,7 +451,7 @@ static int ath11k_core_soc_create(struct ath11k_base *ab)
return 0;
err_debugfs_reg:
- ath11k_debug_soc_destroy(ab);
+ ath11k_debugfs_soc_destroy(ab);
err_qmi_deinit:
ath11k_qmi_deinit_service(ab);
return ret;
@@ -365,7 +459,7 @@ err_qmi_deinit:
static void ath11k_core_soc_destroy(struct ath11k_base *ab)
{
- ath11k_debug_soc_destroy(ab);
+ ath11k_debugfs_soc_destroy(ab);
ath11k_dp_free(ab);
ath11k_reg_free(ab);
ath11k_qmi_deinit_service(ab);
@@ -375,7 +469,7 @@ static int ath11k_core_pdev_create(struct ath11k_base *ab)
{
int ret;
- ret = ath11k_debug_pdev_create(ab);
+ ret = ath11k_debugfs_pdev_create(ab);
if (ret) {
ath11k_err(ab, "failed to create core pdev debugfs: %d\n", ret);
return ret;
@@ -415,7 +509,7 @@ err_dp_pdev_free:
err_mac_unregister:
ath11k_mac_unregister(ab);
err_pdev_debug:
- ath11k_debug_pdev_destroy(ab);
+ ath11k_debugfs_pdev_destroy(ab);
return ret;
}
@@ -427,7 +521,7 @@ static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
ath11k_mac_unregister(ab);
ath11k_hif_irq_disable(ab);
ath11k_dp_pdev_free(ab);
- ath11k_debug_pdev_destroy(ab);
+ ath11k_debugfs_pdev_destroy(ab);
}
static int ath11k_core_start(struct ath11k_base *ab,
@@ -557,6 +651,23 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
return ret;
}
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+
mutex_lock(&ab->core_lock);
ret = ath11k_core_start(ab, ATH11K_FIRMWARE_MODE_NORMAL);
if (ret) {
@@ -706,7 +817,7 @@ static void ath11k_core_restart(struct work_struct *work)
break;
case ATH11K_STATE_RESTARTED:
ar->state = ATH11K_STATE_WEDGED;
- /* fall through */
+ fallthrough;
case ATH11K_STATE_WEDGED:
ath11k_warn(ab,
"device is wedged, will not restart radio %d\n", i);
@@ -717,25 +828,47 @@ static void ath11k_core_restart(struct work_struct *work)
complete(&ab->driver_recovery);
}
-int ath11k_core_init(struct ath11k_base *ab)
+static int ath11k_init_hw_params(struct ath11k_base *ab)
{
- struct device *dev = ab->dev;
- struct rproc *prproc;
- phandle rproc_phandle;
- int ret;
+ const struct ath11k_hw_params *hw_params = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ath11k_hw_params); i++) {
+ hw_params = &ath11k_hw_params[i];
- if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) {
- ath11k_err(ab, "failed to get q6_rproc handle\n");
- return -ENOENT;
+ if (hw_params->hw_rev == ab->hw_rev)
+ break;
}
- prproc = rproc_get_by_phandle(rproc_phandle);
- if (!prproc) {
- ath11k_err(ab, "failed to get rproc\n");
+ if (i == ARRAY_SIZE(ath11k_hw_params)) {
+ ath11k_err(ab, "Unsupported hardware version: 0x%x\n", ab->hw_rev);
return -EINVAL;
}
- ab->tgt_rproc = prproc;
- ab->hw_params = ath11k_hw_params;
+
+ ab->hw_params = *hw_params;
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "Hardware name %s\n", ab->hw_params.name);
+
+ return 0;
+}
+
+int ath11k_core_pre_init(struct ath11k_base *ab)
+{
+ int ret;
+
+ ret = ath11k_init_hw_params(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to get hw params: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_pre_init);
+
+int ath11k_core_init(struct ath11k_base *ab)
+{
+ int ret;
ret = ath11k_core_soc_create(ab);
if (ret) {
@@ -745,6 +878,7 @@ int ath11k_core_init(struct ath11k_base *ab)
return 0;
}
+EXPORT_SYMBOL(ath11k_core_init);
void ath11k_core_deinit(struct ath11k_base *ab)
{
@@ -759,14 +893,17 @@ void ath11k_core_deinit(struct ath11k_base *ab)
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
}
+EXPORT_SYMBOL(ath11k_core_deinit);
void ath11k_core_free(struct ath11k_base *ab)
{
kfree(ab);
}
+EXPORT_SYMBOL(ath11k_core_free);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus)
+ enum ath11k_bus bus,
+ const struct ath11k_bus_params *bus_params)
{
struct ath11k_base *ab;
@@ -789,6 +926,8 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
INIT_WORK(&ab->restart_work, ath11k_core_restart);
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
ab->dev = dev;
+ ab->bus_params = *bus_params;
+ ab->hif.bus = bus;
return ab;
@@ -796,3 +935,7 @@ err_sc_free:
kfree(ab);
return NULL;
}
+EXPORT_SYMBOL(ath11k_core_alloc);
+
+MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11ax wireless LAN cards.");
+MODULE_LICENSE("Dual BSD/GPL");
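ath11k_init_hw_params() above replaces the single hard-coded ipq8074 parameter block with a table scan keyed on ab->hw_rev, failing probe for unknown revisions. A minimal sketch of that lookup, with hypothetical my_* names:

#include <linux/kernel.h>	/* ARRAY_SIZE() */

enum my_hw_rev {
	MY_HW_A,
	MY_HW_B,
};

struct my_hw_params {
	enum my_hw_rev hw_rev;
	const char *name;
};

static const struct my_hw_params my_hw_params_list[] = {
	{ .hw_rev = MY_HW_A, .name = "chip A" },
	{ .hw_rev = MY_HW_B, .name = "chip B" },
};

/* returns the matching entry, or NULL for an unsupported revision */
static const struct my_hw_params *my_find_hw_params(enum my_hw_rev rev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(my_hw_params_list); i++)
		if (my_hw_params_list[i].hw_rev == rev)
			return &my_hw_params_list[i];

	return NULL;
}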
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index e5c4e19020ee..18b97420f0d8 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -35,6 +35,10 @@
#define ATH11K_INVALID_HW_MAC_ID 0xFF
+extern unsigned int ath11k_frame_mode;
+
+#define ATH11K_MON_TIMER_INTERVAL 10
+
enum ath11k_supported_bw {
ATH11K_BW_20 = 0,
ATH11K_BW_40 = 1,
@@ -54,6 +58,13 @@ enum wme_ac {
#define ATH11K_VHT_MCS_MAX 9
#define ATH11K_HE_MCS_MAX 11
+enum ath11k_crypt_mode {
+ /* Only use hardware crypto engine */
+ ATH11K_CRYPT_MODE_HW,
+ /* Only use software crypto */
+ ATH11K_CRYPT_MODE_SW,
+};
+
static inline enum wme_ac ath11k_tid_to_ac(u32 tid)
{
return (((tid == 0) || (tid == 3)) ? WME_AC_BE :
@@ -90,6 +101,8 @@ struct ath11k_skb_rxcb {
enum ath11k_hw_rev {
ATH11K_HW_IPQ8074,
+ ATH11K_HW_QCA6390_HW20,
+ ATH11K_HW_IPQ6018_HW10,
};
enum ath11k_firmware_mode {
@@ -101,18 +114,8 @@ enum ath11k_firmware_mode {
};
#define ATH11K_IRQ_NUM_MAX 52
-#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
#define ATH11K_EXT_IRQ_NUM_MAX 16
-extern const u8 ath11k_reo_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_tx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rx_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rx_err_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rx_wbm_rel_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_rxdma2host_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 ath11k_host2rxdma_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-extern const u8 rx_mon_status_ring_mask[ATH11K_EXT_IRQ_GRP_NUM_MAX];
-
struct ath11k_ext_irq_grp {
struct ath11k_base *ab;
u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
@@ -226,6 +229,7 @@ struct ath11k_vif {
int txpower;
bool rsnie_present;
bool wpaie_present;
+ struct ieee80211_chanctx_conf chanctx;
};
struct ath11k_vif_iter {
@@ -554,6 +558,7 @@ struct ath11k {
};
struct ath11k_band_cap {
+ u32 phy_id;
u32 max_bw_supported;
u32 ht_cap_info;
u32 he_cap_info[2];
@@ -589,6 +594,13 @@ struct ath11k_board_data {
size_t len;
};
+struct ath11k_bus_params {
+ bool mhi_support;
+ bool m3_fw_support;
+ bool fixed_bdf_addr;
+ bool fixed_mem_region;
+};
+
/* IPQ8074 HW channel counters frequency value in hertz */
#define IPQ8074_CC_FREQ_HERTZ 320000
@@ -638,7 +650,6 @@ struct ath11k_base {
struct ath11k_qmi qmi;
struct ath11k_wmi_base wmi_ab;
struct completion fw_ready;
- struct rproc *tgt_rproc;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
@@ -651,6 +662,7 @@ struct ath11k_base {
unsigned long mem_len;
struct {
+ enum ath11k_bus bus;
const struct ath11k_hif_ops *ops;
} hif;
@@ -677,7 +689,10 @@ struct ath11k_base {
u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
bool pdevs_macaddr_valid;
int bd_api;
+
struct ath11k_hw_params hw_params;
+ struct ath11k_bus_params bus_params;
+
const struct firmware *cal_file;
/* Below regd's are protected by ab->data_lock */
@@ -714,6 +729,7 @@ struct ath11k_base {
struct ath11k_dbring_cap *db_caps;
u32 num_db_cap;
+ struct timer_list mon_reap_timer;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
@@ -841,6 +857,13 @@ struct ath11k_fw_stats_bcn {
u32 tx_bcn_outage_cnt;
};
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[];
+
+extern const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[];
+extern const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[];
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id);
void ath11k_peer_map_event(struct ath11k_base *ab, u8 vdev_id, u16 peer_id,
u8 *mac_addr, u16 ast_hash);
@@ -850,17 +873,21 @@ struct ath11k_peer *ath11k_peer_find_by_addr(struct ath11k_base *ab,
const u8 *addr);
struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab, int peer_id);
int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab);
+int ath11k_core_pre_init(struct ath11k_base *ab);
int ath11k_core_init(struct ath11k_base *ath11k);
void ath11k_core_deinit(struct ath11k_base *ath11k);
struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
- enum ath11k_bus bus);
+ enum ath11k_bus bus,
+ const struct ath11k_bus_params *bus_params);
void ath11k_core_free(struct ath11k_base *ath11k);
int ath11k_core_fetch_bdf(struct ath11k_base *ath11k,
struct ath11k_board_data *bd);
void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
void ath11k_core_halt(struct ath11k *ar);
-u8 ath11k_core_get_hw_mac_id(struct ath11k_base *ab, int pdev_idx);
+
+const struct firmware *ath11k_core_firmware_request(struct ath11k_base *ab,
+ const char *filename);
static inline const char *ath11k_scan_state_str(enum ath11k_scan_state state)
{
@@ -894,4 +921,30 @@ static inline struct ath11k_vif *ath11k_vif_to_arvif(struct ieee80211_vif *vif)
return (struct ath11k_vif *)vif->drv_priv;
}
+static inline struct ath11k *ath11k_ab_to_ar(struct ath11k_base *ab,
+ int mac_id)
+{
+ return ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+}
+
+static inline void ath11k_core_create_firmware_path(struct ath11k_base *ab,
+ const char *filename,
+ void *buf, size_t buf_len)
+{
+ snprintf(buf, buf_len, "%s/%s/%s", ATH11K_FW_DIR,
+ ab->hw_params.fw.dir, filename);
+}
+
+static inline const char *ath11k_bus_str(enum ath11k_bus bus)
+{
+ switch (bus) {
+ case ATH11K_BUS_PCI:
+ return "pci";
+ case ATH11K_BUS_AHB:
+ return "ahb";
+ }
+
+ return "unknown";
+}
+
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index cf20db370123..5e1f5437b418 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -168,7 +168,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
ring->bufs_max = ring->refill_srng.size /
- ath11k_hal_srng_get_entrysize(HAL_RXDMA_DIR_BUF);
+ ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
ring->buf_sz = db_cap->min_buf_sz;
ring->buf_align = db_cap->min_buf_align;
diff --git a/drivers/net/wireless/ath/ath11k/debug.c b/drivers/net/wireless/ath/ath11k/debug.c
index 62a1aa0565a9..c86de95fbdc5 100644
--- a/drivers/net/wireless/ath/ath11k/debug.c
+++ b/drivers/net/wireless/ath/ath11k/debug.c
@@ -6,48 +6,6 @@
#include <linux/vmalloc.h>
#include "core.h"
#include "debug.h"
-#include "wmi.h"
-#include "hal_rx.h"
-#include "dp_tx.h"
-#include "debug_htt_stats.h"
-#include "peer.h"
-
-static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
- "REO2SW1_RING",
- "REO2SW2_RING",
- "REO2SW3_RING",
- "REO2SW4_RING",
- "WBM2REO_LINK_RING",
- "REO2TCL_RING",
- "REO2FW_RING",
- "RELEASE_RING",
- "PPE_RELEASE_RING",
- "TCL2TQM_RING",
- "TQM_RELEASE_RING",
- "REO_RELEASE_RING",
- "WBM2SW0_RELEASE_RING",
- "WBM2SW1_RELEASE_RING",
- "WBM2SW2_RELEASE_RING",
- "WBM2SW3_RELEASE_RING",
- "REO_CMD_RING",
- "REO_STATUS_RING",
-};
-
-static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
- "FW2RXDMA_BUF_RING",
- "FW2RXDMA_STATUS_RING",
- "FW2RXDMA_LINK_RING",
- "SW2RXDMA_BUF_RING",
- "WBM2RXDMA_LINK_RING",
- "RXDMA2FW_RING",
- "RXDMA2SW_RING",
- "RXDMA2RELEASE_RING",
- "RXDMA2REO_RING",
- "MONITOR_STATUS_RING",
- "MONITOR_BUF_RING",
- "MONITOR_DESC_RING",
- "MONITOR_DEST_RING",
-};
void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
{
@@ -62,6 +20,7 @@ void ath11k_info(struct ath11k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath11k_info);
void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
{
@@ -76,6 +35,7 @@ void ath11k_err(struct ath11k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath11k_err);
void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
{
@@ -90,8 +50,10 @@ void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...)
/* TODO: Trace the log */
va_end(args);
}
+EXPORT_SYMBOL(ath11k_warn);
#ifdef CONFIG_ATH11K_DEBUG
+
void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
const char *fmt, ...)
{
@@ -110,6 +72,7 @@ void __ath11k_dbg(struct ath11k_base *ab, enum ath11k_debug_mask mask,
va_end(args);
}
+EXPORT_SYMBOL(__ath11k_dbg);
void ath11k_dbg_dump(struct ath11k_base *ab,
enum ath11k_debug_mask mask,
@@ -138,1059 +101,6 @@ void ath11k_dbg_dump(struct ath11k_base *ab,
}
}
}
+EXPORT_SYMBOL(ath11k_dbg_dump);
-#endif
-
-#ifdef CONFIG_ATH11K_DEBUGFS
-static void ath11k_fw_stats_pdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_pdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_vdevs_free(struct list_head *head)
-{
- struct ath11k_fw_stats_vdev *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_fw_stats_bcn_free(struct list_head *head)
-{
- struct ath11k_fw_stats_bcn *i, *tmp;
-
- list_for_each_entry_safe(i, tmp, head, list) {
- list_del(&i->list);
- kfree(i);
- }
-}
-
-static void ath11k_debug_fw_stats_reset(struct ath11k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->debug.fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
- spin_unlock_bh(&ar->data_lock);
-}
-
-void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
-{
- struct ath11k_fw_stats stats = {};
- struct ath11k *ar;
- struct ath11k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i, ret;
-
- INIT_LIST_HEAD(&stats.pdevs);
- INIT_LIST_HEAD(&stats.vdevs);
- INIT_LIST_HEAD(&stats.bcn);
-
- ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
- if (ret) {
- ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
- goto free;
- }
-
- rcu_read_lock();
- ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
- if (!ar) {
- rcu_read_unlock();
- ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
- stats.pdev_id, ret);
- goto free;
- }
-
- spin_lock_bh(&ar->data_lock);
-
- if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
- list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
- ar->debug.fw_stats_done = true;
- goto complete;
- }
-
- if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats.vdevs)) {
- ath11k_warn(ab, "empty vdev stats");
- goto complete;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += ar->num_started_vdevs;
- }
-
- is_end = ((++num_vdev) == total_vdevs_started);
-
- list_splice_tail_init(&stats.vdevs,
- &ar->debug.fw_stats.vdevs);
-
- if (is_end) {
- ar->debug.fw_stats_done = true;
- num_vdev = 0;
- }
- goto complete;
- }
-
- if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
- if (list_empty(&stats.bcn)) {
- ath11k_warn(ab, "empty bcn stats");
- goto complete;
- }
- /* Mark end until we reached the count of all started VDEVs
- * within the PDEV
- */
- is_end = ((++num_bcn) == ar->num_started_vdevs);
-
- list_splice_tail_init(&stats.bcn,
- &ar->debug.fw_stats.bcn);
-
- if (is_end) {
- ar->debug.fw_stats_done = true;
- num_bcn = 0;
- }
- }
-complete:
- complete(&ar->debug.fw_stats_complete);
- rcu_read_unlock();
- spin_unlock_bh(&ar->data_lock);
-
-free:
- ath11k_fw_stats_pdevs_free(&stats.pdevs);
- ath11k_fw_stats_vdevs_free(&stats.vdevs);
- ath11k_fw_stats_bcn_free(&stats.bcn);
-}
-
-static int ath11k_debug_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + msecs_to_jiffies(3 * HZ);
-
- ath11k_debug_fw_stats_reset(ar);
-
- reinit_completion(&ar->debug.fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
-
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left =
- wait_for_completion_timeout(&ar->debug.fw_stats_complete,
- 1 * HZ);
- if (!time_left)
- return -ETIMEDOUT;
-
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-
- spin_lock_bh(&ar->data_lock);
- if (ar->debug.fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
- }
- return 0;
-}
-
-static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
-{
- struct ath11k *ar = inode->i_private;
- struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param;
- void *buf = NULL;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
- if (!buf) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- req_param.pdev_id = ar->pdev->pdev_id;
- req_param.vdev_id = 0;
- req_param.stats_id = WMI_REQUEST_PDEV_STAT;
-
- ret = ath11k_debug_fw_stats_request(ar, &req_param);
- if (ret) {
- ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
- goto err_free;
- }
-
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
-
- file->private_data = buf;
-
- mutex_unlock(&ar->conf_mutex);
- return 0;
-
-err_free:
- vfree(buf);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static ssize_t ath11k_read_pdev_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char *buf = file->private_data;
- size_t len = strlen(buf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_pdev_stats = {
- .open = ath11k_open_pdev_stats,
- .release = ath11k_release_pdev_stats,
- .read = ath11k_read_pdev_stats,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
-{
- struct ath11k *ar = inode->i_private;
- struct stats_request_params req_param;
- void *buf = NULL;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
- if (!buf) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- req_param.pdev_id = ar->pdev->pdev_id;
- /* VDEV stats is always sent for all active VDEVs from FW */
- req_param.vdev_id = 0;
- req_param.stats_id = WMI_REQUEST_VDEV_STAT;
-
- ret = ath11k_debug_fw_stats_request(ar, &req_param);
- if (ret) {
- ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
- goto err_free;
- }
-
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
-
- file->private_data = buf;
-
- mutex_unlock(&ar->conf_mutex);
- return 0;
-
-err_free:
- vfree(buf);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static ssize_t ath11k_read_vdev_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char *buf = file->private_data;
- size_t len = strlen(buf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_vdev_stats = {
- .open = ath11k_open_vdev_stats,
- .release = ath11k_release_vdev_stats,
- .read = ath11k_read_vdev_stats,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
-{
- struct ath11k *ar = inode->i_private;
- struct ath11k_vif *arvif;
- struct stats_request_params req_param;
- void *buf = NULL;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
- if (!buf) {
- ret = -ENOMEM;
- goto err_unlock;
- }
-
- req_param.stats_id = WMI_REQUEST_BCN_STAT;
- req_param.pdev_id = ar->pdev->pdev_id;
-
- /* loop all active VDEVs for bcn stats */
- list_for_each_entry(arvif, &ar->arvifs, list) {
- if (!arvif->is_up)
- continue;
-
- req_param.vdev_id = arvif->vdev_id;
- ret = ath11k_debug_fw_stats_request(ar, &req_param);
- if (ret) {
- ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
- goto err_free;
- }
- }
-
- ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
- buf);
-
- /* since beacon stats request is looped for all active VDEVs, saved fw
- * stats is not freed for each request until done for all active VDEVs
- */
- spin_lock_bh(&ar->data_lock);
- ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
- spin_unlock_bh(&ar->data_lock);
-
- file->private_data = buf;
-
- mutex_unlock(&ar->conf_mutex);
- return 0;
-
-err_free:
- vfree(buf);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
-{
- vfree(file->private_data);
-
- return 0;
-}
-
-static ssize_t ath11k_read_bcn_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char *buf = file->private_data;
- size_t len = strlen(buf);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_bcn_stats = {
- .open = ath11k_open_bcn_stats,
- .release = ath11k_release_bcn_stats,
- .read = ath11k_read_bcn_stats,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- const char buf[] =
- "To simulate firmware crash write one of the keywords to this file:\n"
- "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
- "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
-}
-
-/* Simulate firmware crash:
- * 'soft': Call wmi command causing firmware hang. This firmware hang is
- * recoverable by warm firmware reset.
- * 'hard': Force firmware crash by setting any vdev parameter for not allowed
- * vdev id. This is hard firmware crash because it is recoverable only by cold
- * firmware reset.
- */
-static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath11k_base *ab = file->private_data;
- struct ath11k_pdev *pdev;
- struct ath11k *ar = ab->pdevs[0].ar;
- char buf[32] = {0};
- ssize_t rc;
- int i, ret, radioup = 0;
-
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (ar && ar->state == ATH11K_STATE_ON) {
- radioup = 1;
- break;
- }
- }
- /* filter partial writes and invalid commands */
- if (*ppos != 0 || count >= sizeof(buf) || count == 0)
- return -EINVAL;
-
- rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
- if (rc < 0)
- return rc;
-
- /* drop the possible '\n' from the end */
- if (buf[*ppos - 1] == '\n')
- buf[*ppos - 1] = '\0';
-
- if (radioup == 0) {
- ret = -ENETDOWN;
- goto exit;
- }
-
- if (!strcmp(buf, "assert")) {
- ath11k_info(ab, "simulating firmware assert crash\n");
- ret = ath11k_wmi_force_fw_hang_cmd(ar,
- ATH11K_WMI_FW_HANG_ASSERT_TYPE,
- ATH11K_WMI_FW_HANG_DELAY);
- } else {
- ret = -EINVAL;
- goto exit;
- }
-
- if (ret) {
- ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
- goto exit;
- }
-
- ret = count;
-
-exit:
- return ret;
-}
-
-static const struct file_operations fops_simulate_fw_crash = {
- .read = ath11k_read_simulate_fw_crash,
- .write = ath11k_write_simulate_fw_crash,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
- const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- u32 filter;
- int ret;
-
- if (kstrtouint_from_user(ubuf, count, 0, &filter))
- return -EINVAL;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto out;
- }
-
- if (filter == ar->debug.extd_tx_stats) {
- ret = count;
- goto out;
- }
-
- ar->debug.extd_tx_stats = filter;
- ret = count;
-
-out:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
- char __user *ubuf,
- size_t count, loff_t *ppos)
-
-{
- char buf[32] = {0};
- struct ath11k *ar = file->private_data;
- int len = 0;
-
- mutex_lock(&ar->conf_mutex);
- len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
- ar->debug.extd_tx_stats);
- mutex_unlock(&ar->conf_mutex);
-
- return simple_read_from_buffer(ubuf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_extd_tx_stats = {
- .read = ath11k_read_enable_extd_tx_stats,
- .write = ath11k_write_enable_extd_tx_stats,
- .open = simple_open
-};
-
-static ssize_t ath11k_write_extd_rx_stats(struct file *file,
- const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
- u32 enable, rx_filter = 0, ring_id;
- int ret;
-
- if (kstrtouint_from_user(ubuf, count, 0, &enable))
- return -EINVAL;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto exit;
- }
-
- if (enable > 1) {
- ret = -EINVAL;
- goto exit;
- }
-
- if (enable == ar->debug.extd_rx_stats) {
- ret = count;
- goto exit;
- }
-
- if (enable) {
- rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
- rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
-
- tlv_filter.rx_filter = rx_filter;
- tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
- tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
- tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
- tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
- HTT_RX_FP_DATA_FILTER_FLASG3;
- } else {
- tlv_filter = ath11k_mac_mon_status_filter_default;
- }
-
- ar->debug.rx_filter = tlv_filter.rx_filter;
-
- ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
-
- if (ret) {
- ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
- goto exit;
- }
-
- ar->debug.extd_rx_stats = enable;
- ret = count;
-exit:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static ssize_t ath11k_read_extd_rx_stats(struct file *file,
- char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- char buf[32];
- int len = 0;
-
- mutex_lock(&ar->conf_mutex);
- len = scnprintf(buf, sizeof(buf) - len, "%d\n",
- ar->debug.extd_rx_stats);
- mutex_unlock(&ar->conf_mutex);
-
- return simple_read_from_buffer(ubuf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_extd_rx_stats = {
- .read = ath11k_read_extd_rx_stats,
- .write = ath11k_write_extd_rx_stats,
- .open = simple_open,
-};
-
-static int ath11k_fill_bp_stats(struct ath11k_base *ab,
- struct ath11k_bp_stats *bp_stats,
- char *buf, int len, int size)
-{
- lockdep_assert_held(&ab->base_lock);
-
- len += scnprintf(buf + len, size - len, "count: %u\n",
- bp_stats->count);
- len += scnprintf(buf + len, size - len, "hp: %u\n",
- bp_stats->hp);
- len += scnprintf(buf + len, size - len, "tp: %u\n",
- bp_stats->tp);
- len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
- jiffies_to_msecs(jiffies - bp_stats->jiffies));
- return len;
-}
-
-static ssize_t ath11k_debug_dump_soc_ring_bp_stats(struct ath11k_base *ab,
- char *buf, int size)
-{
- struct ath11k_bp_stats *bp_stats;
- bool stats_rxd = false;
- u8 i, pdev_idx;
- int len = 0;
-
- len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
- len += scnprintf(buf + len, size - len, "==================\n");
-
- spin_lock_bh(&ab->base_lock);
- for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
- bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
-
- if (!bp_stats->count)
- continue;
-
- len += scnprintf(buf + len, size - len, "Ring: %s\n",
- htt_bp_umac_ring[i]);
- len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
- stats_rxd = true;
- }
-
- for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
- for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
- bp_stats =
- &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
-
- if (!bp_stats->count)
- continue;
-
- len += scnprintf(buf + len, size - len, "Ring: %s\n",
- htt_bp_lmac_ring[i]);
- len += scnprintf(buf + len, size - len, "pdev: %d\n",
- pdev_idx);
- len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
- stats_rxd = true;
- }
- }
- spin_unlock_bh(&ab->base_lock);
-
- if (!stats_rxd)
- len += scnprintf(buf + len, size - len,
- "No Ring Backpressure stats received\n\n");
-
- return len;
-}
-
-static ssize_t ath11k_debug_dump_soc_dp_stats(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath11k_base *ab = file->private_data;
- struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
- int len = 0, i, retval;
- const int size = 4096;
- static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
- "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
- "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
- "AMSDU parse", "SA timeout", "DA timeout",
- "Flow timeout", "Flush req"};
- static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
- "Desc addr zero", "Desc inval", "AMPDU in non BA",
- "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
- "Frame OOR", "BAR OOR", "No BA session",
- "Frame SN equal SSN", "PN check fail", "2k err",
- "PN err", "Desc blocked"};
-
- char *buf;
-
- buf = kzalloc(size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
- len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
- soc_stats->err_ring_pkts);
- len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
- soc_stats->invalid_rbm);
- len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
- for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
- len += scnprintf(buf + len, size - len, "%s: %u\n",
- rxdma_err[i], soc_stats->rxdma_error[i]);
-
- len += scnprintf(buf + len, size - len, "\nREO errors:\n");
- for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
- len += scnprintf(buf + len, size - len, "%s: %u\n",
- reo_err[i], soc_stats->reo_error[i]);
-
- len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
- len += scnprintf(buf + len, size - len,
- "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
- soc_stats->hal_reo_error[0],
- soc_stats->hal_reo_error[1],
- soc_stats->hal_reo_error[2],
- soc_stats->hal_reo_error[3]);
-
- len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
- len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
-
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
- len += scnprintf(buf + len, size - len, "ring%d: %u\n",
- i, soc_stats->tx_err.desc_na[i]);
-
- len += scnprintf(buf + len, size - len,
- "\nMisc Transmit Failures: %d\n",
- atomic_read(&soc_stats->tx_err.misc_fail));
-
- len += ath11k_debug_dump_soc_ring_bp_stats(ab, buf + len, size - len);
-
- if (len > size)
- len = size;
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
-
- return retval;
-}
-
-static const struct file_operations fops_soc_dp_stats = {
- .read = ath11k_debug_dump_soc_dp_stats,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-int ath11k_debug_pdev_create(struct ath11k_base *ab)
-{
- if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
- return 0;
-
- ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
-
- if (IS_ERR_OR_NULL(ab->debugfs_soc)) {
- if (IS_ERR(ab->debugfs_soc))
- return PTR_ERR(ab->debugfs_soc);
- return -ENOMEM;
- }
-
- debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
- &fops_simulate_fw_crash);
-
- debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
- &fops_soc_dp_stats);
-
- return 0;
-}
-
-void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
-{
- debugfs_remove_recursive(ab->debugfs_ath11k);
- ab->debugfs_ath11k = NULL;
-}
-
-int ath11k_debug_soc_create(struct ath11k_base *ab)
-{
- ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
-
- if (IS_ERR_OR_NULL(ab->debugfs_ath11k)) {
- if (IS_ERR(ab->debugfs_ath11k))
- return PTR_ERR(ab->debugfs_ath11k);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-void ath11k_debug_soc_destroy(struct ath11k_base *ab)
-{
- debugfs_remove_recursive(ab->debugfs_soc);
- ab->debugfs_soc = NULL;
-}
-
-void ath11k_debug_fw_stats_init(struct ath11k *ar)
-{
- struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
- ar->debug.debugfs_pdev);
-
- ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
-
- /* all stats debugfs files created are under "fw_stats" directory
- * created per PDEV
- */
- debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
- &fops_pdev_stats);
- debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
- &fops_vdev_stats);
- debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
- &fops_bcn_stats);
-
- INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
- INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
-
- init_completion(&ar->debug.fw_stats_complete);
-}
-
-static ssize_t ath11k_write_pktlog_filter(struct file *file,
- const char __user *ubuf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- struct htt_rx_ring_tlv_filter tlv_filter = {0};
- u32 rx_filter = 0, ring_id, filter, mode;
- u8 buf[128] = {0};
- int ret;
- ssize_t rc;
-
- mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto out;
- }
-
- rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
- if (rc < 0) {
- ret = rc;
- goto out;
- }
- buf[rc] = '\0';
-
- ret = sscanf(buf, "0x%x %u", &filter, &mode);
- if (ret != 2) {
- ret = -EINVAL;
- goto out;
- }
-
- if (filter) {
- ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
- if (ret) {
- ath11k_warn(ar->ab,
- "failed to enable pktlog filter %x: %d\n",
- ar->debug.pktlog_filter, ret);
- goto out;
- }
- } else {
- ret = ath11k_wmi_pdev_pktlog_disable(ar);
- if (ret) {
- ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
- goto out;
- }
- }
-
-#define HTT_RX_FILTER_TLV_LITE_MODE \
- (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
- HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
- HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
-
- if (mode == ATH11K_PKTLOG_MODE_FULL) {
- rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
- HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
- HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
- HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
- HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
- HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
- } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
- ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
- HTT_PPDU_STATS_TAG_PKTLOG);
- if (ret) {
- ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
- goto out;
- }
-
- rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
- } else {
- ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
- HTT_PPDU_STATS_TAG_DEFAULT);
- if (ret) {
- ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
- ret);
- goto out;
- }
- }
-
- tlv_filter.rx_filter = rx_filter;
- if (rx_filter) {
- tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
- tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
- tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
- tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
- HTT_RX_FP_DATA_FILTER_FLASG3;
- }
-
- ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
- if (ret) {
- ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
- goto out;
- }
-
- ath11k_dbg(ar->ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
- filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
-
- ar->debug.pktlog_filter = filter;
- ar->debug.pktlog_mode = mode;
- ret = count;
-
-out:
- mutex_unlock(&ar->conf_mutex);
- return ret;
-}
-
-static ssize_t ath11k_read_pktlog_filter(struct file *file,
- char __user *ubuf,
- size_t count, loff_t *ppos)
-
-{
- char buf[32] = {0};
- struct ath11k *ar = file->private_data;
- int len = 0;
-
- mutex_lock(&ar->conf_mutex);
- len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n",
- ar->debug.pktlog_filter,
- ar->debug.pktlog_mode);
- mutex_unlock(&ar->conf_mutex);
-
- return simple_read_from_buffer(ubuf, count, ppos, buf, len);
-}
-
-static const struct file_operations fops_pktlog_filter = {
- .read = ath11k_read_pktlog_filter,
- .write = ath11k_write_pktlog_filter,
- .open = simple_open
-};
-
-static ssize_t ath11k_write_simulate_radar(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath11k *ar = file->private_data;
- int ret;
-
- ret = ath11k_wmi_simulate_radar(ar);
- if (ret)
- return ret;
-
- return count;
-}
-
-static const struct file_operations fops_simulate_radar = {
- .write = ath11k_write_simulate_radar,
- .open = simple_open
-};
-
-int ath11k_debug_register(struct ath11k *ar)
-{
- struct ath11k_base *ab = ar->ab;
- char pdev_name[5];
- char buf[100] = {0};
-
- snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
-
- ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
-
- if (IS_ERR_OR_NULL(ar->debug.debugfs_pdev)) {
- if (IS_ERR(ar->debug.debugfs_pdev))
- return PTR_ERR(ar->debug.debugfs_pdev);
-
- return -ENOMEM;
- }
-
- /* Create a symlink under ieee80211/phy* */
- snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev);
- debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
-
- ath11k_debug_htt_stats_init(ar);
-
- ath11k_debug_fw_stats_init(ar);
-
- debugfs_create_file("ext_tx_stats", 0644,
- ar->debug.debugfs_pdev, ar,
- &fops_extd_tx_stats);
- debugfs_create_file("ext_rx_stats", 0644,
- ar->debug.debugfs_pdev, ar,
- &fops_extd_rx_stats);
- debugfs_create_file("pktlog_filter", 0644,
- ar->debug.debugfs_pdev, ar,
- &fops_pktlog_filter);
-
- if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
- debugfs_create_file("dfs_simulate_radar", 0200,
- ar->debug.debugfs_pdev, ar,
- &fops_simulate_radar);
- debugfs_create_bool("dfs_block_radar_events", 0200,
- ar->debug.debugfs_pdev,
- &ar->dfs_block_radar_events);
- }
-
- return 0;
-}
-
-void ath11k_debug_unregister(struct ath11k *ar)
-{
-}
-#endif /* CONFIG_ATH11K_DEBUGFS */
+#endif /* CONFIG_ATH11K_DEBUG */
diff --git a/drivers/net/wireless/ath/ath11k/debug.h b/drivers/net/wireless/ath/ath11k/debug.h
index c30085406bfb..659a275e2eb3 100644
--- a/drivers/net/wireless/ath/ath11k/debug.h
+++ b/drivers/net/wireless/ath/ath11k/debug.h
@@ -6,11 +6,8 @@
#ifndef _ATH11K_DEBUG_H_
#define _ATH11K_DEBUG_H_
-#include "hal_tx.h"
#include "trace.h"
-
-#define ATH11K_TX_POWER_MAX_VAL 70
-#define ATH11K_TX_POWER_MIN_VAL 0
+#include "debugfs.h"
enum ath11k_debug_mask {
ATH11K_DBG_AHB = 0x00000001,
@@ -25,101 +22,12 @@ enum ath11k_debug_mask {
ATH11K_DBG_REG = 0x00000200,
ATH11K_DBG_TESTMODE = 0x00000400,
ATH11k_DBG_HAL = 0x00000800,
+	ATH11K_DBG_PCI = 0x00001000,
+	ATH11K_DBG_DP_TX = 0x00002000,
+	ATH11K_DBG_DP_RX = 0x00004000,
ATH11K_DBG_ANY = 0xffffffff,
};
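+/* The values above are individual mask bits and may be OR'ed together,
+ * e.g. 0x42 would select ATH11K_DBG_QMI and ATH11K_DBG_WMI (a hypothetical
+ * value for the driver's debug_mask module parameter, which feeds the
+ * ath11k_dbg() macro below).
+ */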
-/* htt_dbg_ext_stats_type */
-enum ath11k_dbg_htt_ext_stats_type {
- ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
- ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
- ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
- ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
- ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
- ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
- ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
- ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
- ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
- ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
- ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
- ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
- ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
- ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
- ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
-
- /* keep this last */
- ATH11K_DBG_HTT_NUM_EXT_STATS,
-};
-
-struct debug_htt_stats_req {
- bool done;
- u8 pdev_id;
- u8 type;
- u8 peer_addr[ETH_ALEN];
- struct completion cmpln;
- u32 buf_len;
- u8 buf[];
-};
-
-struct ath_pktlog_hdr {
- u16 flags;
- u16 missed_cnt;
- u16 log_type;
- u16 size;
- u32 timestamp;
- u32 type_specific_data;
- u8 payload[];
-};
-
-#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
-
-#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
-#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
-
-enum ath11k_pktlog_filter {
- ATH11K_PKTLOG_RX = 0x000000001,
- ATH11K_PKTLOG_TX = 0x000000002,
- ATH11K_PKTLOG_RCFIND = 0x000000004,
- ATH11K_PKTLOG_RCUPDATE = 0x000000008,
- ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
- ATH11K_PKTLOG_EVENT_SW = 0x000000040,
- ATH11K_PKTLOG_ANY = 0x00000006f,
-};
-
-enum ath11k_pktlog_mode {
- ATH11K_PKTLOG_MODE_LITE = 1,
- ATH11K_PKTLOG_MODE_FULL = 2,
-};
-
-enum ath11k_pktlog_enum {
- ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
- ATH11K_PKTLOG_TYPE_TX_STAT = 2,
- ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
- ATH11K_PKTLOG_TYPE_RX_STAT = 5,
- ATH11K_PKTLOG_TYPE_RC_FIND = 6,
- ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
- ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
- ATH11K_PKTLOG_TYPE_RX_CBF = 10,
- ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
- ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
- ATH11K_PKTLOG_TYPE_LITE_RX = 24,
-};
-
-enum ath11k_dbg_aggr_mode {
- ATH11K_DBG_AGGR_MODE_AUTO,
- ATH11K_DBG_AGGR_MODE_MANUAL,
- ATH11K_DBG_AGGR_MODE_MAX,
-};
-
__printf(2, 3) void ath11k_info(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_err(struct ath11k_base *ab, const char *fmt, ...);
__printf(2, 3) void ath11k_warn(struct ath11k_base *ab, const char *fmt, ...);
@@ -150,153 +58,6 @@ static inline void ath11k_dbg_dump(struct ath11k_base *ab,
}
#endif /* CONFIG_ATH11K_DEBUG */
-#ifdef CONFIG_ATH11K_DEBUGFS
-int ath11k_debug_soc_create(struct ath11k_base *ab);
-void ath11k_debug_soc_destroy(struct ath11k_base *ab);
-int ath11k_debug_pdev_create(struct ath11k_base *ab);
-void ath11k_debug_pdev_destroy(struct ath11k_base *ab);
-int ath11k_debug_register(struct ath11k *ar);
-void ath11k_debug_unregister(struct ath11k *ar);
-void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
- struct sk_buff *skb);
-void ath11k_debug_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
-
-void ath11k_debug_fw_stats_init(struct ath11k *ar);
-int ath11k_dbg_htt_stats_req(struct ath11k *ar);
-
-static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
-{
- return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
-}
-
-static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
-{
- return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
-}
-
-static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
-{
- return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
- ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
-}
-
-static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
-{
- return ar->debug.extd_tx_stats;
-}
-
-static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
-{
- return ar->debug.extd_rx_stats;
-}
-
-static inline int ath11k_debug_rx_filter(struct ath11k *ar)
-{
- return ar->debug.rx_filter;
-}
-
-void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct dentry *dir);
-void
-ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
- struct ath11k_per_peer_tx_stats *peer_stats,
- u8 legacy_rate_idx);
-void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts);
-#else
-static inline int ath11k_debug_soc_create(struct ath11k_base *ab)
-{
- return 0;
-}
-
-static inline void ath11k_debug_soc_destroy(struct ath11k_base *ab)
-{
-}
-
-static inline int ath11k_debug_pdev_create(struct ath11k_base *ab)
-{
- return 0;
-}
-
-static inline void ath11k_debug_pdev_destroy(struct ath11k_base *ab)
-{
-}
-
-static inline int ath11k_debug_register(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline void ath11k_debug_unregister(struct ath11k *ar)
-{
-}
-
-static inline void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
-}
-
-static inline void ath11k_debug_fw_stats_process(struct ath11k_base *ab,
- struct sk_buff *skb)
-{
-}
-
-static inline void ath11k_debug_fw_stats_init(struct ath11k *ar)
-{
-}
-
-static inline int ath11k_debug_is_extd_tx_stats_enabled(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline int ath11k_debug_is_extd_rx_stats_enabled(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline int ath11k_dbg_htt_stats_req(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline bool ath11k_debug_is_pktlog_lite_mode_enabled(struct ath11k *ar)
-{
- return false;
-}
-
-static inline bool ath11k_debug_is_pktlog_rx_stats_enabled(struct ath11k *ar)
-{
- return false;
-}
-
-static inline bool ath11k_debug_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
-{
- return false;
-}
-
-static inline int ath11k_debug_rx_filter(struct ath11k *ar)
-{
- return 0;
-}
-
-static inline void
-ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
- struct ath11k_per_peer_tx_stats *peer_stats,
- u8 legacy_rate_idx)
-{
-}
-
-static inline void
-ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts)
-{
-}
-
-#endif /* CONFIG_MAC80211_DEBUGFS*/
-
#define ath11k_dbg(ar, dbg_mask, fmt, ...) \
do { \
if (ath11k_debug_mask & dbg_mask) \
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
new file mode 100644
index 000000000000..1b914e67d314
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -0,0 +1,1097 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include "debugfs.h"
+
+#include "core.h"
+#include "debug.h"
+#include "wmi.h"
+#include "hal_rx.h"
+#include "dp_tx.h"
+#include "debugfs_htt_stats.h"
+#include "peer.h"
+
+static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
+ "REO2SW1_RING",
+ "REO2SW2_RING",
+ "REO2SW3_RING",
+ "REO2SW4_RING",
+ "WBM2REO_LINK_RING",
+ "REO2TCL_RING",
+ "REO2FW_RING",
+ "RELEASE_RING",
+ "PPE_RELEASE_RING",
+ "TCL2TQM_RING",
+ "TQM_RELEASE_RING",
+ "REO_RELEASE_RING",
+ "WBM2SW0_RELEASE_RING",
+ "WBM2SW1_RELEASE_RING",
+ "WBM2SW2_RELEASE_RING",
+ "WBM2SW3_RELEASE_RING",
+ "REO_CMD_RING",
+ "REO_STATUS_RING",
+};
+
+static const char *htt_bp_lmac_ring[HTT_SW_LMAC_RING_IDX_MAX] = {
+ "FW2RXDMA_BUF_RING",
+ "FW2RXDMA_STATUS_RING",
+ "FW2RXDMA_LINK_RING",
+ "SW2RXDMA_BUF_RING",
+ "WBM2RXDMA_LINK_RING",
+ "RXDMA2FW_RING",
+ "RXDMA2SW_RING",
+ "RXDMA2RELEASE_RING",
+ "RXDMA2REO_RING",
+ "MONITOR_STATUS_RING",
+ "MONITOR_BUF_RING",
+ "MONITOR_DESC_RING",
+ "MONITOR_DEST_RING",
+};
+
+static void ath11k_fw_stats_pdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_pdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_vdevs_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_vdev *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_fw_stats_bcn_free(struct list_head *head)
+{
+ struct ath11k_fw_stats_bcn *i, *tmp;
+
+ list_for_each_entry_safe(i, tmp, head, list) {
+ list_del(&i->list);
+ kfree(i);
+ }
+}
+
+static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ar->debug.fw_stats_done = false;
+ ath11k_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->debug.fw_stats.vdevs);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb)
+{
+ struct ath11k_fw_stats stats = {};
+ struct ath11k *ar;
+ struct ath11k_pdev *pdev;
+ bool is_end;
+ static unsigned int num_vdev, num_bcn;
+ size_t total_vdevs_started = 0;
+ int i, ret;
+
+ INIT_LIST_HEAD(&stats.pdevs);
+ INIT_LIST_HEAD(&stats.vdevs);
+ INIT_LIST_HEAD(&stats.bcn);
+
+ ret = ath11k_wmi_pull_fw_stats(ab, skb, &stats);
+ if (ret) {
+ ath11k_warn(ab, "failed to pull fw stats: %d\n", ret);
+ goto free;
+ }
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+ if (!ar) {
+ rcu_read_unlock();
+ ath11k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+ stats.pdev_id, ret);
+ goto free;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+ list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
+ ar->debug.fw_stats_done = true;
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+		/* FW sends stats for all active VDEVs regardless of PDEV, so
+		 * treat the update as complete only once stats for every
+		 * started VDEV have been received
+		 */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+				total_vdevs_started += pdev->ar->num_started_vdevs;
+ }
+
+ is_end = ((++num_vdev) == total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->debug.fw_stats.vdevs);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_vdev = 0;
+ }
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_BCN_STAT) {
+ if (list_empty(&stats.bcn)) {
+ ath11k_warn(ab, "empty bcn stats");
+ goto complete;
+ }
+		/* Mark the end once we have received stats for all started
+		 * VDEVs within the PDEV
+		 */
+ is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+ list_splice_tail_init(&stats.bcn,
+ &ar->debug.fw_stats.bcn);
+
+ if (is_end) {
+ ar->debug.fw_stats_done = true;
+ num_bcn = 0;
+ }
+ }
+complete:
+ complete(&ar->debug.fw_stats_complete);
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->data_lock);
+
+free:
+ ath11k_fw_stats_pdevs_free(&stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&stats.vdevs);
+ ath11k_fw_stats_bcn_free(&stats.bcn);
+}
+
+static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long timeout, time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+	/* FW stats can get split when exceeding the stats data buffer limit.
+	 * In that case, since there is no end marker for the back-to-back
+	 * received 'update stats' events, keep a 3 second timeout in case
+	 * fw_stats_done is not marked yet
+	 */
+	timeout = jiffies + msecs_to_jiffies(3000);
+
+ ath11k_debugfs_fw_stats_reset(ar);
+
+ reinit_completion(&ar->debug.fw_stats_complete);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left =
+ wait_for_completion_timeout(&ar->debug.fw_stats_complete,
+ 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
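+	/* The completion above only signals the first batch of stats; poll
+	 * fw_stats_done under data_lock until the 3 second deadline in case
+	 * more split 'update stats' events are still arriving.
+	 */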
+ for (;;) {
+ if (time_after(jiffies, timeout))
+ break;
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->debug.fw_stats_done) {
+ spin_unlock_bh(&ar->data_lock);
+ break;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+ return 0;
+}
+
+static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_pdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+ .open = ath11k_open_pdev_stats,
+ .release = ath11k_release_pdev_stats,
+ .read = ath11k_read_pdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.pdev_id = ar->pdev->pdev_id;
+ /* VDEV stats is always sent for all active VDEVs from FW */
+ req_param.vdev_id = 0;
+ req_param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+ goto err_free;
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_vdev_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+ .open = ath11k_open_vdev_stats,
+ .release = ath11k_release_vdev_stats,
+ .read = ath11k_read_vdev_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+ struct ath11k *ar = inode->i_private;
+ struct ath11k_vif *arvif;
+ struct stats_request_params req_param;
+ void *buf = NULL;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ buf = vmalloc(ATH11K_FW_STATS_BUF_SIZE);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ req_param.stats_id = WMI_REQUEST_BCN_STAT;
+ req_param.pdev_id = ar->pdev->pdev_id;
+
+ /* loop all active VDEVs for bcn stats */
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (!arvif->is_up)
+ continue;
+
+ req_param.vdev_id = arvif->vdev_id;
+ ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+ goto err_free;
+ }
+ }
+
+ ath11k_wmi_fw_stats_fill(ar, &ar->debug.fw_stats, req_param.stats_id,
+ buf);
+
+	/* Since the beacon stats request is looped for all active VDEVs, the
+	 * saved fw stats are not freed per request; free them only after all
+	 * active VDEVs are done
+	 */
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_bcn_free(&ar->debug.fw_stats.bcn);
+ spin_unlock_bh(&ar->data_lock);
+
+ file->private_data = buf;
+
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+
+err_free:
+ vfree(buf);
+
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static int ath11k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+ vfree(file->private_data);
+
+ return 0;
+}
+
+static ssize_t ath11k_read_bcn_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char *buf = file->private_data;
+ size_t len = strlen(buf);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+ .open = ath11k_open_bcn_stats,
+ .release = ath11k_release_bcn_stats,
+ .read = ath11k_read_bcn_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] =
+ "To simulate firmware crash write one of the keywords to this file:\n"
+ "`assert` - this will send WMI_FORCE_FW_HANG_CMDID to firmware to cause assert.\n"
+ "`hw-restart` - this will simply queue hw restart without fw/hw actually crashing.\n";
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+/* Simulate firmware crash:
+ * 'assert': send WMI_FORCE_FW_HANG_CMDID to the firmware to cause an
+ * assert; this hang is recoverable by a warm firmware reset.
+ * ('hw-restart' is advertised in the help text above but is not yet
+ * handled by the write handler below.)
+ */
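+/* Usage sketch (assumes debugfs mounted at /sys/kernel/debug):
+ *   echo assert > /sys/kernel/debug/ath11k/<hw name>/simulate_fw_crash
+ */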
+static ssize_t ath11k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar = ab->pdevs[0].ar;
+ char buf[32] = {0};
+ ssize_t rc;
+ int i, ret, radioup = 0;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = &ab->pdevs[i];
+ ar = pdev->ar;
+ if (ar && ar->state == ATH11K_STATE_ON) {
+ radioup = 1;
+ break;
+ }
+ }
+ /* filter partial writes and invalid commands */
+ if (*ppos != 0 || count >= sizeof(buf) || count == 0)
+ return -EINVAL;
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (rc < 0)
+ return rc;
+
+ /* drop the possible '\n' from the end */
+ if (buf[*ppos - 1] == '\n')
+ buf[*ppos - 1] = '\0';
+
+ if (radioup == 0) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!strcmp(buf, "assert")) {
+ ath11k_info(ab, "simulating firmware assert crash\n");
+ ret = ath11k_wmi_force_fw_hang_cmd(ar,
+ ATH11K_WMI_FW_HANG_ASSERT_TYPE,
+ ATH11K_WMI_FW_HANG_DELAY);
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ret) {
+ ath11k_warn(ab, "failed to simulate firmware crash: %d\n", ret);
+ goto exit;
+ }
+
+ ret = count;
+
+exit:
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath11k_read_simulate_fw_crash,
+ .write = ath11k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ u32 filter;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &filter))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ if (filter == ar->debug.extd_tx_stats) {
+ ret = count;
+ goto out;
+ }
+
+ ar->debug.extd_tx_stats = filter;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_enable_extd_tx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
+ ar->debug.extd_tx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_tx_stats = {
+ .read = ath11k_read_enable_extd_tx_stats,
+ .write = ath11k_write_enable_extd_tx_stats,
+	.open = simple_open,
+};
+
+static ssize_t ath11k_write_extd_rx_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 enable, rx_filter = 0, ring_id;
+ int i;
+ int ret;
+
+ if (kstrtouint_from_user(ubuf, count, 0, &enable))
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (enable > 1) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (enable == ar->debug.extd_rx_stats) {
+ ret = count;
+ goto exit;
+ }
+
+ if (enable) {
+ rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+ rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+
+ tlv_filter.rx_filter = rx_filter;
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ } else {
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ }
+
+ ar->debug.rx_filter = tlv_filter.rx_filter;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto exit;
+ }
+ }
+
+ ar->debug.extd_rx_stats = enable;
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_extd_rx_stats(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ char buf[32];
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ ar->debug.extd_rx_stats);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+ .read = ath11k_read_extd_rx_stats,
+ .write = ath11k_write_extd_rx_stats,
+ .open = simple_open,
+};
+
+static int ath11k_fill_bp_stats(struct ath11k_base *ab,
+ struct ath11k_bp_stats *bp_stats,
+ char *buf, int len, int size)
+{
+ lockdep_assert_held(&ab->base_lock);
+
+ len += scnprintf(buf + len, size - len, "count: %u\n",
+ bp_stats->count);
+ len += scnprintf(buf + len, size - len, "hp: %u\n",
+ bp_stats->hp);
+ len += scnprintf(buf + len, size - len, "tp: %u\n",
+ bp_stats->tp);
+ len += scnprintf(buf + len, size - len, "seen before: %ums\n\n",
+ jiffies_to_msecs(jiffies - bp_stats->jiffies));
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_ring_bp_stats(struct ath11k_base *ab,
+ char *buf, int size)
+{
+ struct ath11k_bp_stats *bp_stats;
+ bool stats_rxd = false;
+ u8 i, pdev_idx;
+ int len = 0;
+
+ len += scnprintf(buf + len, size - len, "\nBackpressure Stats\n");
+ len += scnprintf(buf + len, size - len, "==================\n");
+
+ spin_lock_bh(&ab->base_lock);
+ for (i = 0; i < HTT_SW_UMAC_RING_IDX_MAX; i++) {
+ bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[i];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_umac_ring[i]);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+
+ for (i = 0; i < HTT_SW_LMAC_RING_IDX_MAX; i++) {
+ for (pdev_idx = 0; pdev_idx < MAX_RADIOS; pdev_idx++) {
+ bp_stats =
+ &ab->soc_stats.bp_stats.lmac_ring_bp_stats[i][pdev_idx];
+
+ if (!bp_stats->count)
+ continue;
+
+ len += scnprintf(buf + len, size - len, "Ring: %s\n",
+ htt_bp_lmac_ring[i]);
+ len += scnprintf(buf + len, size - len, "pdev: %d\n",
+ pdev_idx);
+ len = ath11k_fill_bp_stats(ab, bp_stats, buf, len, size);
+ stats_rxd = true;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+
+ if (!stats_rxd)
+ len += scnprintf(buf + len, size - len,
+ "No Ring Backpressure stats received\n\n");
+
+ return len;
+}
+
+static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k_base *ab = file->private_data;
+ struct ath11k_soc_dp_stats *soc_stats = &ab->soc_stats;
+ int len = 0, i, retval;
+ const int size = 4096;
+ static const char *rxdma_err[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX] = {
+ "Overflow", "MPDU len", "FCS", "Decrypt", "TKIP MIC",
+ "Unencrypt", "MSDU len", "MSDU limit", "WiFi parse",
+ "AMSDU parse", "SA timeout", "DA timeout",
+ "Flow timeout", "Flush req"};
+ static const char *reo_err[HAL_REO_DEST_RING_ERROR_CODE_MAX] = {
+ "Desc addr zero", "Desc inval", "AMPDU in non BA",
+ "Non BA dup", "BA dup", "Frame 2k jump", "BAR 2k jump",
+ "Frame OOR", "BAR OOR", "No BA session",
+ "Frame SN equal SSN", "PN check fail", "2k err",
+ "PN err", "Desc blocked"};
+
+ char *buf;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, size - len, "SOC RX STATS:\n\n");
+ len += scnprintf(buf + len, size - len, "err ring pkts: %u\n",
+ soc_stats->err_ring_pkts);
+ len += scnprintf(buf + len, size - len, "Invalid RBM: %u\n\n",
+ soc_stats->invalid_rbm);
+ len += scnprintf(buf + len, size - len, "RXDMA errors:\n");
+ for (i = 0; i < HAL_REO_ENTR_RING_RXDMA_ECODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ rxdma_err[i], soc_stats->rxdma_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nREO errors:\n");
+ for (i = 0; i < HAL_REO_DEST_RING_ERROR_CODE_MAX; i++)
+ len += scnprintf(buf + len, size - len, "%s: %u\n",
+ reo_err[i], soc_stats->reo_error[i]);
+
+ len += scnprintf(buf + len, size - len, "\nHAL REO errors:\n");
+ len += scnprintf(buf + len, size - len,
+ "ring0: %u\nring1: %u\nring2: %u\nring3: %u\n",
+ soc_stats->hal_reo_error[0],
+ soc_stats->hal_reo_error[1],
+ soc_stats->hal_reo_error[2],
+ soc_stats->hal_reo_error[3]);
+
+ len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
+ len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ len += scnprintf(buf + len, size - len, "ring%d: %u\n",
+ i, soc_stats->tx_err.desc_na[i]);
+
+ len += scnprintf(buf + len, size - len,
+ "\nMisc Transmit Failures: %d\n",
+ atomic_read(&soc_stats->tx_err.misc_fail));
+
+ len += ath11k_debugfs_dump_soc_ring_bp_stats(ab, buf + len, size - len);
+
+ if (len > size)
+ len = size;
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_soc_dp_stats = {
+ .read = ath11k_debugfs_dump_soc_dp_stats,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
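The soc_dp_stats read handler above follows the stock debugfs pattern: allocate a bounded buffer, accumulate text with scnprintf() (which clamps to the space remaining and returns the count actually stored), then let simple_read_from_buffer() handle the offset and copy-out bookkeeping. A minimal sketch of that pattern; the handler name and the example counter are illustrative, not part of the driver:

    /* Minimal sketch of the debugfs read pattern used above. */
    static ssize_t my_stats_read(struct file *file, char __user *ubuf,
                                 size_t count, loff_t *ppos)
    {
            const int size = 4096;
            int len = 0;
            ssize_t ret;
            char *buf;

            buf = kzalloc(size, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;

            /* scnprintf() never writes past size - len, so len can be
             * accumulated without per-call overflow checks.
             */
            len += scnprintf(buf + len, size - len, "example: %u\n", 42);

            ret = simple_read_from_buffer(ubuf, count, ppos, buf, len);
            kfree(buf);
            return ret;
    }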
+
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
+ return 0;
+
+ ab->debugfs_soc = debugfs_create_dir(ab->hw_params.name, ab->debugfs_ath11k);
+ if (IS_ERR(ab->debugfs_soc))
+ return PTR_ERR(ab->debugfs_soc);
+
+ debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
+ &fops_simulate_fw_crash);
+
+ debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
+ &fops_soc_dp_stats);
+
+ return 0;
+}
+
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+}
+
+int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ ab->debugfs_ath11k = debugfs_create_dir("ath11k", NULL);
+
+ return PTR_ERR_OR_ZERO(ab->debugfs_ath11k);
+}
+
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_ath11k);
+ ab->debugfs_ath11k = NULL;
+}
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+ struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+ ar->debug.debugfs_pdev);
+
+ ar->debug.fw_stats.debugfs_fwstats = fwstats_dir;
+
+ /* All fw stats debugfs files are created under the "fw_stats"
+ * directory created per pdev.
+ */
+ debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+ &fops_pdev_stats);
+ debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+ &fops_vdev_stats);
+ debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+ &fops_bcn_stats);
+
+ INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.vdevs);
+ INIT_LIST_HEAD(&ar->debug.fw_stats.bcn);
+
+ init_completion(&ar->debug.fw_stats_complete);
+}
+
+static ssize_t ath11k_write_pktlog_filter(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ struct ath11k_base *ab = ar->ab;
+ struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ u32 rx_filter = 0, ring_id, filter, mode;
+ u8 buf[128] = {0};
+ int i, ret;
+ ssize_t rc;
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state != ATH11K_STATE_ON) {
+ ret = -ENETDOWN;
+ goto out;
+ }
+
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, ubuf, count);
+ if (rc < 0) {
+ ret = rc;
+ goto out;
+ }
+ buf[rc] = '\0';
+
+ ret = sscanf(buf, "0x%x %u", &filter, &mode);
+ if (ret != 2) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (filter) {
+ ret = ath11k_wmi_pdev_pktlog_enable(ar, filter);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to enable pktlog filter %x: %d\n",
+ filter, ret);
+ goto out;
+ }
+ } else {
+ ret = ath11k_wmi_pdev_pktlog_disable(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to disable pktlog: %d\n", ret);
+ goto out;
+ }
+ }
+
+#define HTT_RX_FILTER_TLV_LITE_MODE \
+ (HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+ HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_START)
+
+ if (mode == ATH11K_PKTLOG_MODE_FULL) {
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_START |
+ HTT_RX_FILTER_TLV_FLAGS_MSDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
+ HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
+ HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ } else if (mode == ATH11K_PKTLOG_MODE_LITE) {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_PKTLOG);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to enable pktlog lite: %d\n", ret);
+ goto out;
+ }
+
+ rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ } else {
+ ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
+ HTT_PPDU_STATS_TAG_DEFAULT);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to send htt ppdu stats req: %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ tlv_filter.rx_filter = rx_filter;
+ if (rx_filter) {
+ tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+ tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+ tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+ tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+ HTT_RX_FP_DATA_FILTER_FLASG3;
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE, &tlv_filter);
+
+ if (ret) {
+ ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
+ filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+
+ ar->debug.pktlog_filter = filter;
+ ar->debug.pktlog_mode = mode;
+ ret = count;
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath11k_read_pktlog_filter(struct file *file,
+ char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32] = {0};
+ struct ath11k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%08x %08x\n",
+ ar->debug.pktlog_filter,
+ ar->debug.pktlog_mode);
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pktlog_filter = {
+ .read = ath11k_read_pktlog_filter,
+ .write = ath11k_write_pktlog_filter,
+ .open = simple_open
+};
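The pktlog_filter file accepts "0x<filter> <mode>", where mode 1 selects lite and 2 selects full pktlog (per the enums added in debugfs.h below), and the write is rejected with -ENETDOWN unless the radio is up. A hedged userspace sketch, assuming the debugfs layout built by ath11k_debugfs_register(); the "<hw>" path component is hypothetical and depends on hw_params.name:

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            /* filter 0xffff, lite mode */
            int fd = open("/sys/kernel/debug/ath11k/<hw>/mac0/pktlog_filter",
                          O_WRONLY);
            if (fd < 0)
                    return 1;
            if (write(fd, "0xffff 1", 8) != 8) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }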
+
+static ssize_t ath11k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath11k *ar = file->private_data;
+ int ret;
+
+ ret = ath11k_wmi_simulate_radar(ar);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath11k_write_simulate_radar,
+ .open = simple_open
+};
+
+int ath11k_debugfs_register(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ snprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+ if (IS_ERR(ar->debug.debugfs_pdev))
+ return PTR_ERR(ar->debug.debugfs_pdev);
+
+ /* Create a symlink under ieee80211/phy* */
+ snprintf(buf, 100, "../../ath11k/%pd2", ar->debug.debugfs_pdev);
+ debugfs_create_symlink("ath11k", ar->hw->wiphy->debugfsdir, buf);
+
+ ath11k_debugfs_htt_stats_init(ar);
+
+ ath11k_debugfs_fw_stats_init(ar);
+
+ debugfs_create_file("ext_tx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_tx_stats);
+ debugfs_create_file("ext_rx_stats", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_extd_rx_stats);
+ debugfs_create_file("pktlog_filter", 0644,
+ ar->debug.debugfs_pdev, ar,
+ &fops_pktlog_filter);
+
+ if (ar->hw->wiphy->bands[NL80211_BAND_5GHZ]) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ debugfs_create_bool("dfs_block_radar_events", 0200,
+ ar->debug.debugfs_pdev,
+ &ar->dfs_block_radar_events);
+ }
+
+ return 0;
+}
+
+void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+}
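Taken together, the create/register paths above build a small debugfs tree. A sketch of the resulting layout; the soc directory name comes from hw_params.name and the per-radio directories from the pdev index:

    /sys/kernel/debug/ath11k/                  <- ath11k_debugfs_soc_create()
        <hw name>/                             <- ath11k_debugfs_pdev_create()
            simulate_fw_crash
            soc_dp_stats
            mac0/                              <- ath11k_debugfs_register()
                fw_stats/{pdev,vdev,beacon}_stats
                ext_tx_stats, ext_rx_stats, pktlog_filter
                dfs_simulate_radar, dfs_block_radar_events  (5 GHz only)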
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
new file mode 100644
index 000000000000..e5346af71f24
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -0,0 +1,217 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_H_
+#define _ATH11K_DEBUGFS_H_
+
+#include "hal_tx.h"
+
+#define ATH11K_TX_POWER_MAX_VAL 70
+#define ATH11K_TX_POWER_MIN_VAL 0
+
+/* htt_dbg_ext_stats_type */
+enum ath11k_dbg_htt_ext_stats_type {
+ ATH11K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX = 2,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_HWQ = 3,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+ ATH11K_DBG_HTT_EXT_STATS_TQM_CMDQ = 7,
+ ATH11K_DBG_HTT_EXT_STATS_TX_DE_INFO = 8,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE = 9,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_RX_RATE = 10,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_INFO = 11,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO = 12,
+ ATH11K_DBG_HTT_EXT_STATS_TX_MU_HWQ = 13,
+ ATH11K_DBG_HTT_EXT_STATS_RING_IF_INFO = 14,
+ ATH11K_DBG_HTT_EXT_STATS_SRNG_INFO = 15,
+ ATH11K_DBG_HTT_EXT_STATS_SFM_INFO = 16,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_MU = 17,
+ ATH11K_DBG_HTT_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS = 19,
+ ATH11K_DBG_HTT_EXT_STATS_TWT_SESSIONS = 20,
+ ATH11K_DBG_HTT_EXT_STATS_REO_RESOURCE_STATS = 21,
+ ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
+ ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
+
+ /* keep this last */
+ ATH11K_DBG_HTT_NUM_EXT_STATS,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ u8 pdev_id;
+ u8 type;
+ u8 peer_addr[ETH_ALEN];
+ struct completion cmpln;
+ u32 buf_len;
+ u8 buf[];
+};
+
+struct ath_pktlog_hdr {
+ u16 flags;
+ u16 missed_cnt;
+ u16 log_type;
+ u16 size;
+ u32 timestamp;
+ u32 type_specific_data;
+ u8 payload[];
+};
+
+#define ATH11K_HTT_PEER_STATS_RESET BIT(16)
+
+#define ATH11K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH11K_FW_STATS_BUF_SIZE (1024 * 1024)
+
+enum ath11k_pktlog_filter {
+ ATH11K_PKTLOG_RX = 0x000000001,
+ ATH11K_PKTLOG_TX = 0x000000002,
+ ATH11K_PKTLOG_RCFIND = 0x000000004,
+ ATH11K_PKTLOG_RCUPDATE = 0x000000008,
+ ATH11K_PKTLOG_EVENT_SMART_ANT = 0x000000020,
+ ATH11K_PKTLOG_EVENT_SW = 0x000000040,
+ ATH11K_PKTLOG_ANY = 0x00000006f,
+};
+
+enum ath11k_pktlog_mode {
+ ATH11K_PKTLOG_MODE_LITE = 1,
+ ATH11K_PKTLOG_MODE_FULL = 2,
+};
+
+enum ath11k_pktlog_enum {
+ ATH11K_PKTLOG_TYPE_TX_CTRL = 1,
+ ATH11K_PKTLOG_TYPE_TX_STAT = 2,
+ ATH11K_PKTLOG_TYPE_TX_MSDU_ID = 3,
+ ATH11K_PKTLOG_TYPE_RX_STAT = 5,
+ ATH11K_PKTLOG_TYPE_RC_FIND = 6,
+ ATH11K_PKTLOG_TYPE_RC_UPDATE = 7,
+ ATH11K_PKTLOG_TYPE_TX_VIRT_ADDR = 8,
+ ATH11K_PKTLOG_TYPE_RX_CBF = 10,
+ ATH11K_PKTLOG_TYPE_RX_STATBUF = 22,
+ ATH11K_PKTLOG_TYPE_PPDU_STATS = 23,
+ ATH11K_PKTLOG_TYPE_LITE_RX = 24,
+};
+
+enum ath11k_dbg_aggr_mode {
+ ATH11K_DBG_AGGR_MODE_AUTO,
+ ATH11K_DBG_AGGR_MODE_MANUAL,
+ ATH11K_DBG_AGGR_MODE_MAX,
+};
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+int ath11k_debugfs_soc_create(struct ath11k_base *ab);
+void ath11k_debugfs_soc_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_pdev_create(struct ath11k_base *ab);
+void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab);
+int ath11k_debugfs_register(struct ath11k *ar);
+void ath11k_debugfs_unregister(struct ath11k *ar);
+void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab, struct sk_buff *skb);
+
+void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return (ar->debug.pktlog_mode == ATH11K_PKTLOG_MODE_LITE);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return (!ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode);
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return (ar->debug.pktlog_peer_valid && ar->debug.pktlog_mode &&
+ ether_addr_equal(addr, ar->debug.pktlog_peer_addr));
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_tx_stats;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return ar->debug.extd_rx_stats;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return ar->debug.rx_filter;
+}
+
+#else
+static inline int ath11k_debugfs_soc_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_soc_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_pdev_destroy(struct ath11k_base *ab)
+{
+}
+
+static inline int ath11k_debugfs_register(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline void ath11k_debugfs_unregister(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_process(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline void ath11k_debugfs_fw_stats_init(struct ath11k *ar)
+{
+}
+
+static inline int ath11k_debugfs_is_extd_tx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline int ath11k_debugfs_is_extd_rx_stats_enabled(struct ath11k *ar)
+{
+ return 0;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_rx_stats_enabled(struct ath11k *ar)
+{
+ return false;
+}
+
+static inline bool ath11k_debugfs_is_pktlog_peer_valid(struct ath11k *ar, u8 *addr)
+{
+ return false;
+}
+
+static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif /* _ATH11K_DEBUGFS_H_ */
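The header keeps callers free of preprocessor conditionals by pairing every CONFIG_ATH11K_DEBUGFS declaration with an inline no-op stub. The generic shape of that pattern, with hypothetical names:

    struct foo;

    #ifdef CONFIG_FOO_DEBUGFS
    int foo_debugfs_create(struct foo *f);
    #else
    static inline int foo_debugfs_create(struct foo *f)
    {
            return 0; /* compiles away; call sites stay unconditional */
    }
    #endif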
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 6b532dc99c98..9191ffa081c2 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -8,7 +8,7 @@
#include "dp_tx.h"
#include "dp_rx.h"
#include "debug.h"
-#include "debug_htt_stats.h"
+#include "debugfs_htt_stats.h"
#define HTT_DBG_OUT(buf, len, fmt, ...) \
scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
@@ -3895,50 +3895,6 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
}
}
-static inline void htt_htt_stats_debug_dump(const u32 *tag_buf,
- struct debug_htt_stats_req *stats_req)
-{
- u8 *buf = stats_req->buf;
- u32 len = stats_req->buf_len;
- u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- u32 tlv_len = 0, i = 0, word_len = 0;
-
- tlv_len = FIELD_GET(HTT_TLV_LEN, *tag_buf) + HTT_TLV_HDR_LEN;
- word_len = (tlv_len % 4) == 0 ? (tlv_len / 4) : ((tlv_len / 4) + 1);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "============================================");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HKDBG TLV DUMP: (tag_len=%u bytes, words=%u)",
- tlv_len, word_len);
-
- for (i = 0; i + 3 < word_len; i += 4) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "0x%08x 0x%08x 0x%08x 0x%08x",
- tag_buf[i], tag_buf[i + 1],
- tag_buf[i + 2], tag_buf[i + 3]);
- }
-
- if (i + 3 == word_len) {
- len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x 0x%08x ",
- tag_buf[i], tag_buf[i + 1], tag_buf[i + 2]);
- } else if (i + 2 == word_len) {
- len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x 0x%08x ",
- tag_buf[i], tag_buf[i + 1]);
- } else if (i + 1 == word_len) {
- len += HTT_DBG_OUT(buf + len, buf_len - len, "0x%08x ",
- tag_buf[i]);
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "============================================");
-
- if (len >= buf_len)
- buf[buf_len - 1] = 0;
- else
- buf[len] = 0;
-
- stats_req->buf_len = len;
-}
-
static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -4297,8 +4253,8 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
return 0;
}
-void ath11k_dbg_htt_ext_stats_handler(struct ath11k_base *ab,
- struct sk_buff *skb)
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
{
struct ath11k_htt_extd_stats_msg *msg;
struct debug_htt_stats_req *stats_req;
@@ -4446,7 +4402,7 @@ static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
return 0;
}
-int ath11k_dbg_htt_stats_req(struct ath11k *ar)
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
{
struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
u8 type = stats_req->type;
@@ -4520,7 +4476,7 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = type;
- ret = ath11k_dbg_htt_stats_req(ar);
+ ret = ath11k_debugfs_htt_stats_req(ar);
if (ret < 0)
goto out;
@@ -4630,7 +4586,7 @@ static const struct file_operations fops_htt_stats_reset = {
.llseek = default_llseek,
};
-void ath11k_debug_htt_stats_init(struct ath11k *ar)
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
{
spin_lock_init(&ar->debug.htt_stats.lock);
debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
diff --git a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index 682a6ff222bd..74b2086eed9d 100644
--- a/drivers/net/wireless/ath/ath11k/debug_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -1660,8 +1660,6 @@ struct htt_pdev_obss_pd_stats_tlv {
u32 num_obss_tx_ppdu_failure;
};
-void ath11k_debug_htt_stats_init(struct ath11k *ar);
-
struct htt_ring_backpressure_stats_tlv {
u32 pdev_id;
u32 current_head_idx;
@@ -1687,4 +1685,29 @@ struct htt_ring_backpressure_stats_tlv {
u32 backpressure_hist[5];
};
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_htt_stats_init(struct ath11k *ar);
+void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb);
+int ath11k_debugfs_htt_stats_req(struct ath11k *ar);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+static inline void ath11k_debugfs_htt_stats_init(struct ath11k *ar)
+{
+}
+
+static inline void ath11k_debugfs_htt_ext_stats_handler(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+}
+
+static inline int ath11k_debugfs_htt_stats_req(struct ath11k *ar)
+{
+ return 0;
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
#endif
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 7308ed254232..270c0edbb10f 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -5,16 +5,16 @@
#include <linux/vmalloc.h>
+#include "debugfs_sta.h"
#include "core.h"
#include "peer.h"
#include "debug.h"
#include "dp_tx.h"
-#include "debug_htt_stats.h"
+#include "debugfs_htt_stats.h"
-void
-ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
- struct ath11k_per_peer_tx_stats *peer_stats,
- u8 legacy_rate_idx)
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath11k_htt_tx_stats *tx_stats;
@@ -125,9 +125,9 @@ ath11k_accumulate_per_peer_tx_stats(struct ath11k_sta *arsta,
tx_stats->tx_duration += peer_stats->duration;
}
-void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
- struct sk_buff *msdu,
- struct hal_tx_status *ts)
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
@@ -200,7 +200,8 @@ void ath11k_update_per_peer_stats_from_txcompl(struct ath11k *ar,
arsta->txrate.nss = arsta->last_txrate.nss;
arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
- ath11k_accumulate_per_peer_tx_stats(arsta, peer_stats, rate_idx);
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
+
err_out:
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
@@ -428,7 +429,7 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
ar->debug.htt_stats.stats_req = stats_req;
stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
- ret = ath11k_dbg_htt_stats_req(ar);
+ ret = ath11k_debugfs_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
if (ret < 0)
goto out;
@@ -820,15 +821,15 @@ static const struct file_operations fops_htt_peer_stats_reset = {
.llseek = default_llseek,
};
-void ath11k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, struct dentry *dir)
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir)
{
struct ath11k *ar = hw->priv;
- if (ath11k_debug_is_extd_tx_stats_enabled(ar))
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
debugfs_create_file("tx_stats", 0400, dir, sta,
&fops_tx_stats);
- if (ath11k_debug_is_extd_rx_stats_enabled(ar))
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
debugfs_create_file("rx_stats", 0400, dir, sta,
&fops_rx_stats);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.h b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
new file mode 100644
index 000000000000..18dc65d9edcf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ATH11K_DEBUGFS_STA_H_
+#define _ATH11K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+#include "hal_tx.h"
+
+#ifdef CONFIG_ATH11K_DEBUGFS
+
+void ath11k_debugfs_sta_op_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct dentry *dir);
+void ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx);
+void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts);
+
+#else /* CONFIG_ATH11K_DEBUGFS */
+
+#define ath11k_debugfs_sta_op_add NULL
+
+static inline void
+ath11k_debugfs_sta_add_tx_stats(struct ath11k_sta *arsta,
+ struct ath11k_per_peer_tx_stats *peer_stats,
+ u8 legacy_rate_idx)
+{
+}
+
+static inline void ath11k_debugfs_sta_update_txcompl(struct ath11k *ar,
+ struct sk_buff *msdu,
+ struct hal_tx_status *ts)
+{
+}
+
+#endif /* CONFIG_ATH11K_DEBUGFS */
+
+#endif /* _ATH11K_DEBUGFS_STA_H_ */
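Note that the non-debugfs fallback defines ath11k_debugfs_sta_op_add as NULL rather than as a stub function: mac80211 treats a NULL sta_add_debugfs op as "not implemented". A fragment of the consuming side, assuming the usual ieee80211_ops wiring (the member itself only exists under CONFIG_MAC80211_DEBUGFS):

    static const struct ieee80211_ops ops_sketch = {
            /* ... other mandatory ops elided in this sketch ... */
    #ifdef CONFIG_MAC80211_DEBUGFS
            .sta_add_debugfs = ath11k_debugfs_sta_op_add, /* may be NULL */
    #endif
    };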
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 1d64c3c51ac9..59dd185a0cfc 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -7,6 +7,7 @@
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
+#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"
@@ -106,13 +107,120 @@ void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
ring->vaddr_unaligned = NULL;
}
+static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
+{
+ int ext_group_num;
+ u8 mask = 1 << ring_num;
+
+ for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
+ ext_group_num++) {
+ if (mask & grp_mask[ext_group_num])
+ return ext_group_num;
+ }
+
+ return -ENOENT;
+}
+
+static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
+ enum hal_ring_type type, int ring_num)
+{
+ const u8 *grp_mask;
+
+ switch (type) {
+ case HAL_WBM2SW_RELEASE:
+ if (ring_num < 3) {
+ grp_mask = &ab->hw_params.ring_mask->tx[0];
+ } else if (ring_num == 3) {
+ grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
+ ring_num = 0;
+ } else {
+ return -ENOENT;
+ }
+ break;
+ case HAL_REO_EXCEPTION:
+ grp_mask = &ab->hw_params.ring_mask->rx_err[0];
+ break;
+ case HAL_REO_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx[0];
+ break;
+ case HAL_REO_STATUS:
+ grp_mask = &ab->hw_params.ring_mask->reo_status[0];
+ break;
+ case HAL_RXDMA_MONITOR_STATUS:
+ case HAL_RXDMA_MONITOR_DST:
+ grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
+ break;
+ case HAL_RXDMA_DST:
+ grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
+ break;
+ case HAL_RXDMA_BUF:
+ grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
+ break;
+ case HAL_RXDMA_MONITOR_BUF:
+ case HAL_TCL_DATA:
+ case HAL_TCL_CMD:
+ case HAL_REO_CMD:
+ case HAL_SW2WBM_RELEASE:
+ case HAL_WBM_IDLE_LINK:
+ case HAL_TCL_STATUS:
+ case HAL_REO_REINJECT:
+ case HAL_CE_SRC:
+ case HAL_CE_DST:
+ case HAL_CE_DST_STATUS:
+ default:
+ return -ENOENT;
+ }
+
+ return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
+}
+
+static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
+ struct hal_srng_params *ring_params,
+ enum hal_ring_type type, int ring_num)
+{
+ int msi_group_number, msi_data_count;
+ u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
+ int ret;
+
+ ret = ath11k_get_user_msi_vector(ab, "DP",
+ &msi_data_count, &msi_data_start,
+ &msi_irq_start);
+ if (ret)
+ return;
+
+ msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
+ ring_num);
+ if (msi_group_number < 0) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "ring not part of an ext_group; ring_type: %d,ring_num %d",
+ type, ring_num);
+ ring_params->msi_addr = 0;
+ ring_params->msi_data = 0;
+ return;
+ }
+
+ if (msi_group_number > msi_data_count) {
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "multiple msi_groups share one msi, msi_group_num %d",
+ msi_group_number);
+ }
+
+ ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
+
+ ring_params->msi_addr = addr_lo;
+ ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
+ ring_params->msi_data = (msi_group_number % msi_data_count)
+ + msi_data_start;
+ ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
+}
+
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
enum hal_ring_type type, int ring_num,
int mac_id, int num_entries)
{
struct hal_srng_params params = { 0 };
- int entry_sz = ath11k_hal_srng_get_entrysize(type);
- int max_entries = ath11k_hal_srng_get_max_entries(type);
+ int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
+ int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
int ret;
if (max_entries < 0 || entry_sz < 0)
@@ -135,6 +243,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
params.ring_base_vaddr = ring->vaddr;
params.ring_base_paddr = ring->paddr;
params.num_entries = num_entries;
+ ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
switch (type) {
case HAL_REO_DST:
@@ -159,7 +268,7 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
break;
}
/* follow through when ring_num >= 3 */
- /* fall through */
+ fallthrough;
case HAL_REO_EXCEPTION:
case HAL_REO_REINJECT:
case HAL_REO_CMD:
@@ -195,11 +304,25 @@ int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
return 0;
}
+static void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
+{
+ int i;
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
+
+ ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
+}
+
static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
struct ath11k_dp *dp = &ab->dp;
int i;
+ ath11k_dp_stop_shadow_timers(ab);
ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
@@ -265,6 +388,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
ath11k_hal_tx_init_data_ring(ab, srng);
+
+ ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
+ ATH11K_SHADOW_DP_TIMER_INTERVAL,
+ dp->tx_ring[i].tcl_data_ring.ring_id);
}
ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
@@ -300,6 +427,10 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
ath11k_hal_reo_init_cmd_ring(ab, srng);
+ ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
+ ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
+ dp->reo_cmd_ring.ring_id);
+
ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
0, 0, DP_REO_STATUS_RING_SIZE);
if (ret) {
@@ -367,7 +498,7 @@ static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
u32 end_offset;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
- ath11k_hal_srng_get_entrysize(HAL_WBM_IDLE_LINK);
+ ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
@@ -565,7 +696,7 @@ int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
return ret;
/* Setup link desc idle list for HW internal usage */
- entry_sz = ath11k_hal_srng_get_entrysize(ring_type);
+ entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
tot_mem_sz = entry_sz * n_link_desc;
/* Setup scatter desc list when the total memory requirement is more */
@@ -622,16 +753,16 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
struct napi_struct *napi = &irq_grp->napi;
int grp_id = irq_grp->grp_id;
int work_done = 0;
- int i = 0;
+ int i = 0, j;
int tot_work_done = 0;
- while (ath11k_tx_ring_mask[grp_id] >> i) {
- if (ath11k_tx_ring_mask[grp_id] & BIT(i))
+ while (ab->hw_params.ring_mask->tx[grp_id] >> i) {
+ if (ab->hw_params.ring_mask->tx[grp_id] & BIT(i))
ath11k_dp_tx_completion_handler(ab, i);
i++;
}
- if (ath11k_rx_err_ring_mask[grp_id]) {
+ if (ab->hw_params.ring_mask->rx_err[grp_id]) {
work_done = ath11k_dp_process_rx_err(ab, napi, budget);
budget -= work_done;
tot_work_done += work_done;
@@ -639,7 +770,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
goto done;
}
- if (ath11k_rx_wbm_rel_ring_mask[grp_id]) {
+ if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
work_done = ath11k_dp_rx_process_wbm_err(ab,
napi,
budget);
@@ -650,8 +781,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
goto done;
}
- if (ath11k_rx_ring_mask[grp_id]) {
- i = fls(ath11k_rx_ring_mask[grp_id]) - 1;
+ if (ab->hw_params.ring_mask->rx[grp_id]) {
+ i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
work_done = ath11k_dp_process_rx(ab, i, napi,
budget);
budget -= work_done;
@@ -660,41 +791,51 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
goto done;
}
- if (rx_mon_status_ring_mask[grp_id]) {
- for (i = 0; i < ab->num_radios; i++) {
- if (rx_mon_status_ring_mask[grp_id] & BIT(i)) {
- work_done =
- ath11k_dp_rx_process_mon_rings(ab,
- i, napi,
- budget);
- budget -= work_done;
- tot_work_done += work_done;
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
+ for (i = 0; i < ab->num_radios; i++) {
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+
+ if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
+ BIT(id)) {
+ work_done =
+ ath11k_dp_rx_process_mon_rings(ab,
+ id,
+ napi, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+
+ if (budget <= 0)
+ goto done;
+ }
}
- if (budget <= 0)
- goto done;
}
}
- if (ath11k_reo_status_ring_mask[grp_id])
+ if (ab->hw_params.ring_mask->reo_status[grp_id])
ath11k_dp_process_reo_status(ab);
for (i = 0; i < ab->num_radios; i++) {
- if (ath11k_rxdma2host_ring_mask[grp_id] & BIT(i)) {
- work_done = ath11k_dp_process_rxdma_err(ab, i, budget);
- budget -= work_done;
- tot_work_done += work_done;
- }
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxmda_per_pdev + j;
- if (budget <= 0)
- goto done;
+ if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
+ work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
+ budget -= work_done;
+ tot_work_done += work_done;
+ }
- if (ath11k_host2rxdma_ring_mask[grp_id] & BIT(i)) {
- struct ath11k_pdev_dp *dp = &ab->pdevs[i].ar->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ if (budget <= 0)
+ goto done;
- ath11k_dp_rxbufs_replenish(ab, i, rx_ring, 0,
- HAL_RX_BUF_RBM_SW3_BM,
- GFP_ATOMIC);
+ if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
+ struct ath11k *ar = ath11k_ab_to_ar(ab, id);
+ struct ath11k_pdev_dp *dp = &ar->dp;
+ struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+
+ ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
+ HAL_RX_BUF_RBM_SW3_BM);
+ }
}
}
/* TODO: Implement handler for other interrupts */
@@ -709,10 +850,12 @@ void ath11k_dp_pdev_free(struct ath11k_base *ab)
struct ath11k *ar;
int i;
+ del_timer_sync(&ab->mon_reap_timer);
+
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
ath11k_dp_rx_pdev_free(ab, i);
- ath11k_debug_unregister(ar);
+ ath11k_debugfs_unregister(ar);
ath11k_dp_rx_pdev_mon_detach(ar);
}
}
@@ -722,6 +865,7 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
struct ath11k *ar;
struct ath11k_pdev_dp *dp;
int i;
+ int j;
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -731,8 +875,10 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
- idr_init(&dp->rx_mon_status_refill_ring.bufs_idr);
- spin_lock_init(&dp->rx_mon_status_refill_ring.idr_lock);
+ for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
+ spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
+ }
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
}
@@ -797,13 +943,20 @@ int ath11k_dp_htt_connect(struct ath11k_dp *dp)
static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
- /* For STA mode, enable address search index,
- * tcl uses ast_hash value in the descriptor.
+ /* When v2_map_support is true: for STA mode, enable the address
+ * search index; TCL uses the ast_hash value in the descriptor.
+ * When v2_map_support is false: for STA mode, don't enable the
+ * address search index.
*/
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_STA:
- arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
- arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
+ } else {
+ arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
+ arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+ }
break;
case WMI_VDEV_TYPE_AP:
case WMI_VDEV_TYPE_IBSS:
@@ -935,3 +1088,78 @@ fail_link_desc_cleanup:
return ret;
}
+
+static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
+{
+ struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
+ t, timer);
+ struct ath11k_base *ab = update_timer->ab;
+ struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
+
+ spin_lock_bh(&srng->lock);
+
+ /* When the timer fires, the handler checks whether new TX has
+ * happened since it was armed. It updates the HP only if no TX
+ * occurred during the timeout interval, and then stops the timer;
+ * the timer is re-armed whenever TX resumes.
+ */
+ if (update_timer->timer_tx_num != update_timer->tx_num) {
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+ } else {
+ update_timer->started = false;
+ ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
+ }
+
+ spin_unlock_bh(&srng->lock);
+}
+
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ lockdep_assert_held(&srng->lock);
+
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num++;
+
+ if (update_timer->started)
+ return;
+
+ update_timer->started = true;
+ update_timer->timer_tx_num = update_timer->tx_num;
+ mod_timer(&update_timer->timer, jiffies +
+ msecs_to_jiffies(update_timer->interval));
+}
+
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ if (!update_timer->init)
+ return;
+
+ del_timer_sync(&update_timer->timer);
+}
+
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id)
+{
+ if (!ab->hw_params.supports_shadow_regs)
+ return;
+
+ update_timer->tx_num = 0;
+ update_timer->timer_tx_num = 0;
+ update_timer->ab = ab;
+ update_timer->ring_id = ring_id;
+ update_timer->interval = interval;
+ update_timer->init = true;
+ timer_setup(&update_timer->timer,
+ ath11k_dp_shadow_timer_handler, 0);
+}
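The shadow-update timer is armed from the hot path: ath11k_dp_shadow_start_timer() must be called with the ring lock held (hence the lockdep assertion), bumping tx_num so the handler can tell whether TX stayed busy across the interval. A sketch of a hypothetical call site; the real callers live in the TX path:

    static void example_tx_hp_update(struct ath11k_base *ab,
                                     struct hal_srng *srng,
                                     struct ath11k_hp_update_timer *timer)
    {
            spin_lock_bh(&srng->lock);
            /* ... post the descriptor and advance the head pointer ... */
            ath11k_dp_shadow_start_timer(ab, srng, timer);
            spin_unlock_bh(&srng->lock);
    }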
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index 7587862d2e32..ee8db812589b 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -8,6 +8,8 @@
#include "hal_rx.h"
+#define MAX_RXDMA_PER_PDEV 2
+
struct ath11k_base;
struct ath11k_peer;
struct ath11k_dp;
@@ -38,6 +40,7 @@ struct dp_rx_tid {
#define DP_REO_DESC_FREE_THRESHOLD 64
#define DP_REO_DESC_FREE_TIMEOUT_MS 1000
+#define DP_MON_SERVICE_BUDGET 128
struct dp_reo_cache_flush_elem {
struct list_head list;
@@ -142,12 +145,13 @@ struct ath11k_pdev_dp {
atomic_t num_tx_pending;
wait_queue_head_t tx_empty_waitq;
struct dp_rxdma_ring rx_refill_buf_ring;
- struct dp_srng rxdma_err_dst_ring;
+ struct dp_srng rx_mac_buf_ring[MAX_RXDMA_PER_PDEV];
+ struct dp_srng rxdma_err_dst_ring[MAX_RXDMA_PER_PDEV];
struct dp_srng rxdma_mon_dst_ring;
struct dp_srng rxdma_mon_desc_ring;
struct dp_rxdma_ring rxdma_mon_buf_ring;
- struct dp_rxdma_ring rx_mon_status_refill_ring;
+ struct dp_rxdma_ring rx_mon_status_refill_ring[MAX_RXDMA_PER_PDEV];
struct ieee80211_rx_status rx_status;
struct ath11k_mon_data mon_data;
};
@@ -202,6 +206,20 @@ struct ath11k_pdev_dp {
#define DP_TX_DESC_ID_MSDU_ID GENMASK(18, 2)
#define DP_TX_DESC_ID_POOL_ID GENMASK(20, 19)
+#define ATH11K_SHADOW_DP_TIMER_INTERVAL 20
+#define ATH11K_SHADOW_CTRL_TIMER_INTERVAL 10
+
+struct ath11k_hp_update_timer {
+ struct timer_list timer;
+ bool started;
+ bool init;
+ u32 tx_num;
+ u32 timer_tx_num;
+ u32 ring_id;
+ u32 interval;
+ struct ath11k_base *ab;
+};
+
struct ath11k_dp {
struct ath11k_base *ab;
enum ath11k_htc_ep_id eid;
@@ -231,6 +249,8 @@ struct ath11k_dp {
* - reo_cmd_cache_flush_count
*/
spinlock_t reo_cmd_lock;
+ struct ath11k_hp_update_timer reo_cmd_timer;
+ struct ath11k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
};
/* HTT definitions */
@@ -494,7 +514,7 @@ struct htt_ppdu_stats_cfg_cmd {
} __packed;
#define HTT_PPDU_STATS_CFG_MSG_TYPE GENMASK(7, 0)
-#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(16, 9)
+#define HTT_PPDU_STATS_CFG_PDEV_ID GENMASK(15, 8)
#define HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK GENMASK(31, 16)
enum htt_ppdu_stats_tag_type {
@@ -936,11 +956,13 @@ struct htt_rx_ring_tlv_filter {
enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_VERSION_CONF,
+ HTT_T2H_MSG_TYPE_PEER_MAP = 0x3,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x4,
HTT_T2H_MSG_TYPE_RX_ADDBA = 0x5,
HTT_T2H_MSG_TYPE_PKTLOG = 0x8,
HTT_T2H_MSG_TYPE_SEC_IND = 0xb,
- HTT_T2H_MSG_TYPE_PEER_MAP = 0x1e,
- HTT_T2H_MSG_TYPE_PEER_UNMAP = 0x1f,
+ HTT_T2H_MSG_TYPE_PEER_MAP2 = 0x1e,
+ HTT_T2H_MSG_TYPE_PEER_UNMAP2 = 0x1f,
HTT_T2H_MSG_TYPE_PPDU_STATS_IND = 0x1d,
HTT_T2H_MSG_TYPE_EXT_STATS_CONF = 0x1c,
HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND = 0x24,
@@ -1610,5 +1632,13 @@ int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
struct dp_link_desc_bank *link_desc_banks,
u32 ring_type, struct hal_srng *srng,
u32 n_link_desc);
+void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
+ struct hal_srng *srng,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer);
+void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
+ struct ath11k_hp_update_timer *update_timer,
+ u32 interval, u32 ring_id);
#endif
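The HTT_PPDU_STATS_CFG_PDEV_ID hunk above narrows the field from GENMASK(16, 9) to GENMASK(15, 8), so the pdev id occupies byte 1 of the config word, directly after the 8-bit message type. Assuming the corrected layout (names per dp.h; needs <linux/bitfield.h>), a config word would be packed as:

    /* Sketch only; packing per the fixed masks above. */
    static u32 ppdu_stats_cfg_word(u8 pdev_id, u16 tlv_bitmap)
    {
            return FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
                              HTT_H2T_MSG_TYPE_PPDU_STATS_CFG) |
                   FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_id) |
                   FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, tlv_bitmap);
    }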
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 791d971784ce..01625327eef7 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -9,6 +9,8 @@
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs_sta.h"
#include "hal_desc.h"
#include "hw.h"
#include "dp_rx.h"
@@ -260,12 +262,23 @@ static u32 ath11k_dp_rxdesc_get_ppduid(struct hal_rx_desc *rx_desc)
return __le16_to_cpu(rx_desc->mpdu_start.phy_ppdu_id);
}
+static void ath11k_dp_service_mon_ring(struct timer_list *t)
+{
+ struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
+
+ mod_timer(&ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
+}
+
/* Returns number of Rx buffers replenished */
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp)
+ enum hal_rx_buf_return_buf_manager mgr)
{
struct hal_srng *srng;
u32 *desc;
@@ -312,7 +325,7 @@ int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
spin_lock_bh(&rx_ring->idr_lock);
buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
- rx_ring->bufs_max * 3, gfp);
+ rx_ring->bufs_max * 3, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (buf_id < 0)
goto fail_dma_unmap;
@@ -375,7 +388,13 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
- rx_ring = &dp->rx_mon_status_refill_ring;
+ /* If rxdma1_enable is false, the mon_status_refill_ring
+ * isn't set up, so there is nothing to clean.
+ */
+ if (!ar->ab->hw_params.rxdma1_enable)
+ return 0;
+
+ rx_ring = &dp->rx_mon_status_refill_ring[0];
spin_lock_bh(&rx_ring->idr_lock);
idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
@@ -390,21 +409,27 @@ static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
idr_destroy(&rx_ring->bufs_idr);
spin_unlock_bh(&rx_ring->idr_lock);
+
return 0;
}
static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
rx_ring = &dp->rxdma_mon_buf_ring;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
- rx_ring = &dp->rx_mon_status_refill_ring;
- ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
+ }
+
return 0;
}
@@ -416,26 +441,32 @@ static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
int num_entries;
num_entries = rx_ring->refill_buf_ring.size /
- ath11k_hal_srng_get_entrysize(ringtype);
+ ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
rx_ring->bufs_max = num_entries;
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
- HAL_RX_BUF_RBM_SW3_BM, GFP_KERNEL);
+ HAL_RX_BUF_RBM_SW3_BM);
return 0;
}
static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ int i;
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
- rx_ring = &dp->rxdma_mon_buf_ring;
- ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+ if (ar->ab->hw_params.rxdma1_enable) {
+ rx_ring = &dp->rxdma_mon_buf_ring;
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
+ }
- rx_ring = &dp->rx_mon_status_refill_ring;
- ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ rx_ring = &dp->rx_mon_status_refill_ring[i];
+ ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
+ }
return 0;
}
@@ -443,11 +474,21 @@ static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
+ int i;
- ath11k_dp_srng_cleanup(ar->ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_err_dst_ring);
- ath11k_dp_srng_cleanup(ar->ab, &dp->rx_mon_status_refill_ring.refill_buf_ring);
- ath11k_dp_srng_cleanup(ar->ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
+ ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ if (ab->hw_params.rx_mac_buf_ring)
+ ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
+ ath11k_dp_srng_cleanup(ab,
+ &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
+ }
+
+ ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
}
void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
@@ -486,7 +527,9 @@ err_reo_cleanup:
static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct dp_srng *srng = NULL;
+ int i;
int ret;
ret = ath11k_dp_srng_setup(ar->ab,
@@ -498,24 +541,55 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
return ret;
}
- ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring,
- HAL_RXDMA_DST, 0, dp->mac_id,
- DP_RXDMA_ERR_DST_RING_SIZE);
- if (ret) {
- ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring\n");
- return ret;
+ if (ar->ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab,
+ &dp->rx_mac_buf_ring[i],
+ HAL_RXDMA_BUF, 1,
+ dp->mac_id + i, 1024);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
+ i);
+ return ret;
+ }
+ }
}
- srng = &dp->rx_mon_status_refill_ring.refill_buf_ring;
- ret = ath11k_dp_srng_setup(ar->ab,
- srng,
- HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id,
- DP_RXDMA_MON_STATUS_RING_SIZE);
- if (ret) {
- ath11k_warn(ar->ab,
- "failed to setup rx_mon_status_refill_ring\n");
- return ret;
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
+ HAL_RXDMA_DST, 0, dp->mac_id + i,
+ DP_RXDMA_ERR_DST_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
+ return ret;
+ }
}
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
+ ret = ath11k_dp_srng_setup(ar->ab,
+ srng,
+ HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
+ DP_RXDMA_MON_STATUS_RING_SIZE);
+ if (ret) {
+ ath11k_warn(ar->ab,
+ "failed to setup rx_mon_status_refill_ring %d\n", i);
+ return ret;
+ }
+ }
+
+ /* If rxdma1_enable is false, there is no need to set up
+ * rxdma_mon_buf_ring, rxdma_mon_dst_ring or rxdma_mon_desc_ring;
+ * only the monitor status reap timer is initialized (e.g. for
+ * QCA6390).
+ */
+ if (!ar->ab->hw_params.rxdma1_enable) {
+ /* init the mon status buffer reap timer */
+ timer_setup(&ar->ab->mon_reap_timer,
+ ath11k_dp_service_mon_ring, 0);
+ return 0;
+ }
+
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_buf_ring.refill_buf_ring,
HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
@@ -1377,9 +1451,8 @@ ath11k_update_per_peer_tx_stats(struct ath11k *ar,
HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
- if (ath11k_debug_is_extd_tx_stats_enabled(ar))
- ath11k_accumulate_per_peer_tx_stats(arsta,
- peer_stats, rate_idx);
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
+ ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
}
spin_unlock_bh(&ab->base_lock);
@@ -1421,7 +1494,7 @@ struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
}
spin_unlock_bh(&ar->data_lock);
- ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_KERNEL);
+ ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
if (!ppdu_info)
return NULL;
@@ -1455,7 +1528,7 @@ static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
goto exit;
}
- if (ath11k_debug_is_pktlog_lite_mode_enabled(ar))
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
@@ -1577,11 +1650,23 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
resp->peer_map_ev.info1);
ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
peer_mac_h16, mac_addr);
+ ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0);
+ break;
+ case HTT_T2H_MSG_TYPE_PEER_MAP2:
+ vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
+ resp->peer_map_ev.info);
+ peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
+ resp->peer_map_ev.info);
+ peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
+ resp->peer_map_ev.info1);
+ ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
+ peer_mac_h16, mac_addr);
ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
resp->peer_map_ev.info2);
ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash);
break;
case HTT_T2H_MSG_TYPE_PEER_UNMAP:
+ case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
resp->peer_unmap_ev.info);
ath11k_peer_unmap_event(ab, peer_id);
@@ -1590,7 +1675,7 @@ void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
ath11k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
- ath11k_dbg_htt_ext_stats_handler(ab, skb);
+ ath11k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_PKTLOG:
ath11k_htt_pktlog(ab, skb);
@@ -2065,8 +2150,6 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
mcast = is_multicast_ether_addr(hdr->addr1);
fill_crypto_hdr = mcast;
- is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
-
spin_lock_bh(&ar->ab->base_lock);
peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
if (peer) {
@@ -2080,6 +2163,8 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
spin_unlock_bh(&ar->ab->base_lock);
err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_desc);
+ if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
+ is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_desc);
/* Clear per-MPDU flags while leaving per-PPDU flags intact */
rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
@@ -2282,6 +2367,9 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
+ ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
+ msdu->data, msdu->len);
+
/* TODO: trace rx packet */
ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
@@ -2526,7 +2614,7 @@ try_again:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
}
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
@@ -2608,7 +2696,7 @@ static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
struct dp_rxdma_ring *rx_ring,
- int *buf_id, gfp_t gfp)
+ int *buf_id)
{
struct sk_buff *skb;
dma_addr_t paddr;
@@ -2633,7 +2721,7 @@ static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
spin_lock_bh(&rx_ring->idr_lock);
*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
- rx_ring->bufs_max, gfp);
+ rx_ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&rx_ring->idr_lock);
if (*buf_id < 0)
goto fail_dma_unmap;
@@ -2653,8 +2741,7 @@ fail_alloc_skb:
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp)
+ enum hal_rx_buf_return_buf_manager mgr)
{
struct hal_srng *srng;
u32 *desc;
@@ -2680,7 +2767,7 @@ int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
while (num_remain > 0) {
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
- &buf_id, gfp);
+ &buf_id);
if (!skb)
break;
paddr = ATH11K_SKB_RXCB(skb)->paddr;
@@ -2719,20 +2806,25 @@ fail_desc_get:
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
- struct ath11k_pdev_dp *dp = &ar->dp;
- struct dp_rxdma_ring *rx_ring = &dp->rx_mon_status_refill_ring;
+ struct ath11k *ar;
+ struct ath11k_pdev_dp *dp;
+ struct dp_rxdma_ring *rx_ring;
struct hal_srng *srng;
void *rx_mon_status_desc;
struct sk_buff *skb;
struct ath11k_skb_rxcb *rxcb;
struct hal_tlv_hdr *tlv;
u32 cookie;
- int buf_id;
+ int buf_id, srng_id;
dma_addr_t paddr;
u8 rbm;
int num_buffs_reaped = 0;
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ dp = &ar->dp;
+ srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
+ rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
+
srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
spin_lock_bh(&srng->lock);
@@ -2786,7 +2878,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
}
move_next:
skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
- &buf_id, GFP_ATOMIC);
+ &buf_id);
if (!skb) {
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
@@ -2813,7 +2905,7 @@ move_next:
int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
enum hal_rx_mon_status hal_status;
struct sk_buff *skb;
struct sk_buff_head skb_list;
@@ -2833,7 +2925,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
- if (ath11k_debug_is_pktlog_rx_stats_enabled(ar))
+ if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
@@ -2861,7 +2953,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
arsta = (struct ath11k_sta *)peer->sta->drv_priv;
ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
- if (ath11k_debug_is_pktlog_peer_valid(ar, peer->addr))
+ if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
spin_unlock_bh(&ab->base_lock);
@@ -3599,7 +3691,7 @@ exit:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
}
return tot_n_bufs_reaped;
@@ -3709,8 +3801,7 @@ static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
* instead, it is good to drop such packets in mac80211
* after incrementing the replay counters.
*/
-
- /* fall through */
+ fallthrough;
default:
/* TODO: Review other errors and process them to mac80211
* as appropriate.
@@ -3820,7 +3911,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
int total_num_buffs_reaped = 0;
int ret, i;
- for (i = 0; i < MAX_RADIOS; i++)
+ for (i = 0; i < ab->num_radios; i++)
__skb_queue_head_init(&msdu_list[i]);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
@@ -3896,7 +3987,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
}
rcu_read_lock();
@@ -3923,9 +4014,9 @@ done:
int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
- struct dp_srng *err_ring = &ar->dp.rxdma_err_dst_ring;
- struct dp_rxdma_ring *rx_ring = &ar->dp.rx_refill_buf_ring;
+ struct ath11k *ar;
+ struct dp_srng *err_ring;
+ struct dp_rxdma_ring *rx_ring;
struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
struct hal_srng *srng;
u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
@@ -3944,6 +4035,11 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
int i;
int buf_id;
+ ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
+ err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
+ mac_id)];
+ rx_ring = &ar->dp.rx_refill_buf_ring;
+
srng = &ab->hal.srng_list[err_ring->ring_id];
spin_lock_bh(&srng->lock);
@@ -4000,7 +4096,7 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
if (num_buf_freed)
ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ HAL_RX_BUF_RBM_SW3_BM);
return budget - quota;
}
@@ -4097,6 +4193,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
struct ath11k *ar = ab->pdevs[mac_id].ar;
struct ath11k_pdev_dp *dp = &ar->dp;
u32 ring_id;
+ int i;
int ret;
ret = ath11k_dp_rx_pdev_srng_alloc(ar);
@@ -4119,14 +4216,33 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
return ret;
}
- ring_id = dp->rxdma_err_dst_ring.ring_id;
- ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_DST);
- if (ret) {
- ath11k_warn(ab, "failed to configure rxdma_err_dest_ring %d\n",
- ret);
- return ret;
+ if (ab->hw_params.rx_mac_buf_ring) {
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_BUF);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
+ }
+ }
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rxdma_err_dst_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
+ mac_id + i, HAL_RXDMA_DST);
+ if (ret) {
+ ath11k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
}
+ if (!ab->hw_params.rxdma1_enable)
+ goto config_refill_ring;
+
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id, HAL_RXDMA_MONITOR_BUF);
@@ -4151,15 +4267,20 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
ret);
return ret;
}
- ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id,
- HAL_RXDMA_MONITOR_STATUS);
- if (ret) {
- ath11k_warn(ab,
- "failed to configure mon_status_refill_ring %d\n",
- ret);
- return ret;
+
+config_refill_ring:
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS);
+ if (ret) {
+ ath11k_warn(ab,
+ "failed to configure mon_status_refill_ring%d %d\n",
+ i, ret);
+ return ret;
+ }
}
+
return 0;
}
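Throughout ath11k_dp_rx_pdev_alloc the rings are now configured in loops over num_rxmda_per_pdev, passing `mac_id + i` so that each rxdma engine gets its own HTT mac id. A minimal sketch of that addressing; setup_one is a hypothetical stand-in for ath11k_dp_tx_htt_srng_setup:

static void setup_pdev_rings(int mac_id, int num_rxdma_per_pdev,
			     void (*setup_one)(int htt_mac_id))
{
	int i;

	for (i = 0; i < num_rxdma_per_pdev; i++)
		setup_one(mac_id + i);	/* ring i belongs to mac_id + i */
}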
@@ -4185,8 +4306,13 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
void *src_srng_desc;
int ret = 0;
- dp_srng = &dp->rxdma_mon_desc_ring;
- hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ if (ar->ab->hw_params.rxdma1_enable) {
+ dp_srng = &dp->rxdma_mon_desc_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ } else {
+ dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
+ hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
+ }
ath11k_hal_srng_access_begin(ar->ab, hal_srng);
@@ -4210,16 +4336,16 @@ int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
static
void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
dma_addr_t *paddr, u32 *sw_cookie,
+ u8 *rbm,
void **pp_buf_addr_info)
{
struct hal_rx_msdu_link *msdu_link =
(struct hal_rx_msdu_link *)rx_msdu_link_desc;
struct ath11k_buffer_addr *buf_addr_info;
- u8 rbm = 0;
buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
- ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, &rbm);
+ ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
*pp_buf_addr_info = (void *)buf_addr_info;
}
@@ -4330,7 +4456,7 @@ static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
}
static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
+ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
void *ring_entry, struct sk_buff **head_msdu,
struct sk_buff **tail_msdu, u32 *npackets,
u32 *ppdu_id)
@@ -4355,9 +4481,15 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
struct hal_reo_entrance_ring *ent_desc =
(struct hal_reo_entrance_ring *)ring_entry;
int buf_id;
+ u32 rx_link_buf_info[2];
+ u8 rbm;
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ rx_ring = &dp->rx_refill_buf_ring;
ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
- &sw_cookie, &p_last_buf_addr_info,
+ &sw_cookie,
+ &p_last_buf_addr_info, &rbm,
&msdu_cnt);
if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
@@ -4383,9 +4515,14 @@ ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar,
return rx_bufs_used;
}
- rx_msdu_link_desc =
- (void *)pmon->link_desc_banks[sw_cookie].vaddr +
- (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+ if (ar->ab->hw_params.rxdma1_enable)
+ rx_msdu_link_desc =
+ (void *)pmon->link_desc_banks[sw_cookie].vaddr +
+ (paddr - pmon->link_desc_banks[sw_cookie].paddr);
+ else
+ rx_msdu_link_desc =
+ (void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
+ (paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
&num_msdus);
@@ -4481,15 +4618,22 @@ next_msdu:
spin_unlock_bh(&rx_ring->idr_lock);
}
+ ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
+
ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
- &sw_cookie,
+ &sw_cookie, &rbm,
&p_buf_addr_info);
- if (ath11k_dp_rx_monitor_link_desc_return(ar,
- p_last_buf_addr_info,
- dp->mac_id))
- ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "dp_rx_monitor_link_desc_return failed");
+ if (ar->ab->hw_params.rxdma1_enable) {
+ if (ath11k_dp_rx_monitor_link_desc_return(ar,
+ p_last_buf_addr_info,
+ dp->mac_id))
+ ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
+ "dp_rx_monitor_link_desc_return failed");
+ } else {
+ ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
+ HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
+ }
p_last_buf_addr_info = p_buf_addr_info;
@@ -4673,8 +4817,8 @@ mon_deliver_fail:
return -EINVAL;
}
-static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
- struct napi_struct *napi)
+static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
+ u32 quota, struct napi_struct *napi)
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
@@ -4682,10 +4826,16 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
void *mon_dst_srng;
u32 ppdu_id;
u32 rx_bufs_used;
+ u32 ring_id;
struct ath11k_pdev_mon_stats *rx_mon_stats;
u32 npackets = 0;
- mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+ if (ar->ab->hw_params.rxdma1_enable)
+ ring_id = dp->rxdma_mon_dst_ring.ring_id;
+ else
+ ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
+
+ mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
if (!mon_dst_srng) {
ath11k_warn(ar->ab,
@@ -4708,7 +4858,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
head_msdu = NULL;
tail_msdu = NULL;
- rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, ring_entry,
+ rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
&head_msdu,
&tail_msdu,
&npackets, &ppdu_id);
@@ -4735,15 +4885,21 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, u32 quota,
if (rx_bufs_used) {
rx_mon_stats->dest_ppdu_done++;
- ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
- &dp->rxdma_mon_buf_ring,
- rx_bufs_used,
- HAL_RX_BUF_RBM_SW3_BM, GFP_ATOMIC);
+ if (ar->ab->hw_params.rxdma1_enable)
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rxdma_mon_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
+ else
+ ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
+ &dp->rx_refill_buf_ring,
+ rx_bufs_used,
+ HAL_RX_BUF_RBM_SW3_BM);
}
}
static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
- u32 quota,
+ int mac_id, u32 quota,
struct napi_struct *napi)
{
struct ath11k_pdev_dp *dp = &ar->dp;
@@ -4767,7 +4923,7 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
if (tlv_status == HAL_TLV_STATUS_PPDU_DONE) {
rx_mon_stats->status_ppdu_done++;
pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
- ath11k_dp_rx_mon_dest_process(ar, quota, napi);
+ ath11k_dp_rx_mon_dest_process(ar, mac_id, quota, napi);
pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
}
dev_kfree_skb_any(status_skb);
@@ -4777,15 +4933,15 @@ static void ath11k_dp_rx_mon_status_process_tlv(struct ath11k *ar,
static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
int num_buffs_reaped = 0;
- num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, dp->mac_id, &budget,
+ num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ar->ab, mac_id, &budget,
&pmon->rx_status_q);
if (num_buffs_reaped)
- ath11k_dp_rx_mon_status_process_tlv(ar, budget, napi);
+ ath11k_dp_rx_mon_status_process_tlv(ar, mac_id, budget, napi);
return num_buffs_reaped;
}
@@ -4793,7 +4949,7 @@ static int ath11k_dp_mon_process_rx(struct ath11k_base *ab, int mac_id,
int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct napi_struct *napi, int budget)
{
- struct ath11k *ar = ab->pdevs[mac_id].ar;
+ struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
@@ -4832,9 +4988,15 @@ int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
return ret;
}
+ /* if rxdma1_enable is false, no need to set up
+ * rxdma_mon_desc_ring.
+ */
+ if (!ar->ab->hw_params.rxdma1_enable)
+ return 0;
+
dp_srng = &dp->rxdma_mon_desc_ring;
n_link_desc = dp_srng->size /
- ath11k_hal_srng_get_entrysize(HAL_RXDMA_MONITOR_DESC);
+ ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
mon_desc_srng =
&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
@@ -4848,6 +5010,7 @@ int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
pmon->mon_last_linkdesc_paddr = 0;
pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
spin_lock_init(&pmon->mon_lock);
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index 88bbcae14e34..fbea45f79c9b 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -74,8 +74,7 @@ int ath11k_dp_process_rx(struct ath11k_base *ab, int mac_id,
int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp);
+ enum hal_rx_buf_return_buf_manager mgr);
int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
const void *ptr, void *data),
@@ -87,8 +86,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
struct dp_rxdma_ring *rx_ring,
int req_entries,
- enum hal_rx_buf_return_buf_manager mgr,
- gfp_t gfp);
+ enum hal_rx_buf_return_buf_manager mgr);
int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar);
int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar);
int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 1af76775b1a8..3d962eee4d61 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -6,6 +6,7 @@
#include "core.h"
#include "dp_tx.h"
#include "debug.h"
+#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
@@ -13,8 +14,12 @@ static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ath11k_base *ab = arvif->ar->ab;
- if (tx_info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ return HAL_TCL_ENCAP_TYPE_RAW;
+
+ if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
return HAL_TCL_ENCAP_TYPE_ETHERNET;
return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
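Encap selection now gives the device-global raw-mode flag top priority, then falls back to the per-frame HW-encap flag. A standalone sketch of that ordering, with stand-in types for the driver's enums:

enum encap { ENCAP_RAW, ENCAP_ETHERNET, ENCAP_NATIVE_WIFI };

static enum encap pick_encap(int dev_raw_mode, int frame_hw_encap)
{
	/* raw mode is a device-wide override */
	if (dev_raw_mode)
		return ENCAP_RAW;
	/* otherwise the per-frame flag decides */
	return frame_hw_encap ? ENCAP_ETHERNET : ENCAP_NATIVE_WIFI;
}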
@@ -79,6 +84,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
struct ath11k_dp *dp = &ab->dp;
struct hal_tx_info ti = {0};
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct hal_srng *tcl_ring;
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -93,7 +99,7 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
!ieee80211_is_data(hdr->frame_control))
return -ENOTSUPP;
@@ -110,7 +116,12 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
tcl_ring_sel:
tcl_ring_retry = false;
- ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+ /* Some chips can only use tcl0 for tx */
+ if (ar->ab->hw_params.tcl_0_only)
+ ti.ring_id = 0;
+ else
+ ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+
ring_map |= BIT(ti.ring_id);
tx_ring = &dp->tx_ring[ti.ring_id];
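Ring selection pins TCL0 on hardware flagged tcl_0_only and keeps the round-robin otherwise. A sketch, assuming DP_TCL_NUM_RING_MAX is 3 as in the driver:

#define DP_TCL_NUM_RING_MAX 3	/* assumed from the driver's dp.h */

static int pick_tcl_ring(int tcl_0_only, unsigned int ring_selector)
{
	/* constrained hw transmits on TCL0 only; others round-robin */
	return tcl_0_only ? 0 : ring_selector % DP_TCL_NUM_RING_MAX;
}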
@@ -137,11 +148,17 @@ tcl_ring_sel:
ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
ti.meta_data_flags = arvif->tcl_metadata;
- if (info->control.hw_key)
- ti.encrypt_type =
- ath11k_dp_tx_get_encrypt_type(info->control.hw_key->cipher);
- else
- ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
+ if (key) {
+ ti.encrypt_type =
+ ath11k_dp_tx_get_encrypt_type(key->cipher);
+
+ if (ieee80211_has_protected(hdr->frame_control))
+ skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ } else {
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+ }
ti.addr_search_flags = arvif->hal_addr_search_flags;
ti.search_type = arvif->search_type;
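In raw mode with a HW key the driver now reserves tailroom for the MIC that hardware appends; IEEE80211_CCMP_MIC_LEN is 8 bytes. A sketch of the length accounting, assuming CCMP (other ciphers would need their own MIC length):

#include <stddef.h>

#define IEEE80211_CCMP_MIC_LEN 8

static size_t raw_tx_len(size_t payload_len, int protected_frame)
{
	/* hardware writes the MIC in-place after the payload, so the
	 * host must account for it before handing the frame down
	 */
	return payload_len + (protected_frame ? IEEE80211_CCMP_MIC_LEN : 0);
}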
@@ -151,7 +168,8 @@ tcl_ring_sel:
ti.bss_ast_hash = arvif->ast_hash;
ti.dscp_tid_tbl_idx = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
@@ -171,10 +189,11 @@ tcl_ring_sel:
ath11k_dp_tx_encap_nwifi(skb);
break;
case HAL_TCL_ENCAP_TYPE_RAW:
- /* TODO: for CHECKSUM_PARTIAL case in raw mode, HW checksum offload
- * is not applicable, hence manual checksum calculation using
- * skb_checksum_help() is needed
- */
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ret = -EINVAL;
+ goto fail_remove_idr;
+ }
+ break;
case HAL_TCL_ENCAP_TYPE_ETHERNET:
/* no need to encap */
break;
@@ -221,7 +240,8 @@ tcl_ring_sel:
* checking this ring earlier for each pkt tx.
* Restart ring selection if some rings are not checked yet.
*/
- if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+ if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1) &&
+ !ar->ab->hw_params.tcl_0_only) {
tcl_ring_retry = true;
ring_selector++;
}
@@ -234,8 +254,13 @@ tcl_ring_sel:
ath11k_hal_srng_access_end(ab, tcl_ring);
+ ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);
+
spin_unlock_bh(&tcl_ring->lock);
+ ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
+ skb->data, skb->len);
+
atomic_inc(&ar->dp.num_tx_pending);
return 0;
@@ -346,7 +371,6 @@ ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
status_desc->info0);
-
switch (wbm_status) {
case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
@@ -436,7 +460,7 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
- if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
if (ar->last_ppdu_id == 0) {
ar->last_ppdu_id = ts->ppdu_id;
@@ -444,12 +468,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
ar->cached_ppdu_id == ar->last_ppdu_id) {
ar->cached_ppdu_id = ar->last_ppdu_id;
ar->cached_stats.is_ampdu = true;
- ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
} else {
ar->cached_stats.is_ampdu = false;
- ath11k_update_per_peer_stats_from_txcompl(ar, msdu, ts);
+ ath11k_debugfs_sta_update_txcompl(ar, msdu, ts);
memset(&ar->cached_stats, 0,
sizeof(struct ath11k_per_peer_tx_stats));
}
@@ -514,6 +538,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
u32 msdu_id;
u8 mac_id;
+ spin_lock_bh(&status_ring->lock);
+
ath11k_hal_srng_access_begin(ab, status_ring);
while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
@@ -533,6 +559,8 @@ void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
ath11k_hal_srng_access_end(ab, status_ring);
+ spin_unlock_bh(&status_ring->lock);
+
while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
struct hal_wbm_release_ring *tx_status;
u32 desc_id;
@@ -633,14 +661,28 @@ ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
switch (ring_type) {
case HAL_RXDMA_BUF:
lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;
- if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
- lmac_ring_id_offset) ||
- ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
- lmac_ring_id_offset))) {
- ret = -EINVAL;
+
+ /* For QCA6390, the host fills rx buffers to the fw, and the fw
+ * fills the rxbuf ring for each rxdma
+ */
+ if (!ab->hw_params.rx_mac_buf_ring) {
+ if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
+ lmac_ring_id_offset) ||
+ ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
+ lmac_ring_id_offset))) {
+ ret = -EINVAL;
+ }
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ } else {
+ if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
+ *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
+ *htt_ring_type = HTT_SW_TO_SW_RING;
+ } else {
+ *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
+ *htt_ring_type = HTT_SW_TO_HW_RING;
+ }
}
- *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
break;
case HAL_RXDMA_DST:
*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
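For HAL_RXDMA_BUF the HTT ring id and type now depend on whether the hardware uses host-filled rx mac buffer rings. A sketch of the selection, using stand-in constants for the driver's HTT enums:

enum htt_ring_id { HTT_RXDMA_HOST_BUF_RING, HTT_HOST1_TO_FW_RXBUF_RING };
enum htt_ring_type { HTT_SW_TO_HW_RING, HTT_SW_TO_SW_RING };

static void pick_htt_ring(int hw_has_rx_mac_buf_ring, int is_sw2rxdma0_buf,
			  enum htt_ring_id *id, enum htt_ring_type *type)
{
	if (hw_has_rx_mac_buf_ring && is_sw2rxdma0_buf) {
		/* host refills fw, fw feeds each rxdma (QCA6390 style) */
		*id = HTT_HOST1_TO_FW_RXBUF_RING;
		*type = HTT_SW_TO_SW_RING;
	} else {
		/* host feeds the hw ring directly */
		*id = HTT_RXDMA_HOST_BUF_RING;
		*type = HTT_SW_TO_HW_RING;
	}
}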
@@ -720,7 +762,7 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT;
- ret = ath11k_hal_srng_get_entrysize(ring_type);
+ ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
if (ret < 0)
goto err_free;
@@ -750,9 +792,9 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
HAL_ADDR_MSB_REG_SHIFT;
- cmd->ring_msi_addr_lo = 0;
- cmd->ring_msi_addr_hi = 0;
- cmd->msi_data = 0;
+ cmd->ring_msi_addr_lo = params.msi_addr & 0xffffffff;
+ cmd->ring_msi_addr_hi = ((u64)params.msi_addr >> 32) & 0xffffffff;
+ cmd->msi_data = params.msi_data;
cmd->intr_info = FIELD_PREP(
HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
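Instead of zeroing the MSI fields, the setup command now carries the ring's MSI address split into 32-bit halves. A standalone check of that split; the address value is hypothetical:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t msi_addr = 0xfee00000ULL;	/* hypothetical MSI target */
	uint32_t lo = msi_addr & 0xffffffff;
	uint32_t hi = (msi_addr >> 32) & 0xffffffff;

	printf("lo 0x%08x hi 0x%08x\n", (unsigned)lo, (unsigned)hi);
	/* reassembling must give the original address back */
	return (((uint64_t)hi << 32) | lo) == msi_addr ? 0 : 1;
}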
@@ -768,6 +810,15 @@ int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
params.low_threshold);
}
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
+ __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
+ cmd->msi_data);
+
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
+ ring_id, ring_type, cmd->intr_info, cmd->info2);
+
ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
if (ret)
goto err_free;
@@ -832,24 +883,27 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
int len = sizeof(*cmd);
u8 pdev_mask;
int ret;
-
- skb = ath11k_htc_alloc_skb(ab, len);
- if (!skb)
- return -ENOMEM;
-
- skb_put(skb, len);
- cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
- cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
- HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
-
- pdev_mask = 1 << (ar->pdev_idx);
- cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
- cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
-
- ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
- if (ret) {
- dev_kfree_skb_any(skb);
- return ret;
+ int i;
+
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ skb = ath11k_htc_alloc_skb(ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
+ cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
+ HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);
+
+ pdev_mask = 1 << (i + 1);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
+ cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);
+
+ ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
}
return 0;
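The PPDU stats request is now issued once per rxdma engine, each carrying a one-hot pdev mask. A sketch of the mask computation; that firmware numbers these pdev ids from 1 is an assumption read off the `i + 1` shift:

/* rxdma 0 gets mask 0x2, rxdma 1 gets 0x4, ... bit 0 is skipped */
static unsigned int ppdu_stats_pdev_mask(int rxdma_idx)
{
	return 1u << (rxdma_idx + 1);
}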
@@ -968,8 +1022,9 @@ ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
struct ath11k_pdev_dp *dp = &ar->dp;
+ struct ath11k_base *ab = ar->ab;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
- int ret = 0, ring_id = 0;
+ int ret = 0, ring_id = 0, i;
ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
@@ -991,23 +1046,44 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
HTT_RX_MON_MO_DATA_FILTER_FLASG3;
}
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
- HAL_RXDMA_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
+ if (ab->hw_params.rxdma1_enable) {
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
+ HAL_RXDMA_MONITOR_BUF,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ } else if (!reset) {
+ /* set in monitor mode only */
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mac_buf_ring[i].ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_BUF,
+ 1024,
+ &tlv_filter);
+ }
+ }
+
if (ret)
return ret;
- ring_id = dp->rx_mon_status_refill_ring.refill_buf_ring.ring_id;
- if (!reset)
- tlv_filter.rx_filter =
- HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
- else
- tlv_filter = ath11k_mac_mon_status_filter_default;
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ if (!reset)
+ tlv_filter.rx_filter =
+ HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
+ else
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+ dp->mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RXDMA_REFILL_RING_SIZE,
+ &tlv_filter);
+ }
+
+ if (!ar->ab->hw_params.rxdma1_enable)
+ mod_timer(&ar->ab->mon_reap_timer, jiffies +
+ msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
- ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
return ret;
}
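When rxdma1 is absent the driver falls back to a periodic reap timer instead of per-ring monitor interrupts. A minimal sketch of such a self-rearming kernel timer; the interval constant mirrors the driver's ATH11K_MON_TIMER_INTERVAL in name only, and its value here is hypothetical:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define MON_TIMER_INTERVAL_MS 10	/* hypothetical interval */

static struct timer_list mon_reap_timer;

static void mon_reap_fn(struct timer_list *t)
{
	/* reap the monitor status rings here, then re-arm */
	mod_timer(&mon_reap_timer,
		  jiffies + msecs_to_jiffies(MON_TIMER_INTERVAL_MS));
}

static void mon_reap_start(void)
{
	timer_setup(&mon_reap_timer, mon_reap_fn, 0);
	mod_timer(&mon_reap_timer,
		  jiffies + msecs_to_jiffies(MON_TIMER_INTERVAL_MS));
}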
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index d63785178afa..9904c0eb7587 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -8,7 +8,7 @@
#include "hal_desc.h"
#include "hif.h"
-static const struct hal_srng_config hw_srng_config[] = {
+static const struct hal_srng_config hw_srng_config_template[] = {
/* TODO: max_rings can be populated by querying HW capabilities */
{ /* REO_DST */
.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
@@ -16,14 +16,6 @@ static const struct hal_srng_config hw_srng_config[] = {
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP,
- },
- .reg_size = {
- HAL_REO2_RING_BASE_LSB - HAL_REO1_RING_BASE_LSB,
- HAL_REO2_RING_HP - HAL_REO1_RING_HP,
- },
.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_EXCEPTION */
@@ -36,10 +28,6 @@ static const struct hal_srng_config hw_srng_config[] = {
.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP,
- },
.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_REINJECT */
@@ -48,10 +36,6 @@ static const struct hal_srng_config hw_srng_config[] = {
.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP,
- },
.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_CMD */
@@ -61,10 +45,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_reo_get_queue_stats)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP,
- },
.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
},
{ /* REO_STATUS */
@@ -74,11 +54,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_REO_REG +
- HAL_REO_STATUS_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP,
- },
.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_DATA */
@@ -88,14 +63,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_tcl_data_cmd)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP,
- },
- .reg_size = {
- HAL_TCL2_RING_BASE_LSB - HAL_TCL1_RING_BASE_LSB,
- HAL_TCL2_RING_HP - HAL_TCL1_RING_HP,
- },
.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_CMD */
@@ -105,10 +72,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_tcl_gse_cmd)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_SRC,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP,
- },
.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
},
{ /* TCL_STATUS */
@@ -118,11 +81,6 @@ static const struct hal_srng_config hw_srng_config[] = {
sizeof(struct hal_tcl_status_ring)) >> 2,
.lmac_ring = false,
.ring_dir = HAL_SRNG_DIR_DST,
- .reg_start = {
- HAL_SEQ_WCSS_UMAC_TCL_REG +
- HAL_TCL_STATUS_RING_BASE_LSB,
- HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP,
- },
.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
},
{ /* CE_SRC */
@@ -344,7 +302,7 @@ static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
struct hal_srng *srng, int ring_num)
{
- const struct hal_srng_config *srng_config = &hw_srng_config[HAL_CE_DST];
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
u32 addr;
u32 val;
@@ -371,33 +329,33 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath11k_hif_write32(ab, reg_base +
- HAL_REO1_RING_MSI1_BASE_LSB_OFFSET,
- (u32)srng->msi_addr);
+ HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath11k_hif_write32(ab, reg_base +
- HAL_REO1_RING_MSI1_BASE_MSB_OFFSET, val);
+ HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
ath11k_hif_write32(ab,
- reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET,
+ reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
srng->msi_data);
}
- ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
/* interrupt setup */
val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
@@ -408,21 +366,21 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
srng->entry_size));
ath11k_hif_write32(ab,
- reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET,
+ reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
val);
hp_addr = hal->rdp.paddr +
((unsigned long)srng->u.dst_ring.hp_addr -
(unsigned long)hal->rdp.vaddr);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
hp_addr & HAL_ADDR_LSB_REG_MASK);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET,
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
/* Initialize head and tail pointers to indicate ring is empty */
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
ath11k_hif_write32(ab, reg_base, 0);
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET, 0);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
*srng->u.dst_ring.hp_addr = 0;
reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
@@ -435,7 +393,7 @@ static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
val |= HAL_REO1_RING_MISC_MSI_SWAP;
val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
- ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
}
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
@@ -450,33 +408,33 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
ath11k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET,
- (u32)srng->msi_addr);
+ HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
+ srng->msi_addr);
val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
((u64)srng->msi_addr >>
HAL_ADDR_MSB_REG_SHIFT)) |
HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
ath11k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET,
+ HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
val);
ath11k_hif_write32(ab, reg_base +
- HAL_TCL1_RING_MSI1_DATA_OFFSET,
+ HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
srng->msi_data);
}
- ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
+ ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
((u64)srng->ring_base_paddr >>
HAL_ADDR_MSB_REG_SHIFT)) |
FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
(srng->entry_size * srng->num_entries));
- ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
- ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
/* interrupt setup */
/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
@@ -490,7 +448,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
srng->entry_size));
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
val);
val = 0;
@@ -499,7 +457,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
srng->u.src_ring.low_threshold);
}
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET,
+ reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
val);
if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
@@ -507,10 +465,10 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
((unsigned long)srng->u.src_ring.tp_addr -
(unsigned long)hal->rdp.vaddr);
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET,
+ reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
tp_addr & HAL_ADDR_LSB_REG_MASK);
ath11k_hif_write32(ab,
- reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET,
+ reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
}
@@ -534,7 +492,7 @@ static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
- ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET, val);
+ ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}
static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
@@ -550,7 +508,7 @@ static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
enum hal_ring_type type,
int ring_num, int mac_id)
{
- const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
int ring_id;
if (ring_num >= srng_config->max_rings) {
@@ -568,26 +526,26 @@ static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
return ring_id;
}
-int ath11k_hal_srng_get_entrysize(u32 ring_type)
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
{
- const struct hal_srng_config *srng_config;
+ struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
- srng_config = &hw_srng_config[ring_type];
+ srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->entry_size << 2);
}
-int ath11k_hal_srng_get_max_entries(u32 ring_type)
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
{
- const struct hal_srng_config *srng_config;
+ struct hal_srng_config *srng_config;
if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
return -EINVAL;
- srng_config = &hw_srng_config[ring_type];
+ srng_config = &ab->hal.srng_config[ring_type];
return (srng_config->max_size / srng_config->entry_size);
}
@@ -602,6 +560,8 @@ void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
params->intr_batch_cntr_thres_entries =
srng->intr_batch_cntr_thres_entries;
params->low_threshold = srng->u.src_ring.low_threshold;
+ params->msi_addr = srng->msi_addr;
+ params->msi_data = srng->msi_data;
params->flags = srng->flags;
}
@@ -1003,7 +963,7 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
struct hal_srng_params *params)
{
struct ath11k_hal *hal = &ab->hal;
- const struct hal_srng_config *srng_config = &hw_srng_config[type];
+ struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
struct hal_srng *srng;
int ring_id;
u32 lmac_idx;
@@ -1027,6 +987,8 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
params->intr_batch_cntr_thres_entries;
srng->intr_timer_thres_us = params->intr_timer_thres_us;
srng->flags = params->flags;
+ srng->msi_addr = params->msi_addr;
+ srng->msi_data = params->msi_data;
srng->initialized = 1;
spin_lock_init(&srng->lock);
@@ -1058,8 +1020,16 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
lmac_idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
} else {
- srng->u.src_ring.hp_addr =
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.src_ring.hp_addr =
(u32 *)((unsigned long)ab->mem + reg_base);
+ else
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "hal type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base,
+ (unsigned long)srng->u.src_ring.hp_addr -
+ (unsigned long)ab->mem);
}
} else {
/* During initialization loop count in all the descriptors
@@ -1083,9 +1053,18 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
lmac_idx);
srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
} else {
- srng->u.dst_ring.tp_addr =
+ if (!ab->hw_params.supports_shadow_regs)
+ srng->u.dst_ring.tp_addr =
(u32 *)((unsigned long)ab->mem + reg_base +
- (HAL_REO1_RING_TP - HAL_REO1_RING_HP));
+ (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
+ else
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
+ type, ring_num,
+ reg_base + (HAL_REO1_RING_TP(ab) -
+ HAL_REO1_RING_HP(ab)),
+ (unsigned long)srng->u.dst_ring.tp_addr -
+ (unsigned long)ab->mem);
}
}
@@ -1102,6 +1081,162 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
return ring_id;
}
+static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
+ int shadow_cfg_idx,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct hal_srng *srng;
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_id;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
+ if (ring_id < 0)
+ return;
+
+ srng = &hal->srng_list[ring_id];
+
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+ else
+ srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(shadow_cfg_idx) +
+ (unsigned long)ab->mem);
+}
+
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+ int shadow_cfg_idx = hal->num_shadow_reg_configured;
+ u32 target_reg;
+
+ if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
+ return -EINVAL;
+
+ hal->num_shadow_reg_configured++;
+
+ target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
+ target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
+ ring_num;
+
+ /* For destination ring, shadow the TP */
+ if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
+ target_reg += HAL_OFFSET_FROM_HP_TO_TP;
+
+ hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
+
+ /* update hp/tp addr in the hal structure */
+ ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
+ ring_num);
+
+ ath11k_dbg(ab, ATH11K_DBG_HAL,
+ "target_reg %x, shadow reg 0x%x shadow_idx 0x%x, ring_type %d, ring num %d",
+ target_reg,
+ HAL_SHADOW_REG(shadow_cfg_idx),
+ shadow_cfg_idx,
+ ring_type, ring_num);
+
+ return 0;
+}
+
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ int ring_type, ring_num;
+
+ /* update all the non-CE srngs. */
+ for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
+ struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
+
+ if (ring_type == HAL_CE_SRC ||
+ ring_type == HAL_CE_DST ||
+ ring_type == HAL_CE_DST_STATUS)
+ continue;
+
+ if (srng_config->lmac_ring)
+ continue;
+
+ for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
+ ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
+ }
+}
+
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len)
+{
+ struct ath11k_hal *hal = &ab->hal;
+
+ *len = hal->num_shadow_reg_configured;
+ *cfg = hal->shadow_reg_addr;
+}
+
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng)
+{
+ lockdep_assert_held(&srng->lock);
+
+ /* Check whether the ring is empty. Update the shadow
+ * HP only when the ring isn't empty.
+ */
+ if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
+ *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
+ ath11k_hal_srng_access_end(ab, srng);
+}
+
+static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
+{
+ struct ath11k_hal *hal = &ab->hal;
+ struct hal_srng_config *s;
+
+ hal->srng_config = kmemdup(hw_srng_config_template,
+ sizeof(hw_srng_config_template),
+ GFP_KERNEL);
+ if (!hal->srng_config)
+ return -ENOMEM;
+
+ s = &hal->srng_config[HAL_REO_DST];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
+ s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_EXCEPTION];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
+
+ s = &hal->srng_config[HAL_REO_REINJECT];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP;
+
+ s = &hal->srng_config[HAL_REO_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB;
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP;
+
+ s = &hal->srng_config[HAL_REO_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
+
+ s = &hal->srng_config[HAL_TCL_DATA];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
+ s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
+ s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_CMD];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
+
+ s = &hal->srng_config[HAL_TCL_STATUS];
+ s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
+ s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
+
+ return 0;
+}
+
int ath11k_hal_srng_init(struct ath11k_base *ab)
{
struct ath11k_hal *hal = &ab->hal;
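The SRNG configuration stops being one shared const table: each device now kmemdup()s the template and patches in its own register offsets from hw_params, which is what lets a single module drive parts with different register maps. A userspace sketch of the template-then-patch pattern, with malloc/memcpy standing in for kmemdup:

#include <stdlib.h>
#include <string.h>

struct srng_cfg {
	unsigned int reg_start[2];
	unsigned int reg_size[2];
};

static const struct srng_cfg template_cfg[2];	/* zeroed template */

static struct srng_cfg *create_config(unsigned int reo1_base,
				      unsigned int reo1_hp)
{
	struct srng_cfg *cfg = malloc(sizeof(template_cfg));

	if (!cfg)
		return NULL;
	memcpy(cfg, template_cfg, sizeof(template_cfg));
	/* patch in the device-specific offsets, as the real code does
	 * from ab->hw_params.regs
	 */
	cfg[0].reg_start[0] = reo1_base;
	cfg[0].reg_start[1] = reo1_hp;
	return cfg;
}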
@@ -1109,7 +1244,9 @@ int ath11k_hal_srng_init(struct ath11k_base *ab)
memset(hal, 0, sizeof(*hal));
- hal->srng_config = hw_srng_config;
+ ret = ath11k_hal_srng_create_config(ab);
+ if (ret)
+ goto err_hal;
ret = ath11k_hal_alloc_cont_rdp(ab);
if (ret)
@@ -1127,12 +1264,17 @@ err_free_cont_rdp:
err_hal:
return ret;
}
+EXPORT_SYMBOL(ath11k_hal_srng_init);
void ath11k_hal_srng_deinit(struct ath11k_base *ab)
{
+ struct ath11k_hal *hal = &ab->hal;
+
ath11k_hal_free_cont_rdp(ab);
ath11k_hal_free_cont_wrp(ab);
+ kfree(hal->srng_config);
}
+EXPORT_SYMBOL(ath11k_hal_srng_deinit);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
{
@@ -1142,10 +1284,10 @@ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
int i;
ath11k_err(ab, "Last interrupt received for each CE:\n");
- for (i = 0; i < CE_COUNT; i++) {
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
ce_pipe = &ab->ce.ce_pipe[i];
- if (ath11k_ce_get_attr_flags(i) & CE_ATTR_DIS_INTR)
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
continue;
ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 780a3e11b609..1f1b29cd0aa3 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -31,8 +31,12 @@ struct ath11k_base;
#define HAL_DSCP_TID_TBL_SIZE 24
/* calculate the register address from bar0 of shadow register x */
-#define SHADOW_BASE_ADDRESS 0x00003024
-#define SHADOW_NUM_REGISTERS 36
+#define HAL_SHADOW_BASE_ADDR 0x000008fc
+#define HAL_SHADOW_NUM_REGS 36
+#define HAL_HP_OFFSET_IN_REG_START 1
+#define HAL_OFFSET_FROM_HP_TO_TP 4
+
+#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))
/* WCSS Relative address */
#define HAL_SEQ_WCSS_UMAC_REO_REG 0x00a38000
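HAL_SHADOW_REG() maps a shadow slot to a register address by simple 4-byte striding from the new base. A standalone check of that arithmetic:

#include <assert.h>

#define HAL_SHADOW_BASE_ADDR 0x000008fc
#define HAL_SHADOW_REG(x) (HAL_SHADOW_BASE_ADDR + (4 * (x)))

int main(void)
{
	/* 32-bit registers laid out back to back from the base */
	assert(HAL_SHADOW_REG(0) == 0x8fc);
	assert(HAL_SHADOW_REG(1) == 0x900);
	assert(HAL_SHADOW_REG(35) == 0x988);	/* last of the 36 slots */
	return 0;
}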
@@ -46,40 +50,47 @@ struct ath11k_base;
/* SW2TCL(x) R0 ring configuration address */
#define HAL_TCL1_RING_CMN_CTRL_REG 0x00000014
#define HAL_TCL1_RING_DSCP_TID_MAP 0x0000002c
-#define HAL_TCL1_RING_BASE_LSB 0x00000510
-#define HAL_TCL1_RING_BASE_MSB 0x00000514
-#define HAL_TCL1_RING_ID 0x00000518
-#define HAL_TCL1_RING_MISC 0x00000520
-#define HAL_TCL1_RING_TP_ADDR_LSB 0x0000052c
-#define HAL_TCL1_RING_TP_ADDR_MSB 0x00000530
-#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 0x00000540
-#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 0x00000544
-#define HAL_TCL1_RING_MSI1_BASE_LSB 0x00000558
-#define HAL_TCL1_RING_MSI1_BASE_MSB 0x0000055c
-#define HAL_TCL1_RING_MSI1_DATA 0x00000560
-#define HAL_TCL2_RING_BASE_LSB 0x00000568
-#define HAL_TCL_RING_BASE_LSB 0x00000618
-
-#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET \
- (HAL_TCL1_RING_MSI1_BASE_LSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET \
- (HAL_TCL1_RING_MSI1_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MSI1_DATA_OFFSET \
- (HAL_TCL1_RING_MSI1_DATA - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_BASE_MSB_OFFSET \
- (HAL_TCL1_RING_BASE_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_ID_OFFSET \
- (HAL_TCL1_RING_ID - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0 - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET \
- (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1 - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET \
- (HAL_TCL1_RING_TP_ADDR_LSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET \
- (HAL_TCL1_RING_TP_ADDR_MSB - HAL_TCL1_RING_BASE_LSB)
-#define HAL_TCL1_RING_MISC_OFFSET \
- (HAL_TCL1_RING_MISC - HAL_TCL1_RING_BASE_LSB)
+#define HAL_TCL1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_lsb
+#define HAL_TCL1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_tcl1_ring_base_msb
+#define HAL_TCL1_RING_ID(ab) ab->hw_params.regs->hal_tcl1_ring_id
+#define HAL_TCL1_RING_MISC(ab) ab->hw_params.regs->hal_tcl1_ring_misc
+#define HAL_TCL1_RING_TP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_lsb
+#define HAL_TCL1_RING_TP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_tp_addr_msb
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix0
+#define HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_consumer_int_setup_ix1
+#define HAL_TCL1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_lsb
+#define HAL_TCL1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_base_msb
+#define HAL_TCL1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_tcl1_ring_msi1_data
+#define HAL_TCL2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl2_ring_base_lsb
+#define HAL_TCL_RING_BASE_LSB(ab) ab->hw_params.regs->hal_tcl_ring_base_lsb
+
+#define HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_TCL1_RING_MSI1_DATA(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_BASE_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_ID_OFFSET(ab) \
+ (HAL_TCL1_RING_ID(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX0(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab) \
+ (HAL_TCL1_RING_CONSUMER_INT_SETUP_IX1(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab) \
+ (HAL_TCL1_RING_TP_ADDR_MSB(ab) - HAL_TCL1_RING_BASE_LSB(ab))
+#define HAL_TCL1_RING_MISC_OFFSET(ab) \
+ (HAL_TCL1_RING_MISC(ab) - HAL_TCL1_RING_BASE_LSB(ab))
/* SW2TCL(x) R2 ring pointers (head/tail) address */
#define HAL_TCL1_RING_HP 0x00002000
@@ -91,7 +102,8 @@ struct ath11k_base;
(HAL_TCL1_RING_TP - HAL_TCL1_RING_HP)
/* TCL STATUS ring address */
-#define HAL_TCL_STATUS_RING_BASE_LSB 0x00000720
+#define HAL_TCL_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_tcl_status_ring_base_lsb
#define HAL_TCL_STATUS_RING_HP 0x00002030
/* REO2SW(x) R0 ring configuration address */
@@ -100,51 +112,63 @@ struct ath11k_base;
#define HAL_REO1_DEST_RING_CTRL_IX_1 0x00000008
#define HAL_REO1_DEST_RING_CTRL_IX_2 0x0000000c
#define HAL_REO1_DEST_RING_CTRL_IX_3 0x00000010
-#define HAL_REO1_RING_BASE_LSB 0x0000029c
-#define HAL_REO1_RING_BASE_MSB 0x000002a0
-#define HAL_REO1_RING_ID 0x000002a4
-#define HAL_REO1_RING_MISC 0x000002ac
-#define HAL_REO1_RING_HP_ADDR_LSB 0x000002b0
-#define HAL_REO1_RING_HP_ADDR_MSB 0x000002b4
-#define HAL_REO1_RING_PRODUCER_INT_SETUP 0x000002c0
-#define HAL_REO1_RING_MSI1_BASE_LSB 0x000002e4
-#define HAL_REO1_RING_MSI1_BASE_MSB 0x000002e8
-#define HAL_REO1_RING_MSI1_DATA 0x000002ec
-#define HAL_REO2_RING_BASE_LSB 0x000002f4
-#define HAL_REO1_AGING_THRESH_IX_0 0x00000564
-#define HAL_REO1_AGING_THRESH_IX_1 0x00000568
-#define HAL_REO1_AGING_THRESH_IX_2 0x0000056c
-#define HAL_REO1_AGING_THRESH_IX_3 0x00000570
-
-#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET \
- (HAL_REO1_RING_MSI1_BASE_LSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET \
- (HAL_REO1_RING_MSI1_BASE_MSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_MSI1_DATA_OFFSET \
- (HAL_REO1_RING_MSI1_DATA - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_BASE_MSB_OFFSET \
- (HAL_REO1_RING_BASE_MSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_ID_OFFSET (HAL_REO1_RING_ID - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET \
- (HAL_REO1_RING_PRODUCER_INT_SETUP - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET \
- (HAL_REO1_RING_HP_ADDR_LSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET \
- (HAL_REO1_RING_HP_ADDR_MSB - HAL_REO1_RING_BASE_LSB)
-#define HAL_REO1_RING_MISC_OFFSET (HAL_REO1_RING_MISC - HAL_REO1_RING_BASE_LSB)
+#define HAL_REO1_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo1_ring_base_lsb
+#define HAL_REO1_RING_BASE_MSB(ab) ab->hw_params.regs->hal_reo1_ring_base_msb
+#define HAL_REO1_RING_ID(ab) ab->hw_params.regs->hal_reo1_ring_id
+#define HAL_REO1_RING_MISC(ab) ab->hw_params.regs->hal_reo1_ring_misc
+#define HAL_REO1_RING_HP_ADDR_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_lsb
+#define HAL_REO1_RING_HP_ADDR_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_hp_addr_msb
+#define HAL_REO1_RING_PRODUCER_INT_SETUP(ab) \
+ ab->hw_params.regs->hal_reo1_ring_producer_int_setup
+#define HAL_REO1_RING_MSI1_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_lsb
+#define HAL_REO1_RING_MSI1_BASE_MSB(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_base_msb
+#define HAL_REO1_RING_MSI1_DATA(ab) \
+ ab->hw_params.regs->hal_reo1_ring_msi1_data
+#define HAL_REO2_RING_BASE_LSB(ab) ab->hw_params.regs->hal_reo2_ring_base_lsb
+#define HAL_REO1_AGING_THRESH_IX_0(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_0
+#define HAL_REO1_AGING_THRESH_IX_1(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_1
+#define HAL_REO1_AGING_THRESH_IX_2(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_2
+#define HAL_REO1_AGING_THRESH_IX_3(ab) \
+ ab->hw_params.regs->hal_reo1_aging_thresh_ix_3
+
+#define HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MSI1_DATA_OFFSET(ab) \
+ (HAL_REO1_RING_MSI1_DATA(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_BASE_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_BASE_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_ID_OFFSET(ab) (HAL_REO1_RING_ID(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab) \
+ (HAL_REO1_RING_PRODUCER_INT_SETUP(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab) \
+ (HAL_REO1_RING_HP_ADDR_MSB(ab) - HAL_REO1_RING_BASE_LSB(ab))
+#define HAL_REO1_RING_MISC_OFFSET(ab) \
+ (HAL_REO1_RING_MISC(ab) - HAL_REO1_RING_BASE_LSB(ab))
/* REO2SW(x) R2 ring pointers (head/tail) address */
-#define HAL_REO1_RING_HP 0x00003038
-#define HAL_REO1_RING_TP 0x0000303c
-#define HAL_REO2_RING_HP 0x00003040
+#define HAL_REO1_RING_HP(ab) ab->hw_params.regs->hal_reo1_ring_hp
+#define HAL_REO1_RING_TP(ab) ab->hw_params.regs->hal_reo1_ring_tp
+#define HAL_REO2_RING_HP(ab) ab->hw_params.regs->hal_reo2_ring_hp
-#define HAL_REO1_RING_TP_OFFSET (HAL_REO1_RING_TP - HAL_REO1_RING_HP)
+#define HAL_REO1_RING_TP_OFFSET(ab) (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab))
/* REO2TCL R0 ring configuration address */
-#define HAL_REO_TCL_RING_BASE_LSB 0x000003fc
+#define HAL_REO_TCL_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_tcl_ring_base_lsb
/* REO2TCL R2 ring pointer (head/tail) address */
-#define HAL_REO_TCL_RING_HP 0x00003058
+#define HAL_REO_TCL_RING_HP(ab) ab->hw_params.regs->hal_reo_tcl_ring_hp
/* REO CMD R0 address */
#define HAL_REO_CMD_RING_BASE_LSB 0x00000194
@@ -168,8 +192,9 @@ struct ath11k_base;
#define HAL_CE_DST_STATUS_RING_HP 0x00000408
/* REO status address */
-#define HAL_REO_STATUS_RING_BASE_LSB 0x00000504
-#define HAL_REO_STATUS_HP 0x00003070
+#define HAL_REO_STATUS_RING_BASE_LSB(ab) \
+ ab->hw_params.regs->hal_reo_status_ring_base_lsb
+#define HAL_REO_STATUS_HP(ab) ab->hw_params.regs->hal_reo_status_hp
/* WBM Idle R0 address */
#define HAL_WBM_IDLE_LINK_RING_BASE_LSB 0x00000860
@@ -458,6 +483,8 @@ struct hal_srng_params {
u32 flags;
u32 max_buffer_len;
u32 low_threshold;
+ dma_addr_t msi_addr;
+ u32 msi_data;
/* Add more params as needed */
};
@@ -839,7 +866,7 @@ struct ath11k_hal {
struct hal_srng srng_list[HAL_SRNG_RING_ID_MAX];
/* SRNG configuration table */
- const struct hal_srng_config *srng_config;
+ struct hal_srng_config *srng_config;
/* Remote pointer memory for HW/FW updates */
struct {
@@ -859,7 +886,7 @@ struct ath11k_hal {
u8 current_blk_index;
/* shadow register configuration */
- u32 shadow_reg_addr[SHADOW_NUM_REGISTERS];
+ u32 shadow_reg_addr[HAL_SHADOW_NUM_REGS];
int num_shadow_reg_configured;
};
@@ -885,8 +912,8 @@ void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
u8 byte_swap_data);
void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr);
u32 ath11k_hal_ce_dst_status_get_length(void *buf);
-int ath11k_hal_srng_get_entrysize(u32 ring_type);
-int ath11k_hal_srng_get_max_entries(u32 ring_type);
+int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type);
+int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type);
void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
struct hal_srng_params *params);
u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
@@ -912,5 +939,12 @@ int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
int ath11k_hal_srng_init(struct ath11k_base *ath11k);
void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
-
+void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len);
+int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
+ enum hal_ring_type ring_type,
+ int ring_num);
+void ath11k_hal_srng_shadow_config(struct ath11k_base *ab);
+void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
+ struct hal_srng *srng);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 129c9e1efeb9..fac2396edf32 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -256,6 +256,8 @@ int ath11k_hal_reo_cmd_send(struct ath11k_base *ab, struct hal_srng *srng,
break;
}
+ ath11k_dp_shadow_start_timer(ab, srng, &ab->dp.reo_cmd_timer);
+
out:
ath11k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
@@ -786,7 +788,7 @@ void ath11k_hal_reo_init_cmd_ring(struct ath11k_base *ab,
memset(&params, 0, sizeof(params));
- entry_size = ath11k_hal_srng_get_entrysize(HAL_REO_CMD);
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_REO_CMD);
ath11k_hal_srng_get_params(ab, srng, &params);
entry = (u8 *)params.ring_base_vaddr;
@@ -813,13 +815,13 @@ void ath11k_hal_reo_hw_setup(struct ath11k_base *ab, u32 ring_hash_map)
FIELD_PREP(HAL_REO1_GEN_ENABLE_AGING_FLUSH_ENABLE, 1);
ath11k_hif_write32(ab, reo_base + HAL_REO1_GEN_ENABLE, val);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_0(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_1(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_2(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
- ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3,
+ ath11k_hif_write32(ab, reo_base + HAL_REO1_AGING_THRESH_IX_3(ab),
HAL_DEFAULT_REO_TIMEOUT_USEC);
ath11k_hif_write32(ab, reo_base + HAL_REO1_DEST_RING_CTRL_IX_0,
@@ -1195,7 +1197,7 @@ ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
u32 *sw_cookie, void **pp_buf_addr,
- u32 *msdu_cnt)
+ u8 *rbm, u32 *msdu_cnt)
{
struct hal_reo_entrance_ring *reo_ent_ring =
(struct hal_reo_entrance_ring *)rx_desc;
@@ -1217,6 +1219,8 @@ void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc, dma_addr_t *paddr,
*sw_cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
buf_addr_info->info1);
+ *rbm = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ buf_addr_info->info1);
*pp_buf_addr = (void *)buf_addr_info;
}
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.h b/drivers/net/wireless/ath/ath11k/hal_rx.h
index c436191ae1e8..d464a270c049 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.h
@@ -321,7 +321,7 @@ void ath11k_hal_rx_reo_ent_paddr_get(struct ath11k_base *ab, void *desc,
dma_addr_t *paddr, u32 *desc_bank);
void ath11k_hal_rx_reo_ent_buf_paddr_get(void *rx_desc,
dma_addr_t *paddr, u32 *sw_cookie,
- void **pp_buf_addr_info,
+ void **pp_buf_addr_info, u8 *rbm,
u32 *msdu_cnt);
enum hal_rx_mon_status
ath11k_hal_rx_parse_mon_status(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.c b/drivers/net/wireless/ath/ath11k/hal_tx.c
index 81937c29ffca..a755aa86c5de 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.c
@@ -141,7 +141,7 @@ void ath11k_hal_tx_init_data_ring(struct ath11k_base *ab, struct hal_srng *srng)
memset(&params, 0, sizeof(params));
- entry_size = ath11k_hal_srng_get_entrysize(HAL_TCL_DATA);
+ entry_size = ath11k_hal_srng_get_entrysize(ab, HAL_TCL_DATA);
ath11k_hal_srng_get_params(ab, srng, &params);
desc = (u8 *)params.ring_base_vaddr;
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 165f7e51c238..dbe5568916e8 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -3,6 +3,9 @@
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
*/
+#ifndef _HIF_H_
+#define _HIF_H_
+
#include "core.h"
struct ath11k_hif_ops {
@@ -16,6 +19,11 @@ struct ath11k_hif_ops {
void (*power_down)(struct ath11k_base *sc);
int (*map_service_to_pipe)(struct ath11k_base *sc, u16 service_id,
u8 *ul_pipe, u8 *dl_pipe);
+ int (*get_user_msi_vector)(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+ void (*get_msi_address)(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi);
};
static inline int ath11k_hif_start(struct ath11k_base *sc)
@@ -63,3 +71,25 @@ static inline int ath11k_hif_map_service_to_pipe(struct ath11k_base *sc, u16 ser
{
return sc->hif.ops->map_service_to_pipe(sc, service_id, ul_pipe, dl_pipe);
}
+
+static inline int ath11k_get_user_msi_vector(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ if (!ab->hif.ops->get_user_msi_vector)
+ return -EOPNOTSUPP;
+
+ return ab->hif.ops->get_user_msi_vector(ab, user_name, num_vectors,
+ user_base_data,
+ base_vector);
+}
+
+static inline void ath11k_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ if (!ab->hif.ops->get_msi_address)
+ return;
+
+ ab->hif.ops->get_msi_address(ab, msi_addr_lo, msi_addr_hi);
+}
+#endif /* _HIF_H_ */
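
Both new ops are deliberately optional: the wrappers above fall back to -EOPNOTSUPP or a silent no-op, so bus backends without MSI support need not implement them. A sketch of how a backend might wire one up; the function name and return values are illustrative placeholders, not the driver's actual PCI code:

	static int ath11k_demo_get_user_msi_vector(struct ath11k_base *ab,
						   char *user_name,
						   int *num_vectors,
						   u32 *user_base_data,
						   u32 *base_vector)
	{
		/* A real backend would look up the MSI range reserved for
		 * user_name (for example "DP" or "CE") in its MSI config.
		 */
		*num_vectors = 1;
		*user_base_data = 0;
		*base_vector = 0;
		return 0;
	}

	static const struct ath11k_hif_ops ath11k_demo_hif_ops = {
		/* mandatory ops omitted for brevity */
		.get_user_msi_vector = ath11k_demo_get_user_msi_vector,
	};
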
diff --git a/drivers/net/wireless/ath/ath11k/htc.c b/drivers/net/wireless/ath/ath11k/htc.c
index ad13c648b679..6b57dc273e0b 100644
--- a/drivers/net/wireless/ath/ath11k/htc.c
+++ b/drivers/net/wireless/ath/ath11k/htc.c
@@ -50,15 +50,6 @@ static struct sk_buff *ath11k_htc_build_tx_ctrl_skb(void *ab)
return skb;
}
-static inline void ath11k_htc_restore_tx_skb(struct ath11k_htc *htc,
- struct sk_buff *skb)
-{
- struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
-
- dma_unmap_single(htc->ab->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
- skb_pull(skb, sizeof(struct ath11k_htc_hdr));
-}
-
static void ath11k_htc_prepare_tx_skb(struct ath11k_htc_ep *ep,
struct sk_buff *skb)
{
@@ -478,7 +469,7 @@ int ath11k_htc_wait_target(struct ath11k_htc *htc)
if (!time_left) {
ath11k_warn(ab, "failed to receive control response completion, polling..\n");
- for (i = 0; i < CE_COUNT; i++)
+ for (i = 0; i < ab->hw_params.ce_count; i++)
ath11k_ce_per_engine_service(htc->ab, i);
time_left =
@@ -524,6 +515,12 @@ int ath11k_htc_wait_target(struct ath11k_htc *htc)
return -ECOMM;
}
+ /* For QCA6390, wmi endpoint uses 1 credit to avoid
+ * back-to-back writes.
+ */
+ if (ab->hw_params.supports_shadow_regs)
+ htc->total_transmit_credits = 1;
+
ath11k_htc_setup_target_buffer_assignments(htc);
return 0;
@@ -748,7 +745,7 @@ int ath11k_htc_init(struct ath11k_base *ab)
htc->wmi_ep_count = 3;
break;
default:
- htc->wmi_ep_count = 3;
+ htc->wmi_ep_count = ab->hw_params.max_radios;
break;
}
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
new file mode 100644
index 000000000000..11a411b76fe4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -0,0 +1,894 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitfield.h>
+
+#include "hw.h"
+#include "core.h"
+#include "ce.h"
+
+/* Map from pdev index to hw mac index */
+static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
+{
+ switch (pdev_idx) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 1;
+ default:
+ return ATH11K_INVALID_HW_MAC_ID;
+ }
+}
+
+static u8 ath11k_hw_ipq6018_mac_from_pdev_id(int pdev_idx)
+{
+ return pdev_idx;
+}
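
Spelled out, the IPQ8074 mapping above is intentionally non-identity, while the IPQ6018 one is a pass-through:

	ath11k_hw_ipq8074_mac_from_pdev_id(0);	/* == 0 */
	ath11k_hw_ipq8074_mac_from_pdev_id(1);	/* == 2 */
	ath11k_hw_ipq8074_mac_from_pdev_id(2);	/* == 1 */
	ath11k_hw_ipq8074_mac_from_pdev_id(3);	/* == ATH11K_INVALID_HW_MAC_ID */
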
+
+static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = 4;
+ config->num_peers = 16;
+ config->num_tids = 32;
+
+ config->num_offload_peers = 3;
+ config->num_offload_reorder_buffs = 3;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = 0;
+ config->num_mcast_table_elems = 0;
+ config->mcast2ucast_mode = 0;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = 0;
+ config->dma_burst_size = 0;
+ config->rx_skip_defrag_timeout_dup_detection_check = 0;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = 2;
+ config->num_msdu_desc = 0x400;
+ config->beacon_tx_offload_max_vdev = 2;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+
+ config->peer_map_unmap_v2_support = 0;
+ config->use_pdev_id = 1;
+ config->max_frag_entries = 0xa;
+ config->num_tdls_vdevs = 0x1;
+ config->num_tdls_conn_table_entries = 8;
+ config->num_multicast_filter_entries = 0x20;
+ config->num_wow_filters = 0x16;
+ config->num_keep_alive_pattern = 0;
+}
+
+static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
+ struct target_resource_config *config)
+{
+ config->num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
+
+ if (ab->num_radios == 2) {
+ config->num_peers = TARGET_NUM_PEERS(DBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS);
+ } else if (ab->num_radios == 3) {
+ config->num_peers = TARGET_NUM_PEERS(DBS_SBS);
+ config->num_tids = TARGET_NUM_TIDS(DBS_SBS);
+ } else {
+ /* Control should not reach here */
+ config->num_peers = TARGET_NUM_PEERS(SINGLE);
+ config->num_tids = TARGET_NUM_TIDS(SINGLE);
+ }
+ config->num_offload_peers = TARGET_NUM_OFFLD_PEERS;
+ config->num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
+ config->num_peer_keys = TARGET_NUM_PEER_KEYS;
+ config->ast_skid_limit = TARGET_AST_SKID_LIMIT;
+ config->tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
+ config->rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
+ config->rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
+
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ config->rx_decap_mode = TARGET_DECAP_MODE_RAW;
+ else
+ config->rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
+
+ config->scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
+ config->bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
+ config->roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
+ config->num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
+ config->num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
+ config->mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
+ config->tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
+ config->num_wds_entries = TARGET_NUM_WDS_ENTRIES;
+ config->dma_burst_size = TARGET_DMA_BURST_SIZE;
+ config->rx_skip_defrag_timeout_dup_detection_check =
+ TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
+ config->vow_config = TARGET_VOW_CONFIG;
+ config->gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
+ config->num_msdu_desc = TARGET_NUM_MSDU_DESC;
+ config->beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
+ config->rx_batchmode = TARGET_RX_BATCHMODE;
+ config->peer_map_unmap_v2_support = 1;
+ config->twt_ap_pdev_count = 2;
+ config->twt_ap_sta_count = 1000;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_ipq8074(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_pdev_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return 0;
+}
+
+static int ath11k_hw_mac_id_to_srng_id_qca6390(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ return mac_id;
+}
+
+const struct ath11k_hw_ops ipq8074_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+};
+
+const struct ath11k_hw_ops ipq6018_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq6018_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_ipq8074,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_ipq8074,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_ipq8074,
+};
+
+const struct ath11k_hw_ops qca6390_ops = {
+ .get_hw_mac_from_pdev_id = ath11k_hw_ipq8074_mac_from_pdev_id,
+ .wmi_init_config = ath11k_init_wmi_config_qca6390,
+ .mac_id_to_pdev_id = ath11k_hw_mac_id_to_pdev_id_qca6390,
+ .mac_id_to_srng_id = ath11k_hw_mac_id_to_srng_id_qca6390,
+};
+
+#define ATH11K_TX_RING_MASK_0 0x1
+#define ATH11K_TX_RING_MASK_1 0x2
+#define ATH11K_TX_RING_MASK_2 0x4
+
+#define ATH11K_RX_RING_MASK_0 0x1
+#define ATH11K_RX_RING_MASK_1 0x2
+#define ATH11K_RX_RING_MASK_2 0x4
+#define ATH11K_RX_RING_MASK_3 0x8
+
+#define ATH11K_RX_ERR_RING_MASK_0 0x1
+
+#define ATH11K_RX_WBM_REL_RING_MASK_0 0x1
+
+#define ATH11K_REO_STATUS_RING_MASK_0 0x1
+
+#define ATH11K_RXDMA2HOST_RING_MASK_0 0x1
+#define ATH11K_RXDMA2HOST_RING_MASK_1 0x2
+#define ATH11K_RXDMA2HOST_RING_MASK_2 0x4
+
+#define ATH11K_HOST2RXDMA_RING_MASK_0 0x1
+#define ATH11K_HOST2RXDMA_RING_MASK_1 0x2
+#define ATH11K_HOST2RXDMA_RING_MASK_2 0x4
+
+#define ATH11K_RX_MON_STATUS_RING_MASK_0 0x1
+#define ATH11K_RX_MON_STATUS_RING_MASK_1 0x2
+#define ATH11K_RX_MON_STATUS_RING_MASK_2 0x4
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ ATH11K_HOST2RXDMA_RING_MASK_0,
+ ATH11K_HOST2RXDMA_RING_MASK_1,
+ ATH11K_HOST2RXDMA_RING_MASK_2,
+ },
+};
+
+const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390 = {
+ .tx = {
+ ATH11K_TX_RING_MASK_0,
+ ATH11K_TX_RING_MASK_1,
+ ATH11K_TX_RING_MASK_2,
+ },
+ .rx_mon_status = {
+ 0, 0, 0, 0,
+ ATH11K_RX_MON_STATUS_RING_MASK_0,
+ ATH11K_RX_MON_STATUS_RING_MASK_1,
+ ATH11K_RX_MON_STATUS_RING_MASK_2,
+ },
+ .rx = {
+ 0, 0, 0, 0, 0, 0, 0,
+ ATH11K_RX_RING_MASK_0,
+ ATH11K_RX_RING_MASK_1,
+ ATH11K_RX_RING_MASK_2,
+ ATH11K_RX_RING_MASK_3,
+ },
+ .rx_err = {
+ ATH11K_RX_ERR_RING_MASK_0,
+ },
+ .rx_wbm_rel = {
+ ATH11K_RX_WBM_REL_RING_MASK_0,
+ },
+ .reo_status = {
+ ATH11K_REO_STATUS_RING_MASK_0,
+ },
+ .rxdma2host = {
+ ATH11K_RXDMA2HOST_RING_MASK_0,
+ ATH11K_RXDMA2HOST_RING_MASK_1,
+ ATH11K_RXDMA2HOST_RING_MASK_2,
+ },
+ .host2rxdma = {
+ },
+};
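
Note the empty host2rxdma table for QCA6390: that chip has no host2rxdma ext interrupt groups. As a sketch of how these masks are meant to be consumed (an assumption for illustration; the actual consumer is the dp ext-IRQ path, which is not part of this hunk):

	static bool ath11k_demo_grp_services_tx_ring(struct ath11k_base *ab,
						     int grp_id, int ring_num)
	{
		const struct ath11k_hw_ring_mask *mask = ab->hw_params.ring_mask;

		/* Group grp_id owns TX ring ring_num iff its bit is set. */
		return mask->tx[grp_id] & BIT(ring_num);
	}
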
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_ipq8074[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(0),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(65535),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE9 host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(9),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE10 target->host HTT */
+ {
+ .pipenum = __cpu_to_le32(10),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE11 Not used */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq8074[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(9),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
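
A minimal lookup sketch over such a terminator-ended map (the all-zero final entry stops the walk); in-tree the same walk happens behind the bus map_service_to_pipe op, and this demo helper is not part of the driver:

	static int ath11k_demo_svc_to_pipe(const struct service_to_pipe *map,
					   u32 service_id, u32 pipedir,
					   u8 *pipenum)
	{
		for (; __le32_to_cpu(map->service_id) ||
		       __le32_to_cpu(map->pipedir) ||
		       __le32_to_cpu(map->pipenum); map++) {
			if (__le32_to_cpu(map->service_id) == service_id &&
			    __le32_to_cpu(map->pipedir) == pipedir) {
				*pipenum = __le32_to_cpu(map->pipenum);
				return 0;
			}
		}

		return -ENOENT;
	}
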
+
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_ipq6018[] = {
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(3),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(7),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL_MAC1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(2),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(0),
+ },
+ { /* not used */
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_TEST_RAW_STREAMS),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ .pipenum = __cpu_to_le32(4),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(1),
+ },
+ {
+ .service_id = __cpu_to_le32(ATH11K_HTC_SVC_ID_PKT_LOG),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ .pipenum = __cpu_to_le32(5),
+ },
+
+ /* (Additions here) */
+
+ { /* terminator entry */ }
+};
+
+/* Target firmware's Copy Engine configuration. */
+const struct ce_pipe_config ath11k_target_ce_config_wlan_qca6390[] = {
+ /* CE0: host->target HTC control and raw streams */
+ {
+ .pipenum = __cpu_to_le32(0),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE1: target->host HTT + HTC control */
+ {
+ .pipenum = __cpu_to_le32(1),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE2: target->host WMI */
+ {
+ .pipenum = __cpu_to_le32(2),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE3: host->target WMI */
+ {
+ .pipenum = __cpu_to_le32(3),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE4: host->target HTT */
+ {
+ .pipenum = __cpu_to_le32(4),
+ .pipedir = __cpu_to_le32(PIPEDIR_OUT),
+ .nentries = __cpu_to_le32(256),
+ .nbytes_max = __cpu_to_le32(256),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE5: target->host Pktlog */
+ {
+ .pipenum = __cpu_to_le32(5),
+ .pipedir = __cpu_to_le32(PIPEDIR_IN),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(2048),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE6: Reserved for target autonomous hif_memcpy */
+ {
+ .pipenum = __cpu_to_le32(6),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE7 used only by Host */
+ {
+ .pipenum = __cpu_to_le32(7),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT_H2H),
+ .nentries = __cpu_to_le32(0),
+ .nbytes_max = __cpu_to_le32(0),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+ .reserved = __cpu_to_le32(0),
+ },
+
+ /* CE8 target->host used only by IPA */
+ {
+ .pipenum = __cpu_to_le32(8),
+ .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
+ .nentries = __cpu_to_le32(32),
+ .nbytes_max = __cpu_to_le32(16384),
+ .flags = __cpu_to_le32(CE_ATTR_FLAGS),
+ .reserved = __cpu_to_le32(0),
+ },
+ /* CE 9, 10, 11 are used by MHI driver */
+};
+
+/* Map from service/endpoint to Copy Engine.
+ * This table is derived from the CE_PCI TABLE, above.
+ * It is passed to the Target at startup for use by firmware.
+ */
+const struct service_to_pipe ath11k_target_service_to_ce_map_wlan_qca6390[] = {
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VO),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BK),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_BE),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_DATA_VI),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(3),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_WMI_CONTROL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(0),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_RSVD_CTRL),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(2),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
+ __cpu_to_le32(4),
+ },
+ {
+ __cpu_to_le32(ATH11K_HTC_SVC_ID_HTT_DATA_MSG),
+ __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
+ __cpu_to_le32(1),
+ },
+
+ /* (Additions here) */
+
+ { /* must be last */
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ __cpu_to_le32(0),
+ },
+};
+
+const struct ath11k_hw_regs ipq8074_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000510,
+ .hal_tcl1_ring_base_msb = 0x00000514,
+ .hal_tcl1_ring_id = 0x00000518,
+ .hal_tcl1_ring_misc = 0x00000520,
+ .hal_tcl1_ring_tp_addr_lsb = 0x0000052c,
+ .hal_tcl1_ring_tp_addr_msb = 0x00000530,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x00000540,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x00000544,
+ .hal_tcl1_ring_msi1_base_lsb = 0x00000558,
+ .hal_tcl1_ring_msi1_base_msb = 0x0000055c,
+ .hal_tcl1_ring_msi1_data = 0x00000560,
+ .hal_tcl2_ring_base_lsb = 0x00000568,
+ .hal_tcl_ring_base_lsb = 0x00000618,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000720,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x0000029c,
+ .hal_reo1_ring_base_msb = 0x000002a0,
+ .hal_reo1_ring_id = 0x000002a4,
+ .hal_reo1_ring_misc = 0x000002ac,
+ .hal_reo1_ring_hp_addr_lsb = 0x000002b0,
+ .hal_reo1_ring_hp_addr_msb = 0x000002b4,
+ .hal_reo1_ring_producer_int_setup = 0x000002c0,
+ .hal_reo1_ring_msi1_base_lsb = 0x000002e4,
+ .hal_reo1_ring_msi1_base_msb = 0x000002e8,
+ .hal_reo1_ring_msi1_data = 0x000002ec,
+ .hal_reo2_ring_base_lsb = 0x000002f4,
+ .hal_reo1_aging_thresh_ix_0 = 0x00000564,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000568,
+ .hal_reo1_aging_thresh_ix_2 = 0x0000056c,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000570,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003038,
+ .hal_reo1_ring_tp = 0x0000303c,
+ .hal_reo2_ring_hp = 0x00003040,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003fc,
+ .hal_reo_tcl_ring_hp = 0x00003058,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x00000504,
+ .hal_reo_status_hp = 0x00003070,
+};
+
+const struct ath11k_hw_regs qca6390_regs = {
+ /* SW2TCL(x) R0 ring configuration address */
+ .hal_tcl1_ring_base_lsb = 0x00000684,
+ .hal_tcl1_ring_base_msb = 0x00000688,
+ .hal_tcl1_ring_id = 0x0000068c,
+ .hal_tcl1_ring_misc = 0x00000694,
+ .hal_tcl1_ring_tp_addr_lsb = 0x000006a0,
+ .hal_tcl1_ring_tp_addr_msb = 0x000006a4,
+ .hal_tcl1_ring_consumer_int_setup_ix0 = 0x000006b4,
+ .hal_tcl1_ring_consumer_int_setup_ix1 = 0x000006b8,
+ .hal_tcl1_ring_msi1_base_lsb = 0x000006cc,
+ .hal_tcl1_ring_msi1_base_msb = 0x000006d0,
+ .hal_tcl1_ring_msi1_data = 0x000006d4,
+ .hal_tcl2_ring_base_lsb = 0x000006dc,
+ .hal_tcl_ring_base_lsb = 0x0000078c,
+
+ /* TCL STATUS ring address */
+ .hal_tcl_status_ring_base_lsb = 0x00000894,
+
+ /* REO2SW(x) R0 ring configuration address */
+ .hal_reo1_ring_base_lsb = 0x00000244,
+ .hal_reo1_ring_base_msb = 0x00000248,
+ .hal_reo1_ring_id = 0x0000024c,
+ .hal_reo1_ring_misc = 0x00000254,
+ .hal_reo1_ring_hp_addr_lsb = 0x00000258,
+ .hal_reo1_ring_hp_addr_msb = 0x0000025c,
+ .hal_reo1_ring_producer_int_setup = 0x00000268,
+ .hal_reo1_ring_msi1_base_lsb = 0x0000028c,
+ .hal_reo1_ring_msi1_base_msb = 0x00000290,
+ .hal_reo1_ring_msi1_data = 0x00000294,
+ .hal_reo2_ring_base_lsb = 0x0000029c,
+ .hal_reo1_aging_thresh_ix_0 = 0x0000050c,
+ .hal_reo1_aging_thresh_ix_1 = 0x00000510,
+ .hal_reo1_aging_thresh_ix_2 = 0x00000514,
+ .hal_reo1_aging_thresh_ix_3 = 0x00000518,
+
+ /* REO2SW(x) R2 ring pointers (head/tail) address */
+ .hal_reo1_ring_hp = 0x00003030,
+ .hal_reo1_ring_tp = 0x00003034,
+ .hal_reo2_ring_hp = 0x00003038,
+
+ /* REO2TCL R0 ring configuration address */
+ .hal_reo_tcl_ring_base_lsb = 0x000003a4,
+ .hal_reo_tcl_ring_hp = 0x00003050,
+
+ /* REO status address */
+ .hal_reo_status_ring_base_lsb = 0x000004ac,
+ .hal_reo_status_hp = 0x00003068,
+};
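
These per-chip tables are what the new (ab)-parameterized register macros resolve against; the hal_rx.c hunk above already writes HAL_REO1_AGING_THRESH_IX_0(ab) instead of a fixed offset. The define in hal.h (not part of this excerpt, so its exact form is an assumption) is of roughly this shape:

	#define HAL_REO1_AGING_THRESH_IX_0(ab) \
		((ab)->hw_params.regs->hal_reo1_aging_thresh_ix_0)
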
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index dc4434aefbbe..1dda4257e6d7 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -6,6 +6,8 @@
#ifndef ATH11K_HW_H
#define ATH11K_HW_H
+#include "wmi.h"
+
/* Target configuration defines */
/* Num VDEVS per radio */
@@ -68,15 +70,12 @@
#define ATH11K_FW_DIR "ath11k"
-/* IPQ8074 definitions */
-#define IPQ8074_FW_DIR "IPQ8074"
-#define IPQ8074_MAX_BOARD_DATA_SZ (256 * 1024)
-#define IPQ8074_MAX_CAL_DATA_SZ IPQ8074_MAX_BOARD_DATA_SZ
-
#define ATH11K_BOARD_MAGIC "QCA-ATH11K-BOARD"
#define ATH11K_BOARD_API2_FILE "board-2.bin"
-#define ATH11K_DEFAULT_BOARD_FILE "bdwlan.bin"
+#define ATH11K_DEFAULT_BOARD_FILE "board.bin"
#define ATH11K_DEFAULT_CAL_FILE "caldata.bin"
+#define ATH11K_AMSS_FILE "amss.bin"
+#define ATH11K_M3_FILE "m3.bin"
enum ath11k_hw_rate_cck {
ATH11K_HW_RATE_CCK_LP_11M = 0,
@@ -104,15 +103,109 @@ enum ath11k_bus {
ATH11K_BUS_PCI,
};
+#define ATH11K_EXT_IRQ_GRP_NUM_MAX 11
+
+struct ath11k_hw_ring_mask {
+ u8 tx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_mon_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_err[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rx_wbm_rel[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 reo_status[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 rxdma2host[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+ u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
+};
+
struct ath11k_hw_params {
const char *name;
+ u16 hw_rev;
+ u8 max_radios;
+ u32 bdf_addr;
+
struct {
const char *dir;
size_t board_size;
size_t cal_size;
} fw;
+
+ const struct ath11k_hw_ops *hw_ops;
+ const struct ath11k_hw_ring_mask *ring_mask;
+
+ bool internal_sleep_clock;
+
+ const struct ath11k_hw_regs *regs;
+ const struct ce_attr *host_ce_config;
+ u32 ce_count;
+ const struct ce_pipe_config *target_ce_config;
+ u32 target_ce_count;
+ const struct service_to_pipe *svc_to_ce_map;
+ u32 svc_to_ce_map_len;
+
+ bool single_pdev_only;
+
+ /* For example, on QCA6390 struct
+ * wmi_init_cmd_param::band_to_mac_config needs to be false as the
+ * firmware creates the mapping.
+ */
+ bool needs_band_to_mac;
+
+ bool rxdma1_enable;
+ int num_rxmda_per_pdev;
+ bool rx_mac_buf_ring;
+ bool vdev_start_delay;
+ bool htt_peer_map_v2;
+ bool tcl_0_only;
+ u8 spectral_fft_sz;
+
+ u16 interface_modes;
+ bool supports_monitor;
+ bool supports_shadow_regs;
+ bool idle_ps;
+};
+
+struct ath11k_hw_ops {
+ u8 (*get_hw_mac_from_pdev_id)(int pdev_id);
+ void (*wmi_init_config)(struct ath11k_base *ab,
+ struct target_resource_config *config);
+ int (*mac_id_to_pdev_id)(struct ath11k_hw_params *hw, int mac_id);
+ int (*mac_id_to_srng_id)(struct ath11k_hw_params *hw, int mac_id);
};
+extern const struct ath11k_hw_ops ipq8074_ops;
+extern const struct ath11k_hw_ops ipq6018_ops;
+extern const struct ath11k_hw_ops qca6390_ops;
+
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
+extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
+
+static inline
+int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
+ int pdev_idx)
+{
+ if (hw->hw_ops->get_hw_mac_from_pdev_id)
+ return hw->hw_ops->get_hw_mac_from_pdev_id(pdev_idx);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_pdev_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_pdev_id)
+ return hw->hw_ops->mac_id_to_pdev_id(hw, mac_id);
+
+ return 0;
+}
+
+static inline int ath11k_hw_mac_id_to_srng_id(struct ath11k_hw_params *hw,
+ int mac_id)
+{
+ if (hw->hw_ops->mac_id_to_srng_id)
+ return hw->hw_ops->mac_id_to_srng_id(hw, mac_id);
+
+ return 0;
+}
+
struct ath11k_fw_ie {
__le32 id;
__le32 len;
@@ -130,4 +223,51 @@ enum ath11k_bd_ie_type {
ATH11K_BD_IE_BOARD_EXT = 1,
};
+struct ath11k_hw_regs {
+ u32 hal_tcl1_ring_base_lsb;
+ u32 hal_tcl1_ring_base_msb;
+ u32 hal_tcl1_ring_id;
+ u32 hal_tcl1_ring_misc;
+ u32 hal_tcl1_ring_tp_addr_lsb;
+ u32 hal_tcl1_ring_tp_addr_msb;
+ u32 hal_tcl1_ring_consumer_int_setup_ix0;
+ u32 hal_tcl1_ring_consumer_int_setup_ix1;
+ u32 hal_tcl1_ring_msi1_base_lsb;
+ u32 hal_tcl1_ring_msi1_base_msb;
+ u32 hal_tcl1_ring_msi1_data;
+ u32 hal_tcl2_ring_base_lsb;
+ u32 hal_tcl_ring_base_lsb;
+
+ u32 hal_tcl_status_ring_base_lsb;
+
+ u32 hal_reo1_ring_base_lsb;
+ u32 hal_reo1_ring_base_msb;
+ u32 hal_reo1_ring_id;
+ u32 hal_reo1_ring_misc;
+ u32 hal_reo1_ring_hp_addr_lsb;
+ u32 hal_reo1_ring_hp_addr_msb;
+ u32 hal_reo1_ring_producer_int_setup;
+ u32 hal_reo1_ring_msi1_base_lsb;
+ u32 hal_reo1_ring_msi1_base_msb;
+ u32 hal_reo1_ring_msi1_data;
+ u32 hal_reo2_ring_base_lsb;
+ u32 hal_reo1_aging_thresh_ix_0;
+ u32 hal_reo1_aging_thresh_ix_1;
+ u32 hal_reo1_aging_thresh_ix_2;
+ u32 hal_reo1_aging_thresh_ix_3;
+
+ u32 hal_reo1_ring_hp;
+ u32 hal_reo1_ring_tp;
+ u32 hal_reo2_ring_hp;
+
+ u32 hal_reo_tcl_ring_base_lsb;
+ u32 hal_reo_tcl_ring_hp;
+
+ u32 hal_reo_status_ring_base_lsb;
+ u32 hal_reo_status_hp;
+};
+
+extern const struct ath11k_hw_regs ipq8074_regs;
+extern const struct ath11k_hw_regs qca6390_regs;
+
#endif
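
For illustration, a hw_params entry of the shape core.c can now carry per chip; the field values below are placeholders, not the real QCA6390 configuration:

	static const struct ath11k_hw_params ath11k_demo_hw_params = {
		.name = "qca6390 hw2.0",
		.max_radios = 1,
		.fw = {
			.dir = "QCA6390/hw2.0",
			.board_size = 256 * 1024,
			.cal_size = 256 * 1024,
		},
		.hw_ops = &qca6390_ops,
		.ring_mask = &ath11k_hw_ring_mask_qca6390,
		.regs = &qca6390_regs,
		.single_pdev_only = true,
		.vdev_start_delay = true,
		.supports_shadow_regs = true,
		.idle_ps = true,
		.interface_modes = BIT(NL80211_IFTYPE_STATION),
	};
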
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 94ae2b9ea663..7f8dd47d2333 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -14,6 +14,7 @@
#include "dp_rx.h"
#include "testmode.h"
#include "peer.h"
+#include "debugfs_sta.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -42,12 +43,6 @@
.max_power = 30, \
}
-/* frame mode values are mapped as per enum ath11k_hw_txrx_mode */
-static unsigned int ath11k_frame_mode = ATH11K_HW_TXRX_NATIVE_WIFI;
-module_param_named(frame_mode, ath11k_frame_mode, uint, 0644);
-MODULE_PARM_DESC(frame_mode,
- "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
-
static const struct ieee80211_channel ath11k_2ghz_channels[] = {
CHAN2G(1, 2412, 0),
CHAN2G(2, 2417, 0),
@@ -244,6 +239,9 @@ static const u32 ath11k_smps_map[] = {
[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
};
+static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
u8 ath11k_mac_bw_to_mac80211_bw(u8 bw)
{
u8 ret = 0;
@@ -521,6 +519,11 @@ struct ath11k *ath11k_mac_get_ar_by_pdev_id(struct ath11k_base *ab, u32 pdev_id)
int i;
struct ath11k_pdev *pdev;
+ if (ab->hw_params.single_pdev_only) {
+ pdev = rcu_dereference(ab->pdevs_active[0]);
+ return pdev ? pdev->ar : NULL;
+ }
+
if (WARN_ON(pdev_id > ab->num_radios))
return NULL;
@@ -755,21 +758,12 @@ static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
{
- struct ath11k *ar = hw->priv;
- int ret = 0;
-
/* mac80211 requires this op to be present and that's why
* there's an empty function, this can be extended when
* required.
*/
- mutex_lock(&ar->conf_mutex);
-
- /* TODO: Handle configuration changes as appropriate */
-
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
+ return 0;
}
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
@@ -1138,13 +1132,13 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
idx_limit = -1;
switch (idx_limit) {
- case 0: /* fall through */
- case 1: /* fall through */
- case 2: /* fall through */
- case 3: /* fall through */
- case 4: /* fall through */
- case 5: /* fall through */
- case 6: /* fall through */
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
case 7:
mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
break;
@@ -1156,7 +1150,7 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
break;
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case -1:
mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
break;
@@ -1268,6 +1262,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct peer_assoc_params *arg)
{
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ u8 ampdu_factor;
u16 v;
if (!he_cap->has_he)
@@ -1284,6 +1279,30 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
/* the top most byte is used to indicate BSS color info */
arg->peer_he_ops &= 0xffffff;
+ /* As per section 26.6.1 of the 11ax draft 5.0, if the Max AMPDU Exponent
+ * Extension in the HE cap is zero, use arg->peer_max_mpdu as calculated
+ * while parsing the VHT caps (if present) or the HT caps (if VHT caps
+ * are not present).
+ *
+ * For a non-zero Max AMPDU Exponent Extension in the HE MAC caps:
+ * if the HE STA sends both VHT and HE cap IEs in the assoc request,
+ * use MAX_AMPDU_LEN_FACTOR of 20 to calculate the max_ampdu length;
+ * if the HE STA sends HT and HE caps but no VHT cap, use
+ * MAX_AMPDU_LEN_FACTOR of 16 instead.
+ */
+ ampdu_factor = (he_cap->he_cap_elem.mac_cap_info[3] &
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) >>
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_SHIFT;
+
+ if (ampdu_factor) {
+ if (sta->vht_cap.vht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ else if (sta->ht_cap.ht_supported)
+ arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+ }
+
if (he_cap->he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
int bit = 7;
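
To make the A-MPDU comment above concrete (assuming mac80211's IEEE80211_HE_VHT_MAX_AMPDU_FACTOR == 20 and IEEE80211_HE_HT_MAX_AMPDU_FACTOR == 16):

	/* ampdu_factor = 1, VHT-capable peer:
	 *	peer_max_mpdu = (1 << (20 + 1)) - 1 = 2097151 octets
	 * ampdu_factor = 1, HT-only peer:
	 *	peer_max_mpdu = (1 << (16 + 1)) - 1 = 131071 octets
	 */
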
@@ -1339,7 +1358,7 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
- /* fall through */
+ fallthrough;
default:
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80);
@@ -2114,7 +2133,7 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
} else if (ar->scan.roc_notify) {
ieee80211_remain_on_channel_expired(ar->hw);
}
- /* fall through */
+ fallthrough;
case ATH11K_SCAN_STARTING:
ar->scan.state = ATH11K_SCAN_IDLE;
ar->scan_channel = NULL;
@@ -2372,6 +2391,9 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
reinit_completion(&ar->install_key_done);
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 0;
+
if (cmd == DISABLE_KEY) {
/* TODO: Check if FW expects value other than NONE for del */
/* arg.key_cipher = WMI_CIPHER_NONE; */
@@ -2403,8 +2425,13 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
return -EOPNOTSUPP;
}
+ if (test_bit(ATH11K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV |
+ IEEE80211_KEY_FLAG_RESERVE_TAILROOM;
+
install:
ret = ath11k_wmi_vdev_install_key(arvif->ar, &arg);
+
if (ret)
return ret;
@@ -2476,6 +2503,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
return 1;
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ return 1;
+
if (key->keyidx > WMI_MAX_KEY_INDEX)
return -ENOSPC;
@@ -2929,7 +2959,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
ath11k_dbg(ab, ATH11K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
sta->addr, arvif->vdev_id);
- if (ath11k_debug_is_extd_tx_stats_enabled(ar)) {
+ if (ath11k_debugfs_is_extd_tx_stats_enabled(ar)) {
arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), GFP_KERNEL);
if (!arsta->tx_stats) {
ret = -ENOMEM;
@@ -2955,6 +2985,15 @@ static int ath11k_mac_station_add(struct ath11k *ar,
goto free_tx_stats;
}
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP) {
+ ret = ath11k_start_vdev_delay(ar->hw, vif);
+ if (ret) {
+ ath11k_warn(ab, "failed to delay vdev start: %d\n", ret);
+ goto free_tx_stats;
+ }
+ }
+
return 0;
free_tx_stats:
@@ -3039,10 +3078,6 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "Failed to associate station: %pM\n",
sta->addr);
- else
- ath11k_info(ar->ab,
- "Station %pM moved to assoc state\n",
- sta->addr);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH &&
(vif->type == NL80211_IFTYPE_AP ||
@@ -3052,10 +3087,6 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
if (ret)
ath11k_warn(ar->ab, "Failed to disassociate station: %pM\n",
sta->addr);
- else
- ath11k_info(ar->ab,
- "Station %pM moved to disassociated state\n",
- sta->addr);
}
mutex_unlock(&ar->conf_mutex);
@@ -3898,7 +3929,7 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
return -ENOSPC;
info = IEEE80211_SKB_CB(skb);
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)) {
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
@@ -4025,7 +4056,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
bool is_prb_rsp;
int ret;
- if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
skb_cb->flags |= ATH11K_SKB_HW_80211_ENCAP;
} else if (ieee80211_is_mgmt(hdr->frame_control)) {
is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
@@ -4057,18 +4088,25 @@ void ath11k_mac_drain_tx(struct ath11k *ar)
static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
{
struct htt_rx_ring_tlv_filter tlv_filter = {0};
+ struct ath11k_base *ab = ar->ab;
+ int i, ret = 0;
u32 ring_id;
if (enable) {
tlv_filter = ath11k_mac_mon_status_filter_default;
- tlv_filter.rx_filter = ath11k_debug_rx_filter(ar);
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
- ring_id = ar->dp.rx_mon_status_refill_ring.refill_buf_ring.ring_id;
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+ ar->dp.mac_id + i,
+ HAL_RXDMA_MONITOR_STATUS,
+ DP_RX_BUFFER_SIZE,
+ &tlv_filter);
+ }
- return ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
- HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
+ return ret;
}
static int ath11k_mac_op_start(struct ieee80211_hw *hw)
@@ -4170,6 +4208,15 @@ static int ath11k_mac_op_start(struct ieee80211_hw *hw)
rcu_assign_pointer(ab->pdevs_active[ar->pdev_idx],
&ab->pdevs[ar->pdev_idx]);
+ /* allow device to enter IMPS */
+ if (ab->hw_params.idle_ps) {
+ ret = ath11k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_IDLE_PS_CONFIG,
+ 1, pdev->pdev_id);
+ if (ret) {
+ ath11k_err(ab, "failed to enable idle ps: %d\n", ret);
+ goto err;
+ }
+ }
return 0;
err:
@@ -4304,6 +4351,37 @@ static int ath11k_set_he_mu_sounding_mode(struct ath11k *ar,
return ret;
}
+static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ u32 param_id, param_value;
+ int ret;
+
+ param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
+ if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
+ (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP))
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+}
+
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -4313,7 +4391,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
struct vdev_create_params vdev_param = {0};
struct peer_create_params peer_param;
u32 param_id, param_value;
- int hw_encap = 0;
u16 nss;
int i;
int ret;
@@ -4368,7 +4445,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
break;
@@ -4407,30 +4484,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
list_add(&arvif->list, &ar->arvifs);
spin_unlock_bh(&ar->data_lock);
- param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
- if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET)
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_AP_VLAN:
- case NL80211_IFTYPE_AP:
- hw_encap = 1;
- break;
- default:
- break;
- }
-
- if (ieee80211_set_hw_80211_encap(vif, hw_encap))
- param_value = ATH11K_HW_TXRX_ETHERNET;
- else
- param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
-
- ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- param_id, param_value);
- if (ret) {
- ath11k_warn(ab, "failed to set vdev %d tx encap mode: %d\n",
- arvif->vdev_id, ret);
- goto err_vdev_del;
- }
+ ath11k_mac_op_update_vif_offload(hw, vif);
nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
@@ -4649,6 +4703,10 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
ath11k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
}
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
+ changed_flags, *total_flags, reset_flag);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5112,6 +5170,39 @@ unlock:
mutex_unlock(&ar->conf_mutex);
}
+static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_base *ab = ar->ab;
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ int ret;
+
+ if (WARN_ON(arvif->is_started))
+ return -EBUSY;
+
+ ret = ath11k_mac_vdev_start(arvif, &arvif->chanctx.def);
+ if (ret) {
+ ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
+ arvif->vdev_id, vif->addr,
+ arvif->chanctx.def.chan->center_freq, ret);
+ return ret;
+ }
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ if (ret) {
+ ath11k_warn(ab, "failed put monitor up: %d\n", ret);
+ return ret;
+ }
+ }
+
+ arvif->is_started = true;
+
+ /* TODO: Setup ps and cts/rts protection */
+ return 0;
+}
+
static int
ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5121,6 +5212,7 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = (void *)vif->drv_priv;
int ret;
+ struct peer_create_params param;
mutex_lock(&ar->conf_mutex);
@@ -5128,11 +5220,27 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
"mac chanctx assign ptr %pK vdev_id %i\n",
ctx, arvif->vdev_id);
+ /* for QCA6390 bss peer must be created before vdev_start */
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type != WMI_VDEV_TYPE_AP &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
+ mutex_unlock(&ar->conf_mutex);
+ return 0;
+ }
+
if (WARN_ON(arvif->is_started)) {
mutex_unlock(&ar->conf_mutex);
return -EBUSY;
}
+ if (ab->hw_params.vdev_start_delay) {
+ param.vdev_id = arvif->vdev_id;
+ param.peer_type = WMI_PEER_TYPE_DEFAULT;
+ param.peer_addr = ar->mac_addr;
+ ret = ath11k_peer_create(ar, arvif, NULL, &param);
+ if (ret)
+ ath11k_warn(ab, "failed to create peer for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ }
+
ret = ath11k_mac_vdev_start(arvif, &ctx->def);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
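
Taken together with ath11k_start_vdev_delay() above, the bring-up order on vdev_start_delay chips becomes, in outline:

	non-AP/non-monitor vifs: assign_vif_chanctx() only saves the ctx in
	    arvif->chanctx; station_add() later calls
	    ath11k_start_vdev_delay() to do the deferred vdev start.
	AP and monitor vifs: assign_vif_chanctx() creates a self peer on
	    ar->mac_addr (the bss peer must exist before vdev_start) and
	    starts the vdev right away.
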
@@ -5178,6 +5286,11 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
WARN_ON(!arvif->is_started);
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
+ ath11k_peer_find_by_addr(ab, ar->mac_addr))
+ ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
ath11k_warn(ab, "failed to stop vdev %i: %d\n",
@@ -5185,6 +5298,10 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
+ if (ab->hw_params.vdev_start_delay &&
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+ ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5753,6 +5870,7 @@ static const struct ieee80211_ops ath11k_ops = {
.reconfig_complete = ath11k_mac_op_reconfig_complete,
.add_interface = ath11k_mac_op_add_interface,
.remove_interface = ath11k_mac_op_remove_interface,
+ .update_vif_offload = ath11k_mac_op_update_vif_offload,
.config = ath11k_mac_op_config,
.bss_info_changed = ath11k_mac_op_bss_info_changed,
.configure_filter = ath11k_mac_op_configure_filter,
@@ -5780,39 +5898,10 @@ static const struct ieee80211_ops ath11k_ops = {
.sta_statistics = ath11k_mac_op_sta_statistics,
CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
#ifdef CONFIG_ATH11K_DEBUGFS
- .sta_add_debugfs = ath11k_sta_add_debugfs,
+ .sta_add_debugfs = ath11k_debugfs_sta_op_add,
#endif
};
-static const struct ieee80211_iface_limit ath11k_if_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 16,
- .types = BIT(NL80211_IFTYPE_AP)
-#ifdef CONFIG_MAC80211_MESH
- | BIT(NL80211_IFTYPE_MESH_POINT)
-#endif
- },
-};
-
-static const struct ieee80211_iface_combination ath11k_if_comb[] = {
- {
- .limits = ath11k_if_limits,
- .n_limits = ARRAY_SIZE(ath11k_if_limits),
- .max_interfaces = 16,
- .num_different_channels = 1,
- .beacon_int_infra_match = true,
- .beacon_int_min_gcd = 100,
- .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
- },
-};
-
static void ath11k_mac_update_ch_list(struct ath11k *ar,
struct ieee80211_supported_band *band,
u32 freq_low, u32 freq_high)
@@ -5829,12 +5918,29 @@ static void ath11k_mac_update_ch_list(struct ath11k *ar,
}
}
+static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct ath11k_pdev_cap *pdev_cap = &pdev->cap;
+
+ if (band == WMI_HOST_WLAN_2G_CAP)
+ return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
+
+ if (band == WMI_HOST_WLAN_5G_CAP)
+ return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
+
+ ath11k_warn(ar->ab, "unsupported phy cap:%d\n", band);
+
+ return 0;
+}
+
static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
u32 supported_bands)
{
struct ieee80211_supported_band *band;
struct ath11k_hal_reg_capabilities_ext *reg_cap;
void *channels;
+ u32 phy_id;
BUILD_BUG_ON((ARRAY_SIZE(ath11k_2ghz_channels) +
ARRAY_SIZE(ath11k_5ghz_channels) +
@@ -5857,6 +5963,11 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
band->n_bitrates = ath11k_g_rates_size;
band->bitrates = ath11k_g_rates;
ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
ath11k_mac_update_ch_list(ar, band,
reg_cap->low_2ghz_chan,
reg_cap->high_2ghz_chan);
@@ -5901,6 +6012,12 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
ath11k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
@@ -5910,6 +6027,52 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
return 0;
}
+static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct ieee80211_iface_combination *combinations;
+ struct ieee80211_iface_limit *limits;
+ int n_limits;
+
+ combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+ if (!combinations)
+ return -ENOMEM;
+
+ n_limits = 2;
+
+ limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+ if (!limits) {
+ kfree(combinations);
+ return -ENOMEM;
+ }
+
+ limits[0].max = 1;
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+
+ limits[1].max = 16;
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].max_interfaces = 16;
+ combinations[0].num_different_channels = 1;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+ combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80);
+
+ ar->hw->wiphy->iface_combinations = combinations;
+ ar->hw->wiphy->n_iface_combinations = 1;
+
+ return 0;
+}
+
static const u8 ath11k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
@@ -5960,6 +6123,9 @@ static void __ath11k_mac_unregister(struct ath11k *ar)
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
SET_IEEE80211_DEV(ar->hw, NULL);
}
@@ -6006,17 +6172,21 @@ static int __ath11k_mac_register(struct ath11k *ar)
ret = ath11k_mac_setup_channels_rates(ar,
cap->supported_bands);
if (ret)
- goto err_free;
+ goto err;
ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
ath11k_mac_setup_he_cap(ar, cap);
+ ret = ath11k_mac_setup_iface_combinations(ar);
+ if (ret) {
+ ath11k_err(ar->ab, "failed to setup interface combinations: %d\n", ret);
+ goto err_free_channels;
+ }
+
ar->hw->wiphy->available_antennas_rx = cap->rx_chain_mask;
ar->hw->wiphy->available_antennas_tx = cap->tx_chain_mask;
- ar->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT);
+ ar->hw->wiphy->interface_modes = ab->hw_params.interface_modes;
ieee80211_hw_set(ar->hw, SIGNAL_DBM);
ieee80211_hw_set(ar->hw, SUPPORTS_PS);
@@ -6026,7 +6196,6 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(ar->hw, AP_LINK_PS);
ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
- ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
@@ -6034,6 +6203,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
if (ht_cap & WMI_HT_CAP_ENABLED) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
@@ -6078,9 +6248,6 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->vif_data_size = sizeof(struct ath11k_vif);
ar->hw->sta_data_size = sizeof(struct ath11k_sta);
- ar->hw->wiphy->iface_combinations = ath11k_if_comb;
- ar->hw->wiphy->n_iface_combinations = ARRAY_SIZE(ath11k_if_comb);
-
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
@@ -6093,34 +6260,51 @@ static int __ath11k_mac_register(struct ath11k *ar)
ath11k_reg_init(ar);
- /* advertise HW checksum offload capabilities */
- ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+ ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
+ ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
+ }
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
- goto err_free;
+ goto err_free_if_combs;
}
+ if (!ab->hw_params.supports_monitor)
+ /* There's a race between calling ieee80211_register_hw()
+ * and here where the monitor mode is enabled for a little
+ * while. But that time is so short that in practice it makes
+ * no difference in real life.
+ */
+ ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
+
/* Apply the regd received during initialization */
ret = ath11k_regd_update(ar, true);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
- goto err_free;
+ goto err_free_if_combs;
}
- ret = ath11k_debug_register(ar);
+ ret = ath11k_debugfs_register(ar);
if (ret) {
ath11k_err(ar->ab, "debugfs registration failed: %d\n", ret);
- goto err_free;
+ goto err_free_if_combs;
}
return 0;
-err_free:
+err_free_if_combs:
+ kfree(ar->hw->wiphy->iface_combinations[0].limits);
+ kfree(ar->hw->wiphy->iface_combinations);
+
+err_free_channels:
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_6GHZ].channels);
+err:
SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
}
@@ -6194,7 +6378,7 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
ar->ab = ab;
ar->pdev = pdev;
ar->pdev_idx = i;
- ar->lmac_id = ath11k_core_get_hw_mac_id(ab, i);
+ ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i);
ar->wmi = &ab->wmi_ab.wmi[i];
/* FIXME wmi[0] is already initialized during attach,
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
new file mode 100644
index 000000000000..aded9a719d51
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/* Copyright (c) 2020 The Linux Foundation. All rights reserved. */
+
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "debug.h"
+#include "mhi.h"
+
+#define MHI_TIMEOUT_DEFAULT_MS 90000
+
+static struct mhi_channel_config ath11k_mhi_channels[] = {
+ {
+ .num = 0,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .auto_start = false,
+ },
+ {
+ .num = 1,
+ .name = "LOOPBACK",
+ .num_elements = 32,
+ .event_ring = 0,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .auto_start = false,
+ },
+ {
+ .num = 20,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_TO_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = false,
+ .auto_start = true,
+ },
+ {
+ .num = 21,
+ .name = "IPCR",
+ .num_elements = 64,
+ .event_ring = 1,
+ .dir = DMA_FROM_DEVICE,
+ .ee_mask = 0x4,
+ .pollcfg = 0,
+ .doorbell = MHI_DB_BRST_DISABLE,
+ .lpm_notify = false,
+ .offload_channel = false,
+ .doorbell_mode_switch = false,
+ .auto_queue = true,
+ .auto_start = true,
+ },
+};
+
+static struct mhi_event_config ath11k_mhi_events[] = {
+ {
+ .num_elements = 32,
+ .irq_moderation_ms = 0,
+ .irq = 1,
+ .mode = MHI_DB_BRST_DISABLE,
+ .data_type = MHI_ER_CTRL,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+ {
+ .num_elements = 256,
+ .irq_moderation_ms = 1,
+ .irq = 2,
+ .mode = MHI_DB_BRST_DISABLE,
+ .priority = 1,
+ .hardware_event = false,
+ .client_managed = false,
+ .offload_channel = false,
+ },
+};
+
+static struct mhi_controller_config ath11k_mhi_config = {
+ .max_channels = 128,
+ .timeout_ms = 2000,
+ .use_bounce_buf = false,
+ .buf_len = 0,
+ .num_channels = ARRAY_SIZE(ath11k_mhi_channels),
+ .ch_cfg = ath11k_mhi_channels,
+ .num_events = ARRAY_SIZE(ath11k_mhi_events),
+ .event_cfg = ath11k_mhi_events,
+};
+
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
+{
+ u32 val;
+
+ val = ath11k_pci_read32(ab, MHISTATUS);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "MHISTATUS 0x%x\n", val);
+
+ /* It has been observed on QCA6390 that MHISTATUS has the SYSERR
+ * bit set after SOC_GLOBAL_RESET, so MHICTRL_RESET must be set
+ * to clear SYSERR.
+ */
+ ath11k_pci_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
+
+ mdelay(10);
+}
+
+static void ath11k_mhi_reset_txvecdb(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_TXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_txvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_TXVECSTATUS, 0);
+}
+
+static void ath11k_mhi_reset_rxvecdb(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_RXVECDB, 0);
+}
+
+static void ath11k_mhi_reset_rxvecstatus(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_RXVECSTATUS, 0);
+}
+
+void ath11k_mhi_clear_vector(struct ath11k_base *ab)
+{
+ ath11k_mhi_reset_txvecdb(ab);
+ ath11k_mhi_reset_txvecstatus(ab);
+ ath11k_mhi_reset_rxvecdb(ab);
+ ath11k_mhi_reset_rxvecstatus(ab);
+}
+
+static int ath11k_mhi_get_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u32 user_base_data, base_vector;
+ int ret, num_vectors, i;
+ int *irq;
+
+ ret = ath11k_pci_get_user_msi_assignment(ab_pci,
+ "MHI", &num_vectors,
+ &user_base_data, &base_vector);
+ if (ret)
+ return ret;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "Number of assigned MSI for MHI is %d, base vector is %d\n",
+ num_vectors, base_vector);
+
+ irq = kcalloc(num_vectors, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+
+ for (i = 0; i < num_vectors; i++)
+ irq[i] = ath11k_pci_get_msi_irq(ab->dev,
+ base_vector + i);
+
+ ab_pci->mhi_ctrl->irq = irq;
+ ab_pci->mhi_ctrl->nr_irqs = num_vectors;
+
+ return 0;
+}
+
+static int ath11k_mhi_op_runtime_get(struct mhi_controller *mhi_cntrl)
+{
+ return 0;
+}
+
+static void ath11k_mhi_op_runtime_put(struct mhi_controller *mhi_cntrl)
+{
+}
+
+static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
+ enum mhi_callback cb)
+{
+}
+
+static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 *out)
+{
+ *out = readl(addr);
+
+ return 0;
+}
+
+static void ath11k_mhi_op_write_reg(struct mhi_controller *mhi_cntrl,
+ void __iomem *addr,
+ u32 val)
+{
+ writel(val, addr);
+}
+
+int ath11k_mhi_register(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct mhi_controller *mhi_ctrl;
+ int ret;
+
+ mhi_ctrl = kzalloc(sizeof(*mhi_ctrl), GFP_KERNEL);
+ if (!mhi_ctrl)
+ return -ENOMEM;
+
+ ath11k_core_create_firmware_path(ab, ATH11K_AMSS_FILE,
+ ab_pci->amss_path,
+ sizeof(ab_pci->amss_path));
+
+ ab_pci->mhi_ctrl = mhi_ctrl;
+ mhi_ctrl->cntrl_dev = ab->dev;
+ mhi_ctrl->fw_image = ab_pci->amss_path;
+ mhi_ctrl->regs = ab->mem;
+
+ ret = ath11k_mhi_get_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to get msi for mhi\n");
+ kfree(mhi_ctrl);
+ return ret;
+ }
+
+ mhi_ctrl->iova_start = 0;
+ mhi_ctrl->iova_stop = 0xffffffff;
+ mhi_ctrl->sbl_size = SZ_512K;
+ mhi_ctrl->seg_len = SZ_512K;
+ mhi_ctrl->fbc_download = true;
+ mhi_ctrl->runtime_get = ath11k_mhi_op_runtime_get;
+ mhi_ctrl->runtime_put = ath11k_mhi_op_runtime_put;
+ mhi_ctrl->status_cb = ath11k_mhi_op_status_cb;
+ mhi_ctrl->read_reg = ath11k_mhi_op_read_reg;
+ mhi_ctrl->write_reg = ath11k_mhi_op_write_reg;
+
+ ret = mhi_register_controller(mhi_ctrl, &ath11k_mhi_config);
+ if (ret) {
+ ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
+ kfree(mhi_ctrl);
+ return ret;
+ }
+
+ return 0;
+}
+
+void ath11k_mhi_unregister(struct ath11k_pci *ab_pci)
+{
+ struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
+
+ mhi_unregister_controller(mhi_ctrl);
+ kfree(mhi_ctrl->irq);
+}
+
+static char *ath11k_mhi_state_to_str(enum ath11k_mhi_state mhi_state)
+{
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ return "INIT";
+ case ATH11K_MHI_DEINIT:
+ return "DEINIT";
+ case ATH11K_MHI_POWER_ON:
+ return "POWER_ON";
+ case ATH11K_MHI_POWER_OFF:
+ return "POWER_OFF";
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ return "FORCE_POWER_OFF";
+ case ATH11K_MHI_SUSPEND:
+ return "SUSPEND";
+ case ATH11K_MHI_RESUME:
+ return "RESUME";
+ case ATH11K_MHI_TRIGGER_RDDM:
+ return "TRIGGER_RDDM";
+ case ATH11K_MHI_RDDM_DONE:
+ return "RDDM_DONE";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static void ath11k_mhi_set_state_bit(struct ath11k_pci *ab_pci,
+ enum ath11k_mhi_state mhi_state)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ set_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_DEINIT:
+ clear_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_POWER_ON:
+ set_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_POWER_OFF:
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ clear_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state);
+ clear_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ clear_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_SUSPEND:
+ set_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_RESUME:
+ clear_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_TRIGGER_RDDM:
+ set_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
+ break;
+ case ATH11K_MHI_RDDM_DONE:
+ set_bit(ATH11K_MHI_RDDM_DONE, &ab_pci->mhi_state);
+ break;
+ default:
+ ath11k_err(ab, "unhandled mhi state (%d)\n", mhi_state);
+ }
+}
+
+static int ath11k_mhi_check_state_bit(struct ath11k_pci *ab_pci,
+ enum ath11k_mhi_state mhi_state)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ if (!test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_DEINIT:
+ case ATH11K_MHI_POWER_ON:
+ if (test_bit(ATH11K_MHI_INIT, &ab_pci->mhi_state) &&
+ !test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_POWER_OFF:
+ case ATH11K_MHI_SUSPEND:
+ if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_RESUME:
+ if (test_bit(ATH11K_MHI_SUSPEND, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_TRIGGER_RDDM:
+ if (test_bit(ATH11K_MHI_POWER_ON, &ab_pci->mhi_state) &&
+ !test_bit(ATH11K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state))
+ return 0;
+ break;
+ case ATH11K_MHI_RDDM_DONE:
+ return 0;
+ default:
+ ath11k_err(ab, "unhandled mhi state: %s(%d)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state);
+ }
+
+ ath11k_err(ab, "failed to set mhi state %s(%d) in current mhi state (0x%lx)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state,
+ ab_pci->mhi_state);
+
+ return -EINVAL;
+}
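For reference, the guard above admits essentially the following lifecycle; this summary is illustrative and not part of the source:

	/*
	 *   DEINIT -> INIT -> POWER_ON -> POWER_OFF -> DEINIT
	 *                        |   ^
	 *                        v   |
	 *                      SUSPEND -> RESUME  (back to powered on)
	 *
	 * TRIGGER_RDDM is accepted only while powered on and not already
	 * pending; RDDM_DONE is always accepted.
	 */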
+
+static int ath11k_mhi_set_state(struct ath11k_pci *ab_pci,
+ enum ath11k_mhi_state mhi_state)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int ret;
+
+ ret = ath11k_mhi_check_state_bit(ab_pci, mhi_state);
+ if (ret)
+ goto out;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "setting mhi state: %s(%d)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state);
+
+ switch (mhi_state) {
+ case ATH11K_MHI_INIT:
+ ret = mhi_prepare_for_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_DEINIT:
+ mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
+ ret = 0;
+ break;
+ case ATH11K_MHI_POWER_ON:
+ ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+ ret = 0;
+ break;
+ case ATH11K_MHI_FORCE_POWER_OFF:
+ mhi_power_down(ab_pci->mhi_ctrl, false);
+ ret = 0;
+ break;
+ case ATH11K_MHI_SUSPEND:
+ break;
+ case ATH11K_MHI_RESUME:
+ break;
+ case ATH11K_MHI_TRIGGER_RDDM:
+ ret = mhi_force_rddm_mode(ab_pci->mhi_ctrl);
+ break;
+ case ATH11K_MHI_RDDM_DONE:
+ break;
+ default:
+ ath11k_err(ab, "unhandled MHI state (%d)\n", mhi_state);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ goto out;
+
+ ath11k_mhi_set_state_bit(ab_pci, mhi_state);
+
+ return 0;
+
+out:
+ ath11k_err(ab, "failed to set mhi state: %s(%d)\n",
+ ath11k_mhi_state_to_str(mhi_state), mhi_state);
+ return ret;
+}
+
+int ath11k_mhi_start(struct ath11k_pci *ab_pci)
+{
+ int ret;
+
+ ab_pci->mhi_ctrl->timeout_ms = MHI_TIMEOUT_DEFAULT_MS;
+
+ ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_INIT);
+ if (ret)
+ goto out;
+
+ ret = ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_ON);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+{
+ ath11k_mhi_set_state(ab_pci, ATH11K_MHI_POWER_OFF);
+ ath11k_mhi_set_state(ab_pci, ATH11K_MHI_DEINIT);
+}
+
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
new file mode 100644
index 000000000000..a7fd5e201d18
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ */
+#ifndef _ATH11K_MHI_H
+#define _ATH11K_MHI_H
+
+#include "pci.h"
+
+#define PCIE_TXVECDB 0x360
+#define PCIE_TXVECSTATUS 0x368
+#define PCIE_RXVECDB 0x394
+#define PCIE_RXVECSTATUS 0x39C
+
+#define MHISTATUS 0x48
+#define MHICTRL 0x38
+#define MHICTRL_RESET_MASK 0x2
+
+enum ath11k_mhi_state {
+ ATH11K_MHI_INIT,
+ ATH11K_MHI_DEINIT,
+ ATH11K_MHI_POWER_ON,
+ ATH11K_MHI_POWER_OFF,
+ ATH11K_MHI_FORCE_POWER_OFF,
+ ATH11K_MHI_SUSPEND,
+ ATH11K_MHI_RESUME,
+ ATH11K_MHI_TRIGGER_RDDM,
+ ATH11K_MHI_RDDM,
+ ATH11K_MHI_RDDM_DONE,
+};
+
+int ath11k_mhi_start(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+int ath11k_mhi_register(struct ath11k_pci *ar_pci);
+void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
+void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
+void ath11k_mhi_clear_vector(struct ath11k_base *ab);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
new file mode 100644
index 000000000000..d7eb6b7160bb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -0,0 +1,1062 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/pci.h>
+
+#include "pci.h"
+#include "core.h"
+#include "hif.h"
+#include "mhi.h"
+#include "debug.h"
+
+#define ATH11K_PCI_BAR_NUM 0
+#define ATH11K_PCI_DMA_MASK 32
+
+#define ATH11K_PCI_IRQ_CE0_OFFSET 3
+
+#define WINDOW_ENABLE_BIT 0x40000000
+#define WINDOW_REG_ADDRESS 0x310c
+#define WINDOW_VALUE_MASK GENMASK(24, 19)
+#define WINDOW_START 0x80000
+#define WINDOW_RANGE_MASK GENMASK(18, 0)
+
+#define TCSR_SOC_HW_VERSION 0x0224
+#define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(16, 8)
+#define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
+
+/* The first 4K of BAR0 is always accessible, so there is
+ * no need to force a wakeup for it.
+ * 4K - 32 = 0xFE0
+ */
+#define ACCESS_ALWAYS_OFF 0xFE0
+
+#define QCA6390_DEVICE_ID 0x1101
+
+static const struct pci_device_id ath11k_pci_id_table[] = {
+ { PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
+
+static const struct ath11k_bus_params ath11k_pci_bus_params = {
+ .mhi_support = true,
+ .m3_fw_support = true,
+ .fixed_bdf_addr = false,
+ .fixed_mem_region = false,
+};
+
+static const struct ath11k_msi_config msi_config = {
+ .total_vectors = 32,
+ .total_users = 4,
+ .users = (struct ath11k_msi_user[]) {
+ { .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+ { .name = "CE", .num_vectors = 10, .base_vector = 3 },
+ { .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+ { .name = "DP", .num_vectors = 18, .base_vector = 14 },
+ },
+};
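The four users partition the 32 vectors back to back (MHI 0-2, CE 3-12, WAKE 13, DP 14-31): each base_vector equals the previous base_vector plus num_vectors, and the counts sum to total_vectors. A hedged sketch of that invariant; the helper is hypothetical and only illustrates the arithmetic:

	static bool example_msi_layout_is_packed(const struct ath11k_msi_config *cfg)
	{
		int i, next = 0;

		for (i = 0; i < cfg->total_users; i++) {
			/* each user must start where the previous one ended */
			if (cfg->users[i].base_vector != next)
				return false;
			next += cfg->users[i].num_vectors;
		}

		/* 3 + 10 + 1 + 18 == 32 == total_vectors */
		return next == cfg->total_vectors;
	}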
+
+static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
+ "bhi",
+ "mhi-er0",
+ "mhi-er1",
+ "ce0",
+ "ce1",
+ "ce2",
+ "ce3",
+ "ce4",
+ "ce5",
+ "ce6",
+ "ce7",
+ "ce8",
+ "ce9",
+ "ce10",
+ "ce11",
+ "host2wbm-desc-feed",
+ "host2reo-re-injection",
+ "host2reo-command",
+ "host2rxdma-monitor-ring3",
+ "host2rxdma-monitor-ring2",
+ "host2rxdma-monitor-ring1",
+ "reo2ost-exception",
+ "wbm2host-rx-release",
+ "reo2host-status",
+ "reo2host-destination-ring4",
+ "reo2host-destination-ring3",
+ "reo2host-destination-ring2",
+ "reo2host-destination-ring1",
+ "rxdma2host-monitor-destination-mac3",
+ "rxdma2host-monitor-destination-mac2",
+ "rxdma2host-monitor-destination-mac1",
+ "ppdu-end-interrupts-mac3",
+ "ppdu-end-interrupts-mac2",
+ "ppdu-end-interrupts-mac1",
+ "rxdma2host-monitor-status-ring-mac3",
+ "rxdma2host-monitor-status-ring-mac2",
+ "rxdma2host-monitor-status-ring-mac1",
+ "host2rxdma-host-buf-ring-mac3",
+ "host2rxdma-host-buf-ring-mac2",
+ "host2rxdma-host-buf-ring-mac1",
+ "rxdma2host-destination-ring-mac3",
+ "rxdma2host-destination-ring-mac2",
+ "rxdma2host-destination-ring-mac1",
+ "host2tcl-input-ring4",
+ "host2tcl-input-ring3",
+ "host2tcl-input-ring2",
+ "host2tcl-input-ring1",
+ "wbm2host-tx-completions-ring3",
+ "wbm2host-tx-completions-ring2",
+ "wbm2host-tx-completions-ring1",
+ "tcl2host-status-ring",
+};
+
+static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);
+
+ lockdep_assert_held(&ab_pci->window_lock);
+
+ if (window != ab_pci->register_window) {
+ iowrite32(WINDOW_ENABLE_BIT | window,
+ ab->mem + WINDOW_REG_ADDRESS);
+ ab_pci->register_window = window;
+ }
+}
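Offsets below WINDOW_START map directly into BAR0; higher offsets go through a sliding 512 KiB window: bits 24:19 of the target offset are programmed into WINDOW_REG_ADDRESS (with WINDOW_ENABLE_BIT set) and bits 18:0 index into the aperture at WINDOW_START. A worked sketch of the arithmetic under the masks defined above; the helper name and example value are illustrative, not driver code:

	static u32 example_bar_offset_for(u32 offset)
	{
		/* bits 24:19 name the window: offset 0xa30004 -> window 0x14,
		 * programmed as iowrite32(WINDOW_ENABLE_BIT | window, ...)
		 */
		u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);

		(void)window;

		/* bits 18:0 locate the register inside the aperture:
		 * 0xa30004 is then accessed at BAR0 + 0x80000 + 0x30004
		 */
		return WINDOW_START + (offset & WINDOW_RANGE_MASK);
	}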
+
+void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ /* For offsets beyond BAR + 4K - 32, the MHI device may need
+ * to be woken up before the access.
+ */
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ iowrite32(value, ab->mem + offset);
+ } else {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ }
+
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+}
+
+u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ u32 val;
+
+ /* For offsets beyond BAR + 4K - 32, the MHI device may need
+ * to be woken up before the access.
+ */
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
+
+ if (offset < WINDOW_START) {
+ val = ioread32(ab->mem + offset);
+ } else {
+ spin_lock_bh(&ab_pci->window_lock);
+ ath11k_pci_select_window(ab_pci, offset);
+ val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
+ spin_unlock_bh(&ab_pci->window_lock);
+ }
+
+ if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
+ offset >= ACCESS_ALWAYS_OFF)
+ mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
+
+ return val;
+}
+
+static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
+{
+ u32 val, delay;
+
+ val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+
+ val |= PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ /* TODO: exact time to sleep is uncertain */
+ delay = 10;
+ mdelay(delay);
+
+ /* The V bit must be toggled back, otherwise the chip stays stuck in reset */
+ val &= ~PCIE_SOC_GLOBAL_RESET_V;
+
+ ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);
+
+ mdelay(delay);
+
+ val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
+ if (val == 0xffffffff)
+ ath11k_warn(ab, "link down error during global reset\n");
+}
+
+static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
+{
+ u32 val;
+
+ /* read cookie */
+ val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);
+
+ val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+ /* TODO: exact time to sleep is uncertain */
+ mdelay(10);
+
+ /* Write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
+ * continuing down the warm-boot path and entering a dead loop.
+ */
+ ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
+ mdelay(10);
+
+ val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);
+
+ /* This is a read-to-clear register; clear it to prevent
+ * Q6 from entering the wrong code path.
+ */
+ val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
+}
+
+static void ath11k_pci_force_wake(struct ath11k_base *ab)
+{
+ ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
+ mdelay(5);
+}
+
+static void ath11k_pci_sw_reset(struct ath11k_base *ab)
+{
+ ath11k_pci_soc_global_reset(ab);
+ ath11k_mhi_clear_vector(ab);
+ ath11k_pci_soc_global_reset(ab);
+ ath11k_mhi_set_mhictrl_reset(ab);
+ ath11k_pci_clear_dbg_registers(ab);
+}
+
+int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
+ return pci_irq_vector(pci_dev, vector);
+}
+
+static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
+ u32 *msi_addr_hi)
+{
+ struct pci_dev *pci_dev = to_pci_dev(ab->dev);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
+ msi_addr_lo);
+
+ pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
+ msi_addr_hi);
+}
+
+int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ int idx;
+
+ for (idx = 0; idx < msi_config.total_users; idx++) {
+ if (strcmp(user_name, msi_config.users[idx].name) == 0) {
+ *num_vectors = msi_config.users[idx].num_vectors;
+ *user_base_data = msi_config.users[idx].base_vector
+ + ab_pci->msi_ep_base_data;
+ *base_vector = msi_config.users[idx].base_vector;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
+ user_name, *num_vectors, *user_base_data,
+ *base_vector);
+
+ return 0;
+ }
+ }
+
+ ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
+
+ return -EINVAL;
+}
+
+static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
+ num_vectors, user_base_data,
+ base_vector);
+}
+
+static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
+{
+ int i, j;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++)
+ free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+
+ netif_napi_del(&irq_grp->napi);
+ }
+}
+
+static void ath11k_pci_free_irq(struct ath11k_base *ab)
+{
+ int i, irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+ }
+
+ ath11k_pci_free_ext_irq(ab);
+}
+
+static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ enable_irq(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
+{
+ u32 irq_idx;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+}
+
+static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pci_ce_irq_disable(ab, i);
+ }
+}
+
+static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
+{
+ int i;
+ int irq_idx;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+}
+
+static void ath11k_pci_ce_tasklet(unsigned long data)
+{
+ struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;
+
+ ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
+
+ ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
+}
+
+static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ce_pipe *ce_pipe = arg;
+
+ ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
+ tasklet_schedule(&ce_pipe->intr_tq);
+
+ return IRQ_HANDLED;
+}
+
+static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];
+
+ ath11k_pci_ext_grp_disable(irq_grp);
+
+ napi_synchronize(&irq_grp->napi);
+ napi_disable(&irq_grp->napi);
+ }
+}
+
+static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
+{
+ int i;
+
+ for (i = 0; i < irq_grp->num_irq; i++)
+ enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
+}
+
+static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ napi_enable(&irq_grp->napi);
+ ath11k_pci_ext_grp_enable(irq_grp);
+ }
+}
+
+static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
+{
+ int i, j, irq_idx;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ irq_idx = irq_grp->irqs[j];
+ synchronize_irq(ab->irq_num[irq_idx]);
+ }
+ }
+}
+
+static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
+{
+ __ath11k_pci_ext_irq_disable(ab);
+ ath11k_pci_sync_ext_irqs(ab);
+}
+
+static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
+ struct ath11k_ext_irq_grp,
+ napi);
+ struct ath11k_base *ab = irq_grp->ab;
+ int work_done;
+
+ work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ ath11k_pci_ext_grp_enable(irq_grp);
+ }
+
+ if (work_done > budget)
+ work_done = budget;
+
+ return work_done;
+}
+
+static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
+{
+ struct ath11k_ext_irq_grp *irq_grp = arg;
+
+ ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);
+
+ ath11k_pci_ext_grp_disable(irq_grp);
+
+ napi_schedule(&irq_grp->napi);
+
+ return IRQ_HANDLED;
+}
+
+static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
+{
+ int i, j, ret, num_vectors = 0;
+ u32 user_base_data = 0, base_vector = 0;
+
+ ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
+ &num_vectors,
+ &user_base_data,
+ &base_vector);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
+ struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ u32 num_irq = 0;
+
+ irq_grp->ab = ab;
+ irq_grp->grp_id = i;
+ init_dummy_netdev(&irq_grp->napi_ndev);
+ netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);
+
+ if (ab->hw_params.ring_mask->tx[i] ||
+ ab->hw_params.ring_mask->rx[i] ||
+ ab->hw_params.ring_mask->rx_err[i] ||
+ ab->hw_params.ring_mask->rx_wbm_rel[i] ||
+ ab->hw_params.ring_mask->reo_status[i] ||
+ ab->hw_params.ring_mask->rxdma2host[i] ||
+ ab->hw_params.ring_mask->host2rxdma[i] ||
+ ab->hw_params.ring_mask->rx_mon_status[i]) {
+ num_irq = 1;
+ }
+
+ irq_grp->num_irq = num_irq;
+ irq_grp->irqs[0] = base_vector + i;
+
+ for (j = 0; j < irq_grp->num_irq; j++) {
+ int irq_idx = irq_grp->irqs[j];
+ int vector = (i % num_vectors) + base_vector;
+ int irq = ath11k_pci_get_msi_irq(ab->dev, vector);
+
+ ab->irq_num[irq_idx] = irq;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI,
+ "irq:%d group:%d\n", irq, i);
+ ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
+ IRQF_SHARED,
+ "DP_EXT_IRQ", irq_grp);
+ if (ret) {
+ ath11k_err(ab, "failed request irq %d: %d\n",
+ vector, ret);
+ return ret;
+ }
+
+ disable_irq_nosync(ab->irq_num[irq_idx]);
+ }
+ }
+
+ return 0;
+}
+
+static int ath11k_pci_config_irq(struct ath11k_base *ab)
+{
+ struct ath11k_ce_pipe *ce_pipe;
+ u32 msi_data_start;
+ u32 msi_data_count;
+ u32 msi_irq_start;
+ unsigned int msi_data;
+ int irq, i, ret, irq_idx;
+
+ ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
+ "CE", &msi_data_count,
+ &msi_data_start, &msi_irq_start);
+ if (ret)
+ return ret;
+
+ /* Configure CE irqs */
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ msi_data = (i % msi_data_count) + msi_irq_start;
+ irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
+ ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
+
+ tasklet_init(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet,
+ (unsigned long)ce_pipe);
+
+ ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
+ IRQF_SHARED, irq_name[irq_idx],
+ ce_pipe);
+ if (ret) {
+ ath11k_err(ab, "failed to request irq %d: %d\n",
+ irq_idx, ret);
+ return ret;
+ }
+
+ ab->irq_num[irq_idx] = irq;
+ ath11k_pci_ce_irq_disable(ab, i);
+ }
+
+ ret = ath11k_pci_ext_irq_config(ab);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
+{
+ struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+
+ cfg->tgt_ce = ab->hw_params.target_ce_config;
+ cfg->tgt_ce_len = ab->hw_params.target_ce_count;
+
+ cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
+ cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
+ ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;
+
+ ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
+ &cfg->shadow_reg_v2_len);
+}
+
+static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+ ath11k_pci_ce_irq_enable(ab, i);
+ }
+}
+
+static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct msi_desc *msi_desc;
+ int num_vectors;
+ int ret;
+
+ num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
+ msi_config.total_vectors,
+ msi_config.total_vectors,
+ PCI_IRQ_MSI);
+ if (num_vectors != msi_config.total_vectors) {
+ ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
+ msi_config.total_vectors, num_vectors);
+
+ if (num_vectors >= 0)
+ return -EINVAL;
+ else
+ return num_vectors;
+ }
+
+ msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
+ if (!msi_desc) {
+ ath11k_err(ab, "msi_desc is NULL!\n");
+ ret = -EINVAL;
+ goto free_msi_vector;
+ }
+
+ ab_pci->msi_ep_base_data = msi_desc->msg.data;
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
+
+ return 0;
+
+free_msi_vector:
+ pci_free_irq_vectors(ab_pci->pdev);
+
+ return ret;
+}
+
+static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci)
+{
+ pci_free_irq_vectors(ab_pci->pdev);
+}
+
+static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ u16 device_id;
+ int ret = 0;
+
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ if (device_id != ab_pci->dev_id) {
+ ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
+ device_id, ab_pci->dev_id);
+ ret = -EIO;
+ goto out;
+ }
+
+ ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
+ if (ret) {
+ ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to enable pci device: %d\n", ret);
+ goto out;
+ }
+
+ ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
+ if (ret) {
+ ath11k_err(ab, "failed to request pci region: %d\n", ret);
+ goto disable_device;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
+ ATH11K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
+ if (ret) {
+ ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n",
+ ATH11K_PCI_DMA_MASK, ret);
+ goto release_region;
+ }
+
+ pci_set_master(pdev);
+
+ ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
+ ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
+ if (!ab->mem) {
+ ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
+ ret = -EIO;
+ goto clear_master;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
+ return 0;
+
+clear_master:
+ pci_clear_master(pdev);
+release_region:
+ pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
+disable_device:
+ pci_disable_device(pdev);
+out:
+ return ret;
+}
+
+static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+ struct pci_dev *pci_dev = ab_pci->pdev;
+
+ pci_iounmap(pci_dev, ab->mem);
+ ab->mem = NULL;
+ pci_clear_master(pci_dev);
+ pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
+ if (pci_is_enabled(pci_dev))
+ pci_disable_device(pci_dev);
+}
+
+static int ath11k_pci_power_up(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ int ret;
+
+ ab_pci->register_window = 0;
+ clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_sw_reset(ab_pci->ab);
+
+ ret = ath11k_mhi_start(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to start mhi: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath11k_pci_power_down(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ ath11k_mhi_stop(ab_pci);
+ clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_force_wake(ab_pci->ab);
+ ath11k_pci_sw_reset(ab_pci->ab);
+}
+
+static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
+{
+ int i;
+
+ for (i = 0; i < ab->hw_params.ce_count; i++) {
+ struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
+
+ if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
+ continue;
+
+ tasklet_kill(&ce_pipe->intr_tq);
+ }
+}
+
+static void ath11k_pci_stop(struct ath11k_base *ab)
+{
+ ath11k_pci_ce_irqs_disable(ab);
+ ath11k_pci_sync_ce_irqs(ab);
+ ath11k_pci_kill_tasklets(ab);
+ ath11k_ce_cleanup_pipes(ab);
+}
+
+static int ath11k_pci_start(struct ath11k_base *ab)
+{
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+
+ ath11k_pci_ce_irqs_enable(ab);
+ ath11k_ce_rx_post_buf(ab);
+
+ return 0;
+}
+
+static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
+ u8 *ul_pipe, u8 *dl_pipe)
+{
+ const struct service_to_pipe *entry;
+ bool ul_set = false, dl_set = false;
+ int i;
+
+ for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
+ entry = &ab->hw_params.svc_to_ce_map[i];
+
+ if (__le32_to_cpu(entry->service_id) != service_id)
+ continue;
+
+ switch (__le32_to_cpu(entry->pipedir)) {
+ case PIPEDIR_NONE:
+ break;
+ case PIPEDIR_IN:
+ WARN_ON(dl_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ break;
+ case PIPEDIR_OUT:
+ WARN_ON(ul_set);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ ul_set = true;
+ break;
+ case PIPEDIR_INOUT:
+ WARN_ON(dl_set);
+ WARN_ON(ul_set);
+ *dl_pipe = __le32_to_cpu(entry->pipenum);
+ *ul_pipe = __le32_to_cpu(entry->pipenum);
+ dl_set = true;
+ ul_set = true;
+ break;
+ }
+ }
+
+ if (WARN_ON(!ul_set || !dl_set))
+ return -ENOENT;
+
+ return 0;
+}
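Callers reach this helper through the HIF ops below to resolve a service to its copy-engine pipes. A hedged usage sketch with error handling elided; ATH11K_HTC_SVC_ID_WMI_CONTROL is assumed here to be the service id the driver uses for WMI traffic:

	u8 ul_pipe, dl_pipe;

	if (!ath11k_pci_map_service_to_pipe(ab, ATH11K_HTC_SVC_ID_WMI_CONTROL,
					    &ul_pipe, &dl_pipe))
		ath11k_dbg(ab, ATH11K_DBG_PCI, "wmi control ul pipe %u dl pipe %u\n",
			   ul_pipe, dl_pipe);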
+
+static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
+ .start = ath11k_pci_start,
+ .stop = ath11k_pci_stop,
+ .read32 = ath11k_pci_read32,
+ .write32 = ath11k_pci_write32,
+ .power_down = ath11k_pci_power_down,
+ .power_up = ath11k_pci_power_up,
+ .irq_enable = ath11k_pci_ext_irq_enable,
+ .irq_disable = ath11k_pci_ext_irq_disable,
+ .get_msi_address = ath11k_pci_get_msi_address,
+ .get_user_msi_vector = ath11k_get_user_msi_assignment,
+ .map_service_to_pipe = ath11k_pci_map_service_to_pipe,
+};
+
+static int ath11k_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_dev)
+{
+ struct ath11k_base *ab;
+ struct ath11k_pci *ab_pci;
+ u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
+ int ret;
+
+ dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");
+
+ ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
+ &ath11k_pci_bus_params);
+ if (!ab) {
+ dev_err(&pdev->dev, "failed to allocate ath11k base\n");
+ return -ENOMEM;
+ }
+
+ ab->dev = &pdev->dev;
+ pci_set_drvdata(pdev, ab);
+ ab_pci = ath11k_pci_priv(ab);
+ ab_pci->dev_id = pci_dev->device;
+ ab_pci->ab = ab;
+ ab_pci->pdev = pdev;
+ ab->hif.ops = &ath11k_pci_hif_ops;
+ spin_lock_init(&ab_pci->window_lock);
+
+ ret = ath11k_pci_claim(ab_pci, pdev);
+ if (ret) {
+ ath11k_err(ab, "failed to claim device: %d\n", ret);
+ goto err_free_core;
+ }
+
+ switch (pci_dev->device) {
+ case QCA6390_DEVICE_ID:
+ soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
+ soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
+ soc_hw_version);
+ soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
+ soc_hw_version);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+
+ switch (soc_hw_version_major) {
+ case 2:
+ ab->hw_rev = ATH11K_HW_QCA6390_HW20;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
+ soc_hw_version_major, soc_hw_version_minor);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+ break;
+ default:
+ dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
+ pci_dev->device);
+ ret = -EOPNOTSUPP;
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_pci_enable_msi(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to enable msi: %d\n", ret);
+ goto err_pci_free_region;
+ }
+
+ ret = ath11k_core_pre_init(ab);
+ if (ret)
+ goto err_pci_disable_msi;
+
+ ret = ath11k_mhi_register(ab_pci);
+ if (ret) {
+ ath11k_err(ab, "failed to register mhi: %d\n", ret);
+ goto err_pci_disable_msi;
+ }
+
+ ret = ath11k_hal_srng_init(ab);
+ if (ret)
+ goto err_mhi_unregister;
+
+ ret = ath11k_ce_alloc_pipes(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
+ goto err_hal_srng_deinit;
+ }
+
+ ath11k_pci_init_qmi_ce_config(ab);
+
+ ret = ath11k_pci_config_irq(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to config irq: %d\n", ret);
+ goto err_ce_free;
+ }
+
+ ret = ath11k_core_init(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to init core: %d\n", ret);
+ goto err_free_irq;
+ }
+ return 0;
+
+err_free_irq:
+ ath11k_pci_free_irq(ab);
+
+err_ce_free:
+ ath11k_ce_free_pipes(ab);
+
+err_hal_srng_deinit:
+ ath11k_hal_srng_deinit(ab);
+
+err_mhi_unregister:
+ ath11k_mhi_unregister(ab_pci);
+
+err_pci_disable_msi:
+ ath11k_pci_disable_msi(ab_pci);
+
+err_pci_free_region:
+ ath11k_pci_free_region(ab_pci);
+
+err_free_core:
+ ath11k_core_free(ab);
+
+ return ret;
+}
+
+static void ath11k_pci_remove(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+
+ set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);
+
+ ath11k_core_deinit(ab);
+
+ ath11k_mhi_unregister(ab_pci);
+
+ ath11k_pci_free_irq(ab);
+ ath11k_pci_disable_msi(ab_pci);
+ ath11k_pci_free_region(ab_pci);
+
+ ath11k_hal_srng_deinit(ab);
+ ath11k_ce_free_pipes(ab);
+ ath11k_core_free(ab);
+}
+
+static void ath11k_pci_shutdown(struct pci_dev *pdev)
+{
+ struct ath11k_base *ab = pci_get_drvdata(pdev);
+
+ ath11k_pci_power_down(ab);
+}
+
+static struct pci_driver ath11k_pci_driver = {
+ .name = "ath11k_pci",
+ .id_table = ath11k_pci_id_table,
+ .probe = ath11k_pci_probe,
+ .remove = ath11k_pci_remove,
+ .shutdown = ath11k_pci_shutdown,
+};
+
+static int ath11k_pci_init(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&ath11k_pci_driver);
+ if (ret)
+ pr_err("failed to register ath11k pci driver: %d\n",
+ ret);
+
+ return ret;
+}
+module_init(ath11k_pci_init);
+
+static void ath11k_pci_exit(void)
+{
+ pci_unregister_driver(&ath11k_pci_driver);
+}
+
+module_exit(ath11k_pci_exit);
+
+MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
new file mode 100644
index 000000000000..43562f774a37
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ */
+#ifndef _ATH11K_PCI_H
+#define _ATH11K_PCI_H
+
+#include <linux/mhi.h>
+
+#include "core.h"
+
+#define PCIE_SOC_GLOBAL_RESET 0x3008
+#define PCIE_SOC_GLOBAL_RESET_V 1
+
+#define WLAON_WARM_SW_ENTRY 0x1f80504
+#define WLAON_SOC_RESET_CAUSE_REG 0x01f8060c
+
+#define PCIE_Q6_COOKIE_ADDR 0x01f80500
+#define PCIE_Q6_COOKIE_DATA 0xc0000000
+
+/* register to wake the UMAC from power collapse */
+#define PCIE_SCRATCH_0_SOC_PCIE_REG 0x4040
+
+/* register used for handshake mechanism to validate UMAC is awake */
+#define PCIE_SOC_WAKE_PCIE_LOCAL_REG 0x3004
+
+struct ath11k_msi_user {
+ char *name;
+ int num_vectors;
+ u32 base_vector;
+};
+
+struct ath11k_msi_config {
+ int total_vectors;
+ int total_users;
+ struct ath11k_msi_user *users;
+};
+
+enum ath11k_pci_flags {
+ ATH11K_PCI_FLAG_INIT_DONE,
+};
+
+struct ath11k_pci {
+ struct pci_dev *pdev;
+ struct ath11k_base *ab;
+ u16 dev_id;
+ char amss_path[100];
+ u32 msi_ep_base_data;
+ struct mhi_controller *mhi_ctrl;
+ unsigned long mhi_state;
+ u32 register_window;
+
+ /* protects register_window above */
+ spinlock_t window_lock;
+
+ /* enum ath11k_pci_flags */
+ unsigned long flags;
+};
+
+static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
+{
+ return (struct ath11k_pci *)ab->drv_priv;
+}
+
+int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ar_pci, char *user_name,
+ int *num_vectors, u32 *user_base_data,
+ u32 *base_vector);
+int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector);
+void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value);
+u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset);
+
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 297172538620..61ad9300eafb 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -223,9 +223,6 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer = ath11k_peer_find_by_pdev_idx(ar->ab, ar->pdev_idx, param->peer_addr);
if (peer) {
spin_unlock_bh(&ar->ab->base_lock);
- ath11k_info(ar->ab,
- "ignoring the peer %pM creation on same pdev idx %d\n",
- param->peer_addr, ar->pdev_idx);
return -EINVAL;
}
spin_unlock_bh(&ar->ab->base_lock);
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index c00a99ad8dbc..c2b165158225 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -3,12 +3,17 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
+#include <linux/elf.h>
+
#include "qmi.h"
#include "core.h"
#include "debug.h"
#include <linux/of.h>
#include <linux/firmware.h>
+#define SLEEP_CLOCK_SELECT_INTERNAL_BIT 0x02
+#define HOST_CSTATE_BIT 0x04
+
static struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
@@ -1516,15 +1521,35 @@ static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
req.bdf_support_valid = 1;
req.bdf_support = 1;
- req.m3_support_valid = 0;
- req.m3_support = 0;
-
- req.m3_cache_support_valid = 0;
- req.m3_cache_support = 0;
+ if (ab->bus_params.m3_fw_support) {
+ req.m3_support_valid = 1;
+ req.m3_support = 1;
+ req.m3_cache_support_valid = 1;
+ req.m3_cache_support = 1;
+ } else {
+ req.m3_support_valid = 0;
+ req.m3_support = 0;
+ req.m3_cache_support_valid = 0;
+ req.m3_cache_support = 0;
+ }
req.cal_done_valid = 1;
req.cal_done = ab->qmi.cal_done;
+ if (ab->hw_params.internal_sleep_clock) {
+ req.nm_modem_valid = 1;
+
+ /* Notify firmware that this is a non-Qualcomm platform. */
+ req.nm_modem |= HOST_CSTATE_BIT;
+
+ /* Notify firmware about the sleep clock selection;
+ * nm_modem bit 1 is used for this purpose. The host driver
+ * on non-Qualcomm platforms should select the internal
+ * sleep clock.
+ */
+ req.nm_modem |= SLEEP_CLOCK_SELECT_INTERNAL_BIT;
+ }
+
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -1634,19 +1659,30 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
memset(&resp, 0, sizeof(resp));
- req->mem_seg_len = ab->qmi.mem_seg_count;
+ /* By default the QCA6390 firmware requests a single block of
+ * ~4M contiguous DMA memory, which is hard to allocate from the
+ * OS. So the host returns failure to the firmware, and the
+ * firmware then requests multiple blocks of small chunk size
+ * memory instead.
+ */
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
+ ab->qmi.mem_seg_count);
+ memset(req, 0, sizeof(*req));
+ } else {
+ req->mem_seg_len = ab->qmi.mem_seg_count;
+
+ for (i = 0; i < req->mem_seg_len ; i++) {
+ req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
+ req->mem_seg[i].size = ab->qmi.target_mem[i].size;
+ req->mem_seg[i].type = ab->qmi.target_mem[i].type;
+ }
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_respond_mem_resp_msg_v01_ei, &resp);
if (ret < 0)
goto out;
- for (i = 0; i < req->mem_seg_len ; i++) {
- req->mem_seg[i].addr = ab->qmi.target_mem[i].paddr;
- req->mem_seg[i].size = ab->qmi.target_mem[i].size;
- req->mem_seg[i].type = ab->qmi.target_mem[i].type;
- }
-
ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
QMI_WLANFW_RESPOND_MEM_REQ_V01,
QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN,
@@ -1674,15 +1710,56 @@ out:
return ret;
}
+static void ath11k_qmi_free_target_mem_chunk(struct ath11k_base *ab)
+{
+ int i;
+
+ if (ab->bus_params.fixed_mem_region)
+ return;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ if (!ab->qmi.target_mem[i].vaddr)
+ continue;
+
+ dma_free_coherent(ab->dev,
+ ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].vaddr,
+ ab->qmi.target_mem[i].paddr);
+ ab->qmi.target_mem[i].vaddr = NULL;
+ }
+}
+
static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
{
+ int i;
+ struct target_mem_chunk *chunk;
+
+ for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+ chunk = &ab->qmi.target_mem[i];
+ chunk->vaddr = dma_alloc_coherent(ab->dev,
+ chunk->size,
+ &chunk->paddr,
+ GFP_KERNEL);
+ if (!chunk->vaddr) {
+ ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
+ chunk->size,
+ chunk->type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
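Putting the respond_mem change above together with this allocator, the negotiation on buses without a fixed memory region runs roughly as follows; this summary is illustrative, with message names abbreviated:

	/*
	 *   FW:   request_mem(1 chunk, ~4M contiguous)
	 *   host: respond_mem(empty)              <- declined as too large
	 *   FW:   request_mem(N small chunks)
	 *   host: dma_alloc_coherent() each chunk,
	 *         respond_mem(N {paddr, size, type} entries)
	 */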
+
+static int ath11k_qmi_assign_target_mem_chunk(struct ath11k_base *ab)
+{
int i, idx;
for (i = 0, idx = 0; i < ab->qmi.mem_seg_count; i++) {
switch (ab->qmi.target_mem[i].type) {
case BDF_MEM_REGION_TYPE:
- ab->qmi.target_mem[idx].paddr = ATH11K_QMI_BDF_ADDRESS;
- ab->qmi.target_mem[idx].vaddr = ATH11K_QMI_BDF_ADDRESS;
+ ab->qmi.target_mem[idx].paddr = ab->hw_params.bdf_addr;
+ ab->qmi.target_mem[idx].vaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@@ -1694,7 +1771,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
}
/* TODO ath11k does not support cold boot calibration */
ab->qmi.target_mem[idx].paddr = 0;
- ab->qmi.target_mem[idx].vaddr = 0;
+ ab->qmi.target_mem[idx].vaddr = NULL;
ab->qmi.target_mem[idx].size = ab->qmi.target_mem[i].size;
ab->qmi.target_mem[idx].type = ab->qmi.target_mem[i].type;
idx++;
@@ -1772,11 +1849,11 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
- ath11k_info(ab, "qmi target: chip_id: 0x%x, chip_family: 0x%x, board_id: 0x%x, soc_id: 0x%x\n",
+ ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
- ath11k_info(ab, "qmi fw_version: 0x%x fw_build_timestamp: %s fw_build_id: %s",
+ ath11k_info(ab, "fw_version 0x%x fw_build_timestamp %s fw_build_id %s",
ab->qmi.target.fw_version,
ab->qmi.target.fw_build_timestamp,
ab->qmi.target.fw_build_id);
@@ -1790,21 +1867,19 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
void __iomem *bdf_addr)
{
- struct device *dev = ab->dev;
- char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
const struct firmware *fw_entry;
struct ath11k_board_data bd;
u32 fw_size;
- int ret = 0;
-
- memset(&bd, 0, sizeof(bd));
+ int ret;
switch (type) {
case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
+ memset(&bd, 0, sizeof(bd));
+
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret) {
ath11k_warn(ab, "qmi failed to load BDF\n");
- goto out;
+ return ret;
}
fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
@@ -1812,12 +1887,12 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
ath11k_core_free_bdf(ab, &bd);
break;
case ATH11K_QMI_FILE_TYPE_CALDATA:
- snprintf(filename, sizeof(filename),
- "%s/%s", ab->hw_params.fw.dir, ATH11K_QMI_DEFAULT_CAL_FILE_NAME);
- ret = request_firmware(&fw_entry, filename, dev);
- if (ret) {
- ath11k_warn(ab, "qmi failed to load CAL: %s\n", filename);
- goto out;
+ fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ ret = PTR_ERR(fw_entry);
+ ath11k_warn(ab, "failed to load %s: %d\n",
+ ATH11K_DEFAULT_CAL_FILE, ret);
+ return ret;
}
fw_size = min_t(u32, ab->hw_params.fw.board_size,
@@ -1825,23 +1900,18 @@ ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
fw_entry->data, fw_size);
- ath11k_info(ab, "qmi downloading BDF: %s, size: %zu\n",
- filename, fw_entry->size);
release_firmware(fw_entry);
break;
default:
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
req->total_size = fw_size;
-
-out:
- return ret;
+ return 0;
}
-static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
+static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
@@ -1854,7 +1924,7 @@ static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
return -ENOMEM;
memset(&resp, 0, sizeof(resp));
- bdf_addr = ioremap(ATH11K_QMI_BDF_ADDRESS, ATH11K_QMI_BDF_MAX_SIZE);
+ bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE);
if (!bdf_addr) {
ath11k_warn(ab, "qmi ioremap error for BDF\n");
ret = -EIO;
@@ -1905,7 +1975,6 @@ static int ath11k_qmi_load_bdf(struct ath11k_base *ab)
goto out_qmi_bdf;
}
}
- ath11k_info(ab, "qmi BDF downloaded\n");
out_qmi_bdf:
iounmap(bdf_addr);
@@ -1914,8 +1983,151 @@ out:
return ret;
}
+static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
+{
+ struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
+ struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct ath11k_board_data bd;
+ unsigned int remaining;
+ struct qmi_txn txn = {};
+ int ret;
+ const u8 *temp;
+ int bdf_type;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+ memset(&resp, 0, sizeof(resp));
+
+ memset(&bd, 0, sizeof(bd));
+ ret = ath11k_core_fetch_bdf(ab, &bd);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to load bdf:\n");
+ goto out;
+ }
+
+ temp = bd.data;
+ remaining = bd.len;
+
+ if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
+ bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
+ else
+ bdf_type = ATH11K_QMI_BDF_TYPE_BIN;
+
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type);
+
+ while (remaining) {
+ req->valid = 1;
+ req->file_id_valid = 1;
+ req->file_id = ab->qmi.target.board_id;
+ req->total_size_valid = 1;
+ req->total_size = bd.len;
+ req->seg_id_valid = 1;
+ req->data_valid = 1;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ req->bdf_type = bdf_type;
+ req->bdf_type_valid = 1;
+ req->end_valid = 1;
+ req->end = 0;
+
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ memcpy(req->data, temp, req->data_len);
+
+ ret = qmi_txn_init(&ab->qmi.handle, &txn,
+ qmi_wlanfw_bdf_download_resp_msg_v01_ei,
+ &resp);
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
+ QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
+ qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ goto out_qmi_bdf;
+ }
+
+ ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+ if (ret < 0)
+ goto out_qmi_bdf;
+
+ if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ ath11k_warn(ab, "qmi BDF download failed, result: %d, err: %d\n",
+ resp.resp.result, resp.resp.error);
+ ret = resp.resp.result;
+ goto out_qmi_bdf;
+ }
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ }
+
+out_qmi_bdf:
+ ath11k_core_free_bdf(ab, &bd);
+
+out:
+ kfree(req);
+ return ret;
+}
+
+static int ath11k_qmi_m3_load(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+ const struct firmware *fw;
+ char path[100];
+ int ret;
+
+ if (m3_mem->vaddr || m3_mem->size)
+ return 0;
+
+ fw = ath11k_core_firmware_request(ab, ATH11K_M3_FILE);
+ if (IS_ERR(fw)) {
+ ret = PTR_ERR(fw);
+ ath11k_core_create_firmware_path(ab, ATH11K_M3_FILE,
+ path, sizeof(path));
+ ath11k_err(ab, "failed to load %s: %d\n", path, ret);
+ return ret;
+ }
+
+ m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+ fw->size, &m3_mem->paddr,
+ GFP_KERNEL);
+ if (!m3_mem->vaddr) {
+ ath11k_err(ab, "failed to allocate memory for M3 with size %zu\n",
+ fw->size);
+ release_firmware(fw);
+ return -ENOMEM;
+ }
+
+ memcpy(m3_mem->vaddr, fw->data, fw->size);
+ m3_mem->size = fw->size;
+ release_firmware(fw);
+
+ return 0;
+}
+
+static void ath11k_qmi_m3_free(struct ath11k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!ab->bus_params.m3_fw_support || !m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+}
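ath11k_qmi_m3_load()/ath11k_qmi_m3_free() cache the M3 image in a DMA-coherent buffer and keep it across restarts (hence the early return when vaddr is already set). A minimal sketch of that load-once lifecycle, assuming a struct device from driver context:

    /* Load-once M3 lifecycle sketch: firmware copy is released as soon
     * as the DMA-coherent copy exists; the coherent buffer persists.
     */
    #include <linux/firmware.h>
    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    static void *m3_vaddr;
    static dma_addr_t m3_paddr;
    static size_t m3_size;

    static int m3_load(struct device *dev, const char *name)
    {
            const struct firmware *fw;
            int ret;

            if (m3_vaddr)           /* already cached from a previous boot */
                    return 0;

            ret = request_firmware(&fw, name, dev);
            if (ret)
                    return ret;

            m3_vaddr = dma_alloc_coherent(dev, fw->size, &m3_paddr, GFP_KERNEL);
            if (!m3_vaddr) {
                    release_firmware(fw);
                    return -ENOMEM;
            }
            memcpy(m3_vaddr, fw->data, fw->size);
            m3_size = fw->size;
            release_firmware(fw);   /* fw copy no longer needed */
            return 0;
    }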
+
static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
struct qmi_wlanfw_m3_info_req_msg_v01 req;
struct qmi_wlanfw_m3_info_resp_msg_v01 resp;
struct qmi_txn txn = {};
@@ -1923,8 +2135,20 @@ static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- req.addr = 0;
- req.size = 0;
+
+ if (ab->bus_params.m3_fw_support) {
+ ret = ath11k_qmi_m3_load(ab);
+ if (ret) {
+ ath11k_err(ab, "failed to load m3 firmware: %d", ret);
+ return ret;
+ }
+
+ req.addr = m3_mem->paddr;
+ req.size = m3_mem->size;
+ } else {
+ req.addr = 0;
+ req.size = 0;
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_m3_info_resp_msg_v01_ei, &resp);
@@ -2034,7 +2258,7 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
req->tgt_cfg_valid = 1;
/* This is the number of CE configs */
req->tgt_cfg_len = ab->qmi.ce_cfg.tgt_ce_len;
- for (pipe_num = 0; pipe_num <= req->tgt_cfg_len ; pipe_num++) {
+ for (pipe_num = 0; pipe_num < req->tgt_cfg_len ; pipe_num++) {
req->tgt_cfg[pipe_num].pipe_num = ce_cfg[pipe_num].pipenum;
req->tgt_cfg[pipe_num].pipe_dir = ce_cfg[pipe_num].pipedir;
req->tgt_cfg[pipe_num].nentries = ce_cfg[pipe_num].nentries;
@@ -2051,7 +2275,18 @@ static int ath11k_qmi_wlanfw_wlan_cfg_send(struct ath11k_base *ab)
req->svc_cfg[pipe_num].pipe_num = svc_cfg[pipe_num].pipenum;
}
req->shadow_reg_valid = 0;
- req->shadow_reg_v2_valid = 0;
+
+ /* set shadow v2 configuration */
+ if (ab->hw_params.supports_shadow_regs) {
+ req->shadow_reg_v2_valid = 1;
+ req->shadow_reg_v2_len = min_t(u32,
+ ab->qmi.ce_cfg.shadow_reg_v2_len,
+ QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01);
+ memcpy(&req->shadow_reg_v2, ab->qmi.ce_cfg.shadow_reg_v2,
+ sizeof(u32) * req->shadow_reg_v2_len);
+ } else {
+ req->shadow_reg_v2_valid = 0;
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_wlan_cfg_resp_msg_v01_ei, &resp);
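The shadow_reg_v2 block clamps the copy length with min_t() before the memcpy, so the fixed-size array in the request can never be overrun however large the source length claims to be. A standalone sketch of the clamp-before-copy idiom, with DST_MAX standing in for QMI_WLANFW_MAX_NUM_SHADOW_REG_V2_V01:

    /* Clamp the length to the destination capacity, never trust the
     * source's claimed length.
     */
    #include <linux/kernel.h>   /* min_t */
    #include <linux/string.h>
    #include <linux/types.h>

    #define DST_MAX 36

    static u32 dst[DST_MAX];

    static u32 copy_clamped(const u32 *src, u32 src_len)
    {
            u32 n = min_t(u32, src_len, DST_MAX);

            memcpy(dst, src, sizeof(u32) * n);  /* bounded by DST_MAX */
            return n;   /* report how many entries were actually taken */
    }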
@@ -2181,7 +2416,10 @@ static void ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return;
}
- ret = ath11k_qmi_load_bdf(ab);
+ if (ab->bus_params.fixed_bdf_addr)
+ ret = ath11k_qmi_load_bdf_fixed_addr(ab);
+ else
+ ret = ath11k_qmi_load_bdf_qmi(ab);
if (ret < 0) {
ath11k_warn(ab, "qmi failed to load board data file:%d\n", ret);
return;
@@ -2220,10 +2458,20 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
msg->mem_seg[i].type, msg->mem_seg[i].size);
}
- ret = ath11k_qmi_alloc_target_mem_chunk(ab);
- if (ret < 0) {
- ath11k_warn(ab, "qmi failed to alloc target memory:%d\n", ret);
- return;
+ if (ab->bus_params.fixed_mem_region) {
+ ret = ath11k_qmi_assign_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to assign target memory: %d\n",
+ ret);
+ return;
+ }
+ } else if (msg->mem_seg_len > 2) {
+ ret = ath11k_qmi_alloc_target_mem_chunk(ab);
+ if (ret) {
+ ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
+ ret);
+ return;
+ }
}
ath11k_qmi_driver_event_post(qmi, ATH11K_QMI_EVENT_REQUEST_MEM, NULL);
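The mem-request callback now dispatches on bus_params.fixed_mem_region: platforms with a firmware-reserved carveout assign it, while the others fall back to host DMA allocation when the firmware asks for more than two segments. A sketch of the dispatch, with assign_reserved()/alloc_dynamic() as hypothetical stand-ins for the qmi helpers:

    /* Two-path target memory setup sketch; helper names are
     * hypothetical, mirroring the shape of the change above.
     */
    struct bus_params { int fixed_mem_region; };

    static int assign_reserved(void);
    static int alloc_dynamic(void);

    static int setup_target_mem(const struct bus_params *bp, int nsegs)
    {
            if (bp->fixed_mem_region)
                    return assign_reserved();   /* firmware-reserved carveout */
            if (nsegs > 2)
                    return alloc_dynamic();     /* host-allocated DMA chunks */
            return 0;                           /* small requests need no setup */
    }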
@@ -2265,21 +2513,21 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01,
.ei = qmi_wlanfw_request_mem_ind_msg_v01_ei,
- .decoded_size = sizeof(qmi_wlanfw_request_mem_ind_msg_v01_ei),
+ .decoded_size = sizeof(struct qmi_wlanfw_request_mem_ind_msg_v01),
.fn = ath11k_qmi_msg_mem_request_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_MEM_READY_IND_V01,
.ei = qmi_wlanfw_mem_ready_ind_msg_v01_ei,
- .decoded_size = sizeof(qmi_wlanfw_mem_ready_ind_msg_v01_ei),
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_mem_ready_ind_msg_v01),
.fn = ath11k_qmi_msg_mem_ready_cb,
},
{
.type = QMI_INDICATION,
.msg_id = QMI_WLFW_FW_READY_IND_V01,
.ei = qmi_wlanfw_fw_ready_ind_msg_v01_ei,
- .decoded_size = sizeof(qmi_wlanfw_fw_ready_ind_msg_v01_ei),
+ .decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath11k_qmi_msg_fw_ready_cb,
},
{
@@ -2287,7 +2535,7 @@ static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
.msg_id = QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01,
.ei = qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei,
.decoded_size =
- sizeof(qmi_wlanfw_cold_boot_cal_done_ind_msg_v01_ei),
+ sizeof(struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01),
.fn = ath11k_qmi_msg_cold_boot_cal_done_cb,
},
};
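The decoded_size fixes above matter because sizeof() on the elem-info array measured the descriptor table, not the decoded message the QMI core must allocate for. A small userspace illustration of the difference:

    /* sizeof(ei_array) grows with the number of field descriptors;
     * sizeof(struct decoded_msg) is what a decode buffer must hold.
     */
    #include <stdio.h>

    struct elem_info { int type, len, offset; };
    struct decoded_msg { char placeholder; };

    static const struct elem_info msg_ei[] = {
            { 1, 1, 0 },
            { 0, 0, 0 },    /* terminator */
    };

    int main(void)
    {
            printf("sizeof(msg_ei) = %zu\n", sizeof(msg_ei));
            printf("sizeof(struct decoded_msg) = %zu\n",
                   sizeof(struct decoded_msg));
            return 0;
    }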
@@ -2416,9 +2664,10 @@ int ath11k_qmi_init_service(struct ath11k_base *ab)
ret = qmi_add_lookup(&ab->qmi.handle, ATH11K_QMI_WLFW_SERVICE_ID_V01,
ATH11K_QMI_WLFW_SERVICE_VERS_V01,
- ATH11K_QMI_WLFW_SERVICE_INS_ID_V01);
+ ab->qmi.service_ins_id);
if (ret < 0) {
ath11k_warn(ab, "failed to add qmi lookup\n");
+ destroy_workqueue(ab->qmi.event_wq);
return ret;
}
@@ -2430,5 +2679,7 @@ void ath11k_qmi_deinit_service(struct ath11k_base *ab)
qmi_handle_release(&ab->qmi.handle);
cancel_work_sync(&ab->qmi.event_work);
destroy_workqueue(ab->qmi.event_wq);
+ ath11k_qmi_m3_free(ab);
+ ath11k_qmi_free_target_mem_chunk(ab);
}
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 3f7db642d869..b0a818f0401b 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -12,18 +12,18 @@
#define ATH11K_HOST_VERSION_STRING "WIN"
#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
-#define ATH11K_QMI_BDF_ADDRESS 0x4B0C0000
#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01 0x02
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390 0x01
+#define ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074 0x02
#define ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 32
#define ATH11K_QMI_RESP_LEN_MAX 8192
#define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 32
#define ATH11K_QMI_CALDB_SIZE 0x480000
-#define ATH11K_QMI_DEFAULT_CAL_FILE_NAME "caldata.bin"
#define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035
#define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037
@@ -42,6 +42,11 @@ enum ath11k_qmi_file_type {
ATH11K_QMI_MAX_FILE_TYPE,
};
+enum ath11k_qmi_bdf_type {
+ ATH11K_QMI_BDF_TYPE_BIN = 0,
+ ATH11K_QMI_BDF_TYPE_ELF = 1,
+};
+
enum ath11k_qmi_event_type {
ATH11K_QMI_EVENT_SERVER_ARRIVE,
ATH11K_QMI_EVENT_SERVER_EXIT,
@@ -72,7 +77,7 @@ struct ath11k_qmi_ce_cfg {
int svc_to_ce_map_len;
const u8 *shadow_reg;
int shadow_reg_len;
- u8 *shadow_reg_v2;
+ u32 *shadow_reg_v2;
int shadow_reg_v2_len;
};
@@ -85,7 +90,7 @@ struct target_mem_chunk {
u32 size;
u32 type;
dma_addr_t paddr;
- u32 vaddr;
+ u32 *vaddr;
};
struct target_info {
@@ -98,6 +103,12 @@ struct target_info {
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
};
+struct m3_mem_region {
+ u32 size;
+ dma_addr_t paddr;
+ void *vaddr;
+};
+
struct ath11k_qmi {
struct ath11k_base *ab;
struct qmi_handle handle;
@@ -112,6 +123,8 @@ struct ath11k_qmi {
u32 target_mem_mode;
u8 cal_done;
struct target_info target;
+ struct m3_mem_region m3_mem;
+ unsigned int service_ins_id;
};
#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
@@ -254,6 +267,14 @@ struct qmi_wlanfw_fw_mem_ready_ind_msg_v01 {
char placeholder;
};
+struct qmi_wlanfw_fw_ready_ind_msg_v01 {
+ char placeholder;
+};
+
+struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
+ char placeholder;
+};
+
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
#define QMI_WLANFW_CAP_REQ_V01 0x0024
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 7c9dc91cc48a..f6a1f0352989 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -206,7 +206,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
ab = ar->ab;
pdev_id = ar->pdev_idx;
- spin_lock(&ab->base_lock);
+ spin_lock_bh(&ab->base_lock);
if (init) {
/* Apply the regd received during init through
@@ -227,7 +227,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
if (!regd) {
ret = -EINVAL;
- spin_unlock(&ab->base_lock);
+ spin_unlock_bh(&ab->base_lock);
goto err;
}
@@ -238,7 +238,7 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
if (regd_copy)
ath11k_copy_regd(regd, regd_copy);
- spin_unlock(&ab->base_lock);
+ spin_unlock_bh(&ab->base_lock);
if (!regd_copy) {
ret = -ENOMEM;
@@ -699,7 +699,7 @@ void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
- for (i = 0; i < MAX_RADIOS; i++) {
+ for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
}
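base_lock is taken both from process context (regd updates) and from softirq context (WMI event processing), so a plain spin_lock() could deadlock if a bottom half preempted the holder on the same CPU; the _bh variants disable bottom halves while the lock is held. A minimal sketch of the rule, assuming kernel context:

    /* Data shared between process context and a softirq handler:
     * the process-context side must use the _bh lock variants.
     */
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(base_lock);
    static int shared_state;

    static void process_ctx_update(int v)
    {
            spin_lock_bh(&base_lock);   /* blocks local softirqs too */
            shared_state = v;
            spin_unlock_bh(&base_lock);
    }

    static void softirq_ctx_read(int *out)  /* called from BH context */
    {
            spin_lock(&base_lock);      /* plain lock is fine inside BH */
            *out = shared_state;
            spin_unlock(&base_lock);
    }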
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 1c5d65bb411f..ac2a8cfdc1c0 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -17,8 +17,6 @@
#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32
#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256
-#define ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK 0xFF
-
#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
/* Max channel computed by sum of 2g and 5g band channels */
@@ -557,16 +555,16 @@ static u8 ath11k_spectral_get_max_exp(s8 max_index, u8 max_magnitude,
return max_exp;
}
-static void ath11k_spectral_parse_16bit_fft(u8 *outbins, u8 *inbins, int num_bins)
+static void ath11k_spectral_parse_fft(u8 *outbins, u8 *inbins, int num_bins, u8 fft_sz)
{
- int i;
- __le16 *data = (__le16 *)inbins;
+ int i, j;
i = 0;
+ j = 0;
while (i < num_bins) {
- outbins[i] = (__le16_to_cpu(data[i])) &
- ATH11K_SPECTRAL_SAMPLE_FFT_BIN_MASK;
+ outbins[i] = inbins[j];
i++;
+ j += fft_sz;
}
}
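The rewritten parser walks the report with a byte stride (fft_sz) instead of assuming 16-bit little-endian bins, keeping the first byte of each bin. A standalone sketch with a tiny usage example:

    /* Strided bin copy: take one byte per fft_sz-sized input bin. */
    #include <stdio.h>

    static void parse_fft(unsigned char *out, const unsigned char *in,
                          int num_bins, int fft_sz)
    {
            for (int i = 0, j = 0; i < num_bins; i++, j += fft_sz)
                    out[i] = in[j];
    }

    int main(void)
    {
            const unsigned char raw[] = { 9, 0, 7, 0, 5, 0 }; /* 2-byte bins */
            unsigned char bins[3];

            parse_fft(bins, raw, 3, 2);
            printf("%d %d %d\n", bins[0], bins[1], bins[2]); /* 9 7 5 */
            return 0;
    }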
@@ -588,6 +586,12 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
lockdep_assert_held(&ar->spectral.lock);
+ if (!ab->hw_params.spectral_fft_sz) {
+ ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
+ ab->hw_rev);
+ return -EINVAL;
+ }
+
tlv = (struct spectral_tlv *)data;
tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
/* convert Dword into bytes */
@@ -649,9 +653,8 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
freq = summary->meta.freq2;
fft_sample->freq2 = __cpu_to_be16(freq);
- ath11k_spectral_parse_16bit_fft(fft_sample->data,
- fft_report->bins,
- num_bins);
+ ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
+ ab->hw_params.spectral_fft_sz);
fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
search.peak_mag,
@@ -773,6 +776,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
i += sizeof(*tlv) + tlv_len;
}
+ ret = 0;
+
err:
kfree(fft_sample);
unlock:
@@ -954,10 +959,11 @@ int ath11k_spectral_init(struct ath11k_base *ab)
int i;
if (!test_bit(WMI_TLV_SERVICE_FREQINFO_IN_METADATA,
- ab->wmi_ab.svc_map)) {
- ath11k_info(ab, "spectral not supported\n");
+ ab->wmi_ab.svc_map))
+ return 0;
+
+ if (!ab->hw_params.spectral_fft_sz)
return 0;
- }
for (i = 0; i < ab->num_radios; i++) {
ar = ab->pdevs[i].ar;
@@ -966,10 +972,8 @@ int ath11k_spectral_init(struct ath11k_base *ab)
ret = ath11k_dbring_get_cap(ar->ab, ar->pdev_idx,
WMI_DIRECT_BUF_SPECTRAL,
&db_cap);
- if (ret) {
- ath11k_info(ab, "spectral not enabled for pdev %d\n", i);
+ if (ret)
continue;
- }
idr_init(&sp->rx_ring.bufs_idr);
spin_lock_init(&sp->rx_ring.idr_lock);
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index 5a7e150c621b..c96b26f39a25 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -53,7 +53,7 @@ ath11k_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
return ret;
}
-static struct thermal_cooling_device_ops ath11k_thermal_ops = {
+static const struct thermal_cooling_device_ops ath11k_thermal_ops = {
.get_max_state = ath11k_thermal_get_max_throttle_state,
.get_cur_state = ath11k_thermal_get_cur_throttle_state,
.set_cur_state = ath11k_thermal_set_cur_throttle_state,
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 8e3437a65673..8eca92520837 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -338,7 +338,7 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
mac_phy_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = mac_phy_caps->pdev_id;
- pdev_cap->supported_bands = mac_phy_caps->supported_bands;
+ pdev_cap->supported_bands |= mac_phy_caps->supported_bands;
pdev_cap->ampdu_density = mac_phy_caps->ampdu_density;
/* Take non-zero tx/rx chainmask. If tx/rx chainmask differs from
@@ -371,27 +371,33 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev_cap->rx_chain_mask_shift =
find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
- cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
- cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
- cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
- cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
- cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
- cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
- memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
- sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
- memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
- sizeof(struct ath11k_ppe_threshold));
-
- cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
- cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
- cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
- cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
- cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
- cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
- memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
- sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
- memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
- sizeof(struct ath11k_ppe_threshold));
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_2g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_2g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_2g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_2g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_2g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_2g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet2g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
+
+ if (mac_phy_caps->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
+ cap_band->phy_id = mac_phy_caps->phy_id;
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
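supported_bands is now OR-accumulated and each band block is filled only when its capability bit is set, so a second mac_phy_caps entry for the other band no longer clobbers the first when a single pdev serves both bands. A sketch of the accumulate-don't-overwrite pattern (flag values illustrative, not the WMI_HOST_* constants):

    /* Merge per-band caps across multiple calls without clobbering. */
    #define BAND_2G 0x1
    #define BAND_5G 0x2

    struct band_caps { int filled; int max_bw; };
    struct pdev_caps { unsigned int bands; struct band_caps b2g, b5g; };

    static void merge_caps(struct pdev_caps *p, unsigned int bands, int bw)
    {
            p->bands |= bands;          /* accumulate, don't assign */
            if (bands & BAND_2G) {      /* touch 2G state only if present */
                    p->b2g.filled = 1;
                    p->b2g.max_bw = bw;
            }
            if (bands & BAND_5G) {
                    p->b5g.filled = 1;
                    p->b5g.max_bw = bw;
            }
    }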
@@ -1593,8 +1599,8 @@ int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->vdev_id = vdev_id;
cmd->tim_ie_offset = offs->tim_offset;
- cmd->csa_switch_count_offset = offs->csa_counter_offs[0];
- cmd->ext_csa_switch_count_offset = offs->csa_counter_offs[1];
+ cmd->csa_switch_count_offset = offs->cntdwn_counter_offs[0];
+ cmd->ext_csa_switch_count_offset = offs->cntdwn_counter_offs[1];
cmd->buf_len = bcn->len;
ptr = skb->data + sizeof(*cmd);
@@ -3175,7 +3181,7 @@ static int ath11k_init_cmd_send(struct ath11k_pdev_wmi *wmi,
(param->num_band_to_mac * sizeof(*band_to_mac));
len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*cfg) + hw_mode_len +
- (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS);
+ (param->num_mem_chunks ? (sizeof(*host_mem_chunks) * WMI_MAX_MEM_REQS) : 0);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
if (!skb)
@@ -3336,50 +3342,7 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
memset(&init_param, 0, sizeof(init_param));
memset(&config, 0, sizeof(config));
- config.num_vdevs = ab->num_radios * TARGET_NUM_VDEVS;
-
- if (ab->num_radios == 2) {
- config.num_peers = TARGET_NUM_PEERS(DBS);
- config.num_tids = TARGET_NUM_TIDS(DBS);
- } else if (ab->num_radios == 3) {
- config.num_peers = TARGET_NUM_PEERS(DBS_SBS);
- config.num_tids = TARGET_NUM_TIDS(DBS_SBS);
- } else {
- /* Control should not reach here */
- config.num_peers = TARGET_NUM_PEERS(SINGLE);
- config.num_tids = TARGET_NUM_TIDS(SINGLE);
- }
- config.num_offload_peers = TARGET_NUM_OFFLD_PEERS;
- config.num_offload_reorder_buffs = TARGET_NUM_OFFLD_REORDER_BUFFS;
- config.num_peer_keys = TARGET_NUM_PEER_KEYS;
- config.ast_skid_limit = TARGET_AST_SKID_LIMIT;
- config.tx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config.rx_chain_mask = (1 << ab->target_caps.num_rf_chains) - 1;
- config.rx_timeout_pri[0] = TARGET_RX_TIMEOUT_LO_PRI;
- config.rx_timeout_pri[1] = TARGET_RX_TIMEOUT_LO_PRI;
- config.rx_timeout_pri[2] = TARGET_RX_TIMEOUT_LO_PRI;
- config.rx_timeout_pri[3] = TARGET_RX_TIMEOUT_HI_PRI;
- config.rx_decap_mode = TARGET_DECAP_MODE_NATIVE_WIFI;
- config.scan_max_pending_req = TARGET_SCAN_MAX_PENDING_REQS;
- config.bmiss_offload_max_vdev = TARGET_BMISS_OFFLOAD_MAX_VDEV;
- config.roam_offload_max_vdev = TARGET_ROAM_OFFLOAD_MAX_VDEV;
- config.roam_offload_max_ap_profiles = TARGET_ROAM_OFFLOAD_MAX_AP_PROFILES;
- config.num_mcast_groups = TARGET_NUM_MCAST_GROUPS;
- config.num_mcast_table_elems = TARGET_NUM_MCAST_TABLE_ELEMS;
- config.mcast2ucast_mode = TARGET_MCAST2UCAST_MODE;
- config.tx_dbg_log_size = TARGET_TX_DBG_LOG_SIZE;
- config.num_wds_entries = TARGET_NUM_WDS_ENTRIES;
- config.dma_burst_size = TARGET_DMA_BURST_SIZE;
- config.rx_skip_defrag_timeout_dup_detection_check =
- TARGET_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
- config.vow_config = TARGET_VOW_CONFIG;
- config.gtk_offload_max_vdev = TARGET_GTK_OFFLOAD_MAX_VDEV;
- config.num_msdu_desc = TARGET_NUM_MSDU_DESC;
- config.beacon_tx_offload_max_vdev = ab->num_radios * TARGET_MAX_BCN_OFFLD;
- config.rx_batchmode = TARGET_RX_BATCHMODE;
- config.peer_map_unmap_v2_support = 1;
- config.twt_ap_pdev_count = ab->num_radios;
- config.twt_ap_sta_count = 1000;
+ ab->hw_params.hw_ops->wmi_init_config(ab, &config);
memcpy(&wmi_sc->wlan_resource_config, &config, sizeof(config));
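The block of TARGET_* assignments moves behind a per-hardware ops hook, so each chip family supplies its own resource configuration instead of sharing one table. A sketch of the indirection (names mirror the shape of the change, not exact kernel types):

    /* hw_ops indirection sketch: family-specific config fill-in. */
    struct wmi_resource_config { int num_vdevs, num_peers; };

    struct hw_ops {
            void (*wmi_init_config)(struct wmi_resource_config *cfg);
    };

    static void ipq8074_init_config(struct wmi_resource_config *cfg)
    {
            cfg->num_vdevs = 16 * 3;    /* per-family tuning lives here */
            cfg->num_peers = 512;
    }

    static const struct hw_ops ipq8074_ops = {
            .wmi_init_config = ipq8074_init_config,
    };

    static void wmi_cmd_init(const struct hw_ops *ops,
                             struct wmi_resource_config *cfg)
    {
            ops->wmi_init_config(cfg);  /* dispatch, no chip ifdefs here */
    }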
@@ -3391,9 +3354,10 @@ int ath11k_wmi_cmd_init(struct ath11k_base *ab)
if (wmi_sc->preferred_hw_mode == WMI_HOST_HW_MODE_SINGLE)
init_param.hw_mode_id = WMI_HOST_HW_MODE_MAX;
- init_param.num_band_to_mac = ab->num_radios;
-
- ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+ if (ab->hw_params.needs_band_to_mac) {
+ init_param.num_band_to_mac = ab->num_radios;
+ ath11k_fill_band_to_mac_param(ab, init_param.band_to_mac);
+ }
return ath11k_init_cmd_send(&wmi_sc->wmi[0], &init_param);
}
@@ -3688,6 +3652,8 @@ static int ath11k_wmi_tlv_hw_mode_caps(struct ath11k_base *soc,
i++;
}
+ ath11k_dbg(soc, ATH11K_DBG_WMI, "preferred_hw_mode:%d\n",
+ soc->wmi_ab.preferred_hw_mode);
if (soc->wmi_ab.preferred_hw_mode == WMI_HOST_HW_MODE_MAX)
return -EINVAL;
@@ -3778,6 +3744,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
struct wmi_tlv_svc_rdy_ext_parse *svc_rdy_ext = data;
u8 hw_mode_id = svc_rdy_ext->pref_hw_mode_caps.hw_mode_id;
u32 phy_id_map;
+ int pdev_index = 0;
int ret;
svc_rdy_ext->soc_hal_reg_caps = (struct wmi_soc_hal_reg_capabilities *)ptr;
@@ -3793,7 +3760,7 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
svc_rdy_ext->soc_hal_reg_caps,
svc_rdy_ext->mac_phy_caps,
hw_mode_id, soc->num_radios,
- &soc->pdevs[soc->num_radios]);
+ &soc->pdevs[pdev_index]);
if (ret) {
ath11k_warn(soc, "failed to extract mac caps, idx :%d\n",
soc->num_radios);
@@ -3802,9 +3769,25 @@ static int ath11k_wmi_tlv_ext_soc_hal_reg_caps_parse(struct ath11k_base *soc,
soc->num_radios++;
+ /* For QCA6390, save mac_phy capability in the same pdev */
+ if (soc->hw_params.single_pdev_only)
+ pdev_index = 0;
+ else
+ pdev_index = soc->num_radios;
+
/* TODO: mac_phy_cap prints */
phy_id_map >>= 1;
}
+
+ /* For QCA6390, set num_radios to 1 because host manages
+ * both 2G and 5G radio in one pdev.
+ * Set pdev_id to 0, where 0 denotes the soc level.
+ */
+ if (soc->hw_params.single_pdev_only) {
+ soc->num_radios = 1;
+ soc->pdevs[0].pdev_id = 0;
+ }
+
return 0;
}
@@ -5434,8 +5417,17 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
pdev_idx = reg_info->phy_id;
- if (pdev_idx >= ab->num_radios)
- goto fallback;
+ if (pdev_idx >= ab->num_radios) {
+ /* Process the event for phy0 only if single_pdev_only
+ * is true. If pdev_idx is valid but not 0, discard the
+ * event. Otherwise, it goes to fallback.
+ */
+ if (ab->hw_params.single_pdev_only &&
+ pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ goto mem_free;
+ else
+ goto fallback;
+ }
/* Avoid multiple overwrites to default regd, during core
* stop-start after mac registration.
@@ -6260,7 +6252,7 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
- ath11k_debug_fw_stats_process(ab, skb);
+ ath11k_debugfs_fw_stats_process(ab, skb);
}
/* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -6538,7 +6530,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
break;
/* TODO: Add remaining events */
default:
- ath11k_warn(ab, "Unknown eventid: 0x%x\n", id);
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
break;
}
@@ -6682,7 +6674,7 @@ int ath11k_wmi_connect(struct ath11k_base *ab)
u8 wmi_ep_count;
wmi_ep_count = ab->htc.wmi_ep_count;
- if (wmi_ep_count > MAX_RADIOS)
+ if (wmi_ep_count > ab->hw_params.max_radios)
return -1;
for (i = 0; i < wmi_ep_count; i++)
@@ -6704,7 +6696,7 @@ int ath11k_wmi_pdev_attach(struct ath11k_base *ab,
{
struct ath11k_pdev_wmi *wmi_handle;
- if (pdev_id >= MAX_RADIOS)
+ if (pdev_id >= ab->hw_params.max_radios)
return -EINVAL;
wmi_handle = &ab->wmi_ab.wmi[pdev_id];
@@ -6728,6 +6720,10 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.ab = ab;
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
+ /* It's overwritten when service_ext_ready is handled */
+ if (ab->hw_params.single_pdev_only)
+ ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
+
/* TODO: Init remaining wmi soc resources required */
init_completion(&ab->wmi_ab.service_ready);
init_completion(&ab->wmi_ab.unified_ready);
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 979800c6f57f..234ea939d316 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -410,7 +410,7 @@ enum ath5k_radio {
* This article claims Super G sticks to bonding of channels 5 and 6 for
* USA:
*
- * http://www.pcworld.com/article/id,113428-page,1/article.html
+ * https://www.pcworld.com/article/id,113428-page,1/article.html
*
* The channel bonding seems to be driver specific though.
*
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 65a4c142640d..4c6e57f9976d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -1098,7 +1098,7 @@ err:
/**
* ath5k_drain_tx_buffs - Empty tx buffers
*
- * @ah The &struct ath5k_hw
+ * @ah: The &struct ath5k_hw
*
* Empty tx buffers from all queues in preparation
* of a reset or during shutdown.
@@ -1536,12 +1536,12 @@ ath5k_set_current_imask(struct ath5k_hw *ah)
}
static void
-ath5k_tasklet_rx(unsigned long data)
+ath5k_tasklet_rx(struct tasklet_struct *t)
{
struct ath5k_rx_status rs = {};
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, rxtq);
struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_buf *bf;
struct ath5k_desc *ds;
@@ -1784,10 +1784,10 @@ ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
}
static void
-ath5k_tasklet_tx(unsigned long data)
+ath5k_tasklet_tx(struct tasklet_struct *t)
{
int i;
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, txtq);
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
@@ -2176,9 +2176,9 @@ ath5k_beacon_config(struct ath5k_hw *ah)
spin_unlock_bh(&ah->block);
}
-static void ath5k_tasklet_beacon(unsigned long data)
+static void ath5k_tasklet_beacon(struct tasklet_struct *t)
{
- struct ath5k_hw *ah = (struct ath5k_hw *) data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, beacontq);
/*
* Software beacon alert--time to send a beacon.
@@ -2447,9 +2447,9 @@ ath5k_calibrate_work(struct work_struct *work)
static void
-ath5k_tasklet_ani(unsigned long data)
+ath5k_tasklet_ani(struct tasklet_struct *t)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, ani_tasklet);
ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
ath5k_ani_calibration(ah);
@@ -3069,10 +3069,10 @@ ath5k_init(struct ieee80211_hw *hw)
hw->queues = 1;
}
- tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
- tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
- tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
- tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
+ tasklet_setup(&ah->rxtq, ath5k_tasklet_rx);
+ tasklet_setup(&ah->txtq, ath5k_tasklet_tx);
+ tasklet_setup(&ah->beacontq, ath5k_tasklet_beacon);
+ tasklet_setup(&ah->ani_tasklet, ath5k_tasklet_ani);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
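The ath5k conversion replaces the old unsigned long tasklet argument with tasklet_setup() and from_tasklet(), which recovers the containing object via container_of() keyed on the member name. A minimal sketch, assuming kernel context:

    /* tasklet_setup()/from_tasklet() conversion pattern: the member
     * name passed to from_tasklet() must match the field the tasklet
     * lives in, here rxtq.
     */
    #include <linux/interrupt.h>

    struct my_hw {
            struct tasklet_struct rxtq;
            int rx_count;
    };

    static void my_rx_tasklet(struct tasklet_struct *t)
    {
            /* container_of under the hood: from tasklet back to my_hw */
            struct my_hw *hw = from_tasklet(hw, t, rxtq);

            hw->rx_count++;
    }

    static void my_hw_init(struct my_hw *hw)
    {
            tasklet_setup(&hw->rxtq, my_rx_tasklet);
    }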
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 2eaba1ccab20..4b41160e5d38 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -161,33 +161,14 @@ static int reg_show(struct seq_file *seq, void *p)
return 0;
}
-static const struct seq_operations register_seq_ops = {
+static const struct seq_operations registers_sops = {
.start = reg_start,
.next = reg_next,
.stop = reg_stop,
.show = reg_show
};
-static int open_file_registers(struct inode *inode, struct file *file)
-{
- struct seq_file *s;
- int res;
- res = seq_open(file, &register_seq_ops);
- if (res == 0) {
- s = file->private_data;
- s->private = inode->i_private;
- }
- return res;
-}
-
-static const struct file_operations fops_registers = {
- .open = open_file_registers,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
- .owner = THIS_MODULE,
-};
-
+DEFINE_SEQ_ATTRIBUTE(registers);
/* debugfs: beacons */
@@ -1005,7 +986,7 @@ ath5k_debug_init_device(struct ath5k_hw *ah)
return;
debugfs_create_file("debug", 0600, phydir, ah, &fops_debug);
- debugfs_create_file("registers", 0400, phydir, ah, &fops_registers);
+ debugfs_create_file("registers", 0400, phydir, ah, &registers_fops);
debugfs_create_file("beacon", 0600, phydir, ah, &fops_beacon);
debugfs_create_file("reset", 0200, phydir, ah, &fops_reset);
debugfs_create_file("antenna", 0600, phydir, ah, &fops_antenna);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 307f1fea0a88..1fbc2c19848f 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1172,13 +1172,13 @@ ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
offset += ath5k_pdgains_size_2413(ee,
AR5K_EEPROM_MODE_11B) +
AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
- /* fall through */
+ fallthrough;
case AR5K_EEPROM_MODE_11B:
if (AR5K_EEPROM_HDR_11A(ee->ee_header))
offset += ath5k_pdgains_size_2413(ee,
AR5K_EEPROM_MODE_11A) +
AR5K_EEPROM_N_5GHZ_CHAN / 2;
- /* fall through */
+ fallthrough;
case AR5K_EEPROM_MODE_11A:
break;
default:
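Throughout these files the /* fall through */ comments become the fallthrough pseudo-keyword, a real statement that -Wimplicit-fallthrough can verify rather than a comment it has to pattern-match. A small sketch:

    /* fallthrough is a checked statement, not a comment. */
    #include <linux/compiler_attributes.h>

    static int decode(int mode)
    {
            int v = 0;

            switch (mode) {
            case 2:
                    v += 2;
                    fallthrough;    /* deliberate: case 2 includes case 1 */
            case 1:
                    v += 1;
                    break;
            default:
                    break;
            }
            return v;
    }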
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 05140d8baa36..f2db7cf16566 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -101,6 +101,7 @@ static const unsigned int ack_rates_high[] =
/**
* ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
+ * @band: One of enum nl80211_band
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
* @shortpre: Indicate short preamble
@@ -670,7 +671,7 @@ ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
break;
case NL80211_IFTYPE_ADHOC:
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
- /* fall through */
+ fallthrough;
default:
/* On non-STA modes timer1 is used as next DMA
* beacon alert (DBA) timer and timer2 as next
@@ -913,7 +914,7 @@ ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
AR5K_STA_ID1_PWR_SV : 0);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_MONITOR:
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
@@ -945,7 +946,6 @@ ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
* ath5k_hw_pcu_init() - Initialize PCU
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
- * @mode: One of enum ath5k_driver_mode
*
* This function is used to initialize PCU by setting current
* operation mode and various other settings.
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index ae08572c4b58..00f9e347d414 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -3229,10 +3229,10 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
switch (pdcurves) {
case 3:
reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3);
- /* Fall through */
+ fallthrough;
case 2:
reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2);
- /* Fall through */
+ fallthrough;
case 1:
reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1);
break;
@@ -3353,7 +3353,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
table_min[pdg] = table_max[pdg] - 126;
}
- /* Fall through */
+ fallthrough;
case AR5K_PWRTABLE_PWR_TO_PCDAC:
case AR5K_PWRTABLE_PWR_TO_PDADC:
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 56d7925a0c2c..9fdb5283b39c 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -522,7 +522,7 @@ ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
switch (mode) {
case AR5K_PM_AUTO:
staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
- /* fallthrough */
+ fallthrough;
case AR5K_PM_NETWORK_SLEEP:
if (set_chip)
ath5k_hw_reg_write(ah,
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index aed34d9954c0..151935c4827f 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -42,7 +42,7 @@
* Also check out reg.h and U.S. Patent 6677779 B1 (about buffer
* registers and control registers):
*
- * http://www.google.com/patents?id=qNURAAAAEBAJ
+ * https://www.google.com/patents?id=qNURAAAAEBAJ
*/
diff --git a/drivers/net/wireless/ath/ath5k/rfkill.c b/drivers/net/wireless/ath/ath5k/rfkill.c
index 270a319f3aeb..855ed7fc720d 100644
--- a/drivers/net/wireless/ath/ath5k/rfkill.c
+++ b/drivers/net/wireless/ath/ath5k/rfkill.c
@@ -73,9 +73,9 @@ ath5k_is_rfkill_set(struct ath5k_hw *ah)
}
static void
-ath5k_tasklet_rfkill_toggle(unsigned long data)
+ath5k_tasklet_rfkill_toggle(struct tasklet_struct *t)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = from_tasklet(ah, t, rf_kill.toggleq);
bool blocked;
blocked = ath5k_is_rfkill_set(ah);
@@ -90,8 +90,7 @@ ath5k_rfkill_hw_start(struct ath5k_hw *ah)
ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
- tasklet_init(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle,
- (unsigned long)ah);
+ tasklet_setup(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle);
ath5k_rfkill_disable(ah);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 67f8f2aa7a53..9c83e9a4299b 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3897,19 +3897,19 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
switch (ar->hw.cap) {
case WMI_11AN_CAP:
ht = true;
- /* fall through */
+ fallthrough;
case WMI_11A_CAP:
band_5gig = true;
break;
case WMI_11GN_CAP:
ht = true;
- /* fall through */
+ fallthrough;
case WMI_11G_CAP:
band_2gig = true;
break;
case WMI_11AGN_CAP:
ht = true;
- /* fall through */
+ fallthrough;
case WMI_11AG_CAP:
band_2gig = true;
band_5gig = true;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 811fad6d60c0..39bf19686175 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1752,7 +1752,7 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
ret = ath6kl_init_service_ep(ar);
if (ret) {
- ath6kl_err("Endpoint service initilisation failed: %d\n", ret);
+ ath6kl_err("Endpoint service initialization failed: %d\n", ret);
goto err_cleanup_scatter;
}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 5e7ea838a921..d3aa9e7a37c2 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -389,7 +389,7 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
if (!ik->valid || ik->key_type != WAPI_CRYPT)
break;
/* for WAPI, we need to set the delayed group key, continue: */
- /* fall through */
+ fallthrough;
case WPA_PSK_AUTH:
case WPA2_PSK_AUTH:
case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
@@ -430,6 +430,9 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
ath6kl_dbg(ATH6KL_DBG_TRC, "new station %pM aid=%d\n", mac_addr, aid);
+ if (aid < 1 || aid > AP_MAX_NUM_STA)
+ return;
+
if (assoc_req_len > sizeof(struct ieee80211_hdr_3addr)) {
struct ieee80211_mgmt *mgmt =
(struct ieee80211_mgmt *) assoc_info;
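The new aid range check rejects device-supplied association IDs before they are used to index the station table. A sketch of the validate-before-index rule, with AP_MAX standing in for AP_MAX_NUM_STA:

    /* Validate an untrusted, 1-based aid before table lookup. */
    #include <stddef.h>

    #define AP_MAX 8

    struct sta { int used; };
    static struct sta sta_list[AP_MAX];

    static struct sta *sta_for_aid(unsigned int aid)
    {
            if (aid < 1 || aid > AP_MAX)    /* device-supplied: validate */
                    return NULL;
            return &sta_list[aid - 1];      /* aid is 1-based */
    }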
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 6885d2ded53a..dbc47702a268 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1201,8 +1201,7 @@ static int ath6kl_wmi_pstream_timeout_event_rx(struct wmi *wmi, u8 *datap,
static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
{
struct wmi_bit_rate_reply *reply;
- s32 rate;
- u32 sgi, index;
+ u32 index;
if (len < sizeof(struct wmi_bit_rate_reply))
return -EINVAL;
@@ -1211,15 +1210,10 @@ static int ath6kl_wmi_bitrate_reply_rx(struct wmi *wmi, u8 *datap, int len)
ath6kl_dbg(ATH6KL_DBG_WMI, "rateindex %d\n", reply->rate_index);
- if (reply->rate_index == (s8) RATE_AUTO) {
- rate = RATE_AUTO;
- } else {
+ if (reply->rate_index != (s8) RATE_AUTO) {
index = reply->rate_index & 0x7f;
if (WARN_ON_ONCE(index > (RATE_MCS_7_40 + 1)))
return -EINVAL;
-
- sgi = (reply->rate_index & 0x80) ? 1 : 0;
- rate = wmi_rate_tbl[index][sgi];
}
ath6kl_wakeup_event(wmi->parent_dev);
@@ -2645,6 +2639,11 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
return -EINVAL;
}
+ if (tsid >= 16) {
+ ath6kl_err("invalid tsid: %d\n", tsid);
+ return -EINVAL;
+ }
+
skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
if (!skb)
return -ENOMEM;
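Likewise the tsid check bounds the traffic stream ID to the 4-bit TSID space before it is used further down. A sketch of the same guard written as a bit-index check:

    /* TSIDs are 4-bit; anything >= 16 would shift past the bitmask. */
    static int set_stream_bit(unsigned int *mask, unsigned int tsid)
    {
            if (tsid >= 16)     /* 4-bit TSID space */
                    return -1;
            *mask |= 1u << tsid;
            return 0;
    }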
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d5e9af2dddd8..a84bb9b6573f 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -22,10 +22,10 @@ config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211 && HAS_DMA
select ATH9K_HW
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
select ATH9K_COMMON
+ imply NEW_LEDS
+ imply LEDS_CLASS
+ imply MAC80211_LEDS
help
This module adds support for wireless adapters based on
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -177,10 +177,10 @@ config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
select ATH9K_HW
- select MAC80211_LEDS
- select LEDS_CLASS
- select NEW_LEDS
select ATH9K_COMMON
+ imply NEW_LEDS
+ imply LEDS_CLASS
+ imply MAC80211_LEDS
help
Support for Atheros HTC based cards.
Chipsets supported: AR9271
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 5214dd7a3936..41d192709e8e 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -74,7 +74,7 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
* Regardless of alignment in time, the antenna signals add constructively after
* FFT and improve your reception. For more information:
*
- * http://en.wikipedia.org/wiki/Maximal-ratio_combining
+ * https://en.wikipedia.org/wiki/Maximal-ratio_combining
*/
struct ani_cck_level_entry {
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
index 467ccfae2cee..7da8365ae69a 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -459,12 +459,6 @@ static const u32 ar5416Common[][2] = {
{0x0000a3e0, 0x000001ce},
};
-static const u32 ar5416Bank0[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x1e5795e5},
- {0x000098e0, 0x02008020},
-};
-
static const u32 ar5416BB_RfGain[][3] = {
/* Addr 5G 2G */
{0x00009a00, 0x00000000, 0x00000000},
@@ -533,60 +527,6 @@ static const u32 ar5416BB_RfGain[][3] = {
{0x00009afc, 0x000000f9, 0x000000f9},
};
-static const u32 ar5416Bank1[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x02108421},
- {0x000098ec, 0x00000008},
-};
-
-static const u32 ar5416Bank2[][2] = {
- /* Addr allmodes */
- {0x000098b0, 0x0e73ff17},
- {0x000098e0, 0x00000420},
-};
-
-static const u32 ar5416Bank3[][3] = {
- /* Addr 5G 2G */
- {0x000098f0, 0x01400018, 0x01c00018},
-};
-
-static const u32 ar5416Bank6[][3] = {
- /* Addr 5G 2G */
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00e00000, 0x00e00000},
- {0x0000989c, 0x005e0000, 0x005e0000},
- {0x0000989c, 0x00120000, 0x00120000},
- {0x0000989c, 0x00620000, 0x00620000},
- {0x0000989c, 0x00020000, 0x00020000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x40ff0000, 0x40ff0000},
- {0x0000989c, 0x005f0000, 0x005f0000},
- {0x0000989c, 0x00870000, 0x00870000},
- {0x0000989c, 0x00f90000, 0x00f90000},
- {0x0000989c, 0x007b0000, 0x007b0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00f50000, 0x00f50000},
- {0x0000989c, 0x00dc0000, 0x00dc0000},
- {0x0000989c, 0x00110000, 0x00110000},
- {0x0000989c, 0x006100a8, 0x006100a8},
- {0x0000989c, 0x004210a2, 0x004210a2},
- {0x0000989c, 0x0014008f, 0x0014008f},
- {0x0000989c, 0x00c40003, 0x00c40003},
- {0x0000989c, 0x003000f2, 0x003000f2},
- {0x0000989c, 0x00440016, 0x00440016},
- {0x0000989c, 0x00410040, 0x00410040},
- {0x0000989c, 0x0001805e, 0x0001805e},
- {0x0000989c, 0x0000c0ab, 0x0000c0ab},
- {0x0000989c, 0x000000f1, 0x000000f1},
- {0x0000989c, 0x00002081, 0x00002081},
- {0x0000989c, 0x000000d4, 0x000000d4},
- {0x000098d0, 0x0000000f, 0x0010000f},
-};
-
static const u32 ar5416Bank6TPC[][3] = {
/* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
@@ -624,13 +564,6 @@ static const u32 ar5416Bank6TPC[][3] = {
{0x000098d0, 0x0000000f, 0x0010000f},
};
-static const u32 ar5416Bank7[][2] = {
- /* Addr allmodes */
- {0x0000989c, 0x00000500},
- {0x0000989c, 0x00000800},
- {0x000098cc, 0x0000000e},
-};
-
static const u32 ar5416Addac[][2] = {
/* Addr allmodes */
{0x0000989c, 0x00000000},
@@ -671,4 +604,3 @@ static const u32 ar5416Addac[][2] = {
{0x0000989c, 0x00000000},
{0x000098c4, 0x00000000},
};
-
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index dae95402eb3a..2fa30834a88d 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -18,7 +18,6 @@
#include "hw-ops.h"
#include "../regd.h"
#include "ar9002_phy.h"
-#include "ar5008_initvals.h"
/* All code below is for AR5008, AR9001, AR9002 */
@@ -51,6 +50,36 @@ static const int m2ThreshLowExt_off = 127;
static const int m1ThreshExt_off = 127;
static const int m2ThreshExt_off = 127;
+static const u32 ar5416Bank0[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x1e5795e5},
+ {0x000098e0, 0x02008020},
+};
+
+static const u32 ar5416Bank1[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x02108421},
+ {0x000098ec, 0x00000008},
+};
+
+static const u32 ar5416Bank2[][2] = {
+ /* Addr allmodes */
+ {0x000098b0, 0x0e73ff17},
+ {0x000098e0, 0x00000420},
+};
+
+static const u32 ar5416Bank3[][3] = {
+ /* Addr 5G 2G */
+ {0x000098f0, 0x01400018, 0x01c00018},
+};
+
+static const u32 ar5416Bank7[][2] = {
+ /* Addr allmodes */
+ {0x0000989c, 0x00000500},
+ {0x0000989c, 0x00000800},
+ {0x000098cc, 0x0000000e},
+};
+
static const struct ar5416IniArray bank0 = STATIC_INI_ARRAY(ar5416Bank0);
static const struct ar5416IniArray bank1 = STATIC_INI_ARRAY(ar5416Bank1);
static const struct ar5416IniArray bank2 = STATIC_INI_ARRAY(ar5416Bank2);
@@ -579,14 +608,14 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
case 0x5:
REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
AR_PHY_SWAP_ALT_CHAIN);
- /* fall through */
+ fallthrough;
case 0x3:
if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
break;
}
- /* fall through */
+ fallthrough;
case 0x1:
case 0x2:
case 0x7:
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
index 59524e1d4678..aa5f086fa3b0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -459,43 +459,6 @@ static const u32 ar5416Common_9100[][2] = {
{0x0000a3e0, 0x000001ce},
};
-static const u32 ar5416Bank6_9100[][3] = {
- /* Addr 5G 2G */
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00000000, 0x00000000},
- {0x0000989c, 0x00e00000, 0x00e00000},
- {0x0000989c, 0x005e0000, 0x005e0000},
- {0x0000989c, 0x00120000, 0x00120000},
- {0x0000989c, 0x00620000, 0x00620000},
- {0x0000989c, 0x00020000, 0x00020000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x005f0000, 0x005f0000},
- {0x0000989c, 0x00870000, 0x00870000},
- {0x0000989c, 0x00f90000, 0x00f90000},
- {0x0000989c, 0x007b0000, 0x007b0000},
- {0x0000989c, 0x00ff0000, 0x00ff0000},
- {0x0000989c, 0x00f50000, 0x00f50000},
- {0x0000989c, 0x00dc0000, 0x00dc0000},
- {0x0000989c, 0x00110000, 0x00110000},
- {0x0000989c, 0x006100a8, 0x006100a8},
- {0x0000989c, 0x004210a2, 0x004210a2},
- {0x0000989c, 0x0014000f, 0x0014000f},
- {0x0000989c, 0x00c40002, 0x00c40002},
- {0x0000989c, 0x003000f2, 0x003000f2},
- {0x0000989c, 0x00440016, 0x00440016},
- {0x0000989c, 0x00410040, 0x00410040},
- {0x0000989c, 0x000180d6, 0x000180d6},
- {0x0000989c, 0x0000c0aa, 0x0000c0aa},
- {0x0000989c, 0x000000b1, 0x000000b1},
- {0x0000989c, 0x00002000, 0x00002000},
- {0x0000989c, 0x000000d4, 0x000000d4},
- {0x000098d0, 0x0000000f, 0x0010000f},
-};
-
static const u32 ar5416Bank6TPC_9100[][3] = {
/* Addr 5G 2G */
{0x0000989c, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 4d18c66a6790..e01b5c3728b8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -897,20 +897,6 @@ static const u32 ar9280Modes_original_tx_gain_9280_2[][5] = {
{0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480},
};
-static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x9248fd00},
- {0x00004040, 0x24924924},
- {0x00004040, 0xa8000019},
- {0x00004040, 0x13160820},
- {0x00004040, 0xe5980560},
- {0x00004040, 0xc01dcffc},
- {0x00004040, 0x1aaabe41},
- {0x00004040, 0xbe105554},
- {0x00004040, 0x00043007},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
/* Addr allmodes */
{0x00004040, 0x9248fd00},
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 4b3c9b108197..ce9a0a53771e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -267,7 +267,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
switch (i->aggr) {
case AGGR_BUF_FIRST:
ctl6 |= SM(i->aggr_len, AR_AggrLen);
- /* fall through */
+ fallthrough;
case AGGR_BUF_MIDDLE:
ctl1 |= AR_IsAggr | AR_MoreAggr;
ctl6 |= SM(i->ndelim, AR_PadDelim);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 6f32b8d2ec7f..fcfed8e59d29 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -119,7 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
aModeRefSel = 2;
if (aModeRefSel)
break;
- /* fall through */
+ fallthrough;
case 1:
default:
aModeRefSel = 0;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index e1fe7a7c3ad8..76b538942a79 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -120,7 +120,7 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
switch (i->aggr) {
case AGGR_BUF_FIRST:
ctl17 |= SM(i->aggr_len, AR_AggrLen);
- /* fall through */
+ fallthrough;
case AGGR_BUF_MIDDLE:
ctl12 |= AR_IsAggr | AR_MoreAggr;
ctl17 |= SM(i->ndelim, AR_PadDelim);
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index f4c9befb3949..fab14e0a87b9 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1328,27 +1328,6 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
};
-static const u32 ar9580_1p0_pcie_phy_clkreq_enable_L1[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x0835365e},
- {0x00004040, 0x0008003b},
- {0x00004044, 0x00000000},
-};
-
-static const u32 ar9580_1p0_pcie_phy_clkreq_disable_L1[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x0831365e},
- {0x00004040, 0x0008003b},
- {0x00004044, 0x00000000},
-};
-
-static const u32 ar9580_1p0_pcie_phy_pll_on_clkreq[][2] = {
- /* Addr allmodes */
- {0x00004040, 0x0831265e},
- {0x00004040, 0x0008003b},
- {0x00004044, 0x00000000},
-};
-
static const u32 ar9580_1p0_baseband_postamble_dfs_channel[][3] = {
/* Addr 5G 2G */
{0x00009814, 0x3400c00f, 0x3400c00f},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index a412b352182c..e06b74a54a69 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -713,7 +713,7 @@ struct ath_beacon {
bool tx_last;
};
-void ath9k_beacon_tasklet(unsigned long data);
+void ath9k_beacon_tasklet(struct tasklet_struct *t);
void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *main_vif,
bool beacons);
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
@@ -1117,7 +1117,7 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
-void ath9k_tasklet(unsigned long data);
+void ath9k_tasklet(struct tasklet_struct *t);
int ath_cabq_update(struct ath_softc *);
u8 ath9k_parse_mpdudensity(u8 mpdudensity);
irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index e36f947e19fc..71e2ada86793 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -365,7 +365,7 @@ bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
if (!vif || !vif->csa_active)
return false;
- if (!ieee80211_csa_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif))
return false;
ieee80211_csa_finish(vif);
@@ -385,9 +385,9 @@ void ath9k_csa_update(struct ath_softc *sc)
ath9k_csa_update_vif, sc);
}
-void ath9k_beacon_tasklet(unsigned long data)
+void ath9k_beacon_tasklet(struct tasklet_struct *t)
{
- struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_softc *sc = from_tasklet(sc, t, bcon_tasklet);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_buf *bf = NULL;
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index fd61ae4782b6..6cf087522157 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -706,7 +706,7 @@ void ath_chanctx_event(struct ath_softc *sc, struct ieee80211_vif *vif,
"Move chanctx state from FORCE_ACTIVE to IDLE\n");
sc->sched.state = ATH_CHANCTX_STATE_IDLE;
- /* fall through */
+ fallthrough;
case ATH_CHANCTX_EVENT_SWITCH:
if (!test_bit(ATH_OP_MULTI_CHANNEL, &common->op_flags) ||
sc->sched.state == ATH_CHANCTX_STATE_FORCE_ACTIVE ||
@@ -1080,7 +1080,7 @@ static void ath_offchannel_timer(struct timer_list *t)
mod_timer(&sc->offchannel.timer, jiffies + HZ / 10);
break;
}
- /* fall through */
+ fallthrough;
case ATH_OFFCHANNEL_SUSPEND:
if (!sc->offchannel.scan_req)
return;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 56b44fc7a8e6..9729a69d3e2e 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -402,7 +402,7 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
return AR5416_PWR_TABLE_OFFSET_DB;
case EEP_ANTENNA_GAIN_2G:
band = 1;
- /* fall through */
+ fallthrough;
case EEP_ANTENNA_GAIN_5G:
return max_t(u8, max_t(u8,
pModal[band].antennaGainCh[0],
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 3f563e02d17d..860da13bfb6a 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -449,10 +449,19 @@ static void hif_usb_stop(void *hif_handle)
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
/* The pending URBs have to be canceled. */
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
+ list_del(&tx_buf->list);
+ usb_free_urb(tx_buf->urb);
+ kfree(tx_buf->buf);
+ kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
@@ -762,27 +771,37 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
unsigned long flags;
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_buf, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
list_for_each_entry_safe(tx_buf, tx_buf_tmp,
&hif_dev->tx.tx_pending, list) {
+ usb_get_urb(tx_buf->urb);
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_urb(tx_buf->urb);
list_del(&tx_buf->list);
usb_free_urb(tx_buf->urb);
kfree(tx_buf->buf);
kfree(tx_buf);
+ spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
}
+ spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
usb_kill_anchored_urbs(&hif_dev->mgmt_submitted);
}
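usb_kill_urb() may sleep and the URB completion path takes tx_lock, so the walk now pins each URB with usb_get_urb(), drops the lock around the kill, and reacquires it before continuing. A sketch mirroring that sequence, assuming kernel context and that only this path consumes the list:

    /* Drop-lock-around-usb_kill_urb() pattern; the usb_get_urb()
     * reference keeps the urb alive while the lock is released.
     */
    #include <linux/usb.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct tx_item {
            struct list_head list;
            struct urb *urb;
    };

    static void kill_pending(struct list_head *q, spinlock_t *lock)
    {
            struct tx_item *it, *tmp;
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            list_for_each_entry_safe(it, tmp, q, list) {
                    usb_get_urb(it->urb);           /* pin across unlock */
                    spin_unlock_irqrestore(lock, flags);
                    usb_kill_urb(it->urb);          /* may sleep */
                    list_del(&it->list);
                    usb_free_urb(it->urb);          /* drop the pin */
                    kfree(it);
                    spin_lock_irqsave(lock, flags);
            }
            spin_unlock_irqrestore(lock, flags);
    }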
@@ -1375,7 +1394,7 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
if (hif_dev->flags & HIF_USB_READY) {
ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
ath9k_hif_usb_dev_deinit(hif_dev);
- ath9k_destoy_wmi(hif_dev->htc_handle->drv_priv);
+ ath9k_destroy_wmi(hif_dev->htc_handle->drv_priv);
ath9k_htc_hw_free(hif_dev->htc_handle);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 9f64e32381f9..0a1634238e67 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -583,14 +583,14 @@ int ath9k_htc_tx_get_slot(struct ath9k_htc_priv *priv);
void ath9k_htc_tx_clear_slot(struct ath9k_htc_priv *priv, int slot);
void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv);
void ath9k_htc_txstatus(struct ath9k_htc_priv *priv, void *wmi_event);
-void ath9k_tx_failed_tasklet(unsigned long data);
+void ath9k_tx_failed_tasklet(struct tasklet_struct *t);
void ath9k_htc_tx_cleanup_timer(struct timer_list *t);
bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv);
int ath9k_rx_init(struct ath9k_htc_priv *priv);
void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
-void ath9k_rx_tasklet(unsigned long data);
+void ath9k_rx_tasklet(struct tasklet_struct *t);
u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index f20c839aeda2..c745897aa3d6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -514,7 +514,7 @@ bool ath9k_htc_csa_is_finished(struct ath9k_htc_priv *priv)
if (!vif || !vif->csa_active)
return false;
- if (!ieee80211_csa_is_complete(vif))
+ if (!ieee80211_beacon_cntdwn_is_complete(vif))
return false;
ieee80211_csa_finish(vif);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 1d6ad8d46607..db0c6fa9c9dc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -645,10 +645,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
spin_lock_init(&priv->tx.tx_lock);
mutex_init(&priv->mutex);
mutex_init(&priv->htc_pm_lock);
- tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
- (unsigned long)priv);
- tasklet_init(&priv->tx_failed_tasklet, ath9k_tx_failed_tasklet,
- (unsigned long)priv);
+ tasklet_setup(&priv->rx_tasklet, ath9k_rx_tasklet);
+ tasklet_setup(&priv->tx_failed_tasklet, ath9k_tx_failed_tasklet);
INIT_DELAYED_WORK(&priv->ani_work, ath9k_htc_ani_work);
INIT_WORK(&priv->ps_work, ath9k_ps_work);
INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
@@ -973,7 +971,7 @@ err_init:
ath9k_stop_wmi(priv);
hif_dev = (struct hif_device_usb *)htc_handle->hif_dev;
ath9k_hif_usb_dealloc_urbs(hif_dev);
- ath9k_destoy_wmi(priv);
+ ath9k_destroy_wmi(priv);
err_free:
ieee80211_free_hw(hw);
return ret;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index b353995bdd45..0bdc4dcb7b8f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -570,9 +570,9 @@ void ath9k_htc_tx_drain(struct ath9k_htc_priv *priv)
spin_unlock_bh(&priv->tx.tx_lock);
}
-void ath9k_tx_failed_tasklet(unsigned long data)
+void ath9k_tx_failed_tasklet(struct tasklet_struct *t)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_priv *priv = from_tasklet(priv, t, tx_failed_tasklet);
spin_lock(&priv->tx.tx_lock);
if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) {
@@ -974,7 +974,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ath_htc_rx_status *rxstatus;
struct ath_rx_status rx_stats;
bool decrypt_error = false;
- __be16 rs_datalen;
+ u16 rs_datalen;
bool is_phyerr;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
@@ -1062,9 +1062,9 @@ rx_next:
/*
* FIXME: Handle FLUSH later on.
*/
-void ath9k_rx_tasklet(unsigned long data)
+void ath9k_rx_tasklet(struct tasklet_struct *t)
{
- struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
+ struct ath9k_htc_priv *priv = from_tasklet(priv, t, rx_tasklet);
struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
struct ieee80211_rx_status rx_status;
struct sk_buff *skb;
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index d2e062eaf561..510e61e97dbc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -339,6 +339,8 @@ void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
if (skb) {
htc_hdr = (struct htc_frame_hdr *) skb->data;
+ if (htc_hdr->endpoint_id >= ARRAY_SIZE(htc_handle->endpoint))
+ goto ret;
endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
skb_pull(skb, sizeof(struct htc_frame_hdr));
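Note: the check added above validates an array index parsed from a device-supplied HTC frame header before it is used, so a malicious or buggy USB device cannot steer the lookup out of bounds. A short compilable sketch of the idea; NUM_EP is illustrative, not the driver's real endpoint count.

#include <stddef.h>

#define NUM_EP 22                         /* illustrative size */

struct endpoint { int id; };
static struct endpoint endpoints[NUM_EP];

static struct endpoint *lookup_ep(size_t idx)
{
        if (idx >= NUM_EP)                /* reject device-controlled OOB index */
                return NULL;
        return &endpoints[idx];
}

int main(void)
{
        return lookup_ep(100) == NULL ? 0 : 1;  /* OOB id is rejected */
}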
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8c97db73e34c..6609ce122e6e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1277,12 +1277,12 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
}
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_OCB:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
set |= AR_STA_ID1_STA_AP;
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_STATION:
REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
@@ -2293,7 +2293,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
case NL80211_IFTYPE_ADHOC:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 4d72cd7daaa2..690fe3a1b516 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -728,9 +728,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
spin_lock_init(&sc->sc_pm_lock);
spin_lock_init(&sc->chan_lock);
mutex_init(&sc->mutex);
- tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
- (unsigned long)sc);
+ tasklet_setup(&sc->intr_tq, ath9k_tasklet);
+ tasklet_setup(&sc->bcon_tasklet, ath9k_beacon_tasklet);
timer_setup(&sc->sleep_timer, ath_ps_full_sleep, 0);
INIT_WORK(&sc->hw_reset_work, ath_reset_work);
@@ -1014,6 +1013,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
}
int ath9k_init_device(u16 devid, struct ath_softc *sc,
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a47f6e978095..8dbf68b94228 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -19,6 +19,9 @@
#include "ath9k.h"
#include "btcoex.h"
+static void ath9k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop);
+
u8 ath9k_parse_mpdudensity(u8 mpdudensity)
{
/*
@@ -368,9 +371,9 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_dynack_node_deinit(sc->sc_ah, an);
}
-void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(struct tasklet_struct *t)
{
- struct ath_softc *sc = (struct ath_softc *)data;
+ struct ath_softc *sc = from_tasklet(sc, t, intr_tq);
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
enum ath_reset_type type;
@@ -1701,6 +1704,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
return -EOPNOTSUPP;
}
+ /* There may be MPDUs queued for the outgoing PTK. Flush the queues to
+ * make sure these are not sent unencrypted or with a wrong (new) key.
+ */
+ if (cmd == DISABLE_KEY && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ ieee80211_stop_queues(hw);
+ ath9k_flush(hw, vif, 0, true);
+ ieee80211_wake_queues(hw);
+ }
+
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
ath_dbg(common, CONFIG, "Set HW Key %d\n", cmd);
@@ -1934,7 +1946,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
flush = true;
- /* fall through */
+ fallthrough;
case IEEE80211_AMPDU_TX_STOP_CONT:
ath9k_ps_wakeup(sc);
ath_tx_aggr_stop(sc, sta, tid);
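Note: the ath9k_set_key() hunk above orders queue handling around a pairwise-key delete: stop the queues, flush what was queued under the outgoing PTK, then wake the queues. A stub sketch of that ordering; the types and helpers here are stand-ins, not the mac80211 API.

#include <stdio.h>

struct hw { int dummy; };

static void stop_queues(struct hw *hw)  { (void)hw; puts("stop");  }
static void flush_queues(struct hw *hw) { (void)hw; puts("flush"); }
static void wake_queues(struct hw *hw)  { (void)hw; puts("wake");  }

/* Order matters: stop so nothing new is queued, flush MPDUs queued for
 * the old PTK, then wake so traffic resumes under the new key. */
static void rekey_flush(struct hw *hw)
{
        stop_queues(hw);
        flush_queues(hw);
        wake_queues(hw);
}

int main(void)
{
        struct hw hw;

        rekey_flush(&hw);
        return 0;
}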
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f3461b193c7a..cff9af3af38d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -825,6 +825,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
struct pci_dev *pdev = to_pci_dev(sc->dev);
struct pci_dev *parent;
u16 aspm;
+ int ret;
if (!ah->is_pciexpress)
return;
@@ -866,8 +867,8 @@ static void ath_pci_aspm_init(struct ath_common *common)
if (AR_SREV_9462(ah))
pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix);
- pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
- if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
+ ret = pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
+ if (!ret && (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1))) {
ah->aspm_enabled = true;
/* Initialize PCIe PM and SERDES registers. */
ath9k_hw_configpcipowersave(ah, false);
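Note: the pci.c hunk makes the ASPM test depend on pcie_capability_read_word() succeeding, because a failed read leaves the out-parameter unreliable. A self-contained sketch with read_cfg() standing in for the config-space accessor and illustrative flag values.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LNKCTL_ASPM_L0S 0x1    /* illustrative flag values */
#define LNKCTL_ASPM_L1  0x2

/* stands in for pcie_capability_read_word(); returns 0 on success */
static int read_cfg(uint16_t *out)
{
        *out = LNKCTL_ASPM_L1;
        return 0;
}

static bool aspm_enabled(void)
{
        uint16_t aspm = 0;
        int ret = read_cfg(&aspm);

        /* Before the fix the return value was ignored, so a failed read
         * could leave stale flags that still pass the test. */
        return !ret && (aspm & (LNKCTL_ASPM_L0S | LNKCTL_ASPM_L1));
}

int main(void)
{
        printf("aspm_enabled: %d\n", aspm_enabled());
        return 0;
}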
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index e7a3127395be..fe29ad4b9023 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -106,8 +106,7 @@ struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
mutex_init(&wmi->multi_rmw_mutex);
init_completion(&wmi->cmd_wait);
INIT_LIST_HEAD(&wmi->pending_tx_events);
- tasklet_init(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet,
- (unsigned long)wmi);
+ tasklet_setup(&wmi->wmi_event_tasklet, ath9k_wmi_event_tasklet);
return wmi;
}
@@ -121,7 +120,7 @@ void ath9k_stop_wmi(struct ath9k_htc_priv *priv)
mutex_unlock(&wmi->op_mutex);
}
-void ath9k_destoy_wmi(struct ath9k_htc_priv *priv)
+void ath9k_destroy_wmi(struct ath9k_htc_priv *priv)
{
kfree(priv->wmi);
}
@@ -136,9 +135,9 @@ void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv)
spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
}
-void ath9k_wmi_event_tasklet(unsigned long data)
+void ath9k_wmi_event_tasklet(struct tasklet_struct *t)
{
- struct wmi *wmi = (struct wmi *)data;
+ struct wmi *wmi = from_tasklet(wmi, t, wmi_event_tasklet);
struct ath9k_htc_priv *priv = wmi->drv_priv;
struct wmi_cmd_hdr *hdr;
void *wmi_event;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index d8b912206232..5c3b710b8f31 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -185,11 +185,11 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
u8 *cmd_buf, u32 cmd_len,
u8 *rsp_buf, u32 rsp_len,
u32 timeout);
-void ath9k_wmi_event_tasklet(unsigned long data);
+void ath9k_wmi_event_tasklet(struct tasklet_struct *t);
void ath9k_fatal_work(struct work_struct *work);
void ath9k_wmi_event_drain(struct ath9k_htc_priv *priv);
void ath9k_stop_wmi(struct ath9k_htc_priv *priv);
-void ath9k_destoy_wmi(struct ath9k_htc_priv *priv);
+void ath9k_destroy_wmi(struct ath9k_htc_priv *priv);
#define WMI_CMD(_wmi_cmd) \
do { \
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 237d0cda1bcb..0d38100d6e4f 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -68,7 +68,10 @@
#define PAYLOAD_MAX (CARL9170_MAX_CMD_LEN / 4 - 1)
-static const u8 ar9170_qmap[__AR9170_NUM_TXQ] = { 3, 2, 1, 0 };
+static inline u8 ar9170_qmap(u8 idx)
+{
+ return 3 - idx; /* { 3, 2, 1, 0 } */
+}
#define CARL9170_MAX_RX_BUFFER_SIZE 8192
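Note: replacing the ar9170_qmap lookup table with an inline function removes the possibility of an out-of-bounds table read if a queue index ever arrives out of range; 3 - idx reproduces the { 3, 2, 1, 0 } mapping. A compilable sketch:

#include <stdio.h>

/* idx 0..3 -> 3..0: same mapping as the old 4-entry table, but with no
 * memory to over-read if idx is ever out of range */
static inline unsigned char qmap(unsigned char idx)
{
        return 3 - idx;
}

int main(void)
{
        for (unsigned char i = 0; i < 4; i++)
                printf("qmap(%u) = %u\n", i, qmap(i));
        return 0;
}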
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 816929fb5b14..dbef9d8fc893 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1374,7 +1374,7 @@ static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
int ret;
mutex_lock(&ar->mutex);
- memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
+ memcpy(&ar->edcf[ar9170_qmap(queue)], param, sizeof(*param));
ret = carl9170_set_qos(ar);
mutex_unlock(&ar->mutex);
return ret;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 23ab8a80c18c..908c4c8b7f82 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -766,7 +766,7 @@ static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len)
goto drop;
}
- /* fall through */
+ fallthrough;
case AR9170_RX_STATUS_MPDU_MIDDLE:
/* These are just data + mac status */
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 2407931440ed..235cf77cd60c 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -663,7 +663,7 @@ static void __carl9170_tx_process_status(struct ar9170 *ar,
unsigned int r, t, q;
bool success = true;
- q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE];
+ q = ar9170_qmap(info & CARL9170_TX_STATUS_QUEUE);
skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
if (!skb) {
@@ -830,12 +830,12 @@ static bool carl9170_tx_rts_check(struct ar9170 *ar,
case CARL9170_ERP_AUTO:
if (ampdu)
break;
- /* fall through */
+ fallthrough;
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS))
break;
- /* fall through */
+ fallthrough;
case CARL9170_ERP_RTS:
if (likely(!multi))
@@ -856,7 +856,7 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar,
case CARL9170_ERP_MAC80211:
if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
break;
- /* fall through */
+ fallthrough;
case CARL9170_ERP_CTS:
return true;
@@ -979,7 +979,7 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
((CARL9170_TX_SUPER_MISC_VIF_ID >>
CARL9170_TX_SUPER_MISC_VIF_ID_S) + 1));
- hw_queue = ar9170_qmap[carl9170_get_queue(ar, skb)];
+ hw_queue = ar9170_qmap(carl9170_get_queue(ar, skb));
hdr = (void *)skb->data;
info = IEEE80211_SKB_CB(skb);
@@ -1279,7 +1279,7 @@ void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
super = (void *)skb->data;
SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q,
- ar9170_qmap[carl9170_get_queue(ar, skb)]);
+ ar9170_qmap(carl9170_get_queue(ar, skb)));
__carl9170_tx_process_status(ar, super->s.cookie, q);
}
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index ead79335823a..e4eb666c6eea 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -377,9 +377,9 @@ void carl9170_usb_handle_tx_err(struct ar9170 *ar)
}
}
-static void carl9170_usb_tasklet(unsigned long data)
+static void carl9170_usb_tasklet(struct tasklet_struct *t)
{
- struct ar9170 *ar = (struct ar9170 *) data;
+ struct ar9170 *ar = from_tasklet(ar, t, usb_tasklet);
if (!IS_INITIALIZED(ar))
return;
@@ -1082,8 +1082,7 @@ static int carl9170_usb_probe(struct usb_interface *intf,
init_completion(&ar->cmd_wait);
init_completion(&ar->fw_boot_wait);
init_completion(&ar->fw_load_wait);
- tasklet_init(&ar->usb_tasklet, carl9170_usb_tasklet,
- (unsigned long)ar);
+ tasklet_setup(&ar->usb_tasklet, carl9170_usb_tasklet);
atomic_set(&ar->tx_cmd_urbs, 0);
atomic_set(&ar->tx_anch_urbs, 0);
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index a274eb0d1968..0813473793df 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -253,17 +253,15 @@ channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
static void dpd_reset(struct dfs_pattern_detector *dpd)
{
struct channel_detector *cd;
- if (!list_empty(&dpd->channel_detectors))
- list_for_each_entry(cd, &dpd->channel_detectors, head)
- channel_detector_reset(dpd, cd);
+ list_for_each_entry(cd, &dpd->channel_detectors, head)
+ channel_detector_reset(dpd, cd);
}
static void dpd_exit(struct dfs_pattern_detector *dpd)
{
struct channel_detector *cd, *cd0;
- if (!list_empty(&dpd->channel_detectors))
- list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
- channel_detector_exit(dpd, cd);
+ list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+ channel_detector_exit(dpd, cd);
kfree(dpd);
}
@@ -331,9 +329,8 @@ static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
return false;
/* delete all channel detectors for previous DFS domain */
- if (!list_empty(&dpd->channel_detectors))
- list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
- channel_detector_exit(dpd, cd);
+ list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
+ channel_detector_exit(dpd, cd);
dpd->radar_spec = rt->radar_types;
dpd->num_radar_types = rt->num_radar_types;
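Note: the dropped list_empty() guards above were redundant, since list_for_each_entry() walks from head->next until it reaches head again, so an empty (self-pointing) list never enters the loop body. A minimal demonstration mimicking include/linux/list.h semantics:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void init_list(struct list_head *h) { h->next = h->prev = h; }

int main(void)
{
        struct list_head head;
        struct list_head *pos;
        int iterations = 0;

        init_list(&head);
        for (pos = head.next; pos != &head; pos = pos->next)
                iterations++;           /* never entered when the list is empty */

        printf("iterations on empty list: %d\n", iterations);
        return 0;
}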
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index bab30f7a443c..63079231e48e 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -334,6 +334,7 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
spin_lock_irqsave(&wcn->dxe_lock, flags);
skb = wcn->tx_ack_skb;
wcn->tx_ack_skb = NULL;
+ del_timer(&wcn->tx_ack_timer);
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
if (!skb) {
@@ -345,6 +346,8 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
if (status == 1)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
@@ -352,6 +355,32 @@ void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
ieee80211_wake_queues(wcn->hw);
}
+static void wcn36xx_dxe_tx_timer(struct timer_list *t)
+{
+ struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
+ struct ieee80211_tx_info *info;
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ /* TX Timeout */
+ wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
+
+ spin_lock_irqsave(&wcn->dxe_lock, flags);
+ skb = wcn->tx_ack_skb;
+ wcn->tx_ack_skb = NULL;
+ spin_unlock_irqrestore(&wcn->dxe_lock, flags);
+
+ if (!skb)
+ return;
+
+ info = IEEE80211_SKB_CB(skb);
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
+ info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
+ ieee80211_tx_status_irqsafe(wcn->hw, skb);
+ ieee80211_wake_queues(wcn->hw);
+}
+
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
struct wcn36xx_dxe_ctl *ctl;
@@ -397,6 +426,7 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
struct wcn36xx *wcn = (struct wcn36xx *)dev;
int int_src, int_reason;
+ bool transmitted = false;
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
@@ -434,8 +464,10 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
int_reason);
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
- WCN36XX_CH_STAT_INT_ED_MASK))
+ WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
+ transmitted = true;
+ }
}
if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
@@ -473,9 +505,27 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
int_reason);
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
- WCN36XX_CH_STAT_INT_ED_MASK))
+ WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
+ transmitted = true;
+ }
+ }
+
+ spin_lock(&wcn->dxe_lock);
+ if (wcn->tx_ack_skb && transmitted) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(wcn->tx_ack_skb);
+
+ /* TX complete, no need to wait for 802.11 ack indication */
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS &&
+ info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ del_timer(&wcn->tx_ack_timer);
+ ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = NULL;
+ ieee80211_wake_queues(wcn->hw);
+ }
}
+ spin_unlock(&wcn->dxe_lock);
return IRQ_HANDLED;
}
@@ -916,6 +966,8 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
if (ret < 0)
goto out_err_irq;
+ timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
+
return 0;
out_err_irq:
@@ -934,6 +986,7 @@ void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
free_irq(wcn->tx_irq, wcn);
free_irq(wcn->rx_irq, wcn);
+ del_timer(&wcn->tx_ack_timer);
if (wcn->tx_ack_skb) {
ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
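Note: the ack-indication path, the new timeout handler, and the new tx-complete branch above all claim wcn->tx_ack_skb under dxe_lock and NULL it out, so exactly one path completes the frame. A pthread sketch of that claim-under-lock pattern (compile with -pthread); the names are illustrative, not the driver's.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *tx_ack_skb = "frame";

static void claim(const char *who)
{
        void *skb;

        pthread_mutex_lock(&lock);
        skb = tx_ack_skb;       /* claim the pending frame ... */
        tx_ack_skb = NULL;      /* ... and mark it taken */
        pthread_mutex_unlock(&lock);

        if (skb)
                printf("%s completed %s\n", who, (const char *)skb);
}

int main(void)
{
        claim("ack-indication");  /* first path wins */
        claim("timeout");         /* second path sees NULL and does nothing */
        return 0;
}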
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index aab5a58616fc..65ef893f2736 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -726,7 +726,137 @@ enum pe_stats_mask {
#define WCN36XX_HAL_CFG_AP_LINK_MONITOR_TIMEOUT 102
#define WCN36XX_HAL_CFG_BTC_DWELL_TIME_MULTIPLIER 103
#define WCN36XX_HAL_CFG_ENABLE_TDLS_OXYGEN_MODE 104
-#define WCN36XX_HAL_CFG_MAX_PARAMS 105
+#define WCN36XX_HAL_CFG_ENABLE_NAT_KEEP_ALIVE_FILTER 105
+#define WCN36XX_HAL_CFG_ENABLE_SAP_OBSS_PROT 106
+#define WCN36XX_HAL_CFG_PSPOLL_DATA_RECEP_TIMEOUT 107
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_BUFFER_STA_CAPABLE 108
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_MASK 109
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_INACTIVITY_TIME 110
+#define WCN36XX_HAL_CFG_TDLS_PUAPSD_RX_FRAME_THRESHOLD 111
+#define WCN36XX_HAL_CFG_ANTENNA_DIVERSITY 112
+#define WCN36XX_HAL_CFG_ATH_DISABLE 113
+#define WCN36XX_HAL_CFG_FLEXCONNECT_POWER_FACTOR 114
+#define WCN36XX_HAL_CFG_ENABLE_ADAPTIVE_RX_DRAIN 115
+#define WCN36XX_HAL_CFG_TDLS_OFF_CHANNEL_CAPABLE 116
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_WAN_FREQ 117
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_WLAN_FREQ 118
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_CONFIG 119
+#define WCN36XX_HAL_CFG_MWS_COEX_V1_CONFIG2 120
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_WAN_FREQ 121
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_WLAN_FREQ 122
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_CONFIG 123
+#define WCN36XX_HAL_CFG_MWS_COEX_V2_CONFIG2 124
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_WAN_FREQ 125
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_WLAN_FREQ 126
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_CONFIG 127
+#define WCN36XX_HAL_CFG_MWS_COEX_V3_CONFIG2 128
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_WAN_FREQ 129
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_WLAN_FREQ 130
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_CONFIG 131
+#define WCN36XX_HAL_CFG_MWS_COEX_V4_CONFIG2 132
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_WAN_FREQ 133
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_WLAN_FREQ 134
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_CONFIG 135
+#define WCN36XX_HAL_CFG_MWS_COEX_V5_CONFIG2 136
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_WAN_FREQ 137
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_WLAN_FREQ 138
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_CONFIG 139
+#define WCN36XX_HAL_CFG_MWS_COEX_V6_CONFIG2 140
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_WAN_FREQ 141
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_WLAN_FREQ 142
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_CONFIG 143
+#define WCN36XX_HAL_CFG_MWS_COEX_V7_CONFIG2 144
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_WAN_FREQ 145
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_WLAN_FREQ 146
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_CONFIG 147
+#define WCN36XX_HAL_CFG_MWS_COEX_V8_CONFIG2 148
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_WAN_FREQ 149
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_WLAN_FREQ 150
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_CONFIG 151
+#define WCN36XX_HAL_CFG_MWS_COEX_V9_CONFIG2 152
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_WAN_FREQ 153
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_WLAN_FREQ 154
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_CONFIG 155
+#define WCN36XX_HAL_CFG_MWS_COEX_V10_CONFIG2 156
+#define WCN36XX_HAL_CFG_MWS_COEX_MODEM_BACKOFF 157
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG1 158
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG2 159
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG3 160
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG4 161
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG5 162
+#define WCN36XX_HAL_CFG_MWS_COEX_CONFIG6 163
+#define WCN36XX_HAL_CFG_SAR_POWER_BACKOFF 164
+#define WCN36XX_HAL_CFG_GO_LINK_MONITOR_TIMEOUT 165
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_ACTIVE_WLAN_LEN 166
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_ACTIVE_BT_LEN 167
+#define WCN36XX_HAL_CFG_BTC_SAP_STATIC_OPP_ACTIVE_WLAN_LEN 168
+#define WCN36XX_HAL_CFG_BTC_SAP_STATIC_OPP_ACTIVE_BT_LEN 169
+#define WCN36XX_HAL_CFG_RMC_FIXED_RATE 170
+#define WCN36XX_HAL_CFG_ASD_PROBE_INTERVAL 171
+#define WCN36XX_HAL_CFG_ASD_TRIGGER_THRESHOLD 172
+#define WCN36XX_HAL_CFG_ASD_RTT_RSSI_HYST_THRESHOLD 173
+#define WCN36XX_HAL_CFG_BTC_CTS2S_ON_STA_DURING_SCO 174
+#define WCN36XX_HAL_CFG_SHORT_PREAMBLE 175
+#define WCN36XX_HAL_CFG_SHORT_SLOT_TIME 176
+#define WCN36XX_HAL_CFG_DELAYED_BA 177
+#define WCN36XX_HAL_CFG_IMMEDIATE_BA 178
+#define WCN36XX_HAL_CFG_DOT11_MODE 179
+#define WCN36XX_HAL_CFG_HT_CAPS 180
+#define WCN36XX_HAL_CFG_AMPDU_PARAMS 181
+#define WCN36XX_HAL_CFG_TX_BF_INFO 182
+#define WCN36XX_HAL_CFG_ASC_CAP_INFO 183
+#define WCN36XX_HAL_CFG_EXT_HT_CAPS 184
+#define WCN36XX_HAL_CFG_QOS_ENABLED 185
+#define WCN36XX_HAL_CFG_WME_ENABLED 186
+#define WCN36XX_HAL_CFG_WSM_ENABLED 187
+#define WCN36XX_HAL_CFG_WMM_ENABLED 188
+#define WCN36XX_HAL_CFG_UAPSD_PER_AC_BITMASK 189
+#define WCN36XX_HAL_CFG_MCS_RATES 190
+#define WCN36XX_HAL_CFG_VHT_CAPS 191
+#define WCN36XX_HAL_CFG_VHT_RX_SUPP_MCS 192
+#define WCN36XX_HAL_CFG_VHT_TX_SUPP_MCS 193
+#define WCN36XX_HAL_CFG_RA_FILTER_ENABLE 194
+#define WCN36XX_HAL_CFG_RA_RATE_LIMIT_INTERVAL 195
+#define WCN36XX_HAL_CFG_BTC_FATAL_HID_NSNIFF_BLK 196
+#define WCN36XX_HAL_CFG_BTC_CRITICAL_HID_NSNIFF_BLK 197
+#define WCN36XX_HAL_CFG_BTC_DYN_A2DP_TX_QUEUE_THOLD 198
+#define WCN36XX_HAL_CFG_BTC_DYN_OPP_TX_QUEUE_THOLD 199
+#define WCN36XX_HAL_CFG_LINK_FAIL_TIMEOUT 200
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_SP 201
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_RX_CNT 202
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_TX_CNT 203
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW 204
+#define WCN36XX_HAL_CFG_MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW 205
+#define WCN36XX_HAL_CFG_MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE 206
+#define WCN36XX_HAL_CFG_MAX_UAPSD_INACTIVITY_INTERVALS 207
+#define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_WMMPS 208
+#define WCN36XX_HAL_CFG_BURST_MODE_BE_TXOP_VALUE 209
+#define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_RA_START_RATE 210
+#define WCN36XX_HAL_CFG_BTC_FAST_WLAN_CONN_PREF 211
+#define WCN36XX_HAL_CFG_ENABLE_RTSCTS_HTVHT 212
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN 213
+#define WCN36XX_HAL_CFG_BTC_STATIC_OPP_WLAN_IDLE_BT_LEN 214
+#define WCN36XX_HAL_CFG_LINK_FAIL_TX_CNT 215
+#define WCN36XX_HAL_CFG_TOGGLE_ARP_BDRATES 216
+#define WCN36XX_HAL_CFG_OPTIMIZE_CA_EVENT 217
+#define WCN36XX_HAL_CFG_EXT_SCAN_CONC_MODE 218
+#define WCN36XX_HAL_CFG_BAR_WAKEUP_HOST_DISABLE 219
+#define WCN36XX_HAL_CFG_SAR_BOFFSET_CORRECTION_ENABLE 220
+#define WCN36XX_HAL_CFG_UNITS_OF_BCN_WAIT_TIME 221
+#define WCN36XX_HAL_CFG_CONS_BCNMISS_COUNT 222
+#define WCN36XX_HAL_CFG_BTC_DISABLE_WLAN_LINK_CRITICAL 223
+#define WCN36XX_HAL_CFG_DISABLE_SCAN_DURING_SCO 224
+#define WCN36XX_HAL_CFG_TRIGGER_NULLFRAME_BEFORE_HB 225
+#define WCN36XX_HAL_CFG_ENABLE_POWERSAVE_OFFLOAD 226
+#define WCN36XX_HAL_CFG_MAX_PARAMS 227
+
+/* Specify the starting bitrate. 11B and 11A/G rates can be specified in
+ * multiples of 0.5 Mbps, so 5.5 Mbps => 11. For MCS 0-7 rates, bit 7
+ * should be set to 1 and bits 0-6 represent the MCS index, so MCS2 => 130.
+ * Any invalid non-zero value or unsupported rate will set the start rate
+ * to 6 Mbps.
+ */
+#define WCN36XX_HAL_CFG_ENABLE_DYNAMIC_RA_START_RATE 210
/* Message definitions - All the messages below need to be packed */
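Note: a worked example of the start-rate encoding described in the comment above, taking the comment at face value: legacy rates are in 0.5 Mbps units and MCS rates set bit 7. The MCS 5 value matches the 133 used in the wcn36xx config table later in this diff.

#include <stdio.h>

int main(void)
{
        unsigned legacy_5_5mbps = (unsigned)(5.5 * 2);  /* 5.5 / 0.5 = 11  */
        unsigned mcs2 = 0x80 | 2;                       /* bit 7 + index = 130 */
        unsigned mcs5 = 0x80 | 5;                       /* 133, see cfg table  */

        printf("%u %u %u\n", legacy_5_5mbps, mcs2, mcs5);
        return 0;
}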
@@ -1405,6 +1535,76 @@ struct wcn36xx_hal_config_sta_req_msg {
struct wcn36xx_hal_config_sta_params sta_params;
} __packed;
+struct wcn36xx_hal_supported_rates_v1 {
+ /* For the self STA entry this represents the self mode; for peer
+ * stations it represents the mode of the peer.
+ *
+ * On a station:
+ *
+ * -- this mode is updated when PE adds the self entry,
+ *
+ * -- OR when PE sends the 'ADD_BSS' message and the station context
+ * in the BSS is used to indicate the mode of the AP.
+ *
+ * On an AP:
+ *
+ * -- this mode is updated when PE sends 'ADD_BSS' and the STA entry
+ * for that BSS is used to indicate the self mode of the AP,
+ *
+ * -- OR when a station is associated and PE sends the 'ADD_STA'
+ * message with this mode updated.
+ */
+
+ enum sta_rate_mode op_rate_mode;
+
+ /* 11b, 11a and aniLegacyRates are IE rates which give the rate in
+ * units of 500 kbps
+ */
+ u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES];
+ u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES];
+ u16 legacy_rates[WCN36XX_HAL_NUM_POLARIS_RATES];
+ u16 reserved;
+
+ /* Taurus only supports 26 Titan rates (no ESF/concat rates will be
+ * supported). The first 26 bits are reserved for those Titan rates,
+ * the last 4 bits (28-31) are for Taurus, and 2 bits (26-27) are
+ * reserved.
+ */
+ u32 enhanced_rate_bitmap;
+
+ /* Bits 0-76 are used, the rest are reserved;
+ * bits 0-15 and 32 should be set.
+ */
+ u8 supported_mcs_set[WCN36XX_HAL_MAC_MAX_SUPPORTED_MCS_SET];
+
+ /* RX Highest Supported Data Rate defines the highest data
+ * rate that the STA is able to receive, in units of 1 Mbps.
+ * This value is derived from the "Supported MCS Set" field
+ * inside the HT capability element.
+ */
+ u16 rx_highest_data_rate;
+
+ /* Indicates the Maximum MCS that can be received for each spatial
+ * stream.
+ */
+ u16 vht_rx_mcs_map;
+
+ /* Indicates the highest VHT data rate that the STA is able to
+ * receive.
+ */
+ u16 vht_rx_highest_data_rate;
+
+ /* Indicates the Maximum MCS that can be transmitted for each spatial
+ * stream.
+ */
+ u16 vht_tx_mcs_map;
+
+ /* Indicates the highest VHT data rate that the STA is able to
+ * transmit.
+ */
+ u16 vht_tx_highest_data_rate;
+} __packed;
+
struct wcn36xx_hal_config_sta_params_v1 {
/* BSSID of STA */
u8 bssid[ETH_ALEN];
@@ -1507,12 +1707,22 @@ struct wcn36xx_hal_config_sta_params_v1 {
u8 p2p;
/* Reserved to align next field on a dword boundary */
- u8 reserved;
+ u8 ht_ldpc_enabled:1;
+ u8 vht_ldpc_enabled:1;
+ u8 vht_tx_bf_enabled:1;
+ u8 vht_tx_mu_beamformee_capable:1;
+ u8 reserved:4;
/* These rates are the intersection of peer and self capabilities. */
- struct wcn36xx_hal_supported_rates supported_rates;
+ struct wcn36xx_hal_supported_rates_v1 supported_rates;
+
+ u8 vht_capable;
+ u8 vht_tx_channel_width_set;
+
} __packed;
+#define WCN36XX_DIFF_STA_PARAMS_V1_NOVHT 10
+
struct wcn36xx_hal_config_sta_req_msg_v1 {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_config_sta_params_v1 sta_params;
@@ -1933,8 +2143,14 @@ struct wcn36xx_hal_config_bss_params_v1 {
* "STA context"
*/
struct wcn36xx_hal_config_sta_params_v1 sta;
+
+ u8 vht_capable;
+ u8 vht_tx_channel_width_set;
+
} __packed;
+#define WCN36XX_DIFF_BSS_PARAMS_V1_NOVHT (WCN36XX_DIFF_STA_PARAMS_V1_NOVHT + 2)
+
struct wcn36xx_hal_config_bss_req_msg_v1 {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_config_bss_params_v1 bss_params;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 702b689c06df..706728fba72d 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -39,10 +39,10 @@ MODULE_PARM_DESC(debug_mask, "Debugging mask");
.max_power = 25, \
}
-#define CHAN5G(_freq, _idx) { \
+#define CHAN5G(_freq, _idx, _phy_val) { \
.band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
- .hw_value = (_idx), \
+ .hw_value = (_phy_val) << HW_VALUE_PHY_SHIFT | HW_VALUE_CHANNEL(_idx), \
.max_power = 25, \
}
@@ -67,29 +67,29 @@ static struct ieee80211_channel wcn_2ghz_channels[] = {
};
static struct ieee80211_channel wcn_5ghz_channels[] = {
- CHAN5G(5180, 36),
- CHAN5G(5200, 40),
- CHAN5G(5220, 44),
- CHAN5G(5240, 48),
- CHAN5G(5260, 52),
- CHAN5G(5280, 56),
- CHAN5G(5300, 60),
- CHAN5G(5320, 64),
- CHAN5G(5500, 100),
- CHAN5G(5520, 104),
- CHAN5G(5540, 108),
- CHAN5G(5560, 112),
- CHAN5G(5580, 116),
- CHAN5G(5600, 120),
- CHAN5G(5620, 124),
- CHAN5G(5640, 128),
- CHAN5G(5660, 132),
- CHAN5G(5700, 140),
- CHAN5G(5745, 149),
- CHAN5G(5765, 153),
- CHAN5G(5785, 157),
- CHAN5G(5805, 161),
- CHAN5G(5825, 165)
+ CHAN5G(5180, 36, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5200, 40, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5220, 44, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5240, 48, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5260, 52, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5280, 56, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5300, 60, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5320, 64, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5500, 100, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5520, 104, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5540, 108, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5560, 112, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5580, 116, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5600, 120, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5620, 124, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5640, 128, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5660, 132, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5700, 140, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5745, 149, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5765, 153, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
+ CHAN5G(5785, 157, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5805, 161, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
+ CHAN5G(5825, 165, 0)
};
#define RATE(_bitrate, _hw_rate, _flags) { \
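Note: the reworked CHAN5G macro packs a PHY mode into the upper bits of hw_value and keeps the channel number in the lower bits, which wcn36xx_smd_start_hw_scan() later strips with HW_VALUE_CHANNEL(). A sketch of that packing under the assumption that HW_VALUE_PHY_SHIFT is 8; the phy value in main() is illustrative.

#include <stdio.h>

#define HW_VALUE_PHY_SHIFT 8                           /* assumed layout */
#define HW_VALUE_CHANNEL(hw) ((hw) & ((1 << HW_VALUE_PHY_SHIFT) - 1))
#define HW_VALUE_PHY(hw)     ((hw) >> HW_VALUE_PHY_SHIFT)

static unsigned pack(unsigned phy, unsigned chan)
{
        return (phy << HW_VALUE_PHY_SHIFT) | chan;
}

int main(void)
{
        unsigned hw = pack(0x12, 36);                   /* phy value illustrative */

        printf("chan=%u phy=0x%x\n", HW_VALUE_CHANNEL(hw), HW_VALUE_PHY(hw));
        return 0;
}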
@@ -163,7 +163,7 @@ static struct ieee80211_supported_band wcn_band_5ghz = {
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
- .rx_highest = cpu_to_le16(72),
+ .rx_highest = cpu_to_le16(150),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
}
}
@@ -354,8 +354,6 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
- cancel_work_sync(&wcn->scan_work);
-
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
struct cfg80211_scan_info scan_info = {
@@ -378,12 +376,37 @@ static void wcn36xx_stop(struct ieee80211_hw *hw)
kfree(wcn->hal_buf);
}
-static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
{
- struct wcn36xx *wcn = hw->priv;
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+ if (enable && !wcn->sw_scan) {
+ if (vif->bss_conf.ps) /* ps allowed ? */
+ wcn36xx_pmc_enter_bmps_state(wcn, vif);
+ } else {
+ wcn36xx_pmc_exit_bmps_state(wcn, vif);
+ }
+ }
+}
+
+static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
+{
+ struct ieee80211_vif *vif = NULL;
+ struct wcn36xx_vif *tmp;
+
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+ wcn36xx_smd_switch_channel(wcn, vif, ch);
+ }
+}
+
+static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wcn36xx *wcn = hw->priv;
+
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
mutex_lock(&wcn->conf_mutex);
@@ -392,24 +415,29 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
int ch = WCN36XX_HW_CHANNEL(wcn);
wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
ch);
- list_for_each_entry(tmp, &wcn->vif_list, list) {
- vif = wcn36xx_priv_to_vif(tmp);
- wcn36xx_smd_switch_channel(wcn, vif, ch);
- }
- }
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- list_for_each_entry(tmp, &wcn->vif_list, list) {
- vif = wcn36xx_priv_to_vif(tmp);
- if (hw->conf.flags & IEEE80211_CONF_PS) {
- if (vif->bss_conf.ps) /* ps allowed ? */
- wcn36xx_pmc_enter_bmps_state(wcn, vif);
- } else {
- wcn36xx_pmc_exit_bmps_state(wcn, vif);
- }
+ if (wcn->sw_scan_opchannel == ch) {
+ /* If the channel is the initial operating channel, we may
+ * want to receive/transmit regular data packets, so simply
+ * stop the scan session and exit PS mode.
+ */
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
+ wcn->sw_scan_vif);
+ } else if (wcn->sw_scan) {
+ /* A scan is ongoing, do not change the operating
+ * channel, but start a scan session on the channel.
+ */
+ wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN,
+ wcn->sw_scan_vif);
+ wcn36xx_smd_start_scan(wcn, ch);
+ } else {
+ wcn36xx_change_opchannel(wcn, ch);
}
}
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ wcn36xx_change_ps(wcn, hw->conf.flags & IEEE80211_CONF_PS);
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -582,6 +610,15 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
}
}
+ /* FIXME: Only enable BMPS support when encryption is enabled.
+ * For some reason, when connected to an open/no-security BSS,
+ * the wcn36xx controller in BMPS mode does not forward 'wake-up'
+ * beacons even though the AP sends DTIM with the station's AID.
+ * It could be due to a firmware issue or to the way the driver
+ * configures the station.
+ */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ vif_priv->allow_bmps = true;
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
@@ -614,55 +651,26 @@ out:
return ret;
}
-static void wcn36xx_hw_scan_worker(struct work_struct *work)
+static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_scan_request *hw_req)
{
- struct wcn36xx *wcn = container_of(work, struct wcn36xx, scan_work);
- struct cfg80211_scan_request *req = wcn->scan_req;
- u8 channels[WCN36XX_HAL_PNO_MAX_NETW_CHANNELS_EX];
- struct cfg80211_scan_info scan_info = {};
- bool aborted = false;
+ struct wcn36xx *wcn = hw->priv;
int i;
- wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 scan %d channels worker\n", req->n_channels);
-
- for (i = 0; i < req->n_channels; i++)
- channels[i] = req->channels[i]->hw_value;
-
- wcn36xx_smd_update_scan_params(wcn, channels, req->n_channels);
-
- wcn36xx_smd_init_scan(wcn, HAL_SYS_MODE_SCAN);
- for (i = 0; i < req->n_channels; i++) {
- mutex_lock(&wcn->scan_lock);
- aborted = wcn->scan_aborted;
- mutex_unlock(&wcn->scan_lock);
-
- if (aborted)
- break;
-
- wcn->scan_freq = req->channels[i]->center_freq;
- wcn->scan_band = req->channels[i]->band;
-
- wcn36xx_smd_start_scan(wcn, req->channels[i]->hw_value);
- msleep(30);
- wcn36xx_smd_end_scan(wcn, req->channels[i]->hw_value);
-
- wcn->scan_freq = 0;
+ if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
+ /* fall back to mac80211 software scan */
+ return 1;
}
- wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN);
-
- scan_info.aborted = aborted;
- ieee80211_scan_completed(wcn->hw, &scan_info);
- mutex_lock(&wcn->scan_lock);
- wcn->scan_req = NULL;
- mutex_unlock(&wcn->scan_lock);
-}
+ /* For an unknown reason, the hardware-offloaded scan only works
+ * with 2.4 GHz channels; fall back to software scan in other cases.
+ */
+ for (i = 0; i < hw_req->req.n_channels; i++) {
+ if (hw_req->req.channels[i]->band != NL80211_BAND_2GHZ)
+ return 1;
+ }
-static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_scan_request *hw_req)
-{
- struct wcn36xx *wcn = hw->priv;
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
mutex_unlock(&wcn->scan_lock);
@@ -674,12 +682,6 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&wcn->scan_lock);
- if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
- /* legacy manual/sw scan */
- schedule_work(&wcn->scan_work);
- return 0;
- }
-
return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
}
@@ -696,16 +698,35 @@ static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
/* ieee80211_scan_completed will be called on FW scan
* indication */
wcn36xx_smd_stop_hw_scan(wcn);
- } else {
- struct cfg80211_scan_info scan_info = {
- .aborted = true,
- };
-
- cancel_work_sync(&wcn->scan_work);
- ieee80211_scan_completed(wcn->hw, &scan_info);
}
}
+static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+
+ wcn->sw_scan = true;
+ wcn->sw_scan_vif = vif;
+ if (vif_priv->sta_assoc)
+ wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn);
+ else
+ wcn->sw_scan_opchannel = 0;
+}
+
+static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wcn36xx *wcn = hw->priv;
+
+ /* ensure that any scan session is finished */
+ wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN, wcn->sw_scan_vif);
+ wcn->sw_scan = false;
+ wcn->sw_scan_opchannel = 0;
+}
+
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
enum nl80211_band band)
{
@@ -745,7 +766,16 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
sta->ht_cap.mcs.rx_mask,
sizeof(sta->ht_cap.mcs.rx_mask));
}
+
+ if (sta->vht_cap.vht_supported) {
+ sta_priv->supported_rates.op_rate_mode = STA_11ac;
+ sta_priv->supported_rates.vht_rx_mcs_map =
+ sta->vht_cap.vht_mcs.rx_mcs_map;
+ sta_priv->supported_rates.vht_tx_mcs_map =
+ sta->vht_cap.vht_mcs.tx_mcs_map;
+ }
}
+
void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
{
u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
@@ -772,6 +802,14 @@ void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
rates->supported_mcs_set[0] = 0xFF;
}
+
+void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates)
+{
+ rates->op_rate_mode = STA_11ac;
+ rates->vht_rx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9;
+ rates->vht_tx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9;
+}
+
static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -879,6 +917,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif->addr,
bss_conf->aid);
vif_priv->sta_assoc = false;
+ vif_priv->allow_bmps = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
vif->addr,
@@ -1083,6 +1122,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
u16 tid = params->tid;
u16 *ssn = &params->ssn;
int ret = 0;
+ u8 session;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
@@ -1092,10 +1132,11 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
sta_priv->tid = tid;
- wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
- get_sta_index(vif, sta_priv));
- wcn36xx_smd_add_ba(wcn);
- wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv));
+ session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
+ get_sta_index(vif, sta_priv));
+ wcn36xx_smd_add_ba(wcn, session);
+ wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid,
+ session);
break;
case IEEE80211_AMPDU_RX_STOP:
wcn36xx_smd_del_ba(wcn, tid, get_sta_index(vif, sta_priv));
@@ -1149,6 +1190,8 @@ static const struct ieee80211_ops wcn36xx_ops = {
.set_key = wcn36xx_set_key,
.hw_scan = wcn36xx_hw_scan,
.cancel_hw_scan = wcn36xx_cancel_hw_scan,
+ .sw_scan_start = wcn36xx_sw_scan_start,
+ .sw_scan_complete = wcn36xx_sw_scan_complete,
.bss_info_changed = wcn36xx_bss_info_changed,
.set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add,
@@ -1158,6 +1201,35 @@ static const struct ieee80211_ops wcn36xx_ops = {
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
+static void
+wcn36xx_set_ieee80211_vht_caps(struct ieee80211_sta_vht_cap *vht_cap)
+{
+ vht_cap->vht_supported = true;
+
+ vht_cap->cap = (IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ 3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
+ 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
+
+ vht_cap->vht_mcs.rx_mcs_map =
+ cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
+
+ vht_cap->vht_mcs.rx_highest = cpu_to_le16(433);
+ vht_cap->vht_mcs.tx_highest = vht_cap->vht_mcs.rx_highest;
+
+ vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
+}
+
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
static const u32 cipher_suites[] = {
@@ -1173,6 +1245,7 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
+ ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1183,6 +1256,9 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
if (wcn->rf_id != RF_IRIS_WCN3620)
wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
+ if (wcn->rf_id == RF_IRIS_WCN3680)
+ wcn36xx_set_ieee80211_vht_caps(&wcn_band_5ghz.vht_cap);
+
wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS;
wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN;
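Note: wcn36xx_set_ieee80211_vht_caps() above builds the VHT MCS map as 2 bits per spatial stream, with stream 1 advertising MCS 0-9 and streams 2-8 marked unsupported. A sketch that reproduces the resulting bitmap; the constants mirror the ieee80211.h values as I understand them.

#include <stdio.h>
#include <stdint.h>

#define VHT_MCS_SUPPORT_0_9   2   /* per 802.11ac MCS map field encoding */
#define VHT_MCS_NOT_SUPPORTED 3

int main(void)
{
        uint16_t map = VHT_MCS_SUPPORT_0_9;   /* stream 1: bits 0-1 */
        int ss;

        for (ss = 1; ss < 8; ss++)            /* streams 2-8: bits 2-15 */
                map |= VHT_MCS_NOT_SUPPORTED << (2 * ss);

        printf("rx_mcs_map = 0x%04x\n", map); /* prints 0xfffe */
        return 0;
}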
@@ -1280,6 +1356,8 @@ static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
+ if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
+ wcn->rf_id = RF_IRIS_WCN3680;
of_node_put(iris_node);
}
@@ -1326,8 +1404,6 @@ static int wcn36xx_probe(struct platform_device *pdev)
goto out_wq;
}
- INIT_WORK(&wcn->scan_work, wcn36xx_hw_scan_worker);
-
wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw);
if (IS_ERR(wcn->smd_channel)) {
wcn36xx_err("failed to open WLAN_CTRL channel\n");
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 1976b80c235f..2d0780fefd47 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -23,11 +23,15 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
{
int ret = 0;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- /* TODO: Make sure the TX chain clean */
+
+ if (!vif_priv->allow_bmps)
+ return -ENOTSUPP;
+
ret = wcn36xx_smd_enter_bmps(wcn, vif);
if (!ret) {
wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
vif_priv->pw_state = WCN36XX_BMPS;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
} else {
/*
* One of the reasons why HW will not enter BMPS is because
@@ -52,6 +56,7 @@ int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
}
wcn36xx_smd_exit_bmps(wcn, vif);
vif_priv->pw_state = WCN36XX_FULL_POWER;
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
return 0;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 77269ac7f352..766400f7b61c 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -45,8 +45,8 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 6000),
WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
- WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 6),
- WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 6),
+ WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 15),
+ WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 15),
WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
@@ -77,6 +77,103 @@ static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
+ WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 133), /* MCS 5 */
+};
+
+static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = {
+ WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1),
+ WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0),
+ WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785),
+ WCN36XX_CFG_VAL(CAL_PERIOD, 5),
+ WCN36XX_CFG_VAL(CAL_CONTROL, 1),
+ WCN36XX_CFG_VAL(PROXIMITY, 0),
+ WCN36XX_CFG_VAL(NETWORK_DENSITY, 3),
+ WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 4096),
+ WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
+ WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
+ WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 15),
+ WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 15),
+ WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
+ WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15),
+ WCN36XX_CFG_VAL(FIXED_RATE, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4),
+ WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0),
+ WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0),
+ WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1),
+ WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5),
+ WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_24GHZ, 1),
+ WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5),
+ WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40),
+ WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200),
+ WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1),
+ WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1),
+ WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20),
+ WCN36XX_CFG_VAL(STATS_PERIOD, 10),
+ WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000),
+ WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0),
+ WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128),
+ WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560),
+ WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0),
+ WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
+ WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
+ WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
+ WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
+ WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
+ WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_MASK, 0),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_BUFFER_STA_CAPABLE, 1),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_INACTIVITY_TIME, 0),
+ WCN36XX_CFG_VAL(TDLS_PUAPSD_RX_FRAME_THRESHOLD, 10),
+ WCN36XX_CFG_VAL(TDLS_OFF_CHANNEL_CAPABLE, 1),
+ WCN36XX_CFG_VAL(ENABLE_ADAPTIVE_RX_DRAIN, 1),
+ WCN36XX_CFG_VAL(FLEXCONNECT_POWER_FACTOR, 0),
+ WCN36XX_CFG_VAL(ANTENNA_DIVERSITY, 3),
+ WCN36XX_CFG_VAL(ATH_DISABLE, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_WLAN_LEN, 60000),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_BT_LEN, 90000),
+ WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_WLAN_LEN, 30000),
+ WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_BT_LEN, 30000),
+ WCN36XX_CFG_VAL(ASD_PROBE_INTERVAL, 50),
+ WCN36XX_CFG_VAL(ASD_TRIGGER_THRESHOLD, -60),
+ WCN36XX_CFG_VAL(ASD_RTT_RSSI_HYST_THRESHOLD, 3),
+ WCN36XX_CFG_VAL(BTC_CTS2S_ON_STA_DURING_SCO, 0),
+ WCN36XX_CFG_VAL(RA_FILTER_ENABLE, 0),
+ WCN36XX_CFG_VAL(RA_RATE_LIMIT_INTERVAL, 60),
+ WCN36XX_CFG_VAL(BTC_FATAL_HID_NSNIFF_BLK, 2),
+ WCN36XX_CFG_VAL(BTC_CRITICAL_HID_NSNIFF_BLK, 1),
+ WCN36XX_CFG_VAL(BTC_DYN_A2DP_TX_QUEUE_THOLD, 0),
+ WCN36XX_CFG_VAL(BTC_DYN_OPP_TX_QUEUE_THOLD, 1),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_SP, 10),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT, 50),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT, 50),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW, 500),
+ WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW, 500),
+ WCN36XX_CFG_VAL(MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE, 0),
+ WCN36XX_CFG_VAL(MAX_UAPSD_INACTIVITY_INTERVALS, 10),
+ WCN36XX_CFG_VAL(ENABLE_DYNAMIC_WMMPS, 1),
+ WCN36XX_CFG_VAL(BURST_MODE_BE_TXOP_VALUE, 0),
+ WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 136),
+ WCN36XX_CFG_VAL(BTC_FAST_WLAN_CONN_PREF, 1),
+ WCN36XX_CFG_VAL(ENABLE_RTSCTS_HTVHT, 0),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN, 30000),
+ WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_BT_LEN, 120000),
+ WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 200),
+ WCN36XX_CFG_VAL(TOGGLE_ARP_BDRATES, 0),
+ WCN36XX_CFG_VAL(OPTIMIZE_CA_EVENT, 0),
+ WCN36XX_CFG_VAL(EXT_SCAN_CONC_MODE, 0),
+ WCN36XX_CFG_VAL(BAR_WAKEUP_HOST_DISABLE, 0),
+ WCN36XX_CFG_VAL(SAR_BOFFSET_CORRECTION_ENABLE, 0),
+ WCN36XX_CFG_VAL(BTC_DISABLE_WLAN_LINK_CRITICAL, 5),
+ WCN36XX_CFG_VAL(DISABLE_SCAN_DURING_SCO, 2),
+ WCN36XX_CFG_VAL(CONS_BCNMISS_COUNT, 0),
+ WCN36XX_CFG_VAL(UNITS_OF_BCN_WAIT_TIME, 0),
+ WCN36XX_CFG_VAL(TRIGGER_NULLFRAME_BEFORE_HB, 0),
+ WCN36XX_CFG_VAL(ENABLE_POWERSAVE_OFFLOAD, 0),
};
static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
@@ -121,6 +218,7 @@ static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
{
return caps & flag ? 1 : 0;
}
+
static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
@@ -145,6 +243,15 @@ static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
}
}
+static void
+wcn36xx_smd_set_bss_vht_params(struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_bss_params_v1 *bss)
+{
+ if (sta && sta->vht_cap.vht_supported)
+ bss->vht_capable = 1;
+}
+
static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
@@ -173,6 +280,37 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
}
}
+static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (sta->vht_cap.vht_supported) {
+ unsigned long caps = sta->vht_cap.cap;
+
+ sta_params->vht_capable = sta->vht_cap.vht_supported;
+ sta_params->vht_ldpc_enabled =
+ is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
+ if (get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
+ sta_params->vht_tx_mu_beamformee_capable =
+ is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+ if (sta_params->vht_tx_mu_beamformee_capable)
+ sta_params->vht_tx_bf_enabled = 1;
+ } else {
+ sta_params->vht_tx_mu_beamformee_capable = 0;
+ }
+ sta_params->vht_tx_channel_width_set = 0;
+ }
+}
+
+static void wcn36xx_smd_set_sta_ht_ldpc_params(struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (sta->ht_cap.ht_supported) {
+ sta_params->ht_ldpc_enabled =
+ is_cap_supported(sta->ht_cap.cap, IEEE80211_HT_CAP_LDPC_CODING);
+ }
+}
+
static void wcn36xx_smd_set_sta_default_ht_params(
struct wcn36xx_hal_config_sta_params *sta_params)
{
@@ -189,6 +327,31 @@ static void wcn36xx_smd_set_sta_default_ht_params(
sta_params->dsss_cck_mode_40mhz = 1;
}
+static void wcn36xx_smd_set_sta_default_vht_params(struct wcn36xx *wcn,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ sta_params->vht_capable = 1;
+ sta_params->vht_tx_mu_beamformee_capable = 1;
+ } else {
+ sta_params->vht_capable = 0;
+ sta_params->vht_tx_mu_beamformee_capable = 0;
+ }
+
+ sta_params->vht_ldpc_enabled = 0;
+ sta_params->vht_tx_channel_width_set = 0;
+ sta_params->vht_tx_bf_enabled = 0;
+}
+
+static void wcn36xx_smd_set_sta_default_ht_ldpc_params(struct wcn36xx *wcn,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params)
+{
+ if (wcn->rf_id == RF_IRIS_WCN3680)
+ sta_params->ht_ldpc_enabled = 1;
+ else
+ sta_params->ht_ldpc_enabled = 0;
+}
+
static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -241,9 +404,10 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
sta_params->aid = sta_priv->aid;
wcn36xx_smd_set_sta_ht_params(sta, sta_params);
memcpy(&sta_params->supported_rates, &sta_priv->supported_rates,
- sizeof(sta_priv->supported_rates));
+ sizeof(struct wcn36xx_hal_supported_rates));
} else {
- wcn36xx_set_default_rates(&sta_params->supported_rates);
+ wcn36xx_set_default_rates((struct wcn36xx_hal_supported_rates *)
+ &sta_params->supported_rates);
wcn36xx_smd_set_sta_default_ht_params(sta_params);
}
}
@@ -290,14 +454,20 @@ static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
hdr->len = msg_size + sizeof(*hdr);
}
-#define INIT_HAL_MSG(msg_body, type) \
+#define __INIT_HAL_MSG(msg_body, type, version) \
do { \
memset(&msg_body, 0, sizeof(msg_body)); \
msg_body.header.msg_type = type; \
- msg_body.header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
+ msg_body.header.msg_version = version; \
msg_body.header.len = sizeof(msg_body); \
} while (0) \
+#define INIT_HAL_MSG(msg_body, type) \
+ __INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION0)
+
+#define INIT_HAL_MSG_V1(msg_body, type) \
+ __INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION1)
+
#define INIT_HAL_PTT_MSG(p_msg_body, ppt_msg_len) \
do { \
memset(p_msg_body, 0, sizeof(*p_msg_body) + ppt_msg_len); \
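Note: the new INIT_HAL_MSG_V1()/WCN36XX_DIFF_STA_PARAMS_V1_NOVHT pair lets one struct layout serve both firmware generations: for older firmware the VHT-only tail is simply clipped off the advertised message length instead of keeping two struct definitions. A userspace sketch of the trick with illustrative field sizes.

#include <stdio.h>
#include <stdint.h>

struct msg_v1 {
        uint16_t len;
        uint8_t  legacy[16];
        uint8_t  vht_tail[10];    /* only meaningful to newer firmware */
};

#define DIFF_V1_NOVHT sizeof(((struct msg_v1 *)0)->vht_tail)

int main(void)
{
        struct msg_v1 m = { .len = sizeof(m) };
        int new_fw = 0;

        if (!new_fw)
                m.len -= DIFF_V1_NOVHT;   /* clip the VHT fields, as the driver
                                           * does with
                                           * WCN36XX_DIFF_STA_PARAMS_V1_NOVHT */
        printf("wire length: %u\n", (unsigned)m.len);
        return 0;
}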
@@ -449,6 +619,8 @@ int wcn36xx_smd_start(struct wcn36xx *wcn)
int ret;
int i;
size_t len;
+ int cfg_elements;
+ static struct wcn36xx_cfg_val *cfg_vals;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
@@ -461,9 +633,17 @@ int wcn36xx_smd_start(struct wcn36xx *wcn)
body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf;
len = body->header.len;
- for (i = 0; i < ARRAY_SIZE(wcn36xx_cfg_vals); i++) {
- ret = put_cfg_tlv_u32(wcn, &len, wcn36xx_cfg_vals[i].cfg_id,
- wcn36xx_cfg_vals[i].value);
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ cfg_vals = wcn3680_cfg_vals;
+ cfg_elements = ARRAY_SIZE(wcn3680_cfg_vals);
+ } else {
+ cfg_vals = wcn36xx_cfg_vals;
+ cfg_elements = ARRAY_SIZE(wcn36xx_cfg_vals);
+ }
+
+ for (i = 0; i < cfg_elements; i++) {
+ ret = put_cfg_tlv_u32(wcn, &len, cfg_vals[i].cfg_id,
+ cfg_vals[i].value);
if (ret)
goto out;
}
@@ -517,8 +697,10 @@ out:
return ret;
}
-int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif)
{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_init_scan_req_msg msg_body;
int ret;
@@ -526,6 +708,13 @@ int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode)
INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
msg_body.mode = mode;
+ if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) {
+ /* Notify BSSID with null data packet */
+ msg_body.frame_type = 2;
+ msg_body.notify = 1;
+ msg_body.scan_entry.bss_index[0] = vif_priv->bss_index;
+ msg_body.scan_entry.active_bss_count = 1;
+ }
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -607,8 +796,10 @@ out:
}
int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
- enum wcn36xx_hal_sys_mode mode)
+ enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif)
{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_finish_scan_req_msg msg_body;
int ret;
@@ -616,6 +807,14 @@ int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
msg_body.mode = mode;
+ msg_body.oper_channel = WCN36XX_HW_CHANNEL(wcn);
+ if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) {
+ /* Notify BSSID with null data packet */
+ msg_body.notify = 1;
+ msg_body.frame_type = 2;
+ msg_body.scan_entry.bss_index[0] = vif_priv->bss_index;
+ msg_body.scan_entry.active_bss_count = 1;
+ }
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -674,8 +873,10 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
msg_body->num_channel = min_t(u8, req->n_channels,
sizeof(msg_body->channels));
- for (i = 0; i < msg_body->num_channel; i++)
- msg_body->channels[i] = req->channels[i]->hw_value;
+ for (i = 0; i < msg_body->num_channel; i++) {
+ msg_body->channels[i] =
+ HW_VALUE_CHANNEL(req->channels[i]->hw_value);
+ }
msg_body->header.len -= WCN36XX_MAX_SCAN_IE_LEN;
@@ -1163,6 +1364,31 @@ static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
v1->p2p = orig->p2p;
}
+static void
+wcn36xx_smd_set_sta_params_v1(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct wcn36xx_hal_config_sta_params_v1 *sta_par)
+{
+ struct wcn36xx_sta *sta_priv = NULL;
+ struct wcn36xx_hal_config_sta_params sta_par_v0;
+
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, &sta_par_v0);
+ wcn36xx_smd_convert_sta_to_v1(wcn, &sta_par_v0, sta_par);
+
+ if (sta) {
+ sta_priv = wcn36xx_sta_to_priv(sta);
+ wcn36xx_smd_set_sta_vht_params(wcn, sta, sta_par);
+ wcn36xx_smd_set_sta_ht_ldpc_params(sta, sta_par);
+ memcpy(&sta_par->supported_rates, &sta_priv->supported_rates,
+ sizeof(sta_par->supported_rates));
+ } else {
+ wcn36xx_set_default_rates_v1(&sta_par->supported_rates);
+ wcn36xx_smd_set_sta_default_vht_params(wcn, sta_par);
+ wcn36xx_smd_set_sta_default_ht_ldpc_params(wcn, sta_par);
+ }
+}
+
static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
void *buf,
@@ -1197,53 +1423,69 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
}
static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
- const struct wcn36xx_hal_config_sta_req_msg *orig)
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
- struct wcn36xx_hal_config_sta_params_v1 *sta = &msg_body.sta_params;
+ struct wcn36xx_hal_config_sta_params_v1 *sta_params;
- INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ INIT_HAL_MSG_V1(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+ } else {
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
+ msg_body.header.len -= WCN36XX_DIFF_STA_PARAMS_V1_NOVHT;
+ }
- wcn36xx_smd_convert_sta_to_v1(wcn, &orig->sta_params,
- &msg_body.sta_params);
+ sta_params = &msg_body.sta_params;
+
+ wcn36xx_smd_set_sta_params_v1(wcn, vif, sta, sta_params);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
- sta->action, sta->sta_index, sta->bssid_index,
- sta->bssid, sta->type, sta->mac, sta->aid);
+ sta_params->action, sta_params->sta_index, sta_params->bssid_index,
+ sta_params->bssid, sta_params->type, sta_params->mac, sta_params->aid);
return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
}
-int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static int wcn36xx_smd_config_sta_v0(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct wcn36xx_hal_config_sta_req_msg msg;
struct wcn36xx_hal_config_sta_params *sta_params;
- int ret;
- mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
sta_params = &msg.sta_params;
wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_config_sta_v1(wcn, &msg);
- } else {
- PREPARE_HAL_BUF(wcn->hal_buf, msg);
+ PREPARE_HAL_BUF(wcn->hal_buf, msg);
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
- sta_params->action, sta_params->sta_index,
- sta_params->bssid_index, sta_params->bssid,
- sta_params->type, sta_params->mac, sta_params->aid);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
+ sta_params->action, sta_params->sta_index,
+ sta_params->bssid_index, sta_params->bssid,
+ sta_params->type, sta_params->mac, sta_params->aid);
+
+ return wcn36xx_smd_send_and_wait(wcn, msg.header.len);
+}
+
+int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24))
+ ret = wcn36xx_smd_config_sta_v1(wcn, vif, sta);
+ else
+ ret = wcn36xx_smd_config_sta_v0(wcn, vif, sta);
- ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
- }
if (ret) {
wcn36xx_err("Sending hal_config_sta failed\n");
goto out;
@@ -1261,189 +1503,14 @@ out:
return ret;
}
-static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
- const struct wcn36xx_hal_config_bss_req_msg *orig)
-{
- struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body;
- struct wcn36xx_hal_config_bss_params_v1 *bss;
- struct wcn36xx_hal_config_sta_params_v1 *sta;
- int ret;
-
- msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
- if (!msg_body)
- return -ENOMEM;
-
- INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
-
- bss = &msg_body->bss_params;
- sta = &bss->sta;
-
- /* convert orig to v1 */
- memcpy(&msg_body->bss_params.bssid,
- &orig->bss_params.bssid, ETH_ALEN);
- memcpy(&msg_body->bss_params.self_mac_addr,
- &orig->bss_params.self_mac_addr, ETH_ALEN);
-
- msg_body->bss_params.bss_type = orig->bss_params.bss_type;
- msg_body->bss_params.oper_mode = orig->bss_params.oper_mode;
- msg_body->bss_params.nw_type = orig->bss_params.nw_type;
-
- msg_body->bss_params.short_slot_time_supported =
- orig->bss_params.short_slot_time_supported;
- msg_body->bss_params.lla_coexist = orig->bss_params.lla_coexist;
- msg_body->bss_params.llb_coexist = orig->bss_params.llb_coexist;
- msg_body->bss_params.llg_coexist = orig->bss_params.llg_coexist;
- msg_body->bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
- msg_body->bss_params.lln_non_gf_coexist =
- orig->bss_params.lln_non_gf_coexist;
-
- msg_body->bss_params.lsig_tx_op_protection_full_support =
- orig->bss_params.lsig_tx_op_protection_full_support;
- msg_body->bss_params.rifs_mode = orig->bss_params.rifs_mode;
- msg_body->bss_params.beacon_interval = orig->bss_params.beacon_interval;
- msg_body->bss_params.dtim_period = orig->bss_params.dtim_period;
- msg_body->bss_params.tx_channel_width_set =
- orig->bss_params.tx_channel_width_set;
- msg_body->bss_params.oper_channel = orig->bss_params.oper_channel;
- msg_body->bss_params.ext_channel = orig->bss_params.ext_channel;
-
- msg_body->bss_params.reserved = orig->bss_params.reserved;
-
- memcpy(&msg_body->bss_params.ssid,
- &orig->bss_params.ssid,
- sizeof(orig->bss_params.ssid));
-
- msg_body->bss_params.action = orig->bss_params.action;
- msg_body->bss_params.rateset = orig->bss_params.rateset;
- msg_body->bss_params.ht = orig->bss_params.ht;
- msg_body->bss_params.obss_prot_enabled =
- orig->bss_params.obss_prot_enabled;
- msg_body->bss_params.rmf = orig->bss_params.rmf;
- msg_body->bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
- msg_body->bss_params.dual_cts_protection =
- orig->bss_params.dual_cts_protection;
-
- msg_body->bss_params.max_probe_resp_retry_limit =
- orig->bss_params.max_probe_resp_retry_limit;
- msg_body->bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
- msg_body->bss_params.proxy_probe_resp =
- orig->bss_params.proxy_probe_resp;
- msg_body->bss_params.edca_params_valid =
- orig->bss_params.edca_params_valid;
-
- memcpy(&msg_body->bss_params.acbe,
- &orig->bss_params.acbe,
- sizeof(orig->bss_params.acbe));
- memcpy(&msg_body->bss_params.acbk,
- &orig->bss_params.acbk,
- sizeof(orig->bss_params.acbk));
- memcpy(&msg_body->bss_params.acvi,
- &orig->bss_params.acvi,
- sizeof(orig->bss_params.acvi));
- memcpy(&msg_body->bss_params.acvo,
- &orig->bss_params.acvo,
- sizeof(orig->bss_params.acvo));
-
- msg_body->bss_params.ext_set_sta_key_param_valid =
- orig->bss_params.ext_set_sta_key_param_valid;
-
- memcpy(&msg_body->bss_params.ext_set_sta_key_param,
- &orig->bss_params.ext_set_sta_key_param,
- sizeof(orig->bss_params.acvo));
-
- msg_body->bss_params.wcn36xx_hal_persona =
- orig->bss_params.wcn36xx_hal_persona;
- msg_body->bss_params.spectrum_mgt_enable =
- orig->bss_params.spectrum_mgt_enable;
- msg_body->bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
- msg_body->bss_params.max_tx_power = orig->bss_params.max_tx_power;
-
- wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
- &msg_body->bss_params.sta);
-
- PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
-
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
- bss->bssid, bss->self_mac_addr, bss->bss_type,
- bss->oper_mode, bss->nw_type);
-
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
- sta->bssid, sta->action, sta->sta_index,
- sta->bssid_index, sta->aid, sta->type, sta->mac);
-
- ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
- kfree(msg_body);
-
- return ret;
-}
-
-
-static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- void *buf,
- size_t len)
-{
- struct wcn36xx_hal_config_bss_rsp_msg *rsp;
- struct wcn36xx_hal_config_bss_rsp_params *params;
- struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-
- if (len < sizeof(*rsp))
- return -EINVAL;
-
- rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
- params = &rsp->bss_rsp_params;
-
- if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
- wcn36xx_warn("hal config bss response failure: %d\n",
- params->status);
- return -EIO;
- }
-
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
- " sta_idx %d self_idx %d bcast_idx %d mac %pM"
- " power %d ucast_dpu_signature %d\n",
- params->status, params->bss_index, params->dpu_desc_index,
- params->bss_sta_index, params->bss_self_sta_index,
- params->bss_bcast_sta_idx, params->mac,
- params->tx_mgmt_power, params->ucast_dpu_signature);
-
- vif_priv->bss_index = params->bss_index;
-
- if (sta) {
- struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
- sta_priv->bss_sta_index = params->bss_sta_index;
- sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
- }
-
- vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
-
- return 0;
-}
-
-int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, const u8 *bssid,
- bool update)
+static void wcn36xx_smd_set_bss_params(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const u8 *bssid,
+ bool update,
+ struct wcn36xx_hal_config_bss_params *bss)
{
- struct wcn36xx_hal_config_bss_req_msg *msg;
- struct wcn36xx_hal_config_bss_params *bss;
- struct wcn36xx_hal_config_sta_params *sta_params;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
- int ret;
-
- mutex_lock(&wcn->hal_mutex);
- msg = kzalloc(sizeof(*msg), GFP_KERNEL);
- if (!msg) {
- ret = -ENOMEM;
- goto out;
- }
- INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ);
-
- bss = &msg->bss_params;
- sta_params = &bss->sta;
WARN_ON(is_zero_ether_addr(bssid));
@@ -1498,7 +1565,6 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
bss->reserved = 0;
- wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
/* wcn->ssid is only valid in AP and IBSS mode */
bss->ssid.length = vif_priv->ssid.length;
@@ -1523,6 +1589,154 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bss->action = update;
vif_priv->bss_type = bss->bss_type;
+}
+
+static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta_80211,
+ const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body;
+ struct wcn36xx_hal_config_bss_params_v1 *bss;
+ struct wcn36xx_hal_config_bss_params bss_v0;
+ struct wcn36xx_hal_config_sta_params_v1 *sta;
+ struct cfg80211_chan_def *chandef;
+ int ret;
+
+ msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
+ if (!msg_body)
+ return -ENOMEM;
+
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
+ INIT_HAL_MSG_V1((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
+ } else {
+ INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
+ msg_body->header.len -= WCN36XX_DIFF_BSS_PARAMS_V1_NOVHT;
+ }
+
+ bss = &msg_body->bss_params;
+ sta = &bss->sta;
+
+ memset(&bss_v0, 0x00, sizeof(bss_v0));
+ wcn36xx_smd_set_bss_params(wcn, vif, sta_80211, bssid, update, &bss_v0);
+ wcn36xx_smd_set_sta_params_v1(wcn, vif, sta_80211, sta);
+
+ /* convert orig to v1 */
+ memcpy(bss->bssid, &bss_v0.bssid, ETH_ALEN);
+ memcpy(bss->self_mac_addr, &bss_v0.self_mac_addr, ETH_ALEN);
+
+ bss->bss_type = bss_v0.bss_type;
+ bss->oper_mode = bss_v0.oper_mode;
+ bss->nw_type = bss_v0.nw_type;
+
+ bss->short_slot_time_supported =
+ bss_v0.short_slot_time_supported;
+ bss->lla_coexist = bss_v0.lla_coexist;
+ bss->llb_coexist = bss_v0.llb_coexist;
+ bss->llg_coexist = bss_v0.llg_coexist;
+ bss->ht20_coexist = bss_v0.ht20_coexist;
+ bss->lln_non_gf_coexist = bss_v0.lln_non_gf_coexist;
+
+ bss->lsig_tx_op_protection_full_support =
+ bss_v0.lsig_tx_op_protection_full_support;
+ bss->rifs_mode = bss_v0.rifs_mode;
+ bss->beacon_interval = bss_v0.beacon_interval;
+ bss->dtim_period = bss_v0.dtim_period;
+ bss->tx_channel_width_set = bss_v0.tx_channel_width_set;
+ bss->oper_channel = bss_v0.oper_channel;
+
+ if (wcn->hw->conf.chandef.width == NL80211_CHAN_WIDTH_80) {
+ chandef = &wcn->hw->conf.chandef;
+ bss->ext_channel = HW_VALUE_PHY(chandef->chan->hw_value);
+ } else {
+ bss->ext_channel = bss_v0.ext_channel;
+ }
+
+ bss->reserved = bss_v0.reserved;
+
+ memcpy(&bss->ssid, &bss_v0.ssid,
+ sizeof(bss_v0.ssid));
+
+ bss->action = bss_v0.action;
+ bss->rateset = bss_v0.rateset;
+ bss->ht = bss_v0.ht;
+ bss->obss_prot_enabled = bss_v0.obss_prot_enabled;
+ bss->rmf = bss_v0.rmf;
+ bss->ht_oper_mode = bss_v0.ht_oper_mode;
+ bss->dual_cts_protection = bss_v0.dual_cts_protection;
+
+ bss->max_probe_resp_retry_limit =
+ bss_v0.max_probe_resp_retry_limit;
+ bss->hidden_ssid = bss_v0.hidden_ssid;
+ bss->proxy_probe_resp = bss_v0.proxy_probe_resp;
+ bss->edca_params_valid = bss_v0.edca_params_valid;
+
+ memcpy(&bss->acbe, &bss_v0.acbe,
+ sizeof(bss_v0.acbe));
+ memcpy(&bss->acbk, &bss_v0.acbk,
+ sizeof(bss_v0.acbk));
+ memcpy(&bss->acvi, &bss_v0.acvi,
+ sizeof(bss_v0.acvi));
+ memcpy(&bss->acvo, &bss_v0.acvo,
+ sizeof(bss_v0.acvo));
+
+ bss->ext_set_sta_key_param_valid =
+ bss_v0.ext_set_sta_key_param_valid;
+
+	memcpy(&bss->ext_set_sta_key_param,
+	       &bss_v0.ext_set_sta_key_param,
+	       sizeof(bss_v0.ext_set_sta_key_param));
+
+ bss->wcn36xx_hal_persona = bss_v0.wcn36xx_hal_persona;
+ bss->spectrum_mgt_enable = bss_v0.spectrum_mgt_enable;
+ bss->tx_mgmt_power = bss_v0.tx_mgmt_power;
+ bss->max_tx_power = bss_v0.max_tx_power;
+
+ wcn36xx_smd_set_bss_vht_params(vif, sta_80211, bss);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+ bss->bssid, bss->self_mac_addr, bss->bss_type,
+ bss->oper_mode, bss->nw_type);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+ kfree(msg_body);
+
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_v0(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const u8 *bssid,
+ bool update)
+{
+ struct wcn36xx_hal_config_bss_req_msg *msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ);
+
+ bss = &msg->bss_params;
+ sta_params = &bss->sta;
+
+ wcn36xx_smd_set_bss_params(wcn, vif, sta, bssid, update, bss);
+ wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, (*msg));
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
@@ -1536,13 +1750,69 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
sta_params->aid, sta_params->type,
sta_params->mac);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_config_bss_v1(wcn, msg);
- } else {
- PREPARE_HAL_BUF(wcn->hal_buf, (*msg));
+ ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len);
+ kfree(msg);
+
+ return ret;
+}
+
+static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ void *buf,
+ size_t len)
+{
+ struct wcn36xx_hal_config_bss_rsp_msg *rsp;
+ struct wcn36xx_hal_config_bss_rsp_params *params;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
+ params = &rsp->bss_rsp_params;
+
+ if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
+ wcn36xx_warn("hal config bss response failure: %d\n",
+ params->status);
+ return -EIO;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
+ " sta_idx %d self_idx %d bcast_idx %d mac %pM"
+ " power %d ucast_dpu_signature %d\n",
+ params->status, params->bss_index, params->dpu_desc_index,
+ params->bss_sta_index, params->bss_self_sta_index,
+ params->bss_bcast_sta_idx, params->mac,
+ params->tx_mgmt_power, params->ucast_dpu_signature);
- ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len);
+ vif_priv->bss_index = params->bss_index;
+
+ if (sta) {
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+ sta_priv->bss_sta_index = params->bss_sta_index;
+ sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
}
+
+ vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
+
+ return 0;
+}
+
+int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+{
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24))
+ ret = wcn36xx_smd_config_bss_v1(wcn, vif, sta, bssid, update);
+ else
+ ret = wcn36xx_smd_config_bss_v0(wcn, vif, sta, bssid, update);
+
if (ret) {
wcn36xx_err("Sending hal_config_bss failed\n");
goto out;
@@ -1552,12 +1822,10 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
sta,
wcn->hal_buf,
wcn->hal_rsp_len);
- if (ret) {
+ if (ret)
wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
- goto out;
- }
+
out:
- kfree(msg);
mutex_unlock(&wcn->hal_mutex);
return ret;
}
@@ -1924,6 +2192,7 @@ out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
+
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
@@ -1953,6 +2222,7 @@ out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
+
/* Notice: This function should be called after associated, or else it
* will be invalid
*/
@@ -2080,6 +2350,8 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
+ if (wcn->rf_id == RF_IRIS_WCN3680)
+ set_feat_caps(msg_body.feat_caps, DOT11AC);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
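For reference, set_feat_caps() marks one capability in the multi-word bitmap exchanged with firmware; a hedged sketch of the presumed semantics (internals assumed, not shown in this hunk):

/* sketch: capability cap lands in bit (cap % 32) of word (cap / 32) */
static inline void set_feat_caps(u32 *bitmap, u32 cap)
{
	bitmap[cap / 32] |= BIT(cap % 32);
}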
@@ -2102,6 +2374,22 @@ out:
return ret;
}
+static int wcn36xx_smd_add_ba_session_rsp(void *buf, int len, u8 *session)
+{
+ struct wcn36xx_hal_add_ba_session_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_add_ba_session_rsp_msg *)buf;
+ if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS)
+ return rsp->status;
+
+ *session = rsp->ba_session_id;
+
+ return 0;
+}
+
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
u16 tid,
@@ -2110,6 +2398,7 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u8 sta_index)
{
struct wcn36xx_hal_add_ba_session_req_msg msg_body;
+ u8 session_id;
int ret;
mutex_lock(&wcn->hal_mutex);
@@ -2135,17 +2424,20 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
wcn36xx_err("Sending hal_add_ba_session failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ ret = wcn36xx_smd_add_ba_session_rsp(wcn->hal_buf, wcn->hal_rsp_len,
+ &session_id);
if (ret) {
wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
goto out;
}
+
+ ret = session_id;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
-int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id)
{
struct wcn36xx_hal_add_ba_req_msg msg_body;
int ret;
@@ -2153,7 +2445,7 @@ int wcn36xx_smd_add_ba(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
- msg_body.session_id = 0;
+ msg_body.session_id = session_id;
msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2212,7 +2504,7 @@ static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
return rsp->status;
}
-int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
@@ -2221,7 +2513,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
- msg_body.session_id = 0;
+ msg_body.session_id = session_id;
msg_body.candidate_cnt = 1;
msg_body.header.len += sizeof(*candidate);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2229,7 +2521,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
candidate = (struct wcn36xx_hal_trigger_ba_req_candidate *)
(wcn->hal_buf + sizeof(msg_body));
candidate->sta_index = sta_index;
- candidate->tid_bitmap = 1;
+ candidate->tid_bitmap = 1 << tid;
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
@@ -2610,6 +2902,7 @@ static void wcn36xx_ind_smd_work(struct work_struct *work)
kfree(hal_ind_msg);
}
}
+
int wcn36xx_smd_open(struct wcn36xx *wcn)
{
wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index ff15df8ab56f..b1d8083d9d9d 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -59,11 +59,13 @@ void wcn36xx_smd_close(struct wcn36xx *wcn);
int wcn36xx_smd_load_nv(struct wcn36xx *wcn);
int wcn36xx_smd_start(struct wcn36xx *wcn);
int wcn36xx_smd_stop(struct wcn36xx *wcn);
-int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode);
int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel);
int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel);
-int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
- enum wcn36xx_hal_sys_mode mode);
+int wcn36xx_smd_finish_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif);
+int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
+ struct ieee80211_vif *vif);
+
int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t channel_count);
int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
@@ -132,9 +134,9 @@ int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
u16 *ssn,
u8 direction,
u8 sta_index);
-int wcn36xx_smd_add_ba(struct wcn36xx *wcn);
+int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id);
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
-int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
+int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u8 session_id);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
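Taken together, the prototype changes above thread the firmware-assigned BA session id from session setup into the follow-up requests instead of hard-coding 0; a hedged caller sketch (ordering and the direction value are assumptions, not taken from this patch):

int session_id;

session_id = wcn36xx_smd_add_ba_session(wcn, sta, tid, &ssn,
					0 /* direction, assumed originator */,
					sta_index);
if (session_id >= 0) {
	wcn36xx_smd_trigger_ba(wcn, sta_index, tid, session_id);
	wcn36xx_smd_add_ba(wcn, session_id);
}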
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index a6902371e89c..1b831157ede1 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -23,11 +23,214 @@ static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
return 100 - ((bd->phy_stat0 >> 24) & 0xff);
}
+struct wcn36xx_rate {
+ u16 bitrate;
+ u16 mcs_or_legacy_index;
+ enum mac80211_rx_encoding encoding;
+ enum mac80211_rx_encoding_flags encoding_flags;
+ enum rate_info_bw bw;
+};
+
+static const struct wcn36xx_rate wcn36xx_rate_table[] = {
+ /* 11b rates */
+ { 10, 0, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 20, 1, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 55, 2, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 110, 3, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+
+ /* 11b SP (short preamble) */
+ { 10, 0, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+ { 20, 1, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+ { 55, 2, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+ { 110, 3, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
+
+ /* 11ag */
+ { 60, 4, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 90, 5, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 120, 6, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 180, 7, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 240, 8, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 360, 9, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 480, 10, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+ { 540, 11, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
+
+ /* 11n */
+ { 65, 0, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 130, 1, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 195, 2, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 260, 3, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 390, 4, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 520, 5, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 585, 6, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 650, 7, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+
+ /* 11n SGI */
+ { 72, 0, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 144, 1, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 217, 2, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 289, 3, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 434, 4, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 578, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 650, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 722, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+
+ /* 11n GF (greenfield) */
+ { 65, 0, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 130, 1, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 195, 2, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 260, 3, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 390, 4, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 520, 5, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 585, 6, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+ { 650, 7, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
+
+ /* 11n CB (channel bonding) */
+ { 135, 0, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 270, 1, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 405, 2, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 540, 3, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 810, 4, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1080, 5, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1215, 6, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+
+ /* 11n CB + SGI */
+ { 150, 0, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 300, 1, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 450, 2, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 600, 3, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 900, 4, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1200, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1500, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11n GF + CB */
+ { 135, 0, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 270, 1, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 405, 2, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 540, 3, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 810, 4, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 1080, 5, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 1215, 6, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 20 MHz 800ns GI MCS 0-8 */
+ { 65, 0, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 130, 1, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 195, 2, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 260, 3, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 390, 4, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 520, 5, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 585, 6, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 650, 7, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+ { 780, 8, RX_ENC_HT, 0, RATE_INFO_BW_20 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 20 MHz 400ns SGI MCS 6-8 */
+ { 655, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 722, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+ { 866, 8, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 40 MHz 800ns GI MCS 0-9 */
+ { 135, 0, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 270, 1, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 405, 2, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 540, 3, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 810, 4, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1080, 5, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1215, 6, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1620, 8, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+ { 1800, 9, RX_ENC_HT, 0, RATE_INFO_BW_40 },
+
+ /* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 40 MHz 400ns SGI MCS 5-7 */
+ { 1200, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1500, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac reserved index */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+	/* 11ac 40 MHz 400ns SGI MCS 8-9 */
+ { 1800, 8, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 2000, 9, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac reserved index */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 80 MHz 800ns GI MCS 0-7 */
+	{ 292, 0, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 585, 1, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 877, 2, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 1170, 3, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 1755, 4, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 2340, 5, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 2632, 6, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 2925, 7, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+
+	/* 11ac reserved index */
+	{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+	/* 11ac 80 MHz 800ns GI MCS 8-9 */
+	{ 3510, 8, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+	{ 3900, 9, RX_ENC_HT, 0, RATE_INFO_BW_80 },
+
+	/* 11ac reserved indices */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+	/* 11ac 80 MHz 400ns SGI MCS 6-7 */
+ { 2925, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+ { 3250, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+
+ /* 11ac reserved index */
+ { 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
+
+ /* 11ac 80 MHz 400ns SGI MCS 8-9 */
+ { 3900, 8, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+ { 4333, 9, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
+};
+
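A note on units with a one-line sketch: the bitrate column is in 100 kbps steps (so { 540, ... } is 54 Mbit/s), and the repeated HT40 rows are deliberate padding so a reserved rate_id still resolves to a harmless entry.

/* sketch: converting a received descriptor's rate_id to kbps */
u32 kbps = wcn36xx_rate_table[bd->rate_id].bitrate * 100;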
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
+ const struct wcn36xx_rate *rate;
struct ieee80211_hdr *hdr;
struct wcn36xx_rx_bd *bd;
+ struct ieee80211_supported_band *sband;
u16 fc, sn;
/*
@@ -49,19 +252,11 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
fc = __le16_to_cpu(hdr->frame_control);
sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
- /* When scanning associate beacons to this */
- if (ieee80211_is_beacon(hdr->frame_control) && wcn->scan_freq) {
- status.freq = wcn->scan_freq;
- status.band = wcn->scan_band;
- } else {
- status.freq = WCN36XX_CENTER_FREQ(wcn);
- status.band = WCN36XX_BAND(wcn);
- }
-
+ status.freq = WCN36XX_CENTER_FREQ(wcn);
+ status.band = WCN36XX_BAND(wcn);
status.mactime = 10;
status.signal = -get_rssi0(bd);
status.antenna = 1;
- status.rate_idx = 1;
status.flag = 0;
status.rx_flags = 0;
status.flag |= RX_FLAG_IV_STRIPPED |
@@ -70,6 +265,28 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+ if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
+ rate = &wcn36xx_rate_table[bd->rate_id];
+ status.encoding = rate->encoding;
+ status.enc_flags = rate->encoding_flags;
+ status.bw = rate->bw;
+ status.rate_idx = rate->mcs_or_legacy_index;
+ sband = wcn->hw->wiphy->bands[status.band];
+ status.nss = 1;
+
+ if (status.band == NL80211_BAND_5GHZ &&
+ status.encoding == RX_ENC_LEGACY &&
+ status.rate_idx >= sband->n_bitrates) {
+			/* no DSSS rates in the 5 GHz rate table */
+ status.rate_idx -= 4;
+ }
+ } else {
+ status.encoding = 0;
+ status.bw = 0;
+ status.enc_flags = 0;
+ status.rate_idx = 0;
+ }
+
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
if (ieee80211_is_beacon(hdr->frame_control)) {
@@ -100,7 +317,8 @@ static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
bd->pdu.mpdu_header_off;
bd->pdu.mpdu_len = len;
bd->pdu.tid = tid;
- bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
+ /* Use seq number generated by mac80211 */
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST;
}
static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
@@ -160,9 +378,11 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
bool bcast)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *__vif_priv = NULL;
- bool is_data_qos;
+ bool is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
+ u16 tid = 0;
bd->bd_rate = WCN36XX_BD_RATE_DATA;
@@ -191,9 +411,21 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
- if (ieee80211_is_nullfunc(hdr->frame_control) ||
- (sta_priv && !sta_priv->is_data_encrypted))
+ if (is_data_qos) {
+ tid = ieee80211_get_tid(hdr);
+ /* TID->QID is one-to-one mapping */
+ bd->queue_id = tid;
+ }
+
+ if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT ||
+ (sta_priv && !sta_priv->is_data_encrypted)) {
bd->dpu_ne = 1;
+ }
+
+ if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
+		/* Don't use a regular queue for null frames (no AMPDU) */
+ bd->queue_id = WCN36XX_TX_U_WQ_ID;
+ }
if (bcast) {
bd->ub = 1;
@@ -201,13 +433,11 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
}
*vif_priv = __vif_priv;
- is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
-
wcn36xx_set_tx_pdu(bd,
is_data_qos ?
sizeof(struct ieee80211_qos_hdr) :
sizeof(struct ieee80211_hdr_3addr),
- skb->len, sta_priv ? sta_priv->tid : 0);
+ skb->len, tid);
if (sta_priv && is_data_qos)
wcn36xx_tx_start_ampdu(wcn, sta_priv, skb);
@@ -287,9 +517,9 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.dpu_rf = WCN36XX_BMU_WQ_TX;
- bd.tx_comp = !!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS);
- if (bd.tx_comp) {
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
+
spin_lock_irqsave(&wcn->dxe_lock, flags);
if (wcn->tx_ack_skb) {
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
@@ -302,10 +532,15 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
/* Only one at a time is supported by fw. Stop the TX queues
* until the ack status gets back.
- *
- * TODO: Add watchdog in case FW does not answer
*/
ieee80211_stop_queues(wcn->hw);
+
+			/* Arm TX watchdog in case no TX IRQ or ack indication arrives */
+ mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
+
+ /* Request ack indication from the firmware */
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
+ bd.tx_comp = 1;
}
	/* Data frames served first */
@@ -319,7 +554,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.tx_bd_sign = 0xbdbdbdbd;
ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
- if (ret && bd.tx_comp) {
+ if (ret && (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
/* If the skb has not been transmitted,
* don't keep a reference to it.
*/
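Because queues stay stopped until the ack status indication returns, the new tx_ack_timer acts as a 100 ms watchdog against a silent firmware. Its handler lives outside this excerpt; a hedged sketch of what such a handler is assumed to do (name and body illustrative only):

static void wcn36xx_tx_ack_timer_cb(struct timer_list *t)
{
	struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);

	/* give up waiting for the ack and let traffic flow again */
	ieee80211_wake_queues(wcn->hw);
}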
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index a58f313983b9..71fa9992b118 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -83,7 +83,11 @@ enum wcn36xx_ampdu_state {
WCN36XX_AMPDU_OPERATIONAL,
};
-#define WCN36XX_HW_CHANNEL(__wcn) (__wcn->hw->conf.chandef.chan->hw_value)
+#define HW_VALUE_PHY_SHIFT 8
+#define HW_VALUE_PHY(hw_value) ((hw_value) >> HW_VALUE_PHY_SHIFT)
+#define HW_VALUE_CHANNEL(hw_value) ((hw_value) & 0xFF)
+#define WCN36XX_HW_CHANNEL(__wcn)\
+ HW_VALUE_CHANNEL(__wcn->hw->conf.chandef.chan->hw_value)
#define WCN36XX_BAND(__wcn) (__wcn->hw->conf.chandef.chan->band)
#define WCN36XX_CENTER_FREQ(__wcn) (__wcn->hw->conf.chandef.chan->center_freq)
#define WCN36XX_LISTEN_INTERVAL(__wcn) (__wcn->hw->conf.listen_interval)
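With this split, a channel's hw_value packs a PHY-table index in the upper byte and the IEEE channel number in the lower byte; a quick illustration of the macros above (the PHY index value is an assumption for the example):

/* sketch: channel 36 carried with an assumed PHY index of 5 */
u16 hw_value = (5 << HW_VALUE_PHY_SHIFT) | 36;
/* HW_VALUE_PHY(hw_value) == 5, HW_VALUE_CHANNEL(hw_value) == 36 */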
@@ -92,6 +96,7 @@ enum wcn36xx_ampdu_state {
#define RF_UNKNOWN 0x0000
#define RF_IRIS_WCN3620 0x3620
+#define RF_IRIS_WCN3680 0x3680
static inline void buff_to_be(u32 *buf, size_t len)
{
@@ -122,6 +127,7 @@ struct wcn36xx_vif {
enum wcn36xx_hal_bss_type bss_type;
/* Power management */
+ bool allow_bmps;
enum wcn36xx_power_state pw_state;
u8 bss_index;
@@ -167,7 +173,7 @@ struct wcn36xx_sta {
u8 bss_dpu_desc_index;
bool is_data_encrypted;
/* Rates */
- struct wcn36xx_hal_supported_rates supported_rates;
+ struct wcn36xx_hal_supported_rates_v1 supported_rates;
spinlock_t ampdu_lock; /* protects next two fields */
enum wcn36xx_ampdu_state ampdu_state[16];
@@ -223,10 +229,10 @@ struct wcn36xx {
spinlock_t hal_ind_lock;
struct list_head hal_ind_queue;
- struct work_struct scan_work;
struct cfg80211_scan_request *scan_req;
- int scan_freq;
- int scan_band;
+ bool sw_scan;
+ u8 sw_scan_opchannel;
+ struct ieee80211_vif *sw_scan_vif;
struct mutex scan_lock;
bool scan_aborted;
@@ -245,6 +251,7 @@ struct wcn36xx {
struct wcn36xx_dxe_mem_pool data_mem_pool;
struct sk_buff *tx_ack_skb;
+ struct timer_list tx_ack_timer;
/* RF module */
unsigned rf_id;
@@ -268,6 +275,7 @@ static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
wcn->fw_revision == revision);
}
void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates);
+void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates);
static inline
struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 0851d2bede89..1c42410d68e1 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1739,7 +1739,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
return wil_p2p_cancel_listen(vif, cookie);
}
-/**
+/*
* find a specific IE in a list of IEs
* return a pointer to the beginning of IE in the list
* or NULL if not found
@@ -1766,7 +1766,7 @@ static const u8 *_wil_cfg80211_find_ie(const u8 *ies, u16 ies_len, const u8 *ie,
ies_len);
}
-/**
+/*
* merge the IEs in two lists into a single list.
* do not include IEs from the second list which exist in the first list.
* add only vendor specific IEs from second list to keep
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 11d0c79e9056..2d618f90afa7 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -443,10 +443,10 @@ DEFINE_DEBUGFS_ATTRIBUTE(wil_fops_ulong, wil_debugfs_ulong_get,
/**
* wil6210_debugfs_init_offset - create set of debugfs files
- * @wil - driver's context, used for printing
- * @dbg - directory on the debugfs, where files will be created
- * @base - base address used in address calculation
- * @tbl - table with file descriptions. Should be terminated with empty element.
+ * @wil: driver's context, used for printing
+ * @dbg: directory on the debugfs, where files will be created
+ * @base: base address used in address calculation
+ * @tbl: table with file descriptions. Should be terminated with empty element.
*
* Creates files accordingly to the @tbl.
*/
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index b1480b41cd3a..d13d081fdcc6 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -645,9 +645,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
return IRQ_HANDLED;
}
-/**
- * thread IRQ handler
- */
+/* thread IRQ handler */
static irqreturn_t wil6210_thread_irq(int irq, void *cookie)
{
struct wil6210_priv *wil = cookie;
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index 9b4ca6b256d2..a2f7b4c1da48 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -29,8 +29,7 @@ void wil_pmc_init(struct wil6210_priv *wil)
mutex_init(&wil->pmc.lock);
}
-/**
- * Allocate the physical ring (p-ring) and the required
+/* Allocate the physical ring (p-ring) and the required
* number of descriptors of required size.
* Initialize the descriptors as required by pmc dma.
* The descriptors' buffers dwords are initialized to hold
@@ -221,8 +220,7 @@ no_release_err:
mutex_unlock(&pmc->lock);
}
-/**
- * Traverse the p-ring and release all buffers.
+/* Traverse the p-ring and release all buffers.
* At the end release the p-ring memory
*/
void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
@@ -299,8 +297,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
mutex_unlock(&pmc->lock);
}
-/**
- * Status of the last operation requested via debugfs: alloc/free/read.
+/* Status of the last operation requested via debugfs: alloc/free/read.
* 0 - success or negative errno
*/
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
@@ -311,8 +308,7 @@ int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
return wil->pmc.last_cmd_status;
}
-/**
- * Read from required position up to the end of current descriptor,
+/* Read from required position up to the end of current descriptor,
* depends on descriptor size configured during alloc request.
*/
ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 080e5aa60bea..cc830c795b33 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -249,8 +249,7 @@ static void wil_vring_free(struct wil6210_priv *wil, struct wil_ring *vring)
vring->ctx = NULL;
}
-/**
- * Allocate one skb for Rx VRING
+/* Allocate one skb for Rx VRING
*
* Safe to call from IRQ
*/
@@ -295,8 +294,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct wil_ring *vring,
return 0;
}
-/**
- * Adds radiotap header
+/* Adds radiotap header
*
* Any error indicated as "Bad FCS"
*
@@ -432,8 +430,7 @@ static int wil_rx_get_cid_by_skb(struct wil6210_priv *wil, struct sk_buff *skb)
return cid;
}
-/**
- * reap 1 frame from @swhead
+/* reap 1 frame from @swhead
*
* Rx descriptor copied to skb->cb
*
@@ -597,8 +594,7 @@ again:
return skb;
}
-/**
- * allocate and fill up to @count buffers in rx ring
+/* allocate and fill up to @count buffers in rx ring
* buffers posted at @swtail
* Note: we have a single RX queue for servicing all VIFs, but we
* allocate skbs with headroom according to main interface only. This
@@ -1002,8 +998,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
wil_netif_rx(skb, ndev, cid, stats, true);
}
-/**
- * Proceed all completed skb's from Rx VRING
+/* Proceed all completed skb's from Rx VRING
*
* Safe to call from NAPI poll, i.e. softirq with interrupts enabled
*/
@@ -1629,8 +1624,7 @@ void wil_tx_desc_set_nr_frags(struct vring_tx_desc *d, int nr_frags)
d->mac.d[2] |= (nr_frags << MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
}
-/**
- * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
* @skb is used to obtain the protocol and headers length.
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
* 2 - middle, 3 - last descriptor.
@@ -1660,8 +1654,7 @@ static void wil_tx_desc_offload_setup_tso(struct vring_tx_desc *d,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
}
-/**
- * Sets the descriptor @d up for csum. The corresponding
+/* Sets the descriptor @d up for csum. The corresponding
* @skb is used to obtain the protocol and headers length.
* Returns the protocol: 0 - not TCP, 1 - TCPv4, 2 - TCPv6.
* Note, if d==NULL, the function only returns the protocol result.
@@ -2216,8 +2209,7 @@ static int wil_tx_ring(struct wil6210_priv *wil, struct wil6210_vif *vif,
return rc;
}
-/**
- * Check status of tx vrings and stop/wake net queues if needed
+/* Check status of tx vrings and stop/wake net queues if needed
* It will start/stop net queues of a specific VIF net_device.
*
* This function does one of two checks:
@@ -2419,8 +2411,7 @@ void wil_tx_latency_calc(struct wil6210_priv *wil, struct sk_buff *skb,
sta->stats.tx_latency_max_us = skb_time_us;
}
-/**
- * Clean up transmitted skb's from the Tx VRING
+/* Clean up transmitted skb's from the Tx VRING
*
* Return number of descriptors cleared
*
@@ -2460,8 +2451,7 @@ int wil_tx_complete(struct wil6210_vif *vif, int ringid)
while (!wil_ring_is_empty(vring)) {
int new_swtail;
struct wil_ctx *ctx = &vring->ctx[vring->swtail];
- /**
- * For the fragmented skb, HW will set DU bit only for the
+ /* For the fragmented skb, HW will set DU bit only for the
* last fragment. look for it.
* In TSO the first DU will include hdr desc
*/
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 7bfe867c7509..8ca2ce51c83e 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -147,9 +147,7 @@ out_free:
return rc;
}
-/**
- * Allocate one skb for Rx descriptor RING
- */
+/* Allocate one skb for Rx descriptor RING */
static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil,
struct wil_ring *ring, u32 i)
{
@@ -1152,8 +1150,7 @@ wil_get_next_tx_status_msg(struct wil_status_ring *sring, u8 *dr_bit,
*msg = *_msg;
}
-/**
- * Clean up transmitted skb's from the Tx descriptor RING.
+/* Clean up transmitted skb's from the Tx descriptor RING.
* Return number of descriptors cleared.
*/
int wil_tx_sring_handler(struct wil6210_priv *wil,
@@ -1314,8 +1311,7 @@ again:
return desc_cnt;
}
-/**
- * Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
+/* Sets the descriptor @d up for csum and/or TSO offloading. The corresponding
* @skb is used to obtain the protocol and headers length.
* @tso_desc_type is a descriptor type for TSO: 0 - a header, 1 - first data,
* 2 - middle, 3 - last descriptor.
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.c b/drivers/net/wireless/ath/wil6210/wil_platform.c
index 10e10dc9fedf..e152dc29d177 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.c
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.c
@@ -15,8 +15,7 @@ void wil_platform_modexit(void)
{
}
-/**
- * wil_platform_init() - wil6210 platform module init
+/* wil_platform_init() - wil6210 platform module init
*
* The function must be called before all other functions in this module.
* It returns a handle which is used with the rest of the API
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index c7136ce567ee..421aebbb49e5 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -32,7 +32,7 @@ MODULE_PARM_DESC(led_id,
#define WIL_WMI_PCP_STOP_TO_MS 5000
/**
- * WMI event receiving - theory of operations
+ * DOC: WMI event receiving - theory of operations
*
* When firmware about to report WMI event, it fills memory area
* in the mailbox and raises misc. IRQ. Thread interrupt handler invoked for
@@ -49,7 +49,7 @@ MODULE_PARM_DESC(led_id,
*/
/**
- * Addressing - theory of operations
+ * DOC: Addressing - theory of operations
*
* There are several buses present on the WIL6210 card.
* Same memory areas are visible at different address on
@@ -66,8 +66,7 @@ MODULE_PARM_DESC(led_id,
* AHB address must be used.
*/
-/**
- * @sparrow_fw_mapping provides memory remapping table for sparrow
+/* sparrow_fw_mapping provides memory remapping table for sparrow
*
* array size should be in sync with the declaration in the wil6210.h
*
@@ -103,16 +102,14 @@ const struct fw_map sparrow_fw_mapping[] = {
{0x800000, 0x804000, 0x940000, "uc_data", false, false},
};
-/**
- * @sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
+/* sparrow_d0_mac_rgf_ext - mac_rgf_ext section for Sparrow D0
* it is a bit larger to support extra features
*/
const struct fw_map sparrow_d0_mac_rgf_ext = {
0x88c000, 0x88c500, 0x88c000, "mac_rgf_ext", true, true
};
-/**
- * @talyn_fw_mapping provides memory remapping table for Talyn
+/* talyn_fw_mapping provides memory remapping table for Talyn
*
* array size should be in sync with the declaration in the wil6210.h
*
@@ -154,8 +151,7 @@ const struct fw_map talyn_fw_mapping[] = {
{0x800000, 0x808000, 0xa78000, "uc_data", false, false},
};
-/**
- * @talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
+/* talyn_mb_fw_mapping provides memory remapping table for Talyn-MB
*
* array size should be in sync with the declaration in the wil6210.h
*
@@ -229,7 +225,7 @@ u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
/**
* return AHB address for given firmware internal (linker) address
- * @x - internal address
+ * @x: internal address
* If address have no valid AHB mapping, return 0
*/
static u32 wmi_addr_remap(u32 x)
@@ -247,7 +243,7 @@ static u32 wmi_addr_remap(u32 x)
/**
* find fw_mapping entry by section name
- * @section - section name
+ * @section: section name
*
* Return pointer to section or NULL if not found
*/
@@ -265,8 +261,9 @@ struct fw_map *wil_find_fw_mapping(const char *section)
/**
* Check address validity for WMI buffer; remap if needed
- * @ptr - internal (linker) fw/ucode address
- * @size - if non zero, validate the block does not
+ * @wil: driver data
+ * @ptr: internal (linker) fw/ucode address
+ * @size: if non zero, validate the block does not
* exceed the device memory (bar)
*
* Valid buffer should be DWORD aligned
@@ -300,9 +297,7 @@ void __iomem *wmi_buffer(struct wil6210_priv *wil, __le32 ptr_)
return wmi_buffer_block(wil, ptr_, 0);
}
-/**
- * Check address validity
- */
+/* Check address validity */
void __iomem *wmi_addr(struct wil6210_priv *wil, u32 ptr)
{
u32 off;
@@ -1577,8 +1572,7 @@ wmi_evt_link_stats(struct wil6210_vif *vif, int id, void *d, int len)
evt->payload, payload_size);
}
-/**
- * find cid and ringid for the station vif
+/* find cid and ringid for the station vif
*
* return error, if other interfaces are used or ring was not found
*/
@@ -1868,8 +1862,7 @@ wmi_evt_link_monitor(struct wil6210_vif *vif, int id, void *d, int len)
cfg80211_cqm_rssi_notify(ndev, event_type, evt->rssi_level, GFP_KERNEL);
}
-/**
- * Some events are ignored for purpose; and need not be interpreted as
+/* Some events are ignored on purpose and need not be interpreted as
* "unhandled events"
*/
static void wmi_evt_ignore(struct wil6210_vif *vif, int id, void *d, int len)
@@ -2578,6 +2571,7 @@ out:
/**
* wmi_rxon - turn radio on/off
+ * @wil: driver data
* @on: turn on if true, off otherwise
*
* Only switch radio. Channel should be set separately.
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index a63b5c2f1e17..404257800033 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -432,7 +432,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
case STATE_DFU_DOWNLOAD_IDLE:
at76_dbg(DBG_DFU, "DOWNLOAD...");
- /* fall through */
+ fallthrough;
case STATE_DFU_IDLE:
at76_dbg(DBG_DFU, "DFU IDLE");
@@ -1199,7 +1199,6 @@ static void at76_rx_callback(struct urb *urb)
{
struct at76_priv *priv = urb->context;
- priv->rx_tasklet.data = (unsigned long)urb;
tasklet_schedule(&priv->rx_tasklet);
}
@@ -1545,10 +1544,10 @@ exit:
return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
}
-static void at76_rx_tasklet(unsigned long param)
+static void at76_rx_tasklet(struct tasklet_struct *t)
{
- struct urb *urb = (struct urb *)param;
- struct at76_priv *priv = urb->context;
+ struct at76_priv *priv = from_tasklet(priv, t, rx_tasklet);
+ struct urb *urb = priv->rx_urb;
struct at76_rx_buffer *buf;
struct ieee80211_rx_status rx_status = { 0 };
@@ -2215,7 +2214,7 @@ static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
INIT_WORK(&priv->work_join_bssid, at76_work_join_bssid);
INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan);
- tasklet_init(&priv->rx_tasklet, at76_rx_tasklet, 0);
+ tasklet_setup(&priv->rx_tasklet, at76_rx_tasklet);
priv->pm_mode = AT76_PM_OFF;
priv->pm_period = 0;
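The at76 hunk also follows the tree-wide tasklet API migration: the handler now receives the tasklet pointer and recovers its container, so no .data field is needed. The generic shape, as a hedged sketch with a hypothetical context struct:

struct foo {				/* hypothetical driver context */
	struct tasklet_struct rx_tasklet;
};

static void foo_rx_tasklet(struct tasklet_struct *t)
{
	struct foo *f = from_tasklet(f, t, rx_tasklet);
	/* ... process data reachable from f ... */
}

/* at init time */
tasklet_setup(&f->rx_tasklet, foo_rx_tasklet);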
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index d5875836068c..707fe66727f8 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -1227,7 +1227,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
case ISR_RxFRAMELOST:
priv->wstats.discard.misc++;
- /* fall through */
+ fallthrough;
case ISR_RxCOMPLETE:
rx_done_irq(priv);
break;
@@ -4228,7 +4228,7 @@ static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
/* Copyright 2003 Matthew T. Russotto */
/* But derived from the Atmel 76C502 firmware written by Atmel and */
/* included in "atmel wireless lan drivers" package */
-/**
+/*
This file is part of net.russotto.AtmelMACFW, hereto referred to
as AtmelMACFW
diff --git a/drivers/net/wireless/broadcom/b43/dma.c b/drivers/net/wireless/broadcom/b43/dma.c
index ca671fc13116..9a7c62bd5e43 100644
--- a/drivers/net/wireless/broadcom/b43/dma.c
+++ b/drivers/net/wireless/broadcom/b43/dma.c
@@ -1317,7 +1317,7 @@ static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
switch (queue_prio) {
default:
B43_WARN_ON(1);
- /* fallthrough */
+ fallthrough;
case 0:
ring = dev->dma.tx_ring_AC_VO;
break;
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index a54dd4f7fa54..f175dbaffc30 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -781,8 +781,9 @@ void b43_dummy_transmission(struct b43_wldev *dev, bool ofdm, bool pa_on)
b43_write16(dev, B43_MMIO_XMTSEL, 0x0826);
b43_write16(dev, B43_MMIO_TXE0_CTL, 0x0000);
- if (!pa_on && phy->type == B43_PHYTYPE_N)
+ if (!pa_on && phy->type == B43_PHYTYPE_N) {
; /*b43_nphy_pa_override(dev, false) */
+ }
switch (phy->type) {
case B43_PHYTYPE_N:
@@ -1873,7 +1874,7 @@ static void b43_handle_firmware_panic(struct b43_wldev *dev)
switch (reason) {
default:
b43dbg(dev->wl, "The panic reason is unknown.\n");
- /* fallthrough */
+ fallthrough;
case B43_FWPANIC_DIE:
/* Do not restart the controller or firmware.
* The device is nonfunctional from now on.
@@ -2013,8 +2014,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
handle_irq_beacon(dev);
if (reason & B43_IRQ_PMQ)
handle_irq_pmq(dev);
- if (reason & B43_IRQ_TXFIFO_FLUSH_OK)
+ if (reason & B43_IRQ_TXFIFO_FLUSH_OK) {
;/* TODO */
+ }
if (reason & B43_IRQ_NOISESAMPLE_OK)
handle_irq_noise(dev);
@@ -2266,7 +2268,7 @@ fw_ready:
size = be32_to_cpu(hdr->size);
if (size != ctx->blob->size - sizeof(struct b43_fw_header))
goto err_format;
- /* fallthrough */
+ fallthrough;
case B43_FW_TYPE_IV:
if (hdr->ver != 1)
goto err_format;
@@ -3178,7 +3180,7 @@ static void b43_rate_memory_init(struct b43_wldev *dev)
b43_rate_memory_write(dev, B43_OFDM_RATE_36MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_48MB, 1);
b43_rate_memory_write(dev, B43_OFDM_RATE_54MB, 1);
- /* fallthrough */
+ fallthrough;
case B43_PHYTYPE_B:
b43_rate_memory_write(dev, B43_CCK_RATE_1MB, 0);
b43_rate_memory_write(dev, B43_CCK_RATE_2MB, 0);
@@ -5329,7 +5331,7 @@ static void b43_supported_bands(struct b43_wldev *dev, bool *have_2ghz_phy,
/* There are 14e4:4321 PCI devs with 2.4 GHz BCM4321 (N-PHY) */
if (dev->phy.type != B43_PHYTYPE_G)
break;
- /* fall through */
+ fallthrough;
case 0x4313: /* BCM4311 */
case 0x431a: /* BCM4318 */
case 0x432a: /* BCM4321 */
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index 1de4de094d61..285490f6f0a1 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -458,7 +458,7 @@ void b43_software_rfkill(struct b43_wldev *dev, bool blocked)
b43_mac_enable(dev);
}
-/**
+/*
* b43_phy_txpower_adjust_work - TX power workqueue.
*
* Workqueue for updating the TX power parameters in hardware.
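
The "/**" to "/*" demotions here and below are kernel-doc fixes: "/**" is reserved for comments in kernel-doc format, and scripts/kernel-doc warns when such a block does not parse. Plain prose comments are downgraded to "/*"; a block that keeps "/**" should look roughly like this hedged sketch (function and members hypothetical):

/**
 * example_adjust_power() - adjust TX power of a device.
 * @dev: device to operate on.
 * @level: new power level in quarter dBm.
 *
 * Return: 0 on success or a negative error code.
 */
static int example_adjust_power(struct example_dev *dev, int level);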
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index c685b4bb5ed6..d050971d150a 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -900,9 +900,6 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
b43_phy_write(dev, 0x70, 0x50);
b43_phy_write(dev, 0x1ff, 0x30);
- if (0) /* TODO: condition */
- ; /* TODO: PHY op on reg 0x217 */
-
if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
else
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index ca2018da9753..b669dff24b6e 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -3239,7 +3239,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
if (!(dev->phy.rev >= 4 &&
b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
break;
- /* FALL THROUGH */
+ fallthrough;
case 0:
case 1:
b43_ntab_write_bulk(dev, B43_NTAB16(8, 0x08), 4, vmid);
@@ -3342,8 +3342,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_ED_CRS20UDEASSERTTHRESH0, 0x0381);
b43_phy_write(dev, B43_NPHY_ED_CRS20UDEASSERTTHRESH1, 0x0381);
- if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK)
+ if (dev->phy.rev >= 6 && sprom->boardflags2_lo & B43_BFL2_SINGLEANT_CCK) {
; /* TODO: 0x0080000000000000 HF */
+ }
}
static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
@@ -4602,10 +4603,11 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
if (nphy->gband_spurwar_en) {
/* TODO: N PHY Adjust Analog Pfbw (7) */
- if (channel == 11 && b43_is_40mhz(dev))
+ if (channel == 11 && b43_is_40mhz(dev)) {
; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
- else
+ } else {
; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
/* TODO: N PHY Adjust CRS Min Power (0x1E) */
}
@@ -4635,10 +4637,11 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
noise[0] = 0;
}
- if (!tone[0] && !noise[0])
+ if (!tone[0] && !noise[0]) {
; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
- else
+ } else {
; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
}
if (nphy->hang_avoid)
@@ -6166,8 +6169,9 @@ static int b43_phy_initn(struct b43_wldev *dev)
if (nphy->phyrxchain != 3)
b43_nphy_set_rx_core_state(dev, nphy->phyrxchain);
- if (nphy->mphase_cal_phase_id > 0)
+ if (nphy->mphase_cal_phase_id > 0) {
;/* TODO PHY Periodic Calibration Multi-Phase Restart */
+ }
do_rssi_cal = false;
if (phy->rev >= 3) {
@@ -6211,8 +6215,9 @@ static int b43_phy_initn(struct b43_wldev *dev)
if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false))
if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
b43_nphy_save_cal(dev);
- } else if (nphy->mphase_cal_phase_id == 0)
+ } else if (nphy->mphase_cal_phase_id == 0) {
;/* N PHY Periodic Calibration with arg 3 */
+ }
} else {
b43_nphy_restore_cal(dev);
}
diff --git a/drivers/net/wireless/broadcom/b43/pio.c b/drivers/net/wireless/broadcom/b43/pio.c
index 1a11c5dfb8d9..8c28a9250cd1 100644
--- a/drivers/net/wireless/broadcom/b43/pio.c
+++ b/drivers/net/wireless/broadcom/b43/pio.c
@@ -294,7 +294,7 @@ static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
switch (queue_prio) {
default:
B43_WARN_ON(1);
- /* fallthrough */
+ fallthrough;
case 0:
q = dev->pio.tx_queue_AC_VO;
break;
diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c
index 7957db94e84c..41a25d909d0d 100644
--- a/drivers/net/wireless/broadcom/b43/tables_nphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c
@@ -3717,7 +3717,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
case 5:
if (sprom->fem.ghz2.extpa_gain == 3)
return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
- /* fall through */
+ fallthrough;
case 4:
case 3:
return b43_ntab_tx_gain_epa_rev3_2g;
diff --git a/drivers/net/wireless/broadcom/b43legacy/dma.c b/drivers/net/wireless/broadcom/b43legacy/dma.c
index f7594e2a896e..7e2f70c4207c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/dma.c
+++ b/drivers/net/wireless/broadcom/b43legacy/dma.c
@@ -189,7 +189,7 @@ return dev->dma.tx_ring1;
switch (queue_priority) {
default:
B43legacy_WARN_ON(1);
- /* fallthrough */
+ fallthrough;
case 0:
ring = dev->dma.tx_ring3;
break;
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 2eaf481f03f1..a27125b7922c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1275,9 +1275,9 @@ static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
}
/* Interrupt handler bottom-half */
-static void b43legacy_interrupt_tasklet(unsigned long data)
+static void b43legacy_interrupt_tasklet(struct tasklet_struct *t)
{
- struct b43legacy_wldev *dev = (struct b43legacy_wldev *)data;
+ struct b43legacy_wldev *dev = from_tasklet(dev, t, isr_tasklet);
u32 reason;
u32 dma_reason[ARRAY_SIZE(dev->dma_reason)];
u32 merged_dma_reason = 0;
@@ -1340,8 +1340,9 @@ static void b43legacy_interrupt_tasklet(unsigned long data)
handle_irq_beacon(dev);
if (reason & B43legacy_IRQ_PMQ)
handle_irq_pmq(dev);
- if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK)
+ if (reason & B43legacy_IRQ_TXFIFO_FLUSH_OK) {
;/*TODO*/
+ }
if (reason & B43legacy_IRQ_NOISESAMPLE_OK)
handle_irq_noise(dev);
@@ -1537,7 +1538,7 @@ static int do_request_fw(struct b43legacy_wldev *dev,
size = be32_to_cpu(hdr->size);
if (size != (*fw)->size - sizeof(struct b43legacy_fw_header))
goto err_format;
- /* fallthrough */
+ fallthrough;
case B43legacy_FW_TYPE_IV:
if (hdr->ver != 1)
goto err_format;
@@ -2076,7 +2077,7 @@ static void b43legacy_rate_memory_init(struct b43legacy_wldev *dev)
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_36MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_48MB, 1);
b43legacy_rate_memory_write(dev, B43legacy_OFDM_RATE_54MB, 1);
- /* fallthrough */
+ fallthrough;
case B43legacy_PHYTYPE_B:
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_1MB, 0);
b43legacy_rate_memory_write(dev, B43legacy_CCK_RATE_2MB, 0);
@@ -3741,9 +3742,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
wldev->wl = wl;
b43legacy_set_status(wldev, B43legacy_STAT_UNINIT);
wldev->bad_frames_preempt = modparam_bad_frames_preempt;
- tasklet_init(&wldev->isr_tasklet,
- b43legacy_interrupt_tasklet,
- (unsigned long)wldev);
+ tasklet_setup(&wldev->isr_tasklet, b43legacy_interrupt_tasklet);
if (modparam_pio)
wldev->__using_pio = true;
INIT_LIST_HEAD(&wldev->list);
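
The tasklet changes follow the kernel-wide API conversion: tasklet_setup() registers the callback with a pointer to the tasklet itself, and the callback recovers its containing object with from_tasklet(), a type-checked container_of() wrapper, instead of casting an unsigned long. A minimal sketch of the pattern with hypothetical names:

struct my_dev {				/* hypothetical driver state */
	struct tasklet_struct isr_tasklet;
	int pending;
};

static void my_dev_tasklet(struct tasklet_struct *t)
{
	/* from_tasklet(var, t, member) is container_of(t, typeof(*var), member) */
	struct my_dev *dev = from_tasklet(dev, t, isr_tasklet);

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_setup(&dev->isr_tasklet, my_dev_tasklet);
}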
diff --git a/drivers/net/wireless/broadcom/b43legacy/pio.c b/drivers/net/wireless/broadcom/b43legacy/pio.c
index cbb761378619..aac413d0f629 100644
--- a/drivers/net/wireless/broadcom/b43legacy/pio.c
+++ b/drivers/net/wireless/broadcom/b43legacy/pio.c
@@ -264,9 +264,9 @@ static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
return 0;
}
-static void tx_tasklet(unsigned long d)
+static void tx_tasklet(struct tasklet_struct *t)
{
- struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
+ struct b43legacy_pioqueue *queue = from_tasklet(queue, t, txtask);
struct b43legacy_wldev *dev = queue->dev;
unsigned long flags;
struct b43legacy_pio_txpacket *packet, *tmp_packet;
@@ -331,8 +331,7 @@ struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev,
INIT_LIST_HEAD(&queue->txfree);
INIT_LIST_HEAD(&queue->txqueue);
INIT_LIST_HEAD(&queue->txrunning);
- tasklet_init(&queue->txtask, tx_tasklet,
- (unsigned long)queue);
+ tasklet_setup(&queue->txtask, tx_tasklet);
value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
value &= ~B43legacy_MACCTL_BE;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 2c95a08a5871..3984fd7d918e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -397,9 +397,9 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
}
static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
- struct sk_buff *skb)
+ struct sk_buff *skb, bool inirq)
{
- brcmf_fws_rxreorder(ifp, skb);
+ brcmf_fws_rxreorder(ifp, skb, inirq);
}
static void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 1a7ab49295aa..f9ebb98b0e3c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -45,6 +45,7 @@
#define SDIO_FUNC2_BLOCKSIZE 512
#define SDIO_4373_FUNC2_BLOCKSIZE 256
#define SDIO_435X_FUNC2_BLOCKSIZE 256
+#define SDIO_4329_FUNC2_BLOCKSIZE 128
/* Maximum milliseconds to wait for F2 to come up */
#define SDIO_WAIT_F2RDY 3000
@@ -73,7 +74,7 @@ static irqreturn_t brcmf_sdiod_oob_irqhandler(int irq, void *dev_id)
sdiodev->irq_en = false;
}
- brcmf_sdio_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus, true);
return IRQ_HANDLED;
}
@@ -85,7 +86,7 @@ static void brcmf_sdiod_ib_irqhandler(struct sdio_func *func)
brcmf_dbg(INTR, "IB intr triggered\n");
- brcmf_sdio_isr(sdiodev->bus);
+ brcmf_sdio_isr(sdiodev->bus, false);
}
/* dummy handler for SDIO function 2 interrupt */
@@ -916,12 +917,13 @@ int brcmf_sdiod_probe(struct brcmf_sdio_dev *sdiodev)
f2_blksz = SDIO_4373_FUNC2_BLOCKSIZE;
break;
case SDIO_DEVICE_ID_BROADCOM_4359:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4354:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4356:
f2_blksz = SDIO_435X_FUNC2_BLOCKSIZE;
break;
+ case SDIO_DEVICE_ID_BROADCOM_4329:
+ f2_blksz = SDIO_4329_FUNC2_BLOCKSIZE;
+ break;
default:
break;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index ec2bec0999d1..f9f18ff451ea 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -65,11 +65,12 @@ enum brcmf_btcoex_state {
* @reg68: saved value of btc_params 68
* @saved_regs_part1: flag indicating regs 66,41,68
* have been saved
+ * @reg50: saved value of btc_params 50
* @reg51: saved value of btc_params 51
* @reg64: saved value of btc_params 64
* @reg65: saved value of btc_params 65
* @reg71: saved value of btc_params 71
- * @saved_regs_part1: flag indicating regs 50,51,64,65,71
+ * @saved_regs_part2: flag indicating regs 50,51,64,65,71
* have been saved
*/
struct brcmf_btcoex_info {
@@ -226,7 +227,7 @@ static bool brcmf_btcoex_is_sco_active(struct brcmf_if *ifp)
return res;
}
-/**
+/*
* btcmf_btcoex_save_part1() - save first step parameters.
*/
static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci)
@@ -246,7 +247,7 @@ static void btcmf_btcoex_save_part1(struct brcmf_btcoex_info *btci)
}
}
-/**
+/*
* brcmf_btcoex_restore_part1() - restore first step parameters.
*/
static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
@@ -266,7 +267,7 @@ static void brcmf_btcoex_restore_part1(struct brcmf_btcoex_info *btci)
}
}
-/**
+/*
* brcmf_btcoex_timerfunc() - BT coex timer callback
*/
static void brcmf_btcoex_timerfunc(struct timer_list *t)
@@ -441,9 +442,8 @@ static void brcmf_btcoex_dhcp_end(struct brcmf_btcoex_info *btci)
}
}
-/**
+/*
* brcmf_btcoex_set_mode - set BT coex mode
- * @cfg: driver private cfg80211 data
* @mode: Wifi-Bluetooth coexistence mode
*
* return: 0 on success
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 623c0168da79..08f9d47f2e5c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -249,7 +249,8 @@ int brcmf_bus_reset(struct brcmf_bus *bus)
*/
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event,
+ bool inirq);
/* Receive async event packet from firmware. Callee disposes of rxp. */
void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index ab0da2ff982e..a2dbbb977d0c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -56,6 +56,7 @@
#define RSN_AKM_PSK 2 /* Pre-shared Key */
#define RSN_AKM_SHA256_1X 5 /* SHA256, 802.1X */
#define RSN_AKM_SHA256_PSK 6 /* SHA256, Pre-shared Key */
+#define RSN_AKM_SAE 8 /* SAE */
#define RSN_CAP_LEN 2 /* Length of RSN capabilities */
#define RSN_CAP_PTK_REPLAY_CNTR_MASK (BIT(2) | BIT(3))
#define RSN_CAP_MFPR_MASK BIT(6)
@@ -3477,7 +3478,7 @@ brcmf_get_netinfo_array(struct brcmf_pno_scanresults_le *pfn_v1)
switch (pfn_v1->version) {
default:
WARN_ON(1);
- /* fall-thru */
+ fallthrough;
case cpu_to_le32(1):
netinfo = (struct brcmf_pno_net_info_le *)(pfn_v1 + 1);
break;
@@ -3991,10 +3992,7 @@ brcmf_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *ndev,
}
brcmf_dbg(CONN, "set_pmksa - PMK bssid: %pM =\n", pmk[npmk].bssid);
- for (i = 0; i < WLAN_PMKID_LEN; i += 4)
- brcmf_dbg(CONN, "%02x %02x %02x %02x\n", pmk[npmk].pmkid[i],
- pmk[npmk].pmkid[i + 1], pmk[npmk].pmkid[i + 2],
- pmk[npmk].pmkid[i + 3]);
+ brcmf_dbg(CONN, "%*ph\n", WLAN_PMKID_LEN, pmk[npmk].pmkid);
err = brcmf_update_pmklist(cfg, ifp);
@@ -4245,6 +4243,10 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
brcmf_dbg(TRACE, "RSN_AKM_MFP_1X\n");
wpa_auth |= WPA2_AUTH_1X_SHA256;
break;
+ case RSN_AKM_SAE:
+ brcmf_dbg(TRACE, "RSN_AKM_SAE\n");
+ wpa_auth |= WPA3_AUTH_SAE_PSK;
+ break;
default:
bphy_err(drvr, "Invalid key mgmt info\n");
}
@@ -4262,11 +4264,12 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
brcmf_dbg(TRACE, "MFP Required\n");
mfp = BRCMF_MFP_REQUIRED;
/* Firmware only supports mfp required in
- * combination with WPA2_AUTH_PSK_SHA256 or
- * WPA2_AUTH_1X_SHA256.
+ * combination with WPA2_AUTH_PSK_SHA256,
+ * WPA2_AUTH_1X_SHA256, or WPA3_AUTH_SAE_PSK.
*/
if (!(wpa_auth & (WPA2_AUTH_PSK_SHA256 |
- WPA2_AUTH_1X_SHA256))) {
+ WPA2_AUTH_1X_SHA256 |
+ WPA3_AUTH_SAE_PSK))) {
err = -EINVAL;
goto exit;
}
@@ -4682,6 +4685,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct cfg80211_crypto_settings *crypto = &settings->crypto;
const struct brcmf_tlv *ssid_ie;
const struct brcmf_tlv *country_ie;
struct brcmf_ssid_le ssid_le;
@@ -4821,6 +4826,25 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
goto exit;
}
+ if (crypto->psk) {
+ brcmf_dbg(INFO, "using PSK offload\n");
+ profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_PSK);
+ err = brcmf_set_pmk(ifp, crypto->psk,
+ BRCMF_WSEC_MAX_PSK_LEN);
+ if (err < 0)
+ goto exit;
+ }
+ if (crypto->sae_pwd) {
+ brcmf_dbg(INFO, "using SAE offload\n");
+ profile->use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);
+ err = brcmf_set_sae_password(ifp, crypto->sae_pwd,
+ crypto->sae_pwd_len);
+ if (err < 0)
+ goto exit;
+ }
+ if (profile->use_fwauth == 0)
+ profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
+
err = brcmf_parse_configure_security(ifp, settings,
NL80211_IFTYPE_AP);
if (err < 0) {
@@ -4907,6 +4931,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = cfg->pub;
+ struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
s32 err;
struct brcmf_fil_bss_enable_le bss_enable;
struct brcmf_join_params join_params;
@@ -4918,6 +4943,14 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* first to make sure they get processed by fw. */
msleep(400);
+ if (profile->use_fwauth != BIT(BRCMF_PROFILE_FWAUTH_NONE)) {
+ if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_PSK))
+ brcmf_set_pmk(ifp, NULL, 0);
+ if (profile->use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
+ brcmf_set_sae_password(ifp, NULL, 0);
+ profile->use_fwauth = BIT(BRCMF_PROFILE_FWAUTH_NONE);
+ }
+
if (ifp->vif->mbss) {
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_DOWN, 1);
return err;
@@ -6476,7 +6509,7 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
default:
wiphy_warn(wiphy, "Firmware reported unsupported bandwidth %d\n",
ch.bw);
- /* fall through */
+ fallthrough;
case BRCMU_CHAN_BW_20:
/* enable the channel and disable other bandwidths
* for now, as the mentioned order assures they are enabled
@@ -6614,10 +6647,10 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
switch (mimo_bwcap) {
case WLC_N_BW_40ALL:
bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
- /* fall-thru */
+ fallthrough;
case WLC_N_BW_20IN2G_40IN5G:
bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
- /* fall-thru */
+ fallthrough;
case WLC_N_BW_20ALL:
bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
@@ -7066,6 +7099,13 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_SAE_OFFLOAD);
}
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_FWAUTH)) {
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK);
+ if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_SAE))
+ wiphy_ext_feature_set(wiphy,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP);
+ }
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
if (brcmf_feat_is_enabled(ifp, BRCMF_FEAT_PNO)) {
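
The PMKID debug change above swaps a hand-rolled hex loop for the kernel's "%*ph" printf extension, which dumps a small buffer (at most 64 bytes) as hex; the field-width argument supplies the length. Illustrative use, assuming a 16-byte ID:

u8 pmkid[16];	/* hypothetical buffer, WLAN_PMKID_LEN bytes */

/* "%*ph" prints the bytes space-separated; "%*phN" prints them
 * without separators.  Both stop at 64 bytes per conversion.
 */
pr_debug("pmkid: %*ph\n", (int)sizeof(pmkid), pmkid);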
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index 333fdf394f95..17817cdb5de2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -129,6 +129,19 @@ enum brcmf_profile_fwsup {
};
/**
+ * enum brcmf_profile_fwauth - firmware authenticator profile
+ *
+ * @BRCMF_PROFILE_FWAUTH_NONE: no firmware authenticator
+ * @BRCMF_PROFILE_FWAUTH_PSK: authenticator for WPA/WPA2-PSK
+ * @BRCMF_PROFILE_FWAUTH_SAE: authenticator for SAE
+ */
+enum brcmf_profile_fwauth {
+ BRCMF_PROFILE_FWAUTH_NONE,
+ BRCMF_PROFILE_FWAUTH_PSK,
+ BRCMF_PROFILE_FWAUTH_SAE
+};
+
+/**
* struct brcmf_cfg80211_profile - profile information.
*
* @bssid: bssid of joined/joining ibss.
@@ -140,6 +153,7 @@ struct brcmf_cfg80211_profile {
struct brcmf_cfg80211_security sec;
struct brcmf_wsec_key key[BRCMF_MAX_DEFAULT_KEYS];
enum brcmf_profile_fwsup use_fwsup;
+ u16 use_fwauth;
bool is_ft;
};
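
Note that use_fwauth is a u16 bitmap rather than a bare enum value: the brcmf_profile_fwauth members are bit positions, so a profile can record PSK and SAE offload at the same time and stop_ap() can tear each one down independently. Sketch of the idiom:

u16 use_fwauth = 0;

/* each enum member is a bit position */
use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_PSK);
use_fwauth |= BIT(BRCMF_PROFILE_FWAUTH_SAE);

if (use_fwauth & BIT(BRCMF_PROFILE_FWAUTH_SAE))
	clear_sae_password();	/* hypothetical teardown helper */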
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index a3a257089696..5bf11e46fc49 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1390,7 +1390,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
case BRCM_CC_4345_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
- /* fall-through */
+ fallthrough;
case BRCM_CC_43241_CHIP_ID:
case BRCM_CC_4335_CHIP_ID:
case BRCM_CC_4339_CHIP_ID:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index f89010a81ffb..3dd28f5fef19 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -395,7 +395,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
spin_unlock_irqrestore(&ifp->netif_stop_lock, flags);
}
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
{
/* Most of Broadcom's firmwares send an 802.11f ADD frame every time a new
* STA connects to the AP interface. This is an obsoleted standard most
@@ -418,14 +418,15 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
ifp->ndev->stats.rx_packets++;
brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
- if (in_interrupt())
+ if (inirq) {
netif_rx(skb);
- else
+ } else {
/* If the receive is not processed inside an ISR,
* the softirqd must be woken explicitly to service
* the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
*/
netif_rx_ni(skb);
+ }
}
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
@@ -474,7 +475,7 @@ void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb)
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_802_2);
- brcmf_netif_rx(ifp, skb);
+ brcmf_netif_rx(ifp, skb, false);
}
static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
@@ -486,7 +487,7 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
if (ret || !(*ifp) || !(*ifp)->ndev) {
- if (ret != -ENODATA && *ifp)
+ if (ret != -ENODATA && *ifp && (*ifp)->ndev)
(*ifp)->ndev->stats.rx_errors++;
brcmu_pkt_buf_free_skb(skb);
return -ENODATA;
@@ -496,7 +497,8 @@ static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
return 0;
}
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event,
+ bool inirq)
{
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -508,14 +510,16 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
return;
if (brcmf_proto_is_reorder_skb(skb)) {
- brcmf_proto_rxreorder(ifp, skb);
+ brcmf_proto_rxreorder(ifp, skb, inirq);
} else {
/* Process special event packets */
- if (handle_event)
- brcmf_fweh_process_skb(ifp->drvr, skb,
- BCMILCP_SUBTYPE_VENDOR_LONG);
+ if (handle_event) {
+ gfp_t gfp = inirq ? GFP_ATOMIC : GFP_KERNEL;
- brcmf_netif_rx(ifp, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb,
+ BCMILCP_SUBTYPE_VENDOR_LONG, gfp);
+ }
+ brcmf_netif_rx(ifp, skb, inirq);
}
}
@@ -530,7 +534,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
if (brcmf_rx_hdrpull(drvr, skb, &ifp))
return;
- brcmf_fweh_process_skb(ifp->drvr, skb, 0);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);
brcmu_pkt_buf_free_skb(skb);
}
@@ -1422,6 +1426,11 @@ void brcmf_detach(struct device *dev)
#endif
brcmf_bus_change_state(bus_if, BRCMF_BUS_DOWN);
+ /* make sure primary interface removed last */
+ for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
+ if (drvr->iflist[i])
+ brcmf_remove_interface(drvr->iflist[i], false);
+ }
brcmf_bus_stop(drvr->bus_if);
brcmf_fweh_detach(drvr);
@@ -1432,12 +1441,6 @@ void brcmf_detach(struct device *dev)
drvr->mon_if = NULL;
}
- /* make sure primary interface removed last */
- for (i = BRCMF_MAX_IFS - 1; i > -1; i--) {
- if (drvr->iflist[i])
- brcmf_del_if(drvr, drvr->iflist[i]->bsscfgidx, false);
- }
-
if (drvr->config) {
brcmf_p2p_detach(&drvr->config->p2p);
brcmf_cfg80211_detach(drvr->config);
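
The inirq plumbing through brcmf_rx_frame() and brcmf_netif_rx() is part of removing in_interrupt() from drivers: the caller, which knows whether it runs in hard-IRQ context, passes that fact down explicitly instead of the callee guessing. netif_rx() is safe from interrupt context, while netif_rx_ni() must be used from process context since it also wakes the softirq machinery. A reduced sketch of the receive-path decision, names hypothetical:

static void my_netif_rx(struct sk_buff *skb, bool inirq)
{
	if (inirq) {
		/* hard-IRQ (or otherwise atomic) context */
		netif_rx(skb);
	} else {
		/* process context: also kicks NET_RX_SOFTIRQ processing */
		netif_rx_ni(skb);
	}
}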
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 33b2ab3b54b0..5767d665cee5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,7 +208,7 @@ void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked);
void brcmf_txflowblock_if(struct brcmf_if *ifp,
enum brcmf_netif_stop_reason reason, bool state);
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
-void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
void brcmf_netif_mon_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_detach(struct net_device *ndev, bool rtnl_locked);
int brcmf_net_mon_attach(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 0dcefbd0c000..7c68d9849324 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -42,6 +42,7 @@ static const struct brcmf_feat_fwcap brcmf_fwcap_map[] = {
{ BRCMF_FEAT_MONITOR_FMT_RADIOTAP, "rtap" },
{ BRCMF_FEAT_DOT11H, "802.11h" },
{ BRCMF_FEAT_SAE, "sae" },
+ { BRCMF_FEAT_FWAUTH, "idauth" },
};
#ifdef DEBUG
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
index cda3fc1bab7f..d1f4257af696 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.h
@@ -28,6 +28,7 @@
* MONITOR_FMT_HW_RX_HDR: firmware provides monitor packets with hw/ucode header
* DOT11H: firmware supports 802.11h
* SAE: simultaneous authentication of equals
+ * FWAUTH: Firmware authenticator
*/
#define BRCMF_FEAT_LIST \
BRCMF_FEAT_DEF(MBSS) \
@@ -49,7 +50,8 @@
BRCMF_FEAT_DEF(MONITOR_FMT_RADIOTAP) \
BRCMF_FEAT_DEF(MONITOR_FMT_HW_RX_HDR) \
BRCMF_FEAT_DEF(DOT11H) \
- BRCMF_FEAT_DEF(SAE)
+ BRCMF_FEAT_DEF(SAE) \
+ BRCMF_FEAT_DEF(FWAUTH)
/*
* Quirks:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 3aed4c4b887a..d821a4758f8c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -59,7 +59,7 @@ struct nvram_parser {
bool boardrev_found;
};
-/**
+/*
* is_nvram_char() - check if char is a valid one for NVRAM entry
*
* It accepts all printable ASCII chars except for '#' which opens a comment.
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index a5cced2c89ac..430d2cca98b3 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -23,6 +23,7 @@
* @ifidx: interface index related to this event.
* @ifaddr: ethernet address for interface.
* @emsg: common parameters of the firmware event message.
+ * @datalen: length of the data array
* @data: event specific data part of the firmware event.
*/
struct brcmf_fweh_queue_item {
@@ -35,7 +36,7 @@ struct brcmf_fweh_queue_item {
u8 data[];
};
-/**
+/*
* struct brcmf_fweh_event_name - code, name mapping entry.
*/
struct brcmf_fweh_event_name {
@@ -118,8 +119,8 @@ static int brcmf_fweh_call_event_handler(struct brcmf_pub *drvr,
* brcmf_fweh_handle_if_event() - handle IF event.
*
* @drvr: driver information object.
- * @item: queue entry.
- * @ifpp: interface object (may change upon ADD action).
+ * @emsg: event message object.
+ * @data: event object.
*/
static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
struct brcmf_event_msg *emsg,
@@ -128,7 +129,6 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
struct brcmf_if_event *ifevent = data;
struct brcmf_if *ifp;
bool is_p2pdev;
- int err = 0;
brcmf_dbg(EVENT, "action: %u ifidx: %u bsscfgidx: %u flags: %u role: %u\n",
ifevent->action, ifevent->ifidx, ifevent->bsscfgidx,
@@ -171,8 +171,8 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
if (ifp && ifevent->action == BRCMF_E_IF_CHANGE)
brcmf_proto_reset_if(drvr, ifp);
- err = brcmf_fweh_call_event_handler(drvr, ifp, emsg->event_code, emsg,
- data);
+ brcmf_fweh_call_event_handler(drvr, ifp, emsg->event_code, emsg,
+ data);
if (ifp && ifevent->action == BRCMF_E_IF_DEL) {
bool armed = brcmf_cfg80211_vif_event_armed(drvr->config);
@@ -304,10 +304,12 @@ void brcmf_fweh_detach(struct brcmf_pub *drvr)
{
struct brcmf_fweh_info *fweh = &drvr->fweh;
- /* cancel the worker */
- cancel_work_sync(&fweh->event_work);
- WARN_ON(!list_empty(&fweh->event_q));
- memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
+ /* cancel the worker if initialized */
+ if (fweh->event_work.func) {
+ cancel_work_sync(&fweh->event_work);
+ WARN_ON(!list_empty(&fweh->event_q));
+ memset(fweh->evt_handler, 0, sizeof(fweh->evt_handler));
+ }
}
/**
@@ -381,18 +383,18 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
*
* @drvr: driver information object.
* @event_packet: event packet to process.
+ * @packet_len: length of the packet
*
* If the packet buffer contains a firmware event message it will
* dispatch the event to a registered handler (using worker).
*/
void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
- u32 packet_len)
+ u32 packet_len, gfp_t gfp)
{
enum brcmf_fweh_event_code code;
struct brcmf_fweh_info *fweh = &drvr->fweh;
struct brcmf_fweh_queue_item *event;
- gfp_t alloc_flag = GFP_KERNEL;
void *data;
u32 datalen;
@@ -411,10 +413,7 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
datalen + sizeof(*event_packet) > packet_len)
return;
- if (in_interrupt())
- alloc_flag = GFP_ATOMIC;
-
- event = kzalloc(sizeof(*event) + datalen, alloc_flag);
+ event = kzalloc(sizeof(*event) + datalen, gfp);
if (!event)
return;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
index a82f51bc1e69..48414e8b9389 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
@@ -319,11 +319,12 @@ void brcmf_fweh_unregister(struct brcmf_pub *drvr,
int brcmf_fweh_activate_events(struct brcmf_if *ifp);
void brcmf_fweh_process_event(struct brcmf_pub *drvr,
struct brcmf_event *event_packet,
- u32 packet_len);
+ u32 packet_len, gfp_t gfp);
void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
- struct sk_buff *skb, u16 stype)
+ struct sk_buff *skb, u16 stype,
+ gfp_t gfp)
{
struct brcmf_event *event_packet;
u16 subtype, usr_stype;
@@ -354,7 +355,7 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
if (usr_stype != BCMILCP_BCM_SUBTYPE_EVENT)
return;
- brcmf_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN);
+ brcmf_fweh_process_event(drvr, event_packet, skb->len + ETH_HLEN, gfp);
}
#endif /* FWEH_H_ */
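
The same in_interrupt() removal drives the fweh changes: rather than picking GFP_ATOMIC or GFP_KERNEL inside brcmf_fweh_process_event() based on in_interrupt(), the allocation flags are threaded in from call sites that know their own context. A hedged sketch with hypothetical names:

struct my_event {			/* hypothetical event container */
	size_t datalen;
	u8 data[];
};

/* The caller, not the callee, knows whether it is atomic, so the
 * gfp flags travel with the call.  Caller kfree()s the result.
 */
static struct my_event *my_alloc_event(size_t datalen, gfp_t gfp)
{
	struct my_event *ev = kzalloc(struct_size(ev, data, datalen), gfp);

	if (ev)
		ev->datalen = datalen;
	return ev;
}

/* call sites then state their context explicitly:
 *   my_alloc_event(len, GFP_KERNEL);   process context
 *   my_alloc_event(len, GFP_ATOMIC);   IRQ/atomic context
 */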
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index 2df6811c066e..437e83ea8902 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -354,6 +354,7 @@ enum brcmf_fws_mac_desc_state {
/**
* struct brcmf_fws_mac_descriptor - firmware signalling data per node/interface
*
+ * @name: name of the descriptor.
* @occupied: slot is in use.
* @mac_handle: handle for mac entry determined by firmware.
* @interface_id: interface index.
@@ -362,10 +363,15 @@ enum brcmf_fws_mac_desc_state {
* @generation: generation bit.
* @ac_bitmap: ac queue bitmap.
* @requested_credit: credits requested by firmware.
+ * @requested_packet: packet requested by firmware.
* @ea: ethernet address.
* @seq: per-node free-running sequence.
* @psq: power-save queue.
* @transit_count: packet in transit to firmware.
+ * @suppr_transit_count: suppressed packet in transit to firmware.
+ * @send_tim_signal: if set tim signal will be sent.
+ * @traffic_pending_bmp: traffic pending bitmap.
+ * @traffic_lastreported_bmp: traffic last reported bitmap.
*/
struct brcmf_fws_mac_descriptor {
char name[16];
@@ -498,20 +504,6 @@ struct brcmf_fws_info {
bool avoid_queueing;
};
-/*
- * brcmf_fws_prio2fifo - mapping from 802.1d priority to firmware fifo index.
- */
-static const int brcmf_fws_prio2fifo[] = {
- BRCMF_FWS_FIFO_AC_BE,
- BRCMF_FWS_FIFO_AC_BK,
- BRCMF_FWS_FIFO_AC_BK,
- BRCMF_FWS_FIFO_AC_BE,
- BRCMF_FWS_FIFO_AC_VI,
- BRCMF_FWS_FIFO_AC_VI,
- BRCMF_FWS_FIFO_AC_VO,
- BRCMF_FWS_FIFO_AC_VO
-};
-
#define BRCMF_FWS_TLV_DEF(name, id, len) \
case BRCMF_FWS_TYPE_ ## name: \
return len;
@@ -1672,7 +1664,7 @@ static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
rfi->pend_pkts -= skb_queue_len(skb_list);
}
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt, bool inirq)
{
struct brcmf_pub *drvr = ifp->drvr;
u8 *reorder_data;
@@ -1690,7 +1682,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
/* validate flags and flow id */
if (flags == 0xFF) {
bphy_err(drvr, "invalid flags...so ignore this packet\n");
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
return;
}
@@ -1702,7 +1694,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
if (rfi == NULL) {
brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
flow_id);
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
return;
}
@@ -1727,7 +1719,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
rfi = kzalloc(buf_size, GFP_ATOMIC);
if (rfi == NULL) {
bphy_err(drvr, "failed to alloc buffer\n");
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
return;
}
@@ -1841,7 +1833,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
netif_rx:
skb_queue_walk_safe(&reorder_list, pkt, pnext) {
__skb_unlink(pkt, &reorder_list);
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, pkt, inirq);
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index b16a9d1c0508..50e424b5880d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -42,6 +42,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
-void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index f1a20db8daab..7c8e08ee8f0f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -536,7 +536,8 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
-static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb,
+ bool inirq)
{
}
@@ -1128,7 +1129,7 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_fweh_process_skb(ifp->drvr, skb, 0);
+ brcmf_fweh_process_skb(ifp->drvr, skb, 0, GFP_KERNEL);
exit:
brcmu_pkt_buf_free_skb(skb);
@@ -1190,7 +1191,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
}
skb->protocol = eth_type_trans(skb, ifp->ndev);
- brcmf_netif_rx(ifp, skb);
+ brcmf_netif_rx(ifp, skb, false);
}
static void brcmf_msgbuf_process_gen_status(struct brcmf_msgbuf *msgbuf,
@@ -1620,6 +1621,8 @@ fail:
BRCMF_TX_IOCTL_MAX_MSG_SIZE,
msgbuf->ioctbuf,
msgbuf->ioctbuf_handle);
+ if (msgbuf->txflow_wq)
+ destroy_workqueue(msgbuf->txflow_wq);
kfree(msgbuf);
}
return -ENOMEM;
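
The msgbuf hunk above plugs a leak on the attach error path: the fail label already unwound the ioctl buffer but forgot the tx-flow workqueue. The guarded destroy_workqueue() call is the usual pattern for unwinding partially completed init, sketched here with hypothetical state (assumes *ctx starts zeroed):

static int my_attach(struct my_ctx *ctx)
{
	ctx->wq = create_singlethread_workqueue("my_txflow");
	if (!ctx->wq)
		goto fail;

	ctx->buf = kzalloc(SZ_4K, GFP_KERNEL);	/* hypothetical buffer */
	if (!ctx->buf)
		goto fail;

	return 0;

fail:
	/* unwind whatever was set up before the failure */
	if (ctx->wq)
		destroy_workqueue(ctx->wq);
	kfree(ctx->buf);	/* kfree(NULL) is a no-op */
	return -ENOMEM;
}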
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index debd887e159e..ec6fc7a150a6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -145,11 +145,11 @@ struct brcmf_p2p_scan_le {
*
* @category: P2P_PUB_AF_CATEGORY
* @action: P2P_PUB_AF_ACTION
- * @oui[3]: P2P_OUI
+ * @oui: P2P_OUI
* @oui_type: OUI type - P2P_VER
* @subtype: OUI subtype - P2P_TYPE_*
* @dialog_token: nonzero, identifies req/rsp transaction
- * @elts[1]: Variable length information elements.
+ * @elts: Variable length information elements.
*/
struct brcmf_p2p_pub_act_frame {
u8 category;
@@ -165,11 +165,11 @@ struct brcmf_p2p_pub_act_frame {
* struct brcmf_p2p_action_frame - WiFi P2P Action Frame
*
* @category: P2P_AF_CATEGORY
- * @OUI[3]: OUI - P2P_OUI
+ * @oui: OUI - P2P_OUI
* @type: OUI Type - P2P_VER
* @subtype: OUI Subtype - P2P_AF_*
* @dialog_token: nonzero, identifies req/resp transaction
- * @elts[1]: Variable length information elements.
+ * @elts: Variable length information elements.
*/
struct brcmf_p2p_action_frame {
u8 category;
@@ -186,7 +186,7 @@ struct brcmf_p2p_action_frame {
* @category: 0x04 Public Action Frame
* @action: 0x6c Advertisement Protocol
* @dialog_token: nonzero, identifies req/rsp transaction
- * @query_data[1]: Query Data. SD gas ireq SD gas iresp
+ * @query_data: Query Data. SD gas ireq SD gas iresp
*/
struct brcmf_p2psd_gas_pub_act_frame {
u8 category;
@@ -201,7 +201,7 @@ struct brcmf_p2psd_gas_pub_act_frame {
* @mpc_onoff: To make sure the action frame is sent successfully, we have to
* turn off mpc 0: off, 1: on, (-1): do nothing
* @search_channel: 1: search peer's channel to send af
- * extra_listen: keep the dwell time to get af response frame.
+ * @extra_listen: keep the dwell time to get af response frame.
*/
struct brcmf_config_af_params {
s32 mpc_onoff;
@@ -763,9 +763,8 @@ exit:
* brcmf_p2p_run_escan() - escan callback for peer-to-peer.
*
* @cfg: driver private data for cfg80211 interface.
- * @ndev: net device for which scan is requested.
+ * @ifp: interface control.
* @request: scan request from cfg80211.
- * @action: scan action.
*
* Determines the P2P discovery state based on the scan request parameters and
* validates the channels in the request.
@@ -913,8 +912,6 @@ int brcmf_p2p_scan_prep(struct wiphy *wiphy,
if (err)
return err;
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
-
/* override .run_escan() callback. */
cfg->escan_info.run = brcmf_p2p_run_escan;
}
@@ -969,9 +966,10 @@ exit:
* brcmf_p2p_remain_on_channel() - put device on channel and stay there.
*
* @wiphy: wiphy device.
+ * @wdev: wireless device.
* @channel: channel to stay on.
* @duration: time in ms to remain on channel.
- *
+ * @cookie: cookie.
*/
int brcmf_p2p_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *channel,
@@ -1056,7 +1054,7 @@ void brcmf_p2p_cancel_remain_on_channel(struct brcmf_if *ifp)
* brcmf_p2p_act_frm_search() - search function for action frame.
*
* @p2p: p2p device.
- * channel: channel on which action frame is to be trasmitted.
+ * @channel: channel on which action frame is to be transmitted.
*
* search function to reach a common channel to send the action frame. When
* channel is 0, all social channels will be used to send the af
@@ -1331,6 +1329,7 @@ brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
* brcmf_p2p_gon_req_collision() - Check for go negotiation collision
*
* @p2p: p2p device info struct.
+ * @mac: MAC address.
*
* return true if the received action frame is to be dropped.
*/
@@ -1546,7 +1545,6 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
struct brcmf_cfg80211_vif *vif;
struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;
- s32 timeout = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1582,8 +1580,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
(p2p->wait_for_offchan_complete) ?
"off-channel" : "on-channel");
- timeout = wait_for_completion_timeout(&p2p->send_af_done,
- P2P_AF_MAX_WAIT_TIME);
+ wait_for_completion_timeout(&p2p->send_af_done, P2P_AF_MAX_WAIT_TIME);
if (test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status)) {
brcmf_dbg(TRACE, "TX action frame operation is success\n");
@@ -2041,8 +2038,8 @@ static void brcmf_p2p_get_current_chanspec(struct brcmf_p2p_info *p2p,
/**
* Change a P2P Role.
- * Parameters:
- * @mac: MAC address of the BSS to change a role
+ * @cfg: driver private data for cfg80211 interface.
+ * @if_type: interface type.
* Returns 0 if success.
*/
int brcmf_p2p_ifchange(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index bd08d3aaa8f4..f4a79e217da5 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -32,7 +32,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
- void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
+ void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq);
void (*add_if)(struct brcmf_if *ifp);
void (*del_if)(struct brcmf_if *ifp);
void (*reset_if)(struct brcmf_if *ifp);
@@ -109,9 +109,9 @@ static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
}
static inline void
-brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb, bool inirq)
{
- ifp->drvr->proto->rxreorder(ifp, skb);
+ ifp->drvr->proto->rxreorder(ifp, skb, inirq);
}
static inline void
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 3c07d1bbe1c6..99987a789e7e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -1704,7 +1704,7 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_rx_event(bus->sdiodev->dev, pfirst);
else
brcmf_rx_frame(bus->sdiodev->dev, pfirst,
- false);
+ false, false);
bus->sdcnt.rxglompkts++;
}
@@ -2038,7 +2038,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
brcmf_rx_event(bus->sdiodev->dev, pkt);
else
brcmf_rx_frame(bus->sdiodev->dev, pkt,
- false);
+ false, false);
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
@@ -3625,7 +3625,7 @@ void brcmf_sdio_trigger_dpc(struct brcmf_sdio *bus)
}
}
-void brcmf_sdio_isr(struct brcmf_sdio *bus)
+void brcmf_sdio_isr(struct brcmf_sdio *bus, bool in_isr)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3636,7 +3636,7 @@ void brcmf_sdio_isr(struct brcmf_sdio *bus)
/* Count the interrupt call */
bus->sdcnt.intrcount++;
- if (in_interrupt())
+ if (in_isr)
atomic_set(&bus->ipend, 1);
else
if (brcmf_sdio_intr_rstatus(bus)) {
@@ -4278,8 +4278,9 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL,
CY_43012_MESBUSYCTRL, &err);
break;
+ case SDIO_DEVICE_ID_BROADCOM_4329:
case SDIO_DEVICE_ID_BROADCOM_4339:
- brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 4339\n",
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_4339_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
CY_4339_F2_WATERMARK, &err);
@@ -4292,7 +4293,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
CY_4339_MESBUSYCTRL, &err);
break;
case SDIO_DEVICE_ID_BROADCOM_43455:
- brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes for 43455\n",
+ brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_43455_F2_WATERMARK);
brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK,
CY_43455_F2_WATERMARK, &err);
@@ -4305,9 +4306,7 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
CY_43455_MESBUSYCTRL, &err);
break;
case SDIO_DEVICE_ID_BROADCOM_4359:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4354:
- /* fallthrough */
case SDIO_DEVICE_ID_BROADCOM_4356:
brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n",
CY_435X_F2_WATERMARK);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
index 12108927fb50..15d2c02fa3ec 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h
@@ -372,7 +372,7 @@ int brcmf_sdiod_remove(struct brcmf_sdio_dev *sdiodev);
struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev);
void brcmf_sdio_remove(struct brcmf_sdio *bus);
-void brcmf_sdio_isr(struct brcmf_sdio *bus);
+void brcmf_sdio_isr(struct brcmf_sdio *bus, bool in_isr);
void brcmf_sdio_wd_timer(struct brcmf_sdio *bus, bool active);
void brcmf_sdio_wowl_config(struct device *dev, bool enabled);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index ac5463838fcf..586f4dfc638b 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -532,7 +532,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP ||
devinfo->bus_pub.state == BRCMFMAC_USB_STATE_SLEEP) {
skb_put(skb, urb->actual_length);
- brcmf_rx_frame(devinfo->dev, skb, true);
+ brcmf_rx_frame(devinfo->dev, skb, true, true);
brcmf_usb_rx_refill(devinfo, req);
usb_mark_last_busy(urb->dev);
} else {
@@ -1578,6 +1578,9 @@ void brcmf_usb_exit(void)
brcmf_dbg(USB, "Enter\n");
ret = driver_for_each_device(drv, NULL, NULL,
brcmf_usb_reset_device);
+ if (ret)
+ brcmf_err("failed to reset all usb devices %d\n", ret);
+
usb_deregister(&brcmf_usbdrvr);
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index fa391e4eb098..c9fb4b0cffaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -645,7 +645,7 @@ void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
u16 mimo_ctlchbw = PHY_TXC1_BW_20MHZ;
u32 rspec = 0, rspec_fallback = 0;
u32 rts_rspec = 0, rts_rspec_fallback = 0;
- u8 plcp0, plcp3, is40, sgi, mcs;
+ u8 plcp0, is40, mcs;
u16 mch;
u8 preamble_type = BRCMS_GF_PREAMBLE;
u8 fbr_preamble_type = BRCMS_GF_PREAMBLE;
@@ -704,15 +704,12 @@ void brcms_c_ampdu_finalize(struct brcms_ampdu_session *session)
txh->MacTxControlLow = cpu_to_le16(mcl);
fbr = txrate[1].count > 0;
- if (!fbr) {
+ if (!fbr)
plcp0 = plcp[0];
- plcp3 = plcp[3];
- } else {
+ else
plcp0 = txh->FragPLCPFallback[0];
- plcp3 = txh->FragPLCPFallback[3];
- }
+
is40 = (plcp0 & MIMO_PLCP_40MHZ) ? 1 : 0;
- sgi = plcp3_issgi(plcp3) ? 1 : 0;
mcs = plcp0 & ~MIMO_PLCP_40MHZ;
if (is40) {
@@ -850,10 +847,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
bool ba_recd = false, ack_recd = false;
u8 suc_mpdu = 0, tot_mpdu = 0;
uint supr_status;
- bool update_rate = true, retry = true, tx_error = false;
+ bool retry = true;
u16 mimoantsel = 0;
- u8 antselid = 0;
- u8 retry_limit, rr_retry_limit;
+ u8 retry_limit;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(p);
#ifdef DEBUG
@@ -866,15 +862,11 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ini = &scb_ampdu->ini[tid];
retry_limit = ampdu->retry_limit_tid[tid];
- rr_retry_limit = ampdu->rr_retry_limit_tid[tid];
memset(bitmap, 0, sizeof(bitmap));
queue = txs->frameid & TXFID_QUEUE_MASK;
supr_status = txs->status & TX_STATUS_SUPR_MASK;
if (txs->status & TX_STATUS_ACK_RCV) {
- if (TX_STATUS_SUPR_UF == supr_status)
- update_rate = false;
-
WARN_ON(!(txs->status & TX_STATUS_INTERMEDIATE));
start_seq = txs->sequence >> SEQNUM_SHIFT;
bitmap[0] = (txs->status & TX_STATUS_BA_BMAP03_MASK) >>
@@ -898,7 +890,6 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
ba_recd = true;
} else {
if (supr_status) {
- update_rate = false;
if (supr_status == TX_STATUS_SUPR_BADCH) {
brcms_dbg_ht(wlc->hw->d11core,
"%s: Pkt tx suppressed, illegal channel possibly %d\n",
@@ -923,11 +914,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
* if there were underflows, but pre-loading
* is not active, notify rate adaptation.
*/
- if (brcms_c_ffpld_check_txfunfl(wlc, queue) > 0)
- tx_error = true;
+ brcms_c_ffpld_check_txfunfl(wlc, queue);
}
} else if (txs->phyerr) {
- update_rate = false;
brcms_dbg_ht(wlc->hw->d11core,
"%s: ampdu tx phy error (0x%x)\n",
__func__, txs->phyerr);
@@ -1023,20 +1012,15 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
}
/* update rate state */
- antselid = brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
+ brcms_c_antsel_antsel2id(wlc->asi, mimoantsel);
}
void
brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
struct sk_buff *p, struct tx_status *txs)
{
- struct scb_ampdu *scb_ampdu;
struct brcms_c_info *wlc = ampdu->wlc;
- struct scb_ampdu_tid_ini *ini;
u32 s1 = 0, s2 = 0;
- struct ieee80211_tx_info *tx_info;
-
- tx_info = IEEE80211_SKB_CB(p);
/* BMAC_NOTE: For the split driver, second level txstatus comes later
* So if the ACK was received then wait for the second level else just
@@ -1060,8 +1044,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
}
if (scb) {
- scb_ampdu = &scb->scb_ampdu;
- ini = &scb_ampdu->ini[p->priority];
brcms_c_ampdu_dotxstatus_complete(ampdu, scb, p, txs, s1, s2);
} else {
/* loop through all pkts and free */
@@ -1069,7 +1051,6 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
struct d11txh *txh;
u16 mcl;
while (p) {
- tx_info = IEEE80211_SKB_CB(p);
txh = (struct d11txh *) p->data;
trace_brcms_txdesc(&wlc->hw->d11core->dev, txh,
sizeof(*txh));
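
The large brcmsmac deletions in this and the following files remove variables that were assigned but never read; W=1 builds flag these via -Wunused-but-set-variable. Where only the side effect of the initializer matters, the call is kept and the dead variable dropped, for example:

/* before: 'sflags' is written, never read, and warns under W=1 */
u32 sflags = bcma_aread32(core, BCMA_IOST);

/* after: keep the register read (it may have side effects),
 * drop the dead variable
 */
bcma_aread32(core, BCMA_IOST);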
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 648efcbc819f..818e523f6025 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -275,14 +275,13 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
}
}
-/**
+/*
* This function frees the WL per-device resources.
*
* This function frees resources owned by the WL device pointed to
* by the wl parameter.
*
* precondition: can be called both locked and unlocked
- *
*/
static void brcms_free(struct brcms_info *wl)
{
@@ -982,11 +981,11 @@ static const struct ieee80211_ops brcms_ops = {
.set_tim = brcms_ops_beacon_set_tim,
};
-void brcms_dpc(unsigned long data)
+void brcms_dpc(struct tasklet_struct *t)
{
struct brcms_info *wl;
- wl = (struct brcms_info *) data;
+ wl = from_tasklet(wl, t, tasklet);
spin_lock_bh(&wl->lock);
@@ -1115,7 +1114,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
return ieee_hw_rate_init(hw);
}
-/**
+/*
* attach to the WL device.
*
* Attach to the WL device identified by vendor and device parameters.
@@ -1149,7 +1148,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
init_waitqueue_head(&wl->tx_flush_wq);
/* setup the bottom half handler */
- tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
+ tasklet_setup(&wl->tasklet, brcms_dpc);
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
@@ -1210,7 +1209,7 @@ fail:
-/**
+/*
* determines if a device is a WL device, and if so, attaches it.
*
* This function determines if a device pointed to by pdev is a WL device,
@@ -1290,7 +1289,7 @@ static struct bcma_driver brcms_bcma_driver = {
.id_table = brcms_coreid_table,
};
-/**
+/*
* This is the main entry point for the brcmsmac driver.
*
* This function is scheduled upon module initialization and
@@ -1317,7 +1316,7 @@ static int __init brcms_module_init(void)
return 0;
}
-/**
+/*
* This function unloads the brcmsmac driver from the system.
*
* This function unconditionally unloads the brcmsmac driver module from the
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h
index 198053dfc310..eaf926a96a88 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.h
@@ -106,7 +106,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
void brcms_free_timer(struct brcms_timer *timer);
void brcms_add_timer(struct brcms_timer *timer, uint ms, int periodic);
bool brcms_del_timer(struct brcms_timer *timer);
-void brcms_dpc(unsigned long data);
+void brcms_dpc(struct tasklet_struct *t);
void brcms_timer(struct brcms_timer *t);
void brcms_fatal_error(struct brcms_info *wl);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 77494fc30c2c..763e0ec583d7 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -842,7 +842,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
uint supr_status;
bool lastframe;
struct ieee80211_hdr *h;
- u16 mcl;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *txrate;
int i;
@@ -879,7 +878,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
}
txh = (struct d11txh *) (p->data);
- mcl = le16_to_cpu(txh->MacTxControlLow);
if (txs->phyerr)
brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
@@ -1776,7 +1774,6 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
{
struct brcms_phy_pub *pih = wlc_hw->band->pi;
u32 phy_bw_clkbits;
- bool phy_in_reset = false;
brcms_dbg_info(wlc_hw->d11core, "wl%d: reset phy\n", wlc_hw->unit);
@@ -1799,7 +1796,6 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
/* reset the PHY */
brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
(SICF_PRST | SICF_PCLKE));
- phy_in_reset = true;
} else {
brcms_b_core_ioctl(wlc_hw,
(SICF_PRST | SICF_PCLKE | SICF_BWMASK),
@@ -2270,11 +2266,8 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
static void brcms_ucode_download(struct brcms_hardware *wlc_hw)
{
- struct brcms_c_info *wlc;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
- wlc = wlc_hw->wlc;
-
if (wlc_hw->ucode_loaded)
return;
@@ -3173,7 +3166,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
struct bcma_device *core = wlc_hw->d11core;
- u32 sflags;
u32 bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
@@ -3206,7 +3198,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
brcms_c_gpio_init(wlc);
- sflags = bcma_aread32(core, BCMA_IOST);
+ bcma_aread32(core, BCMA_IOST);
if (D11REV_IS(wlc_hw->corerev, 17) || D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3899,7 +3891,6 @@ static void brcms_c_setband(struct brcms_c_info *wlc,
static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
{
uint bandunit;
- bool switchband = false;
u16 old_chanspec = wlc->chanspec;
if (!brcms_c_valid_chanspec_db(wlc->cmi, chanspec)) {
@@ -3912,7 +3903,6 @@ static void brcms_c_set_chanspec(struct brcms_c_info *wlc, u16 chanspec)
if (wlc->pub->_nbands > 1) {
bandunit = chspec_bandunit(chanspec);
if (wlc->band->bandunit != bandunit || wlc->bandinit_pending) {
- switchband = true;
if (wlc->bandlocked) {
brcms_err(wlc->hw->d11core,
"wl%d: %s: chspec %d band is locked!\n",
@@ -5095,13 +5085,6 @@ int brcms_c_up(struct brcms_c_info *wlc)
return 0;
}
-static uint brcms_c_down_del_timer(struct brcms_c_info *wlc)
-{
- uint callbacks = 0;
-
- return callbacks;
-}
-
static int brcms_b_bmac_down_prep(struct brcms_hardware *wlc_hw)
{
bool dev_gone;
@@ -5179,7 +5162,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
uint callbacks = 0;
int i;
- bool dev_gone = false;
brcms_dbg_info(wlc->hw->d11core, "wl%d\n", wlc->pub->unit);
@@ -5197,7 +5179,7 @@ uint brcms_c_down(struct brcms_c_info *wlc)
callbacks += brcms_b_bmac_down_prep(wlc->hw);
- dev_gone = brcms_deviceremoved(wlc);
+ brcms_deviceremoved(wlc);
/* Call any registered down handlers */
for (i = 0; i < BRCMS_MAXMODULES; i++) {
@@ -5212,8 +5194,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
callbacks++;
wlc->WDarmed = false;
}
- /* cancel all other timers */
- callbacks += brcms_c_down_del_timer(wlc);
wlc->pub->up = false;
@@ -5390,15 +5370,7 @@ brcms_c_set_internal_rateset(struct brcms_c_info *wlc,
static void brcms_c_ofdm_rateset_war(struct brcms_c_info *wlc)
{
- u8 r;
- bool war = false;
-
- if (wlc->pub->associated)
- r = wlc->bsscfg->current_bss->rateset.rates[0];
- else
- r = wlc->default_bss->rateset.rates[0];
-
- wlc_phy_ofdm_rateset_war(wlc->band->pi, war);
+ wlc_phy_ofdm_rateset_war(wlc->band->pi, false);
}
int brcms_c_set_channel(struct brcms_c_info *wlc, u16 channel)
@@ -5873,7 +5845,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
bool issgi = ((int_val & NRATE_SGI_MASK) >> NRATE_SGI_SHIFT);
bool override_mcs_only = ((int_val & NRATE_OVERRIDE_MCS_ONLY)
== NRATE_OVERRIDE_MCS_ONLY);
- int bcmerror = 0;
if (!ismcs)
return (u32) rate;
@@ -5884,7 +5855,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
if (stf > PHY_TXC1_MODE_SDM) {
brcms_err(core, "wl%d: %s: Invalid stf\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
@@ -5895,7 +5865,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
&& (stf != PHY_TXC1_MODE_CDD))) {
brcms_err(core, "wl%d: %s: Invalid mcs 32\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
/* mcs > 7 must use stf SDM */
@@ -5917,7 +5886,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
&& (stf == PHY_TXC1_MODE_STBC))) {
brcms_err(core, "wl%d: %s: Invalid STBC\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
}
@@ -5925,7 +5893,6 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
if ((stf != PHY_TXC1_MODE_CDD) && (stf != PHY_TXC1_MODE_SISO)) {
brcms_err(core, "wl%d: %s: Invalid OFDM\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
} else if (is_cck_rate(rate)) {
@@ -5933,20 +5900,17 @@ mac80211_wlc_set_nrate(struct brcms_c_info *wlc, struct brcms_band *cur_band,
|| (stf != PHY_TXC1_MODE_SISO)) {
brcms_err(core, "wl%d: %s: Invalid CCK\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
} else {
brcms_err(core, "wl%d: %s: Unknown rate type\n",
wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
/* make sure multiple antennae are available for non-siso rates */
if ((stf != PHY_TXC1_MODE_SISO) && (wlc->stf->txstreams == 1)) {
brcms_err(core, "wl%d: %s: SISO antenna but !SISO "
"request\n", wlc->pub->unit, __func__);
- bcmerror = -EINVAL;
goto done;
}
@@ -6210,7 +6174,6 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
bool use_rts = false;
bool use_cts = false;
bool use_rifs = false;
- bool short_preamble[2] = { false, false };
u8 preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
u8 rts_preamble_type[2] = { BRCMS_LONG_PREAMBLE, BRCMS_LONG_PREAMBLE };
u8 *rts_plcp, rts_plcp_fallback[D11_PHY_HDR_LEN];
@@ -6296,10 +6259,6 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
rspec[k] =
hw->wiphy->bands[tx_info->band]->
bitrates[txrate[k]->idx].hw_value;
- short_preamble[k] =
- txrate[k]->
- flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ?
- true : false;
} else {
rspec[k] = BRCM_RATE_1M;
}
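
A pattern recurs throughout the mainc.c hunks: variables that are assigned but never read (sflags, phy_in_reset, switchband, dev_gone, short_preamble) are deleted, while reads with possible hardware side effects (bcma_aread32(), brcms_deviceremoved()) are kept as bare expression statements. A minimal sketch of that cleanup, assuming read_reg() as a stand-in for the real accessors:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio = 0xcafe;

/* Stand-in register accessor. Reading real hardware can have side
 * effects (e.g. clear-on-read status bits), so the call itself must
 * survive even when its result is unused. */
static uint32_t read_reg(void)
{
        uint32_t v = fake_mmio;

        fake_mmio = 0;          /* model a clear-on-read register */
        return v;
}

static void before(void)
{
        uint32_t status;

        status = read_reg();    /* dead store: triggers W=1 warnings */
        (void)status;
}

static void after(void)
{
        read_reg();             /* side effect kept, dead store gone */
}

int main(void)
{
        before();
        after();
        printf("remaining: %#x\n", (unsigned int)fake_mmio);
        return 0;
}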
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index 2441714169de..ccc621b8ed9f 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -1513,14 +1513,12 @@ static s8 wlc_phy_env_measure_temperature(struct brcms_phy *pi)
static void wlc_phy_upd_env_txpwr_rate_limits(struct brcms_phy *pi, u32 band)
{
u8 i;
- s8 temp, vbat;
for (i = 0; i < TXP_NUM_RATES; i++)
pi->txpwr_env_limit[i] = BRCMS_TXPWR_MAX;
- vbat = wlc_phy_env_measure_vbat(pi);
- temp = wlc_phy_env_measure_temperature(pi);
-
+ wlc_phy_env_measure_vbat(pi);
+ wlc_phy_env_measure_temperature(pi);
}
static s8
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index 7ef36234a25d..7717eb85a1db 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -357,61 +357,6 @@ u16 rxiq_cal_rf_reg[11] = {
RADIO_2064_REG12A,
};
-static const
-struct lcnphy_rx_iqcomp lcnphy_rx_iqcomp_table_rev0[] = {
- {1, 0, 0},
- {2, 0, 0},
- {3, 0, 0},
- {4, 0, 0},
- {5, 0, 0},
- {6, 0, 0},
- {7, 0, 0},
- {8, 0, 0},
- {9, 0, 0},
- {10, 0, 0},
- {11, 0, 0},
- {12, 0, 0},
- {13, 0, 0},
- {14, 0, 0},
- {34, 0, 0},
- {38, 0, 0},
- {42, 0, 0},
- {46, 0, 0},
- {36, 0, 0},
- {40, 0, 0},
- {44, 0, 0},
- {48, 0, 0},
- {52, 0, 0},
- {56, 0, 0},
- {60, 0, 0},
- {64, 0, 0},
- {100, 0, 0},
- {104, 0, 0},
- {108, 0, 0},
- {112, 0, 0},
- {116, 0, 0},
- {120, 0, 0},
- {124, 0, 0},
- {128, 0, 0},
- {132, 0, 0},
- {136, 0, 0},
- {140, 0, 0},
- {149, 0, 0},
- {153, 0, 0},
- {157, 0, 0},
- {161, 0, 0},
- {165, 0, 0},
- {184, 0, 0},
- {188, 0, 0},
- {192, 0, 0},
- {196, 0, 0},
- {200, 0, 0},
- {204, 0, 0},
- {208, 0, 0},
- {212, 0, 0},
- {216, 0, 0},
-};
-
static const u32 lcnphy_23bitgaincode_table[] = {
0x200100,
0x200200,
@@ -1363,7 +1308,7 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
u16 tx_pwr_ctrl;
u8 tx_gain_index_old = 0;
bool result = false, tx_gain_override_old = false;
- u16 i, Core1TxControl_old, RFOverride0_old,
+ u16 i, Core1TxControl_old,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
@@ -1404,7 +1349,7 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
or_phy_reg(pi, 0x631, 0x0015);
- RFOverride0_old = read_phy_reg(pi, 0x44c);
+ read_phy_reg(pi, 0x44c); /* RFOverride0_old */
RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
rfoverride2_old = read_phy_reg(pi, 0x4b0);
rfoverride2val_old = read_phy_reg(pi, 0x4b1);
@@ -1664,7 +1609,7 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
const struct chan_info_2064_lcnphy *ci;
u8 rfpll_doubler = 0;
u8 pll_pwrup, pll_pwrup_ovr;
- s32 qFxtal, qFref, qFvco, qFcal;
+ s32 qFcal;
u8 d15, d16, f16, e44, e45;
u32 div_int, div_frac, fvco3, fpfd, fref3, fcal_div;
u16 loop_bw, d30, setCount;
@@ -1738,10 +1683,7 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
fvco3 = (ci->freq * 3);
fref3 = 2 * fpfd;
- qFxtal = wlc_lcnphy_qdiv_roundup(pi->xtalfreq, PLL_2064_MHZ, 16);
- qFref = wlc_lcnphy_qdiv_roundup(fpfd, PLL_2064_MHZ, 16);
qFcal = pi->xtalfreq * fcal_div / PLL_2064_MHZ;
- qFvco = wlc_lcnphy_qdiv_roundup(fvco3, 2, 16);
write_radio_reg(pi, RADIO_2064_REG04F, 0x02);
@@ -2853,7 +2795,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
bool suspend, tx_gain_override_old;
struct lcnphy_txgains old_gains;
struct brcms_phy *pi = container_of(ppi, struct brcms_phy, pubpi_ro);
- u16 idleTssi, idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB,
+ u16 idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB,
idleTssi0_regvalue_2C;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
u16 SAVE_lpfgain = read_radio_reg(pi, RADIO_2064_REG112);
@@ -2863,7 +2805,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
- idleTssi = read_phy_reg(pi, 0x4ab);
+ read_phy_reg(pi, 0x4ab); /* idleTssi */
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
if (!suspend)
@@ -2887,8 +2829,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
wlc_lcnphy_set_bbmult(pi, 0x0);
wlc_phy_do_dummy_tx(pi, true, OFF);
- idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
- >> 0);
+ read_phy_reg(pi, 0x4ab); /* idleTssi */
idleTssi0_2C = ((read_phy_reg(pi, 0x63e) & (0x1ff << 0))
>> 0);
@@ -3858,8 +3799,6 @@ void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b)
static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
{
- struct lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;
-
wlc_lcnphy_set_cc(pi, 0, 0, 0);
wlc_lcnphy_set_cc(pi, 2, 0, 0);
wlc_lcnphy_set_cc(pi, 3, 0, 0);
@@ -3872,10 +3811,10 @@ static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
wlc_lcnphy_a1(pi, 2, 2, 1);
wlc_lcnphy_a1(pi, 0, 4, 3);
- iqcc0 = wlc_lcnphy_get_cc(pi, 0);
- locc2 = wlc_lcnphy_get_cc(pi, 2);
- locc3 = wlc_lcnphy_get_cc(pi, 3);
- locc4 = wlc_lcnphy_get_cc(pi, 4);
+ wlc_lcnphy_get_cc(pi, 0);
+ wlc_lcnphy_get_cc(pi, 2);
+ wlc_lcnphy_get_cc(pi, 3);
+ wlc_lcnphy_get_cc(pi, 4);
}
u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi)
@@ -4191,9 +4130,7 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
{
- bool suspend, full_cal;
- const struct lcnphy_rx_iqcomp *rx_iqcomp;
- int rx_iqcomp_sz;
+ bool suspend;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
s8 index;
struct phytbl_info tab;
@@ -4203,9 +4140,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
pi->phy_lastcal = pi->sh->now;
pi->phy_forcecal = false;
- full_cal =
- (pi_lcn->lcnphy_full_cal_channel !=
- CHSPEC_CHANNEL(pi->radio_chanspec));
pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
index = pi_lcn->lcnphy_current_index;
@@ -4220,9 +4154,6 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
wlc_lcnphy_txpwrtbl_iqlo_cal(pi);
- rx_iqcomp = lcnphy_rx_iqcomp_table_rev0;
- rx_iqcomp_sz = ARRAY_SIZE(lcnphy_rx_iqcomp_table_rev0);
-
if (LCNREV_IS(pi->pubpi.phy_rev, 1))
wlc_lcnphy_rx_iq_cal(pi, NULL, 0, true, false, 1, 40);
else
@@ -4916,10 +4847,6 @@ static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
offset_ofdm >>= 4;
}
} else {
- u8 opo = 0;
-
- opo = sprom->opo;
-
for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++)
pi->tx_srom_max_rate_2g[i] = txpwr;
@@ -5065,8 +4992,10 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
pi->pi_fptr.detach = wlc_phy_detach_lcnphy;
- if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
+ if (!wlc_phy_txpwr_srom_read_lcnphy(pi)) {
+ kfree(pi->u.pi_lcnphy);
return false;
+ }
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
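
The last phy_lcn.c hunk is a behavioural fix, not just a cleanup: wlc_phy_attach_lcnphy() now releases pi->u.pi_lcnphy before bailing out when the SROM read fails, closing a leak on the error path. A userspace sketch of the same pattern, with hypothetical attach()/srom_read() stand-ins:

#include <stdbool.h>
#include <stdlib.h>

struct phy {
        void *priv;
};

static bool srom_read(struct phy *pi)
{
        (void)pi;
        return false;           /* model the failing SROM read */
}

static bool attach(struct phy *pi)
{
        pi->priv = calloc(1, 64);
        if (!pi->priv)
                return false;

        if (!srom_read(pi)) {
                free(pi->priv); /* the fix: release on the failure path */
                pi->priv = NULL;
                return false;
        }
        return true;
}

int main(void)
{
        struct phy pi = { 0 };

        return attach(&pi) ? 0 : 1;
}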
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
index a3f094568cfb..8580a2754789 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c
@@ -19033,7 +19033,6 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
u32 nphy_adj_noise_var_buf[] = { 0x3ff, 0x3ff };
bool isAdjustNoiseVar = false;
uint numTonesAdjust = 0;
- u32 tempval = 0;
if (NREV_GE(pi->pubpi.phy_rev, 3)) {
if (pi->phyhang_avoid)
@@ -19139,9 +19138,6 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
numTonesAdjust,
nphy_adj_tone_id_buf,
nphy_adj_noise_var_buf);
-
- tempval = 0;
-
} else {
wlc_phy_adjust_min_noisevar_nphy(pi, 0, NULL,
NULL);
@@ -21980,7 +21976,7 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
u16 auxADC_rssi_ctrlL, auxADC_rssi_ctrlH;
s32 auxADC_Vl;
u16 RfctrlOverride5_save, RfctrlOverride6_save;
- u16 RfctrlMiscReg5_save, RfctrlMiscReg6_save;
+ u16 RfctrlMiscReg5_save;
u16 RSSIMultCoef0QPowerDet_save;
u16 tempsense_Rcal;
@@ -21995,7 +21991,7 @@ s16 wlc_phy_tempsense_nphy(struct brcms_phy *pi)
RfctrlOverride5_save = read_phy_reg(pi, 0x346);
RfctrlOverride6_save = read_phy_reg(pi, 0x347);
RfctrlMiscReg5_save = read_phy_reg(pi, 0x344);
- RfctrlMiscReg6_save = read_phy_reg(pi, 0x345);
+ read_phy_reg(pi, 0x345); /* RfctrlMiscReg6_save */
wlc_phy_table_read_nphy(pi, NPHY_TBL_ID_AFECTRL, 1, 0x0A, 16,
&auxADC_Vmid_save);
@@ -22983,7 +22979,7 @@ int
wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh)
{
s16 rxpwr, rxpwr0, rxpwr1;
- s16 phyRx0_l, phyRx2_l;
+ s16 phyRx2_l;
rxpwr = 0;
rxpwr0 = rxh->PhyRxStatus_1 & PRXS1_nphy_PWR0_MASK;
@@ -22994,7 +22990,6 @@ wlc_phy_rssi_compute_nphy(struct brcms_phy *pi, struct d11rxhdr *rxh)
if (rxpwr1 > 127)
rxpwr1 -= 256;
- phyRx0_l = rxh->PhyRxStatus_0 & 0x00ff;
phyRx2_l = rxh->PhyRxStatus_2 & 0x00ff;
if (phyRx2_l > 127)
phyRx2_l -= 256;
@@ -23097,8 +23092,7 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
u16 bb_mult;
u8 phy_bw, sample_cmd;
u16 orig_RfseqCoreActv;
- u16 lpf_bw_ctl_override3, lpf_bw_ctl_override4, lpf_bw_ctl_miscreg3,
- lpf_bw_ctl_miscreg4;
+ u16 lpf_bw_ctl_override3, lpf_bw_ctl_override4;
if (pi->phyhang_avoid)
wlc_phy_stay_in_carriersearch_nphy(pi, true);
@@ -23111,12 +23105,7 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
lpf_bw_ctl_override3 = read_phy_reg(pi, 0x342) & (0x1 << 7);
lpf_bw_ctl_override4 = read_phy_reg(pi, 0x343) & (0x1 << 7);
- if (lpf_bw_ctl_override3 | lpf_bw_ctl_override4) {
- lpf_bw_ctl_miscreg3 = read_phy_reg(pi, 0x340) &
- (0x7 << 8);
- lpf_bw_ctl_miscreg4 = read_phy_reg(pi, 0x341) &
- (0x7 << 8);
- } else {
+ if (!(lpf_bw_ctl_override3 | lpf_bw_ctl_override4)) {
wlc_phy_rfctrl_override_nphy_rev7(
pi,
(0x1 << 7),
@@ -23126,12 +23115,9 @@ wlc_phy_runsamples_nphy(struct brcms_phy *pi, u16 num_samps, u16 loops,
NPHY_REV7_RFCTRLOVERRIDE_ID1);
pi->nphy_sample_play_lpf_bw_ctl_ovr = true;
-
- lpf_bw_ctl_miscreg3 = read_phy_reg(pi, 0x340) &
- (0x7 << 8);
- lpf_bw_ctl_miscreg4 = read_phy_reg(pi, 0x341) &
- (0x7 << 8);
}
+ read_phy_reg(pi, 0x340); /* lpf_bw_ctl_miscreg3 */
+ read_phy_reg(pi, 0x341); /* lpf_bw_ctl_miscreg4 */
}
if ((pi->nphy_bb_mult_save & BB_MULT_VALID_MASK) == 0) {
@@ -23403,7 +23389,6 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
struct nphy_iqcal_params *params)
{
u8 k;
- int idx;
u16 gain_index;
u8 band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
@@ -23436,13 +23421,10 @@ wlc_phy_iqcal_gainparams_nphy(struct brcms_phy *pi, u16 core_no,
(target_gain.pga[core_no] << 4) |
(target_gain.txgm[core_no] << 8));
- idx = -1;
for (k = 0; k < NPHY_IQCAL_NUMGAINS; k++) {
if (tbl_iqcal_gainparams_nphy[band_idx][k][0] ==
- gain_index) {
- idx = k;
+ gain_index)
break;
- }
}
params->txgm = tbl_iqcal_gainparams_nphy[band_idx][k][1];
@@ -24704,7 +24686,6 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
{
u16 phy_a1, phy_a2, phy_a3;
u16 phy_a4, phy_a5;
- bool phy_a6;
u8 phy_a7, m[2];
u32 phy_a8 = 0;
struct nphy_txgains phy_a9;
@@ -24714,9 +24695,6 @@ wlc_phy_a2_nphy(struct brcms_phy *pi, struct nphy_ipa_txcalgains *txgains,
phy_a7 = (core == PHY_CORE_0) ? 1 : 0;
- phy_a6 = ((cal_mode == CAL_GCTRL)
- || (cal_mode == CAL_SOFT)) ? true : false;
-
if (NREV_GE(pi->pubpi.phy_rev, 7)) {
phy_a9 = wlc_phy_get_tx_gain_nphy(pi);
@@ -24996,7 +24974,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
s32 phy_a7, phy_a8;
u32 phy_a9;
int phy_a10;
- bool phy_a11 = false;
int phy_a12;
u8 phy_a13 = 0;
u8 phy_a14;
@@ -25064,8 +25041,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
if (!phy_a6 && (phy_a3 != phy_a5)) {
if (!phy_a3)
phy_a12 -= (u8) phy_a1;
-
- phy_a11 = true;
break;
}
@@ -25079,8 +25054,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
phy_a12 = phy_a14;
else
phy_a12 = phy_a13;
-
- phy_a11 = true;
break;
}
@@ -25110,8 +25083,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
if (!phy_a6 && (phy_a3 != phy_a5)) {
if (!phy_a3)
phy_a12 -= (u8) phy_a1;
-
- phy_a11 = true;
break;
}
@@ -25125,8 +25096,6 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
phy_a12 = 0;
else
phy_a12 = 127;
-
- phy_a11 = true;
break;
}
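
In wlc_phy_iqcal_gainparams_nphy() the separate idx bookkeeping disappears because the loop counter k already indexes the matching row after break. A small sketch of that idiom (table contents are invented); note the caller still has to handle the no-match case where the counter reaches the array size:

#include <stdio.h>

#define NUM_GAINS 4

static const unsigned short tbl[NUM_GAINS][2] = {
        { 0x10, 1 }, { 0x20, 2 }, { 0x30, 3 }, { 0x40, 4 },
};

int main(void)
{
        unsigned short gain_index = 0x30;
        unsigned int k;

        /* after the break, k indexes the match; if nothing matched,
         * k == NUM_GAINS and must not be used to index the table */
        for (k = 0; k < NUM_GAINS; k++)
                if (tbl[k][0] == gain_index)
                        break;

        if (k < NUM_GAINS)
                printf("param = %d\n", tbl[k][1]);
        return 0;
}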
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
index be703be34616..5331b5468e14 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -105,105 +105,6 @@ static const u32 dot11lcn_gain_tbl_rev0[] = {
0x00000000,
};
-static const u32 dot11lcn_gain_tbl_rev1[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000D,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x000000d1,
- 0x00000053,
- 0x00000093,
- 0x000000d3,
- 0x000000d7,
- 0x00000117,
- 0x00000517,
- 0x00000917,
- 0x00000957,
- 0x00000d57,
- 0x00001157,
- 0x00001197,
- 0x00005197,
- 0x00009197,
- 0x0000d197,
- 0x00011197,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000008,
- 0x00000004,
- 0x00000008,
- 0x00000001,
- 0x00000005,
- 0x00000009,
- 0x0000000D,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x00000011,
- 0x00000051,
- 0x00000091,
- 0x000000d1,
- 0x00000053,
- 0x00000093,
- 0x000000d3,
- 0x000000d7,
- 0x00000117,
- 0x00000517,
- 0x00000917,
- 0x00000957,
- 0x00000d57,
- 0x00001157,
- 0x00005157,
- 0x00009157,
- 0x0000d157,
- 0x00011157,
- 0x00015157,
- 0x00019157,
- 0x0001d157,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
-
static const u16 dot11lcn_aux_gain_idx_tbl_rev0[] = {
0x0401,
0x0402,
@@ -1507,19 +1408,6 @@ const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
,
};
-static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
- {&dot11lcn_gain_tbl_rev1,
- ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18,
- 0, 32}
- ,
- {&dot11lcn_aux_gain_idx_tbl_rev0,
- ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
- ,
- {&dot11lcn_gain_idx_tbl_rev0,
- ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
- ,
-};
-
const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
{&dot11lcn_gain_tbl_2G,
ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0,
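
The rev1 gain table and its dot11lcnphytbl_rx_gain_info_rev1[] descriptor array are deleted as a pair: these PHY tables are only reachable through phytbl_info descriptors, so once no descriptor references a table it is dead data. A sketch of the descriptor pattern (field names follow the kernel struct loosely; values are invented):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct phytbl_info {
        const void *tbl_ptr;
        unsigned int tbl_len;
        unsigned int tbl_id;
        unsigned int tbl_offset;
        unsigned int tbl_width;
};

static const unsigned int gain_tbl[] = { 0x0, 0x8, 0x11, 0x51 };

/* a table is live only while some descriptor points at it */
static const struct phytbl_info rx_gain_info[] = {
        { gain_tbl, ARRAY_SIZE(gain_tbl), 18, 0, 32 },
};

int main(void)
{
        printf("%u entries in table id %u\n",
               rx_gain_info[0].tbl_len, rx_gain_info[0].tbl_id);
        return 0;
}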
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
index 7607e67d20c7..396d005f4d16 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phytbl_n.c
@@ -9014,274 +9014,6 @@ static const u16 papd_comp_rfpwr_tbl_core1_rev3[] = {
0x01d6,
};
-static const u32 papd_comp_epsilon_tbl_core0_rev3[] = {
- 0x00000000,
- 0x00001fa0,
- 0x00019f78,
- 0x0001df7e,
- 0x03fa9f86,
- 0x03fd1f90,
- 0x03fe5f8a,
- 0x03fb1f94,
- 0x03fd9fa0,
- 0x00009f98,
- 0x03fd1fac,
- 0x03ff9fa2,
- 0x03fe9fae,
- 0x00001fae,
- 0x03fddfb4,
- 0x03ff1fb8,
- 0x03ff9fbc,
- 0x03ffdfbe,
- 0x03fe9fc2,
- 0x03fedfc6,
- 0x03fedfc6,
- 0x03ff9fc8,
- 0x03ff5fc6,
- 0x03fedfc2,
- 0x03ff9fc0,
- 0x03ff5fac,
- 0x03ff5fac,
- 0x03ff9fa2,
- 0x03ff9fa6,
- 0x03ff9faa,
- 0x03ff5fb0,
- 0x03ff5fb4,
- 0x03ff1fca,
- 0x03ff5fce,
- 0x03fcdfdc,
- 0x03fb4006,
- 0x00000030,
- 0x03ff808a,
- 0x03ff80da,
- 0x0000016c,
- 0x03ff8318,
- 0x03ff063a,
- 0x03fd8bd6,
- 0x00014ffe,
- 0x00034ffe,
- 0x00034ffe,
- 0x0003cffe,
- 0x00040ffe,
- 0x00040ffe,
- 0x0003cffe,
- 0x0003cffe,
- 0x00020ffe,
- 0x03fe0ffe,
- 0x03fdcffe,
- 0x03f94ffe,
- 0x03f54ffe,
- 0x03f44ffe,
- 0x03ef8ffe,
- 0x03ee0ffe,
- 0x03ebcffe,
- 0x03e8cffe,
- 0x03e74ffe,
- 0x03e4cffe,
- 0x03e38ffe,
-};
-
-static const u32 papd_cal_scalars_tbl_core0_rev3[] = {
- 0x05af005a,
- 0x0571005e,
- 0x05040066,
- 0x04bd006c,
- 0x047d0072,
- 0x04430078,
- 0x03f70081,
- 0x03cb0087,
- 0x03870091,
- 0x035e0098,
- 0x032e00a1,
- 0x030300aa,
- 0x02d800b4,
- 0x02ae00bf,
- 0x028900ca,
- 0x026400d6,
- 0x024100e3,
- 0x022200f0,
- 0x020200ff,
- 0x01e5010e,
- 0x01ca011e,
- 0x01b0012f,
- 0x01990140,
- 0x01830153,
- 0x016c0168,
- 0x0158017d,
- 0x01450193,
- 0x013301ab,
- 0x012101c5,
- 0x011101e0,
- 0x010201fc,
- 0x00f4021a,
- 0x00e6011d,
- 0x00d9012e,
- 0x00cd0140,
- 0x00c20153,
- 0x00b70167,
- 0x00ac017c,
- 0x00a30193,
- 0x009a01ab,
- 0x009101c4,
- 0x008901df,
- 0x008101fb,
- 0x007a0219,
- 0x00730239,
- 0x006d025b,
- 0x0067027e,
- 0x006102a4,
- 0x005c02cc,
- 0x005602f6,
- 0x00520323,
- 0x004d0353,
- 0x00490385,
- 0x004503bb,
- 0x004103f3,
- 0x003d042f,
- 0x003a046f,
- 0x003704b2,
- 0x003404f9,
- 0x00310545,
- 0x002e0596,
- 0x002b05f5,
- 0x00290640,
- 0x002606a4,
-};
-
-static const u32 papd_comp_epsilon_tbl_core1_rev3[] = {
- 0x00000000,
- 0x00001fa0,
- 0x00019f78,
- 0x0001df7e,
- 0x03fa9f86,
- 0x03fd1f90,
- 0x03fe5f8a,
- 0x03fb1f94,
- 0x03fd9fa0,
- 0x00009f98,
- 0x03fd1fac,
- 0x03ff9fa2,
- 0x03fe9fae,
- 0x00001fae,
- 0x03fddfb4,
- 0x03ff1fb8,
- 0x03ff9fbc,
- 0x03ffdfbe,
- 0x03fe9fc2,
- 0x03fedfc6,
- 0x03fedfc6,
- 0x03ff9fc8,
- 0x03ff5fc6,
- 0x03fedfc2,
- 0x03ff9fc0,
- 0x03ff5fac,
- 0x03ff5fac,
- 0x03ff9fa2,
- 0x03ff9fa6,
- 0x03ff9faa,
- 0x03ff5fb0,
- 0x03ff5fb4,
- 0x03ff1fca,
- 0x03ff5fce,
- 0x03fcdfdc,
- 0x03fb4006,
- 0x00000030,
- 0x03ff808a,
- 0x03ff80da,
- 0x0000016c,
- 0x03ff8318,
- 0x03ff063a,
- 0x03fd8bd6,
- 0x00014ffe,
- 0x00034ffe,
- 0x00034ffe,
- 0x0003cffe,
- 0x00040ffe,
- 0x00040ffe,
- 0x0003cffe,
- 0x0003cffe,
- 0x00020ffe,
- 0x03fe0ffe,
- 0x03fdcffe,
- 0x03f94ffe,
- 0x03f54ffe,
- 0x03f44ffe,
- 0x03ef8ffe,
- 0x03ee0ffe,
- 0x03ebcffe,
- 0x03e8cffe,
- 0x03e74ffe,
- 0x03e4cffe,
- 0x03e38ffe,
-};
-
-static const u32 papd_cal_scalars_tbl_core1_rev3[] = {
- 0x05af005a,
- 0x0571005e,
- 0x05040066,
- 0x04bd006c,
- 0x047d0072,
- 0x04430078,
- 0x03f70081,
- 0x03cb0087,
- 0x03870091,
- 0x035e0098,
- 0x032e00a1,
- 0x030300aa,
- 0x02d800b4,
- 0x02ae00bf,
- 0x028900ca,
- 0x026400d6,
- 0x024100e3,
- 0x022200f0,
- 0x020200ff,
- 0x01e5010e,
- 0x01ca011e,
- 0x01b0012f,
- 0x01990140,
- 0x01830153,
- 0x016c0168,
- 0x0158017d,
- 0x01450193,
- 0x013301ab,
- 0x012101c5,
- 0x011101e0,
- 0x010201fc,
- 0x00f4021a,
- 0x00e6011d,
- 0x00d9012e,
- 0x00cd0140,
- 0x00c20153,
- 0x00b70167,
- 0x00ac017c,
- 0x00a30193,
- 0x009a01ab,
- 0x009101c4,
- 0x008901df,
- 0x008101fb,
- 0x007a0219,
- 0x00730239,
- 0x006d025b,
- 0x0067027e,
- 0x006102a4,
- 0x005c02cc,
- 0x005602f6,
- 0x00520323,
- 0x004d0353,
- 0x00490385,
- 0x004503bb,
- 0x004103f3,
- 0x003d042f,
- 0x003a046f,
- 0x003704b2,
- 0x003404f9,
- 0x00310545,
- 0x002e0596,
- 0x002b05f5,
- 0x00290640,
- 0x002606a4,
-};
-
const struct phytbl_info mimophytbl_info_rev3_volatile[] = {
{&ant_swctrl_tbl_rev3, ARRAY_SIZE(ant_swctrl_tbl_rev3), 9, 0, 16},
};
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 316672486d82..87b9398b03fd 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -321,8 +321,8 @@ static int do8bitIO /* = 0 */;
#define CMD_DELTLV 0x002b
#define CMD_FINDNEXTTLV 0x002c
#define CMD_PSPNODES 0x0030
-#define CMD_SETCW 0x0031
-#define CMD_SETPCF 0x0032
+#define CMD_SETCW 0x0031
+#define CMD_SETPCF 0x0032
#define CMD_SETPHYREG 0x003e
#define CMD_TXTEST 0x003f
#define MAC_ENABLETX 0x0101
@@ -433,7 +433,7 @@ static int do8bitIO /* = 0 */;
#define STATUS_INTS (EV_AWAKE|EV_LINK|EV_TXEXC|EV_TX|EV_TXCPY|EV_RX|EV_MIC)
#ifdef CHECK_UNKNOWN_INTS
-#define IGNORE_INTS ( EV_CMD | EV_UNKNOWN)
+#define IGNORE_INTS (EV_CMD | EV_UNKNOWN)
#else
#define IGNORE_INTS (~STATUS_INTS)
#endif
@@ -1107,9 +1107,9 @@ static const char version[] = "airo.c 0.6 (Ben Reed & Javier Achirica)";
struct airo_info;
-static int get_dec_u16( char *buffer, int *start, int limit );
-static void OUT4500( struct airo_info *, u16 reg, u16 value );
-static unsigned short IN4500( struct airo_info *, u16 reg );
+static int get_dec_u16(char *buffer, int *start, int limit);
+static void OUT4500(struct airo_info *, u16 reg, u16 value);
+static unsigned short IN4500(struct airo_info *, u16 reg);
static u16 setup_card(struct airo_info*, u8 *mac, int lock);
static int enable_MAC(struct airo_info *ai, int lock);
static void disable_MAC(struct airo_info *ai, int lock);
@@ -1127,24 +1127,24 @@ static int PC4500_accessrid(struct airo_info*, u16 rid, u16 accmd);
static int PC4500_readrid(struct airo_info*, u16 rid, void *pBuf, int len, int lock);
static int PC4500_writerid(struct airo_info*, u16 rid, const void
*pBuf, int len, int lock);
-static int do_writerid( struct airo_info*, u16 rid, const void *rid_data,
- int len, int dummy );
+static int do_writerid(struct airo_info*, u16 rid, const void *rid_data,
+ int len, int dummy);
static u16 transmit_allocate(struct airo_info*, int lenPayload, int raw);
static int transmit_802_3_packet(struct airo_info*, int len, char *pPacket);
static int transmit_802_11_packet(struct airo_info*, int len, char *pPacket);
-static int mpi_send_packet (struct net_device *dev);
+static int mpi_send_packet(struct net_device *dev);
static void mpi_unmap_card(struct pci_dev *pci);
static void mpi_receive_802_3(struct airo_info *ai);
static void mpi_receive_802_11(struct airo_info *ai);
-static int waitbusy (struct airo_info *ai);
+static int waitbusy(struct airo_info *ai);
-static irqreturn_t airo_interrupt( int irq, void* dev_id);
+static irqreturn_t airo_interrupt(int irq, void* dev_id);
static int airo_thread(void *data);
-static void timer_func( struct net_device *dev );
+static void timer_func(struct net_device *dev);
static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-static struct iw_statistics *airo_get_wireless_stats (struct net_device *dev);
-static void airo_read_wireless_stats (struct airo_info *local);
+static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev);
+static void airo_read_wireless_stats(struct airo_info *local);
#ifdef CISCO_EXT
static int readrids(struct net_device *dev, aironet_ioctl *comp);
static int writerids(struct net_device *dev, aironet_ioctl *comp);
@@ -1155,8 +1155,8 @@ static int micsetup(struct airo_info *ai);
static int encapsulate(struct airo_info *ai, etherHead *pPacket, MICBuffer *buffer, int len);
static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *pPacket, u16 payLen);
-static u8 airo_rssi_to_dbm (tdsRssiEntry *rssi_rid, u8 rssi);
-static u8 airo_dbm_to_pct (tdsRssiEntry *rssi_rid, u8 dbm);
+static u8 airo_rssi_to_dbm(tdsRssiEntry *rssi_rid, u8 rssi);
+static u8 airo_dbm_to_pct(tdsRssiEntry *rssi_rid, u8 dbm);
static void airo_networks_free(struct airo_info *ai);
@@ -1261,16 +1261,16 @@ static inline int bap_read(struct airo_info *ai, __le16 *pu16Dst, int bytelen,
return ai->bap_read(ai, pu16Dst, bytelen, whichbap);
}
-static int setup_proc_entry( struct net_device *dev,
- struct airo_info *apriv );
-static int takedown_proc_entry( struct net_device *dev,
- struct airo_info *apriv );
+static int setup_proc_entry(struct net_device *dev,
+ struct airo_info *apriv);
+static int takedown_proc_entry(struct net_device *dev,
+ struct airo_info *apriv);
static int cmdreset(struct airo_info *ai);
-static int setflashmode (struct airo_info *ai);
-static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime);
+static int setflashmode(struct airo_info *ai);
+static int flashgchar(struct airo_info *ai, int matchbyte, int dwelltime);
static int flashputbuf(struct airo_info *ai);
-static int flashrestart(struct airo_info *ai,struct net_device *dev);
+static int flashrestart(struct airo_info *ai, struct net_device *dev);
#define airo_print(type, name, fmt, args...) \
printk(type DRV_NAME "(%s): " fmt "\n", name, ##args)
@@ -1294,14 +1294,14 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev);
***********************************************************************
*/
-static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq);
+static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq);
static void MoveWindow(miccntx *context, u32 micSeq);
static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
struct crypto_sync_skcipher *tfm);
static void emmh32_init(emmh32_context *context);
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len);
static void emmh32_final(emmh32_context *context, u8 digest[4]);
-static int flashpchar(struct airo_info *ai,int byte,int dwelltime);
+static int flashpchar(struct airo_info *ai, int byte, int dwelltime);
static void age_mic_context(miccntx *cur, miccntx *old, u8 *key, int key_len,
struct crypto_sync_skcipher *tfm)
@@ -1361,7 +1361,8 @@ static void micinit(struct airo_info *ai)
/* micsetup - Get ready for business */
-static int micsetup(struct airo_info *ai) {
+static int micsetup(struct airo_info *ai)
+{
int i;
if (ai->tfm == NULL)
@@ -1373,32 +1374,32 @@ static int micsetup(struct airo_info *ai) {
return ERROR;
}
- for (i=0; i < NUM_MODULES; i++) {
- memset(&ai->mod[i].mCtx,0,sizeof(miccntx));
- memset(&ai->mod[i].uCtx,0,sizeof(miccntx));
+ for (i = 0; i < NUM_MODULES; i++) {
+ memset(&ai->mod[i].mCtx, 0, sizeof(miccntx));
+ memset(&ai->mod[i].uCtx, 0, sizeof(miccntx));
}
return SUCCESS;
}
-static const u8 micsnap[] = {0xAA,0xAA,0x03,0x00,0x40,0x96,0x00,0x02};
+static const u8 micsnap[] = {0xAA, 0xAA, 0x03, 0x00, 0x40, 0x96, 0x00, 0x02};
/*===========================================================================
* Description: Mic a packet
- *
+ *
* Inputs: etherHead * pointer to an 802.3 frame
- *
+ *
* Returns: BOOLEAN if successful, otherwise false.
 * PacketTxLen will be updated with the mic'd packet's size.
*
* Caveats: It is assumed that the frame buffer will already
 * be big enough to hold the largest mic message possible.
* (No memory allocation is done here).
- *
+ *
* Author: sbraneky (10/15/01)
* Merciless hacks by rwilcher (1/14/02)
*/
-static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, int payLen)
+static int encapsulate(struct airo_info *ai, etherHead *frame, MICBuffer *mic, int payLen)
{
miccntx *context;
@@ -1409,7 +1410,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
context = &ai->mod[0].mCtx;
else
context = &ai->mod[0].uCtx;
-
+
if (!context->valid)
return ERROR;
@@ -1422,10 +1423,10 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
context->tx += 2;
emmh32_init(&context->seed); // Mic the packet
- emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
- emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
- emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
+ emmh32_update(&context->seed, frame->da, ETH_ALEN * 2); // DA, SA
+ emmh32_update(&context->seed, (u8*)&mic->typelen, 10); // Type/Length and Snap
+ emmh32_update(&context->seed, (u8*)&mic->seq, sizeof(mic->seq)); //SEQ
+ emmh32_update(&context->seed, (u8*)(frame + 1), payLen); //payload
emmh32_final(&context->seed, (u8*)&mic->mic);
/* New Type/length ?????????? */
@@ -1444,11 +1445,11 @@ typedef enum {
/*===========================================================================
* Description: Decapsulates a MIC'd packet and returns the 802.3 packet
* (removes the MIC stuff) if packet is a valid packet.
- *
- * Inputs: etherHead pointer to the 802.3 packet
- *
+ *
+ * Inputs: etherHead pointer to the 802.3 packet
+ *
* Returns: BOOLEAN - TRUE if packet should be dropped otherwise FALSE
- *
+ *
* Author: sbraneky (10/15/01)
* Merciless hacks by rwilcher (1/14/02)
*---------------------------------------------------------------------------
@@ -1488,35 +1489,35 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
//Now do the mic error checking.
//Receive seq must be odd
- if ( (micSEQ & 1) == 0 ) {
+ if ((micSEQ & 1) == 0) {
ai->micstats.rxWrongSequence++;
return ERROR;
}
for (i = 0; i < NUM_MODULES; i++) {
int mcast = eth->da[0] & 1;
- //Determine proper context
+ //Determine proper context
context = mcast ? &ai->mod[i].mCtx : &ai->mod[i].uCtx;
-
+
//Make sure context is valid
if (!context->valid) {
if (i == 0)
micError = NOMICPLUMMED;
- continue;
+ continue;
}
- //DeMic it
+ //DeMic it
if (!mic->typelen)
mic->typelen = htons(payLen + sizeof(MICBuffer) - 2);
-
+
emmh32_init(&context->seed);
- emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
- emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
- emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
- emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);
+ emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
+ emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
+ emmh32_update(&context->seed, (u8 *)&mic->seq, sizeof(mic->seq));
+ emmh32_update(&context->seed, (u8 *)(eth + 1), payLen);
//Calculate MIC
emmh32_final(&context->seed, digest);
-
+
if (memcmp(digest, &mic->mic, 4)) { //Make sure the mics match
//Invalid Mic
if (i == 0)
@@ -1547,22 +1548,22 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
/*===========================================================================
* Description: Checks the Rx Seq number to make sure it is valid
* and hasn't already been received
- *
+ *
* Inputs: miccntx - mic context to check seq against
* micSeq - the Mic seq number
- *
- * Returns: TRUE if valid otherwise FALSE.
+ *
+ * Returns: TRUE if valid otherwise FALSE.
*
* Author: sbraneky (10/15/01)
* Merciless hacks by rwilcher (1/14/02)
*---------------------------------------------------------------------------
*/
-static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSeq)
+static int RxSeqValid(struct airo_info *ai, miccntx *context, int mcast, u32 micSeq)
{
- u32 seq,index;
+ u32 seq, index;
- //Allow for the ap being rebooted - if it is then use the next
+ //Allow for the ap being rebooted - if it is then use the next
//sequence number of the current sequence number - might go backwards
if (mcast) {
@@ -1583,10 +1584,10 @@ static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSe
//Too old of a SEQ number to check.
if ((s32)seq < 0)
return ERROR;
-
- if ( seq > 64 ) {
+
+ if (seq > 64) {
//Window is infinite forward
- MoveWindow(context,micSeq);
+ MoveWindow(context, micSeq);
return SUCCESS;
}
@@ -1599,7 +1600,7 @@ static int RxSeqValid (struct airo_info *ai,miccntx *context,int mcast,u32 micSe
	//Add sequence number to the list of received numbers.
context->rx |= index;
- MoveWindow(context,micSeq);
+ MoveWindow(context, micSeq);
return SUCCESS;
}
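
RxSeqValid()/MoveWindow(), reformatted above, implement replay protection with a sliding bitmap window over MIC sequence numbers: duplicates within the window are rejected, and the window slides forward as newer sequences arrive. A simplified, self-contained model of that scheme — the window geometry and helper names are illustrative, not the driver's exact arithmetic:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct seq_ctx {
        uint32_t window;        /* centre of the window */
        uint32_t rx;            /* bitmap of recently seen offsets */
};

/* slide the window forward when a newer sequence arrives */
static void move_window(struct seq_ctx *c, uint32_t seq)
{
        if (seq > c->window) {
                uint32_t shift = (seq - c->window) >> 1;

                c->rx = (shift < 32) ? c->rx >> shift : 0;
                c->window = seq;
        }
}

static bool seq_valid(struct seq_ctx *c, uint32_t seq)
{
        int32_t off;

        if ((int32_t)(seq - c->window) / 2 + 16 < 0)
                return false;           /* too old to track */

        move_window(c, seq);            /* resync on newer sequences */
        off = (int32_t)(seq - c->window) / 2 + 16;

        if (c->rx & (1u << (off & 31)))
                return false;           /* duplicate: replay rejected */
        c->rx |= 1u << (off & 31);
        return true;
}

int main(void)
{
        struct seq_ctx c = { .window = 33, .rx = 0 };

        /* first delivery accepted, exact replay rejected: prints "1 0" */
        printf("%d %d\n", seq_valid(&c, 35), seq_valid(&c, 35));
        return 0;
}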
@@ -1613,7 +1614,7 @@ static void MoveWindow(miccntx *context, u32 micSeq)
//Move window if seq greater than the middle of the window
if (micSeq > context->window) {
shift = (micSeq - context->window) >> 1;
-
+
//Shift out old
if (shift < 32)
context->rx >>= shift;
@@ -1638,7 +1639,7 @@ static void emmh32_setseed(emmh32_context *context, u8 *pkey, int keylen,
{
/* take the keying material, expand if necessary, truncate at 16-bytes */
/* run through AES counter mode to generate context->coeff[] */
-
+
SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct scatterlist sg;
u8 iv[AES_BLOCK_SIZE] = {};
@@ -1669,11 +1670,11 @@ static void emmh32_init(emmh32_context *context)
static void emmh32_update(emmh32_context *context, u8 *pOctets, int len)
{
int coeff_position, byte_position;
-
+
if (len == 0) return;
-
+
coeff_position = context->position >> 2;
-
+
/* deal with partial 32-bit word left over from last update */
byte_position = context->position & 3;
if (byte_position) {
@@ -1712,12 +1713,12 @@ static void emmh32_final(emmh32_context *context, u8 digest[4])
{
int coeff_position, byte_position;
u32 val;
-
+
u64 sum, utmp;
s64 stmp;
coeff_position = context->position >> 2;
-
+
/* deal with partial 32-bit word left over from last update */
byte_position = context->position & 3;
if (byte_position) {
@@ -1750,7 +1751,7 @@ static int readBSSListRid(struct airo_info *ai, int first,
if (first == 1) {
if (ai->flags & FLAG_RADIO_MASK) return -ENETDOWN;
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
+ cmd.cmd = CMD_LISTBSS;
if (down_interruptible(&ai->sem))
return -ERESTARTSYS;
ai->list_bss_task = current;
@@ -1815,7 +1816,7 @@ static inline void checkThrottle(struct airo_info *ai)
int i;
/* Old hardware had a limit on encryption speed */
if (ai->config.authType != AUTH_OPEN && maxencrypt) {
- for(i=0; i<8; i++) {
+ for (i = 0; i<8; i++) {
if (ai->config.rates[i] > maxencrypt) {
ai->config.rates[i] = 0;
}
@@ -1840,7 +1841,7 @@ static int writeConfigRid(struct airo_info *ai, int lock)
else
clear_bit(FLAG_ADHOC, &ai->flags);
- return PC4500_writerid( ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
+ return PC4500_writerid(ai, RID_CONFIG, &cfgr, sizeof(cfgr), lock);
}
static int readStatusRid(struct airo_info *ai, StatusRid *statr, int lock)
@@ -1871,7 +1872,8 @@ static void try_auto_wep(struct airo_info *ai)
}
}
-static int airo_open(struct net_device *dev) {
+static int airo_open(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
int rc = 0;
@@ -1947,7 +1949,7 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&ai->aux_lock, flags);
skb_queue_tail (&ai->txq, skb);
pending = test_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock,flags);
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
netif_wake_queue (dev);
if (pending == 0) {
@@ -2096,7 +2098,8 @@ static void get_tx_error(struct airo_info *ai, s32 fid)
}
}
-static void airo_end_xmit(struct net_device *dev) {
+static void airo_end_xmit(struct net_device *dev)
+{
u16 status;
int i;
struct airo_info *priv = dev->ml_priv;
@@ -2110,7 +2113,7 @@ static void airo_end_xmit(struct net_device *dev) {
up(&priv->sem);
i = 0;
- if ( status == SUCCESS ) {
+ if (status == SUCCESS) {
netif_trans_update(dev);
for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
} else {
@@ -2130,7 +2133,7 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
struct airo_info *priv = dev->ml_priv;
u32 *fids = priv->fids;
- if ( skb == NULL ) {
+ if (skb == NULL) {
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
@@ -2140,10 +2143,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
}
/* Find a vacant FID */
- for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
- for( j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++ );
+ for (i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++);
+ for (j = i + 1; j < MAX_FIDS / 2 && (fids[j] & 0xffff0000); j++);
- if ( j >= MAX_FIDS / 2 ) {
+ if (j >= MAX_FIDS / 2) {
netif_stop_queue(dev);
if (i == MAX_FIDS / 2) {
@@ -2167,7 +2170,8 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static void airo_end_xmit11(struct net_device *dev) {
+static void airo_end_xmit11(struct net_device *dev)
+{
u16 status;
int i;
struct airo_info *priv = dev->ml_priv;
@@ -2181,7 +2185,7 @@ static void airo_end_xmit11(struct net_device *dev) {
up(&priv->sem);
i = MAX_FIDS / 2;
- if ( status == SUCCESS ) {
+ if (status == SUCCESS) {
netif_trans_update(dev);
for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
} else {
@@ -2208,7 +2212,7 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- if ( skb == NULL ) {
+ if (skb == NULL) {
airo_print_err(dev->name, "%s: skb == NULL!", __func__);
return NETDEV_TX_OK;
}
@@ -2218,10 +2222,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
}
/* Find a vacant FID */
- for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
- for( j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++ );
+ for (i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++);
+ for (j = i + 1; j < MAX_FIDS && (fids[j] & 0xffff0000); j++);
- if ( j >= MAX_FIDS ) {
+ if (j >= MAX_FIDS) {
netif_stop_queue(dev);
if (i == MAX_FIDS) {
@@ -2295,19 +2299,21 @@ static struct net_device_stats *airo_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void airo_set_promisc(struct airo_info *ai) {
+static void airo_set_promisc(struct airo_info *ai)
+{
Cmd cmd;
Resp rsp;
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_SETMODE;
+ cmd.cmd = CMD_SETMODE;
clear_bit(JOB_PROMISC, &ai->jobs);
cmd.parm0=(ai->flags&IFF_PROMISC) ? PROMISC : NOPROMISC;
issuecommand(ai, &cmd, &rsp);
up(&ai->sem);
}
-static void airo_set_multicast_list(struct net_device *dev) {
+static void airo_set_multicast_list(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
if ((dev->flags ^ ai->flags) & IFF_PROMISC) {
@@ -2357,7 +2363,8 @@ static void del_airo_dev(struct airo_info *ai)
list_del(&ai->dev_list);
}
-static int airo_close(struct net_device *dev) {
+static int airo_close(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
netif_stop_queue(dev);
@@ -2372,7 +2379,7 @@ static int airo_close(struct net_device *dev) {
set_bit(FLAG_RADIO_DOWN, &ai->flags);
disable_MAC(ai, 1);
#endif
- disable_interrupts( ai );
+ disable_interrupts(ai);
free_irq(dev->irq, dev);
@@ -2382,16 +2389,16 @@ static int airo_close(struct net_device *dev) {
return 0;
}
-void stop_airo_card( struct net_device *dev, int freeres )
+void stop_airo_card(struct net_device *dev, int freeres)
{
struct airo_info *ai = dev->ml_priv;
set_bit(FLAG_RADIO_DOWN, &ai->flags);
disable_MAC(ai, 1);
disable_interrupts(ai);
- takedown_proc_entry( dev, ai );
+ takedown_proc_entry(dev, ai);
if (test_bit(FLAG_REGISTERED, &ai->flags)) {
- unregister_netdev( dev );
+ unregister_netdev(dev);
if (ai->wifidev) {
unregister_netdev(ai->wifidev);
free_netdev(ai->wifidev);
@@ -2415,7 +2422,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
kfree(ai->SSID);
if (freeres) {
/* PCMCIA frees this stuff, so only for PCI and ISA */
- release_region( dev->base_addr, 64 );
+ release_region(dev->base_addr, 64);
if (test_bit(FLAG_MPI, &ai->flags)) {
if (ai->pci)
mpi_unmap_card(ai->pci);
@@ -2423,13 +2430,13 @@ void stop_airo_card( struct net_device *dev, int freeres )
iounmap(ai->pcimem);
if (ai->pciaux)
iounmap(ai->pciaux);
- pci_free_consistent(ai->pci, PCI_SHARED_LEN,
- ai->shared, ai->shared_dma);
+ dma_free_coherent(&ai->pci->dev, PCI_SHARED_LEN,
+ ai->shared, ai->shared_dma);
}
}
crypto_free_sync_skcipher(ai->tfm);
del_airo_dev(ai);
- free_netdev( dev );
+ free_netdev(dev);
}
EXPORT_SYMBOL(stop_airo_card);
@@ -2468,56 +2475,56 @@ static int mpi_init_descriptors (struct airo_info *ai)
/* Alloc card RX descriptors */
netif_stop_queue(ai->dev);
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_ALLOCATEAUX;
cmd.parm0 = FID_RX;
cmd.parm1 = (ai->rxfids[0].card_ram_off - ai->pciaux);
cmd.parm2 = MPI_MAX_FIDS;
- rc=issuecommand(ai, &cmd, &rsp);
+ rc = issuecommand(ai, &cmd, &rsp);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate RX FID");
return rc;
}
- for (i=0; i<MPI_MAX_FIDS; i++) {
+ for (i = 0; i<MPI_MAX_FIDS; i++) {
memcpy_toio(ai->rxfids[i].card_ram_off,
&ai->rxfids[i].rx_desc, sizeof(RxFid));
}
/* Alloc card TX descriptors */
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_ALLOCATEAUX;
cmd.parm0 = FID_TX;
cmd.parm1 = (ai->txfids[0].card_ram_off - ai->pciaux);
cmd.parm2 = MPI_MAX_FIDS;
- for (i=0; i<MPI_MAX_FIDS; i++) {
+ for (i = 0; i<MPI_MAX_FIDS; i++) {
ai->txfids[i].tx_desc.valid = 1;
memcpy_toio(ai->txfids[i].card_ram_off,
&ai->txfids[i].tx_desc, sizeof(TxFid));
}
ai->txfids[i-1].tx_desc.eoc = 1; /* Last descriptor has EOC set */
- rc=issuecommand(ai, &cmd, &rsp);
+ rc = issuecommand(ai, &cmd, &rsp);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate TX FID");
return rc;
}
/* Alloc card Rid descriptor */
- memset(&rsp,0,sizeof(rsp));
- memset(&cmd,0,sizeof(cmd));
+ memset(&rsp, 0, sizeof(rsp));
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_ALLOCATEAUX;
cmd.parm0 = RID_RW;
cmd.parm1 = (ai->config_desc.card_ram_off - ai->pciaux);
cmd.parm2 = 1; /* Magic number... */
- rc=issuecommand(ai, &cmd, &rsp);
+ rc = issuecommand(ai, &cmd, &rsp);
if (rc != SUCCESS) {
airo_print_err(ai->dev->name, "Couldn't allocate RID");
return rc;
@@ -2574,9 +2581,10 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
}
/* Reserve PKTSIZE for each fid and 2K for the Rids */
- ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
+ ai->shared = dma_alloc_coherent(&pci->dev, PCI_SHARED_LEN,
+ &ai->shared_dma, GFP_KERNEL);
if (!ai->shared) {
- airo_print_err("", "Couldn't alloc_consistent %d",
+ airo_print_err("", "Couldn't alloc_coherent %d",
PCI_SHARED_LEN);
goto free_auxmap;
}
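
Besides the style cleanups, this file migrates off the legacy PCI DMA wrappers. pci_alloc_consistent() was a thin shim over the generic DMA API that passed GFP_ATOMIC unconditionally; expanding it at the call site makes the device pointer and the (now sleepable) allocation context explicit. A reference mapping taken from the hunks above, not compilable outside a kernel tree:

/* before: pci_dev-specific wrapper, implicit GFP_ATOMIC */
ai->shared = pci_alloc_consistent(pci, PCI_SHARED_LEN, &ai->shared_dma);
pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);

/* after: generic DMA API on the struct device, explicit context */
ai->shared = dma_alloc_coherent(&pci->dev, PCI_SHARED_LEN,
                                &ai->shared_dma, GFP_KERNEL);
dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared, ai->shared_dma);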
@@ -2589,7 +2597,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
vpackoff = ai->shared;
/* RX descriptor setup */
- for(i = 0; i < MPI_MAX_FIDS; i++) {
+ for (i = 0; i < MPI_MAX_FIDS; i++) {
ai->rxfids[i].pending = 0;
ai->rxfids[i].card_ram_off = pciaddroff;
ai->rxfids[i].virtual_host_addr = vpackoff;
@@ -2604,7 +2612,7 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
}
/* TX descriptor setup */
- for(i = 0; i < MPI_MAX_FIDS; i++) {
+ for (i = 0; i < MPI_MAX_FIDS; i++) {
ai->txfids[i].card_ram_off = pciaddroff;
ai->txfids[i].virtual_host_addr = vpackoff;
ai->txfids[i].tx_desc.valid = 1;
@@ -2636,7 +2644,8 @@ static int mpi_map_card(struct airo_info *ai, struct pci_dev *pci)
return 0;
free_shared:
- pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
+ dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared,
+ ai->shared_dma);
free_auxmap:
iounmap(ai->pciaux);
free_memmap:
@@ -2674,7 +2683,7 @@ static void wifi_setup(struct net_device *dev)
dev->min_mtu = 68;
dev->max_mtu = MIC_MSGLEN_MAX;
dev->addr_len = ETH_ALEN;
- dev->tx_queue_len = 100;
+ dev->tx_queue_len = 100;
eth_broadcast_addr(dev->broadcast);
@@ -2703,13 +2712,14 @@ static struct net_device *init_wifidev(struct airo_info *ai,
return dev;
}
-static int reset_card( struct net_device *dev , int lock) {
+static int reset_card(struct net_device *dev, int lock)
+{
struct airo_info *ai = dev->ml_priv;
if (lock && down_interruptible(&ai->sem))
return -1;
waitbusy (ai);
- OUT4500(ai,COMMAND,CMD_SOFTRESET);
+ OUT4500(ai, COMMAND, CMD_SOFTRESET);
msleep(200);
waitbusy (ai);
msleep(200);
@@ -2774,9 +2784,9 @@ static const struct net_device_ops mpi_netdev_ops = {
};
-static struct net_device *_init_airo_card( unsigned short irq, int port,
+static struct net_device *_init_airo_card(unsigned short irq, int port,
int is_pcmcia, struct pci_dev *pci,
- struct device *dmdev )
+ struct device *dmdev)
{
struct net_device *dev;
struct airo_info *ai;
@@ -2849,7 +2859,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
if (probe) {
if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
- airo_print_err(dev->name, "MAC could not be enabled" );
+ airo_print_err(dev->name, "MAC could not be enabled");
rc = -EIO;
goto err_out_map;
}
@@ -2907,8 +2917,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
/* Allocate the transmit buffers */
if (probe && !test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ )
- ai->fids[i] = transmit_allocate(ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
+ for (i = 0; i < MAX_FIDS; i++)
+ ai->fids[i] = transmit_allocate(ai, AIRO_DEF_MTU, i>=MAX_FIDS/2);
if (setup_proc_entry(dev, dev->ml_priv) < 0)
goto err_out_wifi;
@@ -2922,14 +2932,15 @@ err_out_reg:
unregister_netdev(dev);
err_out_map:
if (test_bit(FLAG_MPI,&ai->flags) && pci) {
- pci_free_consistent(pci, PCI_SHARED_LEN, ai->shared, ai->shared_dma);
+ dma_free_coherent(&pci->dev, PCI_SHARED_LEN, ai->shared,
+ ai->shared_dma);
iounmap(ai->pciaux);
iounmap(ai->pcimem);
mpi_unmap_card(ai->pci);
}
err_out_res:
if (!is_pcmcia)
- release_region( dev->base_addr, 64 );
+ release_region(dev->base_addr, 64);
err_out_nets:
airo_networks_free(ai);
err_out_free:
@@ -2938,15 +2949,16 @@ err_out_free:
return NULL;
}
-struct net_device *init_airo_card( unsigned short irq, int port, int is_pcmcia,
+struct net_device *init_airo_card(unsigned short irq, int port, int is_pcmcia,
struct device *dmdev)
{
- return _init_airo_card ( irq, port, is_pcmcia, NULL, dmdev);
+ return _init_airo_card (irq, port, is_pcmcia, NULL, dmdev);
}
EXPORT_SYMBOL(init_airo_card);
-static int waitbusy (struct airo_info *ai) {
+static int waitbusy (struct airo_info *ai)
+{
int delay = 0;
while ((IN4500(ai, COMMAND) & COMMAND_BUSY) && (delay < 10000)) {
udelay (10);
@@ -2956,7 +2968,7 @@ static int waitbusy (struct airo_info *ai) {
return delay < 10000;
}
-int reset_airo_card( struct net_device *dev )
+int reset_airo_card(struct net_device *dev)
{
int i;
struct airo_info *ai = dev->ml_priv;
@@ -2964,24 +2976,25 @@ int reset_airo_card( struct net_device *dev )
if (reset_card (dev, 1))
return -1;
- if ( setup_card(ai, dev->dev_addr, 1 ) != SUCCESS ) {
+ if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
airo_print_err(dev->name, "MAC could not be enabled");
return -1;
}
airo_print_info(dev->name, "MAC enabled %pM", dev->dev_addr);
/* Allocate the transmit buffers if needed */
if (!test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ )
- ai->fids[i] = transmit_allocate (ai,AIRO_DEF_MTU,i>=MAX_FIDS/2);
+ for (i = 0; i < MAX_FIDS; i++)
+ ai->fids[i] = transmit_allocate (ai, AIRO_DEF_MTU, i>=MAX_FIDS/2);
- enable_interrupts( ai );
+ enable_interrupts(ai);
netif_wake_queue(dev);
return 0;
}
EXPORT_SYMBOL(reset_airo_card);
-static void airo_send_event(struct net_device *dev) {
+static void airo_send_event(struct net_device *dev)
+{
struct airo_info *ai = dev->ml_priv;
union iwreq_data wrqu;
StatusRid status_rid;
@@ -2998,7 +3011,8 @@ static void airo_send_event(struct net_device *dev) {
wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL);
}
-static void airo_process_scan_results (struct airo_info *ai) {
+static void airo_process_scan_results (struct airo_info *ai)
+{
union iwreq_data wrqu;
BSSListRid bss;
int rc;
@@ -3014,14 +3028,14 @@ static void airo_process_scan_results (struct airo_info *ai) {
/* Try to read the first entry of the scan result */
rc = PC4500_readrid(ai, ai->bssListFirst, &bss, ai->bssListRidLen, 0);
- if((rc) || (bss.index == cpu_to_le16(0xffff))) {
+ if ((rc) || (bss.index == cpu_to_le16(0xffff))) {
/* No scan results */
goto out;
}
/* Read and parse all entries */
tmp_net = NULL;
- while((!rc) && (bss.index != cpu_to_le16(0xffff))) {
+ while ((!rc) && (bss.index != cpu_to_le16(0xffff))) {
/* Grab a network off the free list */
if (!list_empty(&ai->network_free_list)) {
tmp_net = list_entry(ai->network_free_list.next,
@@ -3062,13 +3076,14 @@ out:
wireless_send_event(ai->dev, SIOCGIWSCAN, &wrqu, NULL);
}
-static int airo_thread(void *data) {
+static int airo_thread(void *data)
+{
struct net_device *dev = data;
struct airo_info *ai = dev->ml_priv;
int locked;
set_freezable();
- while(1) {
+ while (1) {
/* make swsusp happy with our thread */
try_to_freeze();
@@ -3088,11 +3103,11 @@ static int airo_thread(void *data) {
break;
if (ai->expires || ai->scan_timeout) {
if (ai->scan_timeout &&
- time_after_eq(jiffies,ai->scan_timeout)){
+ time_after_eq(jiffies, ai->scan_timeout)) {
set_bit(JOB_SCAN_RESULTS, &ai->jobs);
break;
} else if (ai->expires &&
- time_after_eq(jiffies,ai->expires)){
+ time_after_eq(jiffies, ai->expires)) {
set_bit(JOB_AUTOWEP, &ai->jobs);
break;
}
@@ -3442,11 +3457,11 @@ static void airo_handle_tx(struct airo_info *ai, u16 status)
spin_lock_irqsave(&ai->aux_lock, flags);
if (!skb_queue_empty(&ai->txq)) {
- spin_unlock_irqrestore(&ai->aux_lock,flags);
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
mpi_send_packet(ai->dev);
} else {
clear_bit(FLAG_PENDING_XMIT, &ai->flags);
- spin_unlock_irqrestore(&ai->aux_lock,flags);
+ spin_unlock_irqrestore(&ai->aux_lock, flags);
netif_wake_queue(ai->dev);
}
OUT4500(ai, EVACK, status & (EV_TX | EV_TXCPY | EV_TXEXC));
@@ -3526,9 +3541,9 @@ static irqreturn_t airo_interrupt(int irq, void *dev_id)
if (status & (EV_TX | EV_TXCPY | EV_TXEXC))
airo_handle_tx(ai, status);
- if ( status & ~STATUS_INTS & ~IGNORE_INTS ) {
+ if (status & ~STATUS_INTS & ~IGNORE_INTS) {
airo_print_warn(ai->dev->name, "Got weird status %x",
- status & ~STATUS_INTS & ~IGNORE_INTS );
+ status & ~STATUS_INTS & ~IGNORE_INTS);
}
}
@@ -3547,27 +3562,29 @@ static irqreturn_t airo_interrupt(int irq, void *dev_id)
 * NOTE: If used with 8bit mode and SMP, bad things will happen!
 * Why would someone do 8 bit IO in an SMP machine?!?
*/
-static void OUT4500( struct airo_info *ai, u16 reg, u16 val ) {
+static void OUT4500(struct airo_info *ai, u16 reg, u16 val)
+{
if (test_bit(FLAG_MPI,&ai->flags))
reg <<= 1;
- if ( !do8bitIO )
- outw( val, ai->dev->base_addr + reg );
+ if (!do8bitIO)
+ outw(val, ai->dev->base_addr + reg);
else {
- outb( val & 0xff, ai->dev->base_addr + reg );
- outb( val >> 8, ai->dev->base_addr + reg + 1 );
+ outb(val & 0xff, ai->dev->base_addr + reg);
+ outb(val >> 8, ai->dev->base_addr + reg + 1);
}
}
-static u16 IN4500( struct airo_info *ai, u16 reg ) {
+static u16 IN4500(struct airo_info *ai, u16 reg)
+{
unsigned short rc;
if (test_bit(FLAG_MPI,&ai->flags))
reg <<= 1;
- if ( !do8bitIO )
- rc = inw( ai->dev->base_addr + reg );
+ if (!do8bitIO)
+ rc = inw(ai->dev->base_addr + reg);
else {
- rc = inb( ai->dev->base_addr + reg );
- rc += ((int)inb( ai->dev->base_addr + reg + 1 )) << 8;
+ rc = inb(ai->dev->base_addr + reg);
+ rc += ((int)inb(ai->dev->base_addr + reg + 1)) << 8;
}
return rc;
}
@@ -3611,7 +3628,8 @@ static int enable_MAC(struct airo_info *ai, int lock)
return rc;
}
-static void disable_MAC( struct airo_info *ai, int lock ) {
+static void disable_MAC(struct airo_info *ai, int lock)
+{
Cmd cmd;
Resp rsp;
@@ -3630,13 +3648,15 @@ static void disable_MAC( struct airo_info *ai, int lock ) {
up(&ai->sem);
}
-static void enable_interrupts( struct airo_info *ai ) {
+static void enable_interrupts(struct airo_info *ai)
+{
/* Enable the interrupts */
- OUT4500( ai, EVINTEN, STATUS_INTS );
+ OUT4500(ai, EVINTEN, STATUS_INTS);
}
-static void disable_interrupts( struct airo_info *ai ) {
- OUT4500( ai, EVINTEN, 0 );
+static void disable_interrupts(struct airo_info *ai)
+{
+ OUT4500(ai, EVINTEN, 0);
}
static void mpi_receive_802_3(struct airo_info *ai)
@@ -3660,7 +3680,7 @@ static void mpi_receive_802_3(struct airo_info *ai)
ai->dev->stats.rx_dropped++;
goto badrx;
}
- buffer = skb_put(skb,len);
+ buffer = skb_put(skb, len);
memcpy(buffer, ai->rxfids[0].virtual_host_addr, ETH_ALEN * 2);
if (ai->micstats.enabled) {
memcpy(&micbuf,
@@ -3739,8 +3759,8 @@ static void mpi_receive_802_11(struct airo_info *ai)
fc = get_unaligned((__le16 *)ptr);
hdrlen = header_len(fc);
- skb = dev_alloc_skb( len + hdrlen + 2 );
- if ( !skb ) {
+ skb = dev_alloc_skb(len + hdrlen + 2);
+ if (!skb) {
ai->dev->stats.rx_dropped++;
goto badrx;
}
@@ -3784,7 +3804,7 @@ static void mpi_receive_802_11(struct airo_info *ai)
skb->dev = ai->wifidev;
skb->protocol = htons(ETH_P_802_2);
skb->ip_summed = CHECKSUM_NONE;
- netif_rx( skb );
+ netif_rx(skb);
badrx:
if (rxd.valid == 0) {
@@ -3815,7 +3835,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
WepKeyRid wkr;
int rc;
- memset( &mySsid, 0, sizeof( mySsid ) );
+ memset(&mySsid, 0, sizeof(mySsid));
kfree (ai->flash);
ai->flash = NULL;
@@ -3824,12 +3844,12 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
cmd.parm0 = cmd.parm1 = cmd.parm2 = 0;
if (lock && down_interruptible(&ai->sem))
return ERROR;
- if ( issuecommand( ai, &cmd, &rsp ) != SUCCESS ) {
+ if (issuecommand(ai, &cmd, &rsp) != SUCCESS) {
if (lock)
up(&ai->sem);
return ERROR;
}
- disable_MAC( ai, 0);
+ disable_MAC(ai, 0);
// Let's figure out if we need to use the AUX port
if (!test_bit(FLAG_MPI,&ai->flags)) {
@@ -3859,13 +3879,13 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
ai->SSID = NULL;
// general configuration (read/modify/write)
status = readConfigRid(ai, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
status = readCapabilityRid(ai, &cap_rid, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
- status = PC4500_readrid(ai,RID_RSSI,&rssi_rid,sizeof(rssi_rid),lock);
- if ( status == SUCCESS ) {
+ status = PC4500_readrid(ai, RID_RSSI,&rssi_rid, sizeof(rssi_rid), lock);
+ if (status == SUCCESS) {
if (ai->rssi || (ai->rssi = kmalloc(512, GFP_KERNEL)) != NULL)
memcpy(ai->rssi, (u8*)&rssi_rid + 2, 512); /* Skip RID length member */
}
@@ -3890,15 +3910,15 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
}
/* Save off the MAC */
- for( i = 0; i < ETH_ALEN; i++ ) {
+ for (i = 0; i < ETH_ALEN; i++) {
mac[i] = ai->config.macAddr[i];
}
/* Check to see if there are any insmod configured
rates to add */
- if ( rates[0] ) {
- memset(ai->config.rates,0,sizeof(ai->config.rates));
- for( i = 0; i < 8 && rates[i]; i++ ) {
+ if (rates[0]) {
+ memset(ai->config.rates, 0, sizeof(ai->config.rates));
+ for (i = 0; i < 8 && rates[i]; i++) {
ai->config.rates[i] = rates[i];
}
}
@@ -3906,9 +3926,9 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
}
/* Setup the SSIDs if present */
- if ( ssids[0] ) {
+ if (ssids[0]) {
int i;
- for( i = 0; i < 3 && ssids[i]; i++ ) {
+ for (i = 0; i < 3 && ssids[i]; i++) {
size_t len = strlen(ssids[i]);
if (len > 32)
len = 32;
@@ -3919,12 +3939,12 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
}
status = writeConfigRid(ai, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
/* Set up the SSID list */
- if ( ssids[0] ) {
+ if (ssids[0]) {
status = writeSsidRid(ai, &mySsid, lock);
- if ( status != SUCCESS ) return ERROR;
+ if (status != SUCCESS) return ERROR;
}
status = enable_MAC(ai, lock);
@@ -3939,14 +3959,15 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
ai->defindex = wkr.mac[0];
}
rc = readWepKeyRid(ai, &wkr, 0, lock);
- } while(lastindex != wkr.kindex);
+ } while (lastindex != wkr.kindex);
try_auto_wep(ai);
return SUCCESS;
}
-static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
+static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp)
+{
// Im really paranoid about letting it run forever!
int max_tries = 600000;
@@ -3966,7 +3987,7 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
schedule();
}
- if ( max_tries == -1 ) {
+ if (max_tries == -1) {
airo_print_err(ai->dev->name,
"Max tries exceeded when issuing command");
if (IN4500(ai, COMMAND) & COMMAND_BUSY)
@@ -3998,7 +4019,7 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
/* Sets up the bap to start exchange data. whichbap should
* be one of the BAP0 or BAP1 defines. Locks should be held before
* calling! */
-static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
+static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap)
{
int timeout = 50;
int max_tries = 3;
@@ -4013,15 +4034,15 @@ static int bap_setup(struct airo_info *ai, u16 rid, u16 offset, int whichbap )
if (timeout--) {
continue;
}
- } else if ( status & BAP_ERR ) {
+ } else if (status & BAP_ERR) {
/* invalid rid or offset */
airo_print_err(ai->dev->name, "BAP error %x %d",
- status, whichbap );
+ status, whichbap);
return ERROR;
} else if (status & BAP_DONE) { // success
return SUCCESS;
}
- if ( !(max_tries--) ) {
+ if (!(max_tries--)) {
airo_print_err(ai->dev->name,
"BAP setup error too many retries\n");
return ERROR;
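bap_setup() only positions the buffer access path at a RID offset; the bytes then move through the bap_read/bap_write helpers below against the same BAP. A hedged sketch of the usual read sequence, loosely following PC4500_readrid() with locking and error handling elided:

    /* Illustrative only: point BAP1 at the start of a RID, then
     * stream the record into buf.
     */
    if (bap_setup(ai, rid, 0, BAP1) != SUCCESS)
            return ERROR;
    ai->bap_read(ai, (__le16 *)buf, buf_len, BAP1);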
@@ -4067,15 +4088,15 @@ static int aux_bap_read(struct airo_info *ai, __le16 *pu16Dst,
next = aux_setup(ai, page, offset, &len);
words = (bytelen+1)>>1;
- for (i=0; i<words;) {
+ for (i = 0; i < words;) {
int count;
count = (len>>1) < (words-i) ? (len>>1) : (words-i);
- if ( !do8bitIO )
- insw( ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i,count );
+ if (!do8bitIO)
+ insw(ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i, count);
else
- insb( ai->dev->base_addr+DATA0+whichbap,
- pu16Dst+i, count << 1 );
+ insb(ai->dev->base_addr+DATA0+whichbap,
+ pu16Dst+i, count << 1);
i += count;
if (i<words) {
next = aux_setup(ai, next, 4, &len);
@@ -4091,10 +4112,10 @@ static int fast_bap_read(struct airo_info *ai, __le16 *pu16Dst,
int bytelen, int whichbap)
{
bytelen = (bytelen + 1) & (~1); // round up to even value
- if ( !do8bitIO )
- insw( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1 );
+ if (!do8bitIO)
+ insw(ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen>>1);
else
- insb( ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen );
+ insb(ai->dev->base_addr+DATA0+whichbap, pu16Dst, bytelen);
return SUCCESS;
}
@@ -4103,11 +4124,11 @@ static int bap_write(struct airo_info *ai, const __le16 *pu16Src,
int bytelen, int whichbap)
{
bytelen = (bytelen + 1) & (~1); // round up to even value
- if ( !do8bitIO )
- outsw( ai->dev->base_addr+DATA0+whichbap,
- pu16Src, bytelen>>1 );
+ if (!do8bitIO)
+ outsw(ai->dev->base_addr+DATA0+whichbap,
+ pu16Src, bytelen>>1);
else
- outsb( ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen );
+ outsb(ai->dev->base_addr+DATA0+whichbap, pu16Src, bytelen);
return SUCCESS;
}
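The `(bytelen + 1) & (~1)` in both helpers rounds an odd byte count up to the next even value, so the 16-bit insw/outsw path never drops a trailing byte. The idiom in isolation:

    static inline int round_up_even(int bytelen)
    {
            return (bytelen + 1) & ~1;      /* 5 -> 6, 6 -> 6, 7 -> 8 */
    }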
@@ -4122,7 +4143,7 @@ static int PC4500_accessrid(struct airo_info *ai, u16 rid, u16 accmd)
cmd.parm0 = rid;
status = issuecommand(ai, &cmd, &rsp);
if (status != 0) return status;
- if ( (rsp.status & 0x7F00) != 0) {
+ if ((rsp.status & 0x7F00) != 0) {
return (accmd << 8) + (rsp.rsp0 & 0xFF);
}
return 0;
@@ -4177,10 +4198,10 @@ static int PC4500_readrid(struct airo_info *ai, u16 rid, void *pBuf, int len, in
// length for remaining part of rid
len = min(len, (int)le16_to_cpu(*(__le16*)pBuf)) - 2;
- if ( len <= 2 ) {
+ if (len <= 2) {
airo_print_err(ai->dev->name,
"Rid %x has a length of %d which is too short",
- (int)rid, (int)len );
+ (int)rid, (int)len);
rc = ERROR;
goto done;
}
@@ -4248,7 +4269,7 @@ static int PC4500_writerid(struct airo_info *ai, u16 rid,
}
} else {
// --- first access so that we can write the rid data
- if ( (status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
+ if ((status = PC4500_accessrid(ai, rid, CMD_ACCESS)) != 0) {
rc = status;
goto done;
}
@@ -4285,7 +4306,7 @@ static u16 transmit_allocate(struct airo_info *ai, int lenPayload, int raw)
txFid = ERROR;
goto done;
}
- if ( (rsp.status & 0xFF00) != 0) {
+ if ((rsp.status & 0xFF00) != 0) {
txFid = ERROR;
goto done;
}
@@ -4344,9 +4365,9 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
}
len -= ETH_ALEN * 2;
- if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
+ if (test_bit(FLAG_MIC_CAPABLE, &ai->flags) && ai->micstats.enabled &&
(ntohs(((__be16 *)pPacket)[6]) != 0x888E)) {
- if (encapsulate(ai,(etherHead *)pPacket,&pMic,len) != SUCCESS)
+ if (encapsulate(ai, (etherHead *)pPacket, &pMic, len) != SUCCESS)
return ERROR;
miclen = sizeof(pMic);
}
@@ -4356,17 +4377,17 @@ static int transmit_802_3_packet(struct airo_info *ai, int len, char *pPacket)
/* The hardware addresses aren't counted as part of the payload, so
* we have to subtract the 12 bytes for the addresses off */
payloadLen = cpu_to_le16(len + miclen);
- bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
+ bap_write(ai, &payloadLen, sizeof(payloadLen), BAP1);
bap_write(ai, (__le16*)pPacket, sizeof(etherHead), BAP1);
if (miclen)
bap_write(ai, (__le16*)&pMic, miclen, BAP1);
bap_write(ai, (__le16*)(pPacket + sizeof(etherHead)), len, BAP1);
// issue the transmit command
- memset( &cmd, 0, sizeof( cmd ) );
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_TRANSMIT;
cmd.parm0 = txFid;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
- if ( (rsp.status & 0xFF00) != 0) return ERROR;
+ if ((rsp.status & 0xFF00) != 0) return ERROR;
return SUCCESS;
}
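Since len was already reduced by ETH_ALEN * 2 above, the length word pushed to the card counts only payload plus any appended MIC. As a worked line (frame_len and mic_len are illustrative names):

    int frame_len = 60, mic_len = 0;
    __le16 payloadLen = cpu_to_le16(frame_len - ETH_ALEN * 2 + mic_len);
    /* 60 - 12 + 0 = 48 payload bytes reported to the card */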
@@ -4395,18 +4416,18 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
/* The 802.11 header aren't counted as part of the payload, so
* we have to subtract the header bytes off */
payloadLen = cpu_to_le16(len-hdrlen);
- bap_write(ai, &payloadLen, sizeof(payloadLen),BAP1);
+ bap_write(ai, &payloadLen, sizeof(payloadLen), BAP1);
if (bap_setup(ai, txFid, 0x0014, BAP1) != SUCCESS) return ERROR;
bap_write(ai, (__le16 *)pPacket, hdrlen, BAP1);
bap_write(ai, (__le16 *)(tail + (hdrlen - 10)), 38 - hdrlen, BAP1);
bap_write(ai, (__le16 *)(pPacket + hdrlen), len - hdrlen, BAP1);
// issue the transmit command
- memset( &cmd, 0, sizeof( cmd ) );
+ memset(&cmd, 0, sizeof(cmd));
cmd.cmd = CMD_TRANSMIT;
cmd.parm0 = txFid;
if (issuecommand(ai, &cmd, &rsp) != SUCCESS) return ERROR;
- if ( (rsp.status & 0xFF00) != 0) return ERROR;
+ if ((rsp.status & 0xFF00) != 0) return ERROR;
return SUCCESS;
}
@@ -4415,25 +4436,25 @@ static int transmit_802_11_packet(struct airo_info *ai, int len, char *pPacket)
* like! Feel free to clean it up!
*/
-static ssize_t proc_read( struct file *file,
+static ssize_t proc_read(struct file *file,
char __user *buffer,
size_t len,
loff_t *offset);
-static ssize_t proc_write( struct file *file,
+static ssize_t proc_write(struct file *file,
const char __user *buffer,
size_t len,
- loff_t *offset );
-static int proc_close( struct inode *inode, struct file *file );
-
-static int proc_stats_open( struct inode *inode, struct file *file );
-static int proc_statsdelta_open( struct inode *inode, struct file *file );
-static int proc_status_open( struct inode *inode, struct file *file );
-static int proc_SSID_open( struct inode *inode, struct file *file );
-static int proc_APList_open( struct inode *inode, struct file *file );
-static int proc_BSSList_open( struct inode *inode, struct file *file );
-static int proc_config_open( struct inode *inode, struct file *file );
-static int proc_wepkey_open( struct inode *inode, struct file *file );
+ loff_t *offset);
+static int proc_close(struct inode *inode, struct file *file);
+
+static int proc_stats_open(struct inode *inode, struct file *file);
+static int proc_statsdelta_open(struct inode *inode, struct file *file);
+static int proc_status_open(struct inode *inode, struct file *file);
+static int proc_SSID_open(struct inode *inode, struct file *file);
+static int proc_APList_open(struct inode *inode, struct file *file);
+static int proc_BSSList_open(struct inode *inode, struct file *file);
+static int proc_config_open(struct inode *inode, struct file *file);
+static int proc_wepkey_open(struct inode *inode, struct file *file);
static const struct proc_ops proc_statsdelta_ops = {
.proc_read = proc_read,
@@ -4508,12 +4529,13 @@ struct proc_data {
void (*on_close) (struct inode *, struct file *);
};
-static int setup_proc_entry( struct net_device *dev,
- struct airo_info *apriv ) {
+static int setup_proc_entry(struct net_device *dev,
+ struct airo_info *apriv)
+{
struct proc_dir_entry *entry;
/* First setup the device directory */
- strcpy(apriv->proc_name,dev->name);
+ strcpy(apriv->proc_name, dev->name);
apriv->proc_entry = proc_mkdir_mode(apriv->proc_name, airo_perm,
airo_entry);
if (!apriv->proc_entry)
@@ -4582,8 +4604,8 @@ fail:
return -ENOMEM;
}
-static int takedown_proc_entry( struct net_device *dev,
- struct airo_info *apriv )
+static int takedown_proc_entry(struct net_device *dev,
+ struct airo_info *apriv)
{
remove_proc_subtree(apriv->proc_name, airo_entry);
return 0;
@@ -4601,10 +4623,10 @@ static int takedown_proc_entry( struct net_device *dev,
* The read routine is generic, it relies on the preallocated rbuffer
* to supply the data.
*/
-static ssize_t proc_read( struct file *file,
+static ssize_t proc_read(struct file *file,
char __user *buffer,
size_t len,
- loff_t *offset )
+ loff_t *offset)
{
struct proc_data *priv = file->private_data;
@@ -4619,10 +4641,10 @@ static ssize_t proc_read( struct file *file,
* The write routine is generic, it fills in a preallocated rbuffer
* to supply the data.
*/
-static ssize_t proc_write( struct file *file,
+static ssize_t proc_write(struct file *file,
const char __user *buffer,
size_t len,
- loff_t *offset )
+ loff_t *offset)
{
ssize_t ret;
struct proc_data *priv = file->private_data;
@@ -4648,10 +4670,10 @@ static int proc_status_open(struct inode *inode, struct file *file)
u16 mode;
int i;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(2048, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -4671,7 +4693,7 @@ static int proc_status_open(struct inode *inode, struct file *file)
mode & 0x100 ? "KEY ": "",
mode & 0x200 ? "WEP ": "",
mode & 0x8000 ? "ERR ": "");
- sprintf( data->rbuffer+i, "Mode: %x\n"
+ sprintf(data->rbuffer+i, "Mode: %x\n"
"Signal Strength: %d\n"
"Signal Quality: %d\n"
"SSID: %-.*s\n"
@@ -4701,26 +4723,28 @@ static int proc_status_open(struct inode *inode, struct file *file)
le16_to_cpu(cap_rid.softVer),
le16_to_cpu(cap_rid.softSubVer),
le16_to_cpu(cap_rid.bootBlockVer));
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
static int proc_stats_rid_open(struct inode*, struct file*, u16);
-static int proc_statsdelta_open( struct inode *inode,
- struct file *file ) {
+static int proc_statsdelta_open(struct inode *inode,
+ struct file *file)
+{
if (file->f_mode&FMODE_WRITE) {
return proc_stats_rid_open(inode, file, RID_STATSDELTACLEAR);
}
return proc_stats_rid_open(inode, file, RID_STATSDELTA);
}
-static int proc_stats_open( struct inode *inode, struct file *file ) {
+static int proc_stats_open(struct inode *inode, struct file *file)
+{
return proc_stats_rid_open(inode, file, RID_STATS);
}
-static int proc_stats_rid_open( struct inode *inode,
+static int proc_stats_rid_open(struct inode *inode,
struct file *file,
- u16 rid )
+ u16 rid)
{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
@@ -4730,10 +4754,10 @@ static int proc_stats_rid_open( struct inode *inode,
__le32 *vals = stats.vals;
int len;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 4096, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(4096, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -4742,7 +4766,7 @@ static int proc_stats_rid_open( struct inode *inode,
len = le16_to_cpu(stats.len);
j = 0;
- for(i=0; statsLabels[i]!=(char *)-1 && i*4<len; i++) {
+ for (i = 0; statsLabels[i] != (char *)-1 && i*4 < len; i++) {
if (!statsLabels[i]) continue;
if (j+strlen(statsLabels[i])+16>4096) {
airo_print_warn(apriv->dev->name,
@@ -4759,7 +4783,8 @@ static int proc_stats_rid_open( struct inode *inode,
return 0;
}
-static int get_dec_u16( char *buffer, int *start, int limit ) {
+static int get_dec_u16(char *buffer, int *start, int limit)
+{
u16 value;
int valid = 0;
for (value = 0; *start < limit && buffer[*start] >= '0' &&
@@ -4768,7 +4793,7 @@ static int get_dec_u16( char *buffer, int *start, int limit ) {
value *= 10;
value += buffer[*start] - '0';
}
- if ( !valid ) return -1;
+ if (!valid) return -1;
return value;
}
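get_dec_u16() leaves *start on the first non-digit it consumed, which is what lets the DataRates handler further down walk a space-separated list one value at a time. The calling pattern, reduced to a self-contained sketch of that handler's loop:

    char buf[] = "11 5 2 1\n";      /* as written to DataRates: */
    char *line = buf;
    u8 rates[8] = { 0 };
    int i = 0, k = 0, v;

    while ((v = get_dec_u16(line, &i, 3)) != -1) {
            rates[k++] = (u8)v;
            line += i + 1;          /* skip the digits and separator */
            i = 0;
    }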
@@ -4789,15 +4814,15 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
struct airo_info *ai = dev->ml_priv;
char *line;
- if ( !data->writelen ) return;
+ if (!data->writelen) return;
readConfigRid(ai, 1);
set_bit (FLAG_COMMIT, &ai->flags);
line = data->wbuffer;
- while( line[0] ) {
+ while (line[0]) {
/*** Mode processing */
- if ( !strncmp( line, "Mode: ", 6 ) ) {
+ if (!strncmp(line, "Mode: ", 6)) {
line += 6;
if (sniffing_mode(ai))
set_bit (FLAG_RESET, &ai->flags);
@@ -4805,19 +4830,19 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
clear_bit (FLAG_802_11, &ai->flags);
ai->config.opmode &= ~MODE_CFG_MASK;
ai->config.scanMode = SCANMODE_ACTIVE;
- if ( line[0] == 'a' ) {
+ if (line[0] == 'a') {
ai->config.opmode |= MODE_STA_IBSS;
} else {
ai->config.opmode |= MODE_STA_ESS;
- if ( line[0] == 'r' ) {
+ if (line[0] == 'r') {
ai->config.rmode |= RXMODE_RFMON | RXMODE_DISABLE_802_3_HEADER;
ai->config.scanMode = SCANMODE_PASSIVE;
set_bit (FLAG_802_11, &ai->flags);
- } else if ( line[0] == 'y' ) {
+ } else if (line[0] == 'y') {
ai->config.rmode |= RXMODE_RFMON_ANYBSS | RXMODE_DISABLE_802_3_HEADER;
ai->config.scanMode = SCANMODE_PASSIVE;
set_bit (FLAG_802_11, &ai->flags);
- } else if ( line[0] == 'l' )
+ } else if (line[0] == 'l')
ai->config.rmode |= RXMODE_LANMON;
}
set_bit (FLAG_COMMIT, &ai->flags);
@@ -4826,68 +4851,68 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
/*** Radio status */
else if (!strncmp(line,"Radio: ", 7)) {
line += 7;
- if (!strncmp(line,"off",3)) {
+ if (!strncmp(line,"off", 3)) {
set_bit (FLAG_RADIO_OFF, &ai->flags);
} else {
clear_bit (FLAG_RADIO_OFF, &ai->flags);
}
}
/*** NodeName processing */
- else if ( !strncmp( line, "NodeName: ", 10 ) ) {
+ else if (!strncmp(line, "NodeName: ", 10)) {
int j;
line += 10;
- memset( ai->config.nodeName, 0, 16 );
+ memset(ai->config.nodeName, 0, 16);
/* Do the name, assume a space between the mode and node name */
- for( j = 0; j < 16 && line[j] != '\n'; j++ ) {
+ for (j = 0; j < 16 && line[j] != '\n'; j++) {
ai->config.nodeName[j] = line[j];
}
set_bit (FLAG_COMMIT, &ai->flags);
}
/*** PowerMode processing */
- else if ( !strncmp( line, "PowerMode: ", 11 ) ) {
+ else if (!strncmp(line, "PowerMode: ", 11)) {
line += 11;
- if ( !strncmp( line, "PSPCAM", 6 ) ) {
+ if (!strncmp(line, "PSPCAM", 6)) {
ai->config.powerSaveMode = POWERSAVE_PSPCAM;
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "PSP", 3 ) ) {
+ } else if (!strncmp(line, "PSP", 3)) {
ai->config.powerSaveMode = POWERSAVE_PSP;
set_bit (FLAG_COMMIT, &ai->flags);
} else {
ai->config.powerSaveMode = POWERSAVE_CAM;
set_bit (FLAG_COMMIT, &ai->flags);
}
- } else if ( !strncmp( line, "DataRates: ", 11 ) ) {
+ } else if (!strncmp(line, "DataRates: ", 11)) {
int v, i = 0, k = 0; /* i is index into line,
k is index to rates */
line += 11;
- while((v = get_dec_u16(line, &i, 3))!=-1) {
+ while ((v = get_dec_u16(line, &i, 3)) != -1) {
ai->config.rates[k++] = (u8)v;
line += i + 1;
i = 0;
}
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "Channel: ", 9 ) ) {
+ } else if (!strncmp(line, "Channel: ", 9)) {
int v, i = 0;
line += 9;
v = get_dec_u16(line, &i, i+3);
- if ( v != -1 ) {
+ if (v != -1) {
ai->config.channelSet = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
}
- } else if ( !strncmp( line, "XmitPower: ", 11 ) ) {
+ } else if (!strncmp(line, "XmitPower: ", 11)) {
int v, i = 0;
line += 11;
v = get_dec_u16(line, &i, i+3);
- if ( v != -1 ) {
+ if (v != -1) {
ai->config.txPower = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
}
- } else if ( !strncmp( line, "WEP: ", 5 ) ) {
+ } else if (!strncmp(line, "WEP: ", 5)) {
line += 5;
- switch( line[0] ) {
+ switch (line[0]) {
case 's':
set_auth_type(ai, AUTH_SHAREDKEY);
break;
@@ -4899,7 +4924,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
break;
}
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "LongRetryLimit: ", 16 ) ) {
+ } else if (!strncmp(line, "LongRetryLimit: ", 16)) {
int v, i = 0;
line += 16;
@@ -4907,7 +4932,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : ((v>255) ? 255 : v);
ai->config.longRetryLimit = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "ShortRetryLimit: ", 17 ) ) {
+ } else if (!strncmp(line, "ShortRetryLimit: ", 17)) {
int v, i = 0;
line += 17;
@@ -4915,7 +4940,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : ((v>255) ? 255 : v);
ai->config.shortRetryLimit = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RTSThreshold: ", 14 ) ) {
+ } else if (!strncmp(line, "RTSThreshold: ", 14)) {
int v, i = 0;
line += 14;
@@ -4923,7 +4948,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : ((v>AIRO_DEF_MTU) ? AIRO_DEF_MTU : v);
ai->config.rtsThres = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "TXMSDULifetime: ", 16 ) ) {
+ } else if (!strncmp(line, "TXMSDULifetime: ", 16)) {
int v, i = 0;
line += 16;
@@ -4931,7 +4956,7 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : v;
ai->config.txLifetime = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RXMSDULifetime: ", 16 ) ) {
+ } else if (!strncmp(line, "RXMSDULifetime: ", 16)) {
int v, i = 0;
line += 16;
@@ -4939,17 +4964,17 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
v = (v<0) ? 0 : v;
ai->config.rxLifetime = cpu_to_le16(v);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "TXDiversity: ", 13 ) ) {
+ } else if (!strncmp(line, "TXDiversity: ", 13)) {
ai->config.txDiversity =
(line[13]=='l') ? 1 :
((line[13]=='r')? 2: 3);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "RXDiversity: ", 13 ) ) {
+ } else if (!strncmp(line, "RXDiversity: ", 13)) {
ai->config.rxDiversity =
(line[13]=='l') ? 1 :
((line[13]=='r')? 2: 3);
set_bit (FLAG_COMMIT, &ai->flags);
- } else if ( !strncmp( line, "FragThreshold: ", 15 ) ) {
+ } else if (!strncmp(line, "FragThreshold: ", 15)) {
int v, i = 0;
line += 15;
@@ -4961,24 +4986,24 @@ static void proc_config_on_close(struct inode *inode, struct file *file)
} else if (!strncmp(line, "Modulation: ", 12)) {
line += 12;
switch(*line) {
- case 'd': ai->config.modulation=MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'c': ai->config.modulation=MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'm': ai->config.modulation=MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'd': ai->config.modulation = MOD_DEFAULT; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'c': ai->config.modulation = MOD_CCK; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'm': ai->config.modulation = MOD_MOK; set_bit(FLAG_COMMIT, &ai->flags); break;
default: airo_print_warn(ai->dev->name, "Unknown modulation");
}
} else if (!strncmp(line, "Preamble: ", 10)) {
line += 10;
switch(*line) {
- case 'a': ai->config.preamble=PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 'l': ai->config.preamble=PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
- case 's': ai->config.preamble=PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'a': ai->config.preamble = PREAMBLE_AUTO; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 'l': ai->config.preamble = PREAMBLE_LONG; set_bit(FLAG_COMMIT, &ai->flags); break;
+ case 's': ai->config.preamble = PREAMBLE_SHORT; set_bit(FLAG_COMMIT, &ai->flags); break;
default: airo_print_warn(ai->dev->name, "Unknown preamble");
}
} else {
airo_print_warn(ai->dev->name, "Couldn't figure out %s", line);
}
- while( line[0] && line[0] != '\n' ) line++;
- if ( line[0] ) line++;
+ while (line[0] && line[0] != '\n') line++;
+ if (line[0]) line++;
}
airo_config_commit(dev, NULL, NULL, NULL);
}
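All of these keywords arrive via writes to the per-device Config proc file; the path below follows the driver's proc layout (typically /proc/driver/aironet/<dev>/Config, best verified on a live system). A minimal userspace sketch:

    #include <stdio.h>

    int main(void)
    {
            /* Switch the card to ad-hoc mode: the parser above only
             * inspects the first letter after "Mode: ".
             */
            FILE *f = fopen("/proc/driver/aironet/eth0/Config", "w");

            if (!f)
                    return 1;
            fputs("Mode: adhoc\n", f);
            return fclose(f) ? 1 : 0;
    }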
@@ -5001,14 +5026,14 @@ static int proc_config_open(struct inode *inode, struct file *file)
int i;
__le16 mode;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 2048, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(2048, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
- if ((data->wbuffer = kzalloc( 2048, GFP_KERNEL )) == NULL) {
+ if ((data->wbuffer = kzalloc(2048, GFP_KERNEL)) == NULL) {
kfree (data->rbuffer);
kfree (file->private_data);
return -ENOMEM;
@@ -5019,7 +5044,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
readConfigRid(ai, 1);
mode = ai->config.opmode & MODE_CFG_MASK;
- i = sprintf( data->rbuffer,
+ i = sprintf(data->rbuffer,
"Mode: %s\n"
"Radio: %s\n"
"NodeName: %-16s\n"
@@ -5048,7 +5073,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
le16_to_cpu(ai->config.channelSet),
le16_to_cpu(ai->config.txPower)
);
- sprintf( data->rbuffer + i,
+ sprintf(data->rbuffer + i,
"LongRetryLimit: %d\n"
"ShortRetryLimit: %d\n"
"RTSThreshold: %d\n"
@@ -5079,7 +5104,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
ai->config.preamble == PREAMBLE_LONG ? "long" :
ai->config.preamble == PREAMBLE_SHORT ? "short" : "error"
);
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
@@ -5119,14 +5144,15 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
enable_MAC(ai, 1);
}
-static void proc_APList_on_close( struct inode *inode, struct file *file ) {
+static void proc_APList_on_close(struct inode *inode, struct file *file)
+{
struct proc_data *data = file->private_data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
APListRid *APList_rid = &ai->APList;
int i;
- if ( !data->writelen ) return;
+ if (!data->writelen) return;
memset(APList_rid, 0, sizeof(*APList_rid));
APList_rid->len = cpu_to_le16(sizeof(*APList_rid));
@@ -5140,8 +5166,9 @@ static void proc_APList_on_close( struct inode *inode, struct file *file ) {
}
/* This function wraps PC4500_writerid with a MAC disable */
-static int do_writerid( struct airo_info *ai, u16 rid, const void *rid_data,
- int len, int dummy ) {
+static int do_writerid(struct airo_info *ai, u16 rid, const void *rid_data,
+ int len, int dummy)
+{
int rc;
disable_MAC(ai, 1);
@@ -5241,7 +5268,8 @@ static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
return rc;
}
-static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
+static void proc_wepkey_on_close(struct inode *inode, struct file *file)
+{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
@@ -5253,7 +5281,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
memset(key, 0, sizeof(key));
data = file->private_data;
- if ( !data->writelen ) return;
+ if (!data->writelen) return;
if (data->wbuffer[0] >= '0' && data->wbuffer[0] <= '3' &&
(data->wbuffer[1] == ' ' || data->wbuffer[1] == '\n')) {
@@ -5273,7 +5301,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
return;
}
- for( i = 0; i < 16*3 && data->wbuffer[i+j]; i++ ) {
+ for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) {
switch(i%3) {
case 0:
key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4;
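Keys are written as colon-separated hex pairs ("hh:hh:..."), so within each 3-character group positions 0 and 1 are nibbles and position 2 is the separator. The full i%3 switch, of which only case 0 shows in this hunk, works roughly like this:

    char c = data->wbuffer[i + j];          /* current character */

    switch (i % 3) {
    case 0:
            key[i / 3] = hex_to_bin(c) << 4;        /* high nibble */
            break;
    case 1:
            key[i / 3] |= hex_to_bin(c);            /* low nibble */
            break;
    case 2:                                         /* ':' separator */
            break;
    }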
@@ -5291,7 +5319,7 @@ static void proc_wepkey_on_close( struct inode *inode, struct file *file ) {
}
}
-static int proc_wepkey_open( struct inode *inode, struct file *file )
+static int proc_wepkey_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
@@ -5299,20 +5327,20 @@ static int proc_wepkey_open( struct inode *inode, struct file *file )
char *ptr;
WepKeyRid wkr;
__le16 lastindex;
- int j=0;
+ int j = 0;
int rc;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
memset(&wkr, 0, sizeof(wkr));
data = file->private_data;
- if ((data->rbuffer = kzalloc( 180, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kzalloc(180, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
data->writelen = 0;
data->maxwritelen = 80;
- if ((data->wbuffer = kzalloc( 80, GFP_KERNEL )) == NULL) {
+ if ((data->wbuffer = kzalloc(80, GFP_KERNEL)) == NULL) {
kfree (data->rbuffer);
kfree (file->private_data);
return -ENOMEM;
@@ -5333,9 +5361,9 @@ static int proc_wepkey_open( struct inode *inode, struct file *file )
le16_to_cpu(wkr.klen));
}
readWepKeyRid(ai, &wkr, 0, 1);
- } while((lastindex != wkr.kindex) && (j < 180-30));
+ } while ((lastindex != wkr.kindex) && (j < 180-30));
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
@@ -5348,10 +5376,10 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
char *ptr;
SsidRid SSID_rid;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(104, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -5379,11 +5407,12 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
*ptr++ = '\n';
}
*ptr = '\0';
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
-static int proc_APList_open( struct inode *inode, struct file *file ) {
+static int proc_APList_open(struct inode *inode, struct file *file)
+{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
@@ -5391,16 +5420,16 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
char *ptr;
APListRid *APList_rid = &ai->APList;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 104, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(104, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
data->writelen = 0;
data->maxwritelen = 4*6*3;
- if ((data->wbuffer = kzalloc( data->maxwritelen, GFP_KERNEL )) == NULL) {
+ if ((data->wbuffer = kzalloc(data->maxwritelen, GFP_KERNEL)) == NULL) {
kfree (data->rbuffer);
kfree (file->private_data);
return -ENOMEM;
@@ -5408,20 +5437,21 @@ static int proc_APList_open( struct inode *inode, struct file *file ) {
data->on_close = proc_APList_on_close;
ptr = data->rbuffer;
- for( i = 0; i < 4; i++ ) {
+ for (i = 0; i < 4; i++) {
// We end when we find a zero MAC
- if ( !*(int*)APList_rid->ap[i] &&
+ if (!*(int*)APList_rid->ap[i] &&
!*(int*)&APList_rid->ap[i][2]) break;
ptr += sprintf(ptr, "%pM\n", APList_rid->ap[i]);
}
if (i==0) ptr += sprintf(ptr, "Not using specific APs\n");
*ptr = '\0';
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
-static int proc_BSSList_open( struct inode *inode, struct file *file ) {
+static int proc_BSSList_open(struct inode *inode, struct file *file)
+{
struct proc_data *data;
struct net_device *dev = PDE_DATA(inode);
struct airo_info *ai = dev->ml_priv;
@@ -5431,10 +5461,10 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
/* If doLoseSync is not 1, we won't do a Lose Sync */
int doLoseSync = -1;
- if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
+ if ((file->private_data = kzalloc(sizeof(struct proc_data), GFP_KERNEL)) == NULL)
return -ENOMEM;
data = file->private_data;
- if ((data->rbuffer = kmalloc( 1024, GFP_KERNEL )) == NULL) {
+ if ((data->rbuffer = kmalloc(1024, GFP_KERNEL)) == NULL) {
kfree (file->private_data);
return -ENOMEM;
}
@@ -5454,7 +5484,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
return -ENETDOWN;
}
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
+ cmd.cmd = CMD_LISTBSS;
if (down_interruptible(&ai->sem)) {
kfree(data->rbuffer);
kfree(file->private_data);
@@ -5472,7 +5502,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
Since it is a rare condition, we'll just live with it, otherwise
we have to add a spin lock... */
rc = readBSSListRid(ai, doLoseSync, &BSSList_rid);
- while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
+ while (rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) {
ptr += sprintf(ptr, "%pM %.*s rssi = %d",
BSSList_rid.bssid,
(int)BSSList_rid.ssidLen,
@@ -5487,11 +5517,11 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
rc = readBSSListRid(ai, 0, &BSSList_rid);
}
*ptr = '\0';
- data->readlen = strlen( data->rbuffer );
+ data->readlen = strlen(data->rbuffer);
return 0;
}
-static int proc_close( struct inode *inode, struct file *file )
+static int proc_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
@@ -5508,7 +5538,8 @@ static int proc_close( struct inode *inode, struct file *file )
will switch WEP modes to see if that will help. If the card is
associated we will check every minute to see if anything has
changed. */
-static void timer_func( struct net_device *dev ) {
+static void timer_func(struct net_device *dev)
+{
struct airo_info *apriv = dev->ml_priv;
/* We don't have a link so try changing the authtype */
@@ -5642,7 +5673,7 @@ static int __maybe_unused airo_pci_resume(struct device *dev_d)
}
#endif
-static int __init airo_init_module( void )
+static int __init airo_init_module(void)
{
int i;
@@ -5658,9 +5689,10 @@ static int __init airo_init_module( void )
for (i = 0; i < 4 && io[i] && irq[i]; i++) {
airo_print_info("", "Trying to configure ISA adapter at irq=%d "
- "io=0x%x", irq[i], io[i] );
- if (init_airo_card( irq[i], io[i], 0, NULL ))
+ "io = 0x%x", irq[i], io[i]);
+ if (init_airo_card(irq[i], io[i], 0, NULL)) {
/* do nothing */ ;
+ }
}
#ifdef CONFIG_PCI
@@ -5680,10 +5712,10 @@ static int __init airo_init_module( void )
return 0;
}
-static void __exit airo_cleanup_module( void )
+static void __exit airo_cleanup_module(void)
{
struct airo_info *ai;
- while(!list_empty(&airo_devices)) {
+ while (!list_empty(&airo_devices)) {
ai = list_entry(airo_devices.next, struct airo_info, dev_list);
airo_print_info(ai->dev->name, "Unregistering...");
stop_airo_card(ai->dev, 1);
@@ -5783,7 +5815,7 @@ static int airo_set_freq(struct net_device *dev,
int rc = -EINPROGRESS; /* Call commit handler */
/* If setting by frequency, convert to a channel */
- if(fwrq->e == 1) {
+ if (fwrq->e == 1) {
int f = fwrq->m / 100000;
/* Hack to fall through... */
@@ -5797,7 +5829,7 @@ static int airo_set_freq(struct net_device *dev,
int channel = fwrq->m;
/* We should do a better check than that,
* based on the card capability !!! */
- if((channel < 1) || (channel > 14)) {
+ if ((channel < 1) || (channel > 14)) {
airo_print_dbg(dev->name, "New channel value of %d is invalid!",
fwrq->m);
rc = -EINVAL;
@@ -5831,7 +5863,7 @@ static int airo_get_freq(struct net_device *dev,
readStatusRid(local, &status_rid, 1);
ch = le16_to_cpu(status_rid.channel);
- if((ch > 0) && (ch < 15)) {
+ if ((ch > 0) && (ch < 15)) {
fwrq->m = 100000 *
ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
fwrq->e = 1;
@@ -5935,7 +5967,7 @@ static int airo_set_wap(struct net_device *dev,
else if (is_broadcast_ether_addr(awrq->sa_data) ||
is_zero_ether_addr(awrq->sa_data)) {
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LOSE_SYNC;
+ cmd.cmd = CMD_LOSE_SYNC;
if (down_interruptible(&local->sem))
return -ERESTARTSYS;
issuecommand(local, &cmd, &rsp);
@@ -5984,7 +6016,7 @@ static int airo_set_nick(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
/* Check the size of the string */
- if(dwrq->length > 16) {
+ if (dwrq->length > 16) {
return -E2BIG;
}
readConfigRid(local, 1);
@@ -6032,7 +6064,7 @@ static int airo_set_rate(struct net_device *dev,
readCapabilityRid(local, &cap_rid, 1);
/* Which type of value ? */
- if((vwrq->value < 8) && (vwrq->value >= 0)) {
+ if ((vwrq->value < 8) && (vwrq->value >= 0)) {
/* Setting by rate index */
/* Find value in the magic rate table */
brate = cap_rid.supportedRates[vwrq->value];
@@ -6041,36 +6073,36 @@ static int airo_set_rate(struct net_device *dev,
u8 normvalue = (u8) (vwrq->value/500000);
/* Check if rate is valid */
- for(i = 0 ; i < 8 ; i++) {
- if(normvalue == cap_rid.supportedRates[i]) {
+ for (i = 0 ; i < 8 ; i++) {
+ if (normvalue == cap_rid.supportedRates[i]) {
brate = normvalue;
break;
}
}
}
/* -1 designed the max rate (mostly auto mode) */
- if(vwrq->value == -1) {
+ if (vwrq->value == -1) {
/* Get the highest available rate */
- for(i = 0 ; i < 8 ; i++) {
- if(cap_rid.supportedRates[i] == 0)
+ for (i = 0 ; i < 8 ; i++) {
+ if (cap_rid.supportedRates[i] == 0)
break;
}
- if(i != 0)
+ if (i != 0)
brate = cap_rid.supportedRates[i - 1];
}
/* Check that it is valid */
- if(brate == 0) {
+ if (brate == 0) {
return -EINVAL;
}
readConfigRid(local, 1);
/* Now, check if we want a fixed or auto value */
- if(vwrq->fixed == 0) {
+ if (vwrq->fixed == 0) {
/* Fill all the rates up to this max rate */
memset(local->config.rates, 0, 8);
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
local->config.rates[i] = cap_rid.supportedRates[i];
- if(local->config.rates[i] == brate)
+ if (local->config.rates[i] == brate)
break;
}
} else {
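Wireless Extensions pass bit rates in bit/s while the card's rate table stores 500 kb/s units, hence the division by 500000 above. For example:

    /* 11 Mb/s requested through iwconfig arrives as 11000000: */
    u8 normvalue = (u8)(11000000 / 500000);         /* == 22 */
    /* 22 is the raw encoding found in cap_rid.supportedRates[] */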
@@ -6118,9 +6150,9 @@ static int airo_set_rts(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
int rthr = vwrq->value;
- if(vwrq->disabled)
+ if (vwrq->disabled)
rthr = AIRO_DEF_MTU;
- if((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
+ if ((rthr < 0) || (rthr > AIRO_DEF_MTU)) {
return -EINVAL;
}
readConfigRid(local, 1);
@@ -6161,9 +6193,9 @@ static int airo_set_frag(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
int fthr = vwrq->value;
- if(vwrq->disabled)
+ if (vwrq->disabled)
fthr = AIRO_DEF_MTU;
- if((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
+ if ((fthr < 256) || (fthr > AIRO_DEF_MTU)) {
return -EINVAL;
}
fthr &= ~0x1; /* Get an even value - is it really needed ??? */
@@ -6340,7 +6372,7 @@ static int airo_set_encode(struct net_device *dev,
else
key.len = MIN_KEY_SIZE;
/* Check if the key is not marked as invalid */
- if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
+ if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
memset(key.key, 0, MAX_KEY_SIZE);
/* Copy the key in the driver */
@@ -6357,7 +6389,7 @@ static int airo_set_encode(struct net_device *dev,
/* WE specify that if a valid key is set, encryption
* should be enabled (user may turn it off later)
* This is also how "iwconfig ethX key on" works */
- if((index == current_index) && (key.len > 0) &&
+ if ((index == current_index) && (key.len > 0) &&
(local->config.authType == AUTH_OPEN))
set_auth_type(local, AUTH_ENCRYPT);
} else {
@@ -6380,7 +6412,7 @@ static int airo_set_encode(struct net_device *dev,
/* Read the flags */
if (dwrq->flags & IW_ENCODE_DISABLED)
set_auth_type(local, AUTH_OPEN); /* disable encryption */
- if(dwrq->flags & IW_ENCODE_RESTRICTED)
+ if (dwrq->flags & IW_ENCODE_RESTRICTED)
set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
if (dwrq->flags & IW_ENCODE_OPEN)
set_auth_type(local, AUTH_ENCRYPT); /* Only Wep */
@@ -6458,7 +6490,7 @@ static int airo_set_encodeext(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- int perm = ( encoding->flags & IW_ENCODE_TEMP ? 0 : 1 );
+ int perm = (encoding->flags & IW_ENCODE_TEMP ? 0 : 1);
__le16 currentAuthType = local->config.authType;
int idx, key_len, alg = ext->alg, set_key = 1, rc;
wep_key_t key;
@@ -6540,7 +6572,7 @@ static int airo_set_encodeext(struct net_device *dev,
/* Read the flags */
if (encoding->flags & IW_ENCODE_DISABLED)
set_auth_type(local, AUTH_OPEN); /* disable encryption */
- if(encoding->flags & IW_ENCODE_RESTRICTED)
+ if (encoding->flags & IW_ENCODE_RESTRICTED)
set_auth_type(local, AUTH_SHAREDKEY); /* Only Both */
if (encoding->flags & IW_ENCODE_OPEN)
set_auth_type(local, AUTH_ENCRYPT);
@@ -6606,7 +6638,7 @@ static int airo_get_encodeext(struct net_device *dev,
/* We can't return the key, so set the proper flag and return zero */
encoding->flags |= IW_ENCODE_NOKEY;
memset(extra, 0, 16);
-
+
/* Copy the key to the user buffer */
wep_key_len = get_wep_key(local, idx, &buf[0], sizeof(buf));
if (wep_key_len < 0) {
@@ -6806,13 +6838,13 @@ static int airo_set_retry(struct net_device *dev,
struct airo_info *local = dev->ml_priv;
int rc = -EINVAL;
- if(vwrq->disabled) {
+ if (vwrq->disabled) {
return -EINVAL;
}
readConfigRid(local, 1);
- if(vwrq->flags & IW_RETRY_LIMIT) {
+ if (vwrq->flags & IW_RETRY_LIMIT) {
__le16 v = cpu_to_le16(vwrq->value);
- if(vwrq->flags & IW_RETRY_LONG)
+ if (vwrq->flags & IW_RETRY_LONG)
local->config.longRetryLimit = v;
else if (vwrq->flags & IW_RETRY_SHORT)
local->config.shortRetryLimit = v;
@@ -6824,7 +6856,7 @@ static int airo_set_retry(struct net_device *dev,
set_bit (FLAG_COMMIT, &local->flags);
rc = -EINPROGRESS; /* Call commit handler */
}
- if(vwrq->flags & IW_RETRY_LIFETIME) {
+ if (vwrq->flags & IW_RETRY_LIFETIME) {
local->config.txLifetime = cpu_to_le16(vwrq->value / 1024);
set_bit (FLAG_COMMIT, &local->flags);
rc = -EINPROGRESS; /* Call commit handler */
@@ -6847,16 +6879,16 @@ static int airo_get_retry(struct net_device *dev,
readConfigRid(local, 1);
/* Note : by default, display the min retry number */
- if((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
+ if ((vwrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
vwrq->flags = IW_RETRY_LIFETIME;
vwrq->value = le16_to_cpu(local->config.txLifetime) * 1024;
- } else if((vwrq->flags & IW_RETRY_LONG)) {
+ } else if ((vwrq->flags & IW_RETRY_LONG)) {
vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
vwrq->value = le16_to_cpu(local->config.longRetryLimit);
} else {
vwrq->flags = IW_RETRY_LIMIT;
vwrq->value = le16_to_cpu(local->config.shortRetryLimit);
- if(local->config.shortRetryLimit != local->config.longRetryLimit)
+ if (local->config.shortRetryLimit != local->config.longRetryLimit)
vwrq->flags |= IW_RETRY_SHORT;
}
@@ -6888,7 +6920,7 @@ static int airo_get_range(struct net_device *dev,
/* Should be based on cap_rid.country to give only
* what the current card support */
k = 0;
- for(i = 0; i < 14; i++) {
+ for (i = 0; i < 14; i++) {
range->freq[k].i = i + 1; /* List index */
range->freq[k].m = 100000 *
ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
@@ -6918,9 +6950,9 @@ static int airo_get_range(struct net_device *dev,
}
range->avg_qual.noise = 0x100 - 85; /* -85 dBm */
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
range->bitrate[i] = cap_rid.supportedRates[i] * 500000;
- if(range->bitrate[i] == 0)
+ if (range->bitrate[i] == 0)
break;
}
range->num_bitrates = i;
@@ -6928,7 +6960,7 @@ static int airo_get_range(struct net_device *dev,
/* Set an indication of the max TCP throughput
* in bit/s that we can expect using this interface.
* May be use for QoS stuff... Jean II */
- if(i > 2)
+ if (i > 2)
range->throughput = 5000 * 1000;
else
range->throughput = 1500 * 1000;
@@ -6938,7 +6970,7 @@ static int airo_get_range(struct net_device *dev,
range->min_frag = 256;
range->max_frag = AIRO_DEF_MTU;
- if(cap_rid.softCap & cpu_to_le16(2)) {
+ if (cap_rid.softCap & cpu_to_le16(2)) {
// WEP: RC4 40 bits
range->encoding_size[0] = 5;
// RC4 ~128 bits
@@ -6962,9 +6994,9 @@ static int airo_get_range(struct net_device *dev,
range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
/* Transmit Power - values are in mW */
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
range->txpower[i] = le16_to_cpu(cap_rid.txPowerLevels[i]);
- if(range->txpower[i] == 0)
+ if (range->txpower[i] == 0)
break;
}
range->num_txpower = i;
@@ -7235,7 +7267,7 @@ static int airo_set_scan(struct net_device *dev,
/* Initiate a scan command */
ai->scan_timeout = RUN_AT(3*HZ);
memset(&cmd, 0, sizeof(cmd));
- cmd.cmd=CMD_LISTBSS;
+ cmd.cmd = CMD_LISTBSS;
issuecommand(ai, &cmd, &rsp);
wake = 1;
@@ -7276,7 +7308,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add the ESSID */
iwe.u.data.length = bss->ssidLen;
- if(iwe.u.data.length > 32)
+ if (iwe.u.data.length > 32)
iwe.u.data.length = 32;
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
@@ -7286,8 +7318,8 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add mode */
iwe.cmd = SIOCGIWMODE;
capabilities = bss->cap;
- if(capabilities & (CAP_ESS | CAP_IBSS)) {
- if(capabilities & CAP_ESS)
+ if (capabilities & (CAP_ESS | CAP_IBSS)) {
+ if (capabilities & CAP_ESS)
iwe.u.mode = IW_MODE_MASTER;
else
iwe.u.mode = IW_MODE_ADHOC;
@@ -7327,7 +7359,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Add encryption capability */
iwe.cmd = SIOCGIWENCODE;
- if(capabilities & CAP_PRIVACY)
+ if (capabilities & CAP_PRIVACY)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
@@ -7343,9 +7375,9 @@ static inline char *airo_translate_scan(struct net_device *dev,
/* Those two flags are ignored... */
iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
/* Max 8 values */
- for(i = 0 ; i < 8 ; i++) {
+ for (i = 0 ; i < 8 ; i++) {
/* NULL terminated */
- if(bss->rates[i] == 0)
+ if (bss->rates[i] == 0)
break;
/* Bit rate given in 500 kb/s units (+ 0x80) */
iwe.u.bitrate.value = ((bss->rates[i] & 0x7f) * 500000);
@@ -7453,7 +7485,7 @@ static int airo_get_scan(struct net_device *dev,
&net->bss);
/* Check if there is space for one more entry */
- if((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
+ if ((extra + dwrq->length - current_ev) <= IW_EV_ADDR_LEN) {
/* Ask user space to try again with a bigger buffer */
err = -E2BIG;
goto out;
@@ -7491,7 +7523,7 @@ static int airo_config_commit(struct net_device *dev,
readSsidRid(local, &SSID_rid);
if (test_bit(FLAG_MPI,&local->flags))
- setup_card(local, dev->dev_addr, 1 );
+ setup_card(local, dev->dev_addr, 1);
else
reset_airo_card(dev);
disable_MAC(local, 1);
@@ -7635,9 +7667,9 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int val = AIROMAGIC;
aironet_ioctl com;
- if (copy_from_user(&com,rq->ifr_data,sizeof(com)))
+ if (copy_from_user(&com, rq->ifr_data, sizeof(com)))
rc = -EFAULT;
- else if (copy_to_user(com.data,(char *)&val,sizeof(val)))
+ else if (copy_to_user(com.data, (char *)&val, sizeof(val)))
rc = -EFAULT;
}
break;
@@ -7651,24 +7683,24 @@ static int airo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*/
{
aironet_ioctl com;
- if (copy_from_user(&com,rq->ifr_data,sizeof(com))) {
+ if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
rc = -EFAULT;
break;
}
/* Separate R/W functions bracket legality here
*/
- if ( com.command == AIRORSWVERSION ) {
+ if (com.command == AIRORSWVERSION) {
if (copy_to_user(com.data, swversion, sizeof(swversion)))
rc = -EFAULT;
else
rc = 0;
}
- else if ( com.command <= AIRORRID)
+ else if (com.command <= AIRORRID)
rc = readrids(dev,&com);
- else if ( com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2) )
+ else if (com.command >= AIROPCAP && com.command <= (AIROPLEAPUSR+2))
rc = writerids(dev,&com);
- else if ( com.command >= AIROFLSHRST && com.command <= AIRORESTART )
+ else if (com.command >= AIROFLSHRST && com.command <= AIRORESTART)
rc = flashcard(dev,&com);
else
rc = -EINVAL; /* Bad command in ioctl */
@@ -7770,7 +7802,8 @@ static struct iw_statistics *airo_get_wireless_stats(struct net_device *dev)
* as needed. This represents the READ side of control I/O to
* the card
*/
-static int readrids(struct net_device *dev, aironet_ioctl *comp) {
+static int readrids(struct net_device *dev, aironet_ioctl *comp)
+{
unsigned short ridcode;
unsigned char *iobuf;
int len;
@@ -7800,7 +7833,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
case AIROGSTATSC32: ridcode = RID_STATS; break;
case AIROGMICSTATS:
if (copy_to_user(comp->data, &ai->micstats,
- min((int)comp->len,(int)sizeof(ai->micstats))))
+ min((int)comp->len, (int)sizeof(ai->micstats))))
return -EFAULT;
return 0;
case AIRORRID: ridcode = comp->ridnum; break;
@@ -7817,7 +7850,7 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
- PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1);
+ PC4500_readrid(ai, ridcode, iobuf, RIDSIZE, 1);
/* get the count of bytes in the rid docs say 1st 2 bytes is it.
* then return it to the user
* 9/22/2000 Honor user given length
@@ -7836,7 +7869,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
* Danger Will Robinson write the rids here
*/
-static int writerids(struct net_device *dev, aironet_ioctl *comp) {
+static int writerids(struct net_device *dev, aironet_ioctl *comp)
+{
struct airo_info *ai = dev->ml_priv;
int ridcode;
int enabled;
@@ -7893,10 +7927,10 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
- PC4500_readrid(ai,RID_STATSDELTACLEAR,iobuf,RIDSIZE, 1);
+ PC4500_readrid(ai, RID_STATSDELTACLEAR, iobuf, RIDSIZE, 1);
enabled = ai->micstats.enabled;
- memset(&ai->micstats,0,sizeof(ai->micstats));
+ memset(&ai->micstats, 0, sizeof(ai->micstats));
ai->micstats.enabled = enabled;
if (copy_to_user(comp->data, iobuf,
@@ -7910,13 +7944,13 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
default:
return -EOPNOTSUPP; /* Blarg! */
}
- if(comp->len > RIDSIZE)
+ if (comp->len > RIDSIZE)
return -EINVAL;
if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
return -ENOMEM;
- if (copy_from_user(iobuf,comp->data,comp->len)) {
+ if (copy_from_user(iobuf, comp->data, comp->len)) {
kfree (iobuf);
return -EFAULT;
}
@@ -7933,7 +7967,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
clear_bit (FLAG_ADHOC, &ai->flags);
}
- if((*writer)(ai, ridcode, iobuf,comp->len,1)) {
+ if ((*writer)(ai, ridcode, iobuf, comp->len, 1)) {
kfree (iobuf);
return -EIO;
}
@@ -7950,7 +7984,8 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
* Flash command switch table
*/
-static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
+static int flashcard(struct net_device *dev, aironet_ioctl *comp)
+{
int z;
/* Only super-user can modify flash */
@@ -7969,23 +8004,23 @@ static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
return setflashmode((struct airo_info *)dev->ml_priv);
case AIROFLSHGCHR: /* Get char from aux */
- if(comp->len != sizeof(int))
+ if (comp->len != sizeof(int))
return -EINVAL;
- if (copy_from_user(&z,comp->data,comp->len))
+ if (copy_from_user(&z, comp->data, comp->len))
return -EFAULT;
return flashgchar((struct airo_info *)dev->ml_priv, z, 8000);
case AIROFLSHPCHR: /* Send char to card. */
- if(comp->len != sizeof(int))
+ if (comp->len != sizeof(int))
return -EINVAL;
- if (copy_from_user(&z,comp->data,comp->len))
+ if (copy_from_user(&z, comp->data, comp->len))
return -EFAULT;
return flashpchar((struct airo_info *)dev->ml_priv, z, 8000);
case AIROFLPUTBUF: /* Send 32k to card */
if (!AIRO_FLASH(dev))
return -ENOMEM;
- if(comp->len > FLASHSIZE)
+ if (comp->len > FLASHSIZE)
return -EINVAL;
if (copy_from_user(AIRO_FLASH(dev), comp->data, comp->len))
return -EFAULT;
@@ -8009,19 +8044,20 @@ static int flashcard(struct net_device *dev, aironet_ioctl *comp) {
* card.
*/
-static int cmdreset(struct airo_info *ai) {
+static int cmdreset(struct airo_info *ai)
+{
disable_MAC(ai, 1);
- if(!waitbusy (ai)){
+ if (!waitbusy(ai)) {
airo_print_info(ai->dev->name, "Waitbusy hang before RESET");
return -EBUSY;
}
- OUT4500(ai,COMMAND,CMD_SOFTRESET);
+ OUT4500(ai, COMMAND, CMD_SOFTRESET);
ssleep(1); /* WAS 600 12/7/00 */
- if(!waitbusy (ai)){
+ if (!waitbusy(ai)) {
airo_print_info(ai->dev->name, "Waitbusy hang AFTER RESET");
return -EBUSY;
}
@@ -8033,22 +8069,23 @@ static int cmdreset(struct airo_info *ai) {
* mode
*/
-static int setflashmode (struct airo_info *ai) {
+static int setflashmode(struct airo_info *ai)
+{
set_bit (FLAG_FLASHING, &ai->flags);
OUT4500(ai, SWS0, FLASH_COMMAND);
OUT4500(ai, SWS1, FLASH_COMMAND);
if (probe) {
OUT4500(ai, SWS0, FLASH_COMMAND);
- OUT4500(ai, COMMAND,0x10);
+ OUT4500(ai, COMMAND, 0x10);
} else {
OUT4500(ai, SWS2, FLASH_COMMAND);
OUT4500(ai, SWS3, FLASH_COMMAND);
- OUT4500(ai, COMMAND,0);
+ OUT4500(ai, COMMAND, 0);
}
msleep(500); /* 500ms delay */
- if(!waitbusy(ai)) {
+ if (!waitbusy(ai)) {
clear_bit (FLAG_FLASHING, &ai->flags);
airo_print_info(ai->dev->name, "Waitbusy hang after setflash mode");
return -EIO;
@@ -8060,16 +8097,17 @@ static int setflashmode (struct airo_info *ai) {
* x 50us for echo .
*/
-static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
+static int flashpchar(struct airo_info *ai, int byte, int dwelltime)
+{
int echo;
int waittime;
byte |= 0x8000;
- if(dwelltime == 0 )
+ if (dwelltime == 0)
dwelltime = 200;
- waittime=dwelltime;
+ waittime = dwelltime;
/* Wait for busy bit d15 to go false indicating buffer empty */
while ((IN4500 (ai, SWS0) & 0x8000) && waittime > 0) {
@@ -8078,20 +8116,20 @@ static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
}
/* timeout for busy clear wait */
- if(waittime <= 0 ){
+ if (waittime <= 0) {
airo_print_info(ai->dev->name, "flash putchar busywait timeout!");
return -EBUSY;
}
/* Port is clear now write byte and wait for it to echo back */
do {
- OUT4500(ai,SWS0,byte);
+ OUT4500(ai, SWS0, byte);
udelay(50);
dwelltime -= 50;
- echo = IN4500(ai,SWS1);
+ echo = IN4500(ai, SWS1);
} while (dwelltime >= 0 && echo != byte);
- OUT4500(ai,SWS1,0);
+ OUT4500(ai, SWS1, 0);
return (echo == byte) ? 0 : -EIO;
}
@@ -8100,29 +8138,30 @@ static int flashpchar(struct airo_info *ai,int byte,int dwelltime) {
* Get a character from the card matching matchbyte
* Step 3)
*/
-static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
+static int flashgchar(struct airo_info *ai, int matchbyte, int dwelltime)
+{
int rchar;
- unsigned char rbyte=0;
+ unsigned char rbyte = 0;
do {
- rchar = IN4500(ai,SWS1);
+ rchar = IN4500(ai, SWS1);
- if(dwelltime && !(0x8000 & rchar)){
+ if (dwelltime && !(0x8000 & rchar)) {
dwelltime -= 10;
mdelay(10);
continue;
}
rbyte = 0xff & rchar;
- if( (rbyte == matchbyte) && (0x8000 & rchar) ){
- OUT4500(ai,SWS1,0);
+ if ((rbyte == matchbyte) && (0x8000 & rchar)) {
+ OUT4500(ai, SWS1, 0);
return 0;
}
- if( rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
+ if (rbyte == 0x81 || rbyte == 0x82 || rbyte == 0x83 || rbyte == 0x1a || 0xffff == rchar)
break;
- OUT4500(ai,SWS1,0);
+ OUT4500(ai, SWS1, 0);
- }while(dwelltime > 0);
+ } while (dwelltime > 0);
return -EIO;
}
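Taken together, flashpchar()/flashgchar() implement a small mailbox handshake over the SWS registers, with bit 15 as the ready/valid flag and the low byte as data. In outline:

    /* Host -> card: wait for SWS0 bit 15 to clear, write byte | 0x8000
     * to SWS0, then poll SWS1 until the card echoes the byte back; ack
     * with OUT4500(ai, SWS1, 0).
     * Card -> host: poll SWS1 for 0x8000 | byte, consume it, ack the
     * same way. (A summary of the two functions above.)
     */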
@@ -8131,21 +8170,22 @@ static int flashgchar(struct airo_info *ai,int matchbyte,int dwelltime){
* send to the card
*/
-static int flashputbuf(struct airo_info *ai){
+static int flashputbuf(struct airo_info *ai)
+{
int nwords;
/* Write stuff */
if (test_bit(FLAG_MPI,&ai->flags))
memcpy_toio(ai->pciaux + 0x8000, ai->flash, FLASHSIZE);
else {
- OUT4500(ai,AUXPAGE,0x100);
- OUT4500(ai,AUXOFF,0);
+ OUT4500(ai, AUXPAGE, 0x100);
+ OUT4500(ai, AUXOFF, 0);
- for(nwords=0;nwords != FLASHSIZE / 2;nwords++){
- OUT4500(ai,AUXDATA,ai->flash[nwords] & 0xffff);
+ for (nwords = 0; nwords != FLASHSIZE / 2; nwords++) {
+ OUT4500(ai, AUXDATA, ai->flash[nwords] & 0xffff);
}
}
- OUT4500(ai,SWS0,0x8000);
+ OUT4500(ai, SWS0, 0x8000);
return 0;
}
@@ -8153,8 +8193,9 @@ static int flashputbuf(struct airo_info *ai){
/*
*
*/
-static int flashrestart(struct airo_info *ai,struct net_device *dev){
- int i,status;
+static int flashrestart(struct airo_info *ai, struct net_device *dev)
+{
+ int i, status;
ssleep(1); /* Added 12/7/00 */
clear_bit (FLAG_FLASHING, &ai->flags);
@@ -8166,9 +8207,9 @@ static int flashrestart(struct airo_info *ai,struct net_device *dev){
status = setup_card(ai, dev->dev_addr, 1);
if (!test_bit(FLAG_MPI,&ai->flags))
- for( i = 0; i < MAX_FIDS; i++ ) {
+ for (i = 0; i < MAX_FIDS; i++) {
ai->fids[i] = transmit_allocate
- ( ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2 );
+ (ai, AIRO_DEF_MTU, i >= MAX_FIDS / 2);
}
ssleep(1); /* Added 12/7/00 */
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 461e955aa259..23fbddd0c1f8 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -201,8 +201,7 @@ static u32 ipw2100_debug_level = IPW_DL_NONE;
#define IPW_DEBUG(level, message...) \
do { \
if (ipw2100_debug_level & (level)) { \
- printk(KERN_DEBUG "ipw2100: %c %s ", \
- in_interrupt() ? 'I' : 'U', __func__); \
+ printk(KERN_DEBUG "ipw2100: %s ", __func__); \
printk(message); \
} \
} while (0)
@@ -3204,9 +3203,9 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
}
}
-static void ipw2100_irq_tasklet(unsigned long data)
+static void ipw2100_irq_tasklet(struct tasklet_struct *t)
{
- struct ipw2100_priv *priv = (struct ipw2100_priv *)data;
+ struct ipw2100_priv *priv = from_tasklet(priv, t, irq_tasklet);
struct net_device *dev = priv->net_dev;
unsigned long flags;
u32 inta, tmp;
@@ -6005,7 +6004,7 @@ static void ipw2100_rf_kill(struct work_struct *work)
spin_unlock_irqrestore(&priv->low_lock, flags);
}
-static void ipw2100_irq_tasklet(unsigned long data);
+static void ipw2100_irq_tasklet(struct tasklet_struct *t);
static const struct net_device_ops ipw2100_netdev_ops = {
.ndo_open = ipw2100_open,
@@ -6135,8 +6134,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill);
INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event);
- tasklet_init(&priv->irq_tasklet,
- ipw2100_irq_tasklet, (unsigned long)priv);
+ tasklet_setup(&priv->irq_tasklet, ipw2100_irq_tasklet);
/* NOTE: We do not start the deferred work for status checks yet */
priv->stop_rf_kill = 1;
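This is the stock tasklet API conversion: tasklet_setup() registers the handler with the tasklet itself as context, and the handler recovers its containing structure with from_tasklet(), a thin container_of() wrapper:

    /* from_tasklet(priv, t, irq_tasklet) expands to
     *   container_of(t, typeof(*priv), irq_tasklet)
     * so the (unsigned long) context cast of the old API disappears
     * and the compiler checks the type.
     */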
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 129ef2f6248a..ada6ce32c1f1 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -1945,12 +1945,11 @@ static void notify_wx_assoc_event(struct ipw_priv *priv)
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
}
-static void ipw_irq_tasklet(unsigned long data)
+static void ipw_irq_tasklet(struct tasklet_struct *t)
{
- struct ipw_priv *priv = (struct ipw_priv *)data;
+ struct ipw_priv *priv = from_tasklet(priv, t, irq_tasklet);
u32 inta, inta_mask, handled = 0;
unsigned long flags;
- int rc = 0;
spin_lock_irqsave(&priv->irq_lock, flags);
@@ -1980,7 +1979,7 @@ static void ipw_irq_tasklet(unsigned long data)
if (inta & IPW_INTA_BIT_TX_CMD_QUEUE) {
IPW_DEBUG_HC("Command completed.\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
+ ipw_queue_tx_reclaim(priv, &priv->txq_cmd, -1);
priv->status &= ~STATUS_HCMD_ACTIVE;
wake_up_interruptible(&priv->wait_command_queue);
handled |= IPW_INTA_BIT_TX_CMD_QUEUE;
@@ -1988,25 +1987,25 @@ static void ipw_irq_tasklet(unsigned long data)
if (inta & IPW_INTA_BIT_TX_QUEUE_1) {
IPW_DEBUG_TX("TX_QUEUE_1\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
+ ipw_queue_tx_reclaim(priv, &priv->txq[0], 0);
handled |= IPW_INTA_BIT_TX_QUEUE_1;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_2) {
IPW_DEBUG_TX("TX_QUEUE_2\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
+ ipw_queue_tx_reclaim(priv, &priv->txq[1], 1);
handled |= IPW_INTA_BIT_TX_QUEUE_2;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_3) {
IPW_DEBUG_TX("TX_QUEUE_3\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
+ ipw_queue_tx_reclaim(priv, &priv->txq[2], 2);
handled |= IPW_INTA_BIT_TX_QUEUE_3;
}
if (inta & IPW_INTA_BIT_TX_QUEUE_4) {
IPW_DEBUG_TX("TX_QUEUE_4\n");
- rc = ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
+ ipw_queue_tx_reclaim(priv, &priv->txq[3], 3);
handled |= IPW_INTA_BIT_TX_QUEUE_4;
}
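The four hunks above delete stores to rc that nothing ever reads, the kind of dead assignment -Wunused-but-set-variable flags; the same cleanup recurs later in this series for w in ipw_rx() and len in il_enqueue_hcmd(). When the discarded value comes from a register read, only the store is dropped, since the access itself may matter to the hardware. A hedged sketch of that case:

#include <linux/io.h>

static void example_poke(void __iomem *regs)
{
	/* before: u32 w = readl(regs); the value was never used again */
	readl(regs);	/* MMIO read kept for its side effect */
}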
@@ -2999,7 +2998,7 @@ static void ipw_remove_current_network(struct ipw_priv *priv)
spin_unlock_irqrestore(&priv->ieee->lock, flags);
}
-/**
+/*
* Check that card is still alive.
* Reads debug register from domain0.
* If card is present, pre-defined value should
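Here, and in the many similar hunks that follow, comments opening with the kernel-doc marker but not following kernel-doc grammar are demoted to plain block comments, which keeps scripts/kernel-doc from warning about them. A sketch of the two forms side by side (bar_frob() is hypothetical):

struct bar;

/**
 * bar_frob - frobnicate a bar
 * @bar: object to operate on
 *
 * Real kernel-doc: a "name - summary" line plus "@param:" lines.
 */
int bar_frob(struct bar *bar);

/*
 * Free-form implementation note: opens with a single asterisk, so the
 * kernel-doc tooling ignores it.
 */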
@@ -3114,7 +3113,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
mdelay(1);
/* write ucode */
- /**
+ /*
* @bug
* Do NOT set indirect address register once and then
* store data to indirect data register in the loop.
@@ -3667,7 +3666,7 @@ static int ipw_load(struct ipw_priv *priv)
return rc;
}
-/**
+/*
* DMA services
*
* Theory of operation
@@ -3690,11 +3689,11 @@ static int ipw_load(struct ipw_priv *priv)
* we only utilize the first data transmit queue (queue1).
*/
-/**
+/*
* Driver allocates buffers of this size for Rx
*/
-/**
+/*
* ipw_rx_queue_space - Return number of free slots available in queue.
*/
static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
@@ -3725,7 +3724,7 @@ static inline int ipw_queue_inc_wrap(int index, int n_bd)
return (++index == n_bd) ? 0 : index;
}
-/**
+/*
* Initialize common DMA queue structure
*
* @param q queue to init
@@ -3789,7 +3788,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
return 0;
}
-/**
+/*
* Free one TFD, those at index [txq->q.last_used].
* Do NOT advance any indexes
*
@@ -3812,7 +3811,7 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
if (le32_to_cpu(bd->u.data.num_chunks) > NUM_TFD_CHUNKS) {
IPW_ERROR("Too many chunks: %i\n",
le32_to_cpu(bd->u.data.num_chunks));
- /** @todo issue fatal error, it is quite serious situation */
+ /* @todo issue fatal error, it is quite serious situation */
return;
}
@@ -3829,7 +3828,7 @@ static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
}
}
-/**
+/*
* Deallocate DMA queue.
*
* Empty queue by removing and destroying all BD's.
@@ -3861,7 +3860,7 @@ static void ipw_queue_tx_free(struct ipw_priv *priv, struct clx2_tx_queue *txq)
memset(txq, 0, sizeof(*txq));
}
-/**
+/*
* Destroy all DMA queues and structures
*
* @param priv
@@ -4466,7 +4465,7 @@ static void handle_scan_event(struct ipw_priv *priv)
}
}
-/**
+/*
* Handle host notification packet.
* Called from interrupt routine
*/
@@ -4926,7 +4925,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
}
}
-/**
+/*
* Destroys all DMA structures and initialise them again
*
* @param priv
@@ -4935,7 +4934,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
static int ipw_queue_reset(struct ipw_priv *priv)
{
int rc = 0;
- /** @todo customize queue sizes */
+ /* @todo customize queue sizes */
int nTx = 64, nTxCmd = 8;
ipw_tx_queue_free(priv);
/* Tx CMD queue */
@@ -4991,7 +4990,7 @@ static int ipw_queue_reset(struct ipw_priv *priv)
return rc;
}
-/**
+/*
* Reclaim Tx queue entries no more used by NIC.
*
* When FW advances 'R' index, all entries between old and
@@ -8248,12 +8247,12 @@ static void ipw_rx(struct ipw_priv *priv)
struct ipw_rx_mem_buffer *rxb;
struct ipw_rx_packet *pkt;
struct libipw_hdr_4addr *header;
- u32 r, w, i;
+ u32 r, i;
u8 network_packet;
u8 fill_rx = 0;
r = ipw_read32(priv, IPW_RX_READ_INDEX);
- w = ipw_read32(priv, IPW_RX_WRITE_INDEX);
+ ipw_read32(priv, IPW_RX_WRITE_INDEX);
i = priv->rxq->read;
if (ipw_rx_queue_space (priv->rxq) > (RX_QUEUE_SIZE / 2))
@@ -8446,7 +8445,7 @@ static void ipw_rx(struct ipw_priv *priv)
#define DEFAULT_SHORT_RETRY_LIMIT 7U
#define DEFAULT_LONG_RETRY_LIMIT 4U
-/**
+/*
* ipw_sw_reset
* @option: options to control different reset behaviour
* 0 = reset everything except the 'disable' module_param
@@ -10673,8 +10672,7 @@ static void ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->qos_activate, ipw_bg_qos_activate);
#endif /* CONFIG_IPW2200_QOS */
- tasklet_init(&priv->irq_tasklet,
- ipw_irq_tasklet, (unsigned long)priv);
+ tasklet_setup(&priv->irq_tasklet, ipw_irq_tasklet);
}
static void shim__set_security(struct net_device *dev,
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index e1ec1c96dcd8..98fe62737888 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -1382,14 +1382,12 @@ BIT_ARG16(x)
#define IPW_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
- printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME": %s " fmt, __func__ , ## args); } while (0)
#ifdef CONFIG_IPW2200_DEBUG
#define IPW_LL_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
- printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME": %s " fmt, __func__ , ## args); } while (0)
#else
#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
#endif /* CONFIG_IPW2200_DEBUG */
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw.h b/drivers/net/wireless/intel/ipw2x00/libipw.h
index e87538a8b88b..7964ef7d15f0 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw.h
+++ b/drivers/net/wireless/intel/ipw2x00/libipw.h
@@ -60,8 +60,7 @@
extern u32 libipw_debug_level;
#define LIBIPW_DEBUG(level, fmt, args...) \
do { if (libipw_debug_level & (level)) \
- printk(KERN_DEBUG "libipw: %c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
+ printk(KERN_DEBUG "libipw: %s " fmt, __func__ , ## args); } while (0)
#else
#define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
#endif /* CONFIG_LIBIPW_DEBUG */
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 9167c3d2711d..4ca8212d4fa4 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -365,7 +365,7 @@ il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
case WLAN_CIPHER_SUITE_WEP104:
tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
+ fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
tx_cmd->sec_ctl |=
TX_CMD_SEC_WEP | (info->control.hw_key->
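The comment-to-keyword change above, and its later siblings, swap the free-text /* fall through */ marker for the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which maps to __attribute__((__fallthrough__)) where the compiler supports it, so -Wimplicit-fallthrough can verify the intent. A compact sketch of the same WEP case structure, using a hypothetical helper:

#include <linux/compiler_attributes.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>

static int wep_key_bits(u32 cipher)
{
	int bits = 0;

	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP104:
		bits += 64;
		fallthrough;	/* WEP104 is WEP40 plus 64 more key bits */
	case WLAN_CIPHER_SUITE_WEP40:
		bits += 40;
		break;
	default:
		return -EINVAL;
	}
	return bits;
}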
@@ -807,7 +807,7 @@ il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
wake_up(&il->wait_command_queue);
}
-/**
+/*
* il3945_setup_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
@@ -907,7 +907,7 @@ il3945_setup_handlers(struct il_priv *il)
*
*/
-/**
+/*
* il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32
@@ -916,7 +916,7 @@ il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
return cpu_to_le32((u32) dma_addr);
}
-/**
+/*
* il3945_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
@@ -966,7 +966,7 @@ il3945_rx_queue_restock(struct il_priv *il)
}
}
-/**
+/*
* il3945_rx_replenish - Move all used packet from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
@@ -1167,7 +1167,7 @@ il3945_calc_db_from_ratio(int sig_ratio)
return (int)ratio2dB[sig_ratio];
}
-/**
+/*
* il3945_rx_handle - Main entry function for receiving responses from uCode
*
* Uses the il->handlers callback function array to invoke
@@ -1374,9 +1374,9 @@ il3945_dump_nic_error_log(struct il_priv *il)
}
static void
-il3945_irq_tasklet(unsigned long data)
+il3945_irq_tasklet(struct tasklet_struct *t)
{
- struct il_priv *il = (struct il_priv *)data;
+ struct il_priv *il = from_tasklet(il, t, irq_tasklet);
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -1654,7 +1654,7 @@ il3945_dealloc_ucode_pci(struct il_priv *il)
il_free_fw_desc(il->pci_dev, &il->ucode_boot);
}
-/**
+/*
* il3945_verify_inst_full - verify runtime uCode image in card vs. host,
* looking at all data.
*/
@@ -1693,7 +1693,7 @@ il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
return rc;
}
-/**
+/*
* il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
@@ -1730,7 +1730,7 @@ il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
return rc;
}
-/**
+/*
* il3945_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
@@ -1811,7 +1811,7 @@ IL3945_UCODE_GET(init_size);
IL3945_UCODE_GET(init_data_size);
IL3945_UCODE_GET(boot_size);
-/**
+/*
* il3945_read_ucode - Read uCode images from disk file.
*
* Copy into buffers for card to fetch via bus-mastering
@@ -2047,7 +2047,7 @@ error:
return ret;
}
-/**
+/*
* il3945_set_ucode_ptrs - Set uCode address location
*
* Tell initialization uCode where to find runtime uCode.
@@ -2081,7 +2081,7 @@ il3945_set_ucode_ptrs(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_init_alive_start - Called after N_ALIVE notification received
*
* Called after N_ALIVE notification received from "initialize" uCode.
@@ -2125,7 +2125,7 @@ restart:
queue_work(il->workqueue, &il->restart);
}
-/**
+/*
* il3945_alive_start - called after N_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
* Alive gets handled by il3945_init_alive_start()).
@@ -3399,9 +3399,7 @@ il3945_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
- tasklet_init(&il->irq_tasklet,
- il3945_irq_tasklet,
- (unsigned long)il);
+ tasklet_setup(&il->irq_tasklet, il3945_irq_tasklet);
}
static void
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 0af9e997c9f6..b2478cbe558e 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -124,7 +124,7 @@ il3945_clear_win(struct il3945_rate_scale_data *win)
win->stamp = 0;
}
-/**
+/*
* il3945_rate_scale_flush_wins - flush out the rate scale wins
*
* Returns the number of wins that have gathered data but were
@@ -229,7 +229,7 @@ il3945_bg_rate_scale_flush(struct timer_list *t)
D_RATE("leave\n");
}
-/**
+/*
* il3945_collect_tx_data - Update the success/failure sliding win
*
* We keep a sliding win of the last 64 packets transmitted
@@ -416,7 +416,7 @@ il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
del_timer_sync(&rs_sta->rate_scale_flush);
}
-/**
+/*
* il3945_rs_tx_status - Update rate control values based on Tx results
*
* NOTE: Uses il_priv->retry_rate for the # of retries attempted by
@@ -584,7 +584,7 @@ il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
return (high << 8) | low;
}
-/**
+/*
* il3945_rs_get_rate - find the rate for the requested packet
*
* Returns the ieee80211_rate structure allocated by the driver.
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index fd63eba47ba2..0597d828bee1 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -90,7 +90,7 @@ il3945_get_prev_ieee_rate(u8 rate_idx)
#define IL_EVT_DISABLE (0)
#define IL_EVT_DISABLE_SIZE (1532/32)
-/**
+/*
* il3945_disable_events - Disable selected events in uCode event log
*
* Disable an event by writing "1"s into "disable"
@@ -261,7 +261,7 @@ il3945_rs_next_rate(struct il_priv *il, int rate)
return next_rate;
}
-/**
+/*
* il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
*
* When FW advances 'R' idx, all entries between old and new 'R' idx
@@ -291,7 +291,7 @@ il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
il_wake_queue(il, txq);
}
-/**
+/*
* il3945_hdl_tx - Handle Tx response
*/
static void
@@ -627,7 +627,7 @@ il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
return 0;
}
-/**
+/*
* il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
*
* Does NOT advance any idxes
@@ -675,7 +675,7 @@ il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
}
}
-/**
+/*
* il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
*
*/
@@ -828,7 +828,7 @@ il3945_tx_reset(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_txq_ctx_reset - Reset TX queue context
*
* Destroys all DMA structures and initialize them again
@@ -993,7 +993,7 @@ il3945_hw_nic_init(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_hw_txq_ctx_free - Free TXQ Context
*
* Destroy all TX DMA queues and structures
@@ -1035,7 +1035,7 @@ il3945_hw_txq_ctx_stop(struct il_priv *il)
}
}
-/**
+/*
* il3945_hw_reg_adjust_power_by_temp
* return idx delta into power gain settings table
*/
@@ -1045,7 +1045,7 @@ il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
return (new_reading - old_reading) * (-11) / 100;
}
-/**
+/*
* il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
*/
static inline int
@@ -1060,7 +1060,7 @@ il3945_hw_get_temperature(struct il_priv *il)
return _il_rd(il, CSR_UCODE_DRV_GP2);
}
-/**
+/*
* il3945_hw_reg_txpower_get_temperature
* get the current temperature by reading from NIC
*/
@@ -1096,7 +1096,7 @@ il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
* Both are lower than older versions' 9 degrees */
#define IL_TEMPERATURE_LIMIT_TIMER 6
-/**
+/*
* il3945_is_temp_calib_needed - determines if new calibration is needed
*
* records new temperature in tx_mgr->temperature.
@@ -1315,7 +1315,7 @@ il3945_hw_reg_fix_power_idx(int idx)
/* Kick off thermal recalibration check every 60 seconds */
#define REG_RECALIB_PERIOD (60)
-/**
+/*
* il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
*
* Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
@@ -1372,7 +1372,7 @@ il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
power_gain_table[band_idx][power_idx].dsp_atten;
}
-/**
+/*
* il3945_send_tx_power - fill in Tx Power command with gain settings
*
* Configures power settings for all rates for the current channel,
@@ -1439,7 +1439,7 @@ il3945_send_tx_power(struct il_priv *il)
}
-/**
+/*
* il3945_hw_reg_set_new_power - Configures power tables at new levels
* @ch_info: Channel to update. Uses power_info.requested_power.
*
@@ -1510,7 +1510,7 @@ il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
return 0;
}
-/**
+/*
* il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
*
* NOTE: Returned power limit may be less (but not more) than requested,
@@ -1537,7 +1537,7 @@ il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
return min(max_power, ch_info->max_power_avg);
}
-/**
+/*
* il3945_hw_reg_comp_txpower_temp - Compensate for temperature
*
* Compensate txpower settings of *all* channels for temperature.
@@ -1699,7 +1699,7 @@ il3945_send_rxon_assoc(struct il_priv *il)
return rc;
}
-/**
+/*
* il3945_commit_rxon - commit staging_rxon to hardware
*
* The RXON command in staging_rxon is committed to the hardware and
@@ -1830,7 +1830,7 @@ il3945_commit_rxon(struct il_priv *il)
return 0;
}
-/**
+/*
* il3945_reg_txpower_periodic - called when time to check our temperature.
*
* -- reset periodic timer
@@ -1873,7 +1873,7 @@ out:
mutex_unlock(&il->mutex);
}
-/**
+/*
* il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
*
* This function is used when initializing channel-info structs.
@@ -1912,7 +1912,7 @@ il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
return group_idx;
}
-/**
+/*
* il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
*
* Interpolate to get nominal (i.e. at factory calibration temperature) idx
@@ -2035,7 +2035,7 @@ il3945_hw_reg_init_channel_groups(struct il_priv *il)
}
}
-/**
+/*
* il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
*
* Second pass (during init) to set up il->channel_info
@@ -2305,7 +2305,7 @@ il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
vif->bss_conf.bssid);
}
-/**
+/*
* il3945_init_hw_rate_table - Initialize the hardware rate fallback table
*/
int
@@ -2520,7 +2520,7 @@ il3945_eeprom_release_semaphore(struct il_priv *il)
return;
}
- /**
+ /*
* il3945_load_bsm - Load bootstrap instructions
*
* BSM operation:
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-calib.c b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
index e78bdefb8952..2f97cbd42320 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-calib.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-calib.c
@@ -598,7 +598,7 @@ il4965_find_first_chain(u8 mask)
return CHAIN_C;
}
-/**
+/*
* Run disconnected antenna algorithm to find out which antennas are
* disconnected.
*/
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index e73c223a7d28..28675a4ad861 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -226,7 +226,7 @@ il4965_hw_nic_init(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
*/
static inline __le32
@@ -235,7 +235,7 @@ il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
return cpu_to_le32((u32) (dma_addr >> 8));
}
-/**
+/*
* il4965_rx_queue_restock - refill RX queue from pre-allocated pool
*
* If there are slots in the RX queue that need to be restocked,
@@ -288,7 +288,7 @@ il4965_rx_queue_restock(struct il_priv *il)
}
}
-/**
+/*
* il4965_rx_replenish - Move all used packet from rx_used to rx_free
*
* When moving to rx_free an SKB is allocated for the slot.
@@ -544,7 +544,7 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
break;
}
- /* fall through - if TTAK OK */
+ fallthrough; /* if TTAK OK */
default:
if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
@@ -1127,7 +1127,7 @@ il4965_count_chain_bitmap(u32 chain_bitmap)
return res;
}
-/**
+/*
* il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
@@ -1617,7 +1617,7 @@ il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
case WLAN_CIPHER_SUITE_WEP104:
tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
+ fallthrough;
case WLAN_CIPHER_SUITE_WEP40:
tx_cmd->sec_ctl |=
(TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
@@ -1933,7 +1933,7 @@ il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
memset(ptr, 0, sizeof(*ptr));
}
-/**
+/*
* il4965_hw_txq_ctx_free - Free TXQ Context
*
* Destroy all TX DMA queues and structures
@@ -1959,12 +1959,9 @@ il4965_hw_txq_ctx_free(struct il_priv *il)
il_free_txq_mem(il);
}
-/**
+/*
* il4965_txq_ctx_alloc - allocate TX queue context
* Allocate all Tx DMA structures and initialize them
- *
- * @param il
- * @return error code
*/
int
il4965_txq_ctx_alloc(struct il_priv *il)
@@ -2060,7 +2057,7 @@ il4965_txq_ctx_unmap(struct il_priv *il)
il_tx_queue_unmap(il, txq_id);
}
-/**
+/*
* il4965_txq_ctx_stop - Stop all Tx DMA channels
*/
void
@@ -2101,7 +2098,7 @@ il4965_txq_ctx_activate_free(struct il_priv *il)
return -1;
}
-/**
+/*
* il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
*/
static void
@@ -2114,7 +2111,7 @@ il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
(1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
}
-/**
+/*
* il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
*/
static int
@@ -2141,7 +2138,7 @@ il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
return 0;
}
-/**
+/*
* il4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
*
* NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
@@ -2276,7 +2273,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
return ret;
}
-/**
+/*
* txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
* il->lock must be held by the caller
*/
@@ -2488,7 +2485,7 @@ il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
return nfreed;
}
-/**
+/*
* il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
*
* Go through block-ack's bitmap of ACK'd frames, update driver's record of
@@ -2641,7 +2638,7 @@ il4965_tx_status_to_mac80211(u32 status)
}
}
-/**
+/*
* il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
*/
static int
@@ -2753,7 +2750,7 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
return 0;
}
-/**
+/*
* il4965_hdl_tx - Handle standard (non-aggregation) Tx response
*/
static void
@@ -2873,7 +2870,7 @@ il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
spin_unlock_irqrestore(&il->sta_lock, flags);
}
-/**
+/*
* translate ucode response to mac80211 tx status control values
*/
void
@@ -2897,7 +2894,7 @@ il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
}
-/**
+/*
* il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
*
* Handles block-acknowledge notification from device, which reports success
@@ -3502,7 +3499,7 @@ il4965_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
return ret;
}
-/**
+/*
* il4965_alloc_bcast_station - add broadcast station into driver's station table.
*
* This adds the broadcast station into the driver's station table
@@ -3543,7 +3540,7 @@ il4965_alloc_bcast_station(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_update_bcast_station - update broadcast station's LQ command
*
* Only used by iwl4965. Placed here to have all bcast station management
@@ -3579,7 +3576,7 @@ il4965_update_bcast_stations(struct il_priv *il)
return il4965_update_bcast_station(il);
}
-/**
+/*
* il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
*/
int
@@ -3903,10 +3900,8 @@ il4965_tfd_get_num_tbs(struct il_tfd *tfd)
return tfd->num_tbs & 0x1f;
}
-/**
+/*
* il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @il - driver ilate data
- * @txq - tx queue
*
* Does NOT advance any TFD circular buffer read/write idxes
* Does NOT free the TFD itself (which is within circular buffer)
@@ -4044,7 +4039,7 @@ il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
IL_WARN("uCode did not respond OK.\n");
}
-/**
+/*
* il4965_bg_stats_periodic - Timer callback to queue stats
*
* This callback is provided in order to send a stats request.
@@ -4155,7 +4150,7 @@ il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
wake_up(&il->wait_command_queue);
}
-/**
+/*
* il4965_setup_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
@@ -4199,7 +4194,7 @@ il4965_setup_handlers(struct il_priv *il)
il->handlers[C_TX] = il4965_hdl_tx;
}
-/**
+/*
* il4965_rx_handle - Main entry function for receiving responses from uCode
*
* Uses the il->handlers callback function array to invoke
@@ -4344,9 +4339,9 @@ il4965_synchronize_irq(struct il_priv *il)
}
static void
-il4965_irq_tasklet(unsigned long data)
+il4965_irq_tasklet(struct tasklet_struct *t)
{
- struct il_priv *il = (struct il_priv *)data;
+ struct il_priv *il = from_tasklet(il, t, irq_tasklet);
u32 inta, handled = 0;
u32 inta_fh;
unsigned long flags;
@@ -4756,7 +4751,7 @@ il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
return 0;
}
-/**
+/*
* il4965_ucode_callback - callback when firmware was loaded
*
* If loaded successfully, copies the firmware into buffers
@@ -5259,7 +5254,7 @@ il4965_alive_notify(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_alive_start - called after N_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
* Alive gets handled by il_init_alive_start()).
@@ -6238,9 +6233,7 @@ il4965_setup_deferred_work(struct il_priv *il)
timer_setup(&il->watchdog, il_bg_watchdog, 0);
- tasklet_init(&il->irq_tasklet,
- il4965_irq_tasklet,
- (unsigned long)il);
+ tasklet_setup(&il->irq_tasklet, il4965_irq_tasklet);
}
static void
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index 1f196665d21f..9a491e5db75b 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -142,7 +142,7 @@ il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
}
#endif
-/**
+/*
* The following tables contain the expected throughput metrics for all rates
*
* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
@@ -393,7 +393,7 @@ il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
return 0;
}
-/**
+/*
* il4965_rs_collect_tx_data - Update the success/failure sliding win
*
* We keep a sliding win of the last 62 packets transmitted
@@ -620,7 +620,7 @@ il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
return 1;
}
-/**
+/*
* Green-field mode is valid if the station supports it and
* there are no non-GF stations present in the BSS.
*/
@@ -631,7 +631,7 @@ il4965_rs_use_green(struct il_priv *il, struct ieee80211_sta *sta)
!il->ht.non_gf_sta_present;
}
-/**
+/*
* il4965_rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
@@ -2114,7 +2114,7 @@ out:
lq_sta->last_txrate_idx = i;
}
-/**
+/*
* il4965_rs_initialize_lq - Initialize a station's hardware rate table
*
* The uCode's station table contains a table of fallback rates
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index fc8fa5818de7..9fa556486511 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -25,7 +25,7 @@
#include "common.h"
#include "4965.h"
-/**
+/*
* il_verify_inst_sparse - verify runtime uCode image in card vs. host,
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
@@ -57,7 +57,7 @@ il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
return ret;
}
-/**
+/*
* il4965_verify_inst_full - verify runtime uCode image in card vs. host,
* looking at all data.
*/
@@ -96,7 +96,7 @@ il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
return ret;
}
-/**
+/*
* il4965_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
@@ -292,7 +292,7 @@ il4965_verify_bsm(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_load_bsm - Load bootstrap instructions
*
* BSM operation:
@@ -402,7 +402,7 @@ il4965_load_bsm(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_set_ucode_ptrs - Set uCode address location
*
* Tell initialization uCode where to find runtime uCode.
@@ -435,7 +435,7 @@ il4965_set_ucode_ptrs(struct il_priv *il)
return 0;
}
-/**
+/*
* il4965_init_alive_start - Called after N_ALIVE notification received
*
* Called after N_ALIVE notification received from "initialize" uCode.
@@ -567,7 +567,7 @@ il4965_math_div_round(s32 num, s32 denom, s32 * res)
return 1;
}
-/**
+/*
* il4965_get_voltage_compensation - Power supply voltage comp for txpower
*
* Determines power supply voltage compensation for txpower calculations.
@@ -654,7 +654,7 @@ il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
}
}
-/**
+/*
* il4965_interpolate_chan - Interpolate factory measurements for one channel
*
* Interpolates factory measurements from the two sample channels within a
@@ -1231,7 +1231,7 @@ il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
return 0;
}
-/**
+/*
* il4965_send_tx_power - Configure the TXPOWER level user limit
*
* Uses the active RXON for channel, band, and characteristics (ht40, high)
@@ -1528,7 +1528,7 @@ il4965_hw_channel_switch(struct il_priv *il,
return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
}
-/**
+/*
* il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
*/
static void
@@ -1553,9 +1553,8 @@ il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
bc_ent;
}
-/**
+/*
* il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @stats: Provides the temperature reading from the uCode
*
* A return of <0 indicates bogus data in the stats
*/
@@ -1619,7 +1618,7 @@ il4965_hw_get_temperature(struct il_priv *il)
/* Adjust Txpower only if temperature variance is greater than threshold. */
#define IL_TEMPERATURE_THRESHOLD 3
-/**
+/*
* il4965_is_temp_calib_needed - determines if new calibration is needed
*
* If the temperature changed has changed sufficiently, then a recalibration
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index f78e062df572..0651a6a416d1 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -685,7 +685,7 @@ il_eeprom_query16(const struct il_priv *il, size_t offset)
}
EXPORT_SYMBOL(il_eeprom_query16);
-/**
+/*
* il_eeprom_init - read EEPROM contents
*
* Load the EEPROM contents from adapter into il->eeprom
@@ -836,7 +836,7 @@ il_init_band_reference(const struct il_priv *il, int eep_band,
#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
? # x " " : "")
-/**
+/*
* il_mod_ht40_chan_info - Copy ht40 channel info into driver's il.
*
* Does not set up a command, or touch hardware.
@@ -877,7 +877,7 @@ il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
? # x " " : "")
-/**
+/*
* il_init_channel_map - Set up driver's info for all possible channels
*/
int
@@ -1024,7 +1024,7 @@ il_free_channel_map(struct il_priv *il)
}
EXPORT_SYMBOL(il_free_channel_map);
-/**
+/*
* il_get_channel_info - Find driver's ilate channel info
*
* Based on band and channel number.
@@ -1343,7 +1343,7 @@ il_do_scan_abort(struct il_priv *il)
D_SCAN("Successfully send scan abort\n");
}
-/**
+/*
* il_scan_cancel - Cancel any currently executing HW scan
*/
int
@@ -1355,7 +1355,7 @@ il_scan_cancel(struct il_priv *il)
}
EXPORT_SYMBOL(il_scan_cancel);
-/**
+/*
* il_scan_cancel_timeout - Cancel any currently executing HW scan
* @ms: amount of time to wait (in milliseconds) for scan to abort
*
@@ -1607,10 +1607,9 @@ il_bg_scan_check(struct work_struct *data)
mutex_unlock(&il->mutex);
}
-/**
+/*
* il_fill_probe_req - fill in all required fields and IE for probe request
*/
-
u16
il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ies, int ie_len, int left)
@@ -1913,7 +1912,7 @@ done:
return;
}
-/**
+/*
* il_prep_station - Prepare station information for addition
*
* should be called with sta_lock held
@@ -2000,7 +1999,7 @@ EXPORT_SYMBOL_GPL(il_prep_station);
#define STA_WAIT_TIMEOUT (HZ/2)
-/**
+/*
* il_add_station_common -
*/
int
@@ -2060,7 +2059,7 @@ il_add_station_common(struct il_priv *il, const u8 *addr, bool is_ap,
}
EXPORT_SYMBOL(il_add_station_common);
-/**
+/*
* il_sta_ucode_deactivate - deactivate ucode status for a station
*
* il->sta_lock must be held
@@ -2136,7 +2135,7 @@ il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
return ret;
}
-/**
+/*
* il_remove_station - Remove driver's knowledge of station.
*/
int
@@ -2192,7 +2191,7 @@ out_err:
}
EXPORT_SYMBOL_GPL(il_remove_station);
-/**
+/*
* il_clear_ucode_stations - clear ucode station table bits
*
* This function clears all the bits in the driver indicating
@@ -2224,7 +2223,7 @@ il_clear_ucode_stations(struct il_priv *il)
}
EXPORT_SYMBOL(il_clear_ucode_stations);
-/**
+/*
* il_restore_stations() - Restore driver known stations to device
*
* All stations considered active by driver, but not present in ucode, is
@@ -2356,7 +2355,7 @@ il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
}
#endif
-/**
+/*
* il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
* It sometimes happens when a HT rate has been in use and we
@@ -2385,7 +2384,7 @@ il_is_lq_table_valid(struct il_priv *il, struct il_link_quality_cmd *lq)
return true;
}
-/**
+/*
* il_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
* after station has been added.
@@ -2531,7 +2530,7 @@ EXPORT_SYMBOL(il_mac_sta_remove);
*
*/
-/**
+/*
* il_rx_queue_space - Return number of free slots available in queue.
*/
int
@@ -2548,7 +2547,7 @@ il_rx_queue_space(const struct il_rx_queue *q)
}
EXPORT_SYMBOL(il_rx_queue_space);
-/**
+/*
* il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
void
@@ -2677,7 +2676,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_BAD_KEY_TTAK)
break;
- /* fall through */
+ fallthrough;
case RX_RES_STATUS_SEC_TYPE_WEP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
@@ -2687,7 +2686,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
D_RX("Packet destroyed\n");
return -1;
}
- /* fall through */
+ fallthrough;
case RX_RES_STATUS_SEC_TYPE_CCMP:
if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
RX_RES_STATUS_DECRYPT_OK) {
@@ -2703,7 +2702,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
}
EXPORT_SYMBOL(il_set_decrypted_flag);
-/**
+/*
* il_txq_update_write_ptr - Send new write idx to hardware
*/
void
@@ -2743,7 +2742,7 @@ il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
}
EXPORT_SYMBOL(il_txq_update_write_ptr);
-/**
+/*
* il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
*/
void
@@ -2762,7 +2761,7 @@ il_tx_queue_unmap(struct il_priv *il, int txq_id)
}
EXPORT_SYMBOL(il_tx_queue_unmap);
-/**
+/*
* il_tx_queue_free - Deallocate DMA queue.
* @txq: Transmit queue to deallocate.
*
@@ -2805,7 +2804,7 @@ il_tx_queue_free(struct il_priv *il, int txq_id)
}
EXPORT_SYMBOL(il_tx_queue_free);
-/**
+/*
* il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
*/
void
@@ -2843,9 +2842,8 @@ il_cmd_queue_unmap(struct il_priv *il)
}
EXPORT_SYMBOL(il_cmd_queue_unmap);
-/**
+/*
* il_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
*
* Empty queue by removing and destroying all BD's.
* Free all buffers.
@@ -2924,7 +2922,7 @@ il_queue_space(const struct il_queue *q)
EXPORT_SYMBOL(il_queue_space);
-/**
+/*
* il_queue_init - Initialize queue's high/low-water and read/write idxes
*/
static int
@@ -2958,7 +2956,7 @@ il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
return 0;
}
-/**
+/*
* il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
*/
static int
@@ -2998,7 +2996,7 @@ error:
return -ENOMEM;
}
-/**
+/*
* il_tx_queue_init - Allocate and initialize one tx/cmd queue
*/
int
@@ -3105,7 +3103,7 @@ EXPORT_SYMBOL(il_tx_queue_reset);
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-/**
+/*
* il_enqueue_hcmd - enqueue a uCode command
* @il: device ilate data point
* @cmd: a point to the ucode command structure
@@ -3123,7 +3121,6 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
struct il_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
- int len;
u32 idx;
u16 fix_size;
@@ -3182,9 +3179,6 @@ il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct il_device_cmd);
- if (idx == TFD_CMD_SLOTS)
- len = IL_MAX_CMD_SIZE;
#ifdef CONFIG_IWLEGACY_DEBUG
switch (out_cmd->hdr.cmd) {
@@ -3233,7 +3227,7 @@ out:
return idx;
}
-/**
+/*
* il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
*
* When FW advances 'R' idx, all entries between old and new 'R' idx
@@ -3266,7 +3260,7 @@ il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
}
}
-/**
+/*
* il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
*
@@ -3417,7 +3411,7 @@ il_init_ht_hw_capab(const struct il_priv *il,
}
}
-/**
+/*
* il_init_geos - Initialize mac80211's geo/channel info based from eeprom
*/
int
@@ -3763,7 +3757,7 @@ il_check_rxon_cmd(struct il_priv *il)
}
EXPORT_SYMBOL(il_check_rxon_cmd);
-/**
+/*
* il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
* @il: staging_rxon is compared to active_rxon
*
@@ -3943,7 +3937,7 @@ il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
}
EXPORT_SYMBOL(il_get_single_channel_number);
-/**
+/*
* il_set_rxon_channel - Set the band and channel values in staging RXON
* @ch: requested channel as a pointer to struct ieee80211_channel
@@ -4146,7 +4140,7 @@ il_print_rx_config_cmd(struct il_priv *il)
}
EXPORT_SYMBOL(il_print_rx_config_cmd);
#endif
-/**
+/*
* il_irq_handle_error - called for HW or SW error interrupt from card
*/
void
@@ -5011,7 +5005,7 @@ il_update_qos(struct il_priv *il)
&il->qos_data.def_qos_parm, NULL);
}
-/**
+/*
* il_mac_config - mac80211 config callback
*/
int
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index bc9cd7e5ccb8..ea1b1bb7ddcb 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -2925,8 +2925,8 @@ do { \
#define IL_DBG(level, fmt, args...) \
do { \
if (il_get_debug_level(il) & level) \
- dev_err(&il->hw->wiphy->dev, "%c %s " fmt, \
- in_interrupt() ? 'I' : 'U', __func__ , ##args); \
+ dev_err(&il->hw->wiphy->dev, "%s " fmt, __func__, \
+ ##args); \
} while (0)
#define il_print_hex_dump(il, level, p, len) \
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index 4f741b305d96..d998a3f1b056 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -1364,9 +1364,8 @@ il_dbgfs_register(struct il_priv *il, const char *name)
}
EXPORT_SYMBOL(il_dbgfs_register);
-/**
+/*
* Remove the debugfs files and directories
- *
*/
void
il_dbgfs_unregister(struct il_priv *il)
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index fbcd1405aeea..14b0db28143b 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -13,9 +13,10 @@ iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
+iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o
-iwlwifi-objs += fw/dbg.o
+iwlwifi-objs += fw/dbg.o fw/pnvm.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index efe427049a6e..d2bbe6a73514 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -57,7 +57,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 56
+#define IWL_22000_UCODE_API_MAX 59
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -89,6 +89,9 @@
#define IWL_SO_A_GF4_A_FW_PRE "iwlwifi-so-a0-gf4-a0-"
#define IWL_SNJ_A_GF4_A_FW_PRE "iwlwifi-SoSnj-a0-gf4-a0-"
#define IWL_SNJ_A_GF_A_FW_PRE "iwlwifi-SoSnj-a0-gf-a0-"
+#define IWL_SNJ_A_HR_B_FW_PRE "iwlwifi-SoSnj-a0-hr-b0-"
+#define IWL_MA_A_GF_A_FW_PRE "iwlwifi-ma-a0-gf-a0-"
+#define IWL_MA_A_MR_A_FW_PRE "iwlwifi-ma-a0-mr-a0-"
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
@@ -118,6 +121,12 @@
IWL_SNJ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_SNJ_A_GF_A_MODULE_FIRMWARE(api) \
IWL_SNJ_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_SNJ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_SNJ_A_HR_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(api) \
+ IWL_MA_A_GF_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(api) \
+ IWL_MA_A_MR_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -244,7 +253,7 @@ const struct iwl_cfg_trans_params iwl_qu_trans_cfg = {
.device_family = IWL_DEVICE_FAMILY_22000,
.base_params = &iwl_22000_base_params,
.integrated = true,
- .xtal_latency = 5000,
+ .xtal_latency = 500,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_200US,
};
@@ -335,14 +344,32 @@ const struct iwl_cfg_trans_params iwl_ax200_trans_cfg = {
.bisr_workaround = 1,
};
+const struct iwl_cfg_trans_params iwl_ma_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_AX210,
+ .base_params = &iwl_ax210_base_params,
+ .mq_rx_supported = true,
+ .use_tfh = true,
+ .rf_id = true,
+ .gen2 = true,
+ .integrated = true,
+ .umac_prph_offset = 0x300000
+};
+
+const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz";
const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz";
-const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101";
+const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz";
+const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz";
+const char iwl_ma_name[] = "Intel(R) Wi-Fi 6";
const char iwl_ax200_killer_1650w_name[] =
"Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)";
const char iwl_ax200_killer_1650x_name[] =
"Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)";
+const char iwl_ax201_killer_1650s_name[] =
+ "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)";
+const char iwl_ax201_killer_1650i_name[] =
+ "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)";
const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -539,7 +566,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_so_hr_a0 = {
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .name = iwl_ax211_name,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -547,7 +574,7 @@ const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0 = {
};
const struct iwl_cfg iwlax211_2ax_cfg_so_gf_a0_long = {
- .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .name = iwl_ax211_name,
.fw_name_pre = IWL_SO_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -565,7 +592,7 @@ const struct iwl_cfg iwlax210_2ax_cfg_ty_gf_a0 = {
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .name = iwl_ax411_name,
.fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -573,7 +600,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0 = {
};
const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
- .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .name = iwl_ax411_name,
.fw_name_pre = IWL_SO_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -583,7 +610,7 @@ const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long = {
};
const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX411 160MHz",
+ .name = iwl_ax411_name,
.fw_name_pre = IWL_SNJ_A_GF4_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
@@ -591,13 +618,35 @@ const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0 = {
};
const struct iwl_cfg iwlax211_cfg_snj_gf_a0 = {
- .name = "Intel(R) Wi-Fi 6 AX211 160MHz",
+ .name = iwl_ax211_name,
.fw_name_pre = IWL_SNJ_A_GF_A_FW_PRE,
.uhb_supported = true,
IWL_DEVICE_AX210,
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwlax201_cfg_snj_hr_b0 = {
+ .name = iwl_ax201_name,
+ .fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_ma_a0_gf_a0 = {
+ .fw_name_pre = IWL_MA_A_GF_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_ma_a0_mr_a0 = {
+ .fw_name_pre = IWL_MA_A_MR_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_AX210,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -612,3 +661,6 @@ MODULE_FIRMWARE(IWL_SO_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_TY_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_SNJ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_SNJ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_GF_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_MA_A_MR_A_FW_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
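The firmware plumbing added above follows one pattern: a per-platform name prefix, the highest supported API version stringified into a file name, and a MODULE_FIRMWARE() line so the blob name lands in modinfo for packaging tools. A sketch with hypothetical names:

#include <linux/module.h>
#include <linux/stringify.h>

#define EX_UCODE_API_MAX	59
#define EX_FW_PRE		"iwlwifi-example-a0-"
#define EX_MODULE_FIRMWARE(api)	EX_FW_PRE __stringify(api) ".ucode"

/* Expands to "iwlwifi-example-a0-59.ucode". */
MODULE_FIRMWARE(EX_MODULE_FIRMWARE(EX_UCODE_API_MAX));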
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
index f84b8e5d3f0b..be4acf4a0e32 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -180,7 +180,16 @@ const struct iwl_cfg_trans_params iwl9560_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.integrated = true,
- .xtal_latency = 5000,
+ .xtal_latency = 650,
+};
+
+const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg = {
+ .device_family = IWL_DEVICE_FAMILY_9000,
+ .base_params = &iwl9000_base_params,
+ .mq_rx_supported = true,
+ .rf_id = true,
+ .integrated = true,
+ .xtal_latency = 2820,
};
const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
@@ -189,7 +198,7 @@ const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg = {
.mq_rx_supported = true,
.rf_id = true,
.integrated = true,
- .xtal_latency = 5000,
+ .xtal_latency = 670,
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
};
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
index 588b15697710..974e1c324ca7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/calib.c
@@ -761,7 +761,7 @@ static inline u8 find_first_chain(u8 mask)
return CHAIN_C;
}
-/**
+/*
* Run disconnected antenna algorithm to find out which antennas are
* disconnected.
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index d42bc46fe566..c3e25885d194 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -58,8 +58,8 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
/**
* iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of bits need to shift for masking)
*/
static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
u16 tsf_bits)
@@ -69,8 +69,8 @@ static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
/**
* iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits need to shift for masking)
+ * @priv: pointer to iwl_priv data structure
+ * @tsf_bits: number of bits need to shift for masking)
*/
static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
u16 tsf_bits)
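These two hunks fix kernel-doc parameter syntax: the tool only recognises "@name: description", so the old "@name -- description" lines were parsed as plain prose rather than as parameter docs. The corrected shape, sketched on a hypothetical helper:

struct iwl_priv;

/**
 * ex_mask_low - mask of the lower 32 bits of a beacon time value
 * @priv: pointer to the driver private data structure
 * @tsf_bits: number of bits to shift when masking
 */
u32 ex_mask_low(struct iwl_priv *priv, u16 tsf_bits);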
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index eab94d2f46b1..3b937a7dd403 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -110,7 +110,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
vif->bss_conf.bssid);
}
-/**
+/*
* iwlagn_txfifo_flush: send REPLY_TXFIFO_FLUSH command to uCode
*
* pre-requirements:
@@ -769,7 +769,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
return res;
}
-/**
+/*
* iwlagn_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
*
* Selects how many and which Rx receivers/antennas/chains to use.
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index b882705ff66d..461af5831156 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -374,7 +374,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
-/**
+/*
* iwl_bg_statistics_periodic - Timer callback to queue statistics
*
* This callback is provided in order to send a statistics request.
@@ -533,7 +533,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
priv->event_log.next_entry = next_entry;
}
-/**
+/*
* iwl_bg_ucode_trace - Timer callback to log ucode event
*
* The timer is continually set to execute every
@@ -762,7 +762,7 @@ static void iwl_send_bt_config(struct iwl_priv *priv)
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
-/**
+/*
* iwl_alive_start - called after REPLY_ALIVE notification received
* from protocol/runtime uCode (initialization uCode's
* Alive gets handled by iwl_init_alive_start()).
@@ -1682,9 +1682,8 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
#define EVENT_START_OFFSET (4 * sizeof(u32))
-/**
+/*
* iwl_print_event_log - Dump error event log to syslog
- *
*/
static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
u32 num_events, u32 mode,
@@ -1762,7 +1761,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
return pos;
}
-/**
+/*
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
*/
static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 4fa4eab2d7f3..548540dd0c0f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -151,7 +151,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
{}
#endif
-/**
+/*
* The following tables contain the expected throughput metrics for all rates
*
* 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
@@ -318,7 +318,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-/**
+/*
* Program the device to use fixed rate for frame transmit
* This is for debugging/testing only
* once the device start use fixed rate, we need to reload the module
@@ -440,7 +440,7 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
return 0;
}
-/**
+/*
* rs_collect_tx_data - Update the success/failure sliding window
*
* We keep a sliding window of the last 62 packets transmitted
@@ -673,7 +673,7 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
return 1;
}
-/**
+/*
* Green-field mode is valid if the station supports it and
* there are no non-GF stations present in the BSS.
*/
@@ -689,7 +689,7 @@ static bool rs_use_green(struct ieee80211_sta *sta)
return false;
}
-/**
+/*
* rs_get_supported_rates - get the available rates
*
* if management frame or broadcast frame only return
@@ -2612,7 +2612,7 @@ out:
lq_sta->last_txrate_idx = index;
}
-/**
+/*
* rs_initialize_lq - Initialize a station's hardware rate table
*
* The uCode's station table contains a table of fallback rates
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 673d60784bfa..9d55ece05020 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -132,7 +132,7 @@ static void iwlagn_rx_beacon_notif(struct iwl_priv *priv,
priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
}
-/**
+/*
* iwl_good_plcp_health - checks for plcp error.
*
* When the plcp error is exceeding the thresholds, reset the radio
@@ -929,7 +929,7 @@ static void iwlagn_rx_noa_notification(struct iwl_priv *priv,
kfree_rcu(old_data, rcu_head);
}
-/**
+/*
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
* Setup the RX handlers for each of the reply types sent from the uCode
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 6f37c9fac31d..12a3d464ae64 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -689,7 +689,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
_iwl_set_rxon_ht(priv, ht_conf, ctx);
}
-/**
+/*
* iwl_set_rxon_channel - Set the band and channel values in staging RXON
* @ch: requested channel as a pointer to struct ieee80211_channel
@@ -826,7 +826,7 @@ static int iwl_check_rxon_cmd(struct iwl_priv *priv,
return errors ? -EINVAL : 0;
}
-/**
+/*
* iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
* @priv: staging_rxon is compared to active_rxon
*
@@ -1007,7 +1007,7 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv,
ctx->staging.ofdm_basic_rates = ofdm;
}
-/**
+/*
* iwlagn_commit_rxon - commit staging_rxon to hardware
*
* The RXON command in staging_rxon is committed to the hardware and
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 1d8590046ff7..832fcbb787e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -186,7 +186,7 @@ static void iwl_do_scan_abort(struct iwl_priv *priv)
IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
}
-/**
+/*
* iwl_scan_cancel - Cancel any currently executing HW scan
*/
int iwl_scan_cancel(struct iwl_priv *priv)
@@ -196,10 +196,9 @@ int iwl_scan_cancel(struct iwl_priv *priv)
return 0;
}
-/**
+/*
* iwl_scan_cancel_timeout - Cancel any currently executing HW scan
* @ms: amount of time to wait (in milliseconds) for scan to abort
- *
*/
void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
{
@@ -560,10 +559,9 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
return added;
}
-/**
+/*
* iwl_fill_probe_req - fill in all required fields and IE for probe request
*/
-
static u16 iwl_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
const u8 *ies, int ie_len, const u8 *ssid,
u8 ssid_len, int left)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 51158edce15b..e622948661fa 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -234,7 +234,7 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
priv->stations[index].sta.station_flags |= flags;
}
-/**
+/*
* iwl_prep_station - Prepare station information for addition
*
* should be called with sta_lock held
@@ -323,7 +323,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
#define STA_WAIT_TIMEOUT (HZ/2)
-/**
+/*
* iwl_add_station_common -
*/
int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
@@ -383,7 +383,7 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return ret;
}
-/**
+/*
* iwl_sta_ucode_deactivate - deactivate ucode status for a station
*/
static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
@@ -451,7 +451,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
return ret;
}
-/**
+/*
* iwl_remove_station - Remove driver's knowledge of station.
*/
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
@@ -601,7 +601,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
link_cmd->sta_id = sta_id;
}
-/**
+/*
* iwl_clear_ucode_stations - clear ucode station table bits
*
* This function clears all the bits in the driver indicating
@@ -636,7 +636,7 @@ void iwl_clear_ucode_stations(struct iwl_priv *priv,
"No active stations found to be cleared\n");
}
-/**
+/*
* iwl_restore_stations() - Restore driver known stations to device
*
* All stations considered active by driver, but not present in ucode, is
@@ -773,7 +773,7 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
}
#endif
-/**
+/*
* is_lq_table_valid() - Test one aspect of LQ cmd for validity
*
* It sometimes happens when a HT rate has been in use and we
@@ -807,7 +807,7 @@ static bool is_lq_table_valid(struct iwl_priv *priv,
return true;
}
-/**
+/*
* iwl_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
* after station has been added.
@@ -1258,7 +1258,7 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
return ret;
}
-/**
+/*
* iwlagn_alloc_bcast_station - add broadcast station into driver's station table.
*
* This adds the broadcast station into the driver's station table
@@ -1298,7 +1298,7 @@ int iwlagn_alloc_bcast_station(struct iwl_priv *priv,
return 0;
}
-/**
+/*
* iwl_update_bcast_station - update broadcast station's LQ command
*
* Only used by iwlagn. Placed here to have all bcast station management
@@ -1341,7 +1341,7 @@ int iwl_update_bcast_stations(struct iwl_priv *priv)
return ret;
}
-/**
+/*
* iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
*/
int iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index fd454836adbe..e3962bb52328 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -803,7 +803,7 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
rcu_read_unlock();
}
-/**
+/*
* translate ucode response to mac80211 tx status control values
*/
static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
@@ -1256,7 +1256,7 @@ void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb)
}
}
-/**
+/*
* iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
*
* Handles block-acknowledge notification from device, which reports success
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index dc769b580431..3e5a35e26ad3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -118,8 +118,8 @@ IWL_EXPORT_SYMBOL(iwl_acpi_get_object);
* method (DSM) interface. The returned acpi object must be freed by the
* calling function.
*/
-void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
- union acpi_object *args)
+static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
+ union acpi_object *args)
{
union acpi_object *obj;
@@ -400,9 +400,9 @@ out_free:
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_eckv);
-int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled)
+static int iwl_sar_set_profile(union acpi_object *table,
+ struct iwl_sar_profile *profile,
+ bool enabled)
{
int i;
@@ -418,18 +418,13 @@ int iwl_sar_set_profile(union acpi_object *table,
return 0;
}
-IWL_EXPORT_SYMBOL(iwl_sar_set_profile);
-int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
- int prof_a, int prof_b)
+static int iwl_sar_fill_table(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_subbands,
+ int prof_a, int prof_b)
{
- int i, j, idx;
int profs[ACPI_SAR_NUM_CHAIN_LIMITS] = { prof_a, prof_b };
-
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS < 2);
- BUILD_BUG_ON(ACPI_SAR_NUM_CHAIN_LIMITS * ACPI_SAR_NUM_SUB_BANDS !=
- ACPI_SAR_TABLE_SIZE);
+ int i, j, idx;
for (i = 0; i < ACPI_SAR_NUM_CHAIN_LIMITS; i++) {
struct iwl_sar_profile *prof;
@@ -461,9 +456,9 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
"SAR EWRD: chain %d profile index %d\n",
i, profs[i]);
IWL_DEBUG_RADIO(fwrt, " Chain[%d]:\n", i);
- for (j = 0; j < ACPI_SAR_NUM_SUB_BANDS; j++) {
- idx = (i * ACPI_SAR_NUM_SUB_BANDS) + j;
- per_chain_restriction[i][j] =
+ for (j = 0; j < n_subbands; j++) {
+ idx = i * ACPI_SAR_NUM_SUB_BANDS + j;
+ per_chain[i * n_subbands + j] =
cpu_to_le16(prof->table[idx]);
IWL_DEBUG_RADIO(fwrt, " Band[%d] = %d * .125dBm\n",
j, prof->table[idx]);
@@ -472,6 +467,23 @@ int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
return 0;
}
+
+int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
+ int prof_a, int prof_b)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < n_tables; i++) {
+ ret = iwl_sar_fill_table(fwrt,
+ &per_chain[i * n_subbands * ACPI_SAR_NUM_CHAIN_LIMITS],
+ n_subbands, prof_a, prof_b);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
IWL_EXPORT_SYMBOL(iwl_sar_select_profile);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt)
@@ -632,25 +644,8 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
-int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
- struct iwl_host_cmd *cmd)
-{
- struct iwl_geo_tx_power_profiles_resp *resp;
- int ret;
-
- resp = (void *)cmd->resp_pkt->data;
- ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES)) {
- ret = -EIO;
- IWL_WARN(fwrt, "Invalid geographic profile idx (%d)\n", ret);
- }
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_validate_sar_geo_profile);
-
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset_group *table)
+ struct iwl_per_chain_offset *table, u32 n_bands)
{
int ret, i, j;
@@ -666,23 +661,26 @@ int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
return -ENOENT;
}
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS *
- ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE);
-
- BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES);
-
for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
- struct iwl_per_chain_offset *chain =
- (struct iwl_per_chain_offset *)&table[i];
-
- for (j = 0; j < ACPI_WGDS_NUM_BANDS; j++) {
+ for (j = 0; j < n_bands; j++) {
+ struct iwl_per_chain_offset *chain =
+ &table[i * n_bands + j];
u8 *value;
+ if (j * ACPI_GEO_PER_CHAIN_SIZE >=
+ ARRAY_SIZE(fwrt->geo_profiles[0].values))
+ /*
+ * Currently we only store lb and hb values, and
+ * don't have any special ones for uhb. So leave
+ * those empty for the time being
+ */
+ break;
+
value = &fwrt->geo_profiles[i].values[j *
ACPI_GEO_PER_CHAIN_SIZE];
- chain[j].max_tx_power = cpu_to_le16(value[0]);
- chain[j].chain_a = value[1];
- chain[j].chain_b = value[2];
+ chain->max_tx_power = cpu_to_le16(value[0]);
+ chain->chain_a = value[1];
+ chain->chain_b = value[2];
IWL_DEBUG_RADIO(fwrt,
"SAR geographic profile[%d] Band[%d]: chain A = %d chain B = %d max_tx_power = %d\n",
i, j, value[1], value[2], value[0]);
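
A minimal caller sketch for the reworked iwl_sar_select_profile() above: the flattened buffer is laid out table-major, then chain, then sub-band, matching the index math in iwl_sar_fill_table() (fwrt, prof_a and prof_b are assumed caller state, not part of the patch):

    __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
    int ret;

    /* one contiguous (chains x sub-bands) table per n_tables entry */
    ret = iwl_sar_select_profile(fwrt, (__le16 *)per_chain,
                                 IWL_NUM_CHAIN_TABLES, IWL_NUM_SUB_BANDS,
                                 prof_a, prof_b);
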
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 0ada9eddb8b1..bddf8a44e163 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -89,6 +89,7 @@
#define ACPI_SAR_NUM_CHAIN_LIMITS 2
#define ACPI_SAR_NUM_SUB_BANDS 5
+#define ACPI_SAR_NUM_TABLES 1
#define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2)
#define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \
@@ -104,13 +105,12 @@
#define APCI_WTAS_BLACK_LIST_MAX 16
#define ACPI_WTAS_WIFI_DATA_SIZE (3 + APCI_WTAS_BLACK_LIST_MAX)
-#define ACPI_WGDS_NUM_BANDS 2
#define ACPI_WGDS_TABLE_SIZE 3
-#define ACPI_PPAG_NUM_CHAINS 2
-#define ACPI_PPAG_NUM_SUB_BANDS 5
-#define ACPI_PPAG_WIFI_DATA_SIZE ((ACPI_PPAG_NUM_CHAINS * \
- ACPI_PPAG_NUM_SUB_BANDS) + 3)
+#define ACPI_PPAG_WIFI_DATA_SIZE ((IWL_NUM_CHAIN_LIMITS * \
+ IWL_NUM_SUB_BANDS) + 3)
+#define ACPI_PPAG_WIFI_DATA_SIZE_V2 ((IWL_NUM_CHAIN_LIMITS * \
+ IWL_NUM_SUB_BANDS_V2) + 3)
/* PPAG gain value bounds in 1/8 dBm */
#define ACPI_PPAG_MIN_LB -16
@@ -133,15 +133,26 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
};
+enum iwl_dsm_values_srd {
+ DSM_VALUE_SRD_ACTIVE,
+ DSM_VALUE_SRD_PASSIVE,
+ DSM_VALUE_SRD_DISABLE,
+ DSM_VALUE_SRD_MAX
+};
+
+enum iwl_dsm_values_indonesia {
+ DSM_VALUE_INDONESIA_DISABLE,
+ DSM_VALUE_INDONESIA_ENABLE,
+ DSM_VALUE_INDONESIA_RESERVED,
+ DSM_VALUE_INDONESIA_MAX
+};
+
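
The two value enums above name what the BIOS DSM functions may return. A hedged consumer sketch: DSM_FUNC_DISABLE_SRD is assumed to be the func index paired with iwl_dsm_values_srd, and apply_srd_policy() is hypothetical; iwl_acpi_get_dsm_u8() is the accessor declared below:

    int val = iwl_acpi_get_dsm_u8(fwrt->dev, 0, DSM_FUNC_DISABLE_SRD);

    /* range-check BIOS data before acting on it */
    if (val >= 0 && val < DSM_VALUE_SRD_MAX)
        apply_srd_policy(val);    /* hypothetical helper */
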
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
void *iwl_acpi_get_object(struct device *dev, acpi_string method);
-void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func,
- union acpi_object *args);
-
int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func);
union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
@@ -171,12 +182,8 @@ u64 iwl_acpi_get_pwr_limit(struct device *dev);
*/
int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk);
-int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled);
-
int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
int prof_a, int prof_b);
int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt);
@@ -187,11 +194,8 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
-int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
- struct iwl_host_cmd *cmd);
-
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset_group *table);
+ struct iwl_per_chain_offset *table, u32 n_bands);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *black_list_array,
int *black_list_size);
@@ -237,15 +241,8 @@ static inline int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
return -ENOENT;
}
-static inline int iwl_sar_set_profile(union acpi_object *table,
- struct iwl_sar_profile *profile,
- bool enabled)
-{
- return -ENOENT;
-}
-
static inline int iwl_sar_select_profile(struct iwl_fw_runtime *fwrt,
- __le16 per_chain_restriction[][IWL_NUM_SUB_BANDS],
+ __le16 *per_chain, u32 n_tables, u32 n_subbands,
int prof_a, int prof_b)
{
return -ENOENT;
@@ -271,18 +268,6 @@ static inline bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
return false;
}
-static inline int iwl_validate_sar_geo_profile(struct iwl_fw_runtime *fwrt,
- struct iwl_host_cmd *cmd)
-{
- return -ENOENT;
-}
-
-static inline int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset_group *table)
-{
- return -ENOENT;
-}
-
static inline int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *black_list_array,
int *black_list_size)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index df1bd0d2450e..a1cac47395bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +30,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -129,19 +128,31 @@ struct iwl_umac_alive {
struct iwl_umac_debug_addrs dbg_ptrs;
} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
-struct mvm_alive_resp_v3 {
+struct iwl_sku_id {
+ __le32 data[3];
+} __packed; /* SKU_ID_API_S_VER_1 */
+
+struct iwl_alive_ntf_v3 {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data;
struct iwl_umac_alive umac_data;
-} __packed; /* ALIVE_RES_API_S_VER_3 */
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwl_alive_ntf_v4 {
+ __le16 status;
+ __le16 flags;
+ struct iwl_lmac_alive lmac_data[2];
+ struct iwl_umac_alive umac_data;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_4 */
-struct mvm_alive_resp {
+struct iwl_alive_ntf_v5 {
__le16 status;
__le16 flags;
struct iwl_lmac_alive lmac_data[2];
struct iwl_umac_alive umac_data;
-} __packed; /* ALIVE_RES_API_S_VER_4 */
+ struct iwl_sku_id sku_id;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
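
Since the v3/v4/v5 layouts only ever grow, the driver can tell them apart by payload length alone. A simplified sketch of that dispatch, assuming the surrounding alive handler context (iwl_rx_packet_payload_len() is the existing helper):

    struct iwl_sku_id sku_id;

    if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v5)) {
        const struct iwl_alive_ntf_v5 *ntf = (const void *)pkt->data;

        /* only v5 carries the SKU id used to match a PNVM image */
        sku_id = ntf->sku_id;
    }
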
/**
* enum iwl_extended_cfg_flag - commands driver may send before
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
index 570f19026c91..6cb22a9a9380 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
*
@@ -27,7 +27,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* All rights reserved.
@@ -59,10 +59,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-
#ifndef __iwl_fw_api_binding_h__
#define __iwl_fw_api_binding_h__
+#include <fw/file.h>
+#include <fw/img.h>
+
#define MAX_MACS_IN_BINDING (3)
#define MAX_BINDINGS (4)
@@ -112,6 +114,14 @@ struct iwl_binding_cmd {
#define IWL_LMAC_24G_INDEX 0
#define IWL_LMAC_5G_INDEX 1
+static inline u32 iwl_mvm_get_lmac_id(const struct iwl_fw *fw,
+ enum nl80211_band band)
+{
+ if (!fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_CDB_SUPPORT) ||
+ band == NL80211_BAND_2GHZ)
+ return IWL_LMAC_24G_INDEX;
+ return IWL_LMAC_5G_INDEX;
+}
+
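
A usage sketch for the new helper: callers building per-band firmware objects can derive the LMAC index directly from the nl80211 band (mvm and chandef are assumed caller context):

    cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw,
                                                  chandef->chan->band));
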
/* The maximal number of fragments in the FW's schedule session */
#define IWL_MVM_MAX_QUOTA 128
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 4f46f3ed8794..8cc36dbb2311 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -104,11 +104,12 @@ enum iwl_mvm_command_groups {
*/
enum iwl_legacy_cmds {
/**
- * @MVM_ALIVE:
+ * @UCODE_ALIVE_NTFY:
* Alive data from the firmware, as described in
- * &struct mvm_alive_resp_v3 or &struct mvm_alive_resp.
+ * &struct iwl_alive_ntf_v3 or &struct iwl_alive_ntf_v4 or
+ * &struct iwl_alive_ntf_v5.
*/
- MVM_ALIVE = 0x1,
+ UCODE_ALIVE_NTFY = 0x1,
/**
* @REPLY_ERROR: Cause an error in the firmware, for testing purposes.
@@ -410,7 +411,8 @@ enum iwl_legacy_cmds {
* one of &struct iwl_statistics_cmd,
* &struct iwl_notif_statistics_v11,
* &struct iwl_notif_statistics_v10,
- * &struct iwl_notif_statistics
+ * &struct iwl_notif_statistics,
+ * &struct iwl_statistics_operational_ntfy
*/
STATISTICS_CMD = 0x9c,
@@ -418,7 +420,8 @@ enum iwl_legacy_cmds {
* @STATISTICS_NOTIFICATION:
* one of &struct iwl_notif_statistics_v10,
* &struct iwl_notif_statistics_v11,
- * &struct iwl_notif_statistics
+ * &struct iwl_notif_statistics,
+ * &struct iwl_statistics_operational_ntfy
*/
STATISTICS_NOTIFICATION = 0x9d,
@@ -431,8 +434,7 @@ enum iwl_legacy_cmds {
/**
* @REDUCE_TX_POWER_CMD:
- * &struct iwl_dev_tx_power_cmd_v3 or &struct iwl_dev_tx_power_cmd_v4
- * or &struct iwl_dev_tx_power_cmd
+ * &struct iwl_dev_tx_power_cmd
*/
REDUCE_TX_POWER_CMD = 0x9f,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index c4562e1f8d18..5db301a6a312 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -451,10 +451,15 @@ union iwl_all_tsc_rsc {
struct iwl_aes_rsc_tsc aes;
}; /* ALL_TSC_RSC_API_S_VER_2 */
-struct iwl_wowlan_rsc_tsc_params_cmd {
+struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 {
union iwl_all_tsc_rsc all_tsc_rsc;
} __packed; /* ALL_TSC_RSC_API_S_VER_2 */
+struct iwl_wowlan_rsc_tsc_params_cmd {
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 params;
+ __le32 sta_id;
+} __packed; /* ALL_TSC_RSC_API_S_VER_4 */
+
#define IWL_MIC_KEY_SIZE 8
struct iwl_mic_keys {
u8 tx[IWL_MIC_KEY_SIZE];
@@ -469,17 +474,26 @@ struct iwl_p1k_cache {
#define IWL_NUM_RX_P1K_CACHE 2
-struct iwl_wowlan_tkip_params_cmd {
+struct iwl_wowlan_tkip_params_cmd_ver_1 {
struct iwl_mic_keys mic_keys;
struct iwl_p1k_cache tx;
struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_1 */
+struct iwl_wowlan_tkip_params_cmd {
+ struct iwl_mic_keys mic_keys;
+ struct iwl_p1k_cache tx;
+ struct iwl_p1k_cache rx_uni[IWL_NUM_RX_P1K_CACHE];
+ struct iwl_p1k_cache rx_multi[IWL_NUM_RX_P1K_CACHE];
+ u8 reserved[2];
+ __le32 sta_id;
+} __packed; /* WOWLAN_TKIP_SETTING_API_S_VER_2 */
+
#define IWL_KCK_MAX_SIZE 32
#define IWL_KEK_MAX_SIZE 32
-struct iwl_wowlan_kek_kck_material_cmd {
+struct iwl_wowlan_kek_kck_material_cmd_v2 {
u8 kck[IWL_KCK_MAX_SIZE];
u8 kek[IWL_KEK_MAX_SIZE];
__le16 kck_len;
@@ -487,6 +501,18 @@ struct iwl_wowlan_kek_kck_material_cmd {
__le64 replay_ctr;
} __packed; /* KEK_KCK_MATERIAL_API_S_VER_2 */
+struct iwl_wowlan_kek_kck_material_cmd_v3 {
+ u8 kck[IWL_KCK_MAX_SIZE];
+ u8 kek[IWL_KEK_MAX_SIZE];
+ __le16 kck_len;
+ __le16 kek_len;
+ __le64 replay_ctr;
+ __le32 akm;
+ __le32 gtk_cipher;
+ __le32 igtk_cipher;
+ __le32 bigtk_cipher;
+} __packed; /* KEK_KCK_MATERIAL_API_S_VER_3 */
+
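
Because v3 is a strict superset of v2 (the AKM and cipher fields are appended after the v2 layout), a caller can keep one struct and size the host command by firmware support. A hedged sketch; fw_supports_v3 stands in for whatever version check the driver uses:

    struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
    u16 cmd_size;

    /* older firmware simply never sees the trailing v3 fields */
    cmd_size = fw_supports_v3 ? sizeof(kek_kck_cmd) :
               sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
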
#define RF_KILL_INDICATOR_FOR_WOWLAN 0x87
enum iwl_wowlan_rekey_status {
@@ -525,7 +551,7 @@ struct iwl_wowlan_gtk_status_v1 {
u8 reserved[3];
u8 decrypt_key[16];
u8 tkip_mic_key[8];
- struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
#define WOWLAN_KEY_MAX_SIZE 32
@@ -550,7 +576,7 @@ struct iwl_wowlan_gtk_status {
u8 key_flags;
u8 reserved[2];
u8 tkip_mic_key[8];
- struct iwl_wowlan_rsc_tsc_params_cmd rsc;
+ struct iwl_wowlan_rsc_tsc_params_cmd_ver_2 rsc;
} __packed; /* WOWLAN_GTK_MATERIAL_VER_2 */
#define IWL_WOWLAN_GTK_IDX_MASK (BIT(0) | BIT(1))
@@ -635,7 +661,7 @@ struct iwl_wowlan_status_v7 {
} __packed; /* WOWLAN_STATUSES_API_S_VER_7 */
/**
- * struct iwl_wowlan_status - WoWLAN status
+ * struct iwl_wowlan_status_v9 - WoWLAN status (version 9)
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
@@ -653,7 +679,7 @@ struct iwl_wowlan_status_v7 {
* @reserved: unused
* @wake_packet: wakeup packet
*/
-struct iwl_wowlan_status {
+struct iwl_wowlan_status_v9 {
struct iwl_wowlan_gtk_status gtk[WOWLAN_GTK_KEYS_NUM];
struct iwl_wowlan_igtk_status igtk[WOWLAN_IGTK_KEYS_NUM];
__le64 replay_ctr;
@@ -671,6 +697,44 @@ struct iwl_wowlan_status {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_API_S_VER_9 */
+/**
+ * struct iwl_wowlan_status - WoWLAN status
+ * @gtk: GTK data
+ * @igtk: IGTK data
+ * @bigtk: BIGTK data
+ * @replay_ctr: GTK rekey replay counter
+ * @pattern_number: number of the matched pattern
+ * @non_qos_seq_ctr: non-QoS sequence counter to use next
+ * @qos_seq_ctr: QoS sequence counters to use next
+ * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
+ * @num_of_gtk_rekeys: number of GTK rekeys
+ * @tid_tear_down: bit mask of TIDs whose BA sessions were closed
+ * in suspend state
+ * @reserved: reserved
+ * @received_beacons: number of received beacons
+ * @wake_packet_length: wakeup packet length
+ * @wake_packet_bufsize: wakeup packet buffer size
+ * @wake_packet: wakeup packet
+ */
+struct iwl_wowlan_status {
+ struct iwl_wowlan_gtk_status gtk[1];
+ struct iwl_wowlan_igtk_status igtk[1];
+ struct iwl_wowlan_igtk_status bigtk[WOWLAN_IGTK_KEYS_NUM];
+ __le64 replay_ctr;
+ __le16 pattern_number;
+ __le16 non_qos_seq_ctr;
+ __le16 qos_seq_ctr[8];
+ __le32 wakeup_reasons;
+ __le32 num_of_gtk_rekeys;
+ u8 tid_tear_down;
+ u8 reserved[3];
+ __le32 received_beacons;
+ __le32 wake_packet_length;
+ __le32 wake_packet_bufsize;
+ u8 wake_packet[]; /* can be truncated from _length to _bufsize */
+} __packed; /* WOWLAN_STATUSES_API_S_VER_11 */
+
static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
{
return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 74ac65bd545a..95ada51d3f9e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -135,6 +135,25 @@ struct iwl_fw_ini_region_err_table {
} __packed; /* FW_TLV_DEBUG_REGION_ERROR_TABLE_API_S_VER_1 */
/**
+ * struct iwl_fw_ini_region_special_device_memory - special device memory
+ *
+ * Configuration to read a special memory
+ *
+ * @type: type of the special memory
+ * @version: version of the special memory
+ * @base_addr: base address of the error table
+ * @size: size of the error table
+ * @offset: offset to add to &base_addr
+ */
+struct iwl_fw_ini_region_special_device_memory {
+ __le16 type;
+ __le16 version;
+ __le32 base_addr;
+ __le32 size;
+ __le32 offset;
+} __packed; /* FW_TLV_DEBUG_REGION_SPECIAL_DEVICE_ADDR_API_S_VER_1 */
+
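
Per the @offset description above, the address a dump reads from is the table base plus the TLV-supplied offset. A minimal sketch (the reg variable names the region TLV, as in the surrounding dump code):

    u32 addr = le32_to_cpu(reg->special_mem.base_addr) +
               le32_to_cpu(reg->special_mem.offset);
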
+/**
* struct iwl_fw_ini_region_internal_buffer - internal buffer region data
*
* Configuration to read internal monitor buffer
@@ -185,6 +204,7 @@ struct iwl_fw_ini_region_tlv {
struct iwl_fw_ini_region_fifos fifos;
struct iwl_fw_ini_region_err_table err_table;
struct iwl_fw_ini_region_internal_buffer internal_buffer;
+ struct iwl_fw_ini_region_special_device_memory special_mem;
__le32 dram_alloc_id;
__le32 tlv_mask;
}; /* FW_TLV_DEBUG_REGION_CONF_PARAMS_API_U_VER_1 */
@@ -281,6 +301,7 @@ struct iwl_fw_ini_hcmd_tlv {
* @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration
* @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration
+ * @IWL_FW_INI_ALLOCATION_ID_INTERNAL: allocation meant for Internal SMEM in D3
* @IWL_FW_INI_ALLOCATION_NUM: number of allocation ids
*/
enum iwl_fw_ini_allocation_id {
@@ -288,6 +309,7 @@ enum iwl_fw_ini_allocation_id {
IWL_FW_INI_ALLOCATION_ID_DBGC1,
IWL_FW_INI_ALLOCATION_ID_DBGC2,
IWL_FW_INI_ALLOCATION_ID_DBGC3,
+ IWL_FW_INI_ALLOCATION_ID_INTERNAL,
IWL_FW_INI_ALLOCATION_NUM,
}; /* FW_DEBUG_TLV_ALLOCATION_ID_E_VER_1 */
@@ -327,6 +349,7 @@ enum iwl_fw_ini_buffer_location {
* @IWL_FW_INI_REGION_CSR: CSR registers
* @IWL_FW_INI_REGION_DRAM_IMR: IMR memory
* @IWL_FW_INI_REGION_PCI_IOSF_CONFIG: PCI/IOSF config
+ * @IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY: special device memory
* @IWL_FW_INI_REGION_NUM: number of region types
*/
enum iwl_fw_ini_region_type {
@@ -347,6 +370,7 @@ enum iwl_fw_ini_region_type {
IWL_FW_INI_REGION_CSR,
IWL_FW_INI_REGION_DRAM_IMR,
IWL_FW_INI_REGION_PCI_IOSF_CONFIG,
+ IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY,
IWL_FW_INI_REGION_NUM
}; /* FW_TLV_DEBUG_REGION_TYPE_API_E */
@@ -362,13 +386,13 @@ enum iwl_fw_ini_region_type {
* @IWL_FW_INI_TIME_POINT_FW_ASSERT: FW assert
* @IWL_FW_INI_TIME_POINT_FW_HW_ERROR: FW HW error
* @IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG: TFD queue hang
- * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION: DHC cmd response and notif
+ * @IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION: DHC cmd response and notif
* @IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF: FW response or notification.
* data field holds id and group
* @IWL_FW_INI_TIME_POINT_USER_TRIGGER: user trigger time point
* @IWL_FW_INI_TIME_POINT_PERIODIC: periodic timepoint that fires in constant
* intervals. data field holds the interval time in msec
- * @IWL_FW_INI_TIME_POINT_WDG_TIMEOUT: watchdog timeout
+ * @IWL_FW_INI_TIME_POINT_RESERVED: reserved
* @IWL_FW_INI_TIME_POINT_HOST_ASSERT: Unused
* @IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT: alive timeout
* @IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE: device enable
@@ -395,11 +419,11 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_FW_ASSERT,
IWL_FW_INI_TIME_POINT_FW_HW_ERROR,
IWL_FW_INI_TIME_POINT_FW_TFD_Q_HANG,
- IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFOCATION,
+ IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION,
IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF,
IWL_FW_INI_TIME_POINT_USER_TRIGGER,
IWL_FW_INI_TIME_POINT_PERIODIC,
- IWL_FW_INI_TIME_POINT_WDG_TIMEOUT,
+ IWL_FW_INI_TIME_POINT_RESERVED,
IWL_FW_INI_TIME_POINT_HOST_ASSERT,
IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
IWL_FW_INI_TIME_POINT_HOST_DEVICE_ENABLE,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 1df2e497fabf..465a8e3974e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -321,12 +321,54 @@ struct iwl_tof_responder_config_cmd {
* data (if exists) follows, and then 0-padding again to complete a
* 4-multiple long buffer.
*/
-struct iwl_tof_responder_dyn_config_cmd {
+struct iwl_tof_responder_dyn_config_cmd_v2 {
__le32 lci_len;
__le32 civic_len;
u8 lci_civic[];
} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_2 */
+#define IWL_LCI_MAX_SIZE 160
+#define IWL_CIVIC_MAX_SIZE 160
+#define HLTK_11AZ_LEN 32
+
+/**
+ * enum iwl_responder_dyn_cfg_valid_flags - valid flags for dyn_config_cmd
+ * @IWL_RESPONDER_DYN_CFG_VALID_LCI: LCI data is valid
+ * @IWL_RESPONDER_DYN_CFG_VALID_CIVIC: Civic data is valid
+ * @IWL_RESPONDER_DYN_CFG_VALID_PASN_STA: the pasn_addr, HLTK and cipher fields
+ * are valid.
+ */
+enum iwl_responder_dyn_cfg_valid_flags {
+ IWL_RESPONDER_DYN_CFG_VALID_LCI = BIT(0),
+ IWL_RESPONDER_DYN_CFG_VALID_CIVIC = BIT(1),
+ IWL_RESPONDER_DYN_CFG_VALID_PASN_STA = BIT(2),
+};
+
+/**
+ * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * @cipher: The negotiated cipher. see &enum iwl_location_cipher.
+ * @valid_flags: flags indicating which fields in the command are valid. see
+ * &enum iwl_responder_dyn_cfg_valid_flags.
+ * @lci_len: length of the LCI data in bytes
+ * @civic_len: length of the Civic data in bytes
+ * @lci_buf: the LCI buffer
+ * @civic_buf: the Civic buffer
+ * @hltk_buf: HLTK for secure LTF bits generation for the specified station
+ * @addr: mac address of the station for which to use the HLTK
+ * @reserved: for alignment
+ */
+struct iwl_tof_responder_dyn_config_cmd {
+ u8 cipher;
+ u8 valid_flags;
+ u8 lci_len;
+ u8 civic_len;
+ u8 lci_buf[IWL_LCI_MAX_SIZE];
+ u8 civic_buf[IWL_CIVIC_MAX_SIZE];
+ u8 hltk_buf[HLTK_11AZ_LEN];
+ u8 addr[ETH_ALEN];
+ u8 reserved[2];
+} __packed; /* TOF_RESPONDER_DYN_CONFIG_CMD_API_S_VER_3 */
+
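
Unlike the v2 command's variable-length lci_civic tail, v3 uses fixed buffers plus explicit validity flags, so a partial update needs no length gymnastics. A hedged fill sketch (lci/civic and their lengths are assumed caller inputs, already bounded by the *_MAX_SIZE defines above):

    struct iwl_tof_responder_dyn_config_cmd cmd = {
        .valid_flags = IWL_RESPONDER_DYN_CFG_VALID_LCI |
                       IWL_RESPONDER_DYN_CFG_VALID_CIVIC,
        .lci_len = lci_len,
        .civic_len = civic_len,
    };

    memcpy(cmd.lci_buf, lci, lci_len);
    memcpy(cmd.civic_buf, civic, civic_len);
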
/**
* struct iwl_tof_range_req_ext_cmd - extended range req for WLS
* @tsf_timer_offset_msec: the recommended time offset (mSec) from the AP's TSF
@@ -507,7 +549,6 @@ enum iwl_location_bw {
IWL_LOCATION_BW_80MHZ,
};
-#define HLTK_11AZ_LEN 32
#define TK_11AZ_LEN 32
#define LOCATION_BW_POS 4
@@ -552,15 +593,19 @@ struct iwl_tof_range_req_ap_entry_v4 {
* @IWL_LOCATION_CIPHER_CCMP_128: CCMP 128
* @IWL_LOCATION_CIPHER_GCMP_128: GCMP 128
* @IWL_LOCATION_CIPHER_GCMP_256: GCMP 256
+ * @IWL_LOCATION_CIPHER_INVALID: security is not used.
+ * @IWL_LOCATION_CIPHER_MAX: maximum value for this enum.
*/
enum iwl_location_cipher {
IWL_LOCATION_CIPHER_CCMP_128,
IWL_LOCATION_CIPHER_GCMP_128,
IWL_LOCATION_CIPHER_GCMP_256,
+ IWL_LOCATION_CIPHER_INVALID,
+ IWL_LOCATION_CIPHER_MAX,
};
/**
- * struct iwl_tof_range_req_ap_entry - AP configuration parameters
+ * struct iwl_tof_range_req_ap_entry_v6 - AP configuration parameters
* @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
* @channel_num: AP Channel number
* @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
@@ -588,7 +633,7 @@ enum iwl_location_cipher {
* @beacon_interval: beacon interval of the AP in TUs. Only required if
* &IWL_INITIATOR_AP_FLAGS_TB is set.
*/
-struct iwl_tof_range_req_ap_entry {
+struct iwl_tof_range_req_ap_entry_v6 {
__le32 initiator_ap_flags;
u8 channel_num;
u8 format_bw;
@@ -607,6 +652,61 @@ struct iwl_tof_range_req_ap_entry {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_6 */
/**
+ * struct iwl_tof_range_req_ap_entry_v7 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @ftmr_max_retries: Max number of retries to send the FTMR in case of no
+ * reply from the AP.
+ * @bssid: AP's BSSID
+ * @burst_period: Recommended value to be sent to the AP. Measurement
+ * periodicity in units of 100ms. Ignored if num_of_bursts = 0
+ * @samples_per_burst: the number of FTM pairs in a single burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ */
+struct iwl_tof_range_req_ap_entry_v7 {
+ __le32 initiator_ap_flags;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 ftmr_max_retries;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */
+
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -772,7 +872,7 @@ struct iwl_tof_range_req_cmd_v8 {
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_8 */
/**
- * struct iwl_tof_range_req_cmd - start measurement cmd
+ * struct iwl_tof_range_req_cmd_v9 - start measurement cmd
* @initiator_flags: see flags @ iwl_tof_initiator_flags
* @request_id: A Token incremented per request. The same Token will be
* sent back in the range response
@@ -787,7 +887,7 @@ struct iwl_tof_range_req_cmd_v8 {
* TSF of this mac id. 0xff to disable TSF reporting.
* @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v6.
*/
-struct iwl_tof_range_req_cmd {
+struct iwl_tof_range_req_cmd_v9 {
__le32 initiator_flags;
u8 request_id;
u8 num_of_ap;
@@ -796,9 +896,37 @@ struct iwl_tof_range_req_cmd {
u8 macaddr_template[ETH_ALEN];
__le32 req_timeout_ms;
__le32 tsf_mac_id;
- struct iwl_tof_range_req_ap_entry ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_req_ap_entry_v6 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_9 */
+/**
+ * struct iwl_tof_range_req_cmd_v11 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v7.
+ */
+struct iwl_tof_range_req_cmd_v11 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v7 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_11 */
+
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the
@@ -960,7 +1088,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_4 */
/**
- * struct iwl_tof_range_rsp_ap_entry_ntfy - AP parameters (response)
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v5 - AP parameters (response)
* @bssid: BSSID of the AP
* @measure_status: current APs measurement status, one of
* &enum iwl_tof_entry_status.
@@ -992,7 +1120,7 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v4 {
* @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
* @reserved: for alignment
*/
-struct iwl_tof_range_rsp_ap_entry_ntfy {
+struct iwl_tof_range_rsp_ap_entry_ntfy_v5 {
u8 bssid[ETH_ALEN];
u8 measure_status;
u8 measure_bw;
@@ -1017,6 +1145,69 @@ struct iwl_tof_range_rsp_ap_entry_ntfy {
} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_5 */
/**
+ * struct iwl_tof_range_rsp_ap_entry_ntfy_v6 - AP parameters (response)
+ * @bssid: BSSID of the AP
+ * @measure_status: current APs measurement status, one of
+ * &enum iwl_tof_entry_status.
+ * @measure_bw: Current AP Bandwidth: 0 20MHz, 1 40MHz, 2 80MHz
+ * @rtt: The Round Trip Time that took for the last measurement for
+ * current AP [pSec]
+ * @rtt_variance: The Variance of the RTT values measured for current AP
+ * @rtt_spread: The Difference between the maximum and the minimum RTT
+ * values measured for current AP in the current session [pSec]
+ * @rssi: RSSI as uploaded in the Channel Estimation notification
+ * @rssi_spread: The Difference between the maximum and the minimum RSSI values
+ * measured for current AP in the current session
+ * @last_burst: 1 if no more FTM sessions are scheduled for this responder
+ * @refusal_period: refusal period in case of
+ * @IWL_TOF_ENTRY_RESPONDER_CANNOT_COLABORATE [sec]
+ * @timestamp: The GP2 Clock [usec] where Channel Estimation notification was
+ * uploaded by the LMAC
+ * @start_tsf: measurement start time in TSF of the mac specified in the range
+ * request
+ * @rx_rate_n_flags: rate and flags of the last FTM frame received from this
+ * responder
+ * @tx_rate_n_flags: rate and flags of the last ack sent to this responder
+ * @t2t3_initiator: as calculated from the algo in the initiator
+ * @t1t4_responder: as calculated from the algo in the responder
+ * @common_calib: Calib val that was used in for this AP measurement
+ * @specific_calib: val that was used in for this AP measurement
+ * @papd_calib_output: The result of the tof papd calibration that was injected
+ * into the algorithm.
+ * @rttConfidence: a value between 0 - 31 that represents the rtt accuracy.
+ * @reserved: for alignment
+ * @rx_pn: the last PN used for this responder Rx in case PMF is configured in
+ * LE byte order.
+ * @tx_pn: the last PN used for this responder Tx in case PMF is configured in
+ * LE byte order.
+ */
+struct iwl_tof_range_rsp_ap_entry_ntfy_v6 {
+ u8 bssid[ETH_ALEN];
+ u8 measure_status;
+ u8 measure_bw;
+ __le32 rtt;
+ __le32 rtt_variance;
+ __le32 rtt_spread;
+ s8 rssi;
+ u8 rssi_spread;
+ u8 last_burst;
+ u8 refusal_period;
+ __le32 timestamp;
+ __le32 start_tsf;
+ __le32 rx_rate_n_flags;
+ __le32 tx_rate_n_flags;
+ __le32 t2t3_initiator;
+ __le32 t1t4_responder;
+ __le16 common_calib;
+ __le16 specific_calib;
+ __le32 papd_calib_output;
+ u8 rttConfidence;
+ u8 reserved[3];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6 */
+
+/**
* enum iwl_tof_response_status - tof response status
*
* @IWL_TOF_RESPONSE_SUCCESS: successful range.
@@ -1066,21 +1257,37 @@ struct iwl_tof_range_rsp_ntfy_v6 {
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_6 */
/**
- * struct iwl_tof_range_rsp_ntfy - ranging response notification
+ * struct iwl_tof_range_rsp_ntfy_v7 - ranging response notification
* @request_id: A Token ID of the corresponding Range request
* @num_of_aps: Number of APs results
* @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
* @reserved: reserved
* @ap: per-AP data
*/
-struct iwl_tof_range_rsp_ntfy {
+struct iwl_tof_range_rsp_ntfy_v7 {
u8 request_id;
u8 num_of_aps;
u8 last_report;
u8 reserved;
- struct iwl_tof_range_rsp_ap_entry_ntfy ap[IWL_MVM_TOF_MAX_APS];
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v5 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_7 */
+/**
+ * struct iwl_tof_range_rsp_ntfy_v8 - ranging response notification
+ * @request_id: A Token ID of the corresponding Range request
+ * @num_of_aps: Number of APs results
+ * @last_report: 1 if no more FTM sessions are scheduled, 0 otherwise.
+ * @reserved: reserved
+ * @ap: per-AP data
+ */
+struct iwl_tof_range_rsp_ntfy_v8 {
+ u8 request_id;
+ u8 num_of_aps;
+ u8 last_report;
+ u8 reserved;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8 */
+
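
The v6 entries embedded in the v8 notification add rx_pn/tx_pn, letting the driver resynchronize PMF counters after a secured 11az session. A hedged parsing sketch (pkt is assumed handler context and update_sta_pn() is hypothetical):

    struct iwl_tof_range_rsp_ntfy_v8 *rsp = (void *)pkt->data;
    int i;

    for (i = 0; i < rsp->num_of_aps; i++)
        /* hand the updated PNs back to the key/PMF state */
        update_sta_pn(rsp->ap[i].rx_pn, rsp->ap[i].tx_pn);
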
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
* struct iwl_tof_mcsi_notif - used for debug
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 73fb0030c496..260f9978a6ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -5,9 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +26,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -72,7 +70,7 @@
#define NUM_MAC_INDEX (NUM_MAC_INDEX_DRIVER + 1)
#define NUM_MAC_INDEX_CDB (NUM_MAC_INDEX_DRIVER + 2)
-#define IWL_MVM_STATION_COUNT 16
+#define IWL_MVM_STATION_COUNT_MAX 16
#define IWL_MVM_INVALID_STA 0xFF
enum iwl_ac {
@@ -188,9 +186,17 @@ struct iwl_mac_data_ibss {
/**
* enum iwl_mac_data_policy - policy of the data path for this MAC
* @TWT_SUPPORTED: twt is supported
+ * @MORE_DATA_ACK_SUPPORTED: AP supports More Data Ack according to
+ * paragraph 9.4.1.17 in P802.11ax_D4 specification. Used for TWT
+ * early termination detection.
+ * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule
+ * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w)
*/
enum iwl_mac_data_policy {
- TWT_SUPPORTED = BIT(0),
+ TWT_SUPPORTED = BIT(0),
+ MORE_DATA_ACK_SUPPORTED = BIT(1),
+ FLEXIBLE_TWT_SUPPORTED = BIT(2),
+ PROTECTED_TWT_SUPPORTED = BIT(3),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index b6c31f01ea9e..55573168444e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -90,6 +90,11 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* @TAS_CONFIG: &struct iwl_tas_config_cmd
*/
TAS_CONFIG = 0x3,
+
+ /**
+ * @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
+ */
+ PNVM_INIT_COMPLETE_NTFY = 0xFE,
};
/**
@@ -476,4 +481,12 @@ struct iwl_lari_config_change_cmd {
__le32 config_bitmap;
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_1 */
+/**
+ * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
+ * @status: PNVM image loading status
+ */
+struct iwl_pnvm_init_complete_ntfy {
+ __le32 status;
+} __packed; /* PNVM_INIT_COMPLETE_NTFY_S_VER_1 */
+
#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index b833b80ea3d6..e6a069683462 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -181,15 +179,37 @@ struct iwl_phy_context_cmd_tail {
* @ci: channel info
* @tail: command tail
*/
-struct iwl_phy_context_cmd {
+struct iwl_phy_context_cmd_v1 {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
- /* PHY_CONTEXT_DATA_API_S_VER_1 */
+ /* PHY_CONTEXT_DATA_API_S_VER_3 */
__le32 apply_time;
__le32 tx_param_color;
struct iwl_fw_channel_info ci;
struct iwl_phy_context_cmd_tail tail;
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
+/**
+ * struct iwl_phy_context_cmd - config of the PHY context
+ * ( PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @lmac_id: the lmac id the phy context belongs to
+ * @ci: channel info
+ * @rxchain_info: ???
+ * @dsp_cfg_flags: set to 0
+ * @reserved: reserved to align to 64 bit
+ */
+struct iwl_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3 */
+ struct iwl_fw_channel_info ci;
+ __le32 lmac_id;
+ __le32 rxchain_info;
+ __le32 dsp_cfg_flags;
+ __le32 reserved;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */
#endif /* __iwl_fw_api_phy_ctxt_h__ */
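
The v3 command drops apply_time, tx_param_color and the tail in favour of addressing an LMAC directly, which pairs with the iwl_mvm_get_lmac_id() helper added in binding.h. A hedged build sketch (FW_CMD_ID_AND_COLOR and FW_CTXT_ACTION_ADD are the existing context macros; band and rxchain are assumed caller values):

    struct iwl_phy_context_cmd cmd = {
        .id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt_id, ctxt_color)),
        .action = cpu_to_le32(FW_CTXT_ACTION_ADD),
        .lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(fw, band)),
        .rxchain_info = cpu_to_le32(rxchain),
    };
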
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index 8991ddffbf5e..0debca6dd037 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -214,6 +214,15 @@ struct iwl_dts_measurement_notif_v2 {
} __packed; /* TEMPERATURE_MEASUREMENT_TRIGGER_NTFY_S_VER_2 */
/**
+ * struct iwl_dts_measurement_resp - measurements response
+ *
+ * @temp: the measured temperature
+ */
+struct iwl_dts_measurement_resp {
+ __le32 temp;
+} __packed; /* CMD_DTS_MEASUREMENT_RSP_API_S_VER_1 */
+
+/**
* struct ct_kill_notif - CT-kill entry notification
*
* @temperature: the current temperature in celsius
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 6e1b9b21904e..4e6ad1793d0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,48 +329,56 @@ enum iwl_dev_tx_power_cmd_mode {
IWL_TX_POWER_MODE_SET_SAR_TIMER_DEFAULT_TABLE = 5,
}; /* TX_POWER_REDUCED_FLAGS_TYPE_API_E_VER_5 */;
+#define IWL_NUM_CHAIN_TABLES 1
+#define IWL_NUM_CHAIN_TABLES_V2 2
#define IWL_NUM_CHAIN_LIMITS 2
#define IWL_NUM_SUB_BANDS 5
+#define IWL_NUM_SUB_BANDS_V2 11
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * struct iwl_dev_tx_power_common - Common part of the TX power reduction cmd
* @set_mode: see &enum iwl_dev_tx_power_cmd_mode
* @mac_context_id: id of the mac ctx for which we are reducing TX power.
* @pwr_restriction: TX power restriction in 1/8 dBms.
* @dev_24: device TX power restriction in 1/8 dBms
* @dev_52_low: device TX power restriction upper band - low
* @dev_52_high: device TX power restriction upper band - high
- * @per_chain_restriction: per chain restrictions
*/
-struct iwl_dev_tx_power_cmd_v3 {
+struct iwl_dev_tx_power_common {
__le32 set_mode;
__le32 mac_context_id;
__le16 pwr_restriction;
__le16 dev_24;
__le16 dev_52_low;
__le16 dev_52_high;
- __le16 per_chain_restriction[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+};
+
+/**
+ * struct iwl_dev_tx_power_cmd_v3 - TX power reduction command version 3
+ * @per_chain: per chain restrictions
+ */
+struct iwl_dev_tx_power_cmd_v3 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
} __packed; /* TX_REDUCED_POWER_API_S_VER_3 */
#define IWL_DEV_MAX_TX_POWER 0x7FFF
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v3: version 3 of the command, embedded here for easier software handling
+ * struct iwl_dev_tx_power_cmd_v4 - TX power reduction command version 4
+ * @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
* @reserved: reserved (padding)
*/
struct iwl_dev_tx_power_cmd_v4 {
- /* v4 is just an extension of v3 - keep this here */
- struct iwl_dev_tx_power_cmd_v3 v3;
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
u8 enable_ack_reduction;
u8 reserved[3];
} __packed; /* TX_REDUCED_POWER_API_S_VER_4 */
/**
- * struct iwl_dev_tx_power_cmd - TX power reduction command
- * @v3: version 3 of the command, embedded here for easier software handling
+ * struct iwl_dev_tx_power_cmd_v5 - TX power reduction command version 5
+ * @per_chain: per chain restrictions
* @enable_ack_reduction: enable or disable close range ack TX power
* reduction.
* @per_chain_restriction_changed: whether per_chain_restriction has changed
@@ -381,16 +389,56 @@ struct iwl_dev_tx_power_cmd_v4 {
* @timer_period: timer in milliseconds. If it expires, the FW will change to
* default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
*/
-struct iwl_dev_tx_power_cmd {
- /* v5 is just an extension of v3 - keep this here */
- struct iwl_dev_tx_power_cmd_v3 v3;
+struct iwl_dev_tx_power_cmd_v5 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
u8 enable_ack_reduction;
u8 per_chain_restriction_changed;
u8 reserved[2];
__le32 timer_period;
} __packed; /* TX_REDUCED_POWER_API_S_VER_5 */
+/**
+ * struct iwl_dev_tx_power_cmd_v6 - TX power reduction command version 6
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * from the last command. used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * note: if not changed, the command is used for keep alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW will change to
+ * default BIOS values. Relevant if set_mode is IWL_TX_POWER_MODE_SET_SAR_TIMER
+ */
+struct iwl_dev_tx_power_cmd_v6 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_6 */
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
+ * @common: common part of the command
+ * @v3: version 3 part of the command
+ * @v4: version 4 part of the command
+ * @v5: version 5 part of the command
+ * @v6: version 6 part of the command
+ */
+struct iwl_dev_tx_power_cmd {
+ struct iwl_dev_tx_power_common common;
+ union {
+ struct iwl_dev_tx_power_cmd_v3 v3;
+ struct iwl_dev_tx_power_cmd_v4 v4;
+ struct iwl_dev_tx_power_cmd_v5 v5;
+ struct iwl_dev_tx_power_cmd_v6 v6;
+ };
+};
+
#define IWL_NUM_GEO_PROFILES 3
+#define IWL_NUM_BANDS_PER_CHAIN_V1 2
+#define IWL_NUM_BANDS_PER_CHAIN_V2 3
/**
* enum iwl_geo_per_chain_offset_operation - type of operation
@@ -414,11 +462,6 @@ struct iwl_per_chain_offset {
u8 chain_b;
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
-struct iwl_per_chain_offset_group {
- struct iwl_per_chain_offset lb;
- struct iwl_per_chain_offset hb;
-} __packed; /* PER_CHAIN_LIMIT_OFFSET_GROUP_S_VER_1 */
-
/**
* struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
@@ -426,20 +469,38 @@ struct iwl_per_chain_offset_group {
*/
struct iwl_geo_tx_power_profiles_cmd_v1 {
__le32 ops;
- struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd - struct for GEO_TX_POWER_LIMIT cmd.
+ * struct iwl_geo_tx_power_profile_cmd_v2 - struct for GEO_TX_POWER_LIMIT cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: BIOS table revision.
+ */
+struct iwl_geo_tx_power_profiles_cmd_v2 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
+ __le32 table_revision;
+} __packed; /* GEO_TX_POWER_LIMIT_VER_2 */
+
+/**
+ * struct iwl_geo_tx_power_profile_cmd_v3 - struct for GEO_TX_POWER_LIMIT cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
*/
-struct iwl_geo_tx_power_profiles_cmd {
+struct iwl_geo_tx_power_profiles_cmd_v3 {
__le32 ops;
- struct iwl_per_chain_offset_group table[IWL_NUM_GEO_PROFILES];
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2];
__le32 table_revision;
-} __packed; /* GEO_TX_POWER_LIMIT */
+} __packed; /* GEO_TX_POWER_LIMIT_VER_3 */
+
+union iwl_geo_tx_power_profiles_cmd {
+ struct iwl_geo_tx_power_profiles_cmd_v1 v1;
+ struct iwl_geo_tx_power_profiles_cmd_v2 v2;
+ struct iwl_geo_tx_power_profiles_cmd_v3 v3;
+};
/**
* struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd
@@ -450,16 +511,26 @@ struct iwl_geo_tx_power_profiles_resp {
} __packed; /* GEO_TX_POWER_LIMIT_RESP */
/**
- * struct iwl_ppag_table_cmd - struct for PER_PLATFORM_ANT_GAIN_CMD cmd.
+ * union iwl_ppag_table_cmd - union for all versions of PPAG command
+ * @v1: version 1, table revision = 0
+ * @v2: version 2, table revision = 1
+ *
* @enabled: 1 if PPAG is enabled, 0 otherwise
* @gain: table of antenna gain values per chain and sub-band
* @reserved: reserved
*/
-struct iwl_ppag_table_cmd {
- __le32 enabled;
- s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
- s8 reserved[2];
-} __packed; /* PER_PLATFORM_ANT_GAIN_CMD */
+union iwl_ppag_table_cmd {
+ struct {
+ __le32 enabled;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS];
+ s8 reserved[2];
+ } v1;
+ struct {
+ __le32 enabled;
+ s8 gain[IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ s8 reserved[2];
+ } v2;
+} __packed;
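
With the common/union split, a caller fills the shared header once and sends only as many bytes as the firmware's command version understands. A sizing sketch mirroring the struct layout above (version selection itself is assumed caller logic):

    struct iwl_dev_tx_power_cmd cmd = {
        .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
    };
    int len = sizeof(cmd.common);

    len += sizeof(cmd.v5);    /* e.g. firmware speaking command version 5 */
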
/**
* struct iwl_beacon_filter_cmd
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 4347be6491e9..1ea54f643030 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -193,6 +193,8 @@ enum IWL_TLC_HT_BW_RATES {
* @sgi_ch_width_supp: bitmap of SGI support per channel width
* use BIT(@enum iwl_tlc_mng_cfg_cw)
* @reserved2: reserved
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
*/
struct iwl_tlc_config_cmd {
u8 sta_id;
@@ -206,8 +208,9 @@ struct iwl_tlc_config_cmd {
__le16 ht_rates[IWL_TLC_NSS_MAX][2];
__le16 max_mpdu_len;
u8 sgi_ch_width_supp;
- u8 reserved2[1];
-} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_2 */
+ u8 reserved2;
+ __le32 max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
/**
* enum iwl_tlc_update_flags - updated fields
@@ -486,6 +489,13 @@ enum {
#define RATE_MCS_HE_106T_POS 28
#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
+/* Bit 30-31: (1) RTS, (2) CTS */
+#define RATE_MCS_RTS_REQUIRED_POS (30)
+#define RATE_MCS_RTS_REQUIRED_MSK (0x1 << RATE_MCS_RTS_REQUIRED_POS)
+
+#define RATE_MCS_CTS_REQUIRED_POS (31)
+#define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS)
+
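
A hedged decode sketch for the two new bits: the firmware can now report per-frame protection in rate_n_flags, e.g. when processing a TX response (the stats counters are hypothetical):

    if (rate_n_flags & RATE_MCS_RTS_REQUIRED_MSK)
        stats->rts_protected_frames++;    /* hypothetical counter */
    if (rate_n_flags & RATE_MCS_CTS_REQUIRED_MSK)
        stats->cts_protected_frames++;    /* hypothetical counter */
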
/* Link Quality definitions */
/* # entries in rate scale table to support Tx retries */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index b8b36a4f9eb9..8a8a204bfe26 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -308,17 +308,11 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13),
IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14),
IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
-};
-enum iwl_rx_mpdu_hash_filter {
- IWL_RX_MPDU_HF_A1_HASH_MASK = 0x3f,
- IWL_RX_MPDU_HF_FILTER_STATUS_MASK = 0xc0,
-};
+ IWL_RX_MPDU_STATUS_KEY = 0x3f0000,
+ IWL_RX_MPDU_STATUS_DUPLICATE = BIT(22),
-enum iwl_rx_mpdu_sta_id_flags {
- IWL_RX_MPDU_SIF_STA_ID_MASK = 0x1f,
- IWL_RX_MPDU_SIF_RRF_ABORT = 0x20,
- IWL_RX_MPDU_SIF_FILTER_STATUS_MASK = 0xc0,
+ IWL_RX_MPDU_STATUS_STA_ID = 0x1f000000,
};
#define IWL_RX_REORDER_DATA_INVALID_BAID 0x7f
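With @status widened to __le32 in the hunk below, the key id and station id
now live in the upper bytes of the status word. A hedged sketch of extracting
them (the shift values follow from the masks; not part of this patch):

	u32 status = le32_to_cpu(desc->status);
	u8 key_id = (status & IWL_RX_MPDU_STATUS_KEY) >> 16;    /* 0x3f0000 */
	u8 sta_id = (status & IWL_RX_MPDU_STATUS_STA_ID) >> 24; /* 0x1f000000 */
	bool duplicate = status & IWL_RX_MPDU_STATUS_DUPLICATE;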
@@ -560,7 +554,11 @@ struct iwl_rx_mpdu_desc_v3 {
/**
* @raw_xsum: raw xsum value
*/
- __le32 raw_xsum;
+ __be16 raw_xsum;
+ /**
+ * @reserved_xsum: reserved high bits in the raw checksum
+ */
+ __le16 reserved_xsum;
/* DW11 */
/**
* @rate_n_flags: RX rate/flags encoding
@@ -668,15 +666,8 @@ struct iwl_rx_mpdu_desc {
/**
* @status: &enum iwl_rx_mpdu_status
*/
- __le16 status;
- /**
- * @hash_filter: hash filter value
- */
- u8 hash_filter;
- /**
- * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
- */
- u8 sta_id_flags;
+ __le32 status;
+
/* DW6 */
/**
* @reorder_data: &enum iwl_rx_mpdu_reorder_data
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index c010e6febbf4..d43e0d3f3a12 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -380,7 +380,7 @@ struct iwl_mvm_add_sta_cmd {
u8 add_modify;
u8 awake_acs;
__le16 tid_disable_tx;
- __le32 mac_id_n_color;
+ __le32 mac_id_n_color; /* can be used for lmac id when using cmd v12 */
u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
__le16 reserved2;
u8 sta_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
index 318843138490..d41cab4016fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/stats.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,9 +27,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018, 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -383,14 +381,14 @@ struct mvm_statistics_load {
__le32 air_time[MAC_INDEX_AUX];
__le32 byte_count[MAC_INDEX_AUX];
__le32 pkt_count[MAC_INDEX_AUX];
- u8 avg_energy[IWL_MVM_STATION_COUNT];
+ u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
struct mvm_statistics_load_v1 {
__le32 air_time[NUM_MAC_INDEX];
__le32 byte_count[NUM_MAC_INDEX];
__le32 pkt_count[NUM_MAC_INDEX];
- u8 avg_energy[IWL_MVM_STATION_COUNT];
+ u8 avg_energy[IWL_MVM_STATION_COUNT_MAX];
} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_1 */
struct mvm_statistics_rx {
@@ -466,4 +464,465 @@ struct iwl_statistics_cmd {
__le32 flags;
} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+#define MAX_BCAST_FILTER_NUM 8
+
+/**
+ * enum iwl_fw_statistics_type
+ *
+ * @FW_STATISTICS_OPERATIONAL: operational statistics
+ * @FW_STATISTICS_PHY: phy statistics
+ * @FW_STATISTICS_MAC: mac statistics
+ * @FW_STATISTICS_RX: rx statistics
+ * @FW_STATISTICS_TX: tx statistics
+ * @FW_STATISTICS_DURATION: duration statistics
+ * @FW_STATISTICS_HE: he statistics
+ */
+enum iwl_fw_statistics_type {
+ FW_STATISTICS_OPERATIONAL,
+ FW_STATISTICS_PHY,
+ FW_STATISTICS_MAC,
+ FW_STATISTICS_RX,
+ FW_STATISTICS_TX,
+ FW_STATISTICS_DURATION,
+ FW_STATISTICS_HE,
+}; /* FW_STATISTICS_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_statistics_ntfy_hdr
+ *
+ * @type: struct type
+ * @version: version of the struct
+ * @size: size in bytes
+ */
+struct iwl_statistics_ntfy_hdr {
+ u8 type;
+ u8 version;
+ __le16 size;
+}; /* STATISTICS_NTFY_HDR_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_operational_ntfy
+ *
+ * @hdr: general statistics header
+ * @flags: bitmap of possible notification structures
+ * @mac_id: mac on which the beacon was received
+ * @beacon_filter_average_energy: Average energy [-dBm] of the 2
+ * antennas.
+ * @beacon_filter_reason: beacon filter reason
+ * @radio_temperature: radio temperature
+ * @air_time: air time
+ * @beacon_counter: all beacons (both filtered and not filtered)
+ * @beacon_average_energy: all beacons (both filtered and not
+ * filtered)
+ * @beacon_rssi_a: beacon RSSI on antenna A
+ * @beacon_rssi_b: beacon RSSI on antenna B
+ * @rx_bytes: per MAC RX byte count
+ * @rx_time: rx time
+ * @tx_time: usec the radio is transmitting.
+ * @on_time_rf: The total time in usec the RF is awake.
+ * @on_time_scan: usec the radio is awake due to scan.
+ * @average_energy: in fact it is minus the energy value.
+ * @reserved: reserved
+ */
+struct iwl_statistics_operational_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 flags;
+ __le32 mac_id;
+ __le32 beacon_filter_average_energy;
+ __le32 beacon_filter_reason;
+ __le32 radio_temperature;
+ __le32 air_time[MAC_INDEX_AUX];
+ __le32 beacon_counter[MAC_INDEX_AUX];
+ __le32 beacon_average_energy[MAC_INDEX_AUX];
+ __le32 beacon_rssi_a;
+ __le32 beacon_rssi_b;
+ __le32 rx_bytes[MAC_INDEX_AUX];
+ __le64 rx_time;
+ __le64 tx_time;
+ __le64 on_time_rf;
+ __le64 on_time_scan;
+ __le32 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ __le32 reserved;
+} __packed; /* STATISTICS_OPERATIONAL_NTFY_API_S_VER_14 */
+
+/**
+ * struct iwl_statistics_phy_ntfy
+ *
+ * @hdr: general statistics header
+ * RX PHY related statistics
+ * @energy_and_config: ???
+ * @rssi_band: @31:24 rssiAllBand_B, 23:16 rssiInBand_B, 15:8
+ * rssiAllBand_A, 7:0 rssiInBand_A
+ * @agc_word: @31:16 agcWord_B, 15:0 agcWord_A
+ * @agc_gain: @19:10 agcGain_B, 9:0 agcGain_A
+ * @dfe_gain: @19:10 dfeGain_B, 9:0 dfeGain_A
+ * @snr_calc_main: @18:0 snrCalcMain
+ * @energy_calc_main: @18:0 energyCalcMain
+ * @snr_calc_aux: @18:0 snrCalcAux
+ * @dsp_dc_estim_a: @27:14 dspDcEstimQA, 13:0 dspDcEstimIA
+ * @dsp_dc_estim_b: @27:14 dspDcEstimQB, 13:0 dspDcEstimIB
+ * @ina_detec_type_and_ofdm_corr_comb: @31:31 inaDetectCckMrc,
+ * 30:27 inaDetectType, 26:0 ofdmCorrComb
+ * @cw_corr_comb: @26:0 cwCorrComb
+ * @rssi_comb: @25:0 rssiComb
+ * @auto_corr_cck: @23:12 autoCck, 11:00 crossCck
+ * @ofdm_fine_freq_and_pina_freq_err: @18:7 ofdmFineFreq, 6:0
+ * ofdmPinaFreqErr
+ * @snrm_evm_main: @31:0 snrmEvmMain
+ * @snrm_evm_aux: @31:0 snrmEvmAux
+ * @rx_rate: @31:0 rate
+ * TX PHY related statistics
+ * @per_chain_enums_and_dsp_atten_a: @perChainEnumsAndDspAtten
+ * (per version)
+ * @target_power_and_power_meas_a: @31:16 targetPower_A, 15:0
+ * powerMeasuredCalc_A
+ * @tx_config_as_i_and_ac_a: @31:16 txConfigAsI_A, 15:0
+ * txConfigAc_A
+ * @predist_dcq_and_dci_a: @31:16 predist_dci_A, 15:0
+ * predist_dcq_A
+ * @per_chain_enums_and_dsp_atten_b: @perChainEnumsAndDspAtten
+ * (per version)
+ * @target_power_and_power_meas_b: @31:16 targetPower_B, 15:0
+ * powerMeasuredCalc_B
+ * @tx_config_as_i_and_ac_b: @31:16 txConfigAsI_B, 15:0
+ * txConfigAc_B
+ * @predist_dcq_and_dci_b: @31:16 predist_dci_B, 15:0
+ * predist_dcq_B
+ * @tx_rate: @31:0 rate
+ * @tlc_backoff: @31:0 tlcBackoff
+ * @mpapd_calib_mode_mpapd_calib_type_a: @31:16
+ * mpapdCalibMode_A, 15:0 mpapdCalibType_A
+ * @psat_and_phy_power_limit_a: @31:16 psat_A, 15:0
+ * phyPowerLimit_A
+ * @sar_and_regulatory_power_limit_a: @31:16 sarPowerLimit_A,
+ * 15:0 regulatoryPowerLimit_A
+ * @mpapd_calib_mode_mpapd_calib_type_b: @31:16
+ * mpapdCalibMode_B, 15:0 mpapdCalibType_B
+ * @psat_and_phy_power_limit_b: @31:16 psat_B, 15:0
+ * phyPowerLimit_B
+ * @sar_and_regulatory_power_limit_b: @31:16 sarPowerLimit_B,
+ * 15:0 regulatoryPowerLimit_B
+ * @srd_and_driver_power_limits: @31:16 srdPowerLimit, 15:0
+ * driverPowerLimit
+ * @reserved: reserved
+ */
+struct iwl_statistics_phy_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 energy_and_config;
+ __le32 rssi_band;
+ __le32 agc_word;
+ __le32 agc_gain;
+ __le32 dfe_gain;
+ __le32 snr_calc_main;
+ __le32 energy_calc_main;
+ __le32 snr_calc_aux;
+ __le32 dsp_dc_estim_a;
+ __le32 dsp_dc_estim_b;
+ __le32 ina_detec_type_and_ofdm_corr_comb;
+ __le32 cw_corr_comb;
+ __le32 rssi_comb;
+ __le32 auto_corr_cck;
+ __le32 ofdm_fine_freq_and_pina_freq_err;
+ __le32 snrm_evm_main;
+ __le32 snrm_evm_aux;
+ __le32 rx_rate;
+ __le32 per_chain_enums_and_dsp_atten_a;
+ __le32 target_power_and_power_meas_a;
+ __le32 tx_config_as_i_and_ac_a;
+ __le32 predist_dcq_and_dci_a;
+ __le32 per_chain_enums_and_dsp_atten_b;
+ __le32 target_power_and_power_meas_b;
+ __le32 tx_config_as_i_and_ac_b;
+ __le32 predist_dcq_and_dci_b;
+ __le32 tx_rate;
+ __le32 tlc_backoff;
+ __le32 mpapd_calib_mode_mpapd_calib_type_a;
+ __le32 psat_and_phy_power_limit_a;
+ __le32 sar_and_regulatory_power_limit_a;
+ __le32 mpapd_calib_mode_mpapd_calib_type_b;
+ __le32 psat_and_phy_power_limit_b;
+ __le32 sar_and_regulatory_power_limit_b;
+ __le32 srd_and_driver_power_limits;
+ __le32 reserved;
+} __packed; /* STATISTICS_PHY_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_mac_ntfy
+ *
+ * @hdr: general statistics header
+ * @bcast_filter_passed_per_mac: bcast filter passed per mac
+ * @bcast_filter_dropped_per_mac: bcast filter dropped per mac
+ * @bcast_filter_passed_per_filter: bcast filter passed per filter
+ * @bcast_filter_dropped_per_filter: bcast filter dropped per filter
+ * @reserved: reserved
+ */
+struct iwl_statistics_mac_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 bcast_filter_passed_per_mac[NUM_MAC_INDEX_CDB];
+ __le32 bcast_filter_dropped_per_mac[NUM_MAC_INDEX_CDB];
+ __le32 bcast_filter_passed_per_filter[MAX_BCAST_FILTER_NUM];
+ __le32 bcast_filter_dropped_per_filter[MAX_BCAST_FILTER_NUM];
+ __le32 reserved;
+} __packed; /* STATISTICS_MAC_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_rx_ntfy
+ *
+ * @hdr: general statistics header
+ * @rx_agg_mpdu_cnt: aggregation frame count (number of
+ * delimiters)
+ * @rx_agg_cnt: number of RX Aggregations
+ * @unsupported_mcs: number of PLCP headers that have a rate which
+ * is unsupported by the DSP
+ * @bogus_cts: CTS received when not expecting CTS
+ * @bogus_ack: ACK received when not expecting ACK
+ * @rx_byte_count: ???
+ * @rx_packet_count: ???
+ * @missed_beacons: ???
+ * @unresponded_rts: un-responded RTS, due to NAV not zero
+ * @rxe_frame_limit_overrun: RXE got frame limit overrun
+ * @sent_ba_rsp_cnt: BA response TX count
+ * @late_rx_handle: count the number of times the RX path was
+ * aborted due to late entry
+ * @num_bt_kills: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_rx_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 rx_agg_mpdu_cnt;
+ __le32 rx_agg_cnt;
+ __le32 unsupported_mcs;
+ __le32 bogus_cts;
+ __le32 bogus_ack;
+ __le32 rx_byte_count[MAC_INDEX_AUX];
+ __le32 rx_packet_count[MAC_INDEX_AUX];
+ __le32 missed_beacons;
+ __le32 unresponded_rts;
+ __le32 rxe_frame_limit_overrun;
+ __le32 sent_ba_rsp_cnt;
+ __le32 late_rx_handle;
+ __le32 num_bt_kills;
+ __le32 reserved;
+} __packed; /* STATISTICS_RX_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_tx_ntfy
+ *
+ * @hdr: general statistics header
+ * @cts_timeout: timeout when waiting for CTS
+ * @ack_timeout: timeout when waiting for ACK
+ * @dump_msdu_cnt: number of MSDUs that were dumped due to any
+ * reason
+ * @burst_abort_missing_next_frame_cnt: number of times a burst
+ * was aborted due to missing next frame bytes in txfifo
+ * number of times a timeout occurred when waiting for CTS/ACK/BA while energy
+ * was detected just after sending the RTS/DATA. These statistics may help
+ * derive interesting indicators, such as the likelihood of collision (so the
+ * benefit of protection can be weighed against its cost), or how many of the
+ * failures are due to collision and how many are due to SNR.
+ * For link quality, the CTS collision indication is more reliable than the ACK
+ * collision indication, as the RTS frame is short and there is a higher chance
+ * that the frame(s) which caused the collision continue after the RTS was sent.
+ * @cts_timeout_collision: ???
+ * ACK/BA failed and energy was detected after DATA
+ * Note: to get the collision ratio, compute:
+ * ackOrBaTimeoutCollision / (ack_timeout + ba_timeout)
+ * @ack_or_ba_timeout_collision: ???
+ * @ba_timeout: timeout when waiting for immediate BA response
+ * @ba_reschedule_frames: failed to get BA response and
+ * rescheduled all the non-ACKed frames
+ * gives the average number of frames inside an aggregation
+ * @scd_query_agg_frame_cnt: ???
+ * @scd_query_no_agg: scheduler query prevented aggregation
+ * @scd_query_agg: scheduler query allowed aggregation
+ * @scd_query_mismatch: scheduler query inaccurate, either too
+ * short or too long
+ * @agg_terminated_underrun: aggregation was terminated due to
+ * underrun
+ * @agg_terminated_bt_prio_kill: aggregation was terminated due
+ * to BT
+ * @tx_kill_on_long_retry: count the tx frames dropped due to
+ * long retry limit (DATA frame failed)
+ * @tx_kill_on_short_retry: count the tx frames dropped due to
+ * short retry limit (RTS frame failed)
+ * TX defer on energy. This counter is reset on each successful transmit.
+ * When the timer exceeds the TX defer limit, the uCode will assert.
+ * @tx_deffer_counter: ???
+ * @tx_deffer_base_time: Keep the time of the last successful
+ * transmit
+ * @tx_underrun: TX killed due to underrun
+ * @bt_defer: TX deferred due to BT priority, so probably TX was
+ * not started.
+ * @tx_kill_on_dsp_timeout: TX killed on DSP problem detected
+ * @tx_kill_on_immediate_quiet: TX killed due to immediate quiet
+ * @kill_ba_cnt: number of times sending BA failed
+ * @kill_ack_cnt: number of times sending ACK failed
+ * @kill_cts_cnt: number of times sending CTS failed
+ * @burst_terminated: Count burst or fragmentation termination
+ * occurrence
+ * @late_tx_vec_wr_cnt: ???
+ * TX is not sent because ucode failed to notify the TRM in SIFS-delta from
+ * ON_AIR deassertion.
+ * @late_rx2_tx_cnt: ???
+ * @scd_query_cnt: count the times SCD query was done to check
+ * for TX AGG
+ * @tx_frames_acked_in_agg: count the number of frames
+ * transmitted inside AGG and were successful
+ * @last_tx_ch_width_indx: ???
+ * number of deferred TX per channel width, 0 - 20, 1/2/3 - 40/80/160
+ * @rx_detected_per_ch_width: ???
+ * @success_per_ch_width: ???
+ * @fail_per_ch_width: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_tx_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 cts_timeout;
+ __le32 ack_timeout;
+ __le32 dump_msdu_cnt;
+ __le32 burst_abort_missing_next_frame_cnt;
+ __le32 cts_timeout_collision;
+ __le32 ack_or_ba_timeout_collision;
+ __le32 ba_timeout;
+ __le32 ba_reschedule_frames;
+ __le32 scd_query_agg_frame_cnt;
+ __le32 scd_query_no_agg;
+ __le32 scd_query_agg;
+ __le32 scd_query_mismatch;
+ __le32 agg_terminated_underrun;
+ __le32 agg_terminated_bt_prio_kill;
+ __le32 tx_kill_on_long_retry;
+ __le32 tx_kill_on_short_retry;
+ __le32 tx_deffer_counter;
+ __le32 tx_deffer_base_time;
+ __le32 tx_underrun;
+ __le32 bt_defer;
+ __le32 tx_kill_on_dsp_timeout;
+ __le32 tx_kill_on_immediate_quiet;
+ __le32 kill_ba_cnt;
+ __le32 kill_ack_cnt;
+ __le32 kill_cts_cnt;
+ __le32 burst_terminated;
+ __le32 late_tx_vec_wr_cnt;
+ __le32 late_rx2_tx_cnt;
+ __le32 scd_query_cnt;
+ __le32 tx_frames_acked_in_agg;
+ __le32 last_tx_ch_width_indx;
+ __le32 rx_detected_per_ch_width[4];
+ __le32 success_per_ch_width[4];
+ __le32 fail_per_ch_width[4];
+ __le32 reserved;
+} __packed; /* STATISTICS_TX_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_duration_ntfy
+ *
+ * @hdr: general statistics header
+ * @cont_burst_chk_cnt: number of times continuation or
+ * fragmentation or bursting was checked
+ * @cont_burst_cnt: number of times continuation or fragmentation
+ * or bursting was successful
+ * @wait_for_silence_timeout_cnt: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_duration_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 cont_burst_chk_cnt;
+ __le32 cont_burst_cnt;
+ __le32 wait_for_silence_timeout_cnt;
+ __le32 reserved;
+} __packed; /* STATISTICS_DURATION_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwl_statistics_he_ntfy
+ *
+ * @hdr: general statistics header
+ * received HE frames
+ * @rx_siga_valid_cnt: rx HE SIG-A valid
+ * @rx_siga_invalid_cnt: rx HE SIG-A invalid
+ * received HE frames w/ valid Sig-A
+ * @rx_trig_based_frame_cnt: rx HE-TB (trig-based)
+ * @rx_su_frame_cnt: rx HE-SU
+ * @rx_sigb_invalid_cnt: rx (suspected) HE-MU w/ bad SIG-B
+ * @rx_our_bss_color_cnt: rx valid HE SIG-A w/ our BSS color
+ * @rx_other_bss_color_cnt: rx valid HE SIG-A w/ other BSS color
+ * @rx_zero_bss_color_cnt: ???
+ * received HE-MU frames w/ good Sig-B
+ * @rx_mu_for_us_cnt: matching AID
+ * @rx_mu_not_for_us_cnt: no matching AID
+ * received HE-MU frames for us (w/ our AID)
+ * @rx_mu_nss_ar: 0 - SISO, 1 - MIMO2
+ * @rx_mu_mimo_cnt: full BW RU, compressed SIG-B
+ * @rx_mu_ru_bw_ar: MU alloc, MHz: 0 - 2, 1 - 5, 2 - 10, 3 - 20,
+ * 4 - 40, 5 - 80, 6 - 160
+ * received trigger frames
+ * @rx_trig_for_us_cnt: ???
+ * @rx_trig_not_for_us_cnt: ???
+ * trigger for us
+ * @rx_trig_with_cs_req_cnt: ???
+ * @rx_trig_type_ar: ???
+ * @rx_trig_in_agg_cnt: ???
+ * basic trigger for us allocations
+ * @rx_basic_trig_alloc_nss_ar: ???
+ * @rx_basic_trig_alloc_mu_mimo_cnt: ???
+ * @rx_basic_trig_alloc_ru_bw_ar: ???
+ * @rx_basic_trig_total_byte_cnt: ???
+ * trig-based TX
+ * @tx_trig_based_cs_req_fail_cnt: ???
+ * @tx_trig_based_sifs_ok_cnt: ???
+ * @tx_trig_based_sifs_fail_cnt: ???
+ * @tx_trig_based_byte_cnt: ???
+ * @tx_trig_based_pad_byte_cnt: ???
+ * @tx_trig_based_frame_cnt: ???
+ * @tx_trig_based_acked_frame_cnt: ???
+ * @tx_trig_based_ack_timeout_cnt: ???
+ * HE-SU TX
+ * @tx_su_frame_cnt: ???
+ * EDCA <--> MU-EDCA transitions
+ * @tx_edca_to_mu_edca_cnt: ???
+ * @tx_mu_edca_to_edca_by_timeout_cnt: ???
+ * @tx_mu_edca_to_edca_by_ack_fail_cnt: ???
+ * @tx_mu_edca_to_edca_by_small_alloc_cnt: ???
+ * @reserved: reserved
+ */
+struct iwl_statistics_he_ntfy {
+ struct iwl_statistics_ntfy_hdr hdr;
+ __le32 rx_siga_valid_cnt;
+ __le32 rx_siga_invalid_cnt;
+ __le32 rx_trig_based_frame_cnt;
+ __le32 rx_su_frame_cnt;
+ __le32 rx_sigb_invalid_cnt;
+ __le32 rx_our_bss_color_cnt;
+ __le32 rx_other_bss_color_cnt;
+ __le32 rx_zero_bss_color_cnt;
+ __le32 rx_mu_for_us_cnt;
+ __le32 rx_mu_not_for_us_cnt;
+ __le32 rx_mu_nss_ar[2];
+ __le32 rx_mu_mimo_cnt;
+ __le32 rx_mu_ru_bw_ar[7];
+ __le32 rx_trig_for_us_cnt;
+ __le32 rx_trig_not_for_us_cnt;
+ __le32 rx_trig_with_cs_req_cnt;
+ __le32 rx_trig_type_ar[8 + 1];
+ __le32 rx_trig_in_agg_cnt;
+ __le32 rx_basic_trig_alloc_nss_ar[2];
+ __le32 rx_basic_trig_alloc_mu_mimo_cnt;
+ __le32 rx_basic_trig_alloc_ru_bw_ar[7];
+ __le32 rx_basic_trig_total_byte_cnt;
+ __le32 tx_trig_based_cs_req_fail_cnt;
+ __le32 tx_trig_based_sifs_ok_cnt;
+ __le32 tx_trig_based_sifs_fail_cnt;
+ __le32 tx_trig_based_byte_cnt;
+ __le32 tx_trig_based_pad_byte_cnt;
+ __le32 tx_trig_based_frame_cnt;
+ __le32 tx_trig_based_acked_frame_cnt;
+ __le32 tx_trig_based_ack_timeout_cnt;
+ __le32 tx_su_frame_cnt;
+ __le32 tx_edca_to_mu_edca_cnt;
+ __le32 tx_mu_edca_to_edca_by_timeout_cnt;
+ __le32 tx_mu_edca_to_edca_by_ack_fail_cnt;
+ __le32 tx_mu_edca_to_edca_by_small_alloc_cnt;
+ __le32 reserved;
+} __packed; /* STATISTICS_HE_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_stats_h__ */
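Each of the new notifications above begins with struct iwl_statistics_ntfy_hdr,
so a receiver can dispatch on @type after checking @size. A simplified,
hypothetical handler (the bare header at the start of the packet payload is an
assumption made for illustration only):

	static void iwl_handle_stats_ntfy(struct iwl_rx_packet *pkt)
	{
		struct iwl_statistics_ntfy_hdr *hdr = (void *)pkt->data;

		switch (hdr->type) {
		case FW_STATISTICS_OPERATIONAL: {
			struct iwl_statistics_operational_ntfy *ntfy =
				(void *)hdr;

			/* validate the advertised size before use */
			if (le16_to_cpu(hdr->size) < sizeof(*ntfy))
				return;
			/* ... consume ntfy->rx_time, ntfy->tx_time, ... */
			break;
		}
		case FW_STATISTICS_HE:
			/* struct iwl_statistics_he_ntfy, same pattern */
			break;
		default:
			break;
		}
	}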
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 82d59b5a5f8c..de2e2ca7a3ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -5,9 +5,8 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,9 +26,8 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,6 +59,7 @@
#ifndef __iwl_fw_api_tx_h__
#define __iwl_fw_api_tx_h__
+#include <linux/ieee80211.h>
/**
* enum iwl_tx_flags - bitmasks for tx_flags in TX command
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 7ea55cfdd8a8..ab4a8b942c81 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1507,6 +1507,27 @@ iwl_dump_ini_err_table_iter(struct iwl_fw_runtime *fwrt,
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
+static int
+iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *range_ptr, int idx)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_region_special_device_memory *special_mem =
+ &reg->special_mem;
+
+ struct iwl_fw_ini_error_dump_range *range = range_ptr;
+ u32 addr = le32_to_cpu(special_mem->base_addr) +
+ le32_to_cpu(special_mem->offset);
+
+ range->internal_base_addr = cpu_to_le32(addr);
+ range->range_data_size = special_mem->size;
+ iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
+ le32_to_cpu(special_mem->size));
+
+ return sizeof(*range) + le32_to_cpu(range->range_data_size);
+}
+
static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data,
void *range_ptr, int idx)
@@ -1636,6 +1657,21 @@ iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime *fwrt,
return dump->ranges;
}
+static void *
+iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data,
+ void *data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ struct iwl_fw_ini_special_device_memory *dump = data;
+
+ dump->header.version = cpu_to_le32(IWL_INI_DUMP_VER);
+ dump->type = reg->special_mem.type;
+ dump->version = reg->special_mem.version;
+
+ return dump->ranges;
+}
+
static u32 iwl_dump_ini_mem_ranges(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -1827,6 +1863,20 @@ iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime *fwrt,
}
static u32
+iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime *fwrt,
+ struct iwl_dump_ini_region_data *reg_data)
+{
+ struct iwl_fw_ini_region_tlv *reg = (void *)reg_data->reg_tlv->data;
+ u32 size = le32_to_cpu(reg->special_mem.size);
+
+ if (size)
+ size += sizeof(struct iwl_fw_ini_special_device_memory) +
+ sizeof(struct iwl_fw_ini_error_dump_range);
+
+ return size;
+}
+
+static u32
iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime *fwrt,
struct iwl_dump_ini_region_data *reg_data)
{
@@ -2125,6 +2175,12 @@ static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
.fill_range = iwl_dump_ini_config_iter,
},
+ [IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = {
+ .get_num_of_ranges = iwl_dump_ini_single_range,
+ .get_size = iwl_dump_ini_special_mem_get_size,
+ .fill_mem_hdr = iwl_dump_ini_special_mem_fill_header,
+ .fill_range = iwl_dump_ini_special_mem_iter,
+ },
};
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 72bfc64580ab..cb40f509ab61 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -495,6 +495,20 @@ struct iwl_fw_ini_monitor_dump {
} __packed;
/**
+ * struct iwl_fw_ini_special_device_memory - special device memory
+ * @header: header of the region
+ * @type: type of special memory
+ * @version: struct special memory version
+ * @ranges: the memory ranges of this region
+ */
+struct iwl_fw_ini_special_device_memory {
+ struct iwl_fw_ini_error_dump_header header;
+ __le16 type;
+ __le16 version;
+ struct iwl_fw_ini_error_dump_range ranges[];
+} __packed;
+
+/**
* struct iwl_fw_error_dump_paging - content of the UMAC's image page
* block on DRAM
* @index: the index of the page block
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 1fb45fd30ffa..02c64b988a13 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -90,6 +90,7 @@ struct iwl_ucode_header {
};
#define IWL_UCODE_TLV_DEBUG_BASE 0x1000005
+#define IWL_UCODE_TLV_CONST_BASE 0x100
/*
* new TLV uCode file layout
@@ -145,7 +146,13 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_UMAC_DEBUG_ADDRS = 54,
IWL_UCODE_TLV_LMAC_DEBUG_ADDRS = 55,
IWL_UCODE_TLV_FW_RECOVERY_INFO = 57,
- IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
+ IWL_UCODE_TLV_HW_TYPE = 58,
+ IWL_UCODE_TLV_FW_FSEQ_VERSION = 60,
+
+ IWL_UCODE_TLV_PNVM_VERSION = 62,
+ IWL_UCODE_TLV_PNVM_SKU = 64,
+
+ IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_TLV_DEBUG_BASE + 1,
@@ -405,8 +412,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* to report the CSI information with (certain) RX frames
* @IWL_UCODE_TLV_CAPA_FTM_CALIBRATED: has FTM calibrated and thus supports both
* initiator and responder
- *
* @IWL_UCODE_TLV_CAPA_MLME_OFFLOAD: supports MLME offload
+ * @IWL_UCODE_TLV_CAPA_PROTECTED_TWT: Supports protection of TWT action frames
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -451,6 +458,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_SET_PPAG = (__force iwl_ucode_tlv_capa_t)52,
IWL_UCODE_TLV_CAPA_TAS_CFG = (__force iwl_ucode_tlv_capa_t)53,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD = (__force iwl_ucode_tlv_capa_t)54,
+ IWL_UCODE_TLV_CAPA_PROTECTED_TWT = (__force iwl_ucode_tlv_capa_t)56,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index de8cff463dbe..c2a4e60518bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -25,7 +25,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2019 Intel Corporation
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,22 +57,25 @@
#include "img.h"
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd)
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
{
const struct iwl_fw_cmd_version *entry;
unsigned int i;
if (!fw->ucode_capa.cmd_versions ||
!fw->ucode_capa.n_cmd_versions)
- return IWL_FW_CMD_VER_UNKNOWN;
+ return def;
entry = fw->ucode_capa.cmd_versions;
for (i = 0; i < fw->ucode_capa.n_cmd_versions; i++, entry++) {
- if (entry->group == grp && entry->cmd == cmd)
+ if (entry->group == grp && entry->cmd == cmd) {
+ if (entry->cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return def;
return entry->cmd_ver;
+ }
}
- return IWL_FW_CMD_VER_UNKNOWN;
+ return def;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_cmd_ver);
@@ -97,3 +100,43 @@ u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def)
return def;
}
EXPORT_SYMBOL_GPL(iwl_fw_lookup_notif_ver);
+
+#define FW_SYSASSERT_CPU_MASK 0xf0000000
+static const struct {
+ const char *name;
+ u8 num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "BAD_COMMAND", 0x39 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+ { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+ { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+const char *iwl_fw_lookup_assert_desc(u32 num)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+EXPORT_SYMBOL_GPL(iwl_fw_lookup_assert_desc);
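A short usage sketch (the error id value is hypothetical; in practice it is
read from the firmware error table):

	u32 err_id = 0x34;	/* e.g. taken from the LMAC error log */
	IWL_ERR(trans, "0x%08X | %s\n", err_id,
		iwl_fw_lookup_assert_desc(err_id));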
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index a8630bf90b63..f836f3a8567b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +30,7 @@
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2008 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -107,6 +106,7 @@ struct iwl_ucode_capabilities {
u32 flags;
u32 error_log_addr;
u32 error_log_size;
+ u32 num_stations;
unsigned long _api[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_API)];
unsigned long _capa[BITS_TO_LONGS(NUM_IWL_UCODE_TLV_CAPA)];
@@ -313,7 +313,8 @@ iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
return &fw->img[ucode_type];
}
-u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd);
+u8 iwl_fw_lookup_cmd_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
u8 iwl_fw_lookup_notif_ver(const struct iwl_fw *fw, u8 grp, u8 cmd, u8 def);
+const char *iwl_fw_lookup_assert_desc(u32 num);
#endif /* __iwl_fw_img_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index b373606e1241..f8516c7ca767 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -134,7 +134,8 @@ int iwl_set_soc_latency(struct iwl_fw_runtime *fwrt)
SOC_FLAGS_LTR_APPLY_DELAY_MASK);
if (iwl_fw_lookup_cmd_ver(fwrt->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC) >= 2 &&
+ SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN) >= 2 &&
fwrt->trans->trans_cfg->low_latency_xtal)
cmd.flags |= cpu_to_le32(SOC_CONFIG_CMD_FLAGS_LOW_LATENCY);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
new file mode 100644
index 000000000000..6d8f7bff1243
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/******************************************************************************
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ *****************************************************************************/
+
+#include "iwl-drv.h"
+#include "pnvm.h"
+#include "iwl-prph.h"
+#include "iwl-io.h"
+#include "fw/api/commands.h"
+#include "fw/api/nvm-reg.h"
+#include "fw/api/alive.h"
+
+struct iwl_pnvm_section {
+ __le32 offset;
+ const u8 data[];
+} __packed;
+
+static bool iwl_pnvm_complete_fn(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct iwl_trans *trans = (struct iwl_trans *)data;
+ struct iwl_pnvm_init_complete_ntfy *pnvm_ntf = (void *)pkt->data;
+
+ IWL_DEBUG_FW(trans,
+ "PNVM complete notification received with status %d\n",
+ le32_to_cpu(pnvm_ntf->status));
+
+ return true;
+}
+
+static int iwl_pnvm_handle_section(struct iwl_trans *trans, const u8 *data,
+ size_t len)
+{
+ struct iwl_ucode_tlv *tlv;
+ u32 sha1 = 0;
+ u16 mac_type = 0, rf_id = 0;
+ u8 *pnvm_data = NULL, *tmp;
+ u32 size = 0;
+ int ret;
+
+ IWL_DEBUG_FW(trans, "Handling PNVM section\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ data += sizeof(*tlv);
+
+ switch (tlv_type) {
+ case IWL_UCODE_TLV_PNVM_VERSION:
+ if (tlv_len < sizeof(__le32)) {
+ IWL_DEBUG_FW(trans,
+ "Invalid size for IWL_UCODE_TLV_PNVM_VERSION (expected %zd, got %d)\n",
+ sizeof(__le32), tlv_len);
+ break;
+ }
+
+ sha1 = le32_to_cpup((__le32 *)data);
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_VERSION %0x\n",
+ sha1);
+ break;
+ case IWL_UCODE_TLV_HW_TYPE:
+ if (tlv_len < 2 * sizeof(__le16)) {
+ IWL_DEBUG_FW(trans,
+ "Invalid size for IWL_UCODE_TLV_HW_TYPE (expected %zd, got %d)\n",
+ 2 * sizeof(__le16), tlv_len);
+ break;
+ }
+
+ mac_type = le16_to_cpup((__le16 *)data);
+ rf_id = le16_to_cpup((__le16 *)(data + sizeof(__le16)));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_HW_TYPE mac_type 0x%0x rf_id 0x%0x\n",
+ mac_type, rf_id);
+
+ if (mac_type != CSR_HW_REV_TYPE(trans->hw_rev) ||
+ rf_id != CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
+ IWL_DEBUG_FW(trans,
+ "HW mismatch, skipping PNVM section, mac_type 0x%0x, rf_id 0x%0x.\n",
+ CSR_HW_REV_TYPE(trans->hw_rev), trans->hw_rf_id);
+ ret = -ENOENT;
+ goto out;
+ }
+
+ break;
+ case IWL_UCODE_TLV_SEC_RT: {
+ struct iwl_pnvm_section *section = (void *)data;
+ u32 data_len = tlv_len - sizeof(*section);
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_SEC_RT len %d\n",
+ tlv_len);
+
+ /* TODO: remove, this is a deprecated separator */
+ if (le32_to_cpup((__le32 *)data) == 0xddddeeee) {
+ IWL_DEBUG_FW(trans, "Ignoring separator.\n");
+ break;
+ }
+
+ IWL_DEBUG_FW(trans, "Adding data (size %d)\n",
+ data_len);
+
+ tmp = krealloc(pnvm_data, size + data_len, GFP_KERNEL);
+ if (!tmp) {
+ IWL_DEBUG_FW(trans,
+ "Couldn't allocate (more) pnvm_data\n");
+
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pnvm_data = tmp;
+
+ memcpy(pnvm_data + size, section->data, data_len);
+
+ size += data_len;
+
+ break;
+ }
+ case IWL_UCODE_TLV_PNVM_SKU:
+ IWL_DEBUG_FW(trans,
+ "New PNVM section started, stop parsing.\n");
+ goto done;
+ default:
+ IWL_DEBUG_FW(trans, "Found TLV 0x%0x, len %d\n",
+ tlv_type, tlv_len);
+ break;
+ }
+
+ len -= ALIGN(tlv_len, 4);
+ data += ALIGN(tlv_len, 4);
+ }
+
+done:
+ if (!size) {
+ IWL_DEBUG_FW(trans, "Empty PNVM, skipping.\n");
+ ret = -ENOENT;
+ goto out;
+ }
+
+ IWL_INFO(trans, "loaded PNVM version 0x%0x\n", sha1);
+
+ ret = iwl_trans_set_pnvm(trans, pnvm_data, size);
+out:
+ kfree(pnvm_data);
+ return ret;
+}
+
+static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data,
+ size_t len)
+{
+ struct iwl_ucode_tlv *tlv;
+
+ IWL_DEBUG_FW(trans, "Parsing PNVM file\n");
+
+ while (len >= sizeof(*tlv)) {
+ u32 tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (void *)data;
+
+ tlv_len = le32_to_cpu(tlv->length);
+ tlv_type = le32_to_cpu(tlv->type);
+
+ if (len < tlv_len) {
+ IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
+ len, tlv_len);
+ return -EINVAL;
+ }
+
+ if (tlv_type == IWL_UCODE_TLV_PNVM_SKU) {
+ struct iwl_sku_id *sku_id =
+ (void *)(data + sizeof(*tlv));
+
+ IWL_DEBUG_FW(trans,
+ "Got IWL_UCODE_TLV_PNVM_SKU len %d\n",
+ tlv_len);
+ IWL_DEBUG_FW(trans, "sku_id 0x%0x 0x%0x 0x%0x\n",
+ le32_to_cpu(sku_id->data[0]),
+ le32_to_cpu(sku_id->data[1]),
+ le32_to_cpu(sku_id->data[2]));
+
+ if (trans->sku_id[0] == le32_to_cpu(sku_id->data[0]) &&
+ trans->sku_id[1] == le32_to_cpu(sku_id->data[1]) &&
+ trans->sku_id[2] == le32_to_cpu(sku_id->data[2])) {
+ int ret;
+
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+
+ ret = iwl_pnvm_handle_section(trans, data, len);
+ if (!ret)
+ return 0;
+ } else {
+ IWL_DEBUG_FW(trans, "SKU ID didn't match!\n");
+ }
+ } else {
+ data += sizeof(*tlv) + ALIGN(tlv_len, 4);
+ len -= ALIGN(tlv_len, 4);
+ }
+ }
+
+ return -ENOENT;
+}
+
+int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notif_wait_data *notif_wait)
+{
+ const struct firmware *pnvm;
+ struct iwl_notification_wait pnvm_wait;
+ static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ PNVM_INIT_COMPLETE_NTFY) };
+ char pnvm_name[64];
+ int ret;
+
+ /* if the SKU_ID is empty, there's nothing to do */
+ if (!trans->sku_id[0] && !trans->sku_id[1] && !trans->sku_id[2])
+ return 0;
+
+ /* if we already have it, nothing to do either */
+ if (trans->pnvm_loaded)
+ return 0;
+
+ /*
+ * The prefix unfortunately includes a hyphen at the end, so
+ * don't add the dot here...
+ */
+ snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm",
+ trans->cfg->fw_name_pre);
+
+ /* ...but replace the hyphen with the dot here. */
+ if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name))
+ pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.';
+
+ ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev);
+ if (ret) {
+ IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n",
+ pnvm_name, ret);
+ } else {
+ iwl_pnvm_parse(trans, pnvm->data, pnvm->size);
+
+ release_firmware(pnvm);
+ }
+
+ iwl_init_notification_wait(notif_wait, &pnvm_wait,
+ ntf_cmds, ARRAY_SIZE(ntf_cmds),
+ iwl_pnvm_complete_fn, trans);
+
+ /* kick the doorbell */
+ iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
+ UREG_DOORBELL_TO_ISR6_PNVM);
+
+ return iwl_wait_notification(notif_wait, &pnvm_wait,
+ MVM_UCODE_PNVM_TIMEOUT);
+}
+IWL_EXPORT_SYMBOL(iwl_pnvm_load);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
new file mode 100644
index 000000000000..e4f91bce222d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/******************************************************************************
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ *****************************************************************************/
+
+#ifndef __IWL_PNVM_H__
+#define __IWL_PNVM_H__
+
+#include "fw/notif-wait.h"
+
+#define MVM_UCODE_PNVM_TIMEOUT (HZ / 10)
+
+int iwl_pnvm_load(struct iwl_trans *trans,
+ struct iwl_notif_wait_data *notif_wait);
+
+#endif /* __IWL_PNVM_H__ */
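Hedged usage sketch, assuming an op mode that owns an iwl_notif_wait_data (the
'mvm' context is illustrative): the loader is meant to be called right after
the firmware ALIVE notification, e.g.:

	ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
	if (ret)
		IWL_ERR(mvm, "Timeout waiting for PNVM load (%d)\n", ret);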
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index b5e5e32b6152..cddcb4d9a264 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -207,7 +207,8 @@ struct iwl_fw_runtime {
u8 sar_chain_b_profile;
struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
u32 geo_rev;
- struct iwl_ppag_table_cmd ppag_table;
+ union iwl_ppag_table_cmd ppag_table;
+ u32 ppag_ver;
#endif
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index e27c13263a23..ca4967b81d01 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -472,6 +472,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_QU 0x33
#define IWL_CFG_MAC_TYPE_QUZ 0x35
#define IWL_CFG_MAC_TYPE_QNJ 0x36
+#define IWL_CFG_MAC_TYPE_MA 0x44
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -479,6 +480,8 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_JF1 0x108
#define IWL_CFG_RF_TYPE_HR2 0x10A
#define IWL_CFG_RF_TYPE_HR1 0x10C
+#define IWL_CFG_RF_TYPE_GF 0x10D
+#define IWL_CFG_RF_TYPE_MR 0x110
#define IWL_CFG_RF_ID_TH 0x1
#define IWL_CFG_RF_ID_TH1 0x1
@@ -516,12 +519,14 @@ struct iwl_dev_info {
*/
extern const struct iwl_cfg_trans_params iwl9000_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl9560_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl9560_shared_clk_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qnj_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_medium_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_qu_long_latency_trans_cfg;
extern const struct iwl_cfg_trans_params iwl_ax200_trans_cfg;
+extern const struct iwl_cfg_trans_params iwl_ma_trans_cfg;
extern const char iwl9162_name[];
extern const char iwl9260_name[];
extern const char iwl9260_1_name[];
@@ -543,7 +548,11 @@ extern const char iwl_ax201_name[];
extern const char iwl_ax101_name[];
extern const char iwl_ax200_killer_1650w_name[];
extern const char iwl_ax200_killer_1650x_name[];
-
+extern const char iwl_ax201_killer_1650s_name[];
+extern const char iwl_ax201_killer_1650i_name[];
+extern const char iwl_ma_name[];
+extern const char iwl_ax211_name[];
+extern const char iwl_ax411_name[];
#if IS_ENABLED(CONFIG_IWLDVM)
extern const struct iwl_cfg iwl5300_agn_cfg;
extern const struct iwl_cfg iwl5100_agn_cfg;
@@ -641,6 +650,9 @@ extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0;
extern const struct iwl_cfg iwlax411_2ax_cfg_so_gf4_a0_long;
extern const struct iwl_cfg iwlax411_2ax_cfg_sosnj_gf4_a0;
extern const struct iwl_cfg iwlax211_cfg_snj_gf_a0;
+extern const struct iwl_cfg iwlax201_cfg_snj_hr_b0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_gf_a0;
+extern const struct iwl_cfg iwl_cfg_ma_a0_mr_a0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 9d7a04833cd0..5624fe42efd9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -138,16 +138,16 @@ struct iwl_prph_scratch_control {
} __packed; /* PERIPH_SCRATCH_CONTROL_S */
/*
- * struct iwl_prph_scratch_ror_cfg - ror config
- * @ror_base_addr: ror start address
- * @ror_size: ror size in DWs
+ * struct iwl_prph_scratch_pnvm_cfg - PNVM config
+ * @pnvm_base_addr: PNVM start address
+ * @pnvm_size: PNVM size in DWs
* @reserved: reserved
*/
-struct iwl_prph_scratch_ror_cfg {
- __le64 ror_base_addr;
- __le32 ror_size;
+struct iwl_prph_scratch_pnvm_cfg {
+ __le64 pnvm_base_addr;
+ __le32 pnvm_size;
__le32 reserved;
-} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
+} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
/*
* struct iwl_prph_scratch_hwm_cfg - hwm config
@@ -175,14 +175,14 @@ struct iwl_prph_scratch_rbd_cfg {
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
- * @ror_cfg: ror configuration
+ * @pnvm_cfg: PNVM configuration
* @hwm_cfg: hwm configuration
* @rbd_cfg: default RX queue configuration
*/
struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_version version;
struct iwl_prph_scratch_control control;
- struct iwl_prph_scratch_ror_cfg ror_cfg;
+ struct iwl_prph_scratch_pnvm_cfg pnvm_cfg;
struct iwl_prph_scratch_hwm_cfg hwm_cfg;
struct iwl_prph_scratch_rbd_cfg rbd_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
@@ -291,4 +291,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw);
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
+int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+ const void *data, u32 len);
+
#endif /* __iwl_context_info_file_gen3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
index eeaa8cbdddce..76b7bbdf8393 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -20,7 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -225,5 +225,8 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
struct iwl_context_info_dram *ctxt_dram);
+int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const void *data, u32 len,
+ struct iwl_dram_data *dram);
#endif /* __iwl_context_info_file_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 9ce7207d9ec5..51ce93d21ffe 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -182,9 +182,13 @@ static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
goto err;
- if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH ||
- buf_location == IWL_FW_INI_LOCATION_NPK_PATH) &&
- alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ goto err;
+
+ if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 &&
+ alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL)
goto err;
trans->dbg.fw_mon_cfg[alloc_id] = *alloc;
@@ -233,6 +237,13 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
if (le32_to_cpu(tlv->length) < sizeof(*reg))
return -EINVAL;
+ /* To safely use a string from the FW, make sure we have a
+ * null terminator
+ */
+ reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;
+
+ IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);
+
if (id >= IWL_FW_INI_MAX_REGION_ID) {
IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
return -EINVAL;
@@ -947,9 +958,8 @@ static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
struct iwl_rx_packet *pkt = tp_data->fw_pkt;
struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;
- if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
- (pkt->hdr.cmd == wanted_hdr->cmd &&
- pkt->hdr.group_id == wanted_hdr->group_id))) {
+ if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
+ pkt->hdr.group_id == wanted_hdr->group_id)) {
struct iwl_rx_packet *fw_pkt =
kmemdup(pkt,
sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
@@ -1012,6 +1022,9 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
int ret, i;
+ if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
+ return;
+
IWL_DEBUG_FW(fwrt,
"WRT: Generating active triggers list, domain 0x%x\n",
fwrt->trans->dbg.domains_bitmap);
@@ -1076,6 +1089,7 @@ void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
break;
case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
+ case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
index e1a41fd503a8..7df173cc9ddc 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
@@ -121,10 +121,9 @@ void __iwl_dbg(struct device *dev,
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_have_debug_level(level) &&
(!limit || net_ratelimit()))
- dev_printk(KERN_DEBUG, dev, "%c %s %pV",
- in_interrupt() ? 'I' : 'U', function, &vaf);
+ dev_printk(KERN_DEBUG, dev, "%s %pV", function, &vaf);
#endif
- trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
+ trace_iwlwifi_dbg(level, function, &vaf);
va_end(args);
}
IWL_EXPORT_SYMBOL(__iwl_dbg);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 063d8add147f..528eba441926 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright (C) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -139,7 +139,7 @@ do { \
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
-#define IWL_DL_RPM 0x00000400
+#define IWL_DL_WOWLAN 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
@@ -205,7 +205,7 @@ do { \
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
#define IWL_DEBUG_TPT(p, f, a...) IWL_DEBUG(p, IWL_DL_TPT, f, ## a)
-#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
+#define IWL_DEBUG_WOWLAN(p, f, a...) IWL_DEBUG(p, IWL_DL_WOWLAN, f, ## a)
#define IWL_DEBUG_LAR(p, f, a...) IWL_DEBUG(p, IWL_DL_LAR, f, ## a)
#define IWL_DEBUG_FW_INFO(p, f, a...) \
IWL_DEBUG(p, IWL_DL_INFO | IWL_DL_FW, f, ## a)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 9ad93ef60890..d0467da5af03 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -54,18 +54,16 @@ DEFINE_EVENT(iwlwifi_msg_event, iwlwifi_crit,
);
TRACE_EVENT(iwlwifi_dbg,
- TP_PROTO(u32 level, bool in_interrupt, const char *function,
+ TP_PROTO(u32 level, const char *function,
struct va_format *vaf),
- TP_ARGS(level, in_interrupt, function, vaf),
+ TP_ARGS(level, function, vaf),
TP_STRUCT__entry(
__field(u32, level)
- __field(u8, in_interrupt)
__string(function, function)
__dynamic_array(char, msg, MAX_MSG_LEN)
),
TP_fast_assign(
__entry->level = level;
- __entry->in_interrupt = in_interrupt;
__assign_str(function, function);
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
MAX_MSG_LEN, vaf->fmt,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 04f14bfdd091..9dcd2e990c9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -76,6 +76,7 @@
#include "iwl-config.h"
#include "iwl-modparams.h"
#include "fw/api/alive.h"
+#include "fw/api/mac.h"
/******************************************************************************
*
@@ -102,6 +103,9 @@ static struct dentry *iwl_dbgfs_root;
* @fw_index: firmware revision to try loading
* @firmware_name: composite filename of ucode file to load
* @request_firmware_complete: the firmware has been obtained from user space
+ * @dbgfs_drv: debugfs root directory entry
+ * @dbgfs_trans: debugfs transport directory entry
+ * @dbgfs_op_mode: debugfs op_mode directory entry
*/
struct iwl_drv {
struct list_head list;
@@ -1124,6 +1128,19 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
fseq_ver->version);
}
break;
+ case IWL_UCODE_TLV_FW_NUM_STATIONS:
+ if (tlv_len != sizeof(u32))
+ goto invalid_tlv_len;
+ if (le32_to_cpup((__le32 *)tlv_data) >
+ IWL_MVM_STATION_COUNT_MAX) {
+ IWL_ERR(drv,
+ "%d is an invalid number of station\n",
+ le32_to_cpup((__le32 *)tlv_data));
+ goto tlv_error;
+ }
+ capa->num_stations =
+ le32_to_cpup((__le32 *)tlv_data);
+ break;
case IWL_UCODE_TLV_UMAC_DEBUG_ADDRS: {
struct iwl_umac_debug_addrs *dbg_ptrs =
(void *)tlv_data;
@@ -1319,7 +1336,7 @@ static void _iwl_op_mode_stop(struct iwl_drv *drv)
}
}
-/**
+/*
* iwl_req_fw_callback - callback when firmware was loaded
*
* If loaded successfully, copies the firmware into buffers
@@ -1345,6 +1362,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+ fw->ucode_capa.num_stations = IWL_MVM_STATION_COUNT_MAX;
/* dump all fw memory areas by default */
fw->dbg.dump_mask = 0xffffffff;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index ee410417761d..6d19de3058d2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -254,6 +254,65 @@ enum iwl_reg_capa_flags {
REG_CAPA_11AX_DISABLED = BIT(10),
};
+/**
+ * enum iwl_reg_capa_flags_v2 - global flags applied for the whole regulatory
+ * domain (version 2).
+ * @REG_CAPA_V2_STRADDLE_DISABLED: Straddle channels (144, 142, 138) are
+ * disabled.
+ * @REG_CAPA_V2_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 2.4Ghz band is allowed.
+ * @REG_CAPA_V2_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the
+ * 5Ghz band is allowed.
+ * @REG_CAPA_V2_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_V2_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_V2_MCS_8_ALLOWED: 11ac with MCS 8 is allowed.
+ * @REG_CAPA_V2_MCS_9_ALLOWED: 11ac with MCS 9 is allowed.
+ * @REG_CAPA_V2_WEATHER_DISABLED: Weather radar channels (120, 124, 128, 118,
+ * 126, 122) are disabled.
+ * @REG_CAPA_V2_40MHZ_ALLOWED: 11n channel with a width of 40Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @REG_CAPA_V2_11AX_DISABLED: 11ax is forbidden for this regulatory domain.
+ */
+enum iwl_reg_capa_flags_v2 {
+ REG_CAPA_V2_STRADDLE_DISABLED = BIT(0),
+ REG_CAPA_V2_BF_CCD_LOW_BAND = BIT(1),
+ REG_CAPA_V2_BF_CCD_HIGH_BAND = BIT(2),
+ REG_CAPA_V2_160MHZ_ALLOWED = BIT(3),
+ REG_CAPA_V2_80MHZ_ALLOWED = BIT(4),
+ REG_CAPA_V2_MCS_8_ALLOWED = BIT(5),
+ REG_CAPA_V2_MCS_9_ALLOWED = BIT(6),
+ REG_CAPA_V2_WEATHER_DISABLED = BIT(7),
+ REG_CAPA_V2_40MHZ_ALLOWED = BIT(8),
+ REG_CAPA_V2_11AX_DISABLED = BIT(13),
+};
+
+/*
+ * API v2 for reg_capa_flags is relevant from version 6 and onwards of the
+ * MCC update command response.
+ */
+#define REG_CAPA_V2_RESP_VER 6
+
+/**
+ * struct iwl_reg_capa - global regulatory capabilities, used for
+ * handling the different APIs of reg_capa_flags.
+ *
+ * @allow_40mhz: 11n channel with a width of 40Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @allow_80mhz: 11ac channel with a width of 80Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @allow_160mhz: 11ac channel with a width of 160Mhz is allowed
+ * for this regulatory domain (valid only in 5Ghz).
+ * @disable_11ax: 11ax is forbidden for this regulatory domain.
+ */
+struct iwl_reg_capa {
+ u16 allow_40mhz;
+ u16 allow_80mhz;
+ u16 allow_160mhz;
+ u16 disable_11ax;
+};
+
static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level,
int chan, u32 flags)
{
@@ -1064,7 +1123,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
- u16 cap_flags,
+ struct iwl_reg_capa reg_capa,
const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1104,29 +1163,46 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags |= NL80211_RRF_GO_CONCURRENT;
/*
- * cap_flags is per regulatory domain so apply it for every channel
+ * reg_capa is per regulatory domain so apply it for every channel
*/
if (ch_idx >= NUM_2GHZ_CHANNELS) {
- if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN)
+ if (!reg_capa.allow_40mhz)
flags |= NL80211_RRF_NO_HT40;
- if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED))
+ if (!reg_capa.allow_80mhz)
flags |= NL80211_RRF_NO_80MHZ;
- if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED))
+ if (!reg_capa.allow_160mhz)
flags |= NL80211_RRF_NO_160MHZ;
}
-
- if (cap_flags & REG_CAPA_11AX_DISABLED)
+ if (reg_capa.disable_11ax)
flags |= NL80211_RRF_NO_HE;
return flags;
}
+static struct iwl_reg_capa iwl_get_reg_capa(u16 flags, u8 resp_ver)
+{
+ struct iwl_reg_capa reg_capa;
+
+ if (resp_ver >= REG_CAPA_V2_RESP_VER) {
+ reg_capa.allow_40mhz = flags & REG_CAPA_V2_40MHZ_ALLOWED;
+ reg_capa.allow_80mhz = flags & REG_CAPA_V2_80MHZ_ALLOWED;
+ reg_capa.allow_160mhz = flags & REG_CAPA_V2_160MHZ_ALLOWED;
+ reg_capa.disable_11ax = flags & REG_CAPA_V2_11AX_DISABLED;
+ } else {
+ reg_capa.allow_40mhz = !(flags & REG_CAPA_40MHZ_FORBIDDEN);
+ reg_capa.allow_80mhz = flags & REG_CAPA_80MHZ_ALLOWED;
+ reg_capa.allow_160mhz = flags & REG_CAPA_160MHZ_ALLOWED;
+ reg_capa.disable_11ax = flags & REG_CAPA_11AX_DISABLED;
+ }
+ return reg_capa;
+}
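
The helper above normalizes both flag layouts into one struct; note the polarity
flip it hides: v1 carries a 40MHz *forbidden* bit, while v2 carries a 40MHz
*allowed* bit. A minimal decode sketch (the flags word is hypothetical; only the
BIT() values and names defined in the hunks above are used):

	/* hypothetical v2 flags word, not taken from the patch */
	u16 flags = REG_CAPA_V2_40MHZ_ALLOWED | REG_CAPA_V2_80MHZ_ALLOWED;
	struct iwl_reg_capa capa;

	/* resp_ver >= REG_CAPA_V2_RESP_VER selects the v2 layout */
	capa = iwl_get_reg_capa(flags, REG_CAPA_V2_RESP_VER);
	/* capa.allow_40mhz and capa.allow_80mhz are non-zero while
	 * capa.allow_160mhz is 0, so for 5GHz channels only
	 * NL80211_RRF_NO_160MHZ gets added by
	 * iwl_nvm_get_regdom_bw_flags(). */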
+
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u16 cap)
+ u16 geo_info, u16 cap, u8 resp_ver)
{
int ch_idx;
u16 ch_flags;
@@ -1139,6 +1215,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int valid_rules = 0;
bool new_rule;
int max_num_ch;
+ struct iwl_reg_capa reg_capa;
if (cfg->uhb_supported) {
max_num_ch = IWL_NVM_NUM_CHANNELS_UHB;
@@ -1169,6 +1246,9 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
regd->alpha2[0] = fw_mcc >> 8;
regd->alpha2[1] = fw_mcc & 0xff;
+ /* parse regulatory capability flags */
+ reg_capa = iwl_get_reg_capa(cap, resp_ver);
+
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = iwl_nl80211_band_from_channel_idx(ch_idx);
@@ -1183,7 +1263,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
}
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
- ch_flags, cap,
+ ch_flags, reg_capa,
cfg);
/* we can't continue the same rule */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index fb0b385d10fd..50bd7fdcf852 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -104,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u16 cap);
+ u16 geo_info, u16 cap, u8 resp_ver);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index 8e254c0eda13..fa3f15778fc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -460,6 +460,7 @@ enum {
#define UREG_DOORBELL_TO_ISR6_NMI_BIT BIT(0)
#define UREG_DOORBELL_TO_ISR6_SUSPEND BIT(18)
#define UREG_DOORBELL_TO_ISR6_RESUME BIT(19)
+#define UREG_DOORBELL_TO_ISR6_PNVM BIT(20)
#define FSEQ_ERROR_CODE 0xA340C8
#define FSEQ_TOP_INIT_VERSION 0xA34038
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index f91197e4ae40..becee92a5fd6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,6 +29,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -60,17 +62,20 @@
#include <linux/kernel.h>
#include <linux/bsearch.h>
+#include "fw/api/tx.h"
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
+#include "queue/tx.h"
+#include <linux/dmapool.h>
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
const struct iwl_trans_ops *ops,
- unsigned int cmd_pool_size,
- unsigned int cmd_pool_align)
+ const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
+ int txcmd_size, txcmd_align;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key __key;
#endif
@@ -79,6 +84,25 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
if (!trans)
return NULL;
+ trans->trans_cfg = cfg_trans;
+ if (!cfg_trans->gen2) {
+ txcmd_size = sizeof(struct iwl_tx_cmd);
+ txcmd_align = sizeof(void *);
+ } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
+ txcmd_align = 64;
+ } else {
+ txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
+ txcmd_align = 128;
+ }
+
+ txcmd_size += sizeof(struct iwl_cmd_header);
+ txcmd_size += 36; /* biggest possible 802.11 header */
+
+ /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
+ if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
+ return ERR_PTR(-EINVAL);
+
#ifdef CONFIG_LOCKDEP
lockdep_init_map(&trans->sync_cmd_lockdep_map, "sync_cmd_lockdep_map",
&__key, 0);
@@ -88,22 +112,68 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
trans->ops = ops;
trans->num_rx_queues = 1;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans->txqs.bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
+ else
+ trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (trans->trans_cfg->gen2) {
+ trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", dev,
+ trans->txqs.bc_tbl_size,
+ 256, 0);
+ if (!trans->txqs.bc_pool)
+ return NULL;
+ }
+
+ if (trans->trans_cfg->use_tfh) {
+ trans->txqs.tfd.addr_size = 64;
+ trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans->txqs.tfd.addr_size = 36;
+ trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
+
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
kmem_cache_create(trans->dev_cmd_pool_name,
- cmd_pool_size, cmd_pool_align,
+ txcmd_size, txcmd_align,
SLAB_HWCACHE_ALIGN, NULL);
if (!trans->dev_cmd_pool)
return NULL;
WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
+ trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ if (!trans->txqs.tso_hdr_page) {
+ kmem_cache_destroy(trans->dev_cmd_pool);
+ return NULL;
+ }
+
return trans;
}
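
One non-obvious step in iwl_trans_alloc() above is the WARN on
txcmd_size >= txcmd_align for gen2. A short reasoning sketch, not part of the
patch itself:

/* With txcmd_align a power of two (64 or 128) and txcmd_size strictly
 * smaller, every object handed out by the txcmd_align-aligned kmem
 * cache lies inside a single aligned window; since PAGE_SIZE is a
 * multiple of that alignment, such a window can never straddle a page
 * boundary -- which is exactly the constraint the gen2 hardware puts
 * on the device TX command. */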
void iwl_trans_free(struct iwl_trans *trans)
{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct iwl_tso_hdr_page *p =
+ per_cpu_ptr(trans->txqs.tso_hdr_page, i);
+
+ if (p->page)
+ __free_page(p->page);
+ }
+
+ free_percpu(trans->txqs.tso_hdr_page);
+
kmem_cache_destroy(trans->dev_cmd_pool);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 34788e7afc7b..11a040e75bf3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -73,6 +73,7 @@
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
+#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
@@ -215,6 +216,12 @@ struct iwl_device_tx_cmd {
*/
#define IWL_MAX_CMD_TBS_PER_TFD 2
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
+
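A worked example for the macro above, assuming the TB counts used elsewhere in
the driver (IWL_NUM_OF_TBS == 20 and IWL_TFH_NUM_TBS == 25 are assumptions,
neither is shown in this hunk):

	/* use_tfh devices: trans->txqs.tfd.max_tbs == 25 */
	trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); /* 25 - 3 == 22 */
	/* legacy devices: max_tbs == 20, leaving 17 frag entries after
	 * the TX command + header (2 TBs) and the skb head chunk (1 TB). */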
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
@@ -316,6 +323,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_MGMT_TID 15
#define IWL_FRAME_LIMIT 64
#define IWL_MAX_RX_HW_QUEUES 16
+#define IWL_9000_MAX_RX_HW_QUEUES 6
/**
* enum iwl_wowlan_status - WoWLAN image/device status
@@ -561,6 +569,8 @@ struct iwl_trans_rxq_dma_data {
* Note that the transport must fill in the proper file headers.
* @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
* of the trans debugfs
+ * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
+ * context info.
*/
struct iwl_trans_ops {
@@ -633,6 +643,7 @@ struct iwl_trans_ops {
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
+ int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
};
/**
@@ -906,19 +917,37 @@ struct iwl_txq {
/**
* struct iwl_trans_txqs - transport tx queues data
*
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @page_offs: offset from skb->cb to mac header page pointer
+ * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
+ * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
*/
struct iwl_trans_txqs {
unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct dma_pool *bc_pool;
+ size_t bc_tbl_size;
+ bool bc_table_dword;
+ u8 page_offs;
+ u8 dev_cmd_offs;
+ struct iwl_tso_hdr_page __percpu *tso_hdr_page;
+
struct {
u8 fifo;
u8 q_id;
unsigned int wdg_timeout;
} cmd;
+ struct {
+ u8 max_tbs;
+ u16 size;
+ u8 addr_size;
+ } tfd;
+
+ struct iwl_dma_ptr scd_bc_tbls;
};
/**
@@ -971,11 +1000,13 @@ struct iwl_trans {
u32 hw_rf_id;
u32 hw_id;
char hw_id_str[52];
+ u32 sku_id[3];
u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
bool pm_support;
bool ltr_enabled;
+ u8 pnvm_loaded:1;
const struct iwl_hcmd_arr *command_groups;
int command_groups_size;
@@ -1425,6 +1456,21 @@ static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
trans->ops->sync_nmi(trans);
}
+static inline int iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const void *data, u32 len)
+{
+ if (trans->ops->set_pnvm) {
+ int ret = trans->ops->set_pnvm(trans, data, len);
+
+ if (ret)
+ return ret;
+ }
+
+ trans->pnvm_loaded = true;
+
+ return 0;
+}
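
A hedged usage sketch of the new inline above, showing how an op-mode might
hand PNVM data to the transport; the function and parameter names here
(example_load_pnvm, pnvm_data, pnvm_len) are placeholders, not names from this
patch:

static int example_load_pnvm(struct iwl_trans *trans,
			     const void *pnvm_data, u32 pnvm_len)
{
	int ret = iwl_trans_set_pnvm(trans, pnvm_data, pnvm_len);

	if (ret)
		return ret;	/* transport rejected the payload */

	/* trans->pnvm_loaded is now set even for transports without a
	 * ->set_pnvm op, so the load is not retried on restart. */
	return 0;
}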
+
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
@@ -1435,10 +1481,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
- struct device *dev,
- const struct iwl_trans_ops *ops,
- unsigned int cmd_pool_size,
- unsigned int cmd_pool_align);
+ struct device *dev,
+ const struct iwl_trans_ops *ops,
+ const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_free(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
index 4094a4158032..5e731c57e4f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/binding.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
@@ -26,7 +26,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2012 - 2014, 2020 Intel Corporation. All rights reserved.
* Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
@@ -86,11 +86,8 @@ static int iwl_mvm_binding_cmd(struct iwl_mvm *mvm, u32 action,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT)) {
size = sizeof(cmd);
- if (phyctxt->channel->band == NL80211_BAND_2GHZ ||
- !iwl_mvm_is_cdb_supported(mvm))
- cmd.lmac_id = cpu_to_le32(IWL_LMAC_24G_INDEX);
- else
- cmd.lmac_id = cpu_to_le32(IWL_LMAC_5G_INDEX);
+ cmd.lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw,
+ phyctxt->channel->band));
} else {
size = IWL_BINDING_CMD_SIZE_V1;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index b0268f44b2ea..2487871eac73 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -152,12 +152,18 @@
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
#define IWL_MVM_D3_DEBUG false
-#define IWL_MVM_USE_TWT false
+#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10
#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_PHY_FILTER_CHAIN_A 0
#define IWL_MVM_PHY_FILTER_CHAIN_B 0
#define IWL_MVM_PHY_FILTER_CHAIN_C 0
#define IWL_MVM_PHY_FILTER_CHAIN_D 0
+#define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40
+/* 20016 ps of RTT is a 6 m round trip, meaning a 3 m range */
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT 20016
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT 20016
+#define IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC 2
#endif /* __MVM_CONSTANTS_H */
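
The 20016 ps thresholds above are easy to sanity-check: distance is
(c * RTT) / 2, and c/2 is roughly 0.15 mm per picosecond of RTT. A hedged
arithmetic sketch (the helper name is hypothetical):

static inline s64 example_rtt_ps_to_range_mm(s64 rtt_ps)
{
	/* c / 2 ~= 0.15 mm per picosecond of round-trip time */
	return (rtt_ps * 3) / 20;	/* 20016 * 3 / 20 = 3002 mm ~= 3 m */
}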
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 2a94545d737f..d21143495e70 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -70,6 +70,7 @@
#include "iwl-modparams.h"
#include "fw-api.h"
#include "mvm.h"
+#include "fw/img.h"
void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -80,8 +81,11 @@ void iwl_mvm_set_rekey_data(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- memcpy(mvmvif->rekey_data.kek, data->kek, NL80211_KEK_LEN);
- memcpy(mvmvif->rekey_data.kck, data->kck, NL80211_KCK_LEN);
+ mvmvif->rekey_data.kek_len = data->kek_len;
+ mvmvif->rekey_data.kck_len = data->kck_len;
+ memcpy(mvmvif->rekey_data.kek, data->kek, data->kek_len);
+ memcpy(mvmvif->rekey_data.kck, data->kck, data->kck_len);
+ mvmvif->rekey_data.akm = data->akm & 0xFF;
mvmvif->rekey_data.replay_ctr =
cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
mvmvif->rekey_data.valid = true;
@@ -156,6 +160,7 @@ static const u8 *iwl_mvm_find_max_pn(struct ieee80211_key_conf *key,
struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
+ struct iwl_wowlan_kek_kck_material_cmd_v3 *kek_kck_cmd;
bool error, use_rsc_tsc, use_tkip, configure_keys;
int wep_key_idx;
};
@@ -232,7 +237,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
default:
data->error = true;
return;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ return;
case WLAN_CIPHER_SUITE_AES_CMAC:
+ data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
/*
* Ignore CMAC keys -- the WoWLAN firmware doesn't support them
* but we also shouldn't abort suspend due to that. It does have
@@ -245,8 +255,10 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
if (sta) {
u64 pn64;
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
- tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
+ tkip_sc =
+ data->rsc_tsc->params.all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc =
+ &data->rsc_tsc->params.all_tsc_rsc.tkip.tsc;
rx_p1ks = data->tkip->rx_uni;
@@ -265,9 +277,11 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
rx_mic_key = data->tkip->mic_keys.rx_unicast;
} else {
tkip_sc =
- data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ data->rsc_tsc->params.all_tsc_rsc.tkip.multicast_rsc;
rx_p1ks = data->tkip->rx_multi;
rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ data->kek_kck_cmd->gtk_cipher =
+ cpu_to_le32(STA_KEY_FLG_TKIP);
}
/*
@@ -299,16 +313,25 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
data->use_rsc_tsc = true;
break;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
if (sta) {
u64 pn64;
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
- aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+ aes_sc =
+ data->rsc_tsc->params.all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc =
+ &data->rsc_tsc->params.all_tsc_rsc.aes.tsc;
pn64 = atomic64_read(&key->tx_pn);
aes_tx_sc->pn = cpu_to_le64(pn64);
} else {
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+ aes_sc =
+ data->rsc_tsc->params.all_tsc_rsc.aes.multicast_rsc;
+ data->kek_kck_cmd->gtk_cipher =
+ key->cipher == WLAN_CIPHER_SUITE_CCMP ?
+ cpu_to_le32(STA_KEY_FLG_CCM) :
+ cpu_to_le32(STA_KEY_FLG_GCMP);
}
/*
@@ -321,11 +344,12 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
const u8 *pn;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- ptk_pn = rcu_dereference_protected(
- mvmsta->ptk_pn[key->keyidx],
- lockdep_is_held(&mvm->mutex));
- if (WARN_ON(!ptk_pn))
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
+ rcu_read_unlock();
break;
+ }
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
pn = iwl_mvm_find_max_pn(key, ptk_pn, &seq, i,
@@ -337,6 +361,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
((u64)pn[1] << 32) |
((u64)pn[0] << 40));
}
+
+ rcu_read_unlock();
} else {
for (i = 0; i < IWL_NUM_RSC; i++) {
u8 *pn = seq.ccmp.pn;
@@ -354,6 +380,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
break;
}
+ IWL_DEBUG_WOWLAN(mvm, "GTK cipher %d\n", data->kek_kck_cmd->gtk_cipher);
+
if (data->configure_keys) {
mutex_lock(&mvm->mutex);
/*
@@ -734,7 +762,7 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 cmd_flags)
{
- struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
+ struct iwl_wowlan_kek_kck_material_cmd_v3 kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
bool unified = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -743,9 +771,12 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
.use_rsc_tsc = false,
.tkip = &tkip_cmd,
.use_tkip = false,
+ .kek_kck_cmd = &kek_kck_cmd,
};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ u8 cmd_ver;
+ size_t cmd_size;
key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
if (!key_data.rsc_tsc)
@@ -772,10 +803,29 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
}
if (key_data.use_rsc_tsc) {
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WOWLAN_TSC_RSC_PARAM, cmd_flags,
- sizeof(*key_data.rsc_tsc),
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_TSC_RSC_PARAM,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int size;
+
+ if (ver == 4) {
+ size = sizeof(*key_data.rsc_tsc);
+ key_data.rsc_tsc->sta_id =
+ cpu_to_le32(mvmvif->ap_sta_id);
+
+ } else if (ver == 2 || ver == IWL_FW_CMD_VER_UNKNOWN) {
+ size = sizeof(key_data.rsc_tsc->params);
+ } else {
+ ret = 0;
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_TSC_RSC_PARAM,
+ cmd_flags,
+ size,
key_data.rsc_tsc);
+
if (ret)
goto out;
}
@@ -783,9 +833,27 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
if (key_data.use_tkip &&
!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_TKIP_MIC_KEYS)) {
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ WOWLAN_TKIP_PARAM,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int size;
+
+ if (ver == 2) {
+ size = sizeof(tkip_cmd);
+ key_data.tkip->sta_id =
+ cpu_to_le32(mvmvif->ap_sta_id);
+ } else if (ver == 1 || ver == IWL_FW_CMD_VER_UNKNOWN) {
+ size = sizeof(struct iwl_wowlan_tkip_params_cmd_ver_1);
+ } else {
+ ret = -EINVAL;
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+
+ /* send relevant data according to CMD version */
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_TKIP_PARAM,
- cmd_flags, sizeof(tkip_cmd),
+ cmd_flags, size,
&tkip_cmd);
if (ret)
goto out;
@@ -793,18 +861,33 @@ static int iwl_mvm_wowlan_config_key_params(struct iwl_mvm *mvm,
/* configure rekey data only if offloaded rekey is supported (d3) */
if (mvmvif->rekey_data.valid) {
- memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ IWL_ALWAYS_LONG_GROUP,
+ WOWLAN_KEK_KCK_MATERIAL,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (WARN_ON(cmd_ver != 2 && cmd_ver != 3 &&
+ cmd_ver != IWL_FW_CMD_VER_UNKNOWN))
+ return -EINVAL;
+ if (cmd_ver == 3)
+ cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v3);
+ else
+ cmd_size = sizeof(struct iwl_wowlan_kek_kck_material_cmd_v2);
+
memcpy(kek_kck_cmd.kck, mvmvif->rekey_data.kck,
- NL80211_KCK_LEN);
- kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ mvmvif->rekey_data.kck_len);
+ kek_kck_cmd.kck_len = cpu_to_le16(mvmvif->rekey_data.kck_len);
memcpy(kek_kck_cmd.kek, mvmvif->rekey_data.kek,
- NL80211_KEK_LEN);
- kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ mvmvif->rekey_data.kek_len);
+ kek_kck_cmd.kek_len = cpu_to_le16(mvmvif->rekey_data.kek_len);
kek_kck_cmd.replay_ctr = mvmvif->rekey_data.replay_ctr;
+ kek_kck_cmd.akm = cpu_to_le32(mvmvif->rekey_data.akm);
+
+ IWL_DEBUG_WOWLAN(mvm, "setting akm %d\n",
+ mvmvif->rekey_data.akm);
ret = iwl_mvm_send_cmd_pdu(mvm,
WOWLAN_KEK_KCK_MATERIAL, cmd_flags,
- sizeof(kek_kck_cmd),
+ cmd_size,
&kek_kck_cmd);
if (ret)
goto out;
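
A hedged note on the sizing trick above: the code fills an
iwl_wowlan_kek_kck_material_cmd_v3 and then sends only cmd_size bytes of it,
which is valid only on the assumption (implied by this code, the struct
definitions are not in this hunk) that the v3 layout begins with the exact v2
layout, so truncating the same buffer to sizeof(..._v2) yields a well-formed v2
command for older firmware.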
@@ -1283,10 +1366,12 @@ static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
mvmsta = iwl_mvm_sta_from_mac80211(sta);
- ptk_pn = rcu_dereference_protected(mvmsta->ptk_pn[key->keyidx],
- lockdep_is_held(&mvm->mutex));
- if (WARN_ON(!ptk_pn))
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
+ rcu_read_unlock();
return;
+ }
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct ieee80211_key_seq seq = {};
@@ -1298,6 +1383,7 @@ static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
memcpy(ptk_pn->q[i].pn[tid],
seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
}
+ rcu_read_unlock();
} else {
for (tid = 0; tid < IWL_NUM_RSC; tid++) {
struct ieee80211_key_seq seq = {};
@@ -1331,6 +1417,8 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -1367,6 +1455,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
/* ignore WEP completely, nothing to do */
return;
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_TKIP:
/* we support these */
break;
@@ -1392,6 +1482,8 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
sta, key);
atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
@@ -1460,6 +1552,8 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
ieee80211_iter_keys(mvm->hw, vif,
iwl_mvm_d3_update_keys, &gtkdata);
+ IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
+ le32_to_cpu(status->num_of_gtk_rekeys));
if (status->num_of_gtk_rekeys) {
struct ieee80211_key_conf *key;
struct {
@@ -1472,13 +1566,26 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
};
__be64 replay_ctr;
+ IWL_DEBUG_WOWLAN(mvm,
+ "Received from FW GTK cipher %d, key index %d\n",
+ conf.conf.cipher, conf.conf.keyidx);
switch (gtkdata.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
conf.conf.keylen = WLAN_KEY_LEN_CCMP;
memcpy(conf.conf.key, status->gtk[0].key,
WLAN_KEY_LEN_CCMP);
break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
+ conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ memcpy(conf.conf.key, status->gtk[0].key,
+ WLAN_KEY_LEN_GCMP_256);
+ break;
case WLAN_CIPHER_SUITE_TKIP:
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
conf.conf.keylen = WLAN_KEY_LEN_TKIP;
memcpy(conf.conf.key, status->gtk[0].key, 16);
/* leave TX MIC key zeroed, we don't use it anyway */
@@ -1508,15 +1615,60 @@ out:
return true;
}
+/* Occasionally, templates would be nice. This is one of those times ... */
+#define iwl_mvm_parse_wowlan_status_common(_ver) \
+static struct iwl_wowlan_status * \
+iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
+ void *_data, int len) \
+{ \
+ struct iwl_wowlan_status *status; \
+ struct iwl_wowlan_status_ ##_ver *data = _data; \
+ int data_size; \
+ \
+ if (len < sizeof(*data)) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return ERR_PTR(-EIO); \
+ } \
+ \
+ data_size = ALIGN(le32_to_cpu(data->wake_packet_bufsize), 4); \
+ if (len != sizeof(*data) + data_size) { \
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
+ return ERR_PTR(-EIO); \
+ } \
+ \
+ status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL); \
+ if (!status) \
+ return ERR_PTR(-ENOMEM); \
+ \
+ /* copy all the common fields */ \
+ status->replay_ctr = data->replay_ctr; \
+ status->pattern_number = data->pattern_number; \
+ status->non_qos_seq_ctr = data->non_qos_seq_ctr; \
+ memcpy(status->qos_seq_ctr, data->qos_seq_ctr, \
+ sizeof(status->qos_seq_ctr)); \
+ status->wakeup_reasons = data->wakeup_reasons; \
+ status->num_of_gtk_rekeys = data->num_of_gtk_rekeys; \
+ status->received_beacons = data->received_beacons; \
+ status->wake_packet_length = data->wake_packet_length; \
+ status->wake_packet_bufsize = data->wake_packet_bufsize; \
+ memcpy(status->wake_packet, data->wake_packet, \
+ le32_to_cpu(status->wake_packet_bufsize)); \
+ \
+ return status; \
+}
+
+iwl_mvm_parse_wowlan_status_common(v6)
+iwl_mvm_parse_wowlan_status_common(v7)
+iwl_mvm_parse_wowlan_status_common(v9)
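
The macro above stamps out one parser per versioned status struct -- the usual
C substitute for templates. Each expansion is layout-correct because only the
common leading fields are copied, field by field. A toy illustration of the
same pattern (not driver code):

/* one macro body, expanded once per versioned layout */
#define make_sum_fn(_ver)						\
static int sum_ ## _ver(const struct toy_ ## _ver *d)			\
{									\
	return d->a + d->b;						\
}

struct toy_v1 { int a; int b; };
struct toy_v2 { int a; int b; int c; };

make_sum_fn(v1)		/* defines sum_v1() */
make_sum_fn(v2)		/* defines sum_v2() */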
+
struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
{
- struct iwl_wowlan_status_v7 *v7;
struct iwl_wowlan_status *status;
struct iwl_host_cmd cmd = {
.id = WOWLAN_GET_STATUSES,
.flags = CMD_WANT_SKB,
};
- int ret, len, status_size, data_size;
+ int ret, len;
u8 notif_ver;
lockdep_assert_held(&mvm->mutex);
@@ -1528,28 +1680,19 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
}
len = iwl_rx_packet_payload_len(cmd.resp_pkt);
+
+ /* default to 7 (when we have IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL) */
+ notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ WOWLAN_GET_STATUSES, 7);
+
if (!fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
- status_size = sizeof(*v6);
-
- if (len < status_size) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- status = ERR_PTR(-EIO);
- goto out_free_resp;
- }
-
- data_size = ALIGN(le32_to_cpu(v6->wake_packet_bufsize), 4);
-
- if (len != (status_size + data_size)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- status = ERR_PTR(-EIO);
- goto out_free_resp;
- }
-
- status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
- if (!status)
+ status = iwl_mvm_parse_wowlan_status_common_v6(mvm,
+ cmd.resp_pkt->data,
+ len);
+ if (IS_ERR(status))
goto out_free_resp;
BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
@@ -1574,47 +1717,37 @@ struct iwl_wowlan_status *iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm)
* currently used key.
*/
status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
+ } else if (notif_ver == 7) {
+ struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
- status->replay_ctr = v6->replay_ctr;
-
- /* everything starting from pattern_number is identical */
- memcpy(&status->pattern_number, &v6->pattern_number,
- offsetof(struct iwl_wowlan_status, wake_packet) -
- offsetof(struct iwl_wowlan_status, pattern_number) +
- data_size);
-
- goto out_free_resp;
- }
-
- v7 = (void *)cmd.resp_pkt->data;
- notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
- WOWLAN_GET_STATUSES, 0);
+ status = iwl_mvm_parse_wowlan_status_common_v7(mvm,
+ cmd.resp_pkt->data,
+ len);
+ if (IS_ERR(status))
+ goto out_free_resp;
- status_size = sizeof(*status);
+ status->gtk[0] = v7->gtk[0];
+ status->igtk[0] = v7->igtk[0];
+ } else if (notif_ver == 9) {
+ struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
- if (notif_ver == IWL_FW_CMD_VER_UNKNOWN || notif_ver < 9)
- status_size = sizeof(*v7);
+ status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
+ cmd.resp_pkt->data,
+ len);
+ if (IS_ERR(status))
+ goto out_free_resp;
- if (len < status_size) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- status = ERR_PTR(-EIO);
- goto out_free_resp;
- }
- data_size = ALIGN(le32_to_cpu(v7->wake_packet_bufsize), 4);
+ status->gtk[0] = v9->gtk[0];
+ status->igtk[0] = v9->igtk[0];
- if (len != (status_size + data_size)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ status->tid_tear_down = v9->tid_tear_down;
+ } else {
+ IWL_ERR(mvm,
+ "Firmware advertises unknown WoWLAN status response %d!\n",
+ notif_ver);
status = ERR_PTR(-EIO);
- goto out_free_resp;
}
- status = kzalloc(sizeof(*status) + data_size, GFP_KERNEL);
- if (!status)
- goto out_free_resp;
-
- memcpy(status, v7, status_size);
- memcpy(status->wake_packet, (u8 *)v7 + status_size, data_size);
-
out_free_resp:
iwl_free_resp(&cmd);
return status;
@@ -1647,6 +1780,9 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
if (IS_ERR_OR_NULL(fw_status))
goto out_unlock;
+ IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
+ le32_to_cpu(fw_status->wakeup_reasons));
+
status.pattern_number = le16_to_cpu(fw_status->pattern_number);
for (i = 0; i < 8; i++)
status.qos_seq_ctr[i] =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 8fae7e707374..3395c4675988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -174,7 +174,7 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
return -EINVAL;
- if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
+ if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations)
return -EINVAL;
if (drain < 0 || drain > 1)
return -EINVAL;
@@ -403,7 +403,7 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
pos += scnprintf(buf + pos, bufsz - pos, "%.2d: ", i);
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 5ca45915cf7c..a0ce761d0c59 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -76,6 +76,103 @@ struct iwl_mvm_loc_entry {
u8 buf[];
};
+struct iwl_mvm_smooth_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ s64 rtt_avg;
+ u64 host_time;
+};
+
+struct iwl_mvm_ftm_pasn_entry {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ u8 cipher;
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+};
+
+int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len)
+{
+ struct iwl_mvm_ftm_pasn_entry *pasn = kzalloc(sizeof(*pasn),
+ GFP_KERNEL);
+ u32 expected_tk_len;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!pasn)
+ return -ENOBUFS;
+
+ pasn->cipher = iwl_mvm_cipher_to_location_cipher(cipher);
+
+ switch (pasn->cipher) {
+ case IWL_LOCATION_CIPHER_CCMP_128:
+ case IWL_LOCATION_CIPHER_GCMP_128:
+ expected_tk_len = WLAN_KEY_LEN_CCMP;
+ break;
+ case IWL_LOCATION_CIPHER_GCMP_256:
+ expected_tk_len = WLAN_KEY_LEN_GCMP_256;
+ break;
+ default:
+ goto out;
+ }
+
+ /*
+ * If associated to this AP and already have security context,
+ * the TK is already configured for this station, so it
+ * shouldn't be set again here.
+ */
+ if (vif->bss_conf.assoc &&
+ !memcmp(addr, vif->bss_conf.bssid, ETH_ALEN)) {
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
+ if (!IS_ERR_OR_NULL(sta) && sta->mfp)
+ expected_tk_len = 0;
+ rcu_read_unlock();
+ }
+
+ if (tk_len != expected_tk_len || hltk_len != sizeof(pasn->hltk)) {
+ IWL_ERR(mvm, "Invalid key length: tk_len=%u hltk_len=%u\n",
+ tk_len, hltk_len);
+ goto out;
+ }
+
+ memcpy(pasn->addr, addr, sizeof(pasn->addr));
+ memcpy(pasn->hltk, hltk, sizeof(pasn->hltk));
+
+ if (tk && tk_len)
+ memcpy(pasn->tk, tk, sizeof(pasn->tk));
+
+ list_add_tail(&pasn->list, &mvm->ftm_initiator.pasn_list);
+ return 0;
+out:
+ kfree(pasn);
+ return -EINVAL;
+}
+
+void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry, *prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry_safe(entry, prev, &mvm->ftm_initiator.pasn_list,
+ list) {
+ if (memcmp(entry->addr, addr, sizeof(entry->addr)))
+ continue;
+
+ list_del(&entry->list);
+ kfree(entry);
+ return;
+ }
+}
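
Both PASN list walks rely on the _safe iterator so the current node can be
freed mid-walk. A minimal self-contained sketch of the pattern (kernel
context, needs <linux/list.h> and <linux/slab.h>; the struct name is made up):

struct example_entry {
	struct list_head list;
	u8 addr[6];
};

static void example_remove_all(struct list_head *head)
{
	struct example_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, head, list) {
		list_del(&e->list);
		kfree(e);	/* safe: the iterator already cached the next node */
	}
}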
+
static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
{
struct iwl_mvm_loc_entry *e, *t;
@@ -84,6 +181,7 @@ static void iwl_mvm_ftm_reset(struct iwl_mvm *mvm)
mvm->ftm_initiator.req_wdev = NULL;
memset(mvm->ftm_initiator.responses, 0,
sizeof(mvm->ftm_initiator.responses));
+
list_for_each_entry_safe(e, t, &mvm->ftm_initiator.loc_list, list) {
list_del(&e->list);
kfree(e);
@@ -120,6 +218,30 @@ void iwl_mvm_ftm_restart(struct iwl_mvm *mvm)
iwl_mvm_ftm_reset(mvm);
}
+void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm)
+{
+ INIT_LIST_HEAD(&mvm->ftm_initiator.smooth.resp);
+
+ IWL_DEBUG_INFO(mvm,
+ "enable=%u, alpha=%u, age_jiffies=%u, thresh=(%u:%u)\n",
+ IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * HZ,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT,
+ IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT);
+}
+
+void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm)
+{
+ struct iwl_mvm_smooth_entry *se, *st;
+
+ list_for_each_entry_safe(se, st, &mvm->ftm_initiator.smooth.resp,
+ list) {
+ list_del(&se->list);
+ kfree(se);
+ }
+}
+
static int
iwl_ftm_range_request_status_to_err(enum iwl_tof_range_request_status s)
{
@@ -166,7 +288,7 @@ static void iwl_mvm_ftm_cmd_v5(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static void iwl_mvm_ftm_cmd_common(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_tof_range_req_cmd *cmd,
+ struct iwl_tof_range_req_cmd_v9 *cmd,
struct cfg80211_pmsr_request *req)
{
int i;
@@ -335,7 +457,7 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
static void
iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+ struct iwl_tof_range_req_ap_entry_v6 *target)
{
memcpy(target->bssid, peer->addr, ETH_ALEN);
target->burst_period =
@@ -411,7 +533,7 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
static int
iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry *target)
+ struct iwl_tof_range_req_ap_entry_v6 *target)
{
int ret;
@@ -421,7 +543,7 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (ret)
return ret;
- iwl_mvm_ftm_put_target_common(mvm, peer, (void *)target);
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
if (vif->bss_conf.assoc &&
!memcmp(peer->addr, vif->bss_conf.bssid, ETH_ALEN)) {
@@ -539,7 +661,7 @@ static int iwl_mvm_ftm_start_v8(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
- struct iwl_tof_range_req_cmd cmd;
+ struct iwl_tof_range_req_cmd_v9 cmd;
struct iwl_host_cmd hcmd = {
.id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
.dataflags[0] = IWL_HCMD_DFL_DUP,
@@ -553,7 +675,7 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
for (i = 0; i < cmd.num_of_ap; i++) {
struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
- struct iwl_tof_range_req_ap_entry *target = &cmd.ap[i];
+ struct iwl_tof_range_req_ap_entry_v6 *target = &cmd.ap[i];
err = iwl_mvm_ftm_put_target(mvm, vif, peer, target);
if (err)
@@ -563,6 +685,93 @@ static int iwl_mvm_ftm_start_v9(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
+static void iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_tof_range_req_ap_entry_v6 *target = data;
+
+ if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
+ return;
+
+ WARN_ON(!sta->mfp);
+
+ if (WARN_ON(key->keylen > sizeof(target->tk)))
+ return;
+
+ memcpy(target->tk, key->key, key->keylen);
+ target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
+ WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
+}
+
+static void
+iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_tof_range_req_ap_entry_v7 *target)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry;
+ u32 flags = le32_to_cpu(target->initiator_ap_flags);
+
+ if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
+ IWL_INITIATOR_AP_FLAGS_TB)))
+ return;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
+ if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
+ continue;
+
+ target->cipher = entry->cipher;
+ memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
+
+ if (vif->bss_conf.assoc &&
+ !memcmp(vif->bss_conf.bssid, target->bssid,
+ sizeof(target->bssid)))
+ ieee80211_iter_keys(mvm->hw, vif, iter, target);
+ else
+ memcpy(target->tk, entry->tk, sizeof(target->tk));
+
+ memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
+ memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
+
+ target->initiator_ap_flags |=
+ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
+ return;
+ }
+}
+
+static int iwl_mvm_ftm_start_v11(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v11 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RANGE_REQ_CMD, LOCATION_GROUP, 0),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v7 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target(mvm, vif, peer, (void *)target);
+ if (err)
+ return err;
+
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
@@ -577,9 +786,13 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (new_api) {
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RANGE_REQ_CMD);
+ TOF_RANGE_REQ_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
+ case 11:
+ err = iwl_mvm_ftm_start_v11(mvm, vif, req);
+ break;
case 9:
case 10:
err = iwl_mvm_ftm_start_v9(mvm, vif, req);
@@ -696,6 +909,95 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
return 0;
}
+static void iwl_mvm_ftm_rtt_smoothing(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_result *res)
+{
+ struct iwl_mvm_smooth_entry *resp;
+ s64 rtt_avg, rtt = res->ftm.rtt_avg;
+ u32 undershoot, overshoot;
+ u8 alpha;
+ bool found;
+
+ if (!IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH)
+ return;
+
+ WARN_ON(rtt < 0);
+
+ if (res->status != NL80211_PMSR_STATUS_SUCCESS) {
+ IWL_DEBUG_INFO(mvm,
+ ": %pM: ignore failed measurement. Status=%u\n",
+ res->addr, res->status);
+ return;
+ }
+
+ found = false;
+ list_for_each_entry(resp, &mvm->ftm_initiator.smooth.resp, list) {
+ if (!memcmp(res->addr, resp->addr, ETH_ALEN)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp)
+ return;
+
+ memcpy(resp->addr, res->addr, ETH_ALEN);
+ list_add_tail(&resp->list, &mvm->ftm_initiator.smooth.resp);
+
+ resp->rtt_avg = rtt;
+
+ IWL_DEBUG_INFO(mvm, "new: %pM: rtt_avg=%lld\n",
+ resp->addr, resp->rtt_avg);
+ goto update_time;
+ }
+
+ if (res->host_time - resp->host_time >
+ IWL_MVM_FTM_INITIATOR_SMOOTH_AGE_SEC * 1000000000) {
+ resp->rtt_avg = rtt;
+
+ IWL_DEBUG_INFO(mvm, "expired: %pM: rtt_avg=%lld\n",
+ resp->addr, resp->rtt_avg);
+ goto update_time;
+ }
+
+ /* Smooth the results based on the tracked RTT average */
+ undershoot = IWL_MVM_FTM_INITIATOR_SMOOTH_UNDERSHOOT;
+ overshoot = IWL_MVM_FTM_INITIATOR_SMOOTH_OVERSHOOT;
+ alpha = IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA;
+
+ rtt_avg = (alpha * rtt + (100 - alpha) * resp->rtt_avg) / 100;
+
+ IWL_DEBUG_INFO(mvm,
+ "%pM: prev rtt_avg=%lld, new rtt_avg=%lld, rtt=%lld\n",
+ resp->addr, resp->rtt_avg, rtt_avg, rtt);
+
+ /*
+ * update the responder's average RTT results regardless of
+ * the under/over shoot logic below
+ */
+ resp->rtt_avg = rtt_avg;
+
+ /* smooth the results */
+ if (rtt_avg > rtt && (rtt_avg - rtt) > undershoot) {
+ res->ftm.rtt_avg = rtt_avg;
+
+ IWL_DEBUG_INFO(mvm,
+ "undershoot: val=%lld\n",
+ (rtt_avg - rtt));
+ } else if (rtt_avg < rtt && (rtt - rtt_avg) >
+ overshoot) {
+ res->ftm.rtt_avg = rtt_avg;
+ IWL_DEBUG_INFO(mvm,
+ "overshoot: val=%lld\n",
+ (rtt - rtt_avg));
+ }
+
+update_time:
+ resp->host_time = res->host_time;
+}
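
The filter above is a plain exponentially weighted moving average. A hedged
numeric walk-through with the default IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA of 40
(i.e. 40% weight on the new sample):

/*
 *	prev rtt_avg = 10000 ps, new rtt = 20000 ps
 *	rtt_avg = (40 * 20000 + 60 * 10000) / 100 = 14000 ps
 *
 * The reported res->ftm.rtt_avg is only replaced by the smoothed value
 * when the raw sample deviates from it by more than the under/overshoot
 * thresholds (20016 ps by default).
 */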
+
static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
struct cfg80211_pmsr_result *res)
{
@@ -715,12 +1017,31 @@ static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
IWL_DEBUG_INFO(mvm, "\tdistance: %lld\n", rtt_avg);
}
+static void
+iwl_mvm_ftm_pasn_update_pn(struct iwl_mvm *mvm,
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap)
+{
+ struct iwl_mvm_ftm_pasn_entry *entry;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
+ if (memcmp(fw_ap->bssid, entry->addr, sizeof(entry->addr)))
+ continue;
+
+ memcpy(entry->rx_pn, fw_ap->rx_pn, sizeof(entry->rx_pn));
+ memcpy(entry->tx_pn, fw_ap->tx_pn, sizeof(entry->tx_pn));
+ return;
+ }
+}
+
void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_tof_range_rsp_ntfy_v5 *fw_resp_v5 = (void *)pkt->data;
struct iwl_tof_range_rsp_ntfy_v6 *fw_resp_v6 = (void *)pkt->data;
- struct iwl_tof_range_rsp_ntfy *fw_resp = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v7 *fw_resp_v7 = (void *)pkt->data;
+ struct iwl_tof_range_rsp_ntfy_v8 *fw_resp_v8 = (void *)pkt->data;
int i;
bool new_api = fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_FTM_NEW_RANGE_REQ);
@@ -733,12 +1054,12 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
}
if (new_api) {
- if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp->request_id,
- fw_resp->num_of_aps))
+ if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v8->request_id,
+ fw_resp_v8->num_of_aps))
return;
- num_of_aps = fw_resp->num_of_aps;
- last_in_batch = fw_resp->last_report;
+ num_of_aps = fw_resp_v8->num_of_aps;
+ last_in_batch = fw_resp_v8->last_report;
} else {
if (iwl_mvm_ftm_range_resp_valid(mvm, fw_resp_v5->request_id,
fw_resp_v5->num_of_aps))
@@ -754,17 +1075,21 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
for (i = 0; i < num_of_aps && i < IWL_MVM_TOF_MAX_APS; i++) {
struct cfg80211_pmsr_result result = {};
- struct iwl_tof_range_rsp_ap_entry_ntfy *fw_ap;
+ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 *fw_ap;
int peer_idx;
if (new_api) {
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_FTM_RTT_ACCURACY))
- fw_ap = &fw_resp->ap[i];
- else
+ if (mvm->cmd_ver.range_resp == 8) {
+ fw_ap = &fw_resp_v8->ap[i];
+ iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
+ } else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_FTM_RTT_ACCURACY)) {
+ fw_ap = (void *)&fw_resp_v7->ap[i];
+ } else {
fw_ap = (void *)&fw_resp_v6->ap[i];
+ }
- result.final = fw_resp->ap[i].last_burst;
+ result.final = fw_ap->last_burst;
result.ap_tsf = le32_to_cpu(fw_ap->start_tsf);
result.ap_tsf_valid = 1;
} else {
@@ -830,6 +1155,8 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_ftm_get_lci_civic(mvm, &result);
+ iwl_mvm_ftm_rtt_smoothing(mvm, &result);
+
cfg80211_pmsr_report(mvm->ftm_initiator.req_wdev,
mvm->ftm_initiator.req,
&result, GFP_KERNEL);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 0b6c32098b5a..dd3662b9a5bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -6,7 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -27,7 +27,7 @@
* BSD LICENSE
*
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -62,6 +62,18 @@
#include "mvm.h"
#include "constants.h"
+struct iwl_mvm_pasn_sta {
+ struct list_head list;
+ struct iwl_mvm_int_sta int_sta;
+ u8 addr[ETH_ALEN];
+};
+
+struct iwl_mvm_pasn_hltk_data {
+ u8 *addr;
+ u8 cipher;
+ u8 *hltk;
+};
+
static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
u8 *bw, u8 *ctrl_ch_position)
{
@@ -137,7 +149,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
.sta_id = mvmvif->bcast_sta.sta_id,
};
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
- TOF_RESPONDER_CONFIG_CMD);
+ TOF_RESPONDER_CONFIG_CMD, 6);
int err;
lockdep_assert_held(&mvm->mutex);
@@ -162,11 +174,11 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
}
static int
-iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct ieee80211_ftm_responder_params *params)
+iwl_mvm_ftm_responder_dyn_cfg_v2(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params)
{
- struct iwl_tof_responder_dyn_config_cmd cmd = {
+ struct iwl_tof_responder_dyn_config_cmd_v2 cmd = {
.lci_len = cpu_to_le32(params->lci_len + 2),
.civic_len = cpu_to_le32(params->civicloc_len + 2),
};
@@ -207,6 +219,171 @@ iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd(mvm, &hcmd);
}
+static int
+iwl_mvm_ftm_responder_dyn_cfg_v3(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params,
+ struct iwl_mvm_pasn_hltk_data *hltk_data)
+{
+ struct iwl_tof_responder_dyn_config_cmd cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = iwl_cmd_id(TOF_RESPONDER_DYN_CONFIG_CMD,
+ LOCATION_GROUP, 0),
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ /* may not be able to DMA from stack */
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ };
+
+ lockdep_assert_held(&mvm->mutex);
+
+ cmd.valid_flags = 0;
+
+ if (params) {
+ if (params->lci_len + 2 > sizeof(cmd.lci_buf) ||
+ params->civicloc_len + 2 > sizeof(cmd.civic_buf)) {
+ IWL_ERR(mvm,
+ "LCI/civic data too big (lci=%zd, civic=%zd)\n",
+ params->lci_len, params->civicloc_len);
+ return -ENOBUFS;
+ }
+
+ cmd.lci_buf[0] = WLAN_EID_MEASURE_REPORT;
+ cmd.lci_buf[1] = params->lci_len;
+ memcpy(cmd.lci_buf + 2, params->lci, params->lci_len);
+ cmd.lci_len = params->lci_len + 2;
+
+ cmd.civic_buf[0] = WLAN_EID_MEASURE_REPORT;
+ cmd.civic_buf[1] = params->civicloc_len;
+ memcpy(cmd.civic_buf + 2, params->civicloc,
+ params->civicloc_len);
+ cmd.civic_len = params->civicloc_len + 2;
+
+ cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_LCI |
+ IWL_RESPONDER_DYN_CFG_VALID_CIVIC;
+ }
+
+ if (hltk_data) {
+ if (hltk_data->cipher > IWL_LOCATION_CIPHER_GCMP_256) {
+ IWL_ERR(mvm, "invalid cipher: %u\n",
+ hltk_data->cipher);
+ return -EINVAL;
+ }
+
+ cmd.cipher = hltk_data->cipher;
+ memcpy(cmd.addr, hltk_data->addr, sizeof(cmd.addr));
+ memcpy(cmd.hltk_buf, hltk_data->hltk, sizeof(cmd.hltk_buf));
+ cmd.valid_flags |= IWL_RESPONDER_DYN_CFG_VALID_PASN_STA;
+ }
+
+ return iwl_mvm_send_cmd(mvm, &hcmd);
+}
+
+static int
+iwl_mvm_ftm_responder_dyn_cfg_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ftm_responder_params *params)
+{
+ int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+
+ switch (cmd_ver) {
+ case 2:
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v2(mvm, vif,
+ params);
+ break;
+ case 3:
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif,
+ params, NULL);
+ break;
+ default:
+ IWL_ERR(mvm, "Unsupported DYN_CONFIG_CMD version %u\n",
+ cmd_ver);
+ ret = -ENOTSUPP;
+ }
+
+ return ret;
+}
+
+static void iwl_mvm_resp_del_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_pasn_sta *sta)
+{
+ list_del(&sta->list);
+ iwl_mvm_rm_sta_id(mvm, vif, sta->int_sta.sta_id);
+ iwl_mvm_dealloc_int_sta(mvm, &sta->int_sta);
+ kfree(sta);
+}
+
+int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len)
+{
+ int ret;
+ struct iwl_mvm_pasn_sta *sta = NULL;
+ struct iwl_mvm_pasn_hltk_data hltk_data = {
+ .addr = addr,
+ .hltk = hltk,
+ };
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RESPONDER_DYN_CONFIG_CMD, 2);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (cmd_ver < 3) {
+ IWL_ERR(mvm, "Adding PASN station not supported by FW\n");
+ return -ENOTSUPP;
+ }
+
+ hltk_data.cipher = iwl_mvm_cipher_to_location_cipher(cipher);
+ if (hltk_data.cipher == IWL_LOCATION_CIPHER_INVALID) {
+ IWL_ERR(mvm, "invalid cipher: %u\n", cipher);
+ return -EINVAL;
+ }
+
+ if (tk && tk_len) {
+ sta = kzalloc(sizeof(*sta), GFP_KERNEL);
+ if (!sta)
+ return -ENOBUFS;
+
+ ret = iwl_mvm_add_pasn_sta(mvm, vif, &sta->int_sta, addr,
+ cipher, tk, tk_len);
+ if (ret) {
+ kfree(sta);
+ return ret;
+ }
+
+ memcpy(sta->addr, addr, ETH_ALEN);
+ list_add_tail(&sta->list, &mvm->resp_pasn_list);
+ }
+
+ ret = iwl_mvm_ftm_responder_dyn_cfg_v3(mvm, vif, NULL, &hltk_data);
+ if (ret && sta)
+ iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
+
+ return ret;
+}
+
+int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct iwl_mvm_pasn_sta *sta, *prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list) {
+ if (!memcmp(sta->addr, addr, ETH_ALEN)) {
+ iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
+ return 0;
+ }
+ }
+
+ IWL_ERR(mvm, "FTM: PASN station %pM not found\n", addr);
+ return -EINVAL;
+}
+
int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -255,12 +432,24 @@ int iwl_mvm_ftm_start_responder(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
+void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_pasn_sta *sta, *prev;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ list_for_each_entry_safe(sta, prev, &mvm->resp_pasn_list, list)
+ iwl_mvm_resp_del_pasn_sta(mvm, vif, sta);
+}
+
void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
if (!vif->bss_conf.ftm_responder)
return;
+ iwl_mvm_ftm_responder_clear(mvm, vif);
iwl_mvm_ftm_start_responder(mvm, vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 95a613537047..6385b9641126 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -70,6 +70,7 @@
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "fw/acpi.h"
+#include "fw/pnvm.h"
#include "mvm.h"
#include "fw/dbg.h"
@@ -77,8 +78,8 @@
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
-#define MVM_UCODE_ALIVE_TIMEOUT HZ
-#define MVM_UCODE_CALIB_TIMEOUT (2*HZ)
+#define MVM_UCODE_ALIVE_TIMEOUT (HZ)
+#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
#define UCODE_VALID_OK cpu_to_le32(0x1)
@@ -132,7 +133,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm)
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
- /* Do not configure default queue, it is configured via context info */
+ /*
+ * The default queue is configured via context info, so if we
+ * have a single queue, there's nothing to do here.
+ */
+ if (mvm->trans->num_rx_queues == 1)
+ return 0;
+
+ /* skip the default queue */
num_queues = mvm->trans->num_rx_queues - 1;
size = struct_size(cmd, data, num_queues);
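
Here struct_size() sizes the command including its flexible data[] array; a
sketch of what it computes (the overflow saturation it also performs is
omitted):

/*
 *	size = sizeof(*cmd) + num_queues * sizeof(cmd->data[0]);
 *
 * so a command covering e.g. 7 non-default RX queues is allocated as
 * one contiguous buffer in a single allocation.
 */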
@@ -210,25 +218,55 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
struct iwl_mvm *mvm =
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
- struct mvm_alive_resp_v3 *palive3;
- struct mvm_alive_resp *palive;
struct iwl_umac_alive *umac;
struct iwl_lmac_alive *lmac1;
struct iwl_lmac_alive *lmac2 = NULL;
u16 status;
- u32 lmac_error_event_table, umac_error_event_table;
+ u32 lmac_error_event_table, umac_error_table;
+
+ /*
+ * For v5 and above, we can check the version, for older
+ * versions we need to check the size.
+ */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ UCODE_ALIVE_NTFY, 0) == 5) {
+ struct iwl_alive_ntf_v5 *palive;
- if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
palive = (void *)pkt->data;
umac = &palive->umac_data;
lmac1 = &palive->lmac_data[0];
lmac2 = &palive->lmac_data[1];
status = le16_to_cpu(palive->status);
- } else {
+
+ mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
+ mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
+ mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);
+
+ IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
+ mvm->trans->sku_id[0],
+ mvm->trans->sku_id[1],
+ mvm->trans->sku_id[2]);
+ } else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
+ struct iwl_alive_ntf_v4 *palive;
+
+ palive = (void *)pkt->data;
+ umac = &palive->umac_data;
+ lmac1 = &palive->lmac_data[0];
+ lmac2 = &palive->lmac_data[1];
+ status = le16_to_cpu(palive->status);
+ } else if (iwl_rx_packet_payload_len(pkt) ==
+ sizeof(struct iwl_alive_ntf_v3)) {
+ struct iwl_alive_ntf_v3 *palive3;
+
palive3 = (void *)pkt->data;
umac = &palive3->umac_data;
lmac1 = &palive3->lmac_data;
status = le16_to_cpu(palive3->status);
+ } else {
+ WARN(1, "unsupported alive notification (size %d)\n",
+ iwl_rx_packet_payload_len(pkt));
+ /* get timeout later */
+ return false;
}
lmac_error_event_table =
@@ -239,26 +277,22 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
mvm->trans->dbg.lmac_error_event_table[1] =
le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);
- umac_error_event_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
+ umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr);
- if (!umac_error_event_table) {
- mvm->support_umac_log = false;
- } else if (umac_error_event_table >=
- mvm->trans->cfg->min_umac_error_event_table) {
- mvm->support_umac_log = true;
- } else {
- IWL_ERR(mvm,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- umac_error_event_table,
- (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
- "Init" : "RT");
- mvm->support_umac_log = false;
+ if (umac_error_table) {
+ if (umac_error_table >=
+ mvm->trans->cfg->min_umac_error_event_table) {
+ iwl_fw_umac_set_alive_err_table(mvm->trans,
+ umac_error_table);
+ } else {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ umac_error_table,
+ (mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
+ "Init" : "RT");
+ }
}
- if (mvm->support_umac_log)
- iwl_fw_umac_set_alive_err_table(mvm->trans,
- umac_error_event_table);
-
alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
alive_data->valid = status == IWL_ALIVE_STATUS_OK;
@@ -310,7 +344,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
const struct fw_img *fw;
int ret;
enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
- static const u16 alive_cmd[] = { MVM_ALIVE };
+ static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
bool run_in_rfkill =
ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
@@ -390,6 +424,13 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
return -EIO;
}
+ ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait);
+ if (ret) {
+ IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
+ iwl_fw_set_current_image(&mvm->fwrt, old_type);
+ return ret;
+ }
+
iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);
/*
@@ -590,7 +631,8 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
mvm->fw->default_calib[ucode_type].flow_trigger;
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- PHY_CONFIGURATION_CMD);
+ PHY_CONFIGURATION_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver == 3) {
iwl_mvm_phy_filter_init(mvm, &phy_filters);
memcpy(&phy_cfg_cmd.phy_specific_cfg, &phy_filters,
@@ -740,28 +782,42 @@ static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
#ifdef CONFIG_ACPI
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
- union {
- struct iwl_dev_tx_power_cmd v5;
- struct iwl_dev_tx_power_cmd_v4 v4;
- } cmd = {
- .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
+ struct iwl_dev_tx_power_cmd cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
};
+ __le16 *per_chain;
int ret;
u16 len = 0;
-
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_REDUCE_TX_POWER))
+ u32 n_subbands;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ REDUCE_TX_POWER_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
+
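+ /* pick the per-chain table layout matching the fw command version */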
+ if (cmd_ver == 6) {
+ len = sizeof(cmd.v6);
+ n_subbands = IWL_NUM_SUB_BANDS_V2;
+ per_chain = cmd.v6.per_chain[0][0];
+ } else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
len = sizeof(cmd.v5);
- else if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
- len = sizeof(struct iwl_dev_tx_power_cmd_v4);
- else
- len = sizeof(cmd.v4.v3);
+ n_subbands = IWL_NUM_SUB_BANDS;
+ per_chain = cmd.v5.per_chain[0][0];
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
+ len = sizeof(cmd.v4);
+ n_subbands = IWL_NUM_SUB_BANDS;
+ per_chain = cmd.v4.per_chain[0][0];
+ } else {
+ len = sizeof(cmd.v3);
+ n_subbands = IWL_NUM_SUB_BANDS;
+ per_chain = cmd.v3.per_chain[0][0];
+ }
+ /* all structs have the same common part, add it */
+ len += sizeof(cmd.common);
- ret = iwl_sar_select_profile(&mvm->fwrt,
- cmd.v5.v3.per_chain_restriction,
- prof_a, prof_b);
+ ret = iwl_sar_select_profile(&mvm->fwrt, per_chain, ACPI_SAR_NUM_TABLES,
+ n_subbands, prof_a, prof_b);
/* return on error or if the profile is disabled (positive number) */
if (ret)
@@ -773,21 +829,26 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
- union geo_tx_power_profiles_cmd geo_tx_cmd;
+ union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
+ struct iwl_geo_tx_power_profiles_resp *resp;
u16 len;
int ret;
struct iwl_host_cmd cmd;
-
- if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- geo_tx_cmd.geo_cmd.ops =
- cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_tx_cmd.geo_cmd);
- } else {
- geo_tx_cmd.geo_cmd_v1.ops =
- cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- len = sizeof(geo_tx_cmd.geo_cmd_v1);
- }
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ GEO_TX_POWER_LIMIT,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ /* the ops field is at the same spot for all versions, so set it in v1 */
+ geo_tx_cmd.v1.ops =
+ cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
+
+ if (cmd_ver == 3)
+ len = sizeof(geo_tx_cmd.v3);
+ else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER))
+ len = sizeof(geo_tx_cmd.v2);
+ else
+ len = sizeof(geo_tx_cmd.v1);
if (!iwl_sar_geo_support(&mvm->fwrt))
return -EOPNOTSUPP;
@@ -804,21 +865,55 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
return ret;
}
- ret = iwl_validate_sar_geo_profile(&mvm->fwrt, &cmd);
+
+ resp = (void *)cmd.resp_pkt->data;
+ ret = le32_to_cpu(resp->profile_idx);
+
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES))
+ ret = -EIO;
+
iwl_free_resp(&cmd);
return ret;
}
static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
{
- u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
- union geo_tx_power_profiles_cmd cmd;
+ union iwl_geo_tx_power_profiles_cmd cmd;
u16 len;
+ u32 n_bands;
int ret;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ GEO_TX_POWER_LIMIT,
+ IWL_FW_CMD_VER_UNKNOWN);
- cmd.geo_cmd.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops));
+ /* the ops field is at the same spot for all versions, so set it in v1 */
+ cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
+
+ if (cmd_ver == 3) {
+ len = sizeof(cmd.v3);
+ n_bands = ARRAY_SIZE(cmd.v3.table[0]);
+ cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ } else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
+ IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
+ len = sizeof(cmd.v2);
+ n_bands = ARRAY_SIZE(cmd.v2.table[0]);
+ cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ } else {
+ len = sizeof(cmd.v1);
+ n_bands = ARRAY_SIZE(cmd.v1.table[0]);
+ }
+
+ BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table));
+ /* the table is at the same position for all versions, so use v1 */
+ ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], n_bands);
- ret = iwl_sar_geo_init(&mvm->fwrt, cmd.geo_cmd.table);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
* but in that case there is no need to send the command.
@@ -826,42 +921,61 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
if (ret)
return 0;
- cmd.geo_cmd.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
-
- if (!fw_has_api(&mvm->fwrt.fw->ucode_capa,
- IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
- len = sizeof(struct iwl_geo_tx_power_profiles_cmd_v1);
- } else {
- len = sizeof(cmd.geo_cmd);
- }
-
- return iwl_mvm_send_cmd_pdu(mvm, cmd_wide_id, 0, len, &cmd);
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+ 0, len, &cmd);
}
static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
{
union acpi_object *wifi_pkg, *data, *enabled;
- int i, j, ret, tbl_rev;
+ union iwl_ppag_table_cmd ppag_table;
+ int i, j, ret, tbl_rev, num_sub_bands;
int idx = 2;
+ s8 *gain;
- mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
+ /*
+ * The 'enabled' field is the same in v1 and v2 so we can just
+ * use v1 to access it.
+ */
+ mvm->fwrt.ppag_table.v1.enabled = cpu_to_le32(0);
data = iwl_acpi_get_object(mvm->dev, ACPI_PPAG_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
+ /* try to read ppag table revision 1 */
wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
- ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
-
- if (IS_ERR(wifi_pkg)) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
+ ACPI_PPAG_WIFI_DATA_SIZE_V2, &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 1) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = mvm->fwrt.ppag_table.v2.gain[0];
+ mvm->fwrt.ppag_ver = 2;
+ IWL_DEBUG_RADIO(mvm, "Reading PPAG table v2 (tbl_rev=1)\n");
+ goto read_table;
}
- if (tbl_rev != 0) {
- ret = -EINVAL;
- goto out_free;
+ /* try to read ppag table revision 0 */
+ wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
+ ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (tbl_rev != 0) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_sub_bands = IWL_NUM_SUB_BANDS;
+ gain = mvm->fwrt.ppag_table.v1.gain[0];
+ mvm->fwrt.ppag_ver = 1;
+ IWL_DEBUG_RADIO(mvm, "Reading PPAG table v1 (tbl_rev=0)\n");
+ goto read_table;
}
+ ret = PTR_ERR(wifi_pkg);
+ goto out_free;
+read_table:
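+ /* the 'enabled' flag is the second element of the ACPI package */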
enabled = &wifi_pkg->package.elements[1];
if (enabled->type != ACPI_TYPE_INTEGER ||
(enabled->integer.value != 0 && enabled->integer.value != 1)) {
@@ -869,8 +983,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
goto out_free;
}
- mvm->fwrt.ppag_table.enabled = cpu_to_le32(enabled->integer.value);
- if (!mvm->fwrt.ppag_table.enabled) {
+ ppag_table.v1.enabled = cpu_to_le32(enabled->integer.value);
+ if (!ppag_table.v1.enabled) {
ret = 0;
goto out_free;
}
@@ -880,8 +994,8 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
* first sub-band (j=0) corresponds to Low-Band (2.4GHz), and the
* following sub-bands to High-Band (5GHz).
*/
- for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
- for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
union acpi_object *ent;
ent = &wifi_pkg->package.elements[idx++];
@@ -890,11 +1004,11 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
(j == 0 && ent->integer.value < ACPI_PPAG_MIN_LB) ||
(j != 0 && ent->integer.value > ACPI_PPAG_MAX_HB) ||
(j != 0 && ent->integer.value < ACPI_PPAG_MIN_HB)) {
- mvm->fwrt.ppag_table.enabled = cpu_to_le32(0);
+ ppag_table.v1.enabled = cpu_to_le32(0);
ret = -EINVAL;
goto out_free;
}
- mvm->fwrt.ppag_table.gain[i][j] = ent->integer.value;
+ gain[i * num_sub_bands + j] = ent->integer.value;
}
}
ret = 0;
@@ -905,34 +1019,56 @@ out_free:
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
- int i, j, ret;
+ u8 cmd_ver;
+ int i, j, ret, num_sub_bands, cmd_size;
+ union iwl_ppag_table_cmd ppag_table;
+ s8 *gain;
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
IWL_DEBUG_RADIO(mvm,
"PPAG capability not supported by FW, command not sent.\n");
return 0;
}
-
- if (!mvm->fwrt.ppag_table.enabled) {
- IWL_DEBUG_RADIO(mvm,
- "PPAG not enabled, command not sent.\n");
+ if (!mvm->fwrt.ppag_table.v1.enabled) {
+ IWL_DEBUG_RADIO(mvm, "PPAG not enabled, command not sent.\n");
return 0;
}
- IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ PER_PLATFORM_ANT_GAIN_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1) {
+ num_sub_bands = IWL_NUM_SUB_BANDS;
+ gain = mvm->fwrt.ppag_table.v1.gain[0];
+ cmd_size = sizeof(ppag_table.v1);
+ if (mvm->fwrt.ppag_ver == 2) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG table is v2 but FW supports v1, sending truncated table\n");
+ }
+ } else if (cmd_ver == 2) {
+ num_sub_bands = IWL_NUM_SUB_BANDS_V2;
+ gain = mvm->fwrt.ppag_table.v2.gain[0];
+ cmd_size = sizeof(ppag_table.v2);
+ if (mvm->fwrt.ppag_ver == 1) {
+ IWL_DEBUG_RADIO(mvm,
+ "PPAG table is v1 but FW supports v2, sending padded table\n");
+ }
+ } else {
+ IWL_DEBUG_RADIO(mvm, "Unsupported PPAG command version\n");
+ return 0;
+ }
- for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) {
- for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) {
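+ /* the gain table is stored flat: chains x sub-bands */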
+ for (i = 0; i < IWL_NUM_CHAIN_LIMITS; i++) {
+ for (j = 0; j < num_sub_bands; j++) {
IWL_DEBUG_RADIO(mvm,
"PPAG table: chain[%d] band[%d]: gain = %d\n",
- i, j, mvm->fwrt.ppag_table.gain[i][j]);
+ i, j, gain[i * num_sub_bands + j]);
}
}
-
+ IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
PER_PLATFORM_ANT_GAIN_CMD),
- 0, sizeof(mvm->fwrt.ppag_table),
- &mvm->fwrt.ppag_table);
+ 0, cmd_size, &ppag_table);
if (ret < 0)
IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
ret);
@@ -989,41 +1125,90 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
-static bool iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
+static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm)
{
int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
DSM_FUNC_ENABLE_INDONESIA_5G2);
- IWL_DEBUG_RADIO(mvm,
- "Evaluated DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
- ret);
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n",
+ ret);
+
+ else if (ret >= DSM_VALUE_INDONESIA_MAX)
+ IWL_DEBUG_RADIO(mvm,
+ "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n",
+ ret);
- return ret == 1;
+ else if (ret == DSM_VALUE_INDONESIA_ENABLE) {
+ IWL_DEBUG_RADIO(mvm,
+ "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n");
+ return DSM_VALUE_INDONESIA_ENABLE;
+ }
+ /* default behaviour is disabled */
+ return DSM_VALUE_INDONESIA_DISABLE;
+}
+
+static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm)
+{
+ int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0,
+ DSM_FUNC_DISABLE_SRD);
+
+ if (ret < 0)
+ IWL_DEBUG_RADIO(mvm,
+ "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n",
+ ret);
+
+ else if (ret >= DSM_VALUE_SRD_MAX)
+ IWL_DEBUG_RADIO(mvm,
+ "DSM function DISABLE_SRD return invalid value, ret=%d\n",
+ ret);
+
+ else if (ret == DSM_VALUE_SRD_PASSIVE) {
+ IWL_DEBUG_RADIO(mvm,
+ "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n");
+ return DSM_VALUE_SRD_PASSIVE;
+
+ } else if (ret == DSM_VALUE_SRD_DISABLE) {
+ IWL_DEBUG_RADIO(mvm,
+ "Evaluated DSM function DISABLE_SRD: disabling SRD\n");
+ return DSM_VALUE_SRD_DISABLE;
+ }
+ /* default behaviour is active */
+ return DSM_VALUE_SRD_ACTIVE;
}
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
- int ret;
+ u8 ret;
+ int cmd_ret;
struct iwl_lari_config_change_cmd cmd = {};
- if (iwl_mvm_eval_dsm_indonesia_5g2(mvm))
+ if (iwl_mvm_eval_dsm_indonesia_5g2(mvm) == DSM_VALUE_INDONESIA_ENABLE)
cmd.config_bitmap |=
cpu_to_le32(LARI_CONFIG_ENABLE_5G2_IN_INDONESIA_MSK);
+ ret = iwl_mvm_eval_dsm_disable_srd(mvm);
+ if (ret == DSM_VALUE_SRD_PASSIVE)
+ cmd.config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_PASSIVE_MSK);
+
+ else if (ret == DSM_VALUE_SRD_DISABLE)
+ cmd.config_bitmap |=
+ cpu_to_le32(LARI_CONFIG_CHANGE_ETSI_TO_DISABLED_MSK);
+
/* apply more config masks here */
if (cmd.config_bitmap) {
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x\n",
- le32_to_cpu(cmd.config_bitmap));
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE),
- 0, sizeof(cmd), &cmd);
- if (ret < 0)
+ IWL_DEBUG_RADIO(mvm, "sending LARI_CONFIG_CHANGE\n");
+ cmd_ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE),
+ 0, sizeof(cmd), &cmd);
+ if (cmd_ret < 0)
IWL_DEBUG_RADIO(mvm,
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
- ret);
+ cmd_ret);
}
}
#else /* CONFIG_ACPI */
@@ -1260,7 +1445,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
}
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
@@ -1274,10 +1459,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- /* Add auxiliary station for scanning */
- ret = iwl_mvm_add_aux_sta(mvm);
- if (ret)
- goto error;
+ /*
+ * Add auxiliary station for scanning.
+ * Newer versions of this command imply that the fw uses an
+ * internal aux station for all aux activities that don't
+ * require a dedicated data queue.
+ */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA,
+ 0) < 12) {
+ /*
+ * In old versions the aux station uses a mac id, like
+ * other stations, and not an lmac id
+ */
+ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
+ if (ret)
+ goto error;
+ }
/* Add all the PHY contexts */
i = 0;
@@ -1383,6 +1581,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
iwl_mvm_tas_init(mvm);
iwl_mvm_leds_sync(mvm);
+ iwl_mvm_ftm_initiator_smooth_config(mvm);
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
@@ -1421,13 +1621,24 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
goto error;
/* init the fw <-> mac80211 STA mapping */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++)
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++)
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
- /* Add auxiliary station for scanning */
- ret = iwl_mvm_add_aux_sta(mvm);
- if (ret)
- goto error;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA,
+ 0) < 12) {
+ /*
+ * Add auxiliary station for scanning.
+ * Newer versions of this command imply that the fw uses an
+ * internal aux station for all aux activities that don't
+ * require a dedicated data queue.
+ * In old versions the aux station uses a mac id, like
+ * other stations, and not an lmac id.
+ */
+ ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
+ if (ret)
+ goto error;
+ }
return 0;
error:
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index b78992e341d5..cbdebefb854a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -665,7 +663,7 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
* allow multicast data frames only as long as the station is
* authorized, i.e., GTK keys are already installed (if needed)
*/
- if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
struct ieee80211_sta *sta;
rcu_read_lock();
@@ -704,8 +702,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) {
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
- if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT)
+ if (vif->bss_conf.twt_requester && IWL_MVM_USE_TWT) {
ctxt_sta->data_policy |= cpu_to_le32(TWT_SUPPORTED);
+ if (vif->bss_conf.twt_protected)
+ ctxt_sta->data_policy |=
+ cpu_to_le32(PROTECTED_TWT_SUPPORTED);
+ }
}
@@ -1300,8 +1302,8 @@ static void iwl_mvm_csa_count_down(struct iwl_mvm *mvm,
mvmvif->csa_countdown = true;
- if (!ieee80211_csa_is_complete(csa_vif)) {
- int c = ieee80211_csa_update_counter(csa_vif);
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
+ int c = ieee80211_beacon_update_cntdwn(csa_vif);
iwl_mvm_mac_ctxt_beacon_changed(mvm, csa_vif);
if (csa_vif->p2p &&
@@ -1543,7 +1545,7 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
if (notif->csa_counter != IWL_PROBE_RESP_DATA_NO_CSA &&
notif->csa_counter >= 1)
- ieee80211_csa_set_counter(vif, notif->csa_counter);
+ ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
}
void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
@@ -1595,9 +1597,7 @@ void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
RCU_INIT_POINTER(mvm->csa_vif, NULL);
return;
case NL80211_IFTYPE_STATION:
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
- iwl_mvm_csa_client_absent(mvm, vif);
+ iwl_mvm_csa_client_absent(mvm, vif);
cancel_delayed_work(&mvmvif->csa_work);
ieee80211_chswitch_done(vif, true);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9374c85c5caf..688c1125e67b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -234,6 +234,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mcc_update_resp *resp;
+ u8 resp_ver;
IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);
@@ -252,13 +253,16 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
status == MCC_RESP_ILLEGAL);
}
+ resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ MCC_UPDATE_CMD, 0);
+ IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver);
regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
__le32_to_cpu(resp->n_channels),
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- __le16_to_cpu(resp->cap));
+ __le16_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
kfree(resp);
@@ -662,14 +666,17 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ WOWLAN_KEK_KCK_MATERIAL,
+ IWL_FW_CMD_VER_UNKNOWN) == 3)
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
if (fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_SCAN_START_TIME);
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_BSS_PARENT_TSF);
- wiphy_ext_feature_set(hw->wiphy,
- NL80211_EXT_FEATURE_SET_SCAN_DWELL);
}
if (iwl_mvm_is_oce_supported(mvm)) {
@@ -753,6 +760,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);
+ if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT))
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_PROTECTED_TWT);
+
hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm);
hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm);
@@ -813,7 +824,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
iwl_mvm_vif_from_mac80211(info->control.vif);
u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);
- if (ap_sta_id < IWL_MVM_STATION_COUNT) {
+ if (ap_sta_id < mvm->fw->ucode_capa.num_stations) {
/* mac80211 holds rcu read lock */
sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
if (IS_ERR_OR_NULL(sta))
@@ -1203,6 +1214,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
+ iwl_mvm_ftm_initiator_smooth_stop(mvm);
+
/* firmware counters are obviously reset now, but we shouldn't
* partially track so also clear the fw_reset_accu counters.
*/
@@ -1210,13 +1223,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* async_handlers_wk is now blocked */
- /*
- * The work item could be running or queued if the
- * ROC time event stops just as we get here.
- */
- flush_work(&mvm->roc_done_wk);
-
- iwl_mvm_rm_aux_sta(mvm);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA, 0) < 12)
+ iwl_mvm_rm_aux_sta(mvm);
iwl_mvm_stop_device(mvm);
@@ -1271,6 +1279,12 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
+ /*
+ * The work item could be running or queued if the
+ * ROC time event stops just as we get here.
+ */
+ flush_work(&mvm->roc_done_wk);
+
mutex_lock(&mvm->mutex);
__iwl_mvm_mac_stop(mvm);
mutex_unlock(&mvm->mutex);
@@ -1300,27 +1314,32 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
s16 tx_power)
{
int len;
- union {
- struct iwl_dev_tx_power_cmd v5;
- struct iwl_dev_tx_power_cmd_v4 v4;
- } cmd = {
- .v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
- .v5.v3.mac_context_id =
+ struct iwl_dev_tx_power_cmd cmd = {
+ .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
+ .common.mac_context_id =
cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
- .v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
+ .common.pwr_restriction = cpu_to_le16(8 * tx_power),
};
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ REDUCE_TX_POWER_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
- cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+ cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_REDUCE_TX_POWER))
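+ /* pick the command length matching the fw command version */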
+ if (cmd_ver == 6)
+ len = sizeof(cmd.v6);
+ else if (fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_REDUCE_TX_POWER))
len = sizeof(cmd.v5);
else if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
len = sizeof(cmd.v4);
else
- len = sizeof(cmd.v4.v3);
+ len = sizeof(cmd.v3);
+
+ /* all structs have the same common part, add it */
+ len += sizeof(cmd.common);
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
@@ -1352,9 +1371,7 @@ static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
goto out_unlock;
}
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
- iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
+ iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
@@ -1392,10 +1409,14 @@ static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id);
mutex_lock(&mvm->mutex);
- WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(MAC_CONF_GROUP,
- CHANNEL_SWITCH_TIME_EVENT_CMD),
- 0, sizeof(cmd), &cmd));
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD))
+ iwl_mvm_remove_csa_period(mvm, vif);
+ else
+ WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(MAC_CONF_GROUP,
+ CHANNEL_SWITCH_TIME_EVENT_CMD),
+ 0, sizeof(cmd), &cmd));
mutex_unlock(&mvm->mutex);
WARN_ON(iwl_mvm_post_channel_switch(hw, vif));
@@ -2629,6 +2650,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_update_quotas(mvm, false, NULL);
+ iwl_mvm_ftm_responder_clear(mvm, vif);
+
/*
* This is not very nice, but the simplest:
* For older FWs removing the mcast sta before the bcast station may
@@ -2864,7 +2887,7 @@ void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
struct iwl_mvm_sta *mvmsta;
bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE);
- if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+ if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations))
return;
rcu_read_lock();
@@ -3428,15 +3451,16 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
*/
if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
ret = -EOPNOTSUPP;
- else
- ret = 0;
+ break;
+ }
if (key->cipher != WLAN_CIPHER_SUITE_GCMP &&
key->cipher != WLAN_CIPHER_SUITE_GCMP_256 &&
!iwl_mvm_has_new_tx_api(mvm)) {
key->hw_key_idx = STA_KEY_IDX_INVALID;
+ ret = 0;
break;
}
@@ -3452,6 +3476,8 @@ static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
if (i >= ARRAY_SIZE(mvmvif->ap_early_keys))
ret = -ENOSPC;
+ else
+ ret = 0;
break;
}
@@ -3693,9 +3719,12 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
tail->apply_time_max_delay = cpu_to_le32(delay);
IWL_DEBUG_TE(mvm,
- "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
- channel->hw_value, req_dur, duration, delay,
- dtim_interval);
+ "ROC: Requesting to remain on channel %u for %ums\n",
+ channel->hw_value, req_dur);
+ IWL_DEBUG_TE(mvm,
+ "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n",
+ duration, delay, dtim_interval);
+
/* Set the node address */
memcpy(tail->node_addr, vif->addr, ETH_ALEN);
@@ -3780,6 +3809,17 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) {
/* Use aux roc framework (HS20) */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12) {
+ u32 lmac_id;
+
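+ /* from ADD_STA v12 the aux station is bound to an lmac */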
+ lmac_id = iwl_mvm_get_lmac_id(mvm->fw,
+ channel->band);
+ ret = iwl_mvm_add_aux_sta(mvm, lmac_id);
+ if (WARN(ret,
+ "Failed to allocate aux station"))
+ goto out_unlock;
+ }
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
vif, duration);
goto out_unlock;
@@ -4658,7 +4698,7 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
}
mutex_lock(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
struct ieee80211_sta *sta;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
@@ -4700,7 +4740,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -4714,7 +4754,7 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls);
if (drop) {
- if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0))
+ if (iwl_mvm_flush_sta(mvm, mvmsta, false))
IWL_ERR(mvm, "flush request fail\n");
} else {
msk |= mvmsta->tfd_queue_msk;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index e2f7f6ec711e..7159d1da3e77 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -184,11 +184,6 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
-union geo_tx_power_profiles_cmd {
- struct iwl_geo_tx_power_profiles_cmd geo_cmd;
- struct iwl_geo_tx_power_profiles_cmd_v1 geo_cmd_v1;
-};
-
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
@@ -421,7 +416,11 @@ struct iwl_mvm_vif {
#ifdef CONFIG_PM
/* WoWLAN GTK rekey data */
struct {
- u8 kck[NL80211_KCK_LEN], kek[NL80211_KEK_LEN];
+ u8 kck[NL80211_KCK_EXT_LEN];
+ u8 kek[NL80211_KEK_EXT_LEN];
+ size_t kek_len;
+ size_t kck_len;
+ u32 akm;
__le64 replay_ctr;
bool valid;
} rekey_data;
@@ -852,7 +851,6 @@ struct iwl_mvm {
bool hw_registered;
bool rfkill_safe_init_done;
- bool support_umac_log;
u32 ampdu_ref;
bool ampdu_toggle;
@@ -890,7 +888,7 @@ struct iwl_mvm {
/* data related to data path */
struct iwl_rx_phy_info last_phy_info;
- struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
+ struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -1113,10 +1111,17 @@ struct iwl_mvm {
struct wireless_dev *req_wdev;
struct list_head loc_list;
int responses[IWL_MVM_TOF_MAX_APS];
+ struct {
+ struct list_head resp;
+ } smooth;
+ struct list_head pasn_list;
} ftm_initiator;
+ struct list_head resp_pasn_list;
+
struct {
u8 d0i3_resp;
+ u8 range_resp;
} cmd_ver;
struct ieee80211_vif *nan_vif;
@@ -1200,7 +1205,7 @@ iwl_mvm_sta_from_staid_rcu(struct iwl_mvm *mvm, u8 sta_id)
{
struct ieee80211_sta *sta;
- if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ if (sta_id >= mvm->fw->ucode_capa.num_stations)
return NULL;
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
@@ -1217,7 +1222,7 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
{
struct ieee80211_sta *sta;
- if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ if (sta_id >= mvm->fw->ucode_capa.num_stations)
return NULL;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
@@ -1523,7 +1528,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status);
static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
#endif
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, u32 flags);
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags);
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal);
int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
u16 tids, u32 flags);
@@ -1996,6 +2001,14 @@ void iwl_mvm_ftm_restart_responder(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
void iwl_mvm_ftm_responder_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+int iwl_mvm_ftm_resp_remove_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, u8 *addr);
+int iwl_mvm_ftm_respoder_add_pasn_sta(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len);
+void iwl_mvm_ftm_responder_clear(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
/* FTM initiator */
void iwl_mvm_ftm_restart(struct iwl_mvm *mvm);
@@ -2006,6 +2019,12 @@ void iwl_mvm_ftm_lc_notif(struct iwl_mvm *mvm,
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *request);
void iwl_mvm_ftm_abort(struct iwl_mvm *mvm, struct cfg80211_pmsr_request *req);
+void iwl_mvm_ftm_initiator_smooth_config(struct iwl_mvm *mvm);
+void iwl_mvm_ftm_initiator_smooth_stop(struct iwl_mvm *mvm);
+int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
+ u8 *hltk, u32 hltk_len);
+void iwl_mvm_ftm_remove_pasn_sta(struct iwl_mvm *mvm, u8 *addr);
/* TDLS */
@@ -2146,8 +2165,24 @@ iwl_mvm_set_chan_info_chandef(struct iwl_mvm *mvm,
static inline int iwl_umac_scan_get_max_profiles(const struct iwl_fw *fw)
{
u8 ver = iwl_fw_lookup_cmd_ver(fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_OFFLOAD_UPDATE_PROFILES_CMD);
+ SCAN_OFFLOAD_UPDATE_PROFILES_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
return (ver == IWL_FW_CMD_VER_UNKNOWN || ver < 3) ?
IWL_SCAN_MAX_PROFILES : IWL_SCAN_MAX_PROFILES_V2;
}
+
+static inline
+enum iwl_location_cipher iwl_mvm_cipher_to_location_cipher(u32 cipher)
+{
+ switch (cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ return IWL_LOCATION_CIPHER_CCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP:
+ return IWL_LOCATION_CIPHER_GCMP_128;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ return IWL_LOCATION_CIPHER_GCMP_256;
+ default:
+ return IWL_LOCATION_CIPHER_INVALID;
+ }
+}
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d095ff847be9..f1c5b3a9c26f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -133,6 +133,7 @@ module_exit(iwl_mvm_exit);
static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ struct iwl_trans_debug *dbg = &mvm->trans->dbg;
u8 radio_cfg_type, radio_cfg_step, radio_cfg_dash;
u32 reg_val = 0;
u32 phy_config = iwl_mvm_get_phy_config(mvm);
@@ -169,7 +170,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
reg_val |= CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI;
- if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt))
+ if (iwl_fw_dbg_is_d3_debug_enabled(&mvm->fwrt) ||
+ (iwl_trans_dbg_ini_valid(mvm->trans) &&
+ dbg->fw_mon_cfg[IWL_FW_INI_ALLOCATION_ID_INTERNAL].buf_location))
reg_val |= CSR_HW_IF_CONFIG_REG_D3_DEBUG;
iwl_trans_set_bits_mask(mvm->trans, CSR_HW_IF_CONFIG_REG,
@@ -319,7 +323,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
- HCMD_NAME(MVM_ALIVE),
+ HCMD_NAME(UCODE_ALIVE_NTFY),
HCMD_NAME(REPLY_ERROR),
HCMD_NAME(ECHO_CMD),
HCMD_NAME(INIT_COMPLETE_NOTIF),
@@ -463,15 +467,6 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
-static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
- HCMD_NAME(DBGC_SUSPEND_RESUME),
- HCMD_NAME(BUFFER_ALLOCATION),
- HCMD_NAME(MFU_ASSERT_DUMP_NTF),
-};
-
-/* Please keep this array *SORTED* by hex value.
- * Access is done through binary search
- */
static const struct iwl_hcmd_names iwl_mvm_location_names[] = {
HCMD_NAME(TOF_RANGE_REQ_CMD),
HCMD_NAME(TOF_CONFIG_CMD),
@@ -622,11 +617,12 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
enum iwl_amsdu_size rb_size_default;
/*
- * We use IWL_MVM_STATION_COUNT to check the validity of the station
+ * We use IWL_MVM_STATION_COUNT_MAX to check the validity of the station
* index all over the driver - check that its value corresponds to the
* array size.
*/
- BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) !=
+ IWL_MVM_STATION_COUNT_MAX);
/********************************
* 1. Allocating and configuring HW data
@@ -695,6 +691,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->async_handlers_list);
spin_lock_init(&mvm->time_event_lock);
INIT_LIST_HEAD(&mvm->ftm_initiator.loc_list);
+ INIT_LIST_HEAD(&mvm->ftm_initiator.pasn_list);
+ INIT_LIST_HEAD(&mvm->resp_pasn_list);
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
@@ -724,6 +722,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (WARN_ON_ONCE(mvm->cmd_ver.d0i3_resp > 1))
goto out_free;
+ mvm->cmd_ver.range_resp =
+ iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_RESPONSE_NOTIF, 5);
+ /* we only support up to version 8 */
+ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 8))
+ goto out_free;
+
/*
* Populate the state variables that the transport layer needs
* to know about.
@@ -1106,7 +1111,7 @@ static void iwl_mvm_queue_state_change(struct iwl_op_mode *op_mode,
mvm->tvqm_info[hw_queue].sta_id :
mvm->queue_info[hw_queue].ra_sta_id;
- if (WARN_ON_ONCE(sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id)))
+ if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
return;
rcu_read_lock();
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 0243dbe8ac49..bf2fc44dcb8d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -7,8 +7,8 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -125,30 +125,19 @@ u8 iwl_mvm_get_ctrl_pos(struct cfg80211_chan_def *chandef)
*/
static void iwl_mvm_phy_ctxt_cmd_hdr(struct iwl_mvm_phy_ctxt *ctxt,
struct iwl_phy_context_cmd *cmd,
- u32 action, u32 apply_time)
+ u32 action)
{
- memset(cmd, 0, sizeof(struct iwl_phy_context_cmd));
-
cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(ctxt->id,
ctxt->color));
cmd->action = cpu_to_le32(action);
- cmd->apply_time = cpu_to_le32(apply_time);
}
-/*
- * Add the phy configuration to the PHY context command
- */
-static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
- struct iwl_phy_context_cmd *cmd,
- struct cfg80211_chan_def *chandef,
- u8 chains_static, u8 chains_dynamic)
+static void iwl_mvm_phy_ctxt_set_rxchain(struct iwl_mvm *mvm,
+ __le32 *rxchain_info,
+ u8 chains_static,
+ u8 chains_dynamic)
{
u8 active_cnt, idle_cnt;
- struct iwl_phy_context_cmd_tail *tail =
- iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
-
- /* Set the channel info data */
- iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
/* Set rx the chains */
idle_cnt = chains_static;
@@ -166,20 +155,56 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
active_cnt = 2;
}
- tail->rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
+ *rxchain_info = cpu_to_le32(iwl_mvm_get_valid_rx_ant(mvm) <<
PHY_RX_CHAIN_VALID_POS);
- tail->rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
- tail->rxchain_info |= cpu_to_le32(active_cnt <<
+ *rxchain_info |= cpu_to_le32(idle_cnt << PHY_RX_CHAIN_CNT_POS);
+ *rxchain_info |= cpu_to_le32(active_cnt <<
PHY_RX_CHAIN_MIMO_CNT_POS);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (unlikely(mvm->dbgfs_rx_phyinfo))
- tail->rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
+ *rxchain_info = cpu_to_le32(mvm->dbgfs_rx_phyinfo);
#endif
+}
+
+/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data_v1(struct iwl_mvm *mvm,
+ struct iwl_phy_context_cmd_v1 *cmd,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ struct iwl_phy_context_cmd_tail *tail =
+ iwl_mvm_chan_info_cmd_tail(mvm, &cmd->ci);
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, &tail->rxchain_info,
+ chains_static, chains_dynamic);
tail->txchain_info = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
}
/*
+ * Add the phy configuration to the PHY context command
+ */
+static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
+ struct iwl_phy_context_cmd *cmd,
+ struct cfg80211_chan_def *chandef,
+ u8 chains_static, u8 chains_dynamic)
+{
+ cmd->lmac_id = cpu_to_le32(iwl_mvm_get_lmac_id(mvm->fw,
+ chandef->chan->band));
+
+ /* Set the channel info data */
+ iwl_mvm_set_chan_info_chandef(mvm, &cmd->ci, chandef);
+
+ iwl_mvm_phy_ctxt_set_rxchain(mvm, &cmd->rxchain_info,
+ chains_static, chains_dynamic);
+}
+
+/*
 * Send a command to apply the current phy configuration. The command is sent
* only if something in the configuration changed: in case that this is the
* first time that the phy configuration is applied or in case that the phy
@@ -189,20 +214,46 @@ static int iwl_mvm_phy_ctxt_apply(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *ctxt,
struct cfg80211_chan_def *chandef,
u8 chains_static, u8 chains_dynamic,
- u32 action, u32 apply_time)
+ u32 action)
{
- struct iwl_phy_context_cmd cmd;
int ret;
- u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
-
- /* Set the command header fields */
- iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action, apply_time);
+ int ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
+ PHY_CONTEXT_CMD, 1);
+
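+ /* v3 carries the channel and chain info in the command itself */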
+ if (ver == 3) {
+ struct iwl_phy_context_cmd cmd = {};
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt, &cmd, action);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
+ chains_static,
+ chains_dynamic);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
+ 0, sizeof(cmd), &cmd);
+ } else if (ver < 3) {
+ struct iwl_phy_context_cmd_v1 cmd = {};
+ u16 len = sizeof(cmd) - iwl_mvm_chan_info_padding(mvm);
+
+ /* Set the command header fields */
+ iwl_mvm_phy_ctxt_cmd_hdr(ctxt,
+ (struct iwl_phy_context_cmd *)&cmd,
+ action);
+
+ /* Set the command data */
+ iwl_mvm_phy_ctxt_cmd_data_v1(mvm, &cmd, chandef,
+ chains_static,
+ chains_dynamic);
+ ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD,
+ 0, len, &cmd);
+ } else {
+ IWL_ERR(mvm, "PHY ctxt cmd error ver %d not supported\n", ver);
+ return -EOPNOTSUPP;
+ }
- /* Set the command data */
- iwl_mvm_phy_ctxt_cmd_data(mvm, &cmd, chandef,
- chains_static, chains_dynamic);
- ret = iwl_mvm_send_cmd_pdu(mvm, PHY_CONTEXT_CMD, 0, len, &cmd);
if (ret)
IWL_ERR(mvm, "PHY ctxt cmd error. ret=%d\n", ret);
return ret;
@@ -223,7 +274,7 @@ int iwl_mvm_phy_ctxt_add(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- FW_CTXT_ACTION_ADD, 0);
+ FW_CTXT_ACTION_ADD);
}
/*
@@ -257,7 +308,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
/* ... remove it here ...*/
ret = iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- FW_CTXT_ACTION_REMOVE, 0);
+ FW_CTXT_ACTION_REMOVE);
if (ret)
return ret;
@@ -269,7 +320,7 @@ int iwl_mvm_phy_ctxt_changed(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt,
ctxt->width = chandef->width;
return iwl_mvm_phy_ctxt_apply(mvm, ctxt, chandef,
chains_static, chains_dynamic,
- action, 0);
+ action);
}
void iwl_mvm_phy_ctxt_unref(struct iwl_mvm *mvm, struct iwl_mvm_phy_ctxt *ctxt)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 6f4d241d47e9..e0e80906fdc6 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -195,14 +195,20 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
{
u16 supp;
int i, highest_mcs;
- u8 nss = sta->rx_nss;
+ u8 max_nss = sta->rx_nss;
+ struct ieee80211_vht_cap ieee_vht_cap = {
+ .vht_cap_info = cpu_to_le32(vht_cap->cap),
+ .supp_mcs = vht_cap->vht_mcs,
+ };
/* the station support only a single receive chain */
if (sta->smps_mode == IEEE80211_SMPS_STATIC)
- nss = 1;
+ max_nss = 1;
- for (i = 0; i < nss && i < IWL_TLC_NSS_MAX; i++) {
- highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, i + 1);
+ for (i = 0; i < max_nss && i < IWL_TLC_NSS_MAX; i++) {
+ int nss = i + 1;
+
+ highest_mcs = rs_fw_vht_highest_rx_mcs_index(vht_cap, nss);
if (!highest_mcs)
continue;
@@ -211,7 +217,15 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
supp &= ~BIT(IWL_TLC_MNG_HT_RATE_MCS9);
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160] = cpu_to_le16(supp);
- if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ /*
+ * Check if VHT extended NSS indicates that the bandwidth/NSS
+ * configuration is supported - only for MCS 0, since we already
+ * decoded the MCS bits ourselves anyway.
+ */
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160 &&
+ ieee80211_get_vht_max_nss(&ieee_vht_cap,
+ IEEE80211_VHT_CHANWIDTH_160MHZ,
+ 0, true, nss) >= nss)
cmd->ht_rates[i][IWL_TLC_HT_BW_160] =
cmd->ht_rates[i][IWL_TLC_HT_BW_NONE_160];
}
@@ -454,6 +468,12 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
.amsdu = iwl_mvm_is_csum_supported(mvm),
};
int ret;
+ u16 cmd_size = sizeof(cfg_cmd);
+
+ /* In old versions of the API the struct is 4 bytes smaller */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_CONFIG_CMD, 0) < 3)
+ cmd_size -= 4;
memset(lq_sta, 0, offsetof(typeof(*lq_sta), pers));
@@ -468,7 +488,7 @@ void rs_fw_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
*/
sta->max_amsdu_len = max_amsdu_len;
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, sizeof(cfg_cmd),
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
&cfg_cmd);
if (ret)
IWL_ERR(mvm, "Failed to send rate scale config (%d)\n", ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 86b2ebb5d5fb..ed7382e7ea17 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -830,6 +830,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
return ucode_rate;
}
+ /* Set RTS protection for all non-legacy rates.
+ * This helps in congested environments by reducing the conflict cost
+ * to RTS retries only, instead of the entire BA packet.
+ */
+ ucode_rate |= RATE_MCS_RTS_REQUIRED_MSK;
+
if (is_ht(rate)) {
if (index < IWL_FIRST_HT_RATE || index > IWL_LAST_HT_RATE) {
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 77b8def26edb..0059c83c2783 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -420,7 +420,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
id >>= RX_MDPU_RES_STATUS_STA_ID_SHIFT;
- if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+ if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -569,7 +569,8 @@ struct iwl_mvm_stat_data {
__le32 flags;
__le32 mac_id;
u8 beacon_filter_average_energy;
- void *general;
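+ /* point directly at the version-specific arrays in the notification */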
+ __le32 *beacon_counter;
+ u8 *beacon_average_energy;
};
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -589,23 +590,10 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
* data copied into the "data" struct, but rather the data from
* the notification directly.
*/
- if (iwl_mvm_has_new_rx_stats_api(mvm)) {
- struct mvm_statistics_general *general =
- data->general;
-
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(general->beacon_counter[vif_id]);
- mvmvif->beacon_stats.avg_signal =
- -general->beacon_average_energy[vif_id];
- } else {
- struct mvm_statistics_general_v8 *general =
- data->general;
-
- mvmvif->beacon_stats.num_beacons =
- le32_to_cpu(general->beacon_counter[vif_id]);
- mvmvif->beacon_stats.avg_signal =
- -general->beacon_average_energy[vif_id];
- }
+ mvmvif->beacon_stats.num_beacons =
+ le32_to_cpu(data->beacon_counter[vif_id]);
+ mvmvif->beacon_stats.avg_signal =
+ -data->beacon_average_energy[vif_id];
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
@@ -701,18 +689,136 @@ iwl_mvm_rx_stats_check_trigger(struct iwl_mvm *mvm, struct iwl_rx_packet *pkt)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, NULL);
}
+static void iwl_mvm_update_avg_energy(struct iwl_mvm *mvm,
+ u8 energy[IWL_MVM_STATION_COUNT_MAX])
+{
+ int i;
+
+ if (WARN_ONCE(mvm->fw->ucode_capa.num_stations >
+ IWL_MVM_STATION_COUNT_MAX,
+ "Driver and FW station count mismatch %d\n",
+ mvm->fw->ucode_capa.num_stations))
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
+ struct iwl_mvm_sta *sta;
+
+ if (!energy[i])
+ continue;
+
+ sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
+ if (!sta)
+ continue;
+ sta->avg_energy = energy[i];
+ }
+ rcu_read_unlock();
+}
+
+static void
+iwl_mvm_update_tcm_from_stats(struct iwl_mvm *mvm, __le32 *air_time_le,
+ __le32 *rx_bytes_le)
+{
+ int i;
+
+ spin_lock(&mvm->tcm.lock);
+ for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
+ struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
+ u32 rx_bytes = le32_to_cpu(rx_bytes_le[i]);
+ u32 airtime = le32_to_cpu(air_time_le[i]);
+
+ mdata->rx.airtime += airtime;
+ mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
+ if (airtime) {
+ /* re-init every time to store rate from FW */
+ ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
+ ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
+ rx_bytes * 8 / airtime);
+ }
+ }
+ spin_unlock(&mvm->tcm.lock);
+}
+
+static void
+iwl_mvm_handle_rx_statistics_tlv(struct iwl_mvm *mvm,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_mvm_stat_data data = {
+ .mvm = mvm,
+ };
+ u8 beacon_average_energy[MAC_INDEX_AUX];
+ u8 average_energy[IWL_MVM_STATION_COUNT_MAX];
+ struct iwl_statistics_operational_ntfy *stats;
+ int expected_size;
+ __le32 flags;
+ int i;
+
+ expected_size = sizeof(*stats);
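+ /* the notification must be at least as large as the fixed struct */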
+ if (WARN_ONCE(iwl_rx_packet_payload_len(pkt) < expected_size,
+ "received invalid statistics size (%d)!, expected_size: %d\n",
+ iwl_rx_packet_payload_len(pkt), expected_size))
+ return;
+
+ stats = (void *)&pkt->data;
+
+ if (WARN_ONCE(stats->hdr.type != FW_STATISTICS_OPERATIONAL ||
+ stats->hdr.version != 1,
+ "received unsupported hdr type %d, version %d\n",
+ stats->hdr.type, stats->hdr.version))
+ return;
+
+ flags = stats->flags;
+ mvm->radio_stats.rx_time = le64_to_cpu(stats->rx_time);
+ mvm->radio_stats.tx_time = le64_to_cpu(stats->tx_time);
+ mvm->radio_stats.on_time_rf = le64_to_cpu(stats->on_time_rf);
+ mvm->radio_stats.on_time_scan = le64_to_cpu(stats->on_time_scan);
+
+ iwl_mvm_rx_stats_check_trigger(mvm, pkt);
+
+ data.mac_id = stats->mac_id;
+ data.beacon_filter_average_energy =
+ le32_to_cpu(stats->beacon_filter_average_energy);
+ data.flags = flags;
+ data.beacon_counter = stats->beacon_counter;
+ for (i = 0; i < ARRAY_SIZE(beacon_average_energy); i++)
+ beacon_average_energy[i] =
+ le32_to_cpu(stats->beacon_average_energy[i]);
+
+ data.beacon_average_energy = beacon_average_energy;
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator,
+ &data);
+
+ for (i = 0; i < ARRAY_SIZE(average_energy); i++)
+ average_energy[i] = le32_to_cpu(stats->average_energy[i]);
+ iwl_mvm_update_avg_energy(mvm, average_energy);
+
+ /*
+ * Don't update in case the statistics are not cleared, since
+ * we would end up counting the same airtime twice: once in the
+ * TCM request and once in the statistics notification.
+ */
+ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ iwl_mvm_update_tcm_from_stats(mvm, stats->air_time,
+ stats->rx_bytes);
+}
+
void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_mvm_stat_data data = {
.mvm = mvm,
};
+ __le32 *bytes, *air_time, flags;
int expected_size;
- int i;
u8 *energy;
- __le32 *bytes;
- __le32 *air_time;
- __le32 flags;
+
+ /* From ver 14 and up we use TLV statistics format */
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
+ STATISTICS_CMD, 0) >= 14)
+ return iwl_mvm_handle_rx_statistics_tlv(mvm, pkt);
if (!iwl_mvm_has_new_rx_stats_api(mvm)) {
if (iwl_mvm_has_new_rx_api(mvm))
@@ -746,8 +852,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.common.on_time_scan);
- data.general = &stats->general;
-
+ data.beacon_counter = stats->general.beacon_counter;
+ data.beacon_average_energy =
+ stats->general.beacon_average_energy;
flags = stats->flag;
} else {
struct iwl_notif_statistics *stats = (void *)&pkt->data;
@@ -767,8 +874,9 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
mvm->radio_stats.on_time_scan =
le64_to_cpu(stats->general.common.on_time_scan);
- data.general = &stats->general;
-
+ data.beacon_counter = stats->general.beacon_counter;
+ data.beacon_average_energy =
+ stats->general.beacon_average_energy;
flags = stats->flag;
}
data.flags = flags;
@@ -797,45 +905,16 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
air_time = (void *)&stats->load_stats.air_time;
}
- rcu_read_lock();
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
- struct iwl_mvm_sta *sta;
-
- if (!energy[i])
- continue;
-
- sta = iwl_mvm_sta_from_staid_rcu(mvm, i);
- if (!sta)
- continue;
- sta->avg_energy = energy[i];
- }
- rcu_read_unlock();
+ iwl_mvm_update_avg_energy(mvm, energy);
/*
 * Don't update in case the statistics are not cleared, since
 * we would end up counting the same airtime twice: once in the
 * TCM request and once in the statistics notification.
*/
- if (!(le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR))
- return;
-
- spin_lock(&mvm->tcm.lock);
- for (i = 0; i < NUM_MAC_INDEX_DRIVER; i++) {
- struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[i];
- u32 airtime = le32_to_cpu(air_time[i]);
- u32 rx_bytes = le32_to_cpu(bytes[i]);
+ if (le32_to_cpu(flags) & IWL_STATISTICS_REPLY_FLG_CLEAR)
+ iwl_mvm_update_tcm_from_stats(mvm, air_time, bytes);
- mdata->uapsd_nonagg_detect.rx_bytes += rx_bytes;
- if (airtime) {
- /* re-init every time to store rate from FW */
- ewma_rate_init(&mdata->uapsd_nonagg_detect.rate);
- ewma_rate_add(&mdata->uapsd_nonagg_detect.rate,
- rx_bytes * 8 / airtime);
- }
-
- mdata->rx.airtime += airtime;
- }
- spin_unlock(&mvm->tcm.lock);
}
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index c15f7dbc9516..838734fec502 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,31 @@ static int iwl_mvm_create_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
skb_put_data(skb, hdr, hdrlen);
skb_put_data(skb, (u8 *)hdr + hdrlen + pad_len, headlen - hdrlen);
+ /*
+ * If we did CHECKSUM_COMPLETE, the hardware only does it right for
+ * certain cases and starts the checksum after the SNAP header. Check
+ * if this is one of those cases - for the rest it's easier to just
+ * bail out to CHECKSUM_NONE, since such packets are rare, even
+ * though the hardware did calculate a checksum for them too (just
+ * starting after the MAC header instead).
+ */
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ struct {
+ u8 hdr[6];
+ __be16 type;
+ } __packed *shdr = (void *)((u8 *)hdr + hdrlen + pad_len);
+
+ if (unlikely(headlen - hdrlen < sizeof(*shdr) ||
+ !ether_addr_equal(shdr->hdr, rfc1042_header) ||
+ (shdr->type != htons(ETH_P_IP) &&
+ shdr->type != htons(ETH_P_ARP) &&
+ shdr->type != htons(ETH_P_IPV6) &&
+ shdr->type != htons(ETH_P_8021Q) &&
+ shdr->type != htons(ETH_P_PAE) &&
+ shdr->type != htons(ETH_P_TDLS))))
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
fraglen = len - headlen;
if (fraglen) {
@@ -308,7 +333,7 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct iwl_rx_mpdu_desc *desc,
u32 pkt_flags, int queue, u8 *crypt_len)
{
- u16 status = le16_to_cpu(desc->status);
+ u32 status = le32_to_cpu(desc->status);
/*
* Drop UNKNOWN frames in aggregation, unless in monitor mode
@@ -393,22 +418,36 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
return 0;
}
-static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
+static void iwl_mvm_rx_csum(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct sk_buff *skb,
- struct iwl_rx_mpdu_desc *desc)
+ struct iwl_rx_packet *pkt)
{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
- u16 flags = le16_to_cpu(desc->l3l4_flags);
- u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
- IWL_RX_L3_PROTO_POS);
-
- if (mvmvif->features & NETIF_F_RXCSUM &&
- flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
- (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
- l3_prot == IWL_RX_L3_TYPE_IPV6 ||
- l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
+
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ if (pkt->len_n_flags & cpu_to_le32(FH_RSCSR_RPA_EN)) {
+ u16 hwsum = be16_to_cpu(desc->v3.raw_xsum);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = csum_unfold(~(__force __sum16)hwsum);
+ }
+ } else {
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_vif *mvmvif;
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+ IWL_RX_L3_PROTO_POS);
+
+ mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+
+ if (mvmvif->features & NETIF_F_RXCSUM &&
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
}
/*
@@ -1668,10 +1707,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
* Keep packets with CRC errors (and with overrun) for monitor mode
* (otherwise the firmware discards them) but mark them as bad.
*/
- if (!(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_CRC_OK)) ||
- !(desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
+ if (!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_CRC_OK)) ||
+ !(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_OVERRUN_OK))) {
IWL_DEBUG_RX(mvm, "Bad CRC or FIFO: 0x%08X.\n",
- le16_to_cpu(desc->status));
+ le32_to_cpu(desc->status));
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* set the preamble flag if appropriate */
@@ -1731,10 +1770,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
- if (desc->status & cpu_to_le16(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
- u8 id = desc->sta_id_flags & IWL_RX_MPDU_SIF_STA_ID_MASK;
+ if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
+ u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
- if (!WARN_ON_ONCE(id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) {
+ if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
if (IS_ERR(sta))
sta = NULL;
@@ -1796,7 +1835,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
if (ieee80211_is_data(hdr->frame_control))
- iwl_mvm_rx_csum(sta, skb, desc);
+ iwl_mvm_rx_csum(mvm, sta, skb, pkt);
if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
kfree_skb(skb);
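
On AX210 and later, the descriptor carries a folded 16-bit checksum in raw_xsum, and the new iwl_mvm_rx_csum() path above hands the stack CHECKSUM_COMPLETE via csum_unfold(~hwsum); the SNAP/ethertype test added to iwl_mvm_create_skb() then demotes the rare frames the hardware summed from the wrong offset back to CHECKSUM_NONE. For reference, a small user-space sketch of the underlying RFC 1071 one's-complement arithmetic (this is not the kernel's csum_* implementation, only the same math):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* fold carries back into the low 16 bits */
    static uint16_t csum_fold32(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
    }

    /* classic Internet checksum over a byte buffer */
    static uint16_t inet_checksum(const uint8_t *buf, size_t len)
    {
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
            sum += (uint32_t)((buf[i] << 8) | buf[i + 1]);
        if (len & 1)            /* odd trailing byte is high-padded */
            sum += (uint32_t)(buf[len - 1] << 8);

        return (uint16_t)~csum_fold32(sum); /* sent as the complement */
    }

    int main(void)
    {
        const uint8_t data[] = { 0x45, 0x00, 0x00, 0x1c, 0xab, 0xcd };

        printf("checksum: 0x%04x\n", inet_checksum(data, sizeof(data)));
        return 0;
    }

Because the one's-complement sum is associative, the stack can keep adding to the unfolded value, which is exactly what CHECKSUM_COMPLETE promises.
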
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 51a061b138ba..875281cf7fc0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -147,7 +147,7 @@ struct iwl_mvm_scan_params {
struct cfg80211_match_set *match_sets;
int n_scan_plans;
struct cfg80211_sched_scan_plan *scan_plans;
- u32 measurement_dwell;
+ bool iter_notif;
};
static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
@@ -337,33 +337,6 @@ iwl_mvm_scan_type iwl_mvm_get_scan_type_band(struct iwl_mvm *mvm,
return _iwl_mvm_get_scan_type(mvm, vif, load, low_latency);
}
-static int
-iwl_mvm_get_measurement_dwell(struct iwl_mvm *mvm,
- struct cfg80211_scan_request *req,
- struct iwl_mvm_scan_params *params)
-{
- u32 duration = scan_timing[params->type].max_out_time;
-
- if (!req->duration)
- return 0;
-
- if (iwl_mvm_is_cdb_supported(mvm)) {
- u32 hb_time = scan_timing[params->hb_type].max_out_time;
-
- duration = min_t(u32, duration, hb_time);
- }
-
- if (req->duration_mandatory && req->duration > duration) {
- IWL_DEBUG_SCAN(mvm,
- "Measurement scan - too long dwell %hu (max out time %u)\n",
- req->duration,
- duration);
- return -EOPNOTSUPP;
- }
-
- return min_t(u32, (u32)req->duration, duration);
-}
-
static inline bool iwl_mvm_rrm_scan_needed(struct iwl_mvm *mvm)
{
/* require rrm scan whenever the fw supports it */
@@ -725,14 +698,28 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
NL80211_BAND_2GHZ,
no_cck);
- tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA,
+ 0) < 12) {
+ tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
+ tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
+
+	/*
+	 * FW doesn't use this station anymore; it is pending deprecation
+	 * via a HOST API change.
+	 */
+ } else {
+ tx_cmd[0].sta_id = 0xff;
+ tx_cmd[1].sta_id = 0xff;
+ }
tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
+
tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
NL80211_BAND_5GHZ,
no_cck);
- tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
}
static void
@@ -1144,6 +1131,10 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+	/* This function should not be called when using ADD_STA ver >= 12 */
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12);
+
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1192,6 +1183,10 @@ static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
memcpy(&cfg->mac_addr, &mvm->addresses[0].addr, ETH_ALEN);
+	/* This function should not be called when using ADD_STA ver >= 12 */
+ WARN_ON_ONCE(iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12);
+
cfg->bcast_sta_id = mvm->aux_sta.sta_id;
cfg->channel_flags = channel_flags;
@@ -1305,7 +1300,16 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
memset(&cfg, 0, sizeof(cfg));
- cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) < 12)
+ cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+	/*
+	 * FW doesn't use this station anymore; it is pending deprecation
+	 * via a HOST API change.
+	 */
+ else
+ cfg.bcast_sta_id = 0xff;
+
cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
@@ -1333,10 +1337,8 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
u8 active_dwell, passive_dwell;
timing = &scan_timing[params->type];
- active_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
- passive_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+ active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = IWL_SCAN_DWELL_PASSIVE;
if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
cmd->v7.adwell_default_n_aps_social =
@@ -1389,8 +1391,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
}
}
} else {
- cmd->v1.extended_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
cmd->v1.active_dwell = active_dwell;
cmd->v1.passive_dwell = passive_dwell;
cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
@@ -1443,10 +1444,8 @@ iwl_mvm_scan_umac_dwell_v10(struct iwl_mvm *mvm,
u8 active_dwell, passive_dwell;
timing = &scan_timing[params->type];
- active_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_ACTIVE;
- passive_dwell = params->measurement_dwell ?
- params->measurement_dwell : IWL_SCAN_DWELL_PASSIVE;
+ active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ passive_dwell = IWL_SCAN_DWELL_PASSIVE;
general_params->adwell_default_social_chn =
IWL_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
@@ -1737,7 +1736,7 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
if (!iwl_mvm_is_regular_scan(params))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC;
- if (params->measurement_dwell ||
+ if (params->iter_notif ||
mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
@@ -1782,7 +1781,7 @@ static u16 iwl_mvm_scan_umac_flags(struct iwl_mvm *mvm,
if (!iwl_mvm_is_regular_scan(params))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_PERIODIC;
- if (params->measurement_dwell)
+ if (params->iter_notif)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE;
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2229,7 +2228,8 @@ static int iwl_mvm_build_scan_cmd(struct iwl_mvm *mvm,
hcmd->id = iwl_cmd_id(SCAN_REQ_UMAC, IWL_ALWAYS_LONG_GROUP, 0);
scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
for (i = 0; i < ARRAY_SIZE(iwl_scan_umac_handlers); i++) {
const struct iwl_scan_umac_handler *ver_handler =
@@ -2293,11 +2293,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_fill_scan_type(mvm, &params, vif);
- ret = iwl_mvm_get_measurement_dwell(mvm, req, &params);
- if (ret < 0)
- return ret;
-
- params.measurement_dwell = ret;
+ if (req->duration)
+ params.iter_notif = true;
iwl_mvm_build_scan_probe(mvm, vif, ies, &params);
@@ -2569,7 +2566,8 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm)
{
int base_size, tail_size;
u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
- SCAN_REQ_UMAC);
+ SCAN_REQ_UMAC,
+ IWL_FW_CMD_VER_UNKNOWN);
base_size = iwl_scan_req_umac_get_size(scan_ver);
if (base_size)
@@ -2626,6 +2624,15 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_REGULAR);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_SCHED);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
/* We shouldn't have any UIDs still set. Loop over all the
* UIDs to make sure there's nothing left there and warn if
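
Several hunks above gate behavior on iwl_fw_lookup_cmd_ver(..., ADD_STA, ...) >= 12: newer firmware no longer uses the driver-allocated aux station in the scan configuration, so the station id degrades to 0xff. A minimal sketch of that version-gating shape - the table, the command id, and the default below are invented stand-ins for the firmware capability data:

    #include <stddef.h>
    #include <stdio.h>

    #define VER_UNKNOWN 99   /* stand-in for IWL_FW_CMD_VER_UNKNOWN */
    #define STA_ID_NONE 0xff

    struct cmd_ver { unsigned int cmd; unsigned int ver; };

    /* hypothetical versions advertised by a firmware image */
    static const struct cmd_ver fw_cmd_vers[] = {
        { .cmd = 0x18 /* pretend ADD_STA */, .ver = 12 },
    };

    static unsigned int lookup_cmd_ver(unsigned int cmd, unsigned int def)
    {
        size_t i;

        for (i = 0; i < sizeof(fw_cmd_vers) / sizeof(fw_cmd_vers[0]); i++)
            if (fw_cmd_vers[i].cmd == cmd)
                return fw_cmd_vers[i].ver;
        return def;     /* older firmware advertises nothing */
    }

    int main(void)
    {
        unsigned int sta_id;

        if (lookup_cmd_ver(0x18, VER_UNKNOWN) >= 12)
            sta_id = STA_ID_NONE;   /* firmware manages the station */
        else
            sta_id = 1;             /* legacy driver-allocated aux sta */

        printf("sta_id = 0x%02x\n", sta_id);
        return 0;
    }
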
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 9e124755a3ce..017537944fd0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -85,7 +85,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
int sta_id;
u32 reserved_ids = 0;
- BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
@@ -95,7 +95,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
reserved_ids = BIT(0);
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
+ for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
if (BIT(sta_id) & reserved_ids)
continue;
@@ -770,8 +770,6 @@ static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
queue, sta_id, tid);
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d\n", queue);
-
return queue;
}
@@ -1540,8 +1538,15 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
memset(&cmd, 0, sizeof(cmd));
cmd.sta_id = sta->sta_id;
- cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
- color));
+
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, ADD_STA,
+ 0) >= 12 &&
+ sta->type == IWL_STA_AUX_ACTIVITY)
+ cmd.mac_id_n_color = cpu_to_le32(mac_id);
+ else
+ cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
+ color));
+
if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
cmd.station_type = sta->type;
@@ -1858,7 +1863,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
/* flush its queues here since we are freeing mvm_sta */
- ret = iwl_mvm_flush_sta(mvm, mvm_sta, false, 0);
+ ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
if (ret)
return ret;
if (iwl_mvm_has_new_tx_api(mvm)) {
@@ -1997,7 +2002,7 @@ static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
}
static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
- int maccolor,
+ int maccolor, u8 *addr,
struct iwl_mvm_int_sta *sta,
u16 *queue, int fifo)
{
@@ -2007,7 +2012,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
- ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+ ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
if (ret) {
if (!iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_disable_txq(mvm, NULL, *queue,
@@ -2034,7 +2039,7 @@ static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
return 0;
}
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
int ret;
@@ -2047,7 +2052,11 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
if (ret)
return ret;
- ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+	/*
+	 * In CDB NICs we need to specify which lmac to use for the aux
+	 * activity; reuse the mac_id argument slot to pass the lmac_id.
+	 */
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
&mvm->aux_sta, &mvm->aux_queue,
IWL_MVM_TX_FIFO_MCAST);
if (ret) {
@@ -2065,7 +2074,8 @@ int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
- &mvm->snif_sta, &mvm->snif_queue,
+ NULL, &mvm->snif_sta,
+ &mvm->snif_queue,
IWL_MVM_TX_FIFO_BE);
}
@@ -2189,7 +2199,7 @@ static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
switch (vif->type) {
case NL80211_IFTYPE_AP:
@@ -2438,7 +2448,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
+ iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
@@ -2863,7 +2873,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
} else {
tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- ret = 0;
+ ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
}
out:
@@ -3761,7 +3771,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct ieee80211_sta *sta;
u32 sta_id = le32_to_cpu(notif->sta_id);
- if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+ if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
return;
rcu_read_lock();
@@ -3844,7 +3854,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
/* Block/unblock all the stations of the given mvmvif */
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta))
@@ -3903,3 +3913,43 @@ u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}
+
+int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
+ u8 *key, u32 key_len)
+{
+ int ret;
+ u16 queue;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct ieee80211_key_conf *keyconf;
+
+ ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
+ NL80211_IFTYPE_UNSPECIFIED,
+ IWL_STA_LINK);
+ if (ret)
+ return ret;
+
+ ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+ addr, sta, &queue,
+ IWL_MVM_TX_FIFO_BE);
+ if (ret)
+ goto out;
+
+ keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
+ if (!keyconf) {
+ ret = -ENOBUFS;
+ goto out;
+ }
+
+ keyconf->cipher = cipher;
+ memcpy(keyconf->key, key, key_len);
+ keyconf->keylen = key_len;
+
+ ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
+ 0, NULL, 0, 0, true);
+ kfree(keyconf);
+ return 0;
+out:
+ iwl_mvm_dealloc_int_sta(mvm, sta);
+ return ret;
+}
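
iwl_mvm_add_pasn_sta() above follows the usual kernel goto-unwind idiom: once the internal station is allocated, every later failure jumps to a single label that deallocates it, so all error paths share one teardown. A stand-alone sketch of the same shape, with mocked-up resources standing in for the driver's:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct key {
        unsigned int cipher;
        size_t len;
        unsigned char data[];
    };

    static int add_station(void) { return 0; }   /* pretend success */
    static void remove_station(void) { }

    static int add_pasn_like(const unsigned char *key, size_t key_len)
    {
        struct key *k;
        int ret;

        ret = add_station();                /* first resource */
        if (ret)
            return ret;

        k = malloc(sizeof(*k) + key_len);   /* second resource */
        if (!k) {
            ret = -1;
            goto out;                       /* unwind the first resource */
        }
        memcpy(k->data, key, key_len);
        k->len = key_len;

        /* ... program the key into the device ... */
        free(k);
        return 0;
    out:
        remove_station();   /* single, shared unwind point */
        return ret;
    }

    int main(void)
    {
        const unsigned char key[16] = { 0 };

        printf("ret = %d\n", add_pasn_like(key, sizeof(key)));
        return 0;
    }
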
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index da2d1ac01229..d7578c981a65 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -70,7 +70,7 @@
#include <linux/wait.h>
#include "iwl-trans.h" /* for IWL_MAX_TID_COUNT */
-#include "fw-api.h" /* IWL_MVM_STATION_COUNT */
+#include "fw-api.h" /* IWL_MVM_STATION_COUNT_MAX */
#include "rs.h"
struct iwl_mvm;
@@ -540,7 +540,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start);
-int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
+int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id);
int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -579,5 +579,7 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
-
+int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
+ u8 *key, u32 key_len);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index d781777b6b96..2ad959b4ce0a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2014 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(C) 2018 - 2019 Intel Corporation
+ * Copyright(C) 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,7 +77,7 @@ void iwl_mvm_teardown_tdls_peers(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -100,7 +100,7 @@ int iwl_mvm_tdls_sta_count(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (!sta || IS_ERR(sta) || !sta->tdls)
@@ -144,7 +144,7 @@ static void iwl_mvm_tdls_config(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* populate TDLS peer data */
cnt = 0;
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta) || !sta->tdls)
@@ -273,7 +273,7 @@ void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
return;
}
- if (WARN_ON(sta_id >= IWL_MVM_STATION_COUNT))
+ if (WARN_ON(sta_id >= mvm->fw->ucode_capa.num_stations))
return;
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 1babc4bb5194..7fce79c1c114 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -5,10 +5,9 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -28,10 +27,9 @@
*
* BSD LICENSE
*
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019 Intel Corporation
+ * Copyright(c) 2012 - 2014, 2018 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -116,14 +114,9 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
* event finishes or is canceled, so that frames queued for it
* won't get stuck on the queue and be transmitted in the next
* time event.
- * We have to send the command asynchronously since this cannot
- * be under the mutex for locking reasons, but that's not an
- * issue as it will have to complete before the next command is
- * executed, and a new time event means a new command.
*/
- iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true, CMD_ASYNC);
- /* Do the same for the P2P device queue (STA) */
+ mutex_lock(&mvm->mutex);
if (test_and_clear_bit(IWL_MVM_STATUS_NEED_FLUSH_P2P, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
@@ -136,10 +129,20 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
if (!WARN_ON(!mvm->p2p_device_vif)) {
mvmvif = iwl_mvm_vif_from_mac80211(mvm->p2p_device_vif);
- iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true,
- CMD_ASYNC);
+ iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
}
+ } else {
+		/* do the same in case of Hotspot 2.0 */
+		iwl_mvm_flush_sta(mvm, &mvm->aux_sta, true);
+		/* In newer versions of this command an aux station is added
+		 * only when a dedicated tx queue is used, and it needs to be
+		 * removed at the end of use */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ ADD_STA, 0) >= 12)
+ iwl_mvm_rm_aux_sta(mvm);
}
+
+ mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -172,7 +175,7 @@ static void iwl_mvm_csa_noa_start(struct iwl_mvm *mvm)
* So we just do nothing here and the switch
* will be performed on the last TBTT.
*/
- if (!ieee80211_csa_is_complete(csa_vif)) {
+ if (!ieee80211_beacon_cntdwn_is_complete(csa_vif)) {
IWL_WARN(mvm, "CSA NOA started too early\n");
goto out_unlock;
}
@@ -1013,6 +1016,28 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_roc_finished(mvm);
}
+void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
+ u32 id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!te_data->running)
+ return;
+
+ spin_lock_bh(&mvm->time_event_lock);
+ id = te_data->id;
+ spin_unlock_bh(&mvm->time_event_lock);
+
+ if (id != TE_CHANNEL_SWITCH_PERIOD)
+ return;
+
+ iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
+}
+
int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
u32 duration, u32 apply_time)
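
The new iwl_mvm_remove_csa_period() above holds the spinlock only long enough to snapshot te_data->id, then tests and acts on the copy outside the lock. A user-space sketch of that snapshot-under-lock pattern, using a pthread mutex in place of the kernel spinlock (all names here are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define TE_CHANNEL_SWITCH_PERIOD 1

    static pthread_mutex_t te_lock = PTHREAD_MUTEX_INITIALIZER;
    static int te_id = TE_CHANNEL_SWITCH_PERIOD;
    static int te_running = 1;

    static void remove_csa_period(void)
    {
        int id;

        if (!te_running)
            return;

        /* take a consistent snapshot, decide outside the lock */
        pthread_mutex_lock(&te_lock);
        id = te_id;
        pthread_mutex_unlock(&te_lock);

        if (id != TE_CHANNEL_SWITCH_PERIOD)
            return;

        printf("removing time event %d\n", id);
    }

    int main(void)
    {
        remove_csa_period();
        return 0;
    }
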
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
index 3186d7e40567..b6bac776f236 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -29,7 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (C) 2019 Intel Corporation
+ * Copyright (C) 2019 - 2020 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -216,6 +216,9 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm);
void iwl_mvm_roc_done_wk(struct work_struct *wk);
+void iwl_mvm_remove_csa_period(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+
/**
* iwl_mvm_schedule_csa_period - request channel switch absence period
* @mvm: the mvm component
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 0c95663bf9ed..340c892b30ff 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -228,24 +228,67 @@ void iwl_mvm_ct_kill_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_enter_ctkill(mvm);
}
-static int iwl_mvm_get_temp_cmd(struct iwl_mvm *mvm)
+/*
+ * Send the DTS_MEASUREMENT_TRIGGER command, with or without waiting for
+ * a response. If we get a response, the measurement is stored in 'temp'.
+ */
+static int iwl_mvm_send_temp_cmd(struct iwl_mvm *mvm, bool response, s32 *temp)
{
- struct iwl_dts_measurement_cmd cmd = {
+ struct iwl_host_cmd cmd = {};
+ struct iwl_dts_measurement_cmd dts_cmd = {
.flags = cpu_to_le32(DTS_TRIGGER_CMD_FLAGS_TEMP),
};
- struct iwl_ext_dts_measurement_cmd extcmd = {
+ struct iwl_ext_dts_measurement_cmd ext_cmd = {
.control_mode = cpu_to_le32(DTS_DIRECT_WITHOUT_MEASURE),
};
- u32 cmdid;
+ struct iwl_dts_measurement_resp *resp;
+ void *cmd_ptr;
+ int ret;
+ u32 cmd_flags = 0;
+ u16 len;
+
+ /* Check which command format is used (regular/extended) */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE)) {
+ len = sizeof(ext_cmd);
+ cmd_ptr = &ext_cmd;
+ } else {
+ len = sizeof(dts_cmd);
+ cmd_ptr = &dts_cmd;
+ }
+	/* In the version that returns a response, the command has zero length */
+ if (response) {
+ cmd_flags = CMD_WANT_SKB;
+ len = 0;
+ }
- cmdid = iwl_cmd_id(CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
- PHY_OPS_GROUP, 0);
+ cmd.id = WIDE_ID(PHY_OPS_GROUP, CMD_DTS_MEASUREMENT_TRIGGER_WIDE);
+ cmd.len[0] = len;
+ cmd.flags = cmd_flags;
+ cmd.data[0] = cmd_ptr;
+
+ IWL_DEBUG_TEMP(mvm,
+ "Sending temperature measurement command - %s response\n",
+ response ? "with" : "without");
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ if (ret) {
+ IWL_ERR(mvm,
+ "Failed to send the temperature measurement command (err=%d)\n",
+ ret);
+ return ret;
+ }
- if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE))
- return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(cmd), &cmd);
+ if (response) {
+ resp = (void *)cmd.resp_pkt->data;
+ *temp = le32_to_cpu(resp->temp);
+ IWL_DEBUG_TEMP(mvm,
+ "Got temperature measurement response: temp=%d\n",
+ *temp);
+ iwl_free_resp(&cmd);
+ }
- return iwl_mvm_send_cmd_pdu(mvm, cmdid, 0, sizeof(extcmd), &extcmd);
+ return ret;
}
int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
@@ -254,6 +297,18 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
static u16 temp_notif[] = { WIDE_ID(PHY_OPS_GROUP,
DTS_MEASUREMENT_NOTIF_WIDE) };
int ret;
+ u8 cmd_ver;
+
+ /*
+ * If command version is 1 we send the command and immediately get
+ * a response. For older versions we send the command and wait for a
+ * notification (no command TLV for previous versions).
+ */
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
+ CMD_DTS_MEASUREMENT_TRIGGER_WIDE,
+ IWL_FW_CMD_VER_UNKNOWN);
+ if (cmd_ver == 1)
+ return iwl_mvm_send_temp_cmd(mvm, true, temp);
lockdep_assert_held(&mvm->mutex);
@@ -261,9 +316,8 @@ int iwl_mvm_get_temp(struct iwl_mvm *mvm, s32 *temp)
temp_notif, ARRAY_SIZE(temp_notif),
iwl_mvm_temp_notif_wait, temp);
- ret = iwl_mvm_get_temp_cmd(mvm);
+ ret = iwl_mvm_send_temp_cmd(mvm, false, temp);
if (ret) {
- IWL_ERR(mvm, "Failed to get the temperature (err=%d)\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_temp_notif);
return ret;
}
@@ -295,6 +349,8 @@ static void check_exit_ctkill(struct work_struct *work)
duration = tt->params.ct_kill_duration;
+ flush_work(&mvm->roc_done_wk);
+
mutex_lock(&mvm->mutex);
if (__iwl_mvm_mac_start(mvm))
@@ -345,7 +401,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
struct iwl_mvm_sta *mvmsta;
int i, err;
- for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+ for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
if (!mvmsta)
continue;
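
The reworked iwl_mvm_send_temp_cmd() above folds both firmware generations into one helper: with new firmware it sets CMD_WANT_SKB and reads the temperature straight out of the reply, while with older firmware it fires the trigger and a later notification delivers the value. A mocked-up sketch of that "optionally wait for a response" shape (none of this is the real iwlwifi host-command API):

    #include <stdbool.h>
    #include <stdio.h>

    struct mock_resp { int temp; };

    /* pretend transport: fills the reply only when one was requested */
    static int send_cmd(bool want_resp, struct mock_resp *resp)
    {
        if (want_resp)
            resp->temp = 42;
        return 0;
    }

    static int get_temp(bool response, int *temp)
    {
        struct mock_resp resp = { 0 };
        int ret = send_cmd(response, &resp);

        if (ret)
            return ret;
        if (response)
            *temp = resp.temp;  /* new firmware: answer in the reply */
        /* else: old firmware sends a separate notification later */
        return 0;
    }

    int main(void)
    {
        int temp = 0;

        if (!get_temp(true, &temp))
            printf("temp = %d\n", temp);
        return 0;
    }
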
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 2f6484e0d726..fe1c538cd718 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -793,11 +793,7 @@ unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
enum nl80211_band band = mvmsta->vif->bss_conf.chandef.chan->band;
u8 ac = tid_to_mac80211_ac[tid];
unsigned int txf;
- int lmac = IWL_LMAC_24G_INDEX;
-
- if (iwl_mvm_is_cdb_supported(mvm) &&
- band == NL80211_BAND_5GHZ)
- lmac = IWL_LMAC_5G_INDEX;
+ int lmac = iwl_mvm_get_lmac_id(mvm->fw, band);
/* For HE redirect to trigger based fifos */
if (sta->he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
@@ -1371,7 +1367,7 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
}
}
-/**
+/*
* translate ucode response to mac80211 tx status control values
*/
static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
@@ -1413,7 +1409,7 @@ static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
}
}
-/**
+/*
* iwl_mvm_get_scd_ssn - returns the SSN of the SCD
* @tx_resp: the Tx response from the fw (agg or non-agg)
*
@@ -1768,13 +1764,13 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
struct ieee80211_tx_info *ba_info, u32 rate)
{
struct sk_buff_head reclaimed_skbs;
- struct iwl_mvm_tid_data *tid_data;
+ struct iwl_mvm_tid_data *tid_data = NULL;
struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
+ struct iwl_mvm_sta *mvmsta = NULL;
struct sk_buff *skb;
int freed;
- if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
+ if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
tid > IWL_MAX_TID_COUNT,
"sta_id %d tid %d", sta_id, tid))
return;
@@ -1784,11 +1780,44 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
/* Reclaiming frames for a station that has been deleted ? */
- if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+ if (WARN_ON_ONCE(!sta)) {
rcu_read_unlock();
return;
}
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /*
+ * Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway).
+ */
+ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
+
+ skb_queue_walk(&reclaimed_skbs, skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully, failures come as single
+ * frames because before failing a frame the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ }
+
+	/*
+	 * It's possible to get a BA response after invalidating the RCU
+	 * pointer (it is invalidated in order to prevent new Tx from being
+	 * sent, but there may be some frames already in flight).
+	 * In this case we just want to reclaim, and can skip all the
+	 * sta-dependent stuff since it's in the middle of being removed
+	 * anyway.
+	 */
+ if (IS_ERR(sta))
+ goto out;
+
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
@@ -1800,15 +1829,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
return;
}
- __skb_queue_head_init(&reclaimed_skbs);
-
- /*
- * Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway).
- */
- iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
-
spin_lock_bh(&mvmsta->lock);
tid_data->next_reclaimed = index;
@@ -1832,15 +1852,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
else
WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
- iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
-
- memset(&info->status, 0, sizeof(info->status));
- /* Packet was transmitted successfully, failures come as single
- * frames because before failing a frame the firmware transmits
- * it without aggregation at least once.
- */
- info->flags |= IEEE80211_TX_STAT_ACK;
-
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
if (freed == 1) {
@@ -1917,8 +1928,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
- if (!mvmsta)
- goto out_unlock;
+	/*
+	 * It's possible to get a BA response after invalidating the RCU
+	 * pointer (it is invalidated in order to prevent new Tx from being
+	 * sent, but there may be some frames already in flight).
+	 * In this case we just want to reclaim, and can skip all the
+	 * sta-dependent stuff since it's in the middle of being removed
+	 * anyway.
+	 */
/* Free per TID */
for (i = 0; i < le16_to_cpu(ba_res->tfd_cnt); i++) {
@@ -1929,7 +1946,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (tid == IWL_MGMT_TID)
tid = IWL_MAX_TID_COUNT;
- mvmsta->tid_data[i].lq_color = lq_color;
+ if (mvmsta)
+ mvmsta->tid_data[i].lq_color = lq_color;
+
iwl_mvm_tx_reclaim(mvm, sta_id, tid,
(int)(le16_to_cpu(ba_tfd->q_num)),
le16_to_cpu(ba_tfd->tfd_index),
@@ -1937,9 +1956,9 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate));
}
- iwl_mvm_tx_airtime(mvm, mvmsta,
- le32_to_cpu(ba_res->wireless_time));
-out_unlock:
+ if (mvmsta)
+ iwl_mvm_tx_airtime(mvm, mvmsta,
+ le32_to_cpu(ba_res->wireless_time));
rcu_read_unlock();
out:
IWL_DEBUG_TX_REPLY(mvm,
@@ -2036,7 +2055,7 @@ int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id,
return ret;
}
-int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
+int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal)
{
struct iwl_mvm_int_sta *int_sta = sta;
struct iwl_mvm_sta *mvm_sta = sta;
@@ -2045,12 +2064,10 @@ int iwl_mvm_flush_sta(struct iwl_mvm *mvm, void *sta, bool internal, u32 flags)
offsetof(struct iwl_mvm_sta, sta_id));
if (iwl_mvm_has_new_tx_api(mvm))
- return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id,
- 0xffff, flags);
+ return iwl_mvm_flush_sta_tids(mvm, mvm_sta->sta_id, 0xffff, 0);
if (internal)
- return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk,
- flags);
+ return iwl_mvm_flush_tx_path(mvm, int_sta->tfd_queue_msk, 0);
- return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, flags);
+ return iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0);
}
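
The reclaim rework above distinguishes three states of the fw_id_to_mac_id[] entry: NULL (no station ever existed there - warn and bail), an ERR_PTR (station in the middle of removal - still release the TFDs but skip the per-station bookkeeping), and a valid pointer. A small sketch of that tri-state, with an ERR_PTR encoding that mimics the kernel's (the surrounding logic is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }
    static long PTR_ERR(const void *p) { return (long)p; }
    static int IS_ERR(const void *p)
    {
        /* errors live in the top 4095 values of the address space */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static void reclaim(void *sta)
    {
        if (!sta) {
            printf("no station: drop\n");
            return;
        }
        printf("release TFDs\n");   /* always reclaim the frames */
        if (IS_ERR(sta)) {
            printf("station removing (err %ld): skip sta state\n",
                   PTR_ERR(sta));
            return;
        }
        printf("update per-station accounting\n");
    }

    int main(void)
    {
        int station;

        reclaim(NULL);
        reclaim(ERR_PTR(-2));   /* mid-removal placeholder */
        reclaim(&station);
        return 0;
    }
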
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index be57b8391850..3123036978a5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -67,6 +67,7 @@
#include "iwl-csr.h"
#include "mvm.h"
#include "fw/api/rs.h"
+#include "fw/img.h"
/*
* Will return 0 even if the cmd failed when RFKILL is asserted unless
@@ -289,45 +290,6 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx)
return last_idx;
}
-#define FW_SYSASSERT_CPU_MASK 0xf0000000
-static const struct {
- const char *name;
- u8 num;
-} advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "BAD_COMMAND", 0x39 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
- { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
- { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *desc_lookup(u32 num)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++)
- if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
- return advanced_lookup[i].name;
-
- /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
- return advanced_lookup[i].name;
-}
-
/*
* Note: This structure is read from the device with IO accesses,
* and the reading already does the endian conversion. As it is
@@ -463,7 +425,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
struct iwl_umac_error_event_table table;
u32 base = mvm->trans->dbg.umac_error_event_table;
- if (!mvm->support_umac_log &&
+ if (!base &&
!(mvm->trans->dbg.error_event_table_tlv_status &
IWL_ERROR_EVENT_TABLE_UMAC))
return;
@@ -480,7 +442,7 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
}
IWL_ERR(mvm, "0x%08X | %s\n", table.error_id,
- desc_lookup(table.error_id));
+ iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
@@ -550,7 +512,7 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u8 lmac_num)
IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
- desc_lookup(table.error_id));
+ iwl_fw_lookup_assert_desc(table.error_id));
IWL_ERR(mvm, "0x%08X | trm_hw_status0\n", table.trm_hw_status0);
IWL_ERR(mvm, "0x%08X | trm_hw_status1\n", table.trm_hw_status1);
IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
@@ -658,7 +620,8 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
- * @sync: This command can be sent synchronously.
+ * @mvm: Driver data.
+ * @lq: Link quality command to send.
*
* The link quality command is sent as the last step of station creation.
* This is the special case in which init is set and we call a callback in
@@ -683,8 +646,10 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq)
/**
* iwl_mvm_update_smps - Get a request to change the SMPS mode
+ * @mvm: Driver data.
+ * @vif: Pointer to the ieee80211_vif structure.
 * @req_type: The part of the driver that calls for a change.
- * @smps_requests: The request to change the SMPS mode.
+ * @smps_request: The request to change the SMPS mode.
*
 * Get a request to change the SMPS mode,
* and change it according to all other requests in the driver.
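
The desc_lookup() table removed above moves into shared firmware code as iwl_fw_lookup_assert_desc(); the shape stays a linear scan that falls back to the terminating ADVANCED_SYSASSERT entry. A compilable, excerpt-sized sketch of the same lookup:

    #include <stddef.h>
    #include <stdio.h>

    #define FW_SYSASSERT_CPU_MASK 0xf0000000

    static const struct { const char *name; unsigned char num; } lookup[] = {
        { "NMI_INTERRUPT_WDG", 0x34 },
        { "SYSASSERT", 0x35 },
        { "BAD_COMMAND", 0x38 },
        { "ADVANCED_SYSASSERT", 0 },    /* fallback, must stay last */
    };

    static const char *desc_lookup(unsigned int num)
    {
        size_t i;

        /* skip the last entry: it is the catch-all */
        for (i = 0; i < sizeof(lookup) / sizeof(lookup[0]) - 1; i++)
            if (lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK))
                return lookup[i].name;
        return lookup[i].name;
    }

    int main(void)
    {
        printf("%s\n", desc_lookup(0x10000035)); /* SYSASSERT */
        printf("%s\n", desc_lookup(0x99));       /* ADVANCED_SYSASSERT */
        return 0;
    }
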
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 1ab136600415..a0352fa873d9 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -301,3 +301,30 @@ void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
trans_pcie->prph_info_dma_addr = 0;
trans_pcie->prph_info = NULL;
}
+
+int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
+ const void *data, u32 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
+ int ret;
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return 0;
+
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
+ &trans_pcie->pnvm_dram);
+ if (ret < 0) {
+ IWL_DEBUG_FW(trans, "Failed to allocate PNVM DMA %d.\n",
+ ret);
+ return ret;
+ }
+
+ prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
+ cpu_to_le64(trans_pcie->pnvm_dram.physical);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size =
+ cpu_to_le32(trans_pcie->pnvm_dram.size);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 23abfbd096b0..13fe9c00d7e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -73,7 +73,7 @@ static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
if (!result)
return NULL;
- if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) {
+ if (unlikely(iwl_txq_crosses_4g_boundary(*phys, size))) {
void *old = result;
dma_addr_t oldphys = *phys;
@@ -93,17 +93,17 @@ static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans,
return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0);
}
-static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
- const struct fw_desc *sec,
- struct iwl_dram_data *dram)
+int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
+ const void *data, u32 len,
+ struct iwl_dram_data *dram)
{
- dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, sec->len,
+ dram->block = iwl_pcie_ctxt_info_dma_alloc_coherent(trans, len,
&dram->physical);
if (!dram->block)
return -ENOMEM;
- dram->size = sec->len;
- memcpy(dram->block, sec->data, sec->len);
+ dram->size = len;
+ memcpy(dram->block, data, len);
return 0;
}
@@ -156,7 +156,8 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
/* initialize lmac sections */
for (i = 0; i < lmac_cnt; i++) {
- ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[i],
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[i].data,
+ fw->sec[i].len,
&dram->fw[dram->fw_cnt]);
if (ret)
return ret;
@@ -169,7 +170,8 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
for (i = 0; i < umac_cnt; i++) {
/* access FW with +1 to make up for lmac separator */
ret = iwl_pcie_ctxt_info_alloc_dma(trans,
- &fw->sec[dram->fw_cnt + 1],
+ fw->sec[dram->fw_cnt + 1].data,
+ fw->sec[dram->fw_cnt + 1].len,
&dram->fw[dram->fw_cnt]);
if (ret)
return ret;
@@ -192,7 +194,8 @@ int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
/* access FW with +2 to make up for lmac & umac separators */
int fw_idx = dram->fw_cnt + i + 2;
- ret = iwl_pcie_ctxt_info_alloc_dma(trans, &fw->sec[fw_idx],
+ ret = iwl_pcie_ctxt_info_alloc_dma(trans, fw->sec[fw_idx].data,
+ fw->sec[fw_idx].len,
&dram->paging[i]);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e02bafb8921f..129021f26791 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -512,9 +512,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 9000 Series */
{IWL_PCI_DEVICE(0x2526, PCI_ANY_ID, iwl9000_trans_cfg)},
- {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9560_trans_cfg)},
- {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271B, PCI_ANY_ID, iwl9000_trans_cfg)},
+ {IWL_PCI_DEVICE(0x271C, PCI_ANY_ID, iwl9000_trans_cfg)},
+ {IWL_PCI_DEVICE(0x30DC, PCI_ANY_ID, iwl9560_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x31DC, PCI_ANY_ID, iwl9560_shared_clk_trans_cfg)},
{IWL_PCI_DEVICE(0x9DF0, PCI_ANY_ID, iwl9560_trans_cfg)},
{IWL_PCI_DEVICE(0xA370, PCI_ANY_ID, iwl9560_trans_cfg)},
@@ -540,20 +540,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2725, 0x0510, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x0A10, iwlax210_2ax_cfg_ty_gf_a0)},
{IWL_PCI_DEVICE(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0070, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0074, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0078, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x007C, iwlax201_cfg_snj_hr_b0)},
{IWL_PCI_DEVICE(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x0098, iwlax211_cfg_snj_gf_a0)},
{IWL_PCI_DEVICE(0x2726, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0)},
{IWL_PCI_DEVICE(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0)},
+ {IWL_PCI_DEVICE(0x2726, 0x2074, iwlax201_cfg_snj_hr_b0)},
+ {IWL_PCI_DEVICE(0x2726, 0x4070, iwlax201_cfg_snj_hr_b0)},
{IWL_PCI_DEVICE(0x7A70, 0x0090, iwlax211_2ax_cfg_so_gf_a0_long)},
+ {IWL_PCI_DEVICE(0x7A70, 0x0098, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0310, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0510, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7A70, 0x0A10, iwlax211_2ax_cfg_so_gf_a0_long)},
{IWL_PCI_DEVICE(0x7AF0, 0x0090, iwlax211_2ax_cfg_so_gf_a0)},
+ {IWL_PCI_DEVICE(0x7AF0, 0x0098, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x00B0, iwlax411_2ax_cfg_so_gf4_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0310, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0)},
{IWL_PCI_DEVICE(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0)},
+/* Ma devices */
+ {IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7E80, PCI_ANY_ID, iwl_ma_trans_cfg)},
+
#endif /* CONFIG_IWLMVM */
{0}
@@ -595,6 +608,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
/* QnJ with Hr */
IWL_DEV_INFO(0x2720, IWL_CFG_ANY, iwl_qnj_b0_hr_b0_cfg, iwl_ax201_name),
+ /* SnJ with HR*/
+ IWL_DEV_INFO(0x2726, 0x0244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
+ IWL_DEV_INFO(0x2726, 0x1651, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
+ IWL_DEV_INFO(0x2726, 0x1652, iwlax201_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
+ IWL_DEV_INFO(0x2726, 0x4244, iwlax201_cfg_snj_hr_b0, iwl_ax101_name),
+
/* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x43F0, 0x0074, iwl_ax201_cfg_qu_hr, NULL),
@@ -613,6 +632,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0xA0F0, 0x4070, iwl_ax201_cfg_qu_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0070, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0074, iwl_ax201_cfg_quz_hr, NULL),
+ IWL_DEV_INFO(0x02F0, 0x6074, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0078, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x007C, iwl_ax201_cfg_quz_hr, NULL),
IWL_DEV_INFO(0x02F0, 0x0310, iwl_ax201_cfg_quz_hr, NULL),
@@ -955,6 +975,18 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY,
iwl_quz_a0_hr1_b0, iwl_ax101_name),
+/* Ma */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_GF, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_ma_a0_gf_a0, iwl_ax211_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_ma_a0_mr_a0, iwl_ma_name),
+
#endif /* CONFIG_IWLMVM */
};
@@ -987,9 +1019,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
- /* the trans_cfg should never change, so set it now */
- iwl_trans->trans_cfg = trans;
-
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
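
The iwl_dev_info_table[] entries matched by the loop above mix exact (device, subdevice) rows with IWL_CFG_ANY wildcards: a row matches when every field is either the wildcard or equal to the probed device. A toy sketch of that wildcard predicate (the table contents and the first-match policy here are invented for illustration):

    #include <stddef.h>
    #include <stdio.h>

    #define ANY 0xffffu

    struct dev_info { unsigned int device, subdevice; const char *name; };

    static const struct dev_info table[] = {
        { 0x2726, 0x1651, "Killer AX1650s (hypothetical row)" },
        { 0x2726, ANY,    "generic SnJ (hypothetical row)" },
    };

    static const char *match(unsigned int device, unsigned int subdevice)
    {
        size_t i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if ((table[i].device == ANY || table[i].device == device) &&
                (table[i].subdevice == ANY ||
                 table[i].subdevice == subdevice))
                return table[i].name;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", match(0x2726, 0x1651));
        printf("%s\n", match(0x2726, 0x0070));
        return 0;
    }
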
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 55808ba10d27..ff542d2f0054 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -79,12 +79,7 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
-
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
+#include "queue/tx.h"
/*
* RX related structures and functions
@@ -247,16 +242,6 @@ struct iwl_rb_allocator {
};
/**
- * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- */
-static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
-{
- return ++index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-/**
* iwl_get_closed_rb_stts - get closed rb stts from different structs
* @rxq - the rxq to get the rb stts from
*/
@@ -274,28 +259,6 @@ static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
}
}
-/**
- * iwl_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- */
-static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
-{
- return --index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-static inline dma_addr_t
-iwl_pcie_get_first_tb_dma(struct iwl_txq *txq, int idx)
-{
- return txq->first_tb_dma +
- sizeof(struct iwl_pcie_first_tb_buf) * idx;
-}
-
-struct iwl_tso_hdr_page {
- struct page *page;
- u8 *pos;
-};
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
/**
* enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data
@@ -375,8 +338,8 @@ struct cont_rec {
* count for allocating and freeing the memory.
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
- * @scd_bc_tbls: pointer to the byte count table of the scheduler
* @kw: keep warm address
+ * @pnvm_dram: DRAM area that contains the PNVM data
* @pci_dev: basic pci-network driver stuff
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
@@ -384,7 +347,6 @@ struct cont_rec {
* @cmd_queue - command queue number
* @def_rx_queue - default rx queue number
* @rx_buf_size: Rx buffer size
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
* @scd_set_active: should the transport configure the SCD for HCMD queue
* @sw_csum_tx: if true, then the transport will compute the csum of the TXed
* frame.
@@ -434,8 +396,6 @@ struct iwl_trans_pcie {
struct net_device napi_dev;
- struct __percpu iwl_tso_hdr_page *tso_hdr_page;
-
/* INT ICT Table */
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
@@ -449,9 +409,9 @@ struct iwl_trans_pcie {
struct mutex mutex;
u32 inta_mask;
u32 scd_base_addr;
- struct iwl_dma_ptr scd_bc_tbls;
struct iwl_dma_ptr kw;
- struct dma_pool *bc_pool;
+
+ struct iwl_dram_data pnvm_dram;
struct iwl_txq *txq_memory;
@@ -465,17 +425,12 @@ struct iwl_trans_pcie {
wait_queue_head_t wait_command_queue;
wait_queue_head_t sx_waitq;
- u8 page_offs, dev_cmd_offs;
-
u8 def_rx_queue;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
- u8 max_tbs;
- u16 tfd_size;
u16 num_rx_bufs;
enum iwl_amsdu_size rx_buf_size;
- bool bc_table_dword;
bool scd_set_active;
bool sw_csum_tx;
bool pcie_dbg_dumped_once;
@@ -579,19 +534,7 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
-/*
- * We need this inline in case dma_addr_t is only 32-bits - since the
- * hardware is always 64-bit, the issue can still occur in that case,
- * so use u64 for 'phys' here to force the addition in 64-bit.
- */
-static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len)
-{
- return upper_32_bits(phys) != upper_32_bits(phys + len);
-}
-
int iwl_pcie_tx_init(struct iwl_trans *trans);
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id,
- int queue_size);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
@@ -602,14 +545,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
-void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
- struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_txq *txq);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
@@ -617,22 +556,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *_tfd,
- u8 idx)
-{
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->tb_len);
- } else {
- struct iwl_tfd *tfd = _tfd;
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
- }
-}
-
/*****************************************************
* Error handling
******************************************************/
@@ -800,22 +723,6 @@ static inline void iwl_enable_fw_load_int_ctx_info(struct iwl_trans *trans)
}
}
-static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq, int idx)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- if (trans->trans_cfg->use_tfh)
- idx = iwl_pcie_get_cmd_index(txq, idx);
-
- return txq->tfds + trans_pcie->tfd_size * idx;
-}
-
static inline const char *queue_name(struct device *dev,
struct iwl_trans_pcie *trans_p, int i)
{
@@ -867,37 +774,6 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans);
-static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
- }
-}
-
-static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
- } else
- IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->id);
-}
-
-static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
-{
- int index = iwl_pcie_get_cmd_index(q, i);
- int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
- int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
-
- return w >= r ?
- (index >= r && index < w) :
- !(index < r && index >= w);
-}
-
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -964,23 +840,12 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
-int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue);
-int iwl_pcie_txq_alloc(struct iwl_trans *trans,
- struct iwl_txq *txq, int slots_num, bool cmd_queue);
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size);
void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr);
void iwl_pcie_apply_destination(struct iwl_trans *trans);
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
- struct sk_buff *skb);
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb);
-#endif
/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
@@ -989,28 +854,10 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
-void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq);
-int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
- struct iwl_txq **intxq, int size,
- unsigned int timeout);
-int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd);
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int timeout);
-void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue);
-int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
-void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id);
-void iwl_pcie_gen2_tx_free(struct iwl_trans *trans);
-void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans);
void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
bool test, bool reset);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 9463c108aa96..94299f259518 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1359,7 +1359,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
sequence = le16_to_cpu(pkt->hdr.sequence);
index = SEQ_TO_INDEX(sequence);
- cmd_index = iwl_pcie_get_cmd_index(txq, index);
+ cmd_index = iwl_txq_get_cmd_index(txq, index);
if (rxq->id == trans_pcie->def_rx_queue)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 97c9e9c87436..91ec9379c061 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -162,7 +162,7 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
IWL_DEBUG_INFO(trans,
"DEVICE_ENABLED bit was set and is now cleared\n");
- iwl_pcie_gen2_tx_stop(trans);
+ iwl_txq_gen2_tx_stop(trans);
iwl_pcie_rx_stop(trans);
}
@@ -245,7 +245,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_pcie_gen2_tx_init(trans, trans->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index e5160d620868..d2e69ad53b27 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -81,6 +81,7 @@
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
+#include "iwl-context-info-gen3.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START 0x40000
@@ -1607,11 +1608,15 @@ iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int max_irqs, num_irqs, i, ret;
u16 pci_cmd;
+ u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;
if (!cfg_trans->mq_rx_supported)
goto enable_msi;
- max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
+ if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
+ max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;
+
+ max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
for (i = 0; i < max_irqs; i++)
trans_pcie->msix_entries[i].entry = i;
@@ -1907,6 +1912,9 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans->txqs.page_offs = trans_cfg->cb_data_offs;
+ trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
else
@@ -1924,13 +1932,10 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
- trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
+ trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
- trans_pcie->page_offs = trans_cfg->cb_data_offs;
- trans_pcie->dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
-
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
@@ -1951,7 +1956,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
if (trans->trans_cfg->gen2)
- iwl_pcie_gen2_tx_free(trans);
+ iwl_txq_gen2_tx_free(trans);
else
iwl_pcie_tx_free(trans);
iwl_pcie_rx_free(trans);
@@ -1975,15 +1980,11 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_free_fw_monitor(trans);
- for_each_possible_cpu(i) {
- struct iwl_tso_hdr_page *p =
- per_cpu_ptr(trans_pcie->tso_hdr_page, i);
-
- if (p->page)
- __free_page(p->page);
- }
+ if (trans_pcie->pnvm_dram.size)
+ dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
+ trans_pcie->pnvm_dram.block,
+ trans_pcie->pnvm_dram.physical);
- free_percpu(trans_pcie->tso_hdr_page);
mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
}
@@ -2276,36 +2277,6 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
#define IWL_FLUSH_WAIT_MS 2000
-void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- u32 txq_id = txq->id;
- u32 status;
- bool active;
- u8 fifo;
-
- if (trans->trans_cfg->use_tfh) {
- IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
- txq->read_ptr, txq->write_ptr);
- /* TODO: access new SCD registers and dump them */
- return;
- }
-
- status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
- fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-
- IWL_ERR(trans,
- "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
- txq_id, active ? "" : "in", fifo,
- jiffies_to_msecs(txq->wd_timeout),
- txq->read_ptr, txq->write_ptr,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
-}
-
static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
struct iwl_trans_rxq_dma_data *data)
{
@@ -2374,7 +2345,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (txq->read_ptr != txq->write_ptr) {
IWL_ERR(trans,
"fail to flush all tx fifo queues Q %d\n", txq_idx);
- iwl_trans_pcie_log_scd_error(trans, txq);
+ iwl_txq_log_scd_error(trans, txq);
return -ETIMEDOUT;
}
@@ -2985,12 +2956,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < trans_pcie->max_tbs; i++)
- cmdlen += iwl_pcie_tfd_tb_get_len(trans, tfd, i);
+ for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
+ cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
}
@@ -3329,14 +3299,14 @@ static struct iwl_trans_dump_data
data = (void *)dump_data->data;
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
- u16 tfd_size = trans_pcie->tfd_size;
+ u16 tfd_size = trans->txqs.tfd.size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
spin_lock_bh(&cmdq->lock);
ptr = cmdq->write_ptr;
for (i = 0; i < cmdq->n_window; i++) {
- u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+ u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
u8 tfdidx;
u32 caplen, cmdlen;
@@ -3359,7 +3329,7 @@ static struct iwl_trans_dump_data
txcmd = (void *)((u8 *)txcmd->data + caplen);
}
- ptr = iwl_queue_dec_wrap(trans, ptr);
+ ptr = iwl_txq_dec_wrap(trans, ptr);
}
spin_unlock_bh(&cmdq->lock);
@@ -3478,15 +3448,16 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
.send_cmd = iwl_trans_pcie_gen2_send_hcmd,
- .tx = iwl_trans_pcie_gen2_tx,
+ .tx = iwl_txq_gen2_tx,
.reclaim = iwl_trans_pcie_reclaim,
.set_q_ptrs = iwl_trans_pcie_set_q_ptrs,
- .txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
- .txq_free = iwl_trans_pcie_dyn_txq_free,
+ .txq_alloc = iwl_txq_dyn_alloc,
+ .txq_free = iwl_txq_dyn_free,
.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
+ .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
#ifdef CONFIG_IWLWIFI_DEBUGFS
.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
#endif
@@ -3498,34 +3469,18 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie;
struct iwl_trans *trans;
- int ret, addr_size, txcmd_size, txcmd_align;
+ int ret, addr_size;
const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
- if (!cfg_trans->gen2) {
+ if (!cfg_trans->gen2)
ops = &trans_ops_pcie;
- txcmd_size = sizeof(struct iwl_tx_cmd);
- txcmd_align = sizeof(void *);
- } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen2);
- txcmd_align = 64;
- } else {
- txcmd_size = sizeof(struct iwl_tx_cmd_gen3);
- txcmd_align = 128;
- }
-
- txcmd_size += sizeof(struct iwl_cmd_header);
- txcmd_size += 36; /* biggest possible 802.11 header */
-
- /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */
- if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align))
- return ERR_PTR(-EINVAL);
ret = pcim_enable_device(pdev);
if (ret)
return ERR_PTR(ret);
trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
- txcmd_size, txcmd_align);
+ cfg_trans);
if (!trans)
return ERR_PTR(-ENOMEM);
@@ -3547,11 +3502,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
- trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
- if (!trans_pcie->tso_hdr_page) {
- ret = -ENOMEM;
- goto out_no_pci;
- }
trans_pcie->debug_rfkill = -1;
if (!cfg_trans->base_params->pcie_l1_allowed) {
@@ -3567,19 +3517,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->def_rx_queue = 0;
- if (cfg_trans->use_tfh) {
- addr_size = 64;
- trans_pcie->max_tbs = IWL_TFH_NUM_TBS;
- trans_pcie->tfd_size = sizeof(struct iwl_tfh_tfd);
- } else {
- addr_size = 36;
- trans_pcie->max_tbs = IWL_NUM_OF_TBS;
- trans_pcie->tfd_size = sizeof(struct iwl_tfd);
- }
- trans->max_skb_frags = IWL_PCIE_MAX_FRAGS(trans_pcie);
-
pci_set_master(pdev);
+ addr_size = trans->txqs.tfd.addr_size;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
if (!ret)
ret = pci_set_consistent_dma_mask(pdev,
@@ -3661,24 +3601,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
init_waitqueue_head(&trans_pcie->sx_waitq);
- /*
- * For gen2 devices, we use a single allocation for each byte-count
- * table, but they're pretty small (1k) so use a DMA pool that we
- * allocate here.
- */
- if (cfg_trans->gen2) {
- size_t bc_tbl_size;
-
- if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_AX210)
- bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
- else
- bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
-
- trans_pcie->bc_pool = dmam_pool_create("iwlwifi:bc", &pdev->dev,
- bc_tbl_size, 256, 0);
- if (!trans_pcie->bc_pool)
- goto out_no_pci;
- }
if (trans_pcie->msix_enabled) {
ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
@@ -3712,7 +3634,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
- free_percpu(trans_pcie->tso_hdr_page);
destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_trans:
iwl_trans_free(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 606bef2ecc7b..baa83a0b8593 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -58,751 +58,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
-
- /*
- * iwl_pcie_gen2_tx_stop - Stop all Tx DMA channels
- */
-void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
-{
- int txq_id;
-
- /*
- * This function can be called before the op_mode disabled the
- * queues. This happens when we have an rfkill interrupt.
- * Since we stop Tx altogether - mark the queues as stopped.
- */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
- if (!trans->txqs.txq[txq_id])
- continue;
- iwl_pcie_gen2_txq_unmap(trans, txq_id);
- }
-}
-
-/*
- * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- u8 filled_tfd_size, num_fetch_chunks;
- u16 len = byte_cnt;
- __le16 bc_ent;
-
- if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
- return;
-
- filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
- /*
- * filled_tfd_size contains the number of filled bytes in the TFD.
- * Dividing it by 64 will give the number of chunks to fetch
- * to SRAM - 0 for one chunk, 1 for 2, and so on.
- * If, for example, TFD contains only 3 TBs then 32 bytes
- * of the TFD are used, and only one chunk of 64 bytes should
- * be fetched
- */
- num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
- /* Starting from AX210, the HW expects bytes */
- WARN_ON(trans_pcie->bc_table_dword);
- WARN_ON(len > 0x3FFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
- } else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-
- /* Before AX210, the HW expects DW */
- WARN_ON(!trans_pcie->bc_table_dword);
- len = DIV_ROUND_UP(len, 4);
- WARN_ON(len > 0xFFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
- }
-}
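
A standalone sketch of the fetch-chunk arithmetic above, using the sizes the comment assumes (a 2-byte TB count followed by 10-byte TBs, so three TBs fill 32 bytes and need a single 64-byte fetch; the field therefore holds 0).

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        /* assumed sizes: 2-byte num_tbs header, 10 bytes per TB */
        unsigned int filled_tfd_size = 2 + 3 * 10;
        unsigned int num_fetch_chunks =
                DIV_ROUND_UP(filled_tfd_size, 64) - 1;

        printf("chunks field = %u\n", num_fetch_chunks);        /* 0 */
        return 0;
}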
-
-/*
- * iwl_pcie_gen2_txq_inc_wr_ptr - Send new write index to hardware
- */
-void iwl_pcie_gen2_txq_inc_wr_ptr(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
-
- /*
- * if not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
-}
-
-static u8 iwl_pcie_gen2_get_num_tbs(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
-}
-
-static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i, num_tbs;
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans_pcie->max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- }
-
- tfd->num_tbs = 0;
-}
-
-static void iwl_pcie_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
-
- lockdep_assert_held(&txq->lock);
-
- iwl_pcie_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_pcie_get_tfd(trans, txq, idx));
-
- /* free SKB */
- if (txq->entries) {
- struct sk_buff *skb;
-
- skb = txq->entries[idx].skb;
-
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
- }
-}
-
-static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd, dma_addr_t addr,
- u16 len)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb;
-
- /*
- * Only WARN here so we know about the issue, but we mess up our
- * unmap path because not every place currently checks for errors
- * returned from this function - it can only return an error if
- * there's no more space, and so when we know there is enough we
- * don't always check ...
- */
- WARN(iwl_pcie_crosses_4g_boundary(addr, len),
- "possible DMA problem with iova:0x%llx, len:%d\n",
- (unsigned long long)addr, len);
-
- if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
- return -EINVAL;
- tb = &tfd->tbs[idx];
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->max_tbs);
- return -EINVAL;
- }
-
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
-
- tfd->num_tbs = cpu_to_le16(idx + 1);
-
- return idx;
-}
-
-static struct page *get_workaround_page(struct iwl_trans *trans,
- struct sk_buff *skb)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct page **page_ptr;
- struct page *ret;
-
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-
- ret = alloc_page(GFP_ATOMIC);
- if (!ret)
- return NULL;
-
- /* set the chaining pointer to the previous page if there */
- *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
- *page_ptr = ret;
-
- return ret;
-}
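
The chaining trick above (stashing the previous page's address in the last sizeof(void *) bytes of each new page) sketched in plain C, with malloc() standing in for alloc_page()/page_address() purely for illustration:

#include <stdlib.h>

#define PG_SZ 4096

static void *chain_new(void **head)
{
        char *pg = malloc(PG_SZ);

        if (!pg)
                return NULL;
        /* set the chaining pointer to the previous page, if any */
        *(void **)(pg + PG_SZ - sizeof(void *)) = *head;
        *head = pg;
        return pg;
}

static void chain_free(void *head)
{
        while (head) {
                void *next = *(void **)((char *)head + PG_SZ -
                                        sizeof(void *));

                free(head);
                head = next;
        }
}
/* usage: void *head = NULL; chain_new(&head); ...; chain_free(head); */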
-
-/*
- * Add a TB and if needed apply the FH HW bug workaround;
- * meta != NULL indicates that it's a page mapping and we
- * need to dma_unmap_page() and set the meta->tbs bit in
- * this case.
- */
-static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- dma_addr_t phys, void *virt,
- u16 len, struct iwl_cmd_meta *meta)
-{
- dma_addr_t oldphys = phys;
- struct page *page;
- int ret;
-
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
-
- if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) {
- ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
-
- if (ret < 0)
- goto unmap;
-
- if (meta)
- meta->tbs |= BIT(ret);
-
- ret = 0;
- goto trace;
- }
-
- /*
- * Work around a hardware bug. If (as expressed in the
- * condition above) the TB ends on a 32-bit boundary,
- * then the next TB may be accessed with the wrong
- * address.
- * To work around it, copy the data elsewhere and make
- * a new mapping for it so the device will not fail.
- */
-
- if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
- ret = -ENOBUFS;
- goto unmap;
- }
-
- page = get_workaround_page(trans, skb);
- if (!page) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- memcpy(page_address(page), virt, len);
-
- phys = dma_map_single(trans->dev, page_address(page), len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
- ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len);
- if (ret < 0) {
- /* unmap the new allocation as single */
- oldphys = phys;
- meta = NULL;
- goto unmap;
- }
- IWL_WARN(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys, (unsigned long long)phys);
-
- ret = 0;
-unmap:
- if (meta)
- dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
-trace:
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
-
- return ret;
-}
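
A sketch of the boundary test the workaround above depends on. iwl_pcie_crosses_4g_boundary() itself is not part of this patch; given the "ends on a 32-bit boundary" wording, it is presumably equivalent to comparing the upper 32 address bits of the buffer start and end:

#include <stdbool.h>
#include <stdint.h>

static bool crosses_4g_boundary(uint64_t addr, uint16_t len)
{
        /* also true when the TB ends exactly on a 2^32 boundary */
        return (addr >> 32) != ((addr + len) >> 32);
}
/* e.g. crosses_4g_boundary(0x1FFFFFF00ULL, 0x200) -> true */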
-
-static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len,
- struct iwl_device_tx_cmd *dev_cmd)
-{
-#ifdef CONFIG_INET
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
- struct tso_t tso;
-
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
- &dev_cmd->hdr, start_len, 0);
-
- ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
- snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
- amsdu_pad = 0;
-
- /* total amount of header we may need for this A-MSDU */
- hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
-
- /* Our device supports 9 segments at most; the headers fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
- return -ENOMEM;
-
- start_hdr = hdr_page->pos;
-
- /*
- * Pull the ieee80211 header to be able to use TSO core,
- * we will restore it for the tx_status flow.
- */
- skb_pull(skb, hdr_len);
-
- /*
- * Remove the length of all the headers that we don't actually
- * have in the MPDU by themselves, but that we duplicate into
- * all the different MSDUs inside the A-MSDU.
- */
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
- tso_start(skb, &tso);
-
- while (total_len) {
- /* this is the data left for this subframe */
- unsigned int data_left = min_t(unsigned int, mss, total_len);
- struct sk_buff *csum_skb = NULL;
- unsigned int tb_len;
- dma_addr_t tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
-
- total_len -= data_left;
-
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
- amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
- data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
-
- length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
-
- /*
- * This will copy the SNAP as well which will be considered
- * as MAC header.
- */
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
-
- hdr_page->pos += snap_ip_tcp_hdrlen;
-
- tb_len = hdr_page->pos - start_hdr;
- tb_phys = dma_map_single(trans->dev, start_hdr,
- tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
- dev_kfree_skb(csum_skb);
- goto out_err;
- }
- /*
- * No need for _with_wa, this is from the TSO page and
- * we leave some space at the end of it so can't hit
- * the buggy scenario.
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- tb_phys, tb_len);
- /* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
-
- /* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
-
- /* put the payload */
- while (data_left) {
- int ret;
-
- tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = dma_map_single(trans->dev, tso.data,
- tb_len, DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd,
- tb_phys, tso.data,
- tb_len, NULL);
- if (ret) {
- dev_kfree_skb(csum_skb);
- goto out_err;
- }
-
- data_left -= tb_len;
- tso_build_data(skb, &tso, tb_len);
- }
- }
-
- /* re-add the WiFi header */
- skb_push(skb, hdr_len);
-
- return 0;
-
-out_err:
-#endif
- return -EINVAL;
-}
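
The padding in the subframe loop above keeps each A-MSDU subframe 4-byte aligned; a tiny standalone example of the (4 - len) & 0x3 idiom:

#include <stdio.h>

int main(void)
{
        unsigned int subframe_len = 61;                 /* illustrative */
        unsigned int pad = (4 - subframe_len) & 0x3;

        printf("pad=%u next=%u\n", pad, subframe_len + pad);    /* 3 64 */
        return 0;
}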
-
-static struct iwl_tfh_tfd *
-iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len)
-{
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len;
- void *tb1_addr;
-
- tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- /* do not align A-MSDU to dword as the subframe header aligns it */
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
-
- if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
- len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
- return tfd;
-
-out_err:
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- struct iwl_cmd_meta *out_meta)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr_t tb_phys;
- unsigned int fragsz = skb_frag_size(frag);
- int ret;
-
- if (!fragsz)
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- fragsz, DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb_frag_address(frag),
- fragsz, out_meta);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct iwl_tfh_tfd *
-iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len,
- bool pad)
-{
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len, tb1_len, tb2_len;
- void *tb1_addr;
- struct sk_buff *frag;
-
- tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
-
- /* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- if (pad)
- tb1_len = ALIGN(len, 4);
- else
- tb1_len = len;
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-
- /* set up TFD's third entry to point to remainder of skb's head */
- tb2_len = skb_headlen(skb) - hdr_len;
-
- if (tb2_len > 0) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
- tb2_len, DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb->data + hdr_len, tb2_len,
- NULL);
- if (ret)
- goto out_err;
- }
-
- if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta))
- goto out_err;
-
- skb_walk_frags(skb, frag) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, frag->data,
- skb_headlen(frag), DMA_TO_DEVICE);
- ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- frag->data,
- skb_headlen(frag), NULL);
- if (ret)
- goto out_err;
- if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta))
- goto out_err;
- }
-
- return tfd;
-
-out_err:
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static struct iwl_tfh_tfd *
-iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
- int len, hdr_len;
- bool amsdu;
-
- /* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
- memset(tfd, 0, sizeof(*tfd));
-
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_gen2);
- else
- len = sizeof(struct iwl_tx_cmd_gen3);
-
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- /*
- * Only build A-MSDUs here if doing so via GSO; otherwise the frame may
- * be an A-MSDU for other reasons, e.g. NAN, or an A-MSDU built in the
- * higher layers already.
- */
- if (amsdu && skb_shinfo(skb)->gso_size)
- return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
- out_meta, hdr_len, len);
-
- return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len, !amsdu);
-}
-
-int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- u16 cmd_len;
- int idx;
- void *tfd;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return -EINVAL;
-
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
- "TX on unused queue %d\n", txq_id))
- return -EINVAL;
-
- if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
- __skb_linearize(skb))
- return -ENOMEM;
-
- spin_lock(&txq->lock);
-
- if (iwl_queue_space(trans, txq) < txq->high_mark) {
- iwl_stop_queue(trans, txq);
-
- /* don't put the packet on the ring if there is no room */
- if (unlikely(iwl_queue_space(trans, txq) < 3)) {
- struct iwl_device_tx_cmd **dev_cmd_ptr;
-
- dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->dev_cmd_offs);
-
- *dev_cmd_ptr = dev_cmd;
- __skb_queue_tail(&txq->overflow_q, skb);
- spin_unlock(&txq->lock);
- return 0;
- }
- }
-
- idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
-
- /* Set up driver data for this TFD */
- txq->entries[idx].skb = skb;
- txq->entries[idx].cmd = dev_cmd;
-
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(idx)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[idx].meta;
- out_meta->flags = 0;
-
- tfd = iwl_pcie_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
- if (!tfd) {
- spin_unlock(&txq->lock);
- return -1;
- }
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
- } else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
- }
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
- iwl_pcie_gen2_get_num_tbs(trans, tfd));
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
- iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
- spin_unlock(&txq->lock);
- return 0;
-}
+#include "queue/tx.h"
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -902,11 +158,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
- tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
- if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -984,8 +240,8 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
- iwl_pcie_gen2_set_tb(trans, tfd, iwl_pcie_get_first_tb_dma(txq, idx),
- tb0_size);
+ iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
+ tb0_size);
/* map first command fragment, if any remains */
if (copy_size > tb0_size) {
@@ -995,11 +251,11 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
goto out;
}
- iwl_pcie_gen2_set_tb(trans, tfd, phys_addr,
- copy_size - tb0_size);
+ iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
+ copy_size - tb0_size);
}
/* map the remaining (adjusted) nocopy/dup fragments */
@@ -1017,10 +273,10 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
idx = -ENOMEM;
- iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
goto out;
}
- iwl_pcie_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
+ iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
}
BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
@@ -1037,8 +293,8 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_irqsave(&trans_pcie->reg_lock, flags);
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
- iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
out:
@@ -1169,322 +425,3 @@ int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
return iwl_pcie_gen2_send_hcmd_sync(trans, cmd);
}
-/*
- * iwl_pcie_gen2_txq_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
- while (txq->write_ptr != txq->read_ptr) {
- IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, txq->read_ptr);
-
- if (txq_id != trans->txqs.cmd.q_id) {
- int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
-
- if (WARN_ON_ONCE(!skb))
- continue;
-
- iwl_pcie_free_tso_page(trans_pcie, skb);
- }
- iwl_pcie_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
- }
-
- while (!skb_queue_empty(&txq->overflow_q)) {
- struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
- iwl_op_mode_free_skb(trans->op_mode, skb);
- }
-
- spin_unlock_bh(&txq->lock);
-
- /* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
-}
-
-void iwl_pcie_gen2_txq_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct device *dev = trans->dev;
-
- /* De-alloc circular buffer of TFDs */
- if (txq->tfds) {
- dma_free_coherent(dev,
- trans_pcie->tfd_size * txq->n_window,
- txq->tfds, txq->dma_addr);
- dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->n_window,
- txq->first_tb_bufs, txq->first_tb_dma);
- }
-
- kfree(txq->entries);
- if (txq->bc_tbl.addr)
- dma_pool_free(trans_pcie->bc_pool, txq->bc_tbl.addr,
- txq->bc_tbl.dma);
- kfree(txq);
-}
-
-/*
- * iwl_pcie_txq_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_pcie_gen2_txq_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq;
- int i;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return;
-
- txq = trans->txqs.txq[txq_id];
-
- if (WARN_ON(!txq))
- return;
-
- iwl_pcie_gen2_txq_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
- for (i = 0; i < txq->n_window; i++) {
- kfree_sensitive(txq->entries[i].cmd);
- kfree_sensitive(txq->entries[i].free_buf);
- }
- del_timer_sync(&txq->stuck_timer);
-
- iwl_pcie_gen2_txq_free_memory(trans, txq);
-
- trans->txqs.txq[txq_id] = NULL;
-
- clear_bit(txq_id, trans->txqs.queue_used);
-}
-
-int iwl_trans_pcie_dyn_txq_alloc_dma(struct iwl_trans *trans,
- struct iwl_txq **intxq, int size,
- unsigned int timeout)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t bc_tbl_size, bc_tbl_entries;
- struct iwl_txq *txq;
- int ret;
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- bc_tbl_size = sizeof(struct iwl_gen3_bc_tbl);
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
- } else {
- bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
- }
-
- if (WARN_ON(size > bc_tbl_entries))
- return -EINVAL;
-
- txq = kzalloc(sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return -ENOMEM;
-
- txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->bc_pool, GFP_KERNEL,
- &txq->bc_tbl.dma);
- if (!txq->bc_tbl.addr) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- kfree(txq);
- return -ENOMEM;
- }
-
- ret = iwl_pcie_txq_alloc(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue alloc failed\n");
- goto error;
- }
- ret = iwl_pcie_txq_init(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue init failed\n");
- goto error;
- }
-
- txq->wd_timeout = msecs_to_jiffies(timeout);
-
- *intxq = txq;
- return 0;
-
-error:
- iwl_pcie_gen2_txq_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd)
-{
- struct iwl_tx_queue_cfg_rsp *rsp;
- int ret, qid;
- u32 wr_ptr;
-
- if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
- sizeof(*rsp))) {
- ret = -EINVAL;
- goto error_free_resp;
- }
-
- rsp = (void *)hcmd->resp_pkt->data;
- qid = le16_to_cpu(rsp->queue_number);
- wr_ptr = le16_to_cpu(rsp->write_pointer);
-
- if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
- WARN_ONCE(1, "queue index %d unsupported", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (test_and_set_bit(qid, trans->txqs.queue_used)) {
- WARN_ONCE(1, "queue %d already used", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- txq->id = qid;
- trans->txqs.txq[qid] = txq;
- wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- /* Place first TFD at index corresponding to start sequence number */
- txq->read_ptr = wr_ptr;
- txq->write_ptr = wr_ptr;
-
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
-
- iwl_free_resp(hcmd);
- return qid;
-
-error_free_resp:
- iwl_free_resp(hcmd);
- iwl_pcie_gen2_txq_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
- __le16 flags, u8 sta_id, u8 tid,
- int cmd_id, int size,
- unsigned int timeout)
-{
- struct iwl_txq *txq = NULL;
- struct iwl_tx_queue_cfg_cmd cmd = {
- .flags = flags,
- .sta_id = sta_id,
- .tid = tid,
- };
- struct iwl_host_cmd hcmd = {
- .id = cmd_id,
- .len = { sizeof(cmd) },
- .data = { &cmd, },
- .flags = CMD_WANT_SKB,
- };
- int ret;
-
- ret = iwl_trans_pcie_dyn_txq_alloc_dma(trans, &txq, size, timeout);
- if (ret)
- return ret;
-
- cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
-
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- goto error;
-
- return iwl_trans_pcie_txq_alloc_response(trans, txq, &hcmd);
-
-error:
- iwl_pcie_gen2_txq_free_memory(trans, txq);
- return ret;
-}
-
-void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue)
-{
- if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", queue))
- return;
-
- /*
- * Upon HW Rfkill - we stop the device, and then stop the queues
- * in the op_mode. Just for the sake of the simplicity of the op_mode,
- * allow the op_mode to call txq_disable after it already called
- * stop_device.
- */
- if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
- WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
- "queue %d not used", queue);
- return;
- }
-
- iwl_pcie_gen2_txq_unmap(trans, queue);
-
- iwl_pcie_gen2_txq_free_memory(trans, trans->txqs.txq[queue]);
- trans->txqs.txq[queue] = NULL;
-
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-}
-
-void iwl_pcie_gen2_tx_free(struct iwl_trans *trans)
-{
- int i;
-
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
- if (!trans->txqs.txq[i])
- continue;
-
- iwl_pcie_gen2_txq_free(trans, i);
- }
-}
-
-int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size)
-{
- struct iwl_txq *queue;
- int ret;
-
- /* alloc and init the tx queue */
- if (!trans->txqs.txq[txq_id]) {
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- IWL_ERR(trans, "Not enough memory for tx queue\n");
- return -ENOMEM;
- }
- trans->txqs.txq[txq_id] = queue;
- ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- } else {
- queue = trans->txqs.txq[txq_id];
- }
-
- ret = iwl_pcie_txq_init(trans, queue, queue_size,
- (txq_id == trans->txqs.cmd.q_id));
- if (ret) {
- IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- trans->txqs.txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans->txqs.queue_used);
-
- return 0;
-
-error:
- iwl_pcie_gen2_tx_free(trans);
- return ret;
-}
-
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index eb396c06b7fb..966be5689d63 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -77,9 +77,6 @@
#include "internal.h"
#include "fw/api/tx.h"
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
* DMA services
*
@@ -102,60 +99,6 @@
*
***************************************************/
-int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
-{
- unsigned int max;
- unsigned int used;
-
- /*
- * To avoid ambiguity between empty and completely full queues, there
- * should always be less than max_tfd_queue_size elements in the queue.
- * If q->n_window is smaller than max_tfd_queue_size, there is no need
- * to reserve any queue entries for this purpose.
- */
- if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
- max = q->n_window;
- else
- max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
-
- /*
- * max_tfd_queue_size is a power of 2, so the following is equivalent to
- * modulo by max_tfd_queue_size and is well defined.
- */
- used = (q->write_ptr - q->read_ptr) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- if (WARN_ON(used > max))
- return 0;
-
- return max - used;
-}
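
A standalone example of the occupancy arithmetic above: because max_tfd_queue_size is a power of two, (write - read) & (size - 1) counts used slots correctly even when the write pointer has wrapped past the read pointer.

#include <stdio.h>

int main(void)
{
        unsigned int size = 256, r = 250, w = 4;        /* wrapped ring */
        unsigned int used = (w - r) & (size - 1);

        printf("used=%u free=%u\n", used, (size - 1) - used);   /* 10 245 */
        return 0;
}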
-
-/*
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-{
- q->n_window = slots_num;
-
- /* slots_num must be a power of two, otherwise
- * iwl_pcie_get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = 0;
- q->read_ptr = 0;
-
- return 0;
-}
int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
struct iwl_dma_ptr *ptr, size_t size)
@@ -180,99 +123,6 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
memset(ptr, 0, sizeof(*ptr));
}
-static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
-{
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans *trans = txq->trans;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->read_ptr == txq->write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- iwl_trans_pcie_log_scd_error(trans, txq);
-
- iwl_force_nmi(trans);
-}
-
-/*
- * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int write_ptr = txq->write_ptr;
- int txq_id = txq->id;
- u8 sec_ctl = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
- u8 sta_id = tx_cmd->sta_id;
-
- scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
-
- sec_ctl = tx_cmd->sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += IEEE80211_CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += IEEE80211_TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
- break;
- }
- if (trans_pcie->bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
- return;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
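
A sketch of the gen1 byte-count entry packing above, assuming a device with bc_table_dword set (so the length is stored in dwords) and purely illustrative values:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        uint16_t len = DIV_ROUND_UP(1230, 4);   /* bytes -> dwords: 308 */
        uint16_t bc_ent = len | (7 << 12);      /* sta_id 7 in top 4 bits */

        printf("0x%04x\n", bc_ent);             /* 0x7134 */
        return 0;
}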
-
-static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwl_trans_pcie *trans_pcie =
- IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
- int txq_id = txq->id;
- int read_ptr = txq->read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != trans->txqs.cmd.q_id)
- sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
-}
-
/*
* iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
*/
@@ -339,35 +189,6 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
-static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
- void *_tfd, u8 idx)
-{
-
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
- struct iwl_tfh_tb *tb = &tfd->tbs[idx];
-
- return (dma_addr_t)(le64_to_cpu(tb->addr));
- } else {
- struct iwl_tfd *tfd = _tfd;
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- dma_addr_t hi_len;
-
- if (sizeof(dma_addr_t) <= sizeof(u32))
- return addr;
-
- hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
-
- /*
- * shift by 16 twice to avoid warnings on 32-bit
- * (where this code never runs anyway due to the
- * if statement above)
- */
- return addr | ((hi_len << 16) << 16);
- }
-}
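
A standalone sketch of the 36-bit address reassembly above; the double 16-bit shift only exists to silence a shift-count warning on 32-bit builds, where that branch is unreachable because dma_addr_t is 32 bits wide.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t lo = 0x89abcdef;
        uint16_t hi_n_len = 0x1235;     /* low 4 bits = addr bits 35..32 */
        uint64_t hi = hi_n_len & 0xF;

        printf("0x%llx\n",
               (unsigned long long)(lo | ((hi << 16) << 16)));  /* 0x589abcdef */
        return 0;
}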
-
static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
u8 idx, dma_addr_t addr, u16 len)
{
@@ -384,67 +205,6 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
tfd_fh->num_tbs = idx + 1;
}
-static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
-{
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd = _tfd;
-
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
- } else {
- struct iwl_tfd *tfd = _tfd;
-
- return tfd->num_tbs & 0x1f;
- }
-}
-
-static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int i, num_tbs;
- void *tfd = iwl_pcie_get_tfd(trans, txq, index);
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans_pcie->max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- /* @todo issue fatal error, this is quite a serious situation */
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
-
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
- iwl_pcie_tfd_tb_get_len(trans, tfd, i),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- iwl_pcie_tfd_tb_get_addr(trans, tfd,
- i),
- iwl_pcie_tfd_tb_get_len(trans, tfd,
- i),
- DMA_TO_DEVICE);
- }
-
- meta->tbs = 0;
-
- if (trans->trans_cfg->use_tfh) {
- struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- } else {
- struct iwl_tfd *tfd_fh = (void *)tfd;
-
- tfd_fh->num_tbs = 0;
- }
-
-}
-
/*
* iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
* @trans - transport private data
@@ -460,14 +220,14 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
* idx is bounded by n_window
*/
int rd_ptr = txq->read_ptr;
- int idx = iwl_pcie_get_cmd_index(txq, rd_ptr);
+ int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
lockdep_assert_held(&txq->lock);
/* We have only q->n_window txq->entries, but we use
* TFD_QUEUE_SIZE_MAX tfds
*/
- iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
/* free SKB */
if (txq->entries) {
@@ -489,21 +249,20 @@ void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *tfd;
u32 num_tbs;
- tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;
+ tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
if (reset)
- memset(tfd, 0, trans_pcie->tfd_size);
+ memset(tfd, 0, trans->txqs.tfd.size);
- num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
/* Each TFD can point to a maximum max_tbs Tx buffers */
- if (num_tbs >= trans_pcie->max_tbs) {
+ if (num_tbs >= trans->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans_pcie->max_tbs);
+ trans->txqs.tfd.max_tbs);
return -EINVAL;
}
@@ -516,126 +275,6 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return num_tbs;
}
-int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- size_t tfd_sz = trans_pcie->tfd_size *
- trans->trans_cfg->base_params->max_tfd_queue_size;
- size_t tb0_buf_sz;
- int i;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- if (trans->trans_cfg->use_tfh)
- tfd_sz = trans_pcie->tfd_size * slots_num;
-
- timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
- txq->trans = trans;
-
- txq->n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_txq_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->dma_addr, GFP_KERNEL);
- if (!txq->tfds)
- goto error;
-
- BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));
-
- tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
-
- txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
- &txq->first_tb_dma,
- GFP_KERNEL);
- if (!txq->first_tb_bufs)
- goto err_free_tfds;
-
- return 0;
-err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
-error:
- if (txq->entries && cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-}
-
-int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
- int slots_num, bool cmd_queue)
-{
- int ret;
- u32 tfd_queue_max_size =
- trans->trans_cfg->base_params->max_tfd_queue_size;
-
- txq->need_update = false;
-
- /* max_tfd_queue_size must be a power of two, otherwise
- * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
- if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
- "Max tfd queue size must be a power of two, but is %d",
- tfd_queue_max_size))
- return -EINVAL;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
-
- if (cmd_queue) {
- static struct lock_class_key iwl_pcie_cmd_queue_lock_class;
-
- lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
- }
-
- __skb_queue_head_init(&txq->overflow_q);
-
- return 0;
-}
-
-void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
- struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *next;
-
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
- next = *page_ptr;
- *page_ptr = NULL;
-
- while (next) {
- struct page *tmp = next;
-
- next = *(void **)(page_address(next) + PAGE_SIZE -
- sizeof(void *));
- __free_page(tmp);
- }
-}
-
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -671,10 +310,10 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(trans_pcie, skb);
+ iwl_txq_free_tso_page(trans, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
@@ -708,7 +347,6 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
*/
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -728,7 +366,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans_pcie->tfd_size *
+ trans->txqs.tfd.size *
trans->trans_cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
@@ -774,7 +412,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans_pcie->scd_bc_tbls.dma >> 10);
+ trans->txqs.scd_bc_tbls.dma >> 10);
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
@@ -939,7 +577,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
- iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
+ iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}
/*
@@ -965,7 +603,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -1000,8 +638,8 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_pcie_txq_alloc(trans, trans->txqs.txq[txq_id],
- slots_num, cmd_queue);
+ ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
+ cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
@@ -1053,8 +691,8 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
- ret = iwl_pcie_txq_init(trans, trans->txqs.txq[txq_id],
- slots_num, cmd_queue);
+ ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
+ cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
goto error;
@@ -1111,10 +749,9 @@ static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans->txqs.txq[txq_id];
- int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
- int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+ int tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+ int read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
@@ -1137,9 +774,9 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/* Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
- last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
+ last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
- if (!iwl_queue_used(txq, last_to_free)) {
+ if (!iwl_txq_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free,
@@ -1153,28 +790,28 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
for (;
read_ptr != tfd_num;
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
- read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
+ read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
- iwl_pcie_free_tso_page(trans_pcie, skb);
+ iwl_txq_free_tso_page(trans, skb);
__skb_queue_tail(skbs, skb);
txq->entries[read_ptr].skb = NULL;
if (!trans->trans_cfg->use_tfh)
- iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
+ iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
iwl_pcie_txq_free_tfd(trans, txq);
}
iwl_pcie_txq_progress(txq);
- if (iwl_queue_space(trans, txq) > txq->low_mark &&
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans->txqs.queue_stopped)) {
struct sk_buff_head overflow_skbs;
@@ -1204,17 +841,17 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct iwl_device_tx_cmd *dev_cmd_ptr;
dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans_pcie->dev_cmd_offs);
+ trans->txqs.dev_cmd_offs);
/*
* Note that we can very well be overflowing again.
- * In that case, iwl_queue_space will be small again
+ * In that case, iwl_txq_space will be small again
* and we won't wake mac80211's queue.
*/
iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
}
- if (iwl_queue_space(trans, txq) > txq->low_mark)
+ if (iwl_txq_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
spin_lock_bh(&txq->lock);
@@ -1295,11 +932,11 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
lockdep_assert_held(&txq->lock);
- idx = iwl_pcie_get_cmd_index(txq, idx);
- r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
+ idx = iwl_txq_get_cmd_index(txq, idx);
+ r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
- (!iwl_queue_used(txq, idx))) {
+ (!iwl_txq_used(txq, idx))) {
WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
@@ -1308,9 +945,9 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
return;
}
- for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
- r = iwl_queue_inc_wrap(trans, r)) {
- txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
+ for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
+ r = iwl_txq_inc_wrap(trans, r)) {
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
@@ -1627,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
@@ -1636,7 +1273,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
goto free_dup_buf;
}
- idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
@@ -1719,7 +1356,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
iwl_pcie_txq_build_tfd(trans, txq,
- iwl_pcie_get_first_tb_dma(txq, idx),
+ iwl_txq_get_first_tb_dma(txq, idx),
tb0_size, true);
/* map first command fragment, if any remains */
@@ -1729,8 +1366,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
copy_size - tb0_size,
DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1753,8 +1390,8 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
phys_addr = dma_map_single(trans->dev, (void *)data,
cmdlen[i], DMA_TO_DEVICE);
if (dma_mapping_error(trans->dev, phys_addr)) {
- iwl_pcie_tfd_unmap(trans, out_meta, txq,
- txq->write_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
+ txq->write_ptr);
idx = -ENOMEM;
goto out;
}
@@ -1783,7 +1420,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
@@ -1828,13 +1465,13 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
- cmd_index = iwl_pcie_get_cmd_index(txq, index);
+ cmd_index = iwl_txq_get_cmd_index(txq, index);
cmd = txq->entries[cmd_index].cmd;
meta = &txq->entries[cmd_index].meta;
group_id = cmd->hdr.group_id;
cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
- iwl_pcie_tfd_unmap(trans, meta, txq, index);
+ iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
@@ -2055,51 +1692,6 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
- struct page **page_ptr;
-
- page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-
- if (WARN_ON(*page_ptr))
- return NULL;
-
- if (!p->page)
- goto alloc;
-
- /*
- * Check if there's enough room on this page
- *
- * Note that we put a page chaining pointer *last* in the
- * page - we need it somewhere, and if it's there then we
- * avoid DMA mapping the last bits of the page which may
- * trigger the 32-bit boundary hardware bug.
- *
- * (see also get_workaround_page() in tx-gen2.c)
- */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
- sizeof(void *))
- goto out;
-
- /* We don't have enough room on this page, get a new one. */
- __free_page(p->page);
-
-alloc:
- p->page = alloc_page(GFP_ATOMIC);
- if (!p->page)
- return NULL;
- p->pos = page_address(p->page);
- /* set the chaining pointer to NULL */
- *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
-out:
- *page_ptr = p->page;
- get_page(p->page);
- return p;
-}
-
static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
bool ipv6, unsigned int len)
{
@@ -2142,8 +1734,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
IEEE80211_CCMP_HDR_LEN : 0;
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
- trans_pcie->tfd_size,
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
+ trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
@@ -2352,7 +1944,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -2365,15 +1957,15 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
- if (iwl_queue_space(trans, txq) < txq->high_mark) {
- iwl_stop_queue(trans, txq);
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
/* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_queue_space(trans, txq) < 3)) {
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans_pcie->dev_cmd_offs);
+ trans->txqs.dev_cmd_offs);
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -2402,7 +1994,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
INDEX_TO_SEQ(txq->write_ptr)));
- tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
+ tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
@@ -2452,9 +2044,8 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
trace_iwlwifi_dev_tx(trans->dev, skb,
- iwl_pcie_get_tfd(trans, txq,
- txq->write_ptr),
- trans_pcie->tfd_size,
+ iwl_txq_get_tfd(trans, txq, txq->write_ptr),
+ trans->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -2486,10 +2077,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* building the A-MSDU might have changed this data, so memcpy it now */
memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
- tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
+ tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
- iwl_pcie_tfd_get_num_tbs(trans, tfd));
+ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
+ iwl_txq_gen1_tfd_get_num_tbs(trans,
+ tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
@@ -2509,7 +2101,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
@@ -2520,7 +2112,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_unlock(&txq->lock);
return 0;
out_err:
- iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
+ iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
spin_unlock(&txq->lock);
return -1;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
new file mode 100644
index 000000000000..af0b27a68d84
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
@@ -0,0 +1,1529 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#include <net/tso.h>
+#include <linux/tcp.h>
+
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "fw/api/tx.h"
+#include "queue/tx.h"
+#include "iwl-fh.h"
+#include "iwl-scd.h"
+#include <linux/dmapool.h>
+
+/*
+ * iwl_txq_gen2_tx_stop - Stop all Tx DMA channels
+ */
+void iwl_txq_gen2_tx_stop(struct iwl_trans *trans)
+{
+ int txq_id;
+
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans->txqs.queue_stopped, 0,
+ sizeof(trans->txqs.queue_stopped));
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < ARRAY_SIZE(trans->txqs.txq); txq_id++) {
+ if (!trans->txqs.txq[txq_id])
+ continue;
+ iwl_txq_gen2_unmap(trans, txq_id);
+ }
+}
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 will give the number of chunks to fetch
+ * to SRAM - 0 for one chunk, 1 for 2, and so on.
+ * If, for example, the TFD contains only 3 TBs, then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched.
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
+ /* Starting from AX210, the HW expects bytes */
+ WARN_ON(trans->txqs.bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
+ scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
+ } else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
+ /* Before AX210, the HW expects DW */
+ WARN_ON(!trans->txqs.bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
+}
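
A minimal standalone sketch of the encoding above (illustrative only: the helper name and the values in main() are assumptions; the field widths follow the gen3/gen2 branches):

#include <stdint.h>
#include <stdio.h>

/* Sketch: pack a TFD byte count and the number of 64-byte SRAM fetch
 * chunks into one 16-bit byte-count-table entry. gen3 (AX210+) keeps
 * the length in bytes in a 14-bit field; gen2 stores dwords in a
 * 12-bit field, mirroring the two branches above.
 */
static uint16_t bc_entry(uint16_t len, unsigned int filled_tfd_size, int gen3)
{
	/* DIV_ROUND_UP(filled_tfd_size, 64) - 1: 0 means one chunk */
	unsigned int num_fetch_chunks = (filled_tfd_size + 63) / 64 - 1;

	if (gen3)
		return len | (num_fetch_chunks << 14);		/* bytes */
	return ((len + 3) / 4) | (num_fetch_chunks << 12);	/* dwords */
}

int main(void)
{
	/* 3 TBs -> 32 bytes of TFD filled -> one chunk -> chunk field 0 */
	printf("0x%04x\n", bc_entry(1500, 32, 1));
	return 0;
}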
+
+/*
+ * iwl_txq_inc_wr_ptr - Send new write index to hardware
+ */
+void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
+
+static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
+
+ if (num_tbs > trans->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ tfd->num_tbs = 0;
+}
+
+void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+
+ lockdep_assert_held(&txq->lock);
+
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, idx));
+
+ /* free SKB */
+ if (txq->entries) {
+ struct sk_buff *skb;
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+ }
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
+ dma_addr_t addr, u16 len)
+{
+ int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
+ struct iwl_tfh_tb *tb;
+
+ /*
+ * Only WARN here so we know about the issue; returning an error
+ * would mess up our unmap path, because not every caller currently
+ * checks for errors returned from this function - it can only
+ * return an error if there's no more space, and so when we know
+ * there is enough we don't always check ...
+ */
+ WARN(iwl_txq_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum of max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
+
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct page **page_ptr;
+ struct page *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ /* set the chaining pointer to the previous page, if there is one */
+ *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB ends on or crosses a 4 GiB (2^32)
+ * boundary, then the next TB may be accessed with the wrong
+ * address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+ IWL_WARN(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys, (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
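
The workaround above hinges on a single predicate, iwl_txq_crosses_4g_boundary() (added in queue/tx.h later in this patch). A minimal sketch of the check, with the addition done in 64 bits so a 32-bit dma_addr_t cannot wrap silently (helper name and test values are assumptions):

#include <stdint.h>
#include <stdio.h>

/* Sketch: a buffer is unsafe for the FH if its first and last bytes
 * differ in their upper 32 address bits, i.e. the TB crosses or ends
 * on a 2^32 boundary.
 */
static int crosses_4g_boundary(uint64_t phys, uint16_t len)
{
	return (phys >> 32) != ((phys + len) >> 32);
}

int main(void)
{
	printf("%d\n", crosses_4g_boundary(0xfffffe00ULL, 0x400));  /* 1 */
	printf("%d\n", crosses_4g_boundary(0x100000000ULL, 0x400)); /* 0 */
	return 0;
}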
+
+#ifdef CONFIG_INET
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb)
+{
+ struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
+ struct page **page_ptr;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
+
+ if (!p->page)
+ goto alloc;
+
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() earlier in this file)
+ */
+ if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+ sizeof(void *))
+ goto out;
+
+ /* We don't have enough room on this page, get a new one. */
+ __free_page(p->page);
+
+alloc:
+ p->page = alloc_page(GFP_ATOMIC);
+ if (!p->page)
+ return NULL;
+ p->pos = page_address(p->page);
+ /* set the chaining pointer to NULL */
+ *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+ *page_ptr = p->page;
+ get_page(p->page);
+ return p;
+}
+#endif
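
The page-chaining scheme shared by get_page_hdr() and get_workaround_page() - a next-page pointer parked in the last sizeof(void *) bytes of each page - can be sketched in isolation (hypothetical helper names, plain malloc standing in for alloc_page):

#include <stdlib.h>

#define PG_SIZE 4096	/* stand-in for PAGE_SIZE */

/* Each page reserves its last pointer-sized slot as a chain link, so
 * only PG_SIZE - sizeof(void *) bytes are ever handed out (and DMA
 * mapped). Freeing walks the chain the same way
 * iwl_txq_free_tso_page() does later in this file.
 */
static void *chain_alloc(void **head)
{
	void *pg = malloc(PG_SIZE);

	if (!pg)
		return NULL;
	*(void **)((char *)pg + PG_SIZE - sizeof(void *)) = *head;
	*head = pg;
	return pg;
}

static void chain_free(void *head)
{
	while (head) {
		void *next = *(void **)((char *)head + PG_SIZE -
					sizeof(void *));
		free(head);
		head = next;
	}
}

int main(void)
{
	void *head = NULL;

	chain_alloc(&head);
	chain_alloc(&head);	/* second page links back to the first */
	chain_free(head);
	return 0;
}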
+
+static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd, int start_len,
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ u16 length, amsdu_pad;
+ u8 *start_hdr;
+ struct iwl_tso_hdr_page *hdr_page;
+ struct tso_t tso;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+ &dev_cmd->hdr, start_len, 0);
+
+ ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
+
+ /* Our device supports up to 9 segments, so it will fit in one page */
+ hdr_page = get_page_hdr(trans, hdr_room, skb);
+ if (!hdr_page)
+ return -ENOMEM;
+
+ start_hdr = hdr_page->pos;
+
+ /*
+ * Pull the ieee80211 header to be able to use TSO core,
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left = min_t(unsigned int, mss, total_len);
+ struct sk_buff *csum_skb = NULL;
+ unsigned int tb_len;
+ dma_addr_t tb_phys;
+ u8 *subf_hdrs_start = hdr_page->pos;
+
+ total_len -= data_left;
+
+ memset(hdr_page->pos, 0, amsdu_pad);
+ hdr_page->pos += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
+ hdr_page->pos += ETH_ALEN;
+ ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
+ hdr_page->pos += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
+ hdr_page->pos += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well, which will be considered
+ * part of the MAC header.
+ */
+ tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+
+ hdr_page->pos += snap_ip_tcp_hdrlen;
+
+ tb_len = hdr_page->pos - start_hdr;
+ tb_phys = dma_map_single(trans->dev, start_hdr,
+ tb_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
+ dev_kfree_skb(csum_skb);
+ goto out_err;
+ }
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = hdr_page->pos;
+
+ /* put the payload */
+ while (data_left) {
+ int ret;
+
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = dma_map_single(trans->dev, tso.data,
+ tb_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL);
+ if (ret) {
+ dev_kfree_skb(csum_skb);
+ goto out_err;
+ }
+
+ data_left -= tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
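
Each A-MSDU subframe must start 4-byte aligned; the amsdu_pad arithmetic above derives the padding the next subframe needs from the current subframe's length. A worked sketch with illustrative header sizes (all values here are assumptions):

#include <stdio.h>

int main(void)
{
	/* assumed sizes: ethhdr 14, SNAP 8, IPv4 20, TCP 20 */
	unsigned int snap_ip_tcp = 8 + 20 + 20;
	unsigned int data_left = 1357;		/* example MSS-sized chunk */
	unsigned int subframe = 14 + snap_ip_tcp + data_left;
	/* same "(4 - len) & 0x3" expression as in the loop above */
	unsigned int pad = (4 - subframe) & 0x3;

	printf("subframe %u bytes -> pad next subframe by %u\n",
	       subframe, pad);	/* 1419 -> 1 */
	return 0;
}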
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len;
+ void *tb1_addr;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
+ hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
+
+ if (!fragsz)
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len,
+ bool pad)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len, tb1_len, tb2_len;
+ void *tb1_addr;
+ struct sk_buff *frag;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL);
+ if (ret)
+ goto out_err;
+ }
+
+ if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+ goto out_err;
+
+ skb_walk_frags(skb, frag) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL);
+ if (ret)
+ goto out_err;
+ if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static
+struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /*
+ * Only build A-MSDUs here if we are doing so via GSO; otherwise it
+ * may be an A-MSDU for other reasons, e.g. NAN or an A-MSDU having
+ * been built in the higher layers already.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size)
+ return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+ return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len, !amsdu);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
+{
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
+ max = q->n_window;
+ else
+ max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
+
+ /*
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
+}
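
The occupancy computation in iwl_txq_space() depends on max_tfd_queue_size being a power of two, so the pointer difference can simply be masked instead of taken modulo. A standalone sketch with an assumed 256-entry ring:

#include <stdio.h>

#define RING_SIZE 256u	/* assumed power-of-two queue size */

int main(void)
{
	/* write pointer has wrapped past the end of the ring */
	unsigned int read = 250, write = 4;
	/* (write - read) & (size - 1) == used slots, as above */
	unsigned int used = (write - read) & (RING_SIZE - 1);

	/* one slot stays reserved to tell "full" from "empty" */
	printf("used=%u free=%u\n", used, RING_SIZE - 1 - used); /* 10, 245 */
	return 0;
}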
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ u16 cmd_len;
+ int idx;
+ void *tfd;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
+
+ /* don't put the packet on the ring, if there is no room */
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans->txqs.dev_cmd_offs);
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[idx].skb = skb;
+ txq->entries[idx].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(idx)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[idx].meta;
+ out_meta->flags = 0;
+
+ tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(trans, tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_txq *txq = trans->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans->txqs.cmd.q_id) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (WARN_ON_ONCE(!skb))
+ continue;
+
+ iwl_txq_free_tso_page(trans, skb);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock_bh(&txq->lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_txq_gen2_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BDs.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_txq *txq;
+ int i;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans->txqs.txq[txq_id];
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_txq_gen2_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans->txqs.cmd.q_id)
+ for (i = 0; i < txq->n_window; i++) {
+ kfree_sensitive(txq->entries[i].cmd);
+ kfree_sensitive(txq->entries[i].free_buf);
+ }
+ del_timer_sync(&txq->stuck_timer);
+
+ iwl_txq_gen2_free_memory(trans, txq);
+
+ trans->txqs.txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans->txqs.queue_used);
+}
+
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+ q->n_window = slots_num;
+
+ /* slots_num must be a power-of-two size, otherwise
+ * iwl_txq_get_cmd_index is broken. */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
+
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue)
+{
+ int ret;
+ u32 tfd_queue_max_size =
+ trans->trans_cfg->base_params->max_tfd_queue_size;
+
+ txq->need_update = false;
+
+ /* max_tfd_queue_size must be a power-of-two size, otherwise
+ * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(txq, slots_num);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+
+ if (cmd_queue) {
+ static struct lock_class_key iwl_txq_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
+ }
+
+ __skb_queue_head_init(&txq->overflow_q);
+
+ return 0;
+}
+
+void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
+{
+ struct page **page_ptr;
+ struct page *next;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct page *tmp = next;
+
+ next = *(void **)(page_address(next) + PAGE_SIZE -
+ sizeof(void *));
+ __free_page(tmp);
+ }
+}
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
+
+ if (trans->trans_cfg->use_tfh) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
+ /* TODO: access new SCD registers and dump them */
+ return;
+ }
+
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+}
+
+static void iwl_txq_stuck_timer(struct timer_list *t)
+{
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+ struct iwl_trans *trans = txq->trans;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->read_ptr == txq->write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ iwl_txq_log_scd_error(trans, txq);
+
+ iwl_force_nmi(trans);
+}
+
+int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue)
+{
+ size_t tfd_sz = trans->txqs.tfd.size *
+ trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t tb0_buf_sz;
+ int i;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ if (trans->trans_cfg->use_tfh)
+ tfd_sz = trans->txqs.tfd.size * slots_num;
+
+ timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
+ txq->trans = trans;
+
+ txq->n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->dma_addr, GFP_KERNEL);
+ if (!txq->tfds)
+ goto error;
+
+ BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
+
+ tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
+
+ txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+ &txq->first_tb_dma,
+ GFP_KERNEL);
+ if (!txq->first_tb_bufs)
+ goto err_free_tfds;
+
+ return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+error:
+ if (txq->entries && cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
+}
+
+static int iwl_txq_dyn_alloc_dma(struct iwl_trans *trans,
+ struct iwl_txq **intxq, int size,
+ unsigned int timeout)
+{
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
+ int ret;
+
+ WARN_ON(!trans->txqs.bc_tbl_size);
+
+ bc_tbl_size = trans->txqs.bc_tbl_size;
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return -EINVAL;
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return -ENOMEM;
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return -ENOMEM;
+ }
+
+ ret = iwl_txq_alloc(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_txq_init(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ *intxq = txq;
+ return 0;
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd)
+{
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ int ret, qid;
+ u32 wr_ptr;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+ sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error_free_resp;
+ }
+
+ rsp = (void *)hcmd->resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+ wr_ptr = le16_to_cpu(rsp->write_pointer);
+
+ if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (test_and_set_bit(qid, trans->txqs.queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ txq->id = qid;
+ trans->txqs.txq[qid] = txq;
+ wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = wr_ptr;
+ txq->write_ptr = wr_ptr;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ iwl_free_resp(hcmd);
+ return qid;
+
+error_free_resp:
+ iwl_free_resp(hcmd);
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, __le16 flags, u8 sta_id, u8 tid,
+ int cmd_id, int size, unsigned int timeout)
+{
+ struct iwl_txq *txq = NULL;
+ struct iwl_tx_queue_cfg_cmd cmd = {
+ .flags = flags,
+ .sta_id = sta_id,
+ .tid = tid,
+ };
+ struct iwl_host_cmd hcmd = {
+ .id = cmd_id,
+ .len = { sizeof(cmd) },
+ .data = { &cmd, },
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ ret = iwl_txq_dyn_alloc_dma(trans, &txq, size, timeout);
+ if (ret)
+ return ret;
+
+ cmd.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ return iwl_txq_alloc_response(trans, txq, &hcmd);
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
+{
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
+ /*
+ * Upon HW rfkill we stop the device, and then stop the queues
+ * in the op_mode. For the sake of simplicity in the op_mode,
+ * allow the op_mode to call txq_disable after it has already called
+ * stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_txq_gen2_unmap(trans, queue);
+
+ iwl_txq_gen2_free_memory(trans, trans->txqs.txq[queue]);
+
+ trans->txqs.txq[queue] = NULL;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
+{
+ int i;
+
+ memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
+ if (!trans->txqs.txq[i])
+ continue;
+
+ iwl_txq_gen2_free(trans, i);
+ }
+}
+
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
+{
+ struct iwl_txq *queue;
+ int ret;
+
+ /* alloc and init the tx queue */
+ if (!trans->txqs.txq[txq_id]) {
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ IWL_ERR(trans, "Not enough memory for tx queue\n");
+ return -ENOMEM;
+ }
+ trans->txqs.txq[txq_id] = queue;
+ ret = iwl_txq_alloc(trans, queue, queue_size, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ queue = trans->txqs.txq[txq_id];
+ }
+
+ ret = iwl_txq_init(trans, queue, queue_size,
+ (txq_id == trans->txqs.cmd.q_id));
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans->txqs.queue_used);
+
+ return 0;
+
+error:
+ iwl_txq_gen2_tx_free(trans);
+ return ret;
+}
+
+static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+ dma_addr_t addr;
+ dma_addr_t hi_len;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return (dma_addr_t)(le64_to_cpu(tb->addr));
+ }
+
+ tfd = _tfd;
+ tb = &tfd->tbs[idx];
+ addr = get_unaligned_le32(&tb->lo);
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+}
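
Legacy (gen1) TBs split a 36-bit DMA address across a 32-bit 'lo' word and the low nibble of 'hi_n_len' (whose upper 12 bits carry the length). A sketch of the round trip, mirroring the double 16-bit shift above (struct and helper names are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct tb_sketch {
	uint32_t lo;		/* address bits 0..31 */
	uint16_t hi_n_len;	/* bits 0..3: addr 32..35, bits 4..15: len */
};

static void tb_pack(struct tb_sketch *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | (len << 4));
}

static uint64_t tb_unpack_addr(const struct tb_sketch *tb)
{
	uint64_t hi = tb->hi_n_len & 0xF;

	/* two 16-bit shifts, as above, to stay warning-free on 32-bit */
	return tb->lo | ((hi << 16) << 16);
}

int main(void)
{
	struct tb_sketch tb;

	tb_pack(&tb, 0xBC1234567ULL, 128);
	printf("0x%llx\n", (unsigned long long)tb_unpack_addr(&tb));
	return 0;
}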
+
+void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index)
+{
+ int i, num_tbs;
+ void *tfd = iwl_txq_get_tfd(trans, txq, index);
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
+
+ if (num_tbs > trans->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(trans,
+ tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(trans,
+ tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ }
+
+ meta->tbs = 0;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ } else {
+ struct iwl_tfd *tfd_fh = (void *)tfd;
+
+ tfd_fh->num_tbs = 0;
+ }
+}
+
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*
+ * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
+ u8 sec_ctl = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
+
+ scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
+
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += IEEE80211_CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += IEEE80211_TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+ break;
+ }
+ if (trans->txqs.bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
+void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
+ int txq_id = txq->id;
+ int read_ptr = txq->read_ptr;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans->txqs.cmd.q_id)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ bc_ent;
+}
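
iwl_txq_gen1_update_byte_cnt_tbl() and iwl_txq_gen1_inval_byte_cnt_tbl() both mirror entries for the first TFD_QUEUE_SIZE_BC_DUP slots past the nominal end of the table, so the scheduler can prefetch across the ring wrap. A sketch of just that mirroring (sizes here are illustrative stand-ins, not the driver's constants):

#include <stdint.h>
#include <stdio.h>

#define Q_MAX 256	/* stands in for TFD_QUEUE_SIZE_MAX */
#define Q_DUP 64	/* stands in for TFD_QUEUE_SIZE_BC_DUP */

static uint16_t tfd_offset[Q_MAX + Q_DUP];

/* Writes near the start of the ring are duplicated past its end, so a
 * hardware read of [idx .. idx + n] never has to wrap, matching the
 * two tfd_offset[] stores above.
 */
static void set_bc(unsigned int idx, uint16_t bc_ent)
{
	tfd_offset[idx] = bc_ent;
	if (idx < Q_DUP)
		tfd_offset[Q_MAX + idx] = bc_ent;
}

int main(void)
{
	set_bc(3, 0x123);
	printf("%#x %#x\n", tfd_offset[3], tfd_offset[Q_MAX + 3]);
	return 0;
}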
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
new file mode 100644
index 000000000000..c67577dfa21d
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
@@ -0,0 +1,230 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2020 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <linuxwifi@intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2020 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __iwl_trans_queue_tx_h__
+#define __iwl_trans_queue_tx_h__
+#include "iwl-fh.h"
+#include "fw/api/tx.h"
+
+struct iwl_tso_hdr_page {
+ struct page *page;
+ u8 *pos;
+};
+
+static inline dma_addr_t
+iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->first_tb_dma +
+ sizeof(struct iwl_pcie_first_tb_buf) * idx;
+}
+
+static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
+
+static inline void iwl_wake_queue(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
+ }
+}
+
+static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq, int idx)
+{
+ if (trans->trans_cfg->use_tfh)
+ idx = iwl_txq_get_cmd_index(txq, idx);
+
+ return txq->tfds + trans->txqs.tfd.size * idx;
+}
+
+int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue);
+/*
+ * We need this inline in case dma_addr_t is only 32 bits - since the
+ * hardware is always 64-bit, the 4 GiB boundary issue can still occur
+ * in that case, so use u64 for 'phys' here to force the addition to
+ * be done in 64 bits.
+ */
+static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
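
[Editorial note] A quick illustration of why the u64 widening matters; this is a standalone sketch, not driver code:

#include <stdint.h>
#include <stdbool.h>

/* Doing the add in 64 bits preserves the carry even when
 * dma_addr_t itself is only 32 bits wide. */
static bool crosses_4g(uint64_t phys, uint16_t len)
{
	return (phys >> 32) != ((phys + len) >> 32);
}

/* crosses_4g(0xFFFFFFF0ull, 0x20) is true: the buffer
 * straddles the 4 GiB line although each half fits. */
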
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
+
+static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+ txq->id);
+ }
+}
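
[Editorial note] The stop/wake pair above leans on atomic read-modify-write so each full/not-full transition is reported to the op mode exactly once, even with concurrent callers. A userspace sketch of the same idea using C11 atomics (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool q_stopped;

static void q_stop(void)
{
	/* only the caller that flips false->true notifies */
	if (!atomic_exchange(&q_stopped, true))
		printf("queue full -> op mode stops feeding it\n");
}

static void q_wake(void)
{
	/* only the caller that flips true->false notifies */
	if (atomic_exchange(&q_stopped, false))
		printf("queue drained -> op mode may resume\n");
}

int main(void)
{
	q_stop();
	q_stop();	/* already stopped: no second message */
	q_wake();
	return 0;
}
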
+
+/**
+ * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
+ * @trans: the transport (used for the max TFD queue size)
+ * @index: current index
+ */
+static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
+{
+ return ++index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_txq_dec_wrap - decrement queue index, wrap back to end
+ * @trans: the transport (used for the max TFD queue size)
+ * @index: current index
+ */
+static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
+{
+ return --index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
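
[Editorial note] Both wrap helpers assume max_tfd_queue_size is a power of two, so a mask replaces the modulo and two's-complement arithmetic handles the decrement underflow. A standalone check:

#include <stdio.h>

#define RING 256	/* must be a power of two */

static int inc_wrap(int i) { return (i + 1) & (RING - 1); }
static int dec_wrap(int i) { return (i - 1) & (RING - 1); }

int main(void)
{
	printf("%d\n", inc_wrap(RING - 1));	/* 0: wraps forward */
	printf("%d\n", dec_wrap(0));		/* 255: wraps back */
	return 0;
}
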
+
+static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
+{
+ int index = iwl_txq_get_cmd_index(q, i);
+ int r = iwl_txq_get_cmd_index(q, q->read_ptr);
+ int w = iwl_txq_get_cmd_index(q, q->write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
+}
+
+void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len);
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd);
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans,
+ __le16 flags, u8 sta_id, u8 tid,
+ int cmd_id, int size,
+ unsigned int timeout);
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
+void iwl_txq_gen2_tx_stop(struct iwl_trans *trans);
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
+ bool cmd_queue);
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
+#ifdef CONFIG_INET
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+ struct sk_buff *skb);
+#endif
+static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
+ void *_tfd)
+{
+ struct iwl_tfd *tfd;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ return tfd->num_tbs & 0x1f;
+}
+
+static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+
+ if (trans->trans_cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd = _tfd;
+ struct iwl_tfh_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->tb_len);
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index);
+void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq);
+void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs);
+#endif /* __iwl_trans_queue_tx_h__ */
diff --git a/drivers/net/wireless/intersil/hostap/hostap.h b/drivers/net/wireless/intersil/hostap/hostap.h
index 8130d29c7989..c4b81ff7d7e4 100644
--- a/drivers/net/wireless/intersil/hostap/hostap.h
+++ b/drivers/net/wireless/intersil/hostap/hostap.h
@@ -8,8 +8,10 @@
#include "hostap_wlan.h"
#include "hostap_ap.h"
-static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
- 2447, 2452, 2457, 2462, 2467, 2472, 2484 };
+static const long __maybe_unused freq_list[] = {
+ 2412, 2417, 2422, 2427, 2432, 2437, 2442,
+ 2447, 2452, 2457, 2462, 2467, 2472, 2484
+};
#define FREQ_COUNT ARRAY_SIZE(freq_list)
/* hostap.c */
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 3ec46f48cfde..8bcc1cdcb75b 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -1504,7 +1504,7 @@ static void handle_assoc(local_info_t *local, struct sk_buff *skb,
u16 resp = WLAN_STATUS_SUCCESS;
struct sta_info *sta = NULL;
int send_deauth = 0;
- char *txt = "";
+ char __always_unused *txt = "";
u8 prev_ap[ETH_ALEN];
left = len = skb->len - IEEE80211_MGMT_HDR_LEN;
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index b6c497ce12e1..22cfb6452644 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -320,12 +320,6 @@ static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0,
iface = netdev_priv(dev);
local = iface->local;
- if (in_interrupt()) {
- printk(KERN_DEBUG "%s: hfa384x_cmd called from interrupt "
- "context\n", dev->name);
- return -1;
- }
-
if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN) {
printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n",
dev->name);
@@ -1560,12 +1554,6 @@ static void prism2_hw_reset(struct net_device *dev)
iface = netdev_priv(dev);
local = iface->local;
- if (in_interrupt()) {
- printk(KERN_DEBUG "%s: driver bug - prism2_hw_reset() called "
- "in interrupt context\n", dev->name);
- return;
- }
-
if (local->hw_downloading)
return;
@@ -1803,7 +1791,7 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
struct hfa384x_tx_frame txdesc;
struct hostap_skb_tx_data *meta;
int hdr_len, data_len, idx, res, ret = -1;
- u16 tx_control, fc;
+ u16 tx_control;
iface = netdev_priv(dev);
local = iface->local;
@@ -1826,7 +1814,6 @@ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev)
/* skb->data starts with txdesc->frame_control */
hdr_len = 24;
skb_copy_from_linear_data(skb, &txdesc.frame_control, hdr_len);
- fc = le16_to_cpu(txdesc.frame_control);
if (ieee80211_is_data(txdesc.frame_control) &&
ieee80211_has_a4(txdesc.frame_control) &&
skb->len >= 30) {
@@ -2083,9 +2070,9 @@ static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb)
/* Called only as a tasklet (software IRQ) */
-static void hostap_rx_tasklet(unsigned long data)
+static void hostap_rx_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, rx_tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->rx_list)) != NULL)
@@ -2288,9 +2275,9 @@ static void prism2_tx_ev(local_info_t *local)
/* Called only as a tasklet (software IRQ) */
-static void hostap_sta_tx_exc_tasklet(unsigned long data)
+static void hostap_sta_tx_exc_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, sta_tx_exc_tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) {
@@ -2390,9 +2377,9 @@ static void prism2_txexc(local_info_t *local)
/* Called only as a tasklet (software IRQ) */
-static void hostap_info_tasklet(unsigned long data)
+static void hostap_info_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, info_tasklet);
struct sk_buff *skb;
while ((skb = skb_dequeue(&local->info_list)) != NULL) {
@@ -2469,9 +2456,9 @@ static void prism2_info(local_info_t *local)
/* Called only as a tasklet (software IRQ) */
-static void hostap_bap_tasklet(unsigned long data)
+static void hostap_bap_tasklet(struct tasklet_struct *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_tasklet(local, t, bap_tasklet);
struct net_device *dev = local->dev;
u16 ev;
int frames = 30;
@@ -3183,7 +3170,7 @@ prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
/* Initialize tasklets for handling hardware IRQ related operations
* outside hw IRQ handler */
#define HOSTAP_TASKLET_INIT(q, f, d) \
-do { memset((q), 0, sizeof(*(q))); (q)->func = (f); (q)->data = (d); } \
+do { memset((q), 0, sizeof(*(q))); (q)->func = (void(*)(unsigned long))(f); } \
while (0)
HOSTAP_TASKLET_INIT(&local->bap_tasklet, hostap_bap_tasklet,
(unsigned long) local);
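
[Editorial note] This is the same tasklet conversion applied throughout this series: the unsigned-long cookie and cast give way to tasklet_setup() plus from_tasklet(), which is container_of() on the embedded tasklet_struct. A reduced kernel-context sketch with illustrative names ('my_dev' and its fields are not driver structures):

struct my_dev {
	int rx_events;
	struct tasklet_struct rx_tasklet;
};

/* old: void rx_fn(unsigned long data) plus a cast at init time */
static void rx_fn(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of(t, struct my_dev, rx_tasklet) */
	struct my_dev *d = from_tasklet(d, t, rx_tasklet);

	d->rx_events++;
}

static void my_dev_init(struct my_dev *d)
{
	/* replaces tasklet_init(&d->rx_tasklet, rx_fn, (unsigned long)d) */
	tasklet_setup(&d->rx_tasklet, rx_fn);
}
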
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
index 1ca9731d9b14..514c7b01dbf6 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ioctl.c
@@ -1955,7 +1955,7 @@ static inline int prism2_translate_scan(local_info_t *local,
char *buffer, int buflen)
{
struct hfa384x_hostscan_result *scan;
- int entry, hostscan;
+ int entry;
char *current_ev = buffer;
char *end_buf = buffer + buflen;
struct list_head *ptr;
@@ -1968,7 +1968,6 @@ static inline int prism2_translate_scan(local_info_t *local,
bss->included = 0;
}
- hostscan = local->last_scan_type == PRISM2_HOSTSCAN;
for (entry = 0; entry < local->last_scan_results_count; entry++) {
int found = 0;
scan = &local->last_scan_results[entry];
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 00264a14e52c..0e73a10cc06c 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -1062,9 +1062,9 @@ static void orinoco_rx(struct net_device *dev,
stats->rx_dropped++;
}
-static void orinoco_rx_isr_tasklet(unsigned long data)
+static void orinoco_rx_isr_tasklet(struct tasklet_struct *t)
{
- struct orinoco_private *priv = (struct orinoco_private *) data;
+ struct orinoco_private *priv = from_tasklet(priv, t, rx_tasklet);
struct net_device *dev = priv->ndev;
struct orinoco_rx_data *rx_data, *temp;
struct hermes_rx_descriptor *desc;
@@ -1503,7 +1503,7 @@ void __orinoco_ev_info(struct net_device *dev, struct hermes *hw)
schedule_work(&priv->join_work);
break;
}
- /* fall through */
+ fallthrough;
case HERMES_INQ_HOSTSCAN:
case HERMES_INQ_HOSTSCAN_SYMBOL: {
 /* Result of a scan. Contains information about
@@ -1594,7 +1594,7 @@ void __orinoco_ev_info(struct net_device *dev, struct hermes *hw)
/* Ignore this frame for now */
if (priv->firmware_type == FIRMWARE_TYPE_AGERE)
break;
- /* fall through */
+ fallthrough;
default:
printk(KERN_DEBUG "%s: Unknown information frame received: "
"type 0x%04x, length %d\n", dev->name, type, len);
@@ -2198,8 +2198,7 @@ struct orinoco_private
INIT_WORK(&priv->wevent_work, orinoco_send_wevents);
INIT_LIST_HEAD(&priv->rx_list);
- tasklet_init(&priv->rx_tasklet, orinoco_rx_isr_tasklet,
- (unsigned long) priv);
+ tasklet_setup(&priv->rx_tasklet, orinoco_rx_isr_tasklet);
spin_lock_init(&priv->scan_lock);
INIT_LIST_HEAD(&priv->scan_list);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 11fa38fedd87..b849d27bd741 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -423,13 +423,13 @@ static void ezusb_ctx_complete(struct request_context *ctx)
}
}
-/**
+/*
* ezusb_req_queue_run:
* Description:
* Note: Only one active CTX at any one time, because there's no
* other (reliable) way to match the response URB to the correct
* CTX.
- **/
+ */
static void ezusb_req_queue_run(struct ezusb_priv *upriv)
{
unsigned long flags;
@@ -535,7 +535,7 @@ static void ezusb_request_out_callback(struct urb *urb)
flags);
break;
}
- /* fall through */
+ fallthrough;
case EZUSB_CTX_RESP_RECEIVED:
/* IN already received before this OUT-ACK */
ctx->state = EZUSB_CTX_COMPLETE;
@@ -557,7 +557,7 @@ static void ezusb_request_out_callback(struct urb *urb)
case EZUSB_CTX_REQ_SUBMITTED:
case EZUSB_CTX_RESP_RECEIVED:
ctx->state = EZUSB_CTX_REQ_FAILED;
- /* fall through */
+ fallthrough;
case EZUSB_CTX_REQ_FAILED:
case EZUSB_CTX_REQ_TIMEOUT:
@@ -704,7 +704,7 @@ static inline u16 build_crc(struct ezusb_packet *data)
return crc;
}
-/**
+/*
* ezusb_fill_req:
*
* if data == NULL and length > 0 the data is assumed to be already in
@@ -897,11 +897,11 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
case EZUSB_CTX_REQ_SUBMITTED:
if (!ctx->in_rid)
break;
- /* fall through */
+ fallthrough;
default:
err("%s: Unexpected context state %d", __func__,
state);
- /* fall through */
+ fallthrough;
case EZUSB_CTX_REQ_TIMEOUT:
case EZUSB_CTX_REQ_FAILED:
case EZUSB_CTX_RESP_TIMEOUT:
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index 9d96c8b8409d..e97ee547b9f3 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -278,10 +278,10 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
}
}
-static void p54p_tasklet(unsigned long dev_id)
+static void p54p_tasklet(struct tasklet_struct *t)
{
- struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
- struct p54p_priv *priv = dev->priv;
+ struct p54p_priv *priv = from_tasklet(priv, t, tasklet);
+ struct ieee80211_hw *dev = pci_get_drvdata(priv->pdev);
struct p54p_ring_control *ring_control = priv->ring_control;
p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
@@ -333,10 +333,12 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54p_desc *desc;
dma_addr_t mapping;
u32 idx, i;
+ __le32 device_addr;
spin_lock_irqsave(&priv->lock, flags);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
+ device_addr = ((struct p54_hdr *)skb->data)->req_id;
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
@@ -350,7 +352,7 @@ static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
- desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
+ desc->device_addr = device_addr;
desc->len = cpu_to_le16(skb->len);
desc->flags = 0;
@@ -620,7 +622,7 @@ static int p54p_probe(struct pci_dev *pdev,
priv->common.tx = p54p_tx;
spin_lock_init(&priv->lock);
- tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
+ tasklet_setup(&priv->tasklet, p54p_tasklet);
err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
&priv->pdev->dev, GFP_KERNEL,
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index a1f956707887..ae964de347f7 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -223,7 +223,7 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue)
/* send queues */
case ISL38XX_CB_TX_MGMTQ:
BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE);
- /* fall through */
+ fallthrough;
case ISL38XX_CB_TX_DATA_LQ:
case ISL38XX_CB_TX_DATA_HQ:
diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
index 3ccf2a4b548c..2076f449b6e2 100644
--- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c
@@ -163,7 +163,6 @@ prism54_update_stats(struct work_struct *work)
{
islpci_private *priv = container_of(work, islpci_private, stats_work);
char *data;
- int j;
struct obj_bss bss, *bss2;
union oid_res_t r;
@@ -187,7 +186,7 @@ prism54_update_stats(struct work_struct *work)
kfree(data);
/* now ask for the corresponding bss */
- j = mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r);
+ mgt_get_request(priv, DOT11_OID_BSSFIND, 0, (void *) &bss, &r);
bss2 = r.ptr;
/* report the rssi and use it to calculate
* link quality through a signal-noise
@@ -1691,7 +1690,7 @@ static int prism54_get_encodeext(struct net_device *ndev,
case DOT11_AUTH_BOTH:
case DOT11_AUTH_SK:
wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
- /* fall through */
+ fallthrough;
case DOT11_AUTH_OS:
default:
wrqu->encoding.flags |= IW_ENCODE_OPEN;
diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c
index efd64e555bb5..8eb6d5e4bd57 100644
--- a/drivers/net/wireless/intersil/prism54/islpci_dev.c
+++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c
@@ -918,7 +918,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
switch (new_state) {
case PRV_STATE_OFF:
priv->state_off++;
- /* fall through */
+ fallthrough;
default:
priv->state = new_state;
break;
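
[Editorial note] For reference, the /* fall through */ -> fallthrough; conversions across these drivers swap a comment that only some compilers recognize for a real attribute that -Wimplicit-fallthrough can check. A compilable sketch of the pattern (the kernel's own definition lives in compiler_attributes.h):

#if defined(__GNUC__) && __GNUC__ >= 7
# define fallthrough	__attribute__((__fallthrough__))
#else
# define fallthrough	do {} while (0)	/* fallback: no-op */
#endif

static int classify(int state)
{
	int flags = 0;

	switch (state) {
	case 2:
		flags |= 2;
		fallthrough;	/* deliberate: state 2 implies state 1 */
	case 1:
		flags |= 1;
		break;
	default:
		break;
	}
	return flags;
}
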
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9dd9d73f4484..3b3fc7c9c91d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
* domain requests. The first radio will adhere to the first custom world
* regulatory domain, the second one to the second custom world regulatory
* domain. All other devices will world roam.
- * @HWSIM_REGTEST_STRICT_FOLLOW_: Used for testing strict regulatory domain
+ * @HWSIM_REGTEST_STRICT_FOLLOW: Used for testing strict regulatory domain
* settings, only the first radio will send a regulatory domain request
* and use strict settings. The rest of the radios are expected to follow.
* @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain
@@ -377,6 +377,49 @@ static const struct ieee80211_channel hwsim_channels_5ghz[] = {
CHAN5G(5925), /* Channel 185 */
};
+#define NUM_S1G_CHANS_US 51
+static struct ieee80211_channel hwsim_channels_s1g[NUM_S1G_CHANS_US];
+
+static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = {
+ .s1g = true,
+ .cap = { S1G_CAP0_SGI_1MHZ | S1G_CAP0_SGI_2MHZ,
+ 0,
+ 0,
+ S1G_CAP3_MAX_MPDU_LEN,
+ 0,
+ S1G_CAP5_AMPDU,
+ 0,
+ S1G_CAP7_DUP_1MHZ,
+ S1G_CAP8_TWT_RESPOND | S1G_CAP8_TWT_REQUEST,
+ 0},
+ .nss_mcs = { 0xfc | 1, /* MCS 7 for 1 SS */
+ /* RX Highest Supported Long GI Data Rate 0:7 */
+ 0,
+ /* RX Highest Supported Long GI Data Rate 0:7 */
+ /* TX S1G MCS Map 0:6 */
+ 0xfa,
+ /* TX S1G MCS Map :7 */
+ /* TX Highest Supported Long GI Data Rate 0:6 */
+ 0x80,
+ /* TX Highest Supported Long GI Data Rate 7:8 */
+ /* Rx Single spatial stream and S1G-MCS Map for 1MHz */
+ /* Tx Single spatial stream and S1G-MCS Map for 1MHz */
+ 0 },
+};
+
+static void hwsim_init_s1g_channels(struct ieee80211_channel *channels)
+{
+ int ch, freq;
+
+ for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) {
+ freq = 902000 + (ch + 1) * 500;
+ channels[ch].band = NL80211_BAND_S1GHZ;
+ channels[ch].center_freq = KHZ_TO_MHZ(freq);
+ channels[ch].freq_offset = freq % 1000;
+ channels[ch].hw_value = ch + 1;
+ }
+}
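
[Editorial note] Worked example of the kHz split above, checked for the first channel; KHZ_TO_MHZ is integer division by 1000 in the kernel, and plain C stands in for it here:

#include <assert.h>

int main(void)
{
	int ch = 0;				/* first channel */
	int freq = 902000 + (ch + 1) * 500;	/* 902500 kHz */

	assert(freq / 1000 == 902);	/* -> center_freq (MHz) */
	assert(freq % 1000 == 500);	/* -> freq_offset (kHz) */
	assert(ch + 1 == 1);		/* -> hw_value */
	return 0;
}
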
+
static const struct ieee80211_rate hwsim_rates[] = {
{ .bitrate = 10 },
{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
@@ -505,6 +548,7 @@ struct mac80211_hwsim_data {
struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
+ struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
struct ieee80211_iface_combination if_combination;
struct ieee80211_iface_limit if_limits[3];
@@ -900,12 +944,14 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
struct mac80211_hwsim_data *data = hw->priv;
struct sk_buff *skb;
struct hwsim_radiotap_hdr *hdr;
- u16 flags;
+ u16 flags, bitrate;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb);
struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
- if (WARN_ON(!txrate))
- return;
+ if (!txrate)
+ bitrate = 0;
+ else
+ bitrate = txrate->bitrate;
if (!netif_running(hwsim_mon))
return;
@@ -924,10 +970,10 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
(1 << IEEE80211_RADIOTAP_CHANNEL));
hdr->rt_tsft = __mac80211_hwsim_get_tsf(data);
hdr->rt_flags = 0;
- hdr->rt_rate = txrate->bitrate / 5;
+ hdr->rt_rate = bitrate / 5;
hdr->rt_channel = cpu_to_le16(chan->center_freq);
flags = IEEE80211_CHAN_2GHZ;
- if (txrate->flags & IEEE80211_RATE_ERP_G)
+ if (txrate && txrate->flags & IEEE80211_RATE_ERP_G)
flags |= IEEE80211_CHAN_OFDM;
else
flags |= IEEE80211_CHAN_CCK;
@@ -1341,6 +1387,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
memset(&rx_status, 0, sizeof(rx_status));
rx_status.flag |= RX_FLAG_MACTIME_START;
rx_status.freq = chan->center_freq;
+ rx_status.freq_offset = chan->freq_offset ? 1 : 0;
rx_status.band = chan->band;
if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
rx_status.rate_idx =
@@ -1522,14 +1569,18 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
/* fake header transmission time */
struct ieee80211_mgmt *mgmt;
struct ieee80211_rate *txrate;
+ /* TODO: get MCS */
+ int bitrate = 100;
u64 ts;
mgmt = (struct ieee80211_mgmt *)skb->data;
txrate = ieee80211_get_tx_rate(hw, txi);
+ if (txrate)
+ bitrate = txrate->bitrate;
ts = mac80211_hwsim_get_tsf_raw();
mgmt->u.probe_resp.timestamp =
cpu_to_le64(ts + data->tsf_offset +
- 24 * 8 * 10 / txrate->bitrate);
+ 24 * 8 * 10 / bitrate);
}
mac80211_hwsim_monitor_rx(hw, skb, channel);
@@ -1664,6 +1715,8 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_rate *txrate;
struct ieee80211_mgmt *mgmt;
struct sk_buff *skb;
+ /* TODO: get MCS */
+ int bitrate = 100;
hwsim_check_magic(vif);
@@ -1683,13 +1736,25 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
ARRAY_SIZE(info->control.rates));
txrate = ieee80211_get_tx_rate(hw, info);
+ if (txrate)
+ bitrate = txrate->bitrate;
mgmt = (struct ieee80211_mgmt *) skb->data;
/* fake header transmission time */
data->abs_bcn_ts = mac80211_hwsim_get_tsf_raw();
- mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts +
- data->tsf_offset +
- 24 * 8 * 10 / txrate->bitrate);
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ struct ieee80211_ext *ext = (void *) mgmt;
+
+ ext->u.s1g_beacon.timestamp = cpu_to_le32(data->abs_bcn_ts +
+ data->tsf_offset +
+ 10 * 8 * 10 /
+ bitrate);
+ } else {
+ mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts +
+ data->tsf_offset +
+ 24 * 8 * 10 /
+ bitrate);
+ }
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
@@ -1699,7 +1764,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
rcu_dereference(vif->chanctx_conf)->def.chan);
}
- if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
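
[Editorial note] The timestamp arithmetic here is easy to misread: bitrate is in 100 kb/s units, so bytes * 8 * 10 / bitrate yields microseconds of header airtime (24 bytes for a regular beacon header, 10 for the S1G extended header). A standalone check:

#include <stdio.h>

static int hdr_airtime_us(int hdr_bytes, int bitrate_100kbps)
{
	return hdr_bytes * 8 * 10 / bitrate_100kbps;
}

int main(void)
{
	/* 24-byte header at 10 Mb/s (bitrate == 100): 19 us */
	printf("%d us\n", hdr_airtime_us(24, 100));
	/* 10-byte S1G header at 1 Mb/s (bitrate == 10): 80 us */
	printf("%d us\n", hdr_airtime_us(10, 10));
	return 0;
}
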
@@ -1737,6 +1802,11 @@ static const char * const hwsim_chanwidths[] = {
[NL80211_CHAN_WIDTH_80] = "vht80",
[NL80211_CHAN_WIDTH_80P80] = "vht80p80",
[NL80211_CHAN_WIDTH_160] = "vht160",
+ [NL80211_CHAN_WIDTH_1] = "1MHz",
+ [NL80211_CHAN_WIDTH_2] = "2MHz",
+ [NL80211_CHAN_WIDTH_4] = "4MHz",
+ [NL80211_CHAN_WIDTH_8] = "8MHz",
+ [NL80211_CHAN_WIDTH_16] = "16MHz",
};
static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
@@ -3079,6 +3149,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sizeof(hwsim_channels_2ghz));
memcpy(data->channels_5ghz, hwsim_channels_5ghz,
sizeof(hwsim_channels_5ghz));
+ memcpy(data->channels_s1g, hwsim_channels_s1g,
+ sizeof(hwsim_channels_s1g));
memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
@@ -3121,6 +3193,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->vht_cap.vht_mcs.tx_mcs_map =
sband->vht_cap.vht_mcs.rx_mcs_map;
break;
+ case NL80211_BAND_S1GHZ:
+ memcpy(&sband->s1g_cap, &hwsim_s1g_cap,
+ sizeof(sband->s1g_cap));
+ sband->channels = data->channels_s1g;
+ sband->n_channels = ARRAY_SIZE(hwsim_channels_s1g);
+ break;
default:
continue;
}
@@ -3886,7 +3964,7 @@ done:
}
/* Generic Netlink operations array */
-static const struct genl_ops hwsim_ops[] = {
+static const struct genl_small_ops hwsim_ops[] = {
{
.cmd = HWSIM_CMD_REGISTER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -3930,8 +4008,8 @@ static struct genl_family hwsim_genl_family __ro_after_init = {
.policy = hwsim_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = hwsim_ops,
- .n_ops = ARRAY_SIZE(hwsim_ops),
+ .small_ops = hwsim_ops,
+ .n_small_ops = ARRAY_SIZE(hwsim_ops),
.mcgrps = hwsim_mcgrps,
.n_mcgrps = ARRAY_SIZE(hwsim_mcgrps),
};
@@ -4318,6 +4396,8 @@ static int __init init_mac80211_hwsim(void)
goto out_exit_virtio;
}
+ hwsim_init_s1g_channels(hwsim_channels_s1g);
+
for (i = 0; i < radios; i++) {
struct hwsim_new_radio_params param = { 0 };
diff --git a/drivers/net/wireless/marvell/libertas/defs.h b/drivers/net/wireless/marvell/libertas/defs.h
index 58e2ead7b0cc..f7e7bf56f924 100644
--- a/drivers/net/wireless/marvell/libertas/defs.h
+++ b/drivers/net/wireless/marvell/libertas/defs.h
@@ -50,8 +50,7 @@ extern unsigned int lbs_debug;
#ifdef DEBUG
#define LBS_DEB_LL(grp, grpnam, fmt, args...) \
do { if ((lbs_debug & (grp)) == (grp)) \
- printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
- in_interrupt() ? " (INT)" : "", ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME grpnam ": " fmt, ## args); } while (0)
#else
#define LBS_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
#endif
diff --git a/drivers/net/wireless/marvell/libertas/firmware.c b/drivers/net/wireless/marvell/libertas/firmware.c
index 69029c59a272..f124110944b7 100644
--- a/drivers/net/wireless/marvell/libertas/firmware.c
+++ b/drivers/net/wireless/marvell/libertas/firmware.c
@@ -121,12 +121,12 @@ void lbs_wait_for_firmware_load(struct lbs_private *priv)
* either a helper firmware and a main firmware (2-stage), or just the helper.
*
* @priv: Pointer to lbs_private instance
- * @dev: A pointer to &device structure
+ * @device: A pointer to &device structure
* @card_model: Bus-specific card model ID used to filter firmware table
* elements
* @fw_table: Table of firmware file names and device model numbers
* terminated by an entry with a NULL helper name
- * @callback: User callback to invoke when firmware load succeeds or fails.
+ * @callback: User callback to invoke when firmware load succeeds or fails.
*/
int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
u32 card_model, const struct lbs_fw_table *fw_table,
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 2233b59cdf44..ee4cf3437e28 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -721,7 +721,7 @@ EXPORT_SYMBOL_GPL(lbs_resume);
* lbs_cmd_timeout_handler - handles the timeout of command sending.
* It will re-send the same command again.
*
- * @data: &struct lbs_private pointer
+ * @t: Context from which to retrieve a &struct lbs_private pointer
*/
static void lbs_cmd_timeout_handler(struct timer_list *t)
{
@@ -755,7 +755,7 @@ out:
* to the hardware. This is known to frequently happen with SD8686 when
* waking up after a Wake-on-WLAN-triggered resume.
*
- * @data: &struct lbs_private pointer
+ * @t: Context from which to retrieve a &struct lbs_private pointer
*/
static void lbs_tx_lockup_handler(struct timer_list *t)
{
@@ -777,7 +777,7 @@ static void lbs_tx_lockup_handler(struct timer_list *t)
/**
* auto_deepsleep_timer_fn - put the device back to deep sleep mode when
* timer expires and no activity (command, event, data etc.) is detected.
- * @data: &struct lbs_private pointer
+ * @t: Context from which to retrieve a &struct lbs_private pointer
* returns: N/A
*/
static void auto_deepsleep_timer_fn(struct timer_list *t)
diff --git a/drivers/net/wireless/marvell/libertas/rx.c b/drivers/net/wireless/marvell/libertas/rx.c
index f28aa09d1f9e..9f24b0760e1f 100644
--- a/drivers/net/wireless/marvell/libertas/rx.c
+++ b/drivers/net/wireless/marvell/libertas/rx.c
@@ -147,10 +147,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, dev);
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
ret = 0;
done:
@@ -265,11 +262,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
dev->stats.rx_packets++;
skb->protocol = eth_type_trans(skb, priv->dev);
-
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
+ netif_rx_any_context(skb);
ret = 0;
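
[Editorial note] netif_rx_any_context() folds away the open-coded in_interrupt() branch seen on the removed lines; conceptually it is roughly the following kernel-context sketch (the real helper lives in the core netdev code):

/* kernel-context sketch, not the actual implementation */
static inline int netif_rx_any_context_sketch(struct sk_buff *skb)
{
	if (in_interrupt())
		return netif_rx(skb);	/* hard/soft IRQ context */

	return netif_rx_ni(skb);	/* process context */
}
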
diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c
index a0b4c9debc11..efb98304555a 100644
--- a/drivers/net/wireless/marvell/libertas_tf/cmd.c
+++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c
@@ -32,10 +32,10 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv);
/**
* lbtf_cmd_copyback - Simple callback that copies response back into command
*
- * @priv A pointer to struct lbtf_private structure
- * @extra A pointer to the original command structure for which
+ * @priv: A pointer to struct lbtf_private structure
+ * @extra: A pointer to the original command structure for which
* 'resp' is a response
- * @resp A pointer to the command response
+ * @resp: A pointer to the command response
*
* Returns: 0 on success, error on failure
*/
@@ -72,7 +72,7 @@ static void lbtf_geo_init(struct lbtf_private *priv)
/**
* lbtf_update_hw_spec: Updates the hardware details.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0 on success, error on failure
*/
@@ -141,8 +141,8 @@ out:
/**
* lbtf_set_channel: Set the radio channel
*
- * @priv A pointer to struct lbtf_private structure
- * @channel The desired channel, or 0 to clear a locked channel
+ * @priv: A pointer to struct lbtf_private structure
+ * @channel: The desired channel, or 0 to clear a locked channel
*
* Returns: 0 on success, error on failure
*/
@@ -268,7 +268,7 @@ static void lbtf_submit_command(struct lbtf_private *priv,
lbtf_deb_leave(LBTF_DEB_HOST);
}
-/**
+/*
* This function inserts command node to cmdfreeq
* after cleans it. Requires priv->driver_lock held.
*/
@@ -434,7 +434,7 @@ void lbtf_set_mac_control(struct lbtf_private *priv)
/**
* lbtf_allocate_cmd_buffer - Allocates cmd buffer, links it to free cmd queue
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0 on success.
*/
@@ -482,7 +482,7 @@ done:
/**
* lbtf_free_cmd_buffer - Frees the cmd buffer.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0
*/
@@ -519,7 +519,7 @@ done:
/**
* lbtf_get_cmd_ctrl_node - Gets free cmd node from free cmd queue.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: pointer to a struct cmd_ctrl_node or NULL if none available.
*/
@@ -553,7 +553,7 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
/**
* lbtf_execute_next_command: execute next command in cmd pending queue.
*
- * @priv A pointer to struct lbtf_private structure
+ * @priv: A pointer to struct lbtf_private structure
*
* Returns: 0 on success.
*/
diff --git a/drivers/net/wireless/marvell/libertas_tf/deb_defs.h b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
index 37a98e228b46..0b520df62f1f 100644
--- a/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
+++ b/drivers/net/wireless/marvell/libertas_tf/deb_defs.h
@@ -48,8 +48,7 @@ extern unsigned int lbtf_debug;
#ifdef DEBUG
#define LBTF_DEB_LL(grp, grpnam, fmt, args...) \
do { if ((lbtf_debug & (grp)) == (grp)) \
- printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
- in_interrupt() ? " (INT)" : "", ## args); } while (0)
+ printk(KERN_DEBUG DRV_NAME grpnam ": " fmt, ## args); } while (0)
#else
#define LBTF_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
#endif
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index bedc09215088..a92916dc81a9 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -50,7 +50,7 @@ static int if_usb_reset_device(struct lbtf_private *priv);
/**
 * if_usb_write_bulk_callback - callback to handle URB status
*
- * @param urb pointer to urb structure
+ * @urb: pointer to urb structure
*/
static void if_usb_write_bulk_callback(struct urb *urb)
{
@@ -67,7 +67,7 @@ static void if_usb_write_bulk_callback(struct urb *urb)
/**
* if_usb_free - free tx/rx urb, skb and rx buffer
*
- * @param cardp pointer if_usb_card
+ * @cardp: pointer to &struct if_usb_card
*/
static void if_usb_free(struct if_usb_card *cardp)
{
@@ -136,8 +136,8 @@ static const struct lbtf_ops if_usb_ops = {
/**
* if_usb_probe - sets the configuration values
*
- * @ifnum interface number
- * @id pointer to usb_device_id
+ * @intf: USB interface structure
+ * @id: pointer to usb_device_id
*
* Returns: 0 on success, error code on failure
*/
@@ -238,7 +238,7 @@ lbtf_deb_leave(LBTF_DEB_MAIN);
/**
* if_usb_disconnect - free resource and cleanup
*
- * @intf USB interface structure
+ * @intf: USB interface structure
*/
static void if_usb_disconnect(struct usb_interface *intf)
{
@@ -264,7 +264,7 @@ static void if_usb_disconnect(struct usb_interface *intf)
/**
* if_usb_send_fw_pkt - This function downloads the FW
*
- * @priv pointer to struct lbtf_private
+ * @cardp: pointer to &struct if_usb_card
*
* Returns: 0
*/
@@ -360,10 +360,10 @@ static int if_usb_reset_device(struct lbtf_private *priv)
/**
* usb_tx_block - transfer data to the device
*
- * @priv pointer to struct lbtf_private
- * @payload pointer to payload data
- * @nb data length
- * @data non-zero for data, zero for commands
+ * @cardp: pointer to &struct if_usb_card
+ * @payload: pointer to payload data
+ * @nb: data length
+ * @data: non-zero for data, zero for commands
*
* Returns: 0 on success, nonzero otherwise.
*/
@@ -619,7 +619,7 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
/**
* if_usb_receive - read data received from the device.
*
- * @urb pointer to struct urb
+ * @urb: pointer to struct urb
*/
static void if_usb_receive(struct urb *urb)
{
@@ -702,10 +702,10 @@ setup_for_next:
/**
* if_usb_host_to_card - Download data to the device
*
- * @priv pointer to struct lbtf_private structure
- * @type type of data
- * @buf pointer to data buffer
- * @len number of bytes
+ * @priv: pointer to struct lbtf_private structure
+ * @type: type of data
+ * @payload: pointer to payload buffer
+ * @nb: number of bytes
*
* Returns: 0 on success, nonzero otherwise
*/
@@ -734,7 +734,8 @@ static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
/**
* if_usb_issue_boot_command - Issue boot command to Boot2.
*
- * @ivalue 1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
+ * @cardp: pointer to &struct if_usb_card
+ * @ivalue: 1 boots from FW by USB-Download, 2 boots from FW in EEPROM.
*
* Returns: 0
*/
@@ -757,8 +758,8 @@ static int if_usb_issue_boot_command(struct if_usb_card *cardp, int ivalue)
/**
* check_fwfile_format - Check the validity of Boot2/FW image.
*
- * @data pointer to image
- * @totlen image length
+ * @data: pointer to image
+ * @totlen: image length
*
* Returns: 0 if the image is valid, nonzero otherwise.
*/
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 02bd7c99b358..71492211904b 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -15,7 +15,6 @@
/* thinfirm version: 5.132.X.pX */
#define LBTF_FW_VER_MIN 0x05840300
#define LBTF_FW_VER_MAX 0x0584ffff
-#define QOS_CONTROL_LEN 2
/* Module parameters */
unsigned int lbtf_debug;
@@ -121,7 +120,7 @@ static void lbtf_cmd_work(struct work_struct *work)
lbtf_deb_leave(LBTF_DEB_CMD);
}
-/**
+/*
* This function handles the timeout of command sending.
 * It will re-send the same command.
*/
@@ -542,11 +541,9 @@ done:
}
EXPORT_SYMBOL_GPL(lbtf_rx);
-/**
+/*
* lbtf_add_card: Add and initialize the card.
*
- * @card A pointer to card
- *
* Returns: pointer to struct lbtf_priv.
*/
struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev,
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 96848fa0e417..a6b9dc6700b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -1163,7 +1163,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as IBSS\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
return 0;
default:
@@ -1194,7 +1194,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as STA\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_STATION: /* This shouldn't happen */
return 0;
default:
@@ -1217,7 +1217,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as AP\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_AP: /* This shouldn't happen */
return 0;
default:
@@ -1257,7 +1257,7 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_UNSPECIFIED:
mwifiex_dbg(priv->adapter, INFO,
"%s: kept type as P2P\n", dev->name);
- /* fall through */
+ fallthrough;
case NL80211_IFTYPE_P2P_CLIENT:
case NL80211_IFTYPE_P2P_GO:
return 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index d068b9075c32..3a11342a6bde 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -322,9 +322,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
adapter->seq_num++;
sleep_cfm_buf->seq_num =
- cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
+ cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
(adapter->seq_num, priv->bss_num,
- priv->bss_type)));
+ priv->bss_type));
mwifiex_dbg(adapter, CMD,
"cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index d9f8bdbc817b..470d669c7f14 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -513,10 +513,10 @@ enum mwifiex_channel_flags {
#define RF_ANTENNA_AUTO 0xFFFF
-#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
- (((seq) & 0x00ff) | \
- (((num) & 0x000f) << 8)) | \
- (((type) & 0x000f) << 12); }
+#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \
+ ((((seq) & 0x00ff) | \
+ (((num) & 0x000f) << 8)) | \
+ (((type) & 0x000f) << 12))
#define HostCmd_GET_SEQ_NO(seq) \
((seq) & HostCmd_SEQ_NUM_MASK)
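
[Editorial note] The old macro body was a brace block ending in a semicolon, so it was a statement, not an expression; it only compiled because the caller's extra parentheses turned it into a GNU statement expression (see the matching cmdevt.c hunk above). The fixed form is an ordinary expression, shown standalone:

/* fixed shape: a plain parenthesized expression, valid anywhere */
#define SET_SEQ(seq, num, type)			\
	((((seq) & 0x00ff) |			\
	  (((num) & 0x000f) << 8)) |		\
	 (((type) & 0x000f) << 12))

/* old shape (illustrative): { expr; } - a statement. Only
 * cpu_to_le16((SET_SEQ(...))) worked, because the caller's
 * parentheses produced a GNU statement expression ({ ... }). */
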
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 811abe963af2..40e99eaf5a30 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -374,7 +374,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv,
(const u8 *)hdr,
token_len))
break;
- /* fall through */
+ fallthrough;
default:
if (ie_len + token_len > IEEE_MAX_IE_SIZE) {
err = -EINVAL;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 82d69bc3aaaf..f006a3d72b40 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -695,14 +695,12 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
int ret;
u32 poll_num = 1;
- if (adapter->if_ops.check_fw_status) {
- /* check if firmware is already running */
- ret = adapter->if_ops.check_fw_status(adapter, poll_num);
- if (!ret) {
- mwifiex_dbg(adapter, MSG,
- "WLAN FW already running! Skip FW dnld\n");
- return 0;
- }
+ /* check if firmware is already running */
+ ret = adapter->if_ops.check_fw_status(adapter, poll_num);
+ if (!ret) {
+ mwifiex_dbg(adapter, MSG,
+ "WLAN FW already running! Skip FW dnld\n");
+ return 0;
}
/* check if we are the winner for downloading FW */
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 9ee5600351a7..9ba8a8f64976 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -48,6 +48,8 @@ bool aggr_ctrl;
module_param(aggr_ctrl, bool, 0000);
MODULE_PARM_DESC(aggr_ctrl, "usb tx aggregation enable:1, disable:0");
+const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
+
/*
* This function registers the device and performs all the necessary
* initializations.
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 87b4ccca4b9a..6a10ff0377a2 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -33,6 +33,155 @@
static struct mwifiex_if_ops pcie_ops;
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_SCRATCH_6_REG,
+ .tx_wrptr = PCIE_SCRATCH_7_REG,
+ .rx_rdptr = PCIE_SCRATCH_8_REG,
+ .rx_wrptr = PCIE_SCRATCH_9_REG,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 0,
+ .tx_mask = MWIFIEX_TXBD_MASK,
+ .tx_wrap_mask = 0,
+ .rx_mask = MWIFIEX_RXBD_MASK,
+ .rx_wrap_mask = 0,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
+ .ring_flag_sop = 0,
+ .ring_flag_eop = 0,
+ .ring_flag_xs_sop = 0,
+ .ring_flag_xs_eop = 0,
+ .ring_tx_start_ptr = 0,
+ .pfu_enabled = 0,
+ .sleep_cookie = 1,
+ .msix_support = 0,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
+ .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x03FF0000,
+ .tx_wrap_mask = 0x07FF0000,
+ .rx_mask = 0x000003FF,
+ .rx_wrap_mask = 0x000007FF,
+ .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
+ .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
+ .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+ .fw_dump_start = PCIE_SCRATCH_14_REG,
+ .fw_dump_end = 0xcff,
+ .fw_dump_host_ready = 0xee,
+ .fw_dump_read_done = 0xfe,
+ .msix_support = 0,
+};
+
+static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
+ .cmd_addr_lo = PCIE_SCRATCH_0_REG,
+ .cmd_addr_hi = PCIE_SCRATCH_1_REG,
+ .cmd_size = PCIE_SCRATCH_2_REG,
+ .fw_status = PCIE_SCRATCH_3_REG,
+ .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
+ .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
+ .tx_rdptr = 0xC1A4,
+ .tx_wrptr = 0xC174,
+ .rx_rdptr = 0xC174,
+ .rx_wrptr = 0xC1A4,
+ .evt_rdptr = PCIE_SCRATCH_10_REG,
+ .evt_wrptr = PCIE_SCRATCH_11_REG,
+ .drv_rdy = PCIE_SCRATCH_12_REG,
+ .tx_start_ptr = 16,
+ .tx_mask = 0x0FFF0000,
+ .tx_wrap_mask = 0x1FFF0000,
+ .rx_mask = 0x00000FFF,
+ .rx_wrap_mask = 0x00001FFF,
+ .tx_rollover_ind = BIT(28),
+ .rx_rollover_ind = BIT(12),
+ .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
+ .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
+ .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
+ .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
+ .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
+ .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
+ .pfu_enabled = 1,
+ .sleep_cookie = 0,
+ .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
+ .fw_dump_start = PCIE_SCRATCH_14_REG,
+ .fw_dump_end = 0xcff,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_read_done = 0xdd,
+ .msix_support = 0,
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
+ {"ITCM", NULL, 0, 0xF0},
+ {"DTCM", NULL, 0, 0xF1},
+ {"SQRAM", NULL, 0, 0xF2},
+ {"IRAM", NULL, 0, 0xF3},
+ {"APU", NULL, 0, 0xF4},
+ {"CIU", NULL, 0, 0xF5},
+ {"ICU", NULL, 0, 0xF6},
+ {"MAC", NULL, 0, 0xF7},
+};
+
+static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
+ {"DUMP", NULL, 0, 0xDD},
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
+ .reg = &mwifiex_reg_8766,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .can_dump_fw = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
+ .reg = &mwifiex_reg_8897,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .can_dump_fw = true,
+ .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
+ .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
+ .reg = &mwifiex_reg_8997,
+ .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .can_dump_fw = true,
+ .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
+ .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
+ .can_ext_scan = true,
+};
+
static const struct of_device_id mwifiex_pcie_of_match_table[] = {
{ .compatible = "pci11ab,2b42" },
{ .compatible = "pci1b4b,2b42" },
@@ -58,8 +207,8 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
struct pcie_service_card *card = adapter->card;
struct mwifiex_dma_mapping mapping;
- mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
- if (pci_dma_mapping_error(card->dev, mapping.addr)) {
+ mapping.addr = dma_map_single(&card->dev->dev, skb->data, size, flags);
+ if (dma_mapping_error(&card->dev->dev, mapping.addr)) {
mwifiex_dbg(adapter, ERROR, "failed to map pci memory!\n");
return -1;
}
@@ -75,7 +224,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
struct mwifiex_dma_mapping mapping;
mwifiex_get_mapping(skb, &mapping);
- pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+ dma_unmap_single(&card->dev->dev, mapping.addr, mapping.len, flags);
}
/*
@@ -461,10 +610,9 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
struct sk_buff *cmdrsp = card->cmdrsp_buf;
for (count = 0; count < max_delay_loop_cnt; count++) {
- pci_dma_sync_single_for_cpu(card->dev,
- MWIFIEX_SKB_DMA_ADDR(cmdrsp),
- sizeof(sleep_cookie),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie), DMA_FROM_DEVICE);
buffer = cmdrsp->data;
sleep_cookie = get_unaligned_le32(buffer);
@@ -473,10 +621,10 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
"sleep cookie found at count %d\n", count);
break;
}
- pci_dma_sync_single_for_device(card->dev,
- MWIFIEX_SKB_DMA_ADDR(cmdrsp),
- sizeof(sleep_cookie),
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(cmdrsp),
+ sizeof(sleep_cookie),
+ DMA_FROM_DEVICE);
usleep_range(20, 30);
}
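
[Editorial note] These hunks follow the tree-wide move from the legacy pci_* DMA wrappers to the generic API on &pdev->dev. The mapping is mechanical except that pci_alloc_consistent() implied GFP_ATOMIC, so the explicit GFP_KERNEL below is a mild behavior change. A kernel-context sketch of the correspondence:

/* pci_map_single(pdev, p, n, PCI_DMA_TODEVICE)
 *   -> dma_map_single(&pdev->dev, p, n, DMA_TO_DEVICE)
 * pci_dma_mapping_error(pdev, addr)
 *   -> dma_mapping_error(&pdev->dev, addr)
 * pci_alloc_consistent(pdev, n, &pa)          [GFP_ATOMIC implied]
 *   -> dma_alloc_coherent(&pdev->dev, n, &pa, GFP_KERNEL)
 */
static void *ring_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t *pa)
{
	/* GFP context is now explicit; these rings are set up in
	 * process context, so GFP_KERNEL is safe here. */
	return dma_alloc_coherent(&pdev->dev, sz, pa, GFP_KERNEL);
}
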
@@ -630,7 +778,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb,
MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -687,7 +835,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
skb_put(skb, MAX_EVENT_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE)) {
+ DMA_FROM_DEVICE)) {
kfree_skb(skb);
kfree(card->evtbd_ring_vbase);
return -1;
@@ -730,7 +878,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -739,7 +887,7 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -769,7 +917,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -778,7 +926,7 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -804,7 +952,7 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@@ -845,18 +993,20 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: txbd_ring: Allocating %d bytes\n",
card->txbd_ring_size);
- card->txbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->txbd_ring_size,
- &card->txbd_ring_pbase);
+ card->txbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->txbd_ring_size,
+ &card->txbd_ring_pbase,
+ GFP_KERNEL);
if (!card->txbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
- "allocate consistent memory (%d bytes) failed!\n",
+ "allocate coherent memory (%d bytes) failed!\n",
card->txbd_ring_size);
return -ENOMEM;
}
+
mwifiex_dbg(adapter, DATA,
- "info: txbd_ring - base: %p, pbase: %#x:%x, len: %x\n",
- card->txbd_ring_vbase, (unsigned int)card->txbd_ring_pbase,
+ "info: txbd_ring - base: %p, pbase: %#x:%x, len: %#x\n",
+ card->txbd_ring_vbase, (u32)card->txbd_ring_pbase,
(u32)((u64)card->txbd_ring_pbase >> 32),
card->txbd_ring_size);
@@ -871,9 +1021,9 @@ static int mwifiex_pcie_delete_txbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_txq_ring(adapter);
if (card->txbd_ring_vbase)
- pci_free_consistent(card->dev, card->txbd_ring_size,
- card->txbd_ring_vbase,
- card->txbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->txbd_ring_size,
+ card->txbd_ring_vbase,
+ card->txbd_ring_pbase);
card->txbd_ring_size = 0;
card->txbd_wrptr = 0;
card->txbd_rdptr = 0 | reg->tx_rollover_ind;
@@ -909,12 +1059,13 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: rxbd_ring: Allocating %d bytes\n",
card->rxbd_ring_size);
- card->rxbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->rxbd_ring_size,
- &card->rxbd_ring_pbase);
+ card->rxbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->rxbd_ring_size,
+ &card->rxbd_ring_pbase,
+ GFP_KERNEL);
if (!card->rxbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
- "allocate consistent memory (%d bytes) failed!\n",
+ "allocate coherent memory (%d bytes) failed!\n",
card->rxbd_ring_size);
return -ENOMEM;
}
@@ -939,9 +1090,9 @@ static int mwifiex_pcie_delete_rxbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_rxq_ring(adapter);
if (card->rxbd_ring_vbase)
- pci_free_consistent(card->dev, card->rxbd_ring_size,
- card->rxbd_ring_vbase,
- card->rxbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->rxbd_ring_size,
+ card->rxbd_ring_vbase,
+ card->rxbd_ring_pbase);
card->rxbd_ring_size = 0;
card->rxbd_wrptr = 0;
card->rxbd_rdptr = 0 | reg->rx_rollover_ind;
@@ -972,13 +1123,14 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: evtbd_ring: Allocating %d bytes\n",
- card->evtbd_ring_size);
- card->evtbd_ring_vbase = pci_alloc_consistent(card->dev,
- card->evtbd_ring_size,
- &card->evtbd_ring_pbase);
+ card->evtbd_ring_size);
+ card->evtbd_ring_vbase = dma_alloc_coherent(&card->dev->dev,
+ card->evtbd_ring_size,
+ &card->evtbd_ring_pbase,
+ GFP_KERNEL);
if (!card->evtbd_ring_vbase) {
mwifiex_dbg(adapter, ERROR,
- "allocate consistent memory (%d bytes) failed!\n",
+ "allocate coherent memory (%d bytes) failed!\n",
card->evtbd_ring_size);
return -ENOMEM;
}
@@ -1003,9 +1155,9 @@ static int mwifiex_pcie_delete_evtbd_ring(struct mwifiex_adapter *adapter)
mwifiex_cleanup_evt_ring(adapter);
if (card->evtbd_ring_vbase)
- pci_free_consistent(card->dev, card->evtbd_ring_size,
- card->evtbd_ring_vbase,
- card->evtbd_ring_pbase);
+ dma_free_coherent(&card->dev->dev, card->evtbd_ring_size,
+ card->evtbd_ring_vbase,
+ card->evtbd_ring_pbase);
card->evtbd_wrptr = 0;
card->evtbd_rdptr = 0 | reg->evt_rollover_ind;
card->evtbd_ring_size = 0;
@@ -1032,7 +1184,7 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
}
skb_put(skb, MWIFIEX_UPLD_SIZE);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE)) {
+ DMA_FROM_DEVICE)) {
kfree_skb(skb);
return -1;
}
@@ -1056,14 +1208,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
if (card && card->cmdrsp_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
card->cmdrsp_buf = NULL;
}
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@@ -1078,11 +1230,13 @@ static int mwifiex_pcie_alloc_sleep_cookie_buf(struct mwifiex_adapter *adapter)
struct pcie_service_card *card = adapter->card;
u32 tmp;
- card->sleep_cookie_vbase = pci_alloc_consistent(card->dev, sizeof(u32),
- &card->sleep_cookie_pbase);
+ card->sleep_cookie_vbase = dma_alloc_coherent(&card->dev->dev,
+ sizeof(u32),
+ &card->sleep_cookie_pbase,
+ GFP_KERNEL);
if (!card->sleep_cookie_vbase) {
mwifiex_dbg(adapter, ERROR,
- "pci_alloc_consistent failed!\n");
+ "dma_alloc_coherent failed!\n");
return -ENOMEM;
}
/* Init val of Sleep Cookie */
@@ -1109,9 +1263,9 @@ static int mwifiex_pcie_delete_sleep_cookie_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->sleep_cookie_vbase) {
- pci_free_consistent(card->dev, sizeof(u32),
- card->sleep_cookie_vbase,
- card->sleep_cookie_pbase);
+ dma_free_coherent(&card->dev->dev, sizeof(u32),
+ card->sleep_cookie_vbase,
+ card->sleep_cookie_pbase);
card->sleep_cookie_vbase = NULL;
}
@@ -1183,7 +1337,7 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
unmap_count++;
@@ -1276,7 +1430,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
put_unaligned_le16(MWIFIEX_TYPE_DATA, payload + 2);
if (mwifiex_map_pci_memory(adapter, skb, skb->len,
- PCI_DMA_TODEVICE))
+ DMA_TO_DEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
@@ -1358,7 +1512,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
card->tx_buf_list[wrindx] = NULL;
atomic_dec(&adapter->tx_hw_pending);
if (reg->pfu_enabled)
@@ -1412,7 +1566,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (!skb_data)
return -ENOMEM;
- mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_data, DMA_FROM_DEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@@ -1450,7 +1604,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb_tmp,
MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
@@ -1527,7 +1681,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
return -1;
}
- if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
@@ -1539,7 +1693,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1551,7 +1705,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write download command to boot code.\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1560,7 +1714,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
mwifiex_dbg(adapter, ERROR,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1569,7 +1723,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
mwifiex_dbg(adapter, ERROR,
"%s: failed to assert door-bell intr\n", __func__);
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
return -1;
}
@@ -1628,7 +1782,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
put_unaligned_le16((u16)skb->len, &payload[0]);
put_unaligned_le16(MWIFIEX_TYPE_CMD, &payload[2]);
- if (mwifiex_map_pci_memory(adapter, skb, skb->len, PCI_DMA_TODEVICE))
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len, DMA_TO_DEVICE))
return -1;
card->cmd_buf = skb;
@@ -1728,17 +1882,16 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
"info: Rx CMD Response\n");
if (adapter->curr_cmd)
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_FROM_DEVICE);
else
- pci_dma_sync_single_for_cpu(card->dev,
- MWIFIEX_SKB_DMA_ADDR(skb),
- MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_UPLD_SIZE, DMA_FROM_DEVICE);
/* Unmap the command as a response has been received. */
if (card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
@@ -1749,10 +1902,10 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (!adapter->curr_cmd) {
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
- pci_dma_sync_single_for_device(card->dev,
- MWIFIEX_SKB_DMA_ADDR(skb),
- MWIFIEX_SLEEP_COOKIE_SIZE,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&card->dev->dev,
+ MWIFIEX_SKB_DMA_ADDR(skb),
+ MWIFIEX_SLEEP_COOKIE_SIZE,
+ DMA_FROM_DEVICE);
if (mwifiex_write_reg(adapter,
PCIE_CPU_INT_EVENT,
CPU_INTR_SLEEP_CFM_DONE)) {
@@ -1763,7 +1916,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
mwifiex_delay_for_sleep_cookie(adapter,
MWIFIEX_MAX_DELAY_COUNT);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
skb_pull(skb, adapter->intf_hdr_len);
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1779,7 +1932,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
skb_push(skb, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
skb_pull(skb, adapter->intf_hdr_len);
@@ -1821,7 +1974,7 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
card->cmdrsp_buf = skb;
skb_push(card->cmdrsp_buf, adapter->intf_hdr_len);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
}
@@ -1876,7 +2029,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
mwifiex_dbg(adapter, INFO,
"info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
- mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_cmd, DMA_FROM_DEVICE);
/* Take the pointer and set it as the adapter's event pointer;
   it will be given back after the event handling callback */
@@ -1956,7 +2109,7 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
skb_put(skb, MAX_EVENT_SIZE - skb->len);
if (mwifiex_map_pci_memory(adapter, skb,
MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE))
+ DMA_FROM_DEVICE))
return -1;
card->evt_buf_list[rdptr] = skb;
desc = card->evtbd_ring[rdptr];
@@ -2238,7 +2391,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
"interrupt status during fw dnld.\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
ret = -1;
goto done;
}
@@ -2250,12 +2403,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
mwifiex_dbg(adapter, ERROR, "%s: Card failed to ACK download\n",
__func__);
mwifiex_unmap_pci_memory(adapter, skb,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
ret = -1;
goto done;
}
- mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, DMA_TO_DEVICE);
offset += txlen;
} while (true);
@@ -2925,15 +3078,9 @@ static int mwifiex_init_pcie(struct mwifiex_adapter *adapter)
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (ret) {
- pr_err("set_dma_mask(32) failed: %d\n", ret);
- goto err_set_dma_mask;
- }
-
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- pr_err("set_consistent_dma_mask(64) failed\n");
+ pr_err("dma_set_mask(32) failed: %d\n", ret);
goto err_set_dma_mask;
}
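
The pcie.c hunks above migrate the driver from the legacy PCI DMA wrappers to the generic DMA API (and, in passing, correct the old error string, which said 64 while the mask being set was 32-bit). A minimal sketch of the mapping, assuming a struct pci_dev *pdev; the function names are illustrative, not the driver's:

/* Sketch of the legacy-to-generic DMA API mapping used above. */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

static int example_ring_alloc(struct pci_dev *pdev, size_t size,
			      void **vbase, dma_addr_t *pbase)
{
	/* Old: pci_set_dma_mask() + pci_set_consistent_dma_mask() */
	int ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

	if (ret)
		return ret;

	/* Old: pci_alloc_consistent(pdev, size, pbase), which implied
	 * GFP_ATOMIC; dma_alloc_coherent() takes an explicit gfp_t.
	 */
	*vbase = dma_alloc_coherent(&pdev->dev, size, pbase, GFP_KERNEL);
	return *vbase ? 0 : -ENOMEM;
}

static void example_ring_free(struct pci_dev *pdev, size_t size,
			      void *vbase, dma_addr_t pbase)
{
	/* Old: pci_free_consistent(). For the streaming map/unmap calls
	 * elsewhere in this file, the directions map one-to-one:
	 * PCI_DMA_TODEVICE -> DMA_TO_DEVICE,
	 * PCI_DMA_FROMDEVICE -> DMA_FROM_DEVICE.
	 */
	dma_free_coherent(&pdev->dev, size, vbase, pbase);
}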
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index fc59b522f670..843d57eda820 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -158,127 +158,6 @@ struct mwifiex_pcie_card_reg {
u8 msix_support;
};
-static const struct mwifiex_pcie_card_reg mwifiex_reg_8766 = {
- .cmd_addr_lo = PCIE_SCRATCH_0_REG,
- .cmd_addr_hi = PCIE_SCRATCH_1_REG,
- .cmd_size = PCIE_SCRATCH_2_REG,
- .fw_status = PCIE_SCRATCH_3_REG,
- .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
- .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
- .tx_rdptr = PCIE_SCRATCH_6_REG,
- .tx_wrptr = PCIE_SCRATCH_7_REG,
- .rx_rdptr = PCIE_SCRATCH_8_REG,
- .rx_wrptr = PCIE_SCRATCH_9_REG,
- .evt_rdptr = PCIE_SCRATCH_10_REG,
- .evt_wrptr = PCIE_SCRATCH_11_REG,
- .drv_rdy = PCIE_SCRATCH_12_REG,
- .tx_start_ptr = 0,
- .tx_mask = MWIFIEX_TXBD_MASK,
- .tx_wrap_mask = 0,
- .rx_mask = MWIFIEX_RXBD_MASK,
- .rx_wrap_mask = 0,
- .tx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
- .rx_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
- .evt_rollover_ind = MWIFIEX_BD_FLAG_ROLLOVER_IND,
- .ring_flag_sop = 0,
- .ring_flag_eop = 0,
- .ring_flag_xs_sop = 0,
- .ring_flag_xs_eop = 0,
- .ring_tx_start_ptr = 0,
- .pfu_enabled = 0,
- .sleep_cookie = 1,
- .msix_support = 0,
-};
-
-static const struct mwifiex_pcie_card_reg mwifiex_reg_8897 = {
- .cmd_addr_lo = PCIE_SCRATCH_0_REG,
- .cmd_addr_hi = PCIE_SCRATCH_1_REG,
- .cmd_size = PCIE_SCRATCH_2_REG,
- .fw_status = PCIE_SCRATCH_3_REG,
- .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
- .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
- .tx_rdptr = PCIE_RD_DATA_PTR_Q0_Q1,
- .tx_wrptr = PCIE_WR_DATA_PTR_Q0_Q1,
- .rx_rdptr = PCIE_WR_DATA_PTR_Q0_Q1,
- .rx_wrptr = PCIE_RD_DATA_PTR_Q0_Q1,
- .evt_rdptr = PCIE_SCRATCH_10_REG,
- .evt_wrptr = PCIE_SCRATCH_11_REG,
- .drv_rdy = PCIE_SCRATCH_12_REG,
- .tx_start_ptr = 16,
- .tx_mask = 0x03FF0000,
- .tx_wrap_mask = 0x07FF0000,
- .rx_mask = 0x000003FF,
- .rx_wrap_mask = 0x000007FF,
- .tx_rollover_ind = MWIFIEX_BD_FLAG_TX_ROLLOVER_IND,
- .rx_rollover_ind = MWIFIEX_BD_FLAG_RX_ROLLOVER_IND,
- .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
- .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
- .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
- .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
- .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
- .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
- .pfu_enabled = 1,
- .sleep_cookie = 0,
- .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
- .fw_dump_start = PCIE_SCRATCH_14_REG,
- .fw_dump_end = 0xcff,
- .fw_dump_host_ready = 0xee,
- .fw_dump_read_done = 0xfe,
- .msix_support = 0,
-};
-
-static const struct mwifiex_pcie_card_reg mwifiex_reg_8997 = {
- .cmd_addr_lo = PCIE_SCRATCH_0_REG,
- .cmd_addr_hi = PCIE_SCRATCH_1_REG,
- .cmd_size = PCIE_SCRATCH_2_REG,
- .fw_status = PCIE_SCRATCH_3_REG,
- .cmdrsp_addr_lo = PCIE_SCRATCH_4_REG,
- .cmdrsp_addr_hi = PCIE_SCRATCH_5_REG,
- .tx_rdptr = 0xC1A4,
- .tx_wrptr = 0xC174,
- .rx_rdptr = 0xC174,
- .rx_wrptr = 0xC1A4,
- .evt_rdptr = PCIE_SCRATCH_10_REG,
- .evt_wrptr = PCIE_SCRATCH_11_REG,
- .drv_rdy = PCIE_SCRATCH_12_REG,
- .tx_start_ptr = 16,
- .tx_mask = 0x0FFF0000,
- .tx_wrap_mask = 0x1FFF0000,
- .rx_mask = 0x00000FFF,
- .rx_wrap_mask = 0x00001FFF,
- .tx_rollover_ind = BIT(28),
- .rx_rollover_ind = BIT(12),
- .evt_rollover_ind = MWIFIEX_BD_FLAG_EVT_ROLLOVER_IND,
- .ring_flag_sop = MWIFIEX_BD_FLAG_SOP,
- .ring_flag_eop = MWIFIEX_BD_FLAG_EOP,
- .ring_flag_xs_sop = MWIFIEX_BD_FLAG_XS_SOP,
- .ring_flag_xs_eop = MWIFIEX_BD_FLAG_XS_EOP,
- .ring_tx_start_ptr = MWIFIEX_BD_FLAG_TX_START_PTR,
- .pfu_enabled = 1,
- .sleep_cookie = 0,
- .fw_dump_ctrl = PCIE_SCRATCH_13_REG,
- .fw_dump_start = PCIE_SCRATCH_14_REG,
- .fw_dump_end = 0xcff,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_read_done = 0xdd,
- .msix_support = 0,
-};
-
-static struct memory_type_mapping mem_type_mapping_tbl_w8897[] = {
- {"ITCM", NULL, 0, 0xF0},
- {"DTCM", NULL, 0, 0xF1},
- {"SQRAM", NULL, 0, 0xF2},
- {"IRAM", NULL, 0, 0xF3},
- {"APU", NULL, 0, 0xF4},
- {"CIU", NULL, 0, 0xF5},
- {"ICU", NULL, 0, 0xF6},
- {"MAC", NULL, 0, 0xF7},
-};
-
-static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
- {"DUMP", NULL, 0, 0xDD},
-};
-
struct mwifiex_pcie_device {
const struct mwifiex_pcie_card_reg *reg;
u16 blksz_fw_dl;
@@ -289,34 +168,6 @@ struct mwifiex_pcie_device {
bool can_ext_scan;
};
-static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
- .reg = &mwifiex_reg_8766,
- .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .can_dump_fw = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
- .reg = &mwifiex_reg_8897,
- .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .can_dump_fw = true,
- .mem_type_mapping_tbl = mem_type_mapping_tbl_w8897,
- .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8897),
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
- .reg = &mwifiex_reg_8997,
- .blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .can_dump_fw = true,
- .mem_type_mapping_tbl = mem_type_mapping_tbl_w8997,
- .num_mem_types = ARRAY_SIZE(mem_type_mapping_tbl_w8997),
- .can_ext_scan = true,
-};
-
struct mwifiex_evt_buf_desc {
u64 paddr;
u16 len;
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index ff932627a46c..c2a685f63e95 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -1328,7 +1328,7 @@ int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
case WLAN_EID_CHANNEL_SWITCH:
bss_entry->chan_sw_ie_present = true;
- /* fall through */
+ fallthrough;
case WLAN_EID_PWR_CAPABILITY:
case WLAN_EID_TPC_REPORT:
case WLAN_EID_QUIET:
@@ -1889,7 +1889,7 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
chan, CFG80211_BSS_FTYPE_UNKNOWN,
bssid, timestamp,
cap_info_bitmap, beacon_period,
- ie_buf, ie_len, rssi, GFP_KERNEL);
+ ie_buf, ie_len, rssi, GFP_ATOMIC);
if (bss) {
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band;
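
The scan.c hunk above switches the cfg80211_inform_bss() allocation to GFP_ATOMIC, the correct flag when the caller may hold a spinlock or run in softirq context. A hypothetical helper illustrating the rule; the names here are for illustration only:

/* GFP_KERNEL may sleep, so any allocation made while a spinlock is
 * held (or from softirq/IRQ context) must use GFP_ATOMIC instead.
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

static void *example_alloc_under_lock(spinlock_t *lock, size_t size)
{
	void *p;

	spin_lock_bh(lock);		/* atomic context from here on */
	p = kzalloc(size, GFP_ATOMIC);	/* GFP_KERNEL here would be a bug */
	spin_unlock_bh(lock);
	return p;
}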
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index a042965962a2..bde9e4bbfffe 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -35,6 +35,433 @@ static void mwifiex_sdio_work(struct work_struct *work);
static struct mwifiex_if_ops sdio_ops;
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
+ .start_rd_port = 1,
+ .start_wr_port = 1,
+ .base_0_reg = 0x0040,
+ .base_1_reg = 0x0041,
+ .poll_reg = 0x30,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK,
+ .host_int_rsr_reg = 0x1,
+ .host_int_mask_reg = 0x02,
+ .host_int_status_reg = 0x03,
+ .status_reg_0 = 0x60,
+ .status_reg_1 = 0x61,
+ .sdio_int_mask = 0x3f,
+ .data_port_mask = 0x0000fffe,
+ .io_port_0_reg = 0x78,
+ .io_port_1_reg = 0x79,
+ .io_port_2_reg = 0x7A,
+ .max_mp_regs = 64,
+ .rd_bitmap_l = 0x04,
+ .rd_bitmap_u = 0x05,
+ .wr_bitmap_l = 0x06,
+ .wr_bitmap_u = 0x07,
+ .rd_len_p0_l = 0x08,
+ .rd_len_p0_u = 0x09,
+ .card_misc_cfg_reg = 0x6c,
+ .func1_dump_reg_start = 0x0,
+ .func1_dump_reg_end = 0x9,
+ .func1_scratch_reg = 0x60,
+ .func1_spec_reg_num = 5,
+ .func1_spec_reg_table = {0x28, 0x30, 0x34, 0x38, 0x3c},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0x60,
+ .base_1_reg = 0x61,
+ .poll_reg = 0x50,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x1,
+ .host_int_status_reg = 0x03,
+ .host_int_mask_reg = 0x02,
+ .status_reg_0 = 0xc0,
+ .status_reg_1 = 0xc1,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xD8,
+ .io_port_1_reg = 0xD9,
+ .io_port_2_reg = 0xDA,
+ .max_mp_regs = 184,
+ .rd_bitmap_l = 0x04,
+ .rd_bitmap_u = 0x05,
+ .rd_bitmap_1l = 0x06,
+ .rd_bitmap_1u = 0x07,
+ .wr_bitmap_l = 0x08,
+ .wr_bitmap_u = 0x09,
+ .wr_bitmap_1l = 0x0a,
+ .wr_bitmap_1u = 0x0b,
+ .rd_len_p0_l = 0x0c,
+ .rd_len_p0_u = 0x0d,
+ .card_misc_cfg_reg = 0xcc,
+ .card_cfg_2_1_reg = 0xcd,
+ .cmd_rd_len_0 = 0xb4,
+ .cmd_rd_len_1 = 0xb5,
+ .cmd_rd_len_2 = 0xb6,
+ .cmd_rd_len_3 = 0xb7,
+ .cmd_cfg_0 = 0xb8,
+ .cmd_cfg_1 = 0xb9,
+ .cmd_cfg_2 = 0xba,
+ .cmd_cfg_3 = 0xbb,
+ .fw_dump_host_ready = 0xee,
+ .fw_dump_ctrl = 0xe2,
+ .fw_dump_start = 0xe3,
+ .fw_dump_end = 0xea,
+ .func1_dump_reg_start = 0x0,
+ .func1_dump_reg_end = 0xb,
+ .func1_scratch_reg = 0xc0,
+ .func1_spec_reg_num = 8,
+ .func1_spec_reg_table = {0x4C, 0x50, 0x54, 0x55, 0x58,
+ 0x59, 0x5c, 0x5d},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf0,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xe8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
+ 0x60, 0x61, 0x62, 0x64,
+ 0x65, 0x66, 0x68, 0x69,
+ 0x6a},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0x6C,
+ .base_1_reg = 0x6D,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0x90,
+ .status_reg_1 = 0x91,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0x90,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
+ 0x61, 0x62, 0x64, 0x65, 0x66,
+ 0x68, 0x69, 0x6a},
+};
+
+static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
+ .start_rd_port = 0,
+ .start_wr_port = 0,
+ .base_0_reg = 0xF8,
+ .base_1_reg = 0xF9,
+ .poll_reg = 0x5C,
+ .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
+ CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
+ .host_int_rsr_reg = 0x4,
+ .host_int_status_reg = 0x0C,
+ .host_int_mask_reg = 0x08,
+ .status_reg_0 = 0xE8,
+ .status_reg_1 = 0xE9,
+ .sdio_int_mask = 0xff,
+ .data_port_mask = 0xffffffff,
+ .io_port_0_reg = 0xE4,
+ .io_port_1_reg = 0xE5,
+ .io_port_2_reg = 0xE6,
+ .max_mp_regs = 196,
+ .rd_bitmap_l = 0x10,
+ .rd_bitmap_u = 0x11,
+ .rd_bitmap_1l = 0x12,
+ .rd_bitmap_1u = 0x13,
+ .wr_bitmap_l = 0x14,
+ .wr_bitmap_u = 0x15,
+ .wr_bitmap_1l = 0x16,
+ .wr_bitmap_1u = 0x17,
+ .rd_len_p0_l = 0x18,
+ .rd_len_p0_u = 0x19,
+ .card_misc_cfg_reg = 0xd8,
+ .card_cfg_2_1_reg = 0xd9,
+ .cmd_rd_len_0 = 0xc0,
+ .cmd_rd_len_1 = 0xc1,
+ .cmd_rd_len_2 = 0xc2,
+ .cmd_rd_len_3 = 0xc3,
+ .cmd_cfg_0 = 0xc4,
+ .cmd_cfg_1 = 0xc5,
+ .cmd_cfg_2 = 0xc6,
+ .cmd_cfg_3 = 0xc7,
+ .fw_dump_host_ready = 0xcc,
+ .fw_dump_ctrl = 0xf9,
+ .fw_dump_start = 0xf1,
+ .fw_dump_end = 0xf8,
+ .func1_dump_reg_start = 0x10,
+ .func1_dump_reg_end = 0x17,
+ .func1_scratch_reg = 0xE8,
+ .func1_spec_reg_num = 13,
+ .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
+ 0x61, 0x62, 0x64, 0x65, 0x66,
+ 0x68, 0x69, 0x6a},
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
+ .firmware = SD8786_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = false,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
+ .firmware = SD8787_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
+ .firmware = SD8797_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
+ .firmware = SD8897_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8897,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
+ .firmware = SD8977_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8977,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
+ .firmware = SD8997_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8997,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
+ .firmware = SD8887_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8887,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = false,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
+ .firmware = SD8987_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd8987,
+ .max_ports = 32,
+ .mp_agg_pkt_limit = 16,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
+ .supports_sdio_new_mode = true,
+ .has_control_mask = false,
+ .can_dump_fw = true,
+ .fw_dump_enh = true,
+ .can_auto_tdls = true,
+ .can_ext_scan = true,
+};
+
+static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
+ .firmware = SD8801_DEFAULT_FW_NAME,
+ .reg = &mwifiex_reg_sd87xx,
+ .max_ports = 16,
+ .mp_agg_pkt_limit = 8,
+ .supports_sdio_new_mode = false,
+ .has_control_mask = true,
+ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
+ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
+ .can_dump_fw = false,
+ .can_auto_tdls = false,
+ .can_ext_scan = true,
+};
+
static struct memory_type_mapping generic_mem_type_map[] = {
{"DUMP", NULL, 0, 0xDD},
};
@@ -1976,6 +2403,8 @@ error:
kfree(card->mpa_rx.buf);
card->mpa_tx.buf_size = 0;
card->mpa_rx.buf_size = 0;
+ card->mpa_tx.buf = NULL;
+ card->mpa_rx.buf = NULL;
}
return ret;
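
The sdio.c error path above now clears mpa_tx.buf and mpa_rx.buf after freeing them, so a later cleanup pass sees NULL rather than a stale pointer. A sketch of the free-then-NULL idiom; the struct here is illustrative, not the driver's:

#include <linux/slab.h>
#include <linux/types.h>

struct example_agg_buf {
	u8 *buf;
	u32 buf_size;
};

static void example_release_agg_buf(struct example_agg_buf *agg)
{
	kfree(agg->buf);	/* kfree(NULL) is a safe no-op */
	agg->buf = NULL;	/* second release becomes harmless */
	agg->buf_size = 0;
}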
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index 8b476b007c5e..dec534a6ddb1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -290,433 +290,6 @@ struct mwifiex_sdio_device {
bool can_ext_scan;
};
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd87xx = {
- .start_rd_port = 1,
- .start_wr_port = 1,
- .base_0_reg = 0x0040,
- .base_1_reg = 0x0041,
- .poll_reg = 0x30,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK,
- .host_int_rsr_reg = 0x1,
- .host_int_mask_reg = 0x02,
- .host_int_status_reg = 0x03,
- .status_reg_0 = 0x60,
- .status_reg_1 = 0x61,
- .sdio_int_mask = 0x3f,
- .data_port_mask = 0x0000fffe,
- .io_port_0_reg = 0x78,
- .io_port_1_reg = 0x79,
- .io_port_2_reg = 0x7A,
- .max_mp_regs = 64,
- .rd_bitmap_l = 0x04,
- .rd_bitmap_u = 0x05,
- .wr_bitmap_l = 0x06,
- .wr_bitmap_u = 0x07,
- .rd_len_p0_l = 0x08,
- .rd_len_p0_u = 0x09,
- .card_misc_cfg_reg = 0x6c,
- .func1_dump_reg_start = 0x0,
- .func1_dump_reg_end = 0x9,
- .func1_scratch_reg = 0x60,
- .func1_spec_reg_num = 5,
- .func1_spec_reg_table = {0x28, 0x30, 0x34, 0x38, 0x3c},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8897 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0x60,
- .base_1_reg = 0x61,
- .poll_reg = 0x50,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x1,
- .host_int_status_reg = 0x03,
- .host_int_mask_reg = 0x02,
- .status_reg_0 = 0xc0,
- .status_reg_1 = 0xc1,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xD8,
- .io_port_1_reg = 0xD9,
- .io_port_2_reg = 0xDA,
- .max_mp_regs = 184,
- .rd_bitmap_l = 0x04,
- .rd_bitmap_u = 0x05,
- .rd_bitmap_1l = 0x06,
- .rd_bitmap_1u = 0x07,
- .wr_bitmap_l = 0x08,
- .wr_bitmap_u = 0x09,
- .wr_bitmap_1l = 0x0a,
- .wr_bitmap_1u = 0x0b,
- .rd_len_p0_l = 0x0c,
- .rd_len_p0_u = 0x0d,
- .card_misc_cfg_reg = 0xcc,
- .card_cfg_2_1_reg = 0xcd,
- .cmd_rd_len_0 = 0xb4,
- .cmd_rd_len_1 = 0xb5,
- .cmd_rd_len_2 = 0xb6,
- .cmd_rd_len_3 = 0xb7,
- .cmd_cfg_0 = 0xb8,
- .cmd_cfg_1 = 0xb9,
- .cmd_cfg_2 = 0xba,
- .cmd_cfg_3 = 0xbb,
- .fw_dump_host_ready = 0xee,
- .fw_dump_ctrl = 0xe2,
- .fw_dump_start = 0xe3,
- .fw_dump_end = 0xea,
- .func1_dump_reg_start = 0x0,
- .func1_dump_reg_end = 0xb,
- .func1_scratch_reg = 0xc0,
- .func1_spec_reg_num = 8,
- .func1_spec_reg_table = {0x4C, 0x50, 0x54, 0x55, 0x58,
- 0x59, 0x5c, 0x5d},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8977 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0xF8,
- .base_1_reg = 0xF9,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0xE8,
- .status_reg_1 = 0xE9,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0xe8,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
- 0x60, 0x61, 0x62, 0x64,
- 0x65, 0x66, 0x68, 0x69,
- 0x6a},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8997 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0xF8,
- .base_1_reg = 0xF9,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0xE8,
- .status_reg_1 = 0xE9,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_ctrl = 0xf0,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0xe8,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D,
- 0x60, 0x61, 0x62, 0x64,
- 0x65, 0x66, 0x68, 0x69,
- 0x6a},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0x6C,
- .base_1_reg = 0x6D,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0x90,
- .status_reg_1 = 0x91,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0x90,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
- 0x61, 0x62, 0x64, 0x65, 0x66,
- 0x68, 0x69, 0x6a},
-};
-
-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = {
- .start_rd_port = 0,
- .start_wr_port = 0,
- .base_0_reg = 0xF8,
- .base_1_reg = 0xF9,
- .poll_reg = 0x5C,
- .host_int_enable = UP_LD_HOST_INT_MASK | DN_LD_HOST_INT_MASK |
- CMD_PORT_UPLD_INT_MASK | CMD_PORT_DNLD_INT_MASK,
- .host_int_rsr_reg = 0x4,
- .host_int_status_reg = 0x0C,
- .host_int_mask_reg = 0x08,
- .status_reg_0 = 0xE8,
- .status_reg_1 = 0xE9,
- .sdio_int_mask = 0xff,
- .data_port_mask = 0xffffffff,
- .io_port_0_reg = 0xE4,
- .io_port_1_reg = 0xE5,
- .io_port_2_reg = 0xE6,
- .max_mp_regs = 196,
- .rd_bitmap_l = 0x10,
- .rd_bitmap_u = 0x11,
- .rd_bitmap_1l = 0x12,
- .rd_bitmap_1u = 0x13,
- .wr_bitmap_l = 0x14,
- .wr_bitmap_u = 0x15,
- .wr_bitmap_1l = 0x16,
- .wr_bitmap_1u = 0x17,
- .rd_len_p0_l = 0x18,
- .rd_len_p0_u = 0x19,
- .card_misc_cfg_reg = 0xd8,
- .card_cfg_2_1_reg = 0xd9,
- .cmd_rd_len_0 = 0xc0,
- .cmd_rd_len_1 = 0xc1,
- .cmd_rd_len_2 = 0xc2,
- .cmd_rd_len_3 = 0xc3,
- .cmd_cfg_0 = 0xc4,
- .cmd_cfg_1 = 0xc5,
- .cmd_cfg_2 = 0xc6,
- .cmd_cfg_3 = 0xc7,
- .fw_dump_host_ready = 0xcc,
- .fw_dump_ctrl = 0xf9,
- .fw_dump_start = 0xf1,
- .fw_dump_end = 0xf8,
- .func1_dump_reg_start = 0x10,
- .func1_dump_reg_end = 0x17,
- .func1_scratch_reg = 0xE8,
- .func1_spec_reg_num = 13,
- .func1_spec_reg_table = {0x08, 0x58, 0x5C, 0x5D, 0x60,
- 0x61, 0x62, 0x64, 0x65, 0x66,
- 0x68, 0x69, 0x6a},
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = {
- .firmware = SD8786_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = false,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = {
- .firmware = SD8787_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = {
- .firmware = SD8797_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = {
- .firmware = SD8897_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8897,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = {
- .firmware = SD8977_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8977,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .fw_dump_enh = true,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = {
- .firmware = SD8997_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8997,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .fw_dump_enh = true,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = {
- .firmware = SD8887_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8887,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_32K,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = false,
- .can_auto_tdls = true,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = {
- .firmware = SD8987_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd8987,
- .max_ports = 32,
- .mp_agg_pkt_limit = 16,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX,
- .supports_sdio_new_mode = true,
- .has_control_mask = false,
- .can_dump_fw = true,
- .fw_dump_enh = true,
- .can_auto_tdls = true,
- .can_ext_scan = true,
-};
-
-static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = {
- .firmware = SD8801_DEFAULT_FW_NAME,
- .reg = &mwifiex_reg_sd87xx,
- .max_ports = 16,
- .mp_agg_pkt_limit = 8,
- .supports_sdio_new_mode = false,
- .has_control_mask = true,
- .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
- .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_16K,
- .can_dump_fw = false,
- .can_auto_tdls = false,
- .can_ext_scan = true,
-};
-
/*
* .cmdrsp_complete handler
*/
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 77c8595f84f8..9bbdb8dfce62 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -350,11 +350,7 @@ int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
/* Forward multicast/broadcast packet to upper layer*/
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
-
+ netif_rx_any_context(skb);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6f3cfde4654c..426e39d4ccf0 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -1353,7 +1353,8 @@ static void mwifiex_usb_cleanup_tx_aggr(struct mwifiex_adapter *adapter)
skb_dequeue(&port->tx_aggr.aggr_list)))
mwifiex_write_data_complete(adapter, skb_tmp,
0, -1);
- del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
+ if (port->tx_aggr.timer_cnxt.hold_timer.function)
+ del_timer_sync(&port->tx_aggr.timer_cnxt.hold_timer);
port->tx_aggr.timer_cnxt.is_hold_timer_set = false;
port->tx_aggr.timer_cnxt.hold_tmo_msecs = 0;
}
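
The usb.c hunk above guards del_timer_sync() so it only runs once the hold timer has actually been set up; timer_setup() fills in timer->function, so a NULL function pointer doubles as a "never initialized" flag. A minimal sketch of the guard:

#include <linux/timer.h>

static void example_stop_timer_if_set_up(struct timer_list *t)
{
	if (t->function)	/* non-NULL once timer_setup() has run */
		del_timer_sync(t);
}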
diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c
index de89a1e710b1..d583fa600a29 100644
--- a/drivers/net/wireless/marvell/mwifiex/util.c
+++ b/drivers/net/wireless/marvell/mwifiex/util.c
@@ -488,11 +488,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
(skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
- if (in_interrupt())
- netif_rx(skb);
- else
- netif_rx_ni(skb);
-
+ netif_rx_any_context(skb);
return 0;
}
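
The uap_txrx.c and util.c hunks above collapse the open-coded in_interrupt() check into netif_rx_any_context(), which performs the same dispatch to netif_rx() or netif_rx_ni() internally. A sketch of the before/after, assuming an skb ready for delivery:

#include <linux/netdevice.h>

static void example_deliver_rx(struct sk_buff *skb)
{
	/* Previously:
	 *	if (in_interrupt())
	 *		netif_rx(skb);
	 *	else
	 *		netif_rx_ni(skb);
	 */
	netif_rx_any_context(skb);
}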
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c
index a06fff199ea3..b8f19ca73414 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.c
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.c
@@ -40,6 +40,21 @@
static bool disable_tx_amsdu;
module_param(disable_tx_amsdu, bool, 0644);
+/* This table inverts the tos_to_tid operation to get a priority
+ * that is in sequential order and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+const u8 tos_to_tid_inv[] = {
+ 0x02, /* from tos_to_tid[2] = 0 */
+ 0x00, /* from tos_to_tid[0] = 1 */
+ 0x01, /* from tos_to_tid[1] = 2 */
+ 0x03,
+ 0x04,
+ 0x05,
+ 0x06,
+ 0x07
+};
+
/* WMM information IE */
static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
0x00, 0x50, 0xf2, 0x02,
diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.h b/drivers/net/wireless/marvell/mwifiex/wmm.h
index 04d7da95e307..1cb3d1804758 100644
--- a/drivers/net/wireless/marvell/mwifiex/wmm.h
+++ b/drivers/net/wireless/marvell/mwifiex/wmm.h
@@ -31,22 +31,8 @@ enum ieee_types_wmm_ecw_bitmasks {
MWIFIEX_ECW_MAX = (BIT(4) | BIT(5) | BIT(6) | BIT(7)),
};
-static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
-
-/*
- * This table inverses the tos_to_tid operation to get a priority
- * which is in sequential order, and can be compared.
- * Use this to compare the priority of two different TIDs.
- */
-static const u8 tos_to_tid_inv[] = {
- 0x02, /* from tos_to_tid[2] = 0 */
- 0x00, /* from tos_to_tid[0] = 1 */
- 0x01, /* from tos_to_tid[1] = 2 */
- 0x03,
- 0x04,
- 0x05,
- 0x06,
- 0x07};
+extern const u16 mwifiex_1d_to_wmm_queue[];
+extern const u8 tos_to_tid_inv[];
/*
* This function retrieves the TID of the given RA list.
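
The wmm.h hunk above replaces static const table definitions with extern declarations, moving the single definition into wmm.c: a static const array defined in a header gives every including translation unit its own private copy (plus unused-variable warnings). A generic sketch of the pattern, with illustrative file names and values taken from the wmm.c hunk:

/* table.h — declaration only; every includer shares one object */
extern const u8 tos_to_tid_inv[];

/* table.c — the single definition */
const u8 tos_to_tid_inv[] = { 0x02, 0x00, 0x01, 0x03,
			      0x04, 0x05, 0x06, 0x07 };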
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 97f23f93f6e7..23efd7075df6 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -4630,10 +4630,10 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void mwl8k_tx_poll(unsigned long data)
+static void mwl8k_tx_poll(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_priv *priv = from_tasklet(priv, t, poll_tx_task);
+ struct ieee80211_hw *hw = pci_get_drvdata(priv->pdev);
int limit;
int i;
@@ -4659,10 +4659,10 @@ static void mwl8k_tx_poll(unsigned long data)
}
}
-static void mwl8k_rx_poll(unsigned long data)
+static void mwl8k_rx_poll(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_priv *priv = from_tasklet(priv, t, poll_rx_task);
+ struct ieee80211_hw *hw = pci_get_drvdata(priv->pdev);
int limit;
limit = 32;
@@ -6120,9 +6120,9 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work);
/* TX reclaim and RX tasklets. */
- tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_setup(&priv->poll_tx_task, mwl8k_tx_poll);
tasklet_disable(&priv->poll_tx_task);
- tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_setup(&priv->poll_rx_task, mwl8k_rx_poll);
tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
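
The mwl8k.c hunks above convert the old "unsigned long data" tasklet callbacks to the type-safe API: tasklet_setup() registers the callback without a data cookie, from_tasklet() (a container_of() wrapper) recovers the enclosing private struct from the tasklet pointer, and the hw pointer is then re-derived via pci_get_drvdata(). A sketch of the conversion under those assumptions:

#include <linux/interrupt.h>

struct example_priv {
	struct tasklet_struct poll_tx_task;
};

static void example_tx_poll(struct tasklet_struct *t)
{
	/* from_tasklet(var, tasklet, member) == container_of() */
	struct example_priv *priv = from_tasklet(priv, t, poll_tx_task);

	/* ... reclaim TX descriptors for priv ... */
	(void)priv;
}

static void example_init(struct example_priv *priv)
{
	/* Old: tasklet_init(&priv->poll_tx_task, fn, (unsigned long)priv); */
	tasklet_setup(&priv->poll_tx_task, example_tx_poll);
}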
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index 5d58b16bfe9f..52f583cb1418 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -31,15 +31,14 @@ int mt76_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++) {
- struct mt76_sw_queue *q = &dev->q_tx[i];
+ struct mt76_queue *q = dev->q_tx[i];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
- "%d: queued=%d head=%d tail=%d swq_queued=%d\n",
- i, q->q->queued, q->q->head, q->q->tail,
- q->swq_queued);
+ "%d: queued=%d head=%d tail=%d\n",
+ i, q->queued, q->head, q->tail);
}
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6c25859dd386..214fc95b8a33 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -7,6 +7,76 @@
#include "mt76.h"
#include "dma.h"
+static struct mt76_txwi_cache *
+mt76_alloc_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+ dma_addr_t addr;
+ u8 *txwi;
+ int size;
+
+ size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
+ txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
+ if (!txwi)
+ return NULL;
+
+ addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+ t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
+ t->dma_addr = addr;
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+__mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = NULL;
+
+ spin_lock(&dev->lock);
+ if (!list_empty(&dev->txwi_cache)) {
+ t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
+ list);
+ list_del(&t->list);
+ }
+ spin_unlock(&dev->lock);
+
+ return t;
+}
+
+static struct mt76_txwi_cache *
+mt76_get_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
+
+ if (t)
+ return t;
+
+ return mt76_alloc_txwi(dev);
+}
+
+void
+mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ if (!t)
+ return;
+
+ spin_lock(&dev->lock);
+ list_add(&t->list, &dev->txwi_cache);
+ spin_unlock(&dev->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_put_txwi);
+
+static void
+mt76_free_pending_txwi(struct mt76_dev *dev)
+{
+ struct mt76_txwi_cache *t;
+
+ while ((t = __mt76_get_txwi(dev)) != NULL)
+ dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
+ DMA_TO_DEVICE);
+}
+
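
The dma.c helpers added above keep a spinlock-protected free list of pre-mapped TXWI buffers: mt76_get_txwi() pops a cached entry or allocates and DMA-maps a fresh one, mt76_put_txwi() recycles it onto dev->txwi_cache, and mt76_free_pending_txwi() unmaps whatever remains at teardown (called from mt76_dma_cleanup() later in this diff). A minimal pairing sketch; the getters are static to dma.c, so this only illustrates the intended usage:

static int example_tx_one(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = mt76_get_txwi(dev);

	if (!t)
		return -ENOMEM;
	/* ... write the TXWI through its mapping at t->dma_addr,
	 * then hand the frame to the hardware queue ...
	 */
	mt76_put_txwi(dev, t);	/* back on dev->txwi_cache for reuse */
	return 0;
}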
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
int idx, int n_desc, int bufsize,
@@ -49,6 +119,7 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_queue_buf *buf, int nbufs, u32 info,
struct sk_buff *skb, void *txwi)
{
+ struct mt76_queue_entry *entry;
struct mt76_desc *desc;
u32 ctrl;
int i, idx = -1;
@@ -61,10 +132,27 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
for (i = 0; i < nbufs; i += 2, buf += 2) {
u32 buf0 = buf[0].addr, buf1 = 0;
+ idx = q->head;
+ q->head = (q->head + 1) % q->ndesc;
+
+ desc = &q->desc[idx];
+ entry = &q->entry[idx];
+
+ if (buf[0].skip_unmap)
+ entry->skip_buf0 = true;
+ entry->skip_buf1 = i == nbufs - 1;
+
+ entry->dma_addr[0] = buf[0].addr;
+ entry->dma_len[0] = buf[0].len;
+
ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
if (i < nbufs - 1) {
+ entry->dma_addr[1] = buf[1].addr;
+ entry->dma_len[1] = buf[1].len;
buf1 = buf[1].addr;
ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+ if (buf[1].skip_unmap)
+ entry->skip_buf1 = true;
}
if (i == nbufs - 1)
@@ -72,11 +160,6 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
else if (i == nbufs - 2)
ctrl |= MT_DMA_CTL_LAST_SEC1;
- idx = q->head;
- q->head = (q->head + 1) % q->ndesc;
-
- desc = &q->desc[idx];
-
WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
WRITE_ONCE(desc->info, cpu_to_le32(info));
@@ -96,24 +179,14 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
struct mt76_queue_entry *prev_e)
{
struct mt76_queue_entry *e = &q->entry[idx];
- __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
- u32 ctrl = le32_to_cpu(__ctrl);
- if (!e->skip_buf0) {
- __le32 addr = READ_ONCE(q->desc[idx].buf0);
- u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
-
- dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ if (!e->skip_buf0)
+ dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
DMA_TO_DEVICE);
- }
- if (!(ctrl & MT_DMA_CTL_LAST_SEC0)) {
- __le32 addr = READ_ONCE(q->desc[idx].buf1);
- u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN1, ctrl);
-
- dma_unmap_single(dev->dev, le32_to_cpu(addr), len,
+ if (!e->skip_buf1)
+ dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
DMA_TO_DEVICE);
- }
if (e->txwi == DMA_DUMMY_DATA)
e->txwi = NULL;
@@ -137,19 +210,17 @@ mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
+ wmb();
writel(q->head, &q->regs->cpu_idx);
}
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- struct mt76_queue *q = sq->q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
- unsigned int n_swq_queued[8] = {};
- unsigned int n_queued = 0;
bool wake = false;
- int i, last;
+ int last;
if (!q)
return;
@@ -159,16 +230,9 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
else
last = readl(&q->regs->dma_idx);
- while ((q->queued > n_queued) && q->tail != last) {
+ while (q->queued > 0 && q->tail != last) {
mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
- if (entry.schedule)
- n_swq_queued[entry.qid]++;
-
- q->tail = (q->tail + 1) % q->ndesc;
- n_queued++;
-
- if (entry.skb)
- dev->drv->tx_complete_skb(dev, qid, &entry);
+ mt76_queue_tx_complete(dev, q, &entry);
if (entry.txwi) {
if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
@@ -178,29 +242,14 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (!flush && q->tail == last)
last = readl(&q->regs->dma_idx);
- }
-
- spin_lock_bh(&q->lock);
-
- q->queued -= n_queued;
- for (i = 0; i < 4; i++) {
- if (!n_swq_queued[i])
- continue;
-
- dev->q_tx[i].swq_queued -= n_swq_queued[i];
- }
-
- /* ext PHY */
- for (i = 0; i < 4; i++) {
- if (!n_swq_queued[i])
- continue;
- dev->q_tx[__MT_TXQ_MAX + i].swq_queued -= n_swq_queued[4 + i];
}
if (flush) {
+ spin_lock_bh(&q->lock);
mt76_dma_sync_idx(dev, q);
mt76_dma_kick_queue(dev, q);
+ spin_unlock_bh(&q->lock);
}
wake = wake && q->stopped &&
@@ -211,8 +260,6 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
if (wake)
ieee80211_wake_queue(dev->hw, qid);
}
@@ -227,7 +274,7 @@ mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
void *buf = e->buf;
int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
- buf_addr = le32_to_cpu(READ_ONCE(desc->buf0));
+ buf_addr = e->dma_addr[0];
if (len) {
u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));
*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
@@ -268,7 +315,7 @@ static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_buf buf;
dma_addr_t addr;
@@ -300,7 +347,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
@@ -378,7 +425,7 @@ free:
e.skb = tx_info.skb;
e.txwi = t;
- dev->drv->tx_complete_skb(dev, qid, &e);
+ dev->drv->tx_complete_skb(dev, &e);
mt76_put_txwi(dev, t);
return ret;
}
@@ -612,6 +659,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
{
int i;
+ mt76_worker_disable(&dev->tx_worker);
netif_napi_del(&dev->tx_napi);
for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
mt76_dma_tx_cleanup(dev, i, true);
@@ -620,5 +668,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
netif_napi_del(&dev->napi[i]);
mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
}
+
+ mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 3d4bf72700a5..4befe7f937a9 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
*/
+#include <linux/sched.h>
#include <linux/of.h>
#include "mt76.h"
@@ -304,11 +305,11 @@ mt76_phy_init(struct mt76_dev *dev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
- ieee80211_hw_set(hw, TX_AMSDU);
- /* TODO: avoid linearization for SDIO */
- if (!mt76_is_sdio(dev))
+ if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
+ ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, TX_FRAG_LIST);
+ }
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
@@ -433,14 +434,13 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
skb_queue_head_init(&dev->mcu.res_q);
init_waitqueue_head(&dev->mcu.wait);
mutex_init(&dev->mcu.mutex);
+ dev->tx_worker.fn = mt76_tx_worker;
INIT_LIST_HEAD(&dev->txwi_cache);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
skb_queue_head_init(&dev->rx_skb[i]);
- tasklet_init(&dev->tx_tasklet, mt76_tx_tasklet, (unsigned long)dev);
-
dev->wq = alloc_ordered_workqueue("mt76", 0);
if (!dev->wq) {
ieee80211_free_hw(hw);
@@ -483,7 +483,14 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
return ret;
}
- return ieee80211_register_hw(hw);
+ ret = ieee80211_register_hw(hw);
+ if (ret)
+ return ret;
+
+ WARN_ON(mt76_worker_setup(hw, &dev->tx_worker, NULL, "tx"));
+ sched_set_fifo_low(dev->tx_worker.task);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(mt76_register_device);
@@ -500,12 +507,11 @@ EXPORT_SYMBOL_GPL(mt76_unregister_device);
void mt76_free_device(struct mt76_dev *dev)
{
+ mt76_worker_teardown(&dev->tx_worker);
if (dev->wq) {
destroy_workqueue(dev->wq);
dev->wq = NULL;
}
- if (mt76_is_mmio(dev))
- mt76_tx_free(dev);
ieee80211_free_hw(dev->hw);
}
EXPORT_SYMBOL_GPL(mt76_free_device);
@@ -540,7 +546,7 @@ bool mt76_has_tx_pending(struct mt76_phy *phy)
offset = __MT_TXQ_MAX * (phy != &dev->phy);
for (i = 0; i < __MT_TXQ_MAX; i++) {
- q = dev->q_tx[offset + i].q;
+ q = dev->q_tx[offset + i];
if (q && q->queued)
return true;
}
@@ -870,7 +876,6 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
struct ieee80211_hw *hw;
struct mt76_wcid *wcid = status->wcid;
bool ps;
- int i;
hw = mt76_phy_hw(dev, status->ext_phy);
if (ieee80211_is_pspoll(hdr->frame_control) && !wcid) {
@@ -920,20 +925,6 @@ mt76_check_sta(struct mt76_dev *dev, struct sk_buff *skb)
dev->drv->sta_ps(dev, sta, ps);
ieee80211_sta_ps_transition(sta, ps);
-
- if (ps)
- return;
-
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
- struct mt76_txq *mtxq;
-
- if (!sta->txq[i])
- continue;
-
- mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
- if (!skb_queue_empty(&mtxq->retry_q))
- ieee80211_schedule_txq(hw, sta->txq[i]);
- }
}
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
@@ -995,8 +986,6 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv;
mtxq->wcid = wcid;
-
- mt76_txq_init(dev, sta->txq[i]);
}
ewma_signal_init(&wcid->rssi);
@@ -1024,8 +1013,6 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
dev->drv->sta_remove(dev, vif, sta);
mt76_tx_status_check(dev, wcid, true);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- mt76_txq_remove(dev, sta->txq[i]);
mt76_wcid_mask_clear(dev->wcid_mask, idx);
mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
@@ -1095,7 +1082,7 @@ EXPORT_SYMBOL_GPL(mt76_get_txpower);
static void
__mt76_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
- if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ if (vif->csa_active && ieee80211_beacon_cntdwn_is_complete(vif))
ieee80211_csa_finish(vif);
}
@@ -1120,7 +1107,7 @@ __mt76_csa_check(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->csa_active)
return;
- dev->csa_complete |= ieee80211_csa_is_complete(vif);
+ dev->csa_complete |= ieee80211_beacon_cntdwn_is_complete(vif);
}
void mt76_csa_check(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index af35bc388ae2..a5be66de1cff 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -17,11 +17,13 @@
#include "util.h"
#include "testmode.h"
-#define MT_TX_RING_SIZE 256
#define MT_MCU_RING_SIZE 32
#define MT_RX_BUF_SIZE 2048
#define MT_SKB_HEAD_LEN 128
+#define MT_MAX_NON_AQL_PKT 16
+#define MT_TXQ_FREE_THR 32
+
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
@@ -79,7 +81,8 @@ enum mt76_rxq_id {
struct mt76_queue_buf {
dma_addr_t addr;
- int len;
+ u16 len;
+ bool skip_unmap;
};
struct mt76_tx_info {
@@ -99,9 +102,11 @@ struct mt76_queue_entry {
struct urb *urb;
int buf_sz;
};
- enum mt76_txq_id qid;
+ u32 dma_addr[2];
+ u16 dma_len[2];
+ u16 wcid;
bool skip_buf0:1;
- bool schedule:1;
+ bool skip_buf1:1;
bool done:1;
};
@@ -135,13 +140,6 @@ struct mt76_queue {
struct page_frag_cache rx_page;
};
-struct mt76_sw_queue {
- struct mt76_queue *q;
-
- struct list_head swq;
- int swq_queued;
-};
-
struct mt76_mcu_ops {
u32 headroom;
u32 tailroom;
@@ -204,6 +202,7 @@ DECLARE_EWMA(signal, 10, 8);
struct mt76_wcid {
struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
+ atomic_t non_aql_packets;
unsigned long flags;
struct ewma_signal rssi;
@@ -214,6 +213,7 @@ struct mt76_wcid {
u8 sta:1;
u8 ext_phy:1;
+ u8 amsdu:1;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
@@ -226,11 +226,8 @@ struct mt76_wcid {
};
struct mt76_txq {
- struct mt76_sw_queue *swq;
struct mt76_wcid *wcid;
- struct sk_buff_head retry_q;
-
u16 agg_ssn;
bool send_bar;
bool aggr;
@@ -309,6 +306,7 @@ struct mt76_hw_cap {
#define MT_DRV_SW_RX_AIRTIME BIT(2)
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
+#define MT_DRV_AMSDU_OFFLOAD BIT(5)
struct mt76_driver_ops {
u32 drv_flags;
@@ -322,7 +320,7 @@ struct mt76_driver_ops {
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
- void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
+ void (*tx_complete_skb)(struct mt76_dev *dev,
struct mt76_queue_entry *e);
bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
@@ -445,14 +443,24 @@ struct mt76_usb {
} mcu;
};
+#define MT76S_XMIT_BUF_SZ (16 * PAGE_SIZE)
struct mt76_sdio {
- struct task_struct *tx_kthread;
- struct task_struct *kthread;
+ struct workqueue_struct *txrx_wq;
+ struct {
+ struct work_struct xmit_work;
+ struct work_struct status_work;
+ } tx;
+ struct {
+ struct work_struct recv_work;
+ struct work_struct net_work;
+ } rx;
+
struct work_struct stat_work;
- unsigned long state;
+ u8 *xmit_buf[MT_TXQ_MCU_WA];
struct sdio_func *func;
+ void *intr_data;
struct {
struct mutex lock;
@@ -593,12 +601,12 @@ struct mt76_dev {
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
struct list_head txwi_cache;
- struct mt76_sw_queue q_tx[2 * __MT_TXQ_MAX];
+ struct mt76_queue *q_tx[2 * __MT_TXQ_MAX];
struct mt76_queue q_rx[__MT_RXQ_MAX];
const struct mt76_queue_ops *queue_ops;
int tx_dma_idx[4];
- struct tasklet_struct tx_tasklet;
+ struct mt76_worker tx_worker;
struct napi_struct tx_napi;
struct delayed_work mac_work;
@@ -892,14 +900,13 @@ static inline bool mt76_testmode_enabled(struct mt76_dev *dev)
void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
void mt76_tx(struct mt76_phy *dev, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb);
-void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
-void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
bool send_bar);
+void mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb);
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid);
void mt76_txq_schedule_all(struct mt76_phy *phy);
-void mt76_tx_tasklet(unsigned long data);
+void mt76_tx_worker(struct mt76_worker *w);
void mt76_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u16 tids, int nframes,
@@ -932,7 +939,7 @@ struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
struct sk_buff_head *list);
void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
struct sk_buff_head *list);
-void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
+void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb);
void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -996,8 +1003,6 @@ mt76_tx_status_get_hw(struct mt76_dev *dev, struct sk_buff *skb)
return hw;
}
-void mt76_tx_free(struct mt76_dev *dev);
-struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
struct napi_struct *napi);
@@ -1005,6 +1010,8 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
struct napi_struct *napi);
void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
void mt76_testmode_tx_pending(struct mt76_dev *dev);
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e);
/* usb */
static inline bool mt76u_urb_error(struct urb *urb)
@@ -1039,7 +1046,7 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
-int mt76_skb_adjust_pad(struct sk_buff *skb);
+int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
void *buf, size_t len);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index 7a41cdf1c4ae..d728c5e43783 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -29,7 +29,7 @@ mt7603_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
mt76_wr(dev, MT_DMA_FQCR0, MT_DMA_FQCR0_BUSY |
FIELD_PREP(MT_DMA_FQCR0_TARGET_WCID, mvif->sta.wcid.idx) |
FIELD_PREP(MT_DMA_FQCR0_TARGET_QID,
- dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
+ dev->mt76.q_tx[MT_TXQ_CAB]->hw_idx) |
FIELD_PREP(MT_DMA_FQCR0_DEST_PORT_ID, 3) |
FIELD_PREP(MT_DMA_FQCR0_DEST_QUEUE_ID, 8));
@@ -78,7 +78,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
data.dev = dev;
__skb_queue_head_init(&data.q);
- q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
+ q = dev->mt76.q_tx[MT_TXQ_BEACON];
spin_lock_bh(&q->lock);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
@@ -95,7 +95,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
if (dev->mt76.csa_complete)
goto out;
- q = dev->mt76.q_tx[MT_TXQ_CAB].q;
+ q = dev->mt76.q_tx[MT_TXQ_CAB];
do {
nframes = skb_queue_len(&data.q);
ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
@@ -136,7 +136,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
out:
mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
- if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
+ if (dev->mt76.q_tx[MT_TXQ_BEACON]->queued >
hweight8(dev->mt76.beacon_mask))
dev->beacon_check++;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
index 8ce6880b2bb8..f52165dff422 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/debugfs.c
@@ -70,7 +70,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt7603_edcca_get,
mt7603_edcca_set, "%lld\n");
static int
-mt7603_ampdu_stat_read(struct seq_file *file, void *data)
+mt7603_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt7603_dev *dev = file->private;
int bound[3], i, range;
@@ -91,18 +91,7 @@ mt7603_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7603_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7603_ampdu_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_ampdu_stat = {
- .open = mt7603_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7603_ampdu_stat);
void mt7603_init_debugfs(struct mt7603_dev *dev)
{
@@ -112,7 +101,8 @@ void mt7603_init_debugfs(struct mt7603_dev *dev)
if (!dir)
return;
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev,
+ &mt7603_ampdu_stat_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "xmit-queues", dir,
mt76_queues_read);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index a08b85281170..d60d00f6f6a0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -5,8 +5,7 @@
#include "../dma.h"
static int
-mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -19,8 +18,7 @@ mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -123,7 +121,7 @@ void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt76_rx(&dev->mt76, q, skb);
return;
}
- /* fall through */
+ fallthrough;
default:
dev_kfree_skb(skb);
break;
@@ -165,7 +163,7 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
mt7603_mac_sta_poll(dev);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
return 0;
}
@@ -193,29 +191,28 @@ int mt7603_dma_init(struct mt7603_dev *dev)
mt7603_pse_client_reset(dev);
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[i],
- wmm_queue_map[i],
- MT_TX_RING_SIZE);
+ ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i],
+ MT7603_TX_RING_SIZE);
if (ret)
return ret;
}
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
- MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD,
+ MT_TX_HW_QUEUE_MGMT, MT7603_PSD_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_BEACON],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON,
MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
if (ret)
return ret;
- ret = mt7603_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_CAB],
+ ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB,
MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
if (ret)
return ret;
@@ -249,6 +246,5 @@ void mt7603_dma_cleanup(struct mt7603_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN |
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
- tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
index 3ee06e2577b8..01f1e0da5ee1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.c
@@ -147,8 +147,14 @@ static int mt7603_check_eeprom(struct mt76_dev *dev)
}
}
+static inline bool is_mt7688(struct mt7603_dev *dev)
+{
+ return mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4);
+}
+
int mt7603_eeprom_init(struct mt7603_dev *dev)
{
+ u8 *eeprom;
int ret;
ret = mt7603_eeprom_load(dev);
@@ -163,9 +169,16 @@ int mt7603_eeprom_init(struct mt7603_dev *dev)
MT7603_EEPROM_SIZE);
}
+ eeprom = (u8 *)dev->mt76.eeprom.data;
dev->mt76.cap.has_2ghz = true;
- memcpy(dev->mt76.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
- ETH_ALEN);
+ memcpy(dev->mt76.macaddr, eeprom + MT_EE_MAC_ADDR, ETH_ALEN);
+
+ /* Check for 1SS devices */
+ dev->mphy.antenna_mask = 3;
+ if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 ||
+ FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, eeprom[MT_EE_NIC_CONF_0]) == 1 ||
+ is_mt7688(dev))
+ dev->mphy.antenna_mask = 1;
mt76_eeprom_override(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
index b893facfba48..4687d6dc00dc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/eeprom.h
@@ -85,4 +85,7 @@ enum mt7603_eeprom_source {
MT_EE_SRC_FLASH,
};
+#define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0)
+#define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4)
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 94196599797e..c4848fafd270 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -536,11 +536,6 @@ int mt7603_register_device(struct mt7603_dev *dev)
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
- /* Check for 7688, which only has 1SS */
- dev->mphy.antenna_mask = 3;
- if (mt76_rr(dev, MT_EFUSE_BASE + 0x64) & BIT(4))
- dev->mphy.antenna_mask = 1;
-
dev->slottime = 9;
dev->sensitivity_limit = 28;
dev->dynamic_sensitivity = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 8060c1514396..f665a1c95eed 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -445,7 +445,7 @@ void mt7603_mac_sta_poll(struct mt7603_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
for (i = 0; i < 4; i++) {
- struct mt76_queue *q = dev->mt76.q_tx[i].q;
+ struct mt76_queue *q = dev->mt76.q_tx[i];
u8 qidx = q->hw_idx;
u8 tid = ac_to_tid[i];
u32 txtime = airtime[qidx];
@@ -592,7 +592,7 @@ mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb)
switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
@@ -896,7 +896,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
- struct mt76_queue *q = dev->mt76.q_tx[qid].q;
+ struct mt76_queue *q = dev->mt76.q_tx[qid];
struct mt7603_vif *mvif;
int wlan_idx;
int hdr_len = ieee80211_get_hdrlen_from_skb(skb);
@@ -1036,6 +1036,8 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
IEEE80211_TX_CTL_CLEAR_PS_FILT)) ||
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
mt7603_wtbl_set_ps(dev, msta, false);
+
+ mt76_tx_check_agg_ssn(sta, tx_info->skb);
}
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
@@ -1161,7 +1163,7 @@ out:
switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
sband = &dev->mphy.sband_5g.sband;
@@ -1269,8 +1271,7 @@ out:
rcu_read_unlock();
}
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
struct sk_buff *skb = e->skb;
@@ -1280,10 +1281,8 @@ void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
return;
}
- if (qid < 4)
- dev->tx_hang_check = 0;
-
- mt76_tx_complete_skb(mdev, skb);
+ dev->tx_hang_check = 0;
+ mt76_tx_complete_skb(mdev, e->wcid, skb);
}
static bool
@@ -1403,7 +1402,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
@@ -1452,7 +1451,7 @@ skip_dma_reset:
clear_bit(MT76_RESET, &dev->mphy.state);
mutex_unlock(&dev->mt76.mutex);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
@@ -1515,7 +1514,7 @@ static bool mt7603_tx_hang(struct mt7603_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i].q;
+ q = dev->mt76.q_tx[i];
if (!q->queued)
continue;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 447f2c63ef38..c9226dceb510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -75,7 +75,6 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -99,7 +98,6 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt7603_beacon_set_timer(dev, mvif->idx, 0);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- mt76_txq_remove(&dev->mt76, vif->txq);
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
@@ -514,7 +512,7 @@ mt7603_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
u16 cw_max = (1 << 10) - 1;
u32 val;
- queue = dev->mt76.q_tx[queue].q->hw_idx;
+ queue = dev->mt76.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = params->cw_min;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index c86305241e66..2a6e4332ad06 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -17,6 +17,8 @@
#define MT7603_MCU_RX_RING_SIZE 64
#define MT7603_RX_RING_SIZE 128
+#define MT7603_TX_RING_SIZE 256
+#define MT7603_PSD_RING_SIZE 128
#define MT7603_FIRMWARE_E1 "mt7603_e1.bin"
#define MT7603_FIRMWARE_E2 "mt7603_e2.bin"
@@ -241,8 +243,7 @@ int mt7603_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7603_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt7603_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
index 2f2f337e2201..a5845da3547a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
@@ -44,6 +44,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, pdev->irq, mt7603_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index de170765e938..ba927033bbe8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -35,6 +35,8 @@ mt76_wmac_probe(struct platform_device *pdev)
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, irq, mt7603_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index 88931658a9fb..00ba550fc48f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -165,15 +165,14 @@ mt7615_reset_test_set(void *data, u64 val)
if (!mt7615_wait_for_mcu_init(dev))
return 0;
- mt7615_mutex_acquire(dev);
-
skb = alloc_skb(1, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_put(skb, 1);
- mt76_tx_queue_skb_raw(dev, 0, skb, 0);
+ mt7615_mutex_acquire(dev);
+ mt76_tx_queue_skb_raw(dev, 0, skb, 0);
mt7615_mutex_release(dev);
return 0;
@@ -221,7 +220,7 @@ mt7615_ampdu_stat_read_phy(struct mt7615_phy *phy,
}
static int
-mt7615_ampdu_stat_read(struct seq_file *file, void *data)
+mt7615_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt7615_dev *dev = file->private;
@@ -235,18 +234,7 @@ mt7615_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7615_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7615_ampdu_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_ampdu_stat = {
- .open = mt7615_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7615_ampdu_stat);
static void
mt7615_radio_read_phy(struct mt7615_phy *phy, struct seq_file *s)
@@ -340,15 +328,15 @@ mt7615_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
- struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
"%s: queued=%d head=%d tail=%d\n",
- queue_map[i].queue, q->q->queued, q->q->head,
- q->q->tail);
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
}
return 0;
@@ -393,7 +381,7 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
mt76_queues_read);
debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
mt7615_queues_acq);
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7615_ampdu_stat_fops);
debugfs_create_file("scs", 0600, dir, dev, &fops_scs);
debugfs_create_file("dbdc", 0600, dir, dev, &fops_dbdc);
debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 1231a5ddf9ea..bf8ae14121db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -12,8 +12,7 @@
#include "mac.h"
static int
-mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7615_init_tx_queue(struct mt7615_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -26,8 +25,7 @@ mt7615_init_tx_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
return 0;
}
@@ -45,19 +43,18 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
int i;
for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[i],
- wmm_queue_map[i],
+ ret = mt7615_init_tx_queue(dev, i, wmm_queue_map[i],
MT7615_TX_RING_SIZE / 2);
if (ret)
return ret;
}
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_PSD,
MT7622_TXQ_MGMT, MT7615_TX_MGMT_RING_SIZE);
if (ret)
return ret;
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU,
MT7622_TXQ_MCU, MT7615_TX_MCU_RING_SIZE);
return ret;
}
@@ -65,10 +62,9 @@ mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
static int
mt7615_init_tx_queues(struct mt7615_dev *dev)
{
- struct mt76_sw_queue *q;
int ret, i;
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_FWDL,
MT7615_TXQ_FWDL,
MT7615_TX_FWDL_RING_SIZE);
if (ret)
@@ -77,52 +73,28 @@ mt7615_init_tx_queues(struct mt7615_dev *dev)
if (!is_mt7615(&dev->mt76))
return mt7622_init_tx_queues_multi(dev);
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[0], 0,
- MT7615_TX_RING_SIZE);
+ ret = mt7615_init_tx_queue(dev, 0, 0, MT7615_TX_RING_SIZE);
if (ret)
return ret;
- for (i = 1; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- INIT_LIST_HEAD(&q->swq);
- q->q = dev->mt76.q_tx[0].q;
- }
+ for (i = 1; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = dev->mt76.q_tx[0];
- ret = mt7615_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7615_TXQ_MCU,
+ ret = mt7615_init_tx_queue(dev, MT_TXQ_MCU, MT7615_TXQ_MCU,
MT7615_TX_MCU_RING_SIZE);
return 0;
}
-static void
-mt7615_tx_cleanup(struct mt7615_dev *dev)
-{
- int i;
-
- mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
- mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
- if (is_mt7615(&dev->mt76)) {
- mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
- } else {
- for (i = 0; i < IEEE80211_NUM_ACS; i++)
- mt76_queue_tx_cleanup(dev, i, false);
- }
-}
-
static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7615_dev *dev;
dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
- mt7615_tx_cleanup(dev);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
if (napi_complete_done(napi, 0))
- mt7615_irq_enable(dev, MT_INT_TX_DONE_ALL);
-
- mt7615_tx_cleanup(dev);
-
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt7615_irq_enable(dev, mt7615_tx_mcu_int_mask(dev));
return 0;
}
@@ -306,7 +278,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN);
/* enable interrupts for TX/RX rings */
- mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ mt7615_irq_enable(dev, MT_INT_RX_DONE_ALL | mt7615_tx_mcu_int_mask(dev) |
MT_INT_MCU_CMD);
if (is_mt7622(&dev->mt76))
@@ -325,6 +297,5 @@ void mt7615_dma_cleanup(struct mt7615_dev *dev)
MT_WPDMA_GLO_CFG_RX_DMA_EN);
mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_SW_RESET);
- tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index 22e4eabe6578..f4756bb946c3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -125,6 +125,9 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev)
case MT_EE_2GHZ:
dev->mt76.cap.has_2ghz = true;
break;
+ case MT_EE_DBDC:
+ dev->dbdc_support = true;
+ /* fall through */
default:
dev->mt76.cap.has_2ghz = true;
dev->mt76.cap.has_5ghz = true;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 1f57b43693bc..e194259c84e9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -217,6 +217,22 @@ static const struct ieee80211_iface_limit if_limits[] = {
}
};
+static const struct ieee80211_iface_combination if_comb_radar[] = {
+ {
+ .limits = if_limits,
+ .n_limits = ARRAY_SIZE(if_limits),
+ .max_interfaces = 4,
+ .num_different_channels = 1,
+ .beacon_int_infra_match = true,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160) |
+ BIT(NL80211_CHAN_WIDTH_80P80),
+ }
+};
+
static const struct ieee80211_iface_combination if_comb[] = {
{
.limits = if_limits,
@@ -306,8 +322,13 @@ mt7615_init_wiphy(struct ieee80211_hw *hw)
hw->sta_data_size = sizeof(struct mt7615_sta);
hw->vif_data_size = sizeof(struct mt7615_vif);
- wiphy->iface_combinations = if_comb;
- wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ if (is_mt7663(&phy->dev->mt76)) {
+ wiphy->iface_combinations = if_comb;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
+ } else {
+ wiphy->iface_combinations = if_comb_radar;
+ wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_radar);
+ }
wiphy->reg_notifier = mt7615_regd_notifier;
wiphy->max_sched_scan_plan_interval = MT7615_MAX_SCHED_SCAN_INTERVAL;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 3dd8dd28690e..8dc645e398fd 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -378,7 +378,7 @@ static int mt7615_mac_fill_rx(struct mt7615_dev *dev, struct sk_buff *skb)
switch (FIELD_GET(MT_RXV1_TX_MODE, rxdg0)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
@@ -1271,7 +1271,7 @@ out:
switch (FIELD_GET(MT_TX_RATE_MODE, final_rate)) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
mphy = &dev->mphy;
if (sta->wcid.ext_phy && dev->mt76.phy2)
@@ -1400,6 +1400,9 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
{
struct mt76_dev *mdev = &dev->mt76;
struct mt76_txwi_cache *txwi;
+ __le32 *txwi_data;
+ u32 val;
+ u8 wcid;
trace_mac_tx_free(dev, token);
@@ -1410,9 +1413,13 @@ mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
if (!txwi)
return;
+ txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
+ val = le32_to_cpu(txwi_data[1]);
+ wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
+
mt7615_txp_skb_unmap(mdev, txwi);
if (txwi->skb) {
- mt76_tx_complete_skb(mdev, txwi->skb);
+ mt76_tx_complete_skb(mdev, wcid, txwi->skb);
txwi->skb = NULL;
}
@@ -1424,6 +1431,14 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
u8 i, count;
+ mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+ if (is_mt7615(&dev->mt76)) {
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+ } else {
+ for (i = 0; i < IEEE80211_NUM_ACS; i++)
+ mt76_queue_tx_cleanup(dev, i, false);
+ }
+
count = FIELD_GET(MT_TX_FREE_MSDU_ID_CNT, le16_to_cpu(free->ctrl));
if (is_mt7615(&dev->mt76)) {
__le16 *token = &free->token[0];
@@ -1439,11 +1454,15 @@ static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
dev_kfree_skb(skb);
+ if (test_bit(MT76_STATE_PM, &dev->phy.mt76->state))
+ return;
+
rcu_read_lock();
mt7615_mac_sta_poll(dev);
rcu_read_unlock();
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt7615_pm_power_save_sched(dev);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
@@ -1478,7 +1497,7 @@ void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt76_rx(&dev->mt76, q, skb);
return;
}
- /* fall through */
+ fallthrough;
default:
dev_kfree_skb(skb);
break;
@@ -1845,7 +1864,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
pm.wake_work);
mphy = dev->phy.mt76;
- if (mt7615_driver_own(dev)) {
+ if (mt7615_mcu_set_drv_ctrl(dev)) {
dev_err(mphy->dev->dev, "failed to wake device\n");
goto out;
}
@@ -1853,12 +1872,13 @@ void mt7615_pm_wake_work(struct work_struct *work)
spin_lock_bh(&dev->pm.txq_lock);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
struct mt7615_sta *msta = dev->pm.tx_q[i].msta;
- struct mt76_wcid *wcid = msta ? &msta->wcid : NULL;
struct ieee80211_sta *sta = NULL;
+ struct mt76_wcid *wcid;
if (!dev->pm.tx_q[i].skb)
continue;
+ wcid = msta ? &msta->wcid : &dev->mt76.global_wcid;
if (msta && wcid->sta)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
@@ -1868,7 +1888,7 @@ void mt7615_pm_wake_work(struct work_struct *work)
}
spin_unlock_bh(&dev->pm.txq_lock);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
out:
ieee80211_wake_queues(mphy->hw);
@@ -1943,7 +1963,7 @@ void mt7615_pm_power_save_work(struct work_struct *work)
goto out;
}
- if (!mt7615_firmware_own(dev))
+ if (!mt7615_mcu_set_fw_ctrl(dev))
return;
out:
queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
@@ -2110,7 +2130,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
if (ext_phy)
mt76_txq_schedule_all(ext_phy);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
napi_disable(&dev->mt76.tx_napi);
@@ -2131,7 +2151,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_RESET, &dev->mphy.state);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 2d0b1f49fdbc..3186b7b2ca48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -205,7 +205,6 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
}
ret = mt7615_mcu_add_dev_info(dev, vif, true);
@@ -256,8 +255,6 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
mt7615_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- if (vif->txq)
- mt76_txq_remove(&dev->mt76, vif->txq);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
dev->omac_mask &= ~BIT(mvif->omac_idx);
@@ -361,7 +358,10 @@ mt7615_queue_key_update(struct mt7615_dev *dev, enum set_key_cmd cmd,
wd->key.keylen = key->keylen;
wd->key.cmd = cmd;
+ spin_lock_bh(&dev->mt76.lock);
list_add_tail(&wd->node, &dev->wd_head);
+ spin_unlock_bh(&dev->mt76.lock);
+
queue_work(dev->mt76.wq, &dev->wtbl_work);
return 0;
@@ -703,7 +703,8 @@ mt7615_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
return;
}
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ dev->pm.last_activity = jiffies;
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
static void mt7615_tx(struct ieee80211_hw *hw,
@@ -732,6 +733,7 @@ static void mt7615_tx(struct ieee80211_hw *hw,
}
if (!test_bit(MT76_STATE_PM, &mphy->state)) {
+ dev->pm.last_activity = jiffies;
mt76_tx(mphy, control->sta, wcid, skb);
return;
}
@@ -813,7 +815,6 @@ mt7615_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_START:
ssn = mt7615_mac_get_sta_tid_sn(dev, msta->wcid.idx, tid);
params->ssn = ssn;
- mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index bd316dbd9041..31b40fb83f6c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -324,6 +324,97 @@ int mt7615_rf_wr(struct mt7615_dev *dev, u32 wf, u32 reg, u32 val)
sizeof(req), false);
}
+static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
+{
+ if (!is_mt7622(&dev->mt76))
+ return;
+
+ regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
+ MT_INFRACFG_MISC_AP2CONN_WAKE,
+ !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
+}
+
+static int mt7615_mcu_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_dev *mdev = &dev->mt76;
+ u32 addr;
+ int err;
+
+ addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
+
+ mt7622_trigger_hif_int(dev, true);
+
+ addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ err = !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 3000);
+
+ mt7622_trigger_hif_int(dev, false);
+
+ if (err) {
+ dev_err(mdev->dev, "driver own failed\n");
+ return -ETIMEDOUT;
+ }
+
+ clear_bit(MT76_STATE_PM, &mphy->state);
+
+ return 0;
+}
+
+static int mt7615_mcu_lp_drv_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int i;
+
+ if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
+ goto out;
+
+ for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
+ mt76_wr(dev, MT_PCIE_DOORBELL_PUSH, MT_CFG_LPCR_HOST_DRV_OWN);
+ if (mt76_poll_msec(dev, MT_CONN_HIF_ON_LPCTL,
+ MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
+ break;
+ }
+
+ if (i == MT7615_DRV_OWN_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "driver own failed\n");
+ set_bit(MT76_STATE_PM, &mphy->state);
+ return -EIO;
+ }
+
+out:
+ dev->pm.last_activity = jiffies;
+
+ return 0;
+}
+
+static int mt7615_mcu_fw_pmctrl(struct mt7615_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ int err = 0;
+ u32 addr;
+
+ if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
+ return 0;
+
+ mt7622_trigger_hif_int(dev, true);
+
+ addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
+ mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
+
+ if (is_mt7622(&dev->mt76) &&
+ !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
+ MT_CFG_LPCR_HOST_FW_OWN, 3000)) {
+ dev_err(dev->mt76.dev, "Timeout for firmware own\n");
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ err = -EIO;
+ }
+
+ mt7622_trigger_hif_int(dev, false);
+
+ return err;
+}
+
static void
mt7615_mcu_csa_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
{
@@ -650,12 +741,12 @@ mt7615_mcu_add_beacon_offload(struct mt7615_dev *dev,
memcpy(req.pkt + MT_TXD_SIZE, skb->data, skb->len);
req.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
+ if (offs.cntdwn_counter_offs[0]) {
u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
req.csa_ie_pos = cpu_to_le16(csa_offs);
- req.csa_cnt = skb->data[offs.csa_counter_offs[0]];
+ req.csa_cnt = skb->data[offs.cntdwn_counter_offs[0]];
}
dev_kfree_skb(skb);
@@ -1106,7 +1197,7 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
- ht->ldpc = sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING;
+ ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
ht->af = sta->ht_cap.ampdu_factor;
ht->mm = sta->ht_cap.ampdu_density;
ht->ht = 1;
@@ -1124,7 +1215,7 @@ mt7615_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt7615_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
wtbl_tlv, sta_wtbl);
vht = (struct wtbl_vht *)tlv;
- vht->ldpc = sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC,
+ vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = 1;
af = (sta->vht_cap.cap &
@@ -1314,6 +1405,8 @@ static const struct mt7615_mcu_ops wtbl_update_ops = {
.add_tx_ba = mt7615_mcu_wtbl_tx_ba,
.add_rx_ba = mt7615_mcu_wtbl_rx_ba,
.sta_add = mt7615_mcu_wtbl_sta_add,
+ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
static int
@@ -1410,6 +1503,8 @@ static const struct mt7615_mcu_ops sta_update_ops = {
.add_tx_ba = mt7615_mcu_sta_tx_ba,
.add_rx_ba = mt7615_mcu_sta_rx_ba,
.sta_add = mt7615_mcu_add_sta,
+ .set_drv_ctrl = mt7615_mcu_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
static int
@@ -1713,10 +1808,10 @@ mt7615_mcu_uni_add_beacon_offload(struct mt7615_dev *dev,
req.beacon_tlv.pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
req.beacon_tlv.tim_ie_pos = cpu_to_le16(MT_TXD_SIZE + offs.tim_offset);
- if (offs.csa_counter_offs[0]) {
+ if (offs.cntdwn_counter_offs[0]) {
u16 csa_offs;
- csa_offs = MT_TXD_SIZE + offs.csa_counter_offs[0] - 4;
+ csa_offs = MT_TXD_SIZE + offs.cntdwn_counter_offs[0] - 4;
req.beacon_tlv.csa_ie_pos = cpu_to_le16(csa_offs);
}
dev_kfree_skb(skb);
@@ -1823,6 +1918,8 @@ static const struct mt7615_mcu_ops uni_update_ops = {
.add_tx_ba = mt7615_mcu_uni_tx_ba,
.add_rx_ba = mt7615_mcu_uni_rx_ba,
.sta_add = mt7615_mcu_uni_add_sta,
+ .set_drv_ctrl = mt7615_mcu_lp_drv_pmctrl,
+ .set_fw_ctrl = mt7615_mcu_fw_pmctrl,
};
static int mt7615_mcu_send_firmware(struct mt7615_dev *dev, const void *data,
@@ -1895,81 +1992,6 @@ static int mt7615_mcu_start_patch(struct mt7615_dev *dev)
&req, sizeof(req), true);
}
-static void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en)
-{
- if (!is_mt7622(&dev->mt76))
- return;
-
- regmap_update_bits(dev->infracfg, MT_INFRACFG_MISC,
- MT_INFRACFG_MISC_AP2CONN_WAKE,
- !en * MT_INFRACFG_MISC_AP2CONN_WAKE);
-}
-
-int mt7615_driver_own(struct mt7615_dev *dev)
-{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_dev *mdev = &dev->mt76;
- int i;
-
- if (!test_and_clear_bit(MT76_STATE_PM, &mphy->state))
- goto out;
-
- mt7622_trigger_hif_int(dev, true);
-
- for (i = 0; i < MT7615_DRV_OWN_RETRY_COUNT; i++) {
- u32 addr;
-
- addr = is_mt7663(mdev) ? MT_PCIE_DOORBELL_PUSH : MT_CFG_LPCR_HOST;
- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_DRV_OWN);
-
- addr = is_mt7663(mdev) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
- if (mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN, 0, 50))
- break;
- }
-
- mt7622_trigger_hif_int(dev, false);
-
- if (i == MT7615_DRV_OWN_RETRY_COUNT) {
- dev_err(mdev->dev, "driver own failed\n");
- set_bit(MT76_STATE_PM, &mphy->state);
- return -EIO;
- }
-
-out:
- dev->pm.last_activity = jiffies;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(mt7615_driver_own);
-
-int mt7615_firmware_own(struct mt7615_dev *dev)
-{
- struct mt76_phy *mphy = &dev->mt76.phy;
- int err = 0;
- u32 addr;
-
- if (test_and_set_bit(MT76_STATE_PM, &mphy->state))
- return 0;
-
- mt7622_trigger_hif_int(dev, true);
-
- addr = is_mt7663(&dev->mt76) ? MT_CONN_HIF_ON_LPCTL : MT_CFG_LPCR_HOST;
- mt76_wr(dev, addr, MT_CFG_LPCR_HOST_FW_OWN);
-
- if (is_mt7622(&dev->mt76) &&
- !mt76_poll_msec(dev, addr, MT_CFG_LPCR_HOST_FW_OWN,
- MT_CFG_LPCR_HOST_FW_OWN, 300)) {
- dev_err(dev->mt76.dev, "Timeout for firmware own\n");
- clear_bit(MT76_STATE_PM, &mphy->state);
- err = -EIO;
- }
-
- mt7622_trigger_hif_int(dev, false);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(mt7615_firmware_own);
-
static int mt7615_load_patch(struct mt7615_dev *dev, u32 addr, const char *name)
{
const struct mt7615_patch_hdr *hdr;
@@ -2452,7 +2474,7 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
dev->mt76.mcu_ops = &mt7615_mcu_ops,
- ret = mt7615_driver_own(dev);
+ ret = mt7615_mcu_drv_pmctrl(dev);
if (ret)
return ret;
@@ -2482,7 +2504,7 @@ EXPORT_SYMBOL_GPL(mt7615_mcu_init);
void mt7615_mcu_exit(struct mt7615_dev *dev)
{
__mt76_mcu_restart(&dev->mt76);
- mt7615_firmware_own(dev);
+ mt7615_mcu_set_fw_ctrl(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_exit);
@@ -2847,14 +2869,6 @@ int mt7615_mcu_set_chan_info(struct mt7615_phy *phy, int cmd)
.center_chan2 = ieee80211_frequency_to_channel(freq2),
};
-#ifdef CONFIG_NL80211_TESTMODE
- if (dev->mt76.test.state == MT76_TM_STATE_TX_FRAMES &&
- dev->mt76.test.tx_antenna_mask) {
- req.tx_streams = hweight8(dev->mt76.test.tx_antenna_mask);
- req.rx_streams_mask = dev->mt76.test.tx_antenna_mask;
- }
-#endif
-
if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
@@ -3279,7 +3293,7 @@ static int mt7615_dcoc_freq_idx(u16 freq, u8 bw)
freq = freq_bw40[idx];
break;
}
- /* fall through */
+ fallthrough;
case NL80211_CHAN_WIDTH_40:
idx = mt7615_find_freq_idx(freq_bw40, ARRAY_SIZE(freq_bw40),
freq);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index 133f93a6ed1b..6de492a4cf02 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -101,30 +101,29 @@ static irqreturn_t mt7615_irq_handler(int irq, void *dev_instance)
static void mt7615_irq_tasklet(unsigned long data)
{
struct mt7615_dev *dev = (struct mt7615_dev *)data;
- u32 intr, mask = 0;
+ u32 intr, mask = 0, tx_mcu_mask = mt7615_tx_mcu_int_mask(dev);
mt76_wr(dev, MT_INT_MASK_CSR, 0);
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
- intr &= dev->mt76.mmio.irqmask;
- if (intr & MT_INT_TX_DONE_ALL) {
- mask |= MT_INT_TX_DONE_ALL;
+ mask |= intr & MT_INT_RX_DONE_ALL;
+ if (intr & tx_mcu_mask)
+ mask |= tx_mcu_mask;
+ mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
+
+ if (intr & tx_mcu_mask)
napi_schedule(&dev->mt76.tx_napi);
- }
- if (intr & MT_INT_RX_DONE(0)) {
- mask |= MT_INT_RX_DONE(0);
+ if (intr & MT_INT_RX_DONE(0))
napi_schedule(&dev->mt76.napi[0]);
- }
- if (intr & MT_INT_RX_DONE(1)) {
- mask |= MT_INT_RX_DONE(1);
+ if (intr & MT_INT_RX_DONE(1))
napi_schedule(&dev->mt76.napi[1]);
- }
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
@@ -135,8 +134,6 @@ static void mt7615_irq_tasklet(unsigned long data)
wake_up(&dev->reset_wait);
}
}
-
- mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
static u32 __mt7615_reg_addr(struct mt7615_dev *dev, u32 addr)
@@ -227,6 +224,8 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
bus_ops->rmw = mt7615_rmw;
dev->mt76.bus = bus_ops;
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, irq, mt7615_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index 571eadc033a3..6a9f9187f76a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -220,6 +220,8 @@ struct mt7615_phy {
#define mt7615_mcu_add_bss_info(phy, ...) (phy->dev)->mcu_ops->add_bss_info((phy), __VA_ARGS__)
#define mt7615_mcu_add_beacon(dev, ...) (dev)->mcu_ops->add_beacon_offload((dev), __VA_ARGS__)
#define mt7615_mcu_set_pm(dev, ...) (dev)->mcu_ops->set_pm_state((dev), __VA_ARGS__)
+#define mt7615_mcu_set_drv_ctrl(dev) (dev)->mcu_ops->set_drv_ctrl((dev))
+#define mt7615_mcu_set_fw_ctrl(dev) (dev)->mcu_ops->set_fw_ctrl((dev))
struct mt7615_mcu_ops {
int (*add_tx_ba)(struct mt7615_dev *dev,
struct ieee80211_ampdu_params *params,
@@ -238,6 +240,8 @@ struct mt7615_mcu_ops {
struct ieee80211_hw *hw,
struct ieee80211_vif *vif, bool enable);
int (*set_pm_state)(struct mt7615_dev *dev, int band, int state);
+ int (*set_drv_ctrl)(struct mt7615_dev *dev);
+ int (*set_fw_ctrl)(struct mt7615_dev *dev);
};
struct mt7615_dev {
@@ -278,6 +282,7 @@ struct mt7615_dev {
bool fw_debug;
bool flash_eeprom;
+ bool dbdc_support;
spinlock_t token_lock;
struct idr token;
@@ -535,6 +540,11 @@ static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
return lmac_queue_map[ac];
}
+static inline u32 mt7615_tx_mcu_int_mask(struct mt7615_dev *dev)
+{
+ return MT_INT_TX_DONE(dev->mt76.q_tx[MT_TXQ_MCU]->hw_idx);
+}
+
void mt7615_dma_reset(struct mt7615_dev *dev);
void mt7615_scan_work(struct work_struct *work);
void mt7615_roc_work(struct work_struct *work);
@@ -608,8 +618,7 @@ int mt7615_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
@@ -638,8 +647,6 @@ int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
struct ieee80211_vif *vif);
int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration);
-int mt7615_firmware_own(struct mt7615_dev *dev);
-int mt7615_driver_own(struct mt7615_dev *dev);
int mt7615_init_debugfs(struct mt7615_dev *dev);
int mt7615_mcu_wait_response(struct mt7615_dev *dev, int cmd, int seq);
@@ -666,7 +673,6 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt76_tx_info *tx_info);
bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update);
void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
- enum mt76_txq_id qid,
struct mt76_queue_entry *e);
void mt7663_usb_sdio_wtbl_work(struct work_struct *work);
int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
@@ -675,9 +681,8 @@ int mt7663u_mcu_init(struct mt7615_dev *dev);
/* sdio */
u32 mt7663s_read_pcr(struct mt7615_dev *dev);
int mt7663s_mcu_init(struct mt7615_dev *dev);
-int mt7663s_driver_own(struct mt7615_dev *dev);
-int mt7663s_firmware_own(struct mt7615_dev *dev);
-int mt7663s_kthread_run(void *data);
+void mt7663s_tx_work(struct work_struct *work);
+void mt7663s_rx_work(struct work_struct *work);
void mt7663s_sdio_irq(struct sdio_func *func);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 2328d78e06a1..dbd29d897b29 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -88,7 +88,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
}
napi_disable(&mdev->tx_napi);
- tasklet_kill(&mdev->tx_tasklet);
+ mt76_worker_disable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_disable(&mdev->napi[i]);
@@ -118,7 +118,7 @@ static int mt7615_pci_suspend(struct pci_dev *pdev, pm_message_t state)
if (err)
goto restore;
- err = mt7615_firmware_own(dev);
+ err = mt7615_mcu_set_fw_ctrl(dev);
if (err)
goto restore;
@@ -142,7 +142,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
bool pdma_reset;
int i, err;
- err = mt7615_driver_own(dev);
+ err = mt7615_mcu_set_drv_ctrl(dev);
if (err < 0)
return err;
@@ -162,6 +162,7 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
if (pdma_reset)
dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
+ mt76_worker_enable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
index 7224a0078211..06a0f8f7bc89 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_init.c
@@ -25,6 +25,9 @@ static void mt7615_init_work(struct work_struct *work)
mt7615_phy_init(dev);
mt7615_mcu_del_wtbl_all(dev);
mt7615_check_offload_capability(dev);
+
+ if (dev->dbdc_support)
+ mt7615_register_ext_phy(dev);
}
static int mt7615_init_hardware(struct mt7615_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index 2d67f9a148cd..4cf7c5d34325 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -14,8 +14,7 @@
#include "../dma.h"
#include "mac.h"
-void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt7615_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
if (!e->txwi) {
dev_kfree_skb_any(e->skb);
@@ -45,7 +44,7 @@ void mt7615_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
}
if (e->skb)
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
static void
@@ -107,6 +106,7 @@ mt7615_write_fw_txp(struct mt7615_dev *dev, struct mt76_tx_info *tx_info,
/* pass partial skb header to fw */
tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
index 9137d9e6b51d..61623f480806 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/regs.h
@@ -575,7 +575,7 @@ enum mt7615_reg_base {
#define MT_MCU_PTA_BASE 0x81060000
#define MT_MCU_PTA(_n) (MT_MCU_PTA_BASE + (_n))
-#define MT_ANT_SWITCH_CON(n) MT_MCU_PTA(0x0c8)
+#define MT_ANT_SWITCH_CON(_n) MT_MCU_PTA(0x0c8 + ((_n) - 1) * 4)
#define MT_ANT_SWITCH_CON_MODE(_n) (GENMASK(4, 0) << (_n * 8))
#define MT_ANT_SWITCH_CON_MODE1(_n) (GENMASK(3, 0) << (_n * 8))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index dabce51117b0..874c929d8552 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -323,7 +323,7 @@ static int mt7663s_probe(struct sdio_func *func,
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_USB_TXD_SIZE,
- .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
+ .drv_flags = MT_DRV_RX_DMA_HDR,
.tx_prepare_skb = mt7663_usb_sdio_tx_prepare_skb,
.tx_complete_skb = mt7663_usb_sdio_tx_complete_skb,
.tx_status_data = mt7663_usb_sdio_tx_status_data,
@@ -346,7 +346,7 @@ static int mt7663s_probe(struct sdio_func *func,
struct ieee80211_ops *ops;
struct mt7615_dev *dev;
struct mt76_dev *mdev;
- int ret;
+ int i, ret;
ops = devm_kmemdup(&func->dev, &mt7615_ops, sizeof(mt7615_ops),
GFP_KERNEL);
@@ -364,23 +364,39 @@ static int mt7663s_probe(struct sdio_func *func,
dev->ops = ops;
sdio_set_drvdata(func, dev);
- mdev->sdio.tx_kthread = kthread_create(mt7663s_kthread_run, dev,
- "mt7663s_tx");
- if (IS_ERR(mdev->sdio.tx_kthread))
- return PTR_ERR(mdev->sdio.tx_kthread);
-
ret = mt76s_init(mdev, func, &mt7663s_ops);
if (ret < 0)
goto err_free;
+ INIT_WORK(&mdev->sdio.tx.xmit_work, mt7663s_tx_work);
+ INIT_WORK(&mdev->sdio.rx.recv_work, mt7663s_rx_work);
+
ret = mt7663s_hw_init(dev, func);
if (ret)
- goto err_free;
+ goto err_deinit;
mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mdev->sdio.intr_data = devm_kmalloc(mdev->dev,
+ sizeof(struct mt76s_intr),
+ GFP_KERNEL);
+ if (!mdev->sdio.intr_data) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
+ mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
+ MT76S_XMIT_BUF_SZ,
+ GFP_KERNEL);
+ if (!mdev->sdio.xmit_buf[i]) {
+ ret = -ENOMEM;
+ goto err_deinit;
+ }
+ }
+
ret = mt76s_alloc_queues(&dev->mt76);
if (ret)
goto err_deinit;
@@ -426,9 +442,11 @@ static int mt7663s_suspend(struct device *dev)
return err;
}
+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
mt76s_stop_txrx(&mdev->mt76);
- return mt7663s_firmware_own(mdev);
+ return mt7615_mcu_set_fw_ctrl(mdev);
}
static int mt7663s_resume(struct device *dev)
@@ -437,7 +455,7 @@ static int mt7663s_resume(struct device *dev)
struct mt7615_dev *mdev = sdio_get_drvdata(func);
int err;
- err = mt7663s_driver_own(mdev);
+ err = mt7615_mcu_set_drv_ctrl(mdev);
if (err)
return err;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index 28b86bec7fc2..38670c00380c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -53,7 +53,7 @@ mt7663s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (ret)
goto out;
- mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU].q);
+ mt76_queue_kick(dev, mdev->q_tx[MT_TXQ_MCU]);
if (wait_resp)
ret = mt7615_mcu_wait_response(dev, cmd, seq);
@@ -63,7 +63,7 @@ out:
return ret;
}
-int mt7663s_driver_own(struct mt7615_dev *dev)
+static int mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -75,7 +75,7 @@ int mt7663s_driver_own(struct mt7615_dev *dev)
sdio_claim_host(func);
- sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, 0);
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
@@ -95,7 +95,7 @@ out:
return 0;
}
-int mt7663s_firmware_own(struct mt7615_dev *dev)
+static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
{
struct sdio_func *func = dev->mt76.sdio.func;
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -107,7 +107,7 @@ int mt7663s_firmware_own(struct mt7615_dev *dev)
sdio_claim_host(func);
- sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, 0);
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
@@ -132,9 +132,10 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
.mcu_rr = mt7615_mcu_reg_rr,
.mcu_wr = mt7615_mcu_reg_wr,
};
+ struct mt7615_mcu_ops *mcu_ops;
int ret;
- ret = mt7663s_driver_own(dev);
+ ret = mt7663s_mcu_drv_pmctrl(dev);
if (ret)
return ret;
@@ -152,6 +153,15 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
if (ret)
return ret;
+ mcu_ops = devm_kmemdup(dev->mt76.dev, dev->mcu_ops, sizeof(*mcu_ops),
+ GFP_KERNEL);
+ if (!mcu_ops)
+ return -ENOMEM;
+
+ mcu_ops->set_drv_ctrl = mt7663s_mcu_drv_pmctrl;
+ mcu_ops->set_fw_ctrl = mt7663s_mcu_fw_pmctrl;
+ dev->mcu_ops = mcu_ops;
+
ret = mt7663s_mcu_init_sched(dev);
if (ret)
return ret;
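
[note] Note the clone-and-override idiom introduced here: the shared mcu_ops table is duplicated into device-managed memory with devm_kmemdup(), only the two power-control hooks are patched to the SDIO variants, and the device is then pointed at its private copy, which is freed automatically with the device. The same pattern reappears for bus_ops in mt7915/dma.c further down. Generically (hook names hypothetical):

        struct mt7615_mcu_ops *ops;

        ops = devm_kmemdup(dev->mt76.dev, dev->mcu_ops, sizeof(*ops),
                           GFP_KERNEL);
        if (!ops)
                return -ENOMEM;

        ops->set_drv_ctrl = my_drv_pmctrl;      /* bus-specific override */
        ops->set_fw_ctrl = my_fw_pmctrl;        /* bus-specific override */
        dev->mcu_ops = ops;                     /* freed with the device */
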
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
index 443a4ecdad3a..2486cda3243b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
@@ -19,21 +19,40 @@
#include "sdio.h"
#include "mac.h"
-static void mt7663s_refill_sched_quota(struct mt7615_dev *dev, u32 *data)
+static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
{
- struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 ple_ac_data_quota[] = {
+ FIELD_GET(TXQ_CNT_L, data[4]), /* VO */
+ FIELD_GET(TXQ_CNT_H, data[3]), /* VI */
+ FIELD_GET(TXQ_CNT_L, data[3]), /* BE */
+ FIELD_GET(TXQ_CNT_H, data[2]), /* BK */
+ };
+ u32 pse_ac_data_quota[] = {
+ FIELD_GET(TXQ_CNT_H, data[1]), /* VO */
+ FIELD_GET(TXQ_CNT_L, data[1]), /* VI */
+ FIELD_GET(TXQ_CNT_H, data[0]), /* BE */
+ FIELD_GET(TXQ_CNT_L, data[0]), /* BK */
+ };
+ u32 pse_mcu_quota = FIELD_GET(TXQ_CNT_L, data[2]);
+ u32 pse_data_quota = 0, ple_data_quota = 0;
+ struct mt76_sdio *sdio = &dev->sdio;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pse_ac_data_quota); i++) {
+ pse_data_quota += pse_ac_data_quota[i];
+ ple_data_quota += ple_ac_data_quota[i];
+ }
+
+ if (!pse_data_quota && !ple_data_quota && !pse_mcu_quota)
+ return 0;
mutex_lock(&sdio->sched.lock);
- sdio->sched.pse_data_quota += FIELD_GET(TXQ_CNT_L, data[0]) + /* BK */
- FIELD_GET(TXQ_CNT_H, data[0]) + /* BE */
- FIELD_GET(TXQ_CNT_L, data[1]) + /* VI */
- FIELD_GET(TXQ_CNT_H, data[1]); /* VO */
- sdio->sched.ple_data_quota += FIELD_GET(TXQ_CNT_H, data[2]) + /* BK */
- FIELD_GET(TXQ_CNT_L, data[3]) + /* BE */
- FIELD_GET(TXQ_CNT_H, data[3]) + /* VI */
- FIELD_GET(TXQ_CNT_L, data[4]); /* VO */
- sdio->sched.pse_mcu_quota += FIELD_GET(TXQ_CNT_L, data[2]);
+ sdio->sched.pse_mcu_quota += pse_mcu_quota;
+ sdio->sched.pse_data_quota += pse_data_quota;
+ sdio->sched.ple_data_quota += ple_data_quota;
mutex_unlock(&sdio->sched.lock);
+
+ return pse_data_quota + ple_data_quota + pse_mcu_quota;
}
static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
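
[note] mt7663s_refill_sched_quota() above now decodes the five WTQCR words into per-AC arrays, skips the mutex entirely when nothing was freed, and returns the total so the rx worker can decide whether to kick tx.xmit_work. A worked example of the FIELD_GET() decoding, assuming for illustration that TXQ_CNT_L/TXQ_CNT_H are the low/high half-words (the real masks are defined in sdio.h):

#include <linux/bitfield.h>

#define TXQ_CNT_L       GENMASK(15, 0)          /* assumed layout */
#define TXQ_CNT_H       GENMASK(31, 16)         /* assumed layout */

        u32 wtqcr0 = 0x00050003;
        u32 bk = FIELD_GET(TXQ_CNT_L, wtqcr0);  /* 3 pages freed for BK */
        u32 be = FIELD_GET(TXQ_CNT_H, wtqcr0);  /* 5 pages freed for BE */
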
@@ -61,11 +80,11 @@ static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
return skb;
}
-static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
+static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
struct mt76s_intr *intr)
{
- struct mt76_queue *q = &dev->mt76.q_rx[qid];
- struct mt76_sdio *sdio = &dev->mt76.sdio;
+ struct mt76_queue *q = &dev->q_rx[qid];
+ struct mt76_sdio *sdio = &dev->sdio;
int len = 0, err, i, order;
struct page *page;
u8 *buf;
@@ -86,15 +105,18 @@ static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
buf = page_address(page);
+ sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
+ sdio_release_host(sdio->func);
+
if (err < 0) {
- dev_err(dev->mt76.dev, "sdio read data failed:%d\n", err);
+ dev_err(dev->dev, "sdio read data failed:%d\n", err);
__free_pages(page, order);
return err;
}
for (i = 0; i < intr->rx.num[qid]; i++) {
- int index = (q->tail + i) % q->ndesc;
+ int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
len = intr->rx.len[qid][i];
@@ -109,160 +131,198 @@ static int mt7663s_rx_run_queue(struct mt7615_dev *dev, enum mt76_rxq_id qid,
__free_pages(page, order);
spin_lock_bh(&q->lock);
- q->tail = (q->tail + i) % q->ndesc;
+ q->head = (q->head + i) % q->ndesc;
q->queued += i;
spin_unlock_bh(&q->lock);
- return err;
+ return i;
}
-static int mt7663s_tx_update_sched(struct mt7615_dev *dev,
- struct mt76_queue_entry *e,
- bool mcu)
+static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
+ int buf_sz, int *pse_size, int *ple_size)
{
- struct mt76_sdio *sdio = &dev->mt76.sdio;
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct ieee80211_hdr *hdr;
- int size, ret = -EBUSY;
-
- size = DIV_ROUND_UP(e->buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
+ int pse_sz;
- if (mcu) {
- if (!test_bit(MT76_STATE_MCU_RUNNING, &mphy->state))
- return 0;
+ pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
- mutex_lock(&sdio->sched.lock);
- if (sdio->sched.pse_mcu_quota > size) {
- sdio->sched.pse_mcu_quota -= size;
- ret = 0;
- }
- mutex_unlock(&sdio->sched.lock);
+ if (qid == MT_TXQ_MCU) {
+ if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
+ return -EBUSY;
+ } else {
+ if (sdio->sched.pse_data_quota < *pse_size + pse_sz ||
+ sdio->sched.ple_data_quota < *ple_size)
+ return -EBUSY;
- return ret;
+ *ple_size = *ple_size + 1;
}
+ *pse_size = *pse_size + pse_sz;
- hdr = (struct ieee80211_hdr *)(e->skb->data + MT_USB_TXD_SIZE);
- if (ieee80211_is_ctl(hdr->frame_control))
- return 0;
+ return 0;
+}
+static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, enum mt76_txq_id qid,
+ int pse_size, int ple_size)
+{
mutex_lock(&sdio->sched.lock);
- if (sdio->sched.pse_data_quota > size &&
- sdio->sched.ple_data_quota > 0) {
- sdio->sched.pse_data_quota -= size;
- sdio->sched.ple_data_quota--;
- ret = 0;
+ if (qid == MT_TXQ_MCU) {
+ sdio->sched.pse_mcu_quota -= pse_size;
+ } else {
+ sdio->sched.pse_data_quota -= pse_size;
+ sdio->sched.ple_data_quota -= ple_size;
}
mutex_unlock(&sdio->sched.lock);
+}
+
+static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+ int err;
+
+ if (len > sdio->func->cur_blksize)
+ len = roundup(len, sdio->func->cur_blksize);
+
+ sdio_claim_host(sdio->func);
+ err = sdio_writesb(sdio->func, MCR_WTDR1, data, len);
+ sdio_release_host(sdio->func);
+
+ if (err)
+ dev_err(dev->dev, "sdio write failed: %d\n", err);
- return ret;
+ return err;
}
-static int mt7663s_tx_run_queue(struct mt7615_dev *dev, struct mt76_queue *q)
+static int mt7663s_tx_run_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- bool mcu = q == dev->mt76.q_tx[MT_TXQ_MCU].q;
- struct mt76_sdio *sdio = &dev->mt76.sdio;
- int nframes = 0;
+ int err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
+ struct mt76_queue *q = dev->q_tx[qid];
+ struct mt76_sdio *sdio = &dev->sdio;
- while (q->first != q->tail) {
+ while (q->first != q->head) {
struct mt76_queue_entry *e = &q->entry[q->first];
- int err, len = e->skb->len;
+ struct sk_buff *iter;
- if (mt7663s_tx_update_sched(dev, e, mcu))
+ if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
+ __skb_put_zero(e->skb, 4);
+ err = __mt7663s_xmit_queue(dev, e->skb->data,
+ e->skb->len);
+ if (err)
+ return err;
+
+ goto next;
+ }
+
+ if (len + e->skb->len + 4 > MT76S_XMIT_BUF_SZ)
break;
- if (len > sdio->func->cur_blksize)
- len = roundup(len, sdio->func->cur_blksize);
+ if (mt7663s_tx_pick_quota(sdio, qid, e->buf_sz, &pse_sz,
+ &ple_sz))
+ break;
- /* TODO: skb_walk_frags and then write to SDIO port */
- err = sdio_writesb(sdio->func, MCR_WTDR1, e->skb->data, len);
- if (err) {
- dev_err(dev->mt76.dev, "sdio write failed: %d\n", err);
- return -EIO;
- }
+ memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
+ skb_headlen(e->skb));
+ len += skb_headlen(e->skb);
+ nframes++;
- e->done = true;
+ skb_walk_frags(e->skb, iter) {
+ memcpy(sdio->xmit_buf[qid] + len, iter->data,
+ iter->len);
+ len += iter->len;
+ nframes++;
+ }
+next:
q->first = (q->first + 1) % q->ndesc;
- nframes++;
+ e->done = true;
+ }
+
+ if (nframes) {
+ memset(sdio->xmit_buf[qid] + len, 0, 4);
+ err = __mt7663s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ if (err)
+ return err;
}
+ mt7663s_tx_update_quota(sdio, qid, pse_sz, ple_sz);
return nframes;
}
-static int mt7663s_tx_run_queues(struct mt7615_dev *dev)
+void mt7663s_tx_work(struct work_struct *work)
{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ tx.xmit_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i, nframes = 0;
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
int ret;
- ret = mt7663s_tx_run_queue(dev, dev->mt76.q_tx[i].q);
+ ret = mt7663s_tx_run_queue(dev, i);
if (ret < 0)
- return ret;
+ break;
nframes += ret;
}
+ if (nframes)
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
- return nframes;
+ queue_work(sdio->txrx_wq, &sdio->tx.status_work);
}
-int mt7663s_kthread_run(void *data)
+void mt7663s_rx_work(struct work_struct *work)
{
- struct mt7615_dev *dev = data;
- struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ rx.recv_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ struct mt76s_intr *intr = sdio->intr_data;
+ int nframes = 0, ret;
- while (!kthread_should_stop()) {
- int ret;
+ /* disable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
+ sdio_release_host(sdio->func);
- cond_resched();
+ if (ret < 0)
+ goto out;
- sdio_claim_host(dev->mt76.sdio.func);
- ret = mt7663s_tx_run_queues(dev);
- sdio_release_host(dev->mt76.sdio.func);
+ trace_dev_irq(dev, intr->isr, 0);
- if (ret <= 0 || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- } else {
- wake_up_process(dev->mt76.sdio.kthread);
+ if (intr->isr & WHIER_RX0_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 0, intr);
+ if (ret > 0) {
+ queue_work(sdio->txrx_wq, &sdio->rx.net_work);
+ nframes += ret;
}
}
- return 0;
+ if (intr->isr & WHIER_RX1_DONE_INT_EN) {
+ ret = mt7663s_rx_run_queue(dev, 1, intr);
+ if (ret > 0) {
+ queue_work(sdio->txrx_wq, &sdio->rx.net_work);
+ nframes += ret;
+ }
+ }
+
+ if (mt7663s_refill_sched_quota(dev, intr->tx.wtqcr))
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
+
+ if (nframes) {
+ queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
+ return;
+ }
+out:
+ /* enable interrupt */
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
}
void mt7663s_sdio_irq(struct sdio_func *func)
{
struct mt7615_dev *dev = sdio_get_drvdata(func);
struct mt76_sdio *sdio = &dev->mt76.sdio;
- struct mt76s_intr intr;
-
- /* disable interrupt */
- sdio_writel(func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, 0);
-
- do {
- sdio_readsb(func, &intr, MCR_WHISR, sizeof(struct mt76s_intr));
- trace_dev_irq(&dev->mt76, intr.isr, 0);
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
- goto out;
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
+ return;
- if (intr.isr & WHIER_RX0_DONE_INT_EN) {
- mt7663s_rx_run_queue(dev, 0, &intr);
- wake_up_process(sdio->kthread);
- }
-
- if (intr.isr & WHIER_RX1_DONE_INT_EN) {
- mt7663s_rx_run_queue(dev, 1, &intr);
- wake_up_process(sdio->kthread);
- }
-
- if (intr.isr & WHIER_TX_DONE_INT_EN) {
- mt7663s_refill_sched_quota(dev, intr.tx.wtqcr);
- mt7663s_tx_run_queues(dev);
- wake_up_process(sdio->kthread);
- }
- } while (intr.isr);
-out:
- /* enable interrupt */
- sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, 0);
+ queue_work(sdio->txrx_wq, &sdio->rx.recv_work);
}
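
[note] With the rework above, mt7663s_sdio_irq() is reduced to queueing rx.recv_work; all register and FIFO traffic moves into the workers. One consequence worth spelling out: the SDIO core invokes the interrupt callback with the host already claimed, but a workqueue runs without that implicit claim, which is why every bus access in the new workers is bracketed explicitly:

        /* workqueue context: no implicit claim from the SDIO core,
         * so each transaction needs its own claim/release pair */
        sdio_claim_host(sdio->func);
        err = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
        sdio_release_host(sdio->func);
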
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
index 1730751133aa..e4dc62314bae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/testmode.c
@@ -70,7 +70,7 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
if (dev->mt76.test.state != MT76_TM_STATE_OFF)
tx_power = dev->mt76.test.tx_power;
- len = sizeof(req_hdr) + MT7615_EE_MAX - MT_EE_NIC_CONF_0;
+ len = MT7615_EE_MAX - MT_EE_NIC_CONF_0;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req_hdr) + len);
if (!skb)
return -ENOMEM;
@@ -80,13 +80,12 @@ mt7615_tm_set_tx_power(struct mt7615_phy *phy)
target_chains = mt7615_ext_pa_enabled(dev, band) ? 1 : n_chains;
for (i = 0; i < target_chains; i++) {
- int index;
-
ret = mt7615_eeprom_get_target_power_index(dev, chandef->chan, i);
- if (ret < 0)
+ if (ret < 0) {
+ dev_kfree_skb(skb);
return -EINVAL;
+ }
- index = ret - MT_EE_NIC_CONF_0;
if (tx_power && tx_power[i])
data[ret - MT_EE_NIC_CONF_0] = tx_power[i];
}
@@ -191,7 +190,7 @@ mt7615_tm_set_tx_antenna(struct mt7615_dev *dev, bool en)
for (i = 0; i < 4; i++) {
mt76_rmw_field(dev, MT_WF_PHY_RFINTF3_0(i),
MT_WF_PHY_RFINTF3_0_ANT,
- td->tx_antenna_mask & BIT(i) ? 0 : 0xa);
+ (td->tx_antenna_mask & BIT(i)) ? 0 : 0xa);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index 23a21338c46e..f0ad83af9e00 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -180,9 +180,7 @@ static int mt7663u_suspend(struct usb_interface *intf, pm_message_t state)
}
mt76u_stop_rx(&dev->mt76);
-
mt76u_stop_tx(&dev->mt76);
- tasklet_kill(&dev->mt76.tx_tasklet);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
index 0b33df3e3bfe..4d8be366af31 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_mcu.c
@@ -18,7 +18,7 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, bool wait_resp)
{
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
- int ret, seq, ep;
+ int ret, seq, ep, len, pad;
mutex_lock(&mdev->mcu.mutex);
@@ -28,8 +28,10 @@ mt7663u_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
else
ep = MT_EP_OUT_AC_BE;
- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
- ret = mt76_skb_adjust_pad(skb);
+ len = skb->len;
+ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ ret = mt76_skb_adjust_pad(skb, pad);
if (ret < 0)
goto out;
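
[note] The explicit pad argument makes the USB framing rule visible: round the frame up to a 4-byte boundary, then append one extra zeroed dword, so pad always lands between 4 and 7 bytes. (The SDIO path in usb_sdio.c below uses round_up(skb->len, 4) - skb->len, i.e. 0 to 3, with no trailing dword.) For example:

        unsigned int len = 62;
        unsigned int pad = round_up(len, 4) + 4 - len;  /* 64 + 4 - 62 = 6 */
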
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 6dffdaaa9ad5..3b29a6d3dc64 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -226,7 +226,6 @@ bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data);
void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
- enum mt76_txq_id qid,
struct mt76_queue_entry *e)
{
unsigned int headroom = MT_USB_TXD_SIZE;
@@ -235,7 +234,7 @@ void mt7663_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
headroom += MT_USB_HDR_SIZE;
skb_pull(e->skb, headroom);
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_complete_skb);
@@ -248,6 +247,7 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
struct sk_buff *skb = tx_info->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int pad;
if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) &&
!msta->rate_probe) {
@@ -259,10 +259,16 @@ int mt7663_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
}
mt7663_usb_sdio_write_txwi(dev, wcid, qid, sta, skb);
- if (mt76_is_usb(mdev))
- put_unaligned_le32(skb->len, skb_push(skb, sizeof(skb->len)));
+ if (mt76_is_usb(mdev)) {
+ u32 len = skb->len;
+
+ put_unaligned_le32(len, skb_push(skb, sizeof(len)));
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ } else {
+ pad = round_up(skb->len, 4) - skb->len;
+ }
- return mt76_skb_adjust_pad(skb);
+ return mt76_skb_adjust_pad(skb, pad);
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_prepare_skb);
@@ -359,14 +365,15 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev)
if (err)
return err;
- /* check hw sg support in order to enable AMSDU */
- if (dev->mt76.usb.sg_en || mt76_is_sdio(&dev->mt76))
- hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
- else
- hw->max_tx_fragments = 1;
hw->extra_tx_headroom += MT_USB_TXD_SIZE;
- if (mt76_is_usb(&dev->mt76))
+ if (mt76_is_usb(&dev->mt76)) {
hw->extra_tx_headroom += MT_USB_HDR_SIZE;
+ /* check hw sg support in order to enable AMSDU */
+ if (dev->mt76.usb.sg_en)
+ hw->max_tx_fragments = MT_HW_TXP_MAX_BUF_NUM;
+ else
+ hw->max_tx_fragments = 1;
+ }
err = mt76_register_device(&dev->mt76, true, mt7615_rates,
ARRAY_SIZE(mt7615_rates));
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
index dc8bf4c6969a..d78866bf41ba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c
@@ -10,6 +10,7 @@
#include "eeprom.h"
#include "mcu.h"
#include "initvals.h"
+#include "initvals_init.h"
#include "../mt76x02_phy.h"
static void
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 3dcd9620a126..99808ed0c6cb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -11,139 +11,6 @@
#include "phy.h"
-static const struct mt76_reg_pair common_mac_reg_table[] = {
- { MT_BCN_OFFSET(0), 0xf8f0e8e0 },
- { MT_BCN_OFFSET(1), 0x6f77d0c8 },
- { MT_LEGACY_BASIC_RATE, 0x0000013f },
- { MT_HT_BASIC_RATE, 0x00008003 },
- { MT_MAC_SYS_CTRL, 0x00000000 },
- { MT_RX_FILTR_CFG, 0x00017f97 },
- { MT_BKOFF_SLOT_CFG, 0x00000209 },
- { MT_TX_SW_CFG0, 0x00000000 },
- { MT_TX_SW_CFG1, 0x00080606 },
- { MT_TX_LINK_CFG, 0x00001020 },
- { MT_TX_TIMEOUT_CFG, 0x000a2090 },
- { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 },
- { MT_LED_CFG, 0x7f031e46 },
- { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
- { MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
- { MT_TX_RETRY_CFG, 0x47d01f0f },
- { MT_AUTO_RSP_CFG, 0x00000013 },
- { MT_CCK_PROT_CFG, 0x07f40003 },
- { MT_OFDM_PROT_CFG, 0x07f42004 },
- { MT_PBF_CFG, 0x00f40006 },
- { MT_WPDMA_GLO_CFG, 0x00000030 },
- { MT_GF20_PROT_CFG, 0x01742004 },
- { MT_GF40_PROT_CFG, 0x03f42084 },
- { MT_MM20_PROT_CFG, 0x01742004 },
- { MT_MM40_PROT_CFG, 0x03f42084 },
- { MT_TXOP_CTRL_CFG, 0x0000583f },
- { MT_TX_RTS_CFG, 0x00ffff20 },
- { MT_EXP_ACK_TIME, 0x002400ca },
- { MT_TXOP_HLDR_ET, 0x00000002 },
- { MT_XIFS_TIME_CFG, 0x33a41010 },
- { MT_PWR_PIN_CFG, 0x00000000 },
-};
-
-static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
- { MT_IOCFG_6, 0xa0040080 },
- { MT_PBF_SYS_CTRL, 0x00080c00 },
- { MT_PBF_CFG, 0x77723c1f },
- { MT_FCE_PSE_CTRL, 0x00000001 },
- { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 },
- { MT_TX_SW_CFG0, 0x00000601 },
- { MT_TX_SW_CFG1, 0x00040000 },
- { MT_TX_SW_CFG2, 0x00000000 },
- { 0xa44, 0x00000000 },
- { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
- { MT_TSO_CTRL, 0x00000000 },
- { MT_BB_PA_MODE_CFG1, 0x00500055 },
- { MT_RF_PA_MODE_CFG1, 0x00500055 },
- { MT_TX_ALC_CFG_0, 0x2F2F000C },
- { MT_TX0_BB_GAIN_ATTEN, 0x00000000 },
- { MT_TX_PWR_CFG_0, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_1, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_2, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_3, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_4, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_7, 0x3A3A3A3A },
- { MT_TX_PWR_CFG_8, 0x0000003A },
- { MT_TX_PWR_CFG_9, 0x0000003A },
- { 0x150C, 0x00000002 },
- { 0x1238, 0x001700C8 },
- { MT_LDO_CTRL_0, 0x00A647B6 },
- { MT_LDO_CTRL_1, 0x6B006464 },
- { MT_HT_BASIC_RATE, 0x00004003 },
- { MT_HT_CTRL_CFG, 0x000001FF },
- { MT_TXOP_HLDR_ET, 0x00000000 },
- { MT_PN_PAD_MODE, 0x00000003 },
- { MT_TX_PROT_CFG6, 0xe3f42004 },
- { MT_TX_PROT_CFG7, 0xe3f42084 },
- { MT_TX_PROT_CFG8, 0xe3f42104 },
- { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
-};
-
-static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
- { MT_BBP(CORE, 1), 0x00000002 },
- { MT_BBP(CORE, 4), 0x00000000 },
- { MT_BBP(CORE, 24), 0x00000000 },
- { MT_BBP(CORE, 32), 0x4003000a },
- { MT_BBP(CORE, 42), 0x00000000 },
- { MT_BBP(CORE, 44), 0x00000000 },
- { MT_BBP(IBI, 11), 0x0FDE8081 },
- { MT_BBP(AGC, 0), 0x00021400 },
- { MT_BBP(AGC, 1), 0x00000003 },
- { MT_BBP(AGC, 2), 0x003A6464 },
- { MT_BBP(AGC, 15), 0x88A28CB8 },
- { MT_BBP(AGC, 22), 0x00001E21 },
- { MT_BBP(AGC, 23), 0x0000272C },
- { MT_BBP(AGC, 24), 0x00002F3A },
- { MT_BBP(AGC, 25), 0x8000005A },
- { MT_BBP(AGC, 26), 0x007C2005 },
- { MT_BBP(AGC, 33), 0x00003238 },
- { MT_BBP(AGC, 34), 0x000A0C0C },
- { MT_BBP(AGC, 37), 0x2121262C },
- { MT_BBP(AGC, 41), 0x38383E45 },
- { MT_BBP(AGC, 57), 0x00001010 },
- { MT_BBP(AGC, 59), 0xBAA20E96 },
- { MT_BBP(AGC, 63), 0x00000001 },
- { MT_BBP(TXC, 0), 0x00280403 },
- { MT_BBP(TXC, 1), 0x00000000 },
- { MT_BBP(RXC, 1), 0x00000012 },
- { MT_BBP(RXC, 2), 0x00000011 },
- { MT_BBP(RXC, 3), 0x00000005 },
- { MT_BBP(RXC, 4), 0x00000000 },
- { MT_BBP(RXC, 5), 0xF977C4EC },
- { MT_BBP(RXC, 7), 0x00000090 },
- { MT_BBP(TXO, 8), 0x00000000 },
- { MT_BBP(TXBE, 0), 0x00000000 },
- { MT_BBP(TXBE, 4), 0x00000004 },
- { MT_BBP(TXBE, 6), 0x00000000 },
- { MT_BBP(TXBE, 8), 0x00000014 },
- { MT_BBP(TXBE, 9), 0x20000000 },
- { MT_BBP(TXBE, 10), 0x00000000 },
- { MT_BBP(TXBE, 12), 0x00000000 },
- { MT_BBP(TXBE, 13), 0x00000000 },
- { MT_BBP(TXBE, 14), 0x00000000 },
- { MT_BBP(TXBE, 15), 0x00000000 },
- { MT_BBP(TXBE, 16), 0x00000000 },
- { MT_BBP(TXBE, 17), 0x00000000 },
- { MT_BBP(RXFE, 1), 0x00008800 },
- { MT_BBP(RXFE, 3), 0x00000000 },
- { MT_BBP(RXFE, 4), 0x00000000 },
- { MT_BBP(RXO, 13), 0x00000192 },
- { MT_BBP(RXO, 14), 0x00060612 },
- { MT_BBP(RXO, 15), 0xC8321B18 },
- { MT_BBP(RXO, 16), 0x0000001E },
- { MT_BBP(RXO, 17), 0x00000000 },
- { MT_BBP(RXO, 18), 0xCC00A993 },
- { MT_BBP(RXO, 19), 0xB9CB9CB9 },
- { MT_BBP(RXO, 20), 0x26c00057 },
- { MT_BBP(RXO, 21), 0x00000001 },
- { MT_BBP(RXO, 24), 0x00000006 },
- { MT_BBP(RXO, 28), 0x0000003F },
-};
-
static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
{ RF_G_BAND | RF_BW_20 | RF_BW_40, { MT_BBP(AGC, 4), 0x1FEDA049 } },
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(AGC, 4), 0x1FECA054 } },
@@ -215,16 +82,4 @@ static const struct mt76x0_bbp_switch_item mt76x0_bbp_switch_tab[] = {
{ RF_A_BAND | RF_BW_20 | RF_BW_40 | RF_BW_80, { MT_BBP(RXFE, 0), 0x895000E0 } },
};
-static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
- { MT_BBP(CAL, 47), 0x000010F0 },
- { MT_BBP(CAL, 48), 0x00008080 },
- { MT_BBP(CAL, 49), 0x00000F07 },
- { MT_BBP(CAL, 50), 0x00000040 },
- { MT_BBP(CAL, 51), 0x00000404 },
- { MT_BBP(CAL, 52), 0x00080803 },
- { MT_BBP(CAL, 53), 0x00000704 },
- { MT_BBP(CAL, 54), 0x00002828 },
- { MT_BBP(CAL, 55), 0x00005050 },
-};
-
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h
new file mode 100644
index 000000000000..9e99ba75f490
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_init.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * (c) Copyright 2002-2010, Ralink Technology, Inc.
+ * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
+ * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl>
+ * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
+ */
+
+#ifndef __MT76X0U_INITVALS_INIT_H
+#define __MT76X0U_INITVALS_INIT_H
+
+#include "phy.h"
+
+static const struct mt76_reg_pair common_mac_reg_table[] = {
+ { MT_BCN_OFFSET(0), 0xf8f0e8e0 },
+ { MT_BCN_OFFSET(1), 0x6f77d0c8 },
+ { MT_LEGACY_BASIC_RATE, 0x0000013f },
+ { MT_HT_BASIC_RATE, 0x00008003 },
+ { MT_MAC_SYS_CTRL, 0x00000000 },
+ { MT_RX_FILTR_CFG, 0x00017f97 },
+ { MT_BKOFF_SLOT_CFG, 0x00000209 },
+ { MT_TX_SW_CFG0, 0x00000000 },
+ { MT_TX_SW_CFG1, 0x00080606 },
+ { MT_TX_LINK_CFG, 0x00001020 },
+ { MT_TX_TIMEOUT_CFG, 0x000a2090 },
+ { MT_MAX_LEN_CFG, 0xa0fff | 0x00001000 },
+ { MT_LED_CFG, 0x7f031e46 },
+ { MT_PBF_TX_MAX_PCNT, 0x1fbf1f1f },
+ { MT_PBF_RX_MAX_PCNT, 0x0000fe9f },
+ { MT_TX_RETRY_CFG, 0x47d01f0f },
+ { MT_AUTO_RSP_CFG, 0x00000013 },
+ { MT_CCK_PROT_CFG, 0x07f40003 },
+ { MT_OFDM_PROT_CFG, 0x07f42004 },
+ { MT_PBF_CFG, 0x00f40006 },
+ { MT_WPDMA_GLO_CFG, 0x00000030 },
+ { MT_GF20_PROT_CFG, 0x01742004 },
+ { MT_GF40_PROT_CFG, 0x03f42084 },
+ { MT_MM20_PROT_CFG, 0x01742004 },
+ { MT_MM40_PROT_CFG, 0x03f42084 },
+ { MT_TXOP_CTRL_CFG, 0x0000583f },
+ { MT_TX_RTS_CFG, 0x00ffff20 },
+ { MT_EXP_ACK_TIME, 0x002400ca },
+ { MT_TXOP_HLDR_ET, 0x00000002 },
+ { MT_XIFS_TIME_CFG, 0x33a41010 },
+ { MT_PWR_PIN_CFG, 0x00000000 },
+};
+
+static const struct mt76_reg_pair mt76x0_mac_reg_table[] = {
+ { MT_IOCFG_6, 0xa0040080 },
+ { MT_PBF_SYS_CTRL, 0x00080c00 },
+ { MT_PBF_CFG, 0x77723c1f },
+ { MT_FCE_PSE_CTRL, 0x00000001 },
+ { MT_AMPDU_MAX_LEN_20M1S, 0xAAA99887 },
+ { MT_TX_SW_CFG0, 0x00000601 },
+ { MT_TX_SW_CFG1, 0x00040000 },
+ { MT_TX_SW_CFG2, 0x00000000 },
+ { 0xa44, 0x00000000 },
+ { MT_HEADER_TRANS_CTRL_REG, 0x00000000 },
+ { MT_TSO_CTRL, 0x00000000 },
+ { MT_BB_PA_MODE_CFG1, 0x00500055 },
+ { MT_RF_PA_MODE_CFG1, 0x00500055 },
+ { MT_TX_ALC_CFG_0, 0x2F2F000C },
+ { MT_TX0_BB_GAIN_ATTEN, 0x00000000 },
+ { MT_TX_PWR_CFG_0, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_1, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_2, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_3, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_4, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_7, 0x3A3A3A3A },
+ { MT_TX_PWR_CFG_8, 0x0000003A },
+ { MT_TX_PWR_CFG_9, 0x0000003A },
+ { 0x150C, 0x00000002 },
+ { 0x1238, 0x001700C8 },
+ { MT_LDO_CTRL_0, 0x00A647B6 },
+ { MT_LDO_CTRL_1, 0x6B006464 },
+ { MT_HT_BASIC_RATE, 0x00004003 },
+ { MT_HT_CTRL_CFG, 0x000001FF },
+ { MT_TXOP_HLDR_ET, 0x00000000 },
+ { MT_PN_PAD_MODE, 0x00000003 },
+ { MT_TX_PROT_CFG6, 0xe3f42004 },
+ { MT_TX_PROT_CFG7, 0xe3f42084 },
+ { MT_TX_PROT_CFG8, 0xe3f42104 },
+ { MT_VHT_HT_FBK_CFG1, 0xedcba980 },
+};
+
+static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = {
+ { MT_BBP(CORE, 1), 0x00000002 },
+ { MT_BBP(CORE, 4), 0x00000000 },
+ { MT_BBP(CORE, 24), 0x00000000 },
+ { MT_BBP(CORE, 32), 0x4003000a },
+ { MT_BBP(CORE, 42), 0x00000000 },
+ { MT_BBP(CORE, 44), 0x00000000 },
+ { MT_BBP(IBI, 11), 0x0FDE8081 },
+ { MT_BBP(AGC, 0), 0x00021400 },
+ { MT_BBP(AGC, 1), 0x00000003 },
+ { MT_BBP(AGC, 2), 0x003A6464 },
+ { MT_BBP(AGC, 15), 0x88A28CB8 },
+ { MT_BBP(AGC, 22), 0x00001E21 },
+ { MT_BBP(AGC, 23), 0x0000272C },
+ { MT_BBP(AGC, 24), 0x00002F3A },
+ { MT_BBP(AGC, 25), 0x8000005A },
+ { MT_BBP(AGC, 26), 0x007C2005 },
+ { MT_BBP(AGC, 33), 0x00003238 },
+ { MT_BBP(AGC, 34), 0x000A0C0C },
+ { MT_BBP(AGC, 37), 0x2121262C },
+ { MT_BBP(AGC, 41), 0x38383E45 },
+ { MT_BBP(AGC, 57), 0x00001010 },
+ { MT_BBP(AGC, 59), 0xBAA20E96 },
+ { MT_BBP(AGC, 63), 0x00000001 },
+ { MT_BBP(TXC, 0), 0x00280403 },
+ { MT_BBP(TXC, 1), 0x00000000 },
+ { MT_BBP(RXC, 1), 0x00000012 },
+ { MT_BBP(RXC, 2), 0x00000011 },
+ { MT_BBP(RXC, 3), 0x00000005 },
+ { MT_BBP(RXC, 4), 0x00000000 },
+ { MT_BBP(RXC, 5), 0xF977C4EC },
+ { MT_BBP(RXC, 7), 0x00000090 },
+ { MT_BBP(TXO, 8), 0x00000000 },
+ { MT_BBP(TXBE, 0), 0x00000000 },
+ { MT_BBP(TXBE, 4), 0x00000004 },
+ { MT_BBP(TXBE, 6), 0x00000000 },
+ { MT_BBP(TXBE, 8), 0x00000014 },
+ { MT_BBP(TXBE, 9), 0x20000000 },
+ { MT_BBP(TXBE, 10), 0x00000000 },
+ { MT_BBP(TXBE, 12), 0x00000000 },
+ { MT_BBP(TXBE, 13), 0x00000000 },
+ { MT_BBP(TXBE, 14), 0x00000000 },
+ { MT_BBP(TXBE, 15), 0x00000000 },
+ { MT_BBP(TXBE, 16), 0x00000000 },
+ { MT_BBP(TXBE, 17), 0x00000000 },
+ { MT_BBP(RXFE, 1), 0x00008800 },
+ { MT_BBP(RXFE, 3), 0x00000000 },
+ { MT_BBP(RXFE, 4), 0x00000000 },
+ { MT_BBP(RXO, 13), 0x00000192 },
+ { MT_BBP(RXO, 14), 0x00060612 },
+ { MT_BBP(RXO, 15), 0xC8321B18 },
+ { MT_BBP(RXO, 16), 0x0000001E },
+ { MT_BBP(RXO, 17), 0x00000000 },
+ { MT_BBP(RXO, 18), 0xCC00A993 },
+ { MT_BBP(RXO, 19), 0xB9CB9CB9 },
+ { MT_BBP(RXO, 20), 0x26c00057 },
+ { MT_BBP(RXO, 21), 0x00000001 },
+ { MT_BBP(RXO, 24), 0x00000006 },
+ { MT_BBP(RXO, 28), 0x0000003F },
+};
+
+static const struct mt76_reg_pair mt76x0_dcoc_tab[] = {
+ { MT_BBP(CAL, 47), 0x000010F0 },
+ { MT_BBP(CAL, 48), 0x00008080 },
+ { MT_BBP(CAL, 49), 0x00000F07 },
+ { MT_BBP(CAL, 50), 0x00000040 },
+ { MT_BBP(CAL, 51), 0x00000404 },
+ { MT_BBP(CAL, 52), 0x00080803 },
+ { MT_BBP(CAL, 53), 0x00000704 },
+ { MT_BBP(CAL, 54), 0x00002828 },
+ { MT_BBP(CAL, 55), 0x00005050 },
+};
+
+#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index f7ec3400e368..dda11c704aba 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -180,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
@@ -202,7 +204,7 @@ static void mt76x0e_cleanup(struct mt76x02_dev *dev)
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x0_chip_onoff(dev, false, false);
mt76x0e_stop_hw(dev);
- mt76x02_dma_cleanup(dev);
+ mt76_dma_cleanup(&dev->mt76);
mt76x02_mcu_cleanup(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
index 09f34deb6ba1..3de33aadf794 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c
@@ -734,7 +734,7 @@ mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode,
case 1:
if (chan->band == NL80211_BAND_2GHZ)
tssi_target += 29491; /* 3.6 * 8192 */
- /* fall through */
+ fallthrough;
case 0:
break;
default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 4660b9691ec3..d626817a2103 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -15,6 +15,8 @@
#include "mt76x02_dfs.h"
#include "mt76x02_dma.h"
+#define MT76x02_TX_RING_SIZE 512
+#define MT76x02_PSD_RING_SIZE 128
#define MT76x02_N_WCIDS 128
#define MT_CALIBRATE_INTERVAL HZ
#define MT_MAC_WORK_INTERVAL (HZ / 10)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index ff448a1ad4e3..c4fe1c436aaa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -7,7 +7,7 @@
#include "mt76x02.h"
static int
-mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
+mt76x02_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt76x02_dev *dev = file->private;
int i, j;
@@ -31,11 +31,7 @@ mt76x02_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt76x02_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt76x02_ampdu_stat_read, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(mt76x02_ampdu_stat);
static int read_txpower(struct seq_file *file, void *data)
{
@@ -48,15 +44,8 @@ static int read_txpower(struct seq_file *file, void *data)
return 0;
}
-static const struct file_operations fops_ampdu_stat = {
- .open = mt76x02_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
static int
-mt76x02_dfs_stat_read(struct seq_file *file, void *data)
+mt76x02_dfs_stat_show(struct seq_file *file, void *data)
{
struct mt76x02_dev *dev = file->private;
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
@@ -81,18 +70,7 @@ mt76x02_dfs_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt76x02_dfs_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt76x02_dfs_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_dfs_stat = {
- .open = mt76x02_dfs_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt76x02_dfs_stat);
static int read_agc(struct seq_file *file, void *data)
{
@@ -150,8 +128,8 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
debugfs_create_file("edcca", 0600, dir, dev, &fops_edcca);
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
- debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt76x02_ampdu_stat_fops);
+ debugfs_create_file("dfs_stats", 0400, dir, dev, &mt76x02_dfs_stat_fops);
debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
read_txpower);
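
[note] The debugfs conversion above trades the hand-rolled single_open() boilerplate for DEFINE_SHOW_ATTRIBUTE(), which takes a name##_show(struct seq_file *, void *) function and generates the matching name##_open plus a const name##_fops wired to seq_read/seq_lseek/single_release. The shape, with a hypothetical attribute:

static int foo_show(struct seq_file *file, void *data)
{
        seq_puts(file, "example\n");
        return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);     /* emits foo_open and foo_fops */

/* debugfs_create_file("foo", 0400, dir, dev, &foo_fops); */
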
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index ff6a9e4daac0..b29cd39dc176 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -429,11 +429,11 @@ static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev,
{
struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd;
struct mt76x02_dfs_sw_detector_params *sw_params;
- u32 width_delta, with_sum, factor, cur_pri;
+ u32 width_delta, with_sum;
struct mt76x02_dfs_sequence seq, *seq_p;
struct mt76x02_dfs_event_rb *event_rb;
struct mt76x02_dfs_event *cur_event;
- int i, j, end, pri;
+ int i, j, end, pri, factor, cur_pri;
event_rb = event->engine == 2 ? &dfs_pd->event_rb[1]
: &dfs_pd->event_rb[0];
@@ -517,7 +517,7 @@ static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev,
struct mt76x02_dfs_sw_detector_params *sw_params;
struct mt76x02_dfs_sequence *seq, *tmp_seq;
u16 max_seq_len = 0;
- u32 factor, pri;
+ int factor, pri;
sw_params = &dfs_pd->sw_dpd_params;
list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
index 4aff4f8e87b6..23b0e7d10d57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dma.h
@@ -61,6 +61,5 @@ mt76x02_wait_for_wpdma(struct mt76_dev *dev, int timeout)
int mt76x02_dma_init(struct mt76x02_dev *dev);
void mt76x02_dma_disable(struct mt76x02_dev *dev);
-void mt76x02_dma_cleanup(struct mt76x02_dev *dev);
#endif /* __MT76x02_DMA_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index e4e03beabe43..da6d3f51f6d4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -300,7 +300,7 @@ mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate,
return 0;
case MT_PHY_TYPE_HT_GF:
txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
txrate->flags |= IEEE80211_TX_RC_MCS;
txrate->idx = idx;
@@ -349,6 +349,8 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
memset(txwi, 0, sizeof(*txwi));
+ mt76_tx_check_agg_ssn(sta, skb);
+
if (!info->control.hw_key && wcid && wcid->hw_key_idx != 0xff &&
ieee80211_has_protected(hdr->frame_control)) {
wcid = NULL;
@@ -462,7 +464,7 @@ mt76x02_tx_rate_fallback(struct ieee80211_tx_rate *rates, int idx, int phy)
rates[1].idx = 0;
break;
}
- /* fall through */
+ fallthrough;
default:
rates[1].idx = max_t(int, rates[0].idx - 1, 0);
break;
@@ -677,7 +679,7 @@ mt76x02_mac_process_rate(struct mt76x02_dev *dev,
return 0;
case MT_PHY_TYPE_HT_GF:
status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
status->encoding = RX_ENC_HT;
status->rate_idx = idx;
@@ -898,8 +900,7 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq)
}
}
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
struct mt76x02_txwi *txwi;
@@ -916,7 +917,7 @@ void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
txwi = (struct mt76x02_txwi *)txwi_ptr;
trace_mac_txdone(mdev, txwi->wcid, txwi->pktid);
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index c70d17b2290c..0cfbaca50210 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -194,8 +194,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta, int len);
void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq);
-void mt76x02_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt76x02_update_channel(struct mt76_dev *mdev);
void mt76x02_mac_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index bacb1f10a699..cf68731bd094 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -14,7 +14,7 @@
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
- struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
+ struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD];
struct beacon_bc_data data = {};
struct sk_buff *skb;
int i;
@@ -104,8 +104,7 @@ void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);
static int
-mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -118,8 +117,7 @@ mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));
@@ -151,9 +149,11 @@ static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
mt76x02_send_tx_status(dev, &stat, &update);
}
-static void mt76x02_tx_tasklet(unsigned long data)
+static void mt76x02_tx_worker(struct mt76_worker *w)
{
- struct mt76x02_dev *dev = (struct mt76x02_dev *)data;
+ struct mt76x02_dev *dev;
+
+ dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);
mt76x02_mac_poll_tx_status(dev, false);
mt76x02_process_tx_status_fifo(dev);
@@ -178,7 +178,7 @@ static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
for (i = MT_TXQ_MCU; i >= 0; i--)
mt76_queue_tx_cleanup(dev, i, false);
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
return 0;
}
@@ -197,8 +197,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (!status_fifo)
return -ENOMEM;
- tasklet_init(&dev->mt76.tx_tasklet, mt76x02_tx_tasklet,
- (unsigned long)dev);
+ dev->mt76.tx_worker.fn = mt76x02_tx_worker;
tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
(unsigned long)dev);
@@ -210,19 +209,18 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[i],
- mt76_ac_to_hwq(i),
- MT_TX_RING_SIZE);
+ ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i),
+ MT76x02_TX_RING_SIZE);
if (ret)
return ret;
}
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_PSD],
- MT_TX_HW_QUEUE_MGMT, MT_TX_RING_SIZE);
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD,
+ MT_TX_HW_QUEUE_MGMT, MT76x02_PSD_RING_SIZE);
if (ret)
return ret;
- ret = mt76x02_init_tx_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
+ ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU,
MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
if (ret)
return ret;
@@ -263,9 +261,10 @@ EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
struct mt76x02_dev *dev = dev_instance;
- u32 intr;
+ u32 intr, mask;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -273,17 +272,17 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
- intr &= dev->mt76.mmio.irqmask;
+ mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);
+ if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
+ mask |= MT_INT_TX_DONE_ALL;
+
+ mt76x02_irq_disable(dev, mask);
- if (intr & MT_INT_RX_DONE(0)) {
- mt76x02_irq_disable(dev, MT_INT_RX_DONE(0));
+ if (intr & MT_INT_RX_DONE(0))
napi_schedule(&dev->mt76.napi[0]);
- }
- if (intr & MT_INT_RX_DONE(1)) {
- mt76x02_irq_disable(dev, MT_INT_RX_DONE(1));
+ if (intr & MT_INT_RX_DONE(1))
napi_schedule(&dev->mt76.napi[1]);
- }
if (intr & MT_INT_PRE_TBTT)
tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);
@@ -293,21 +292,17 @@ irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
if (dev->mt76.csa_complete)
mt76_csa_finish(&dev->mt76);
else
- mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
+ mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
}
if (intr & MT_INT_TX_STAT)
mt76x02_mac_poll_tx_status(dev, true);
- if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL)) {
- mt76x02_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL))
napi_schedule(&dev->mt76.tx_napi);
- }
- if (intr & MT_INT_GPTIMER) {
- mt76x02_irq_disable(dev, MT_INT_GPTIMER);
+ if (intr & MT_INT_GPTIMER)
tasklet_schedule(&dev->dfs_pd.dfs_tasklet);
- }
return IRQ_HANDLED;
}
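
[note] Two behavioural tweaks hide in this handler rework: intr is ANDed with mmio.irqmask before being written back to MT_INT_SOURCE_CSR, so only sources the driver actually enabled get acknowledged, and the per-source disables collapse into a single mask, so the hot path issues one irqmask update instead of up to four. Condensed:

        u32 mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);

        if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
                mask |= MT_INT_TX_DONE_ALL;

        mt76x02_irq_disable(dev, mask);         /* one MMIO write */
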
@@ -329,13 +324,6 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev)
MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}
-void mt76x02_dma_cleanup(struct mt76x02_dev *dev)
-{
- tasklet_kill(&dev->mt76.tx_tasklet);
- mt76_dma_cleanup(&dev->mt76);
-}
-EXPORT_SYMBOL_GPL(mt76x02_dma_cleanup);
-
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);
@@ -369,7 +357,7 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
int i;
for (i = 0; i < 4; i++) {
- q = dev->mt76.q_tx[i].q;
+ q = dev->mt76.q_tx[i];
if (!q->queued)
continue;
@@ -453,7 +441,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
set_bit(MT76_RESET, &dev->mphy.state);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
napi_disable(&dev->mt76.tx_napi);
mt76_for_each_q_rx(&dev->mt76, i) {
@@ -510,7 +498,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
clear_bit(MT76_RESET, &dev->mphy.state);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
index a57dcc8820aa..b5be884b3549 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb.h
@@ -19,8 +19,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt76x02u_init_beacon_config(struct mt76x02_dev *dev);
void mt76x02u_exit_beacon_config(struct mt76x02_dev *dev);
#endif /* __MT76x02_USB_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 37321e656776..2c2f56112b57 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -15,11 +15,10 @@ static void mt76x02u_remove_dma_hdr(struct sk_buff *skb)
mt76x02_remove_hdr_pad(skb, 2);
}
-void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
mt76x02u_remove_dma_hdr(e->skb);
- mt76_tx_complete_skb(mdev, e->skb);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
}
EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb);
@@ -46,7 +45,7 @@ EXPORT_SYMBOL_GPL(mt76x02u_mac_start);
int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
{
- u32 info;
+ u32 info, pad;
/* Buffer layout:
* | 4B | xfer len | pad | 4B |
@@ -58,7 +57,8 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags)
FIELD_PREP(MT_TXD_INFO_DPORT, port) | flags;
put_unaligned_le32(info, skb_push(skb, sizeof(info)));
- return mt76_skb_adjust_pad(skb);
+ pad = round_up(skb->len, 4) + 4 - skb->len;
+ return mt76_skb_adjust_pad(skb, pad);
}
int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
@@ -67,7 +67,7 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
struct mt76_tx_info *tx_info)
{
struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76);
- int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
+ int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid]->hw_idx);
struct mt76x02_txwi *txwi;
bool ampdu = IEEE80211_SKB_CB(tx_info->skb)->flags & IEEE80211_TX_CTL_AMPDU;
enum mt76_qsel qsel;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index dbd4077ea283..11b769af2f8f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -294,8 +294,6 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mvif->group_wcid.hw_key_idx = -1;
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->group_wcid;
-
- mt76_txq_init(&dev->mt76, vif->txq);
}
int
@@ -347,7 +345,6 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_dev *dev = hw->priv;
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
- mt76_txq_remove(&dev->mt76, vif->txq);
dev->mphy.vif_mask &= ~BIT(mvif->idx);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
@@ -490,7 +487,7 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u8 cw_min = 5, cw_max = 10, qid;
u32 val;
- qid = dev->mt76.q_tx[queue].q->hw_idx;
+ qid = dev->mt76.q_tx[queue]->hw_idx;
if (params->cw_min)
cw_min = fls(params->cw_min);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index 6dfb0df8ec8a..4d50dad29ddf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -63,6 +63,8 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
ret = devm_request_irq(mdev->dev, pdev->irq, mt76x02_irq_handler,
IRQF_SHARED, KBUILD_MODNAME, dev);
if (ret)
@@ -111,7 +113,7 @@ mt76x2e_suspend(struct pci_dev *pdev, pm_message_t state)
napi_disable(&mdev->tx_napi);
tasklet_kill(&mdev->pre_tbtt_tasklet);
- tasklet_kill(&mdev->tx_tasklet);
+ mt76_worker_disable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i)
napi_disable(&mdev->napi[i]);
@@ -145,6 +147,7 @@ mt76x2e_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
+ mt76_worker_enable(&mdev->tx_worker);
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 101a0fe00ef3..48a3ebc9892a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -283,7 +283,7 @@ void mt76x2_cleanup(struct mt76x02_dev *dev)
tasklet_disable(&dev->dfs_pd.dfs_tasklet);
tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
mt76x2_stop_hardware(dev);
- mt76x02_dma_cleanup(dev);
+ mt76_dma_cleanup(&dev->mt76);
mt76x02_mcu_cleanup(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 38f473d587c9..1049927faf24 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -21,7 +21,6 @@ static int mt7915_ser_trigger_set(void *data, u64 val)
switch (val) {
case SER_SET_RECOVER_L1:
case SER_SET_RECOVER_L2:
- /* fall through */
ret = mt7915_mcu_set_ser(dev, SER_ENABLE, BIT(val), 0);
if (ret)
return ret;
@@ -292,15 +291,15 @@ mt7915_queues_read(struct seq_file *s, void *data)
int i;
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
- struct mt76_sw_queue *q = &dev->mt76.q_tx[queue_map[i].id];
+ struct mt76_queue *q = dev->mt76.q_tx[queue_map[i].id];
- if (!q->q)
+ if (!q)
continue;
seq_printf(s,
"%s: queued=%d head=%d tail=%d\n",
- queue_map[i].queue, q->q->queued, q->q->head,
- q->q->tail);
+ queue_map[i].queue, q->queued, q->head,
+ q->tail);
}
return 0;
@@ -400,7 +399,7 @@ static int mt7915_sta_fixed_rate_set(void *data, u64 rate)
struct ieee80211_sta *sta = data;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- return mt7915_mcu_set_fixed_rate(msta->vif->dev, sta, rate);
+ return mt7915_mcu_set_fixed_rate(msta->vif->phy->dev, sta, rate);
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index a8832c5e6004..cfa12c4c671f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -8,7 +8,6 @@
static int
mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc)
{
- struct mt76_sw_queue *q;
struct mt76_queue *hwq;
int err, i;
@@ -21,18 +20,14 @@ mt7915_init_tx_queues(struct mt7915_dev *dev, int n_desc)
if (err < 0)
return err;
- for (i = 0; i < MT_TXQ_MCU; i++) {
- q = &dev->mt76.q_tx[i];
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
- }
+ for (i = 0; i < MT_TXQ_MCU; i++)
+ dev->mt76.q_tx[i] = hwq;
return 0;
}
static int
-mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q,
- int idx, int n_desc)
+mt7915_init_mcu_queue(struct mt7915_dev *dev, int qid, int idx, int n_desc)
{
struct mt76_queue *hwq;
int err;
@@ -45,8 +40,7 @@ mt7915_init_mcu_queue(struct mt7915_dev *dev, struct mt76_sw_queue *q,
if (err < 0)
return err;
- INIT_LIST_HEAD(&q->swq);
- q->q = hwq;
+ dev->mt76.q_tx[qid] = hwq;
return 0;
}
@@ -72,7 +66,7 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
mt76_rx(&dev->mt76, q, skb);
return;
}
- /* fall through */
+ fallthrough;
default:
dev_kfree_skb(skb);
break;
@@ -84,8 +78,6 @@ mt7915_tx_cleanup(struct mt7915_dev *dev)
{
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU_WA, false);
- mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
- mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
}
static int mt7915_poll_tx(struct napi_struct *napi, int budget)
@@ -97,13 +89,7 @@ static int mt7915_poll_tx(struct napi_struct *napi, int budget)
mt7915_tx_cleanup(dev);
if (napi_complete_done(napi, 0))
- mt7915_irq_enable(dev, MT_INT_TX_DONE_ALL);
-
- mt7915_tx_cleanup(dev);
-
- mt7915_mac_sta_poll(dev);
-
- tasklet_schedule(&dev->mt76.tx_tasklet);
+ mt7915_irq_enable(dev, MT_INT_TX_DONE_MCU);
return 0;
}
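
[note] mt7915_poll_tx() now only reaps the two MCU queues and re-arms MT_INT_TX_DONE_MCU; the data queues are no longer drained here after this series. The napi_complete_done() guard is load-bearing: it returns false when the instance must not re-arm interrupts (for example under busy polling), so the irq-enable has to stay inside the conditional. A generic sketch of the contract (helper names hypothetical):

        if (napi_complete_done(napi, work_done)) {
                /* NAPI idle and off the poll list: safe to let the
                 * device interrupt fire again */
                my_irq_enable(dev, MY_TX_DONE);
        }
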
@@ -138,12 +124,120 @@ void mt7915_dma_prefetch(struct mt7915_dev *dev)
mt76_wr(dev, MT_WFDMA1_RX_RING3_EXT_CTRL, PREFETCH(0x480, 0x0));
}
+static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
+{
+ static const struct {
+ u32 phys;
+ u32 mapped;
+ u32 size;
+ } fixed_map[] = {
+ { 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
+ { 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
+ { 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
+ { 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
+ { 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
+ { 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
+ { 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
+ { 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
+ { 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
+ { 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
+ { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
+ { 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
+ { 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
+ { 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
+ { 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
+ { 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
+ { 0x820f3000, 0xa0c00, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_ARB) */
+ { 0x820f4000, 0xa1000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_TMAC) */
+ { 0x820f5000, 0xa1400, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_RMAC) */
+ { 0x820f7000, 0xa1e00, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_DMA) */
+ { 0x820f9000, 0xa3400, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_WTBLOFF) */
+ { 0x820fa000, 0xa4000, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_ETBF) */
+ { 0x820fb000, 0xa4200, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_LPON) */
+ { 0x820fc000, 0xa4600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_INT) */
+ { 0x820fd000, 0xa4800, 0x0800 }, /* WF_LMAC_TOP BN1 (WF_MIB) */
+ };
+ int i;
+
+ if (addr < 0x100000)
+ return addr;
+
+ for (i = 0; i < ARRAY_SIZE(fixed_map); i++) {
+ u32 ofs;
+
+ if (addr < fixed_map[i].phys)
+ continue;
+
+ ofs = addr - fixed_map[i].phys;
+ if (ofs > fixed_map[i].size)
+ continue;
+
+ return fixed_map[i].mapped + ofs;
+ }
+
+ if ((addr >= 0x18000000 && addr < 0x18c00000) ||
+ (addr >= 0x70000000 && addr < 0x78000000) ||
+ (addr >= 0x7c000000 && addr < 0x7c400000))
+ return mt7915_reg_map_l1(dev, addr);
+
+ return mt7915_reg_map_l2(dev, addr);
+}
+
+static u32 mt7915_rr(struct mt76_dev *mdev, u32 offset)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rr(mdev, addr);
+}
+
+static void mt7915_wr(struct mt76_dev *mdev, u32 offset, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ dev->bus_ops->wr(mdev, addr, val);
+}
+
+static u32 mt7915_rmw(struct mt76_dev *mdev, u32 offset, u32 mask, u32 val)
+{
+ struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
+ u32 addr = __mt7915_reg_addr(dev, offset);
+
+ return dev->bus_ops->rmw(mdev, addr, mask, val);
+}
+
int mt7915_dma_init(struct mt7915_dev *dev)
{
/* Increase buffer size to receive large VHT/HE MPDUs */
+ struct mt76_bus_ops *bus_ops;
int rx_buf_size = MT_RX_BUF_SIZE * 2;
int ret;
+ dev->bus_ops = dev->mt76.bus;
+ bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
+ GFP_KERNEL);
+ if (!bus_ops)
+ return -ENOMEM;
+
+ bus_ops->rr = mt7915_rr;
+ bus_ops->wr = mt7915_wr;
+ bus_ops->rmw = mt7915_rmw;
+ dev->mt76.bus = bus_ops;
+
mt76_dma_attach(&dev->mt76);
/* configure global setting */
@@ -168,22 +262,19 @@ int mt7915_dma_init(struct mt7915_dev *dev)
return ret;
/* command to WM */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU],
- MT7915_TXQ_MCU_WM,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU, MT7915_TXQ_MCU_WM,
MT7915_TX_MCU_RING_SIZE);
if (ret)
return ret;
/* command to WA */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_MCU_WA],
- MT7915_TXQ_MCU_WA,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_MCU_WA, MT7915_TXQ_MCU_WA,
MT7915_TX_MCU_RING_SIZE);
if (ret)
return ret;
/* firmware download */
- ret = mt7915_init_mcu_queue(dev, &dev->mt76.q_tx[MT_TXQ_FWDL],
- MT7915_TXQ_FWDL,
+ ret = mt7915_init_mcu_queue(dev, MT_TXQ_FWDL, MT7915_TXQ_FWDL,
MT7915_TX_FWDL_RING_SIZE);
if (ret)
return ret;
@@ -248,7 +339,7 @@ int mt7915_dma_init(struct mt7915_dev *dev)
MT_WFDMA1_GLO_CFG_TX_DMA_EN | MT_WFDMA1_GLO_CFG_RX_DMA_EN);
/* enable interrupts for TX/RX rings */
- mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ mt7915_irq_enable(dev, MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_MCU |
MT_INT_MCU_CMD);
return 0;
@@ -281,6 +372,5 @@ void mt7915_dma_cleanup(struct mt7915_dev *dev)
MT_WFDMA0_RST_DMASHDL_ALL_RST |
MT_WFDMA0_RST_LOGIC_RST);
- tasklet_kill(&dev->mt76.tx_tasklet);
mt76_dma_cleanup(&dev->mt76);
}
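Editor's note on the dma.c hunks above: mt7915_dma_init() now saves the original bus accessors and substitutes a devm_kmemdup()'d copy whose rr/wr/rmw wrappers translate every offset through __mt7915_reg_addr() before delegating to the saved originals. Below is a minimal, self-contained userspace sketch of that interposition pattern; the names (bus_ops, remap_dev, the toy remap window) are illustrative stand-ins, not mt76 API.

/* Interpose accessor ops to remap addresses before delegating. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

struct bus_ops {
	uint32_t (*rr)(void *dev, uint32_t addr);
	void (*wr)(void *dev, uint32_t addr, uint32_t val);
};

struct remap_dev {
	const struct bus_ops *orig;	/* saved original ops */
	struct bus_ops *cur;		/* active (wrapped) ops */
	uint32_t regs[0x100];		/* fake register file */
};

static uint32_t phys_rr(void *d, uint32_t addr)
{
	return ((struct remap_dev *)d)->regs[addr & 0xff];
}

static void phys_wr(void *d, uint32_t addr, uint32_t val)
{
	((struct remap_dev *)d)->regs[addr & 0xff] = val;
}

/* Toy remap: fold a high window down into the low one. */
static uint32_t remap(uint32_t addr)
{
	return addr >= 0x1000 ? addr - 0x1000 : addr;
}

static uint32_t wrapped_rr(void *d, uint32_t addr)
{
	struct remap_dev *dev = d;

	return dev->orig->rr(d, remap(addr));
}

static void wrapped_wr(void *d, uint32_t addr, uint32_t val)
{
	struct remap_dev *dev = d;

	dev->orig->wr(d, remap(addr), val);
}

int main(void)
{
	static const struct bus_ops phys = { phys_rr, phys_wr };
	struct remap_dev dev = { .orig = &phys };
	struct bus_ops *ops = malloc(sizeof(*ops));

	if (!ops)
		return 1;

	/* Copy then override, as the driver does with devm_kmemdup(). */
	memcpy(ops, &phys, sizeof(*ops));
	ops->rr = wrapped_rr;
	ops->wr = wrapped_wr;
	dev.cur = ops;

	dev.cur->wr(&dev, 0x1004, 0xdead);	/* goes through remap() */
	printf("0x%x\n", (unsigned)dev.cur->rr(&dev, 0x4));
	free(ops);
	return 0;
}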
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 8d6ceb3b67b4..0232b66acb4f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -135,6 +135,12 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ /*
+ * Force the firmware operation mode into the normal state;
+ * it must be set before the firmware download stage.
+ */
+ mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
+
ret = mt7915_mcu_init(dev);
if (ret)
return ret;
@@ -612,6 +618,7 @@ int mt7915_register_ext_phy(struct mt7915_dev *dev)
mphy->antenna_mask = BIT(hweight8(phy->chainmask)) - 1;
mt7915_init_wiphy(mphy->hw);
+ INIT_LIST_HEAD(&phy->stats_list);
INIT_DELAYED_WORK(&phy->mac_work, mt7915_mac_work);
/*
@@ -652,7 +659,10 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
+ INIT_LIST_HEAD(&dev->phy.stats_list);
+ INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work);
INIT_DELAYED_WORK(&dev->phy.mac_work, mt7915_mac_work);
+ INIT_LIST_HEAD(&dev->sta_rc_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
spin_lock_init(&dev->sta_poll_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 036207f828f3..6f159d99a596 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -88,17 +88,16 @@ bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
0, 5000);
}
-static u32 mt7915_mac_wtbl_lmac_read(struct mt7915_dev *dev, u16 wcid,
- u16 addr)
+static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid)
{
mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
- return mt76_rr(dev, MT_WTBL_LMAC_OFFS(wcid, addr));
+ return MT_WTBL_LMAC_OFFS(wcid, 0);
}
/* TODO: use txfree airtime info to avoid runtime accessing in the long run */
-void mt7915_mac_sta_poll(struct mt7915_dev *dev)
+static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
static const u8 ac_to_tid[] = {
[IEEE80211_AC_BE] = 0,
@@ -106,47 +105,50 @@ void mt7915_mac_sta_poll(struct mt7915_dev *dev)
[IEEE80211_AC_VI] = 4,
[IEEE80211_AC_VO] = 6
};
- static const u8 hw_queue_map[] = {
- [IEEE80211_AC_BK] = 0,
- [IEEE80211_AC_BE] = 1,
- [IEEE80211_AC_VI] = 2,
- [IEEE80211_AC_VO] = 3,
- };
struct ieee80211_sta *sta;
struct mt7915_sta *msta;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
+ LIST_HEAD(sta_poll_list);
int i;
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_poll_list, &sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
rcu_read_lock();
while (true) {
bool clear = false;
+ u32 addr;
u16 idx;
spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&dev->sta_poll_list)) {
+ if (list_empty(&sta_poll_list)) {
spin_unlock_bh(&dev->sta_poll_lock);
break;
}
- msta = list_first_entry(&dev->sta_poll_list,
+ msta = list_first_entry(&sta_poll_list,
struct mt7915_sta, poll_list);
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
- for (i = 0, idx = msta->wcid.idx; i < IEEE80211_NUM_ACS; i++) {
+ idx = msta->wcid.idx;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + IEEE80211_NUM_ACS];
+ u32 rx_last = msta->airtime_ac[i + 4];
+
+ msta->airtime_ac[i] = mt76_rr(dev, addr);
+ msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- msta->airtime_ac[i] =
- mt7915_mac_wtbl_lmac_read(dev, idx, 20 + i);
- msta->airtime_ac[i + IEEE80211_NUM_ACS] =
- mt7915_mac_wtbl_lmac_read(dev, idx, 21 + i);
tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + IEEE80211_NUM_ACS] -
- rx_last;
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
+
+ addr += 8;
}
if (clear) {
@@ -161,8 +163,9 @@ void mt7915_mac_sta_poll(struct mt7915_dev *dev)
sta = container_of((void *)msta, struct ieee80211_sta,
drv_priv);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_cur = tx_time[i];
- u32 rx_cur = rx_time[hw_queue_map[i]];
+ u8 q = mt7915_lmac_mapping(dev, i);
+ u32 tx_cur = tx_time[q];
+ u32 rx_cur = rx_time[q];
u8 tid = ac_to_tid[i];
if (!tx_cur && !rx_cur)
@@ -468,7 +471,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
switch (mode) {
case MT_PHY_TYPE_CCK:
cck = true;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_OFDM:
i = mt76_get_rate(&dev->mt76, sband, i, cck);
break;
@@ -487,7 +490,7 @@ int mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
break;
case MT_PHY_TYPE_HE_MU:
status->flag |= RX_FLAG_RADIOTAP_HE_MU;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HE_SU:
case MT_PHY_TYPE_HE_EXT_SU:
case MT_PHY_TYPE_HE_TB:
@@ -565,13 +568,15 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
bool multicast = is_multicast_ether_addr(hdr->addr1);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_phy *mphy = &dev->mphy;
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
u8 fc_type, fc_stype, p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
__le16 fc = hdr->frame_control;
- u16 tx_count = 4, seqno = 0;
+ u16 tx_count = 15, seqno = 0;
+ u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
u32 val;
if (vif) {
@@ -587,6 +592,10 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
+ txwi[4] = 0;
+ txwi[5] = 0;
+ txwi[6] = 0;
+
if (beacon) {
p_fmt = MT_TX_TYPE_FW;
q_idx = MT_LMAC_BCN0;
@@ -599,6 +608,20 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
mt7915_lmac_mapping(dev, skb_get_queue_mapping(skb));
}
+ if (ieee80211_is_action(fc) &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+
+ txwi[5] |= cpu_to_le32(MT_TXD5_ADD_BA);
+ tid = (capab >> 2) & IEEE80211_QOS_CTL_TID_MASK;
+ } else if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)hdr;
+ u16 control = le16_to_cpu(bar->control);
+
+ tid = FIELD_GET(IEEE80211_BAR_CTRL_TID_INFO_MASK, control);
+ }
+
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
@@ -609,8 +632,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD1_HDR_FORMAT, MT_HDR_FORMAT_802_11) |
FIELD_PREP(MT_TXD1_HDR_INFO,
ieee80211_get_hdrlen_from_skb(skb) / 2) |
- FIELD_PREP(MT_TXD1_TID,
- skb->priority & IEEE80211_QOS_CTL_TID_MASK) |
+ FIELD_PREP(MT_TXD1_TID, tid) |
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
if (ext_phy && q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)
@@ -634,10 +656,6 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
}
txwi[2] = cpu_to_le32(val);
- txwi[4] = 0;
- txwi[5] = 0;
- txwi[6] = 0;
-
if (!ieee80211_is_data(fc) || multicast) {
u16 rate;
@@ -665,20 +683,24 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype);
+ if (wcid->amsdu)
+ val |= MT_TXD7_HW_AMSDU;
txwi[7] = cpu_to_le32(val);
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count);
- if (ieee80211_is_data_qos(fc)) {
- seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- val |= MT_TXD3_SN_VALID;
- } else if (ieee80211_is_back_req(fc)) {
- struct ieee80211_bar *bar;
-
- bar = (struct ieee80211_bar *)skb->data;
- seqno = IEEE80211_SEQ_TO_SN(le16_to_cpu(bar->start_seq_num));
- val |= MT_TXD3_SN_VALID;
+ if (info->flags & IEEE80211_TX_CTL_INJECTED) {
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+
+ if (ieee80211_is_back_req(hdr->frame_control)) {
+ struct ieee80211_bar *bar;
+
+ bar = (struct ieee80211_bar *)skb->data;
+ seqno = le16_to_cpu(bar->start_seq_num);
+ }
+
+ val |= MT_TXD3_SN_VALID |
+ FIELD_PREP(MT_TXD3_SEQ, IEEE80211_SEQ_TO_SN(seqno));
}
- val |= FIELD_PREP(MT_TXD3_SEQ, seqno);
txwi[3] |= cpu_to_le32(val);
}
@@ -715,6 +737,7 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
/* pass partial skb header to fw */
tx_info->buf[1].len = MT_CT_PARSE_LEN;
+ tx_info->buf[1].skip_unmap = true;
tx_info->nbuf = MT_CT_DMA_BUF_NUM;
txp->flags = cpu_to_le16(MT_CT_INFO_APPLY_TXD);
@@ -747,45 +770,29 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
return 0;
}
-static inline bool
-mt7915_tx_check_aggr_tid(struct mt7915_sta *msta, u8 tid)
-{
- bool ret = false;
-
- spin_lock_bh(&msta->ampdu_lock);
- if (msta->ampdu_state[tid] == MT7915_AGGR_STOP)
- ret = true;
- spin_unlock_bh(&msta->ampdu_lock);
-
- return ret;
-}
-
static void
-mt7915_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb)
+mt7915_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct mt7915_sta *msta;
- u16 tid;
-
- if (!sta->ht_cap.ht_supported)
- return;
+ u16 fc, tid;
+ u32 val;
- if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
+ if (!sta || !sta->ht_cap.ht_supported)
return;
- if (unlikely(!ieee80211_is_data_qos(hdr->frame_control)))
+ tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
+ if (tid >= 6) /* skip VO queue */
return;
- if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ val = le32_to_cpu(txwi[2]);
+ fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
+ FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
return;
msta = (struct mt7915_sta *)sta->drv_priv;
- tid = ieee80211_get_tid(hdr);
-
- if (mt7915_tx_check_aggr_tid(msta, tid)) {
+ if (!test_and_set_bit(tid, &msta->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_PROGRESS);
- }
}
static inline void
@@ -822,8 +829,6 @@ mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
if (info->flags & IEEE80211_TX_CTL_AMPDU)
info->flags |= IEEE80211_TX_STAT_AMPDU;
- else if (sta)
- mt7915_tx_check_aggr(sta, skb);
if (stat)
ieee80211_tx_info_clear_status(info);
@@ -864,6 +869,10 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta = NULL;
u8 i, count;
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
+
/*
* TODO: MT_TX_FREE_LATENCY is the MSDU time from when the TXD is queued into PLE
* to when the ack is received or the frame is dropped by hw (air + hw queue time).
@@ -880,6 +889,7 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
*/
if (info & MT_TX_FREE_PAIR) {
struct mt7915_sta *msta;
+ struct mt7915_phy *phy;
struct mt76_wcid *wcid;
u16 idx;
@@ -891,8 +901,13 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
continue;
msta = container_of(wcid, struct mt7915_sta, wcid);
- ieee80211_queue_work(mt76_hw(dev), &msta->stats_work);
- continue;
+ phy = msta->vif->phy;
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->stats_list))
+ list_add_tail(&msta->stats_list, &phy->stats_list);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
}
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
@@ -907,6 +922,21 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
mt7915_txp_skb_unmap(mdev, txwi);
if (txwi->skb) {
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
+ void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
+
+ if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7915_tx_check_aggr(sta, txwi_ptr);
+
+ if (sta && !info->tx_time_est) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ int pending;
+
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+ }
+
mt7915_tx_complete_status(mdev, txwi->skb, sta, stat);
txwi->skb = NULL;
}
@@ -914,10 +944,12 @@ void mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
mt76_put_txwi(mdev, txwi);
}
dev_kfree_skb(skb);
+
+ mt7915_mac_sta_poll(dev);
+ mt76_worker_schedule(&dev->mt76.tx_worker);
}
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e)
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
struct mt7915_dev *dev;
@@ -1186,7 +1218,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (ext_phy)
mt76_txq_schedule_all(ext_phy);
- tasklet_disable(&dev->mt76.tx_tasklet);
+ mt76_worker_disable(&dev->mt76.tx_worker);
napi_disable(&dev->mt76.napi[0]);
napi_disable(&dev->mt76.napi[1]);
napi_disable(&dev->mt76.napi[2]);
@@ -1206,7 +1238,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
clear_bit(MT76_MCU_RESET, &dev->mphy.state);
clear_bit(MT76_RESET, &dev->mphy.state);
- tasklet_enable(&dev->mt76.tx_tasklet);
+ mt76_worker_enable(&dev->mt76.tx_worker);
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
@@ -1281,39 +1313,63 @@ mt7915_mac_update_mib_stats(struct mt7915_phy *phy)
}
}
-void mt7915_mac_sta_stats_work(struct work_struct *work)
+static void
+mt7915_mac_sta_stats_work(struct mt7915_phy *phy)
{
+ struct mt7915_dev *dev = phy->dev;
+ struct mt7915_sta *msta;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&phy->stats_list, &list);
+
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7915_sta, stats_list);
+ list_del_init(&msta->stats_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+
+ /* use MT_TX_FREE_RATE to report the Tx rate on newer devices */
+ mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO, msta->wcid.idx);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
+
+ spin_unlock_bh(&dev->sta_poll_lock);
+}
+
+void mt7915_mac_sta_rc_work(struct work_struct *work)
+{
+ struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
- struct mt7915_sta_stats *stats;
struct mt7915_sta *msta;
- struct mt7915_dev *dev;
+ u32 changed;
+ LIST_HEAD(list);
- msta = container_of(work, struct mt7915_sta, stats_work);
- sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
- vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
- dev = msta->vif->dev;
- stats = &msta->stats;
+ spin_lock_bh(&dev->sta_poll_lock);
+ list_splice_init(&dev->sta_rc_list, &list);
- /* use MT_TX_FREE_RATE to report Tx rate for further devices */
- if (time_after(jiffies, stats->jiffies + HZ)) {
- mt7915_mcu_get_rate_info(dev, RATE_CTRL_RU_INFO,
- msta->wcid.idx);
+ while (!list_empty(&list)) {
+ msta = list_first_entry(&list, struct mt7915_sta, rc_list);
+ list_del_init(&msta->rc_list);
+ changed = msta->stats.changed;
+ msta->stats.changed = 0;
+ spin_unlock_bh(&dev->sta_poll_lock);
- stats->jiffies = jiffies;
- }
+ sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
- if (test_and_clear_bit(IEEE80211_RC_SUPP_RATES_CHANGED |
+ if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
IEEE80211_RC_NSS_CHANGED |
- IEEE80211_RC_BW_CHANGED, &stats->changed))
- mt7915_mcu_add_rate_ctrl(dev, vif, sta);
+ IEEE80211_RC_BW_CHANGED))
+ mt7915_mcu_add_rate_ctrl(dev, vif, sta);
- if (test_and_clear_bit(IEEE80211_RC_SMPS_CHANGED, &stats->changed))
- mt7915_mcu_add_smps(dev, vif, sta);
+ if (changed & IEEE80211_RC_SMPS_CHANGED)
+ mt7915_mcu_add_smps(dev, vif, sta);
+
+ spin_lock_bh(&dev->sta_poll_lock);
+ }
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
}
@@ -1335,6 +1391,11 @@ void mt7915_mac_work(struct work_struct *work)
mt7915_mac_update_mib_stats(phy);
}
+ if (++phy->sta_work_count == 10) {
+ phy->sta_work_count = 0;
+ mt7915_mac_sta_stats_work(phy);
+ }
+
mutex_unlock(&mdev->mutex);
ieee80211_queue_delayed_work(phy->mt76->hw, &phy->mac_work,
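Editor's note on the mac.c hunks above: mt7915_mac_sta_poll(), mt7915_mac_sta_stats_work() and mt7915_mac_sta_rc_work() all follow the same shape — splice the shared list onto a private head under the lock, then drop the lock around the per-entry work so firmware calls never run with the spinlock held. A minimal pthreads sketch of that drain pattern follows; the node/queue names are illustrative, and a toy singly-linked list stands in for the kernel's list_head.

/* Splice-then-drain: steal the whole list under the lock, work unlocked. */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *shared;	/* producer-filled list */

static void drain(void)
{
	struct node *local;

	pthread_mutex_lock(&lock);
	local = shared;		/* splice: take everything at once */
	shared = NULL;
	pthread_mutex_unlock(&lock);

	/* expensive per-entry work runs without the lock held */
	for (; local; local = local->next)
		printf("processing %d\n", local->id);
}

int main(void)
{
	struct node b = { NULL, 2 }, a = { &b, 1 };

	shared = &a;
	drain();
	return 0;
}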
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index f95a0b55c4a2..c48158392057 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -137,7 +137,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
goto out;
}
mvif->omac_idx = idx;
- mvif->dev = dev;
+ mvif->phy = phy;
mvif->band_idx = ext_phy;
if (ext_phy)
@@ -155,6 +155,8 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
idx = MT7915_WTBL_RESERVED - mvif->idx;
+ INIT_LIST_HEAD(&mvif->sta.rc_list);
+ INIT_LIST_HEAD(&mvif->sta.stats_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.ext_phy = mvif->band_idx;
@@ -167,7 +169,6 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->sta.wcid;
- mt76_txq_init(&dev->mt76, vif->txq);
}
out:
@@ -190,8 +191,6 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
mt7915_mcu_add_dev_info(dev, vif, false);
rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
- if (vif->txq)
- mt76_txq_remove(&dev->mt76, vif->txq);
mutex_lock(&dev->mt76.mutex);
phy->mt76->vif_mask &= ~BIT(mvif->idx);
@@ -493,9 +492,9 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
+ INIT_LIST_HEAD(&msta->rc_list);
+ INIT_LIST_HEAD(&msta->stats_list);
INIT_LIST_HEAD(&msta->poll_list);
- INIT_WORK(&msta->stats_work, mt7915_mac_sta_stats_work);
- spin_lock_init(&msta->ampdu_lock);
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
@@ -528,6 +527,10 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
+ if (!list_empty(&msta->stats_list))
+ list_del_init(&msta->stats_list);
+ if (!list_empty(&msta->rc_list))
+ list_del_init(&msta->rc_list);
spin_unlock_bh(&dev->sta_poll_lock);
}
@@ -603,23 +606,21 @@ mt7915_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_OPERATIONAL);
mt7915_mcu_add_tx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP);
+ clear_bit(tid, &msta->ampdu_state);
mt7915_mcu_add_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
- mtxq->agg_ssn = IEEE80211_SN_TO_SEQ(ssn);
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_START);
+ set_bit(tid, &msta->ampdu_state);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7915_set_aggr_state(msta, tid, MT7915_AGGR_STOP);
+ clear_bit(tid, &msta->ampdu_state);
mt7915_mcu_add_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -789,18 +790,16 @@ mt7915_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, sta->addr);
- if (!sta) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
+ spin_lock_bh(&dev->sta_poll_lock);
+ msta->stats.changed |= changed;
+ if (list_empty(&msta->rc_list))
+ list_add_tail(&msta->rc_list, &dev->sta_rc_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
- set_bit(changed, &msta->stats.changed);
- ieee80211_queue_work(hw, &msta->stats_work);
+ ieee80211_queue_work(hw, &dev->rc_work);
}
const struct ieee80211_ops mt7915_ops = {
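Editor's note on the main.c ampdu_action hunks above: the per-TID spinlock-protected enum array becomes a single unsigned long bitmap, where test_and_set_bit() both checks and claims a TID atomically before starting a BA session. A sketch of the same idea with C11 atomics (standing in for the kernel bitops, which this is not) is below; the TID width is illustrative.

/* One atomic bitmap instead of a locked enum array. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong ampdu_state;

/* Returns 1 if this caller newly claimed the TID, 0 if already set. */
static int claim_tid(unsigned int tid)
{
	unsigned long bit = 1UL << tid;
	unsigned long old = atomic_fetch_or(&ampdu_state, bit);

	return !(old & bit);
}

static void release_tid(unsigned int tid)
{
	atomic_fetch_and(&ampdu_state, ~(1UL << tid));
}

int main(void)
{
	printf("%d\n", claim_tid(3));	/* 1: start BA session */
	printf("%d\n", claim_tid(3));	/* 0: already in progress */
	release_tid(3);
	printf("%d\n", claim_tid(3));	/* 1 again after stop */
	return 0;
}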
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index eaed5ef05401..a3ccc1785661 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -522,6 +522,9 @@ mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
return;
wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ return;
+
msta = container_of(wcid, struct mt7915_sta, wcid);
stats = &msta->stats;
@@ -714,8 +717,8 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
ptlv = skb_put(skb, sub_len);
memcpy(ptlv, &tlv, sizeof(tlv));
- *sub_ntlv = cpu_to_le16(le16_to_cpu(*sub_ntlv) + 1);
- *len = cpu_to_le16(le16_to_cpu(*len) + sub_len);
+ le16_add_cpu(sub_ntlv, 1);
+ le16_add_cpu(len, sub_len);
return ptlv;
}
@@ -933,11 +936,11 @@ mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_he *)tlv;
- he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext * 4;
+ he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
if (!he->he_pe_duration)
he->he_pe_duration = DEFAULT_HE_PE_DURATION;
- he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th * 32);
+ he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
if (!he->he_rts_thres)
he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
@@ -947,6 +950,23 @@ mt7915_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
+mt7915_mcu_bss_hw_amsdu_tlv(struct sk_buff *skb)
+{
+#define TXD_CMP_MAP1 GENMASK(15, 0)
+#define TXD_CMP_MAP2 (GENMASK(31, 0) & ~BIT(23))
+ struct bss_info_hw_amsdu *amsdu;
+ struct tlv *tlv;
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_HW_AMSDU, sizeof(*amsdu));
+
+ amsdu = (struct bss_info_hw_amsdu *)tlv;
+ amsdu->cmp_bitmap_0 = cpu_to_le32(TXD_CMP_MAP1);
+ amsdu->cmp_bitmap_1 = cpu_to_le32(TXD_CMP_MAP2);
+ amsdu->trig_thres = cpu_to_le16(2);
+ amsdu->enable = true;
+}
+
+static void
mt7915_mcu_bss_ext_tlv(struct sk_buff *skb, struct mt7915_vif *mvif)
{
/* SIFS 20us + 512 byte beacon transmitted at 1Mbps (3906us) */
@@ -1020,6 +1040,7 @@ int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
mt7915_mcu_bss_rfch_tlv(skb, vif, phy);
mt7915_mcu_bss_bmc_tlv(skb, phy);
mt7915_mcu_bss_ra_tlv(skb, vif, phy);
+ mt7915_mcu_bss_hw_amsdu_tlv(skb);
if (vif->bss_conf.he_support)
mt7915_mcu_bss_he_tlv(skb, vif, phy);
@@ -1178,6 +1199,9 @@ mt7915_mcu_sta_ba(struct mt7915_dev *dev,
struct sk_buff *skb;
int ret;
+ if (enable && tx && !params->amsdu)
+ msta->wcid.amsdu = false;
+
skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
MT7915_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
@@ -1407,7 +1431,7 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
he->max_nss_mcs[CMD_HE_MCS_BW160] =
he_cap->he_mcs_nss_supp.rx_mcs_160;
- /* fall through */
+ fallthrough;
default:
he->max_nss_mcs[CMD_HE_MCS_BW80] =
he_cap->he_mcs_nss_supp.rx_mcs_80;
@@ -1441,6 +1465,38 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
+mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct sta_rec_uapsd *uapsd;
+ struct tlv *tlv;
+
+ if (vif->type != NL80211_IFTYPE_AP || !sta->wme)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_APPS, sizeof(*uapsd));
+ uapsd = (struct sta_rec_uapsd *)tlv;
+
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) {
+ uapsd->dac_map |= BIT(3);
+ uapsd->tac_map |= BIT(3);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) {
+ uapsd->dac_map |= BIT(2);
+ uapsd->tac_map |= BIT(2);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) {
+ uapsd->dac_map |= BIT(1);
+ uapsd->tac_map |= BIT(1);
+ }
+ if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) {
+ uapsd->dac_map |= BIT(0);
+ uapsd->tac_map |= BIT(0);
+ }
+ uapsd->max_sp = sta->max_sp;
+}
+
+static void
mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
@@ -1512,8 +1568,39 @@ mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
}
static void
+mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct sta_rec_amsdu *amsdu;
+ struct tlv *tlv;
+
+ if (!sta->max_amsdu_len)
+ return;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
+ amsdu = (struct sta_rec_amsdu *)tlv;
+ amsdu->max_amsdu_num = 8;
+ amsdu->amsdu_en = true;
+ amsdu->max_mpdu_size = sta->max_amsdu_len >=
+ IEEE80211_MAX_MPDU_LEN_VHT_7991;
+ msta->wcid.amsdu = true;
+}
+
+static bool
+mt7915_hw_amsdu_supported(struct ieee80211_vif *vif)
+{
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void
mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, struct ieee80211_vif *vif)
{
struct tlv *tlv;
@@ -1524,6 +1611,9 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
+
+ if (mt7915_hw_amsdu_supported(vif))
+ mt7915_mcu_sta_amsdu_tlv(skb, sta);
}
/* starec vht */
@@ -1540,6 +1630,9 @@ mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
/* starec he */
if (sta->he_cap.has_he)
mt7915_mcu_sta_he_tlv(skb, sta);
+
+ /* starec uapsd */
+ mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
}
static void
@@ -2176,7 +2269,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
if (enable && sta)
- mt7915_mcu_sta_tlv(dev, skb, sta);
+ mt7915_mcu_sta_tlv(dev, skb, sta, vif);
sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
@@ -2282,7 +2375,7 @@ mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
struct bss_info_bcn *bcn,
struct ieee80211_mutable_offsets *offs)
{
- if (offs->csa_counter_offs[0]) {
+ if (offs->cntdwn_counter_offs[0]) {
struct tlv *tlv;
struct bss_info_bcn_csa *csa;
@@ -2290,7 +2383,7 @@ mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
sizeof(*csa), &bcn->sub_ntlv,
&bcn->len);
csa = (struct bss_info_bcn_csa *)tlv;
- csa->cnt = skb->data[offs->csa_counter_offs[0]];
+ csa->cnt = skb->data[offs->cntdwn_counter_offs[0]];
}
}
@@ -2312,8 +2405,8 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
cont->tim_ofs = cpu_to_le16(offs->tim_offset);
- if (offs->csa_counter_offs[0])
- cont->csa_ofs = cpu_to_le16(offs->csa_counter_offs[0] - 4);
+ if (offs->cntdwn_counter_offs[0])
+ cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
buf = (u8 *)tlv + sizeof(*cont);
mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, NULL,
@@ -2335,14 +2428,6 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct bss_info_bcn *bcn;
int len = MT7915_BEACON_UPDATE_SIZE + MAX_BEACON_SIZE;
- rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
- if (IS_ERR(rskb))
- return PTR_ERR(rskb);
-
- tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
- bcn = (struct bss_info_bcn *)tlv;
- bcn->enable = en;
-
skb = ieee80211_beacon_get_template(hw, vif, &offs);
if (!skb)
return -EINVAL;
@@ -2353,6 +2438,16 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
return -EINVAL;
}
+ rskb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ if (IS_ERR(rskb)) {
+ dev_kfree_skb(skb);
+ return PTR_ERR(rskb);
+ }
+
+ tlv = mt7915_mcu_add_tlv(rskb, BSS_INFO_OFFLOAD, sizeof(*bcn));
+ bcn = (struct bss_info_bcn *)tlv;
+ bcn->enable = en;
+
if (mvif->band_idx) {
info = IEEE80211_SKB_CB(skb);
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
@@ -2901,6 +2996,7 @@ int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif)
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
struct edca *e = &req.edca[ac];
+ e->set = WMM_PARAM_SET;
e->queue = ac + mvif->wmm_idx * MT7915_MAX_WMM_SETS;
e->aifs = q->aifs;
e->txop = cpu_to_le16(q->txop);
@@ -3052,8 +3148,10 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
.channel_band = chandef->chan->band,
};
- if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
- chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
+ if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
+ req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
+ else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
+ chandef->chan->dfs_state != NL80211_DFS_AVAILABLE)
req.switch_reason = CH_SWITCH_DFS;
else
req.switch_reason = CH_SWITCH_NORMAL;
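Editor's note on the mcu.c TLV hunk above: le16_add_cpu() replaces the verbose cpu_to_le16(le16_to_cpu(x) + n) round trip when bumping the nested-TLV count and length fields in place. The sketch below shows what that helper does, using freestanding stand-ins for the kernel byte-order macros and assuming a little-endian host for simplicity — real code must use the kernel's own helpers.

/* Endian-safe in-place increment of a little-endian field. */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t le16;	/* stand-in for __le16 */

/* Little-endian host assumed: conversions are identity here. */
static inline uint16_t le16_to_cpu(le16 v) { return v; }
static inline le16 cpu_to_le16(uint16_t v) { return v; }

static inline void le16_add_cpu(le16 *var, uint16_t val)
{
	*var = cpu_to_le16(le16_to_cpu(*var) + val);
}

int main(void)
{
	le16 sub_ntlv = cpu_to_le16(3);
	le16 len = cpu_to_le16(40);

	le16_add_cpu(&sub_ntlv, 1);	/* one more nested TLV */
	le16_add_cpu(&len, 12);		/* account for its length */
	printf("%u %u\n", le16_to_cpu(sub_ntlv), le16_to_cpu(len));
	return 0;
}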
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index cb35e718409a..c656d66385c4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -402,6 +402,16 @@ struct bss_info_ra {
__le32 fast_interval;
} __packed;
+struct bss_info_hw_amsdu {
+ __le16 tag;
+ __le16 len;
+ __le32 cmp_bitmap_0;
+ __le32 cmp_bitmap_1;
+ __le16 trig_thres;
+ u8 enable;
+ u8 rsv;
+} __packed;
+
struct bss_info_he {
__le16 tag;
__le16 len;
@@ -645,6 +655,17 @@ struct sta_rec_vht {
u8 rsv[3];
} __packed;
+struct sta_rec_uapsd {
+ __le16 tag;
+ __le16 len;
+ u8 dac_map;
+ u8 tac_map;
+ u8 max_sp;
+ u8 rsv0;
+ __le16 listen_interval;
+ u8 rsv1[2];
+} __packed;
+
struct sta_rec_muru {
__le16 tag;
__le16 len;
@@ -725,6 +746,15 @@ struct sta_rec_ba {
__le16 winsize;
} __packed;
+struct sta_rec_amsdu {
+ __le16 tag;
+ __le16 len;
+ u8 max_amsdu_num;
+ u8 max_mpdu_size;
+ u8 amsdu_en;
+ u8 rsv;
+} __packed;
+
struct sec_key {
u8 cipher_id;
u8 cipher_len;
@@ -951,6 +981,8 @@ enum {
sizeof(struct sta_rec_he) + \
sizeof(struct sta_rec_ba) + \
sizeof(struct sta_rec_vht) + \
+ sizeof(struct sta_rec_uapsd) + \
+ sizeof(struct sta_rec_amsdu) + \
sizeof(struct tlv) + \
MT7915_WTBL_UPDATE_MAX_SIZE)
@@ -962,6 +994,7 @@ enum {
sizeof(struct bss_info_basic) +\
sizeof(struct bss_info_rf_ch) +\
sizeof(struct bss_info_ra) + \
+ sizeof(struct bss_info_hw_amsdu) +\
sizeof(struct bss_info_he) + \
sizeof(struct bss_info_bmc_rate) +\
sizeof(struct bss_info_ext_bss) +\
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index d8a13b4a2359..4b8908fa7eda 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -62,13 +62,6 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA,
};
-enum mt7915_ampdu_state {
- MT7915_AGGR_STOP,
- MT7915_AGGR_PROGRESS,
- MT7915_AGGR_START,
- MT7915_AGGR_OPERATIONAL
-};
-
struct mt7915_sta_stats {
struct rate_info prob_rate;
struct rate_info tx_rate;
@@ -83,14 +76,14 @@ struct mt7915_sta {
struct mt7915_vif *vif;
+ struct list_head stats_list;
struct list_head poll_list;
+ struct list_head rc_list;
u32 airtime_ac[8];
struct mt7915_sta_stats stats;
- struct work_struct stats_work;
- spinlock_t ampdu_lock;
- enum mt7915_ampdu_state ampdu_state[IEEE80211_NUM_TIDS];
+ unsigned long ampdu_state;
};
struct mt7915_vif {
@@ -100,7 +93,7 @@ struct mt7915_vif {
u8 wmm_idx;
struct mt7915_sta sta;
- struct mt7915_dev *dev;
+ struct mt7915_phy *phy;
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
};
@@ -135,9 +128,11 @@ struct mt7915_phy {
u32 ampdu_ref;
struct mib_stats mib;
+ struct list_head stats_list;
struct delayed_work mac_work;
u8 mac_work_count;
+ u8 sta_work_count;
};
struct mt7915_dev {
@@ -146,15 +141,18 @@ struct mt7915_dev {
struct mt76_phy mphy;
};
+ const struct mt76_bus_ops *bus_ops;
struct mt7915_phy phy;
u16 chainmask;
struct work_struct init_work;
+ struct work_struct rc_work;
struct work_struct reset_work;
wait_queue_head_t reset_wait;
u32 reset_state;
+ struct list_head sta_rc_list;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
@@ -260,26 +258,8 @@ mt7915_ext_phy(struct mt7915_dev *dev)
static inline u8 mt7915_lmac_mapping(struct mt7915_dev *dev, u8 ac)
{
- static const u8 lmac_queue_map[] = {
- [IEEE80211_AC_BK] = MT_LMAC_AC00,
- [IEEE80211_AC_BE] = MT_LMAC_AC01,
- [IEEE80211_AC_VI] = MT_LMAC_AC02,
- [IEEE80211_AC_VO] = MT_LMAC_AC03,
- };
-
- if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
- return MT_LMAC_AC01; /* BE */
-
- return lmac_queue_map[ac];
-}
-
-static inline void
-mt7915_set_aggr_state(struct mt7915_sta *msta, u8 tid,
- enum mt7915_ampdu_state state)
-{
- spin_lock_bh(&msta->ampdu_lock);
- msta->ampdu_state[tid] = state;
- spin_unlock_bh(&msta->ampdu_lock);
+ /* LMAC uses the reverse order of mac80211 AC indexes */
+ return 3 - ac;
}
extern const struct ieee80211_ops mt7915_ops;
@@ -448,7 +428,6 @@ mt7915_l2_rmw(struct mt7915_dev *dev, u32 addr, u32 mask, u32 val)
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
-void mt7915_mac_sta_poll(struct mt7915_dev *dev);
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key, bool beacon);
@@ -461,13 +440,12 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
-void mt7915_mac_sta_stats_work(struct work_struct *work);
+void mt7915_mac_sta_rc_work(struct work_struct *work);
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
struct mt76_tx_info *tx_info);
-void mt7915_tx_complete_skb(struct mt76_dev *mdev, enum mt76_txq_id qid,
- struct mt76_queue_entry *e);
+void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7915_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
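Editor's note on the mt7915.h hunk above: mt7915_lmac_mapping() collapses the lookup table into the arithmetic 3 - ac, which works because mac80211 orders ACs VO=0, VI=1, BE=2, BK=3 while the LMAC queues run the other way (AC00..AC03 for BK..VO). A small check that the formula reproduces the table it replaced, with enum values mirroring the kernel's:

/* Verify 3 - ac matches the removed lmac_queue_map[] table. */
#include <assert.h>
#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };			/* mac80211 order */
enum { LMAC_AC00, LMAC_AC01, LMAC_AC02, LMAC_AC03 };	/* LMAC order */

static const unsigned char lmac_queue_map[] = {
	[AC_BK] = LMAC_AC00,
	[AC_BE] = LMAC_AC01,
	[AC_VI] = LMAC_AC02,
	[AC_VO] = LMAC_AC03,
};

int main(void)
{
	for (unsigned int ac = 0; ac < 4; ac++)
		assert(lmac_queue_map[ac] == 3 - ac);
	puts("table and 3 - ac agree");
	return 0;
}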
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 0ec4e184b889..fe62b4d853e4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -29,9 +29,10 @@ mt7915_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
{
struct mt7915_dev *dev = dev_instance;
- u32 intr;
+ u32 intr, mask;
intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
+ intr &= dev->mt76.mmio.irqmask;
mt76_wr(dev, MT_INT_SOURCE_CSR, intr);
if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
@@ -39,27 +40,23 @@ static irqreturn_t mt7915_irq_handler(int irq, void *dev_instance)
trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);
- intr &= dev->mt76.mmio.irqmask;
+ mask = intr & MT_INT_RX_DONE_ALL;
+ if (intr & MT_INT_TX_DONE_MCU)
+ mask |= MT_INT_TX_DONE_MCU;
- if (intr & MT_INT_TX_DONE_ALL) {
- mt7915_irq_disable(dev, MT_INT_TX_DONE_ALL);
+ mt7915_irq_disable(dev, mask);
+
+ if (intr & MT_INT_TX_DONE_MCU)
napi_schedule(&dev->mt76.tx_napi);
- }
- if (intr & MT_INT_RX_DONE_DATA) {
- mt7915_irq_disable(dev, MT_INT_RX_DONE_DATA);
+ if (intr & MT_INT_RX_DONE_DATA)
napi_schedule(&dev->mt76.napi[0]);
- }
- if (intr & MT_INT_RX_DONE_WM) {
- mt7915_irq_disable(dev, MT_INT_RX_DONE_WM);
+ if (intr & MT_INT_RX_DONE_WM)
napi_schedule(&dev->mt76.napi[1]);
- }
- if (intr & MT_INT_RX_DONE_WA) {
- mt7915_irq_disable(dev, MT_INT_RX_DONE_WA);
+ if (intr & MT_INT_RX_DONE_WA)
napi_schedule(&dev->mt76.napi[2]);
- }
if (intr & MT_INT_MCU_CMD) {
u32 val = mt76_rr(dev, MT_MCU_CMD);
@@ -103,7 +100,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
+ MT_DRV_AMSDU_OFFLOAD,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
@@ -149,6 +147,8 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
(mt7915_l1_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mt76_wr(dev, MT_INT_MASK_CSR, 0);
+
/* master switch of PCIe interrupt enable */
mt7915_l1_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
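Editor's note on the pci.c hunks above: the reworked top half reads the cause register, keeps only currently enabled sources, acks them, disables the serviced bits in one batch, and schedules the NAPI bottom halves that will re-enable them. A toy sketch of that flow with an invented two-register fake MMIO array (offsets and bit names are illustrative, not mt7915 registers):

/* IRQ top half: read, mask, ack, batch-disable, schedule BHs. */
#include <stdint.h>
#include <stdio.h>

#define REG_INT_SOURCE 0
#define REG_INT_MASK   1

#define INT_RX  (1u << 0)
#define INT_TX  (1u << 1)

static uint32_t mmio[2];
static uint32_t irqmask = INT_RX | INT_TX;

static uint32_t rr(int r)	  { return mmio[r]; }
static void wr(int r, uint32_t v) { mmio[r] = v; }

static void irq_handler(void)
{
	uint32_t intr = rr(REG_INT_SOURCE);

	intr &= irqmask;		/* ignore masked-off sources */
	wr(REG_INT_SOURCE, intr);	/* ack what we saw */

	irqmask &= ~intr;		/* disable until the BH re-enables */
	wr(REG_INT_MASK, irqmask);

	if (intr & INT_RX)
		puts("schedule rx napi");
	if (intr & INT_TX)
		puts("schedule tx napi");
}

int main(void)
{
	mmio[REG_INT_SOURCE] = INT_RX;	/* pretend RX fired */
	irq_handler();
	return 0;
}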
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index e0989141d9da..64327153b7fa 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -313,9 +313,17 @@
#define MT_INT_RX_DONE_WA BIT(1)
#define MT_INT_RX_DONE(_n) ((_n) ? BIT((_n) - 1) : BIT(16))
#define MT_INT_RX_DONE_ALL (BIT(0) | BIT(1) | BIT(16))
-#define MT_INT_TX_DONE_ALL (BIT(15) | GENMASK(27, 26) | BIT(30))
+#define MT_INT_TX_DONE_MCU_WA BIT(15)
+#define MT_INT_TX_DONE_FWDL BIT(26)
+#define MT_INT_TX_DONE_MCU_WM BIT(27)
+#define MT_INT_TX_DONE_BAND0 BIT(30)
+#define MT_INT_TX_DONE_BAND1 BIT(31)
#define MT_INT_MCU_CMD BIT(29)
+#define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WA | \
+ MT_INT_TX_DONE_MCU_WM | \
+ MT_INT_TX_DONE_FWDL)
+
#define MT_WFDMA_EXT_CSR_HIF_MISC MT_WFDMA_EXT_CSR(0x44)
#define MT_WFDMA_EXT_CSR_HIF_MISC_BUSY BIT(0)
@@ -352,6 +360,13 @@
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
#define MT_HIF_REMAP_BASE_L2 0x00000
+#define MT_SWDEF_BASE 0x41f200
+#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
+#define MT_SWDEF_MODE MT_SWDEF(0x3c)
+#define MT_SWDEF_NORMAL_MODE 0
+#define MT_SWDEF_ICAP_MODE 1
+#define MT_SWDEF_SPECTRUM_MODE 2
+
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index d2b38ed7f3b4..9a4d95a2a707 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -42,15 +42,13 @@ static int mt76s_alloc_tx(struct mt76_dev *dev)
int i;
for (i = 0; i < MT_TXQ_MCU_WA; i++) {
- INIT_LIST_HEAD(&dev->q_tx[i].swq);
-
q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
if (!q)
return -ENOMEM;
spin_lock_init(&q->lock);
q->hw_idx = i;
- dev->q_tx[i].q = q;
+ dev->q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -68,6 +66,10 @@ void mt76s_stop_txrx(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
+ cancel_work_sync(&sdio->tx.xmit_work);
+ cancel_work_sync(&sdio->tx.status_work);
+ cancel_work_sync(&sdio->rx.recv_work);
+ cancel_work_sync(&sdio->rx.net_work);
cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
@@ -94,8 +96,8 @@ mt76s_get_next_rx_entry(struct mt76_queue *q)
spin_lock_bh(&q->lock);
if (q->queued > 0) {
- e = &q->entry[q->head];
- q->head = (q->head + 1) % q->ndesc;
+ e = &q->entry[q->tail];
+ q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
}
spin_unlock_bh(&q->lock);
@@ -129,38 +131,26 @@ mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
return nframes;
}
-static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
+static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- u32 n_dequeued = 0, n_sw_dequeued = 0;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_queue_entry entry;
- struct mt76_queue *q = sq->q;
bool wake;
- while (q->queued > n_dequeued) {
- if (!q->entry[q->head].done)
+ while (q->queued > 0) {
+ if (!q->entry[q->tail].done)
break;
- if (q->entry[q->head].schedule) {
- q->entry[q->head].schedule = false;
- n_sw_dequeued++;
- }
-
- entry = q->entry[q->head];
- q->entry[q->head].done = false;
- q->head = (q->head + 1) % q->ndesc;
- n_dequeued++;
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
- if (qid == MT_TXQ_MCU)
+ if (qid == MT_TXQ_MCU) {
dev_kfree_skb(entry.skb);
- else
- dev->drv->tx_complete_skb(dev, qid, &entry);
- }
-
- spin_lock_bh(&q->lock);
+ entry.skb = NULL;
+ }
- sq->swq_queued -= n_sw_dequeued;
- q->queued -= n_dequeued;
+ mt76_queue_tx_complete(dev, q, &entry);
+ }
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
@@ -169,19 +159,13 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
if (qid == MT_TXQ_MCU)
- goto out;
+ return;
mt76_txq_schedule(&dev->phy, qid);
if (wake)
ieee80211_wake_queue(dev->hw, qid);
-
- wake_up_process(dev->sdio.tx_kthread);
-out:
- return n_dequeued;
}
static void mt76s_tx_status_data(struct work_struct *work)
@@ -214,12 +198,12 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
int err, len = skb->len;
- u16 idx = q->tail;
+ u16 idx = q->head;
if (q->queued == q->ndesc)
return -ENOSPC;
@@ -229,9 +213,9 @@ mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
if (err < 0)
return err;
- q->entry[q->tail].skb = tx_info.skb;
- q->entry[q->tail].buf_sz = len;
- q->tail = (q->tail + 1) % q->ndesc;
+ q->entry[q->head].skb = tx_info.skb;
+ q->entry[q->head].buf_sz = len;
+ q->head = (q->head + 1) % q->ndesc;
q->queued++;
return idx;
@@ -241,25 +225,31 @@ static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, u32 tx_info)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
- int ret = -ENOSPC, len = skb->len;
+ struct mt76_queue *q = dev->q_tx[qid];
+ int ret = -ENOSPC, len = skb->len, pad;
- spin_lock_bh(&q->lock);
if (q->queued == q->ndesc)
- goto out;
+ goto error;
- ret = mt76_skb_adjust_pad(skb);
+ pad = round_up(skb->len, 4) - skb->len;
+ ret = mt76_skb_adjust_pad(skb, pad);
if (ret)
- goto out;
+ goto error;
- q->entry[q->tail].buf_sz = len;
- q->entry[q->tail].skb = skb;
- q->tail = (q->tail + 1) % q->ndesc;
+ spin_lock_bh(&q->lock);
+
+ q->entry[q->head].buf_sz = len;
+ q->entry[q->head].skb = skb;
+ q->head = (q->head + 1) % q->ndesc;
q->queued++;
-out:
spin_unlock_bh(&q->lock);
+ return 0;
+
+error:
+ dev_kfree_skb(skb);
+
return ret;
}
@@ -267,7 +257,7 @@ static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
struct mt76_sdio *sdio = &dev->sdio;
- wake_up_process(sdio->tx_kthread);
+ queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
}
static const struct mt76_queue_ops sdio_queue_ops = {
@@ -276,41 +266,37 @@ static const struct mt76_queue_ops sdio_queue_ops = {
.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};
-static int mt76s_kthread_run(void *data)
+static void mt76s_tx_work(struct work_struct *work)
{
- struct mt76_dev *dev = data;
- struct mt76_phy *mphy = &dev->phy;
-
- while (!kthread_should_stop()) {
- int i, nframes = 0;
-
- cond_resched();
-
- /* rx processing */
- local_bh_disable();
- rcu_read_lock();
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ tx.status_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i;
- mt76_for_each_q_rx(dev, i)
- nframes += mt76s_process_rx_queue(dev, &dev->q_rx[i]);
+ for (i = 0; i < MT_TXQ_MCU_WA; i++)
+ mt76s_process_tx_queue(dev, i);
- rcu_read_unlock();
- local_bh_enable();
+ if (dev->drv->tx_status_data &&
+ !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
+ queue_work(dev->wq, &dev->sdio.stat_work);
+}
- /* tx processing */
- for (i = 0; i < MT_TXQ_MCU_WA; i++)
- nframes += mt76s_process_tx_queue(dev, i);
+static void mt76s_rx_work(struct work_struct *work)
+{
+ struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
+ rx.net_work);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
+ int i;
- if (dev->drv->tx_status_data &&
- !test_and_set_bit(MT76_READING_STATS, &mphy->state))
- queue_work(dev->wq, &dev->sdio.stat_work);
+ /* rx processing */
+ local_bh_disable();
+ rcu_read_lock();
- if (!nframes || !test_bit(MT76_STATE_RUNNING, &mphy->state)) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- }
- }
+ mt76_for_each_q_rx(dev, i)
+ mt76s_process_rx_queue(dev, &dev->q_rx[i]);
- return 0;
+ rcu_read_unlock();
+ local_bh_enable();
}
void mt76s_deinit(struct mt76_dev *dev)
@@ -318,9 +304,11 @@ void mt76s_deinit(struct mt76_dev *dev)
struct mt76_sdio *sdio = &dev->sdio;
int i;
- kthread_stop(sdio->kthread);
- kthread_stop(sdio->tx_kthread);
mt76s_stop_txrx(dev);
+ if (sdio->txrx_wq) {
+ destroy_workqueue(sdio->txrx_wq);
+ sdio->txrx_wq = NULL;
+ }
sdio_claim_host(sdio->func);
sdio_release_irq(sdio->func);
@@ -348,11 +336,15 @@ int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
{
struct mt76_sdio *sdio = &dev->sdio;
- sdio->kthread = kthread_create(mt76s_kthread_run, dev, "mt76s");
- if (IS_ERR(sdio->kthread))
- return PTR_ERR(sdio->kthread);
+ sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
+ WQ_UNBOUND | WQ_HIGHPRI,
+ WQ_UNBOUND_MAX_ACTIVE);
+ if (!sdio->txrx_wq)
+ return -ENOMEM;
INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
+ INIT_WORK(&sdio->tx.status_work, mt76s_tx_work);
+ INIT_WORK(&sdio->rx.net_work, mt76s_rx_work);
mutex_init(&sdio->sched.lock);
dev->queue_ops = &sdio_queue_ops;
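Editor's note on the sdio.c hunks above: the index swaps settle on one ring convention across mt76 — the producer advances q->head when queueing and the consumer advances q->tail when completing, with q->queued as the fill count. A toy fixed-size ring showing that convention (single producer/consumer, no locking; sizes are illustrative):

/* head = producer index, tail = consumer index, queued = fill count. */
#include <stdio.h>

#define NDESC 4

struct ring {
	int entry[NDESC];
	unsigned int head, tail, queued;
};

static int ring_push(struct ring *q, int v)
{
	if (q->queued == NDESC)
		return -1;		/* full */
	q->entry[q->head] = v;
	q->head = (q->head + 1) % NDESC;
	q->queued++;
	return 0;
}

static int ring_pop(struct ring *q, int *v)
{
	if (!q->queued)
		return -1;		/* empty */
	*v = q->entry[q->tail];
	q->tail = (q->tail + 1) % NDESC;
	q->queued--;
	return 0;
}

int main(void)
{
	struct ring q = { { 0 } };
	int v;

	ring_push(&q, 1);
	ring_push(&q, 2);
	while (!ring_pop(&q, &v))
		printf("%d\n", v);
	return 0;
}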
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 75bb02cdfdae..883f59c7a7e4 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -29,11 +29,12 @@ void mt76_testmode_tx_pending(struct mt76_dev *dev)
return;
qid = skb_get_queue_mapping(skb);
- q = dev->q_tx[qid].q;
+ q = dev->q_tx[qid];
spin_lock_bh(&q->lock);
- while (td->tx_pending > 0 && q->queued < q->ndesc / 2) {
+ while (td->tx_pending > 0 && td->tx_queued - td->tx_done < 1000 &&
+ q->queued < q->ndesc / 2) {
int ret;
ret = dev->queue_ops->tx_queue_skb(dev, qid, skb_get(skb), wcid, NULL);
@@ -160,7 +161,7 @@ mt76_testmode_tx_start(struct mt76_dev *dev)
td->tx_queued = 0;
td->tx_done = 0;
td->tx_pending = td->tx_count;
- tasklet_schedule(&dev->tx_tasklet);
+ mt76_worker_schedule(&dev->tx_worker);
}
static void
@@ -168,11 +169,11 @@ mt76_testmode_tx_stop(struct mt76_dev *dev)
{
struct mt76_testmode_data *td = &dev->test;
- tasklet_disable(&dev->tx_tasklet);
+ mt76_worker_disable(&dev->tx_worker);
td->tx_pending = 0;
- tasklet_enable(&dev->tx_tasklet);
+ mt76_worker_enable(&dev->tx_worker);
wait_event_timeout(dev->tx_wait, td->tx_done == td->tx_queued, 10 * HZ);
@@ -442,9 +443,13 @@ int mt76_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
mutex_lock(&dev->mutex);
if (tb[MT76_TM_ATTR_STATS]) {
+ err = -EINVAL;
+
a = nla_nest_start(msg, MT76_TM_ATTR_STATS);
- err = mt76_testmode_dump_stats(dev, msg);
- nla_nest_end(msg, a);
+ if (a) {
+ err = mt76_testmode_dump_stats(dev, msg);
+ nla_nest_end(msg, a);
+ }
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 3afd89ecd6c9..44ef4bc7a46e 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -5,75 +5,6 @@
#include "mt76.h"
-static struct mt76_txwi_cache *
-mt76_alloc_txwi(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t;
- dma_addr_t addr;
- u8 *txwi;
- int size;
-
- size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
- txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
- if (!txwi)
- return NULL;
-
- addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
- DMA_TO_DEVICE);
- t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
- t->dma_addr = addr;
-
- return t;
-}
-
-static struct mt76_txwi_cache *
-__mt76_get_txwi(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t = NULL;
-
- spin_lock_bh(&dev->lock);
- if (!list_empty(&dev->txwi_cache)) {
- t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
- list);
- list_del(&t->list);
- }
- spin_unlock_bh(&dev->lock);
-
- return t;
-}
-
-struct mt76_txwi_cache *
-mt76_get_txwi(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t = __mt76_get_txwi(dev);
-
- if (t)
- return t;
-
- return mt76_alloc_txwi(dev);
-}
-
-void
-mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
-{
- if (!t)
- return;
-
- spin_lock_bh(&dev->lock);
- list_add(&t->list, &dev->txwi_cache);
- spin_unlock_bh(&dev->lock);
-}
-EXPORT_SYMBOL_GPL(mt76_put_txwi);
-
-void mt76_tx_free(struct mt76_dev *dev)
-{
- struct mt76_txwi_cache *t;
-
- while ((t = __mt76_get_txwi(dev)) != NULL)
- dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
- DMA_TO_DEVICE);
-}
-
static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
@@ -83,17 +14,27 @@ mt76_txq_get_qid(struct ieee80211_txq *txq)
return txq->ac;
}
-static void
-mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
+void
+mt76_tx_check_agg_ssn(struct ieee80211_sta *sta, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_txq *txq;
+ struct mt76_txq *mtxq;
+ u8 tid;
- if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ if (!sta || !ieee80211_is_data_qos(hdr->frame_control) ||
!ieee80211_is_data_present(hdr->frame_control))
return;
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ txq = sta->txq[tid];
+ mtxq = (struct mt76_txq *)txq->drv_priv;
+ if (!mtxq->aggr)
+ return;
+
mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
+EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
@@ -231,7 +172,32 @@ mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
-void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
+static void
+mt76_tx_check_non_aql(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_wcid *wcid;
+ int pending;
+
+ if (info->tx_time_est)
+ return;
+
+ if (wcid_idx >= ARRAY_SIZE(dev->wcid))
+ return;
+
+ rcu_read_lock();
+
+ wcid = rcu_dereference(dev->wcid[wcid_idx]);
+ if (wcid) {
+ pending = atomic_dec_return(&wcid->non_aql_packets);
+ if (pending < 0)
+ atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
+ }
+
+ rcu_read_unlock();
+}
+
+void mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb)
{
struct ieee80211_hw *hw;
struct sk_buff_head list;
@@ -244,6 +210,8 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
}
#endif
+ mt76_tx_check_non_aql(dev, wcid_idx, skb);
+
if (!skb->prev) {
hw = mt76_tx_status_get_hw(dev, skb);
ieee80211_free_txskb(hw, skb);
@@ -256,6 +224,32 @@ void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
+static int
+__mt76_tx_queue_skb(struct mt76_dev *dev, int qid, struct sk_buff *skb,
+ struct mt76_wcid *wcid, struct ieee80211_sta *sta,
+ bool *stop)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct mt76_queue *q;
+ bool non_aql;
+ int pending;
+ int idx;
+
+ non_aql = !info->tx_time_est;
+ idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+ if (idx < 0 || !sta || !non_aql)
+ return idx;
+
+ wcid = (struct mt76_wcid *)sta->drv_priv;
+ q = dev->q_tx[qid];
+ q->entry[idx].wcid = wcid->idx;
+ pending = atomic_inc_return(&wcid->non_aql_packets);
+ if (stop && pending >= MT_MAX_NON_AQL_PKT)
+ *stop = true;
+
+ return idx;
+}
+
void
mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
struct mt76_wcid *wcid, struct sk_buff *skb)
@@ -288,26 +282,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
- if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
- struct ieee80211_txq *txq;
- struct mt76_txq *mtxq;
- u8 tid;
-
- tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- txq = sta->txq[tid];
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- if (mtxq->aggr)
- mt76_check_agg_ssn(mtxq, skb);
- }
-
if (ext_phy)
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
- q = dev->q_tx[qid].q;
+ q = dev->q_tx[qid];
spin_lock_bh(&q->lock);
- dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
+ __mt76_tx_queue_skb(dev, qid, skb, wcid, sta, NULL);
dev->queue_ops->kick(dev, q);
if (q->queued > q->ndesc - 8 && !q->stopped) {
@@ -320,23 +301,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_tx);
static struct sk_buff *
-mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq, bool ps)
+mt76_txq_dequeue(struct mt76_phy *phy, struct mt76_txq *mtxq)
{
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
struct ieee80211_tx_info *info;
bool ext_phy = phy != &phy->dev->phy;
struct sk_buff *skb;
- skb = skb_dequeue(&mtxq->retry_q);
- if (skb) {
- u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
-
- if (ps && skb_queue_empty(&mtxq->retry_q))
- ieee80211_sta_set_buffered(txq->sta, tid, false);
-
- return skb;
- }
-
skb = ieee80211_tx_dequeue(phy->hw, txq);
if (!skb)
return NULL;
@@ -361,7 +332,7 @@ mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
IEEE80211_TX_CTL_REQ_TX_STATUS;
mt76_skb_set_moredata(skb, !last);
- dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
+ __mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta, NULL);
}
void
@@ -373,7 +344,7 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
struct mt76_phy *phy = hw->priv;
struct mt76_dev *dev = phy->dev;
struct sk_buff *last_skb = NULL;
- struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
+ struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD];
int i;
spin_lock_bh(&hwq->lock);
@@ -386,13 +357,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
continue;
do {
- skb = mt76_txq_dequeue(phy, mtxq, true);
+ skb = mt76_txq_dequeue(phy, mtxq);
if (!skb)
break;
- if (mtxq->aggr)
- mt76_check_agg_ssn(mtxq, skb);
-
nframes--;
if (last_skb)
mt76_queue_ps_skb(dev, sta, last_skb, false);
@@ -413,26 +381,26 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
static int
-mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
+mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_queue *q,
struct mt76_txq *mtxq)
{
struct mt76_dev *dev = phy->dev;
struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
enum mt76_txq_id qid = mt76_txq_get_qid(txq);
struct mt76_wcid *wcid = mtxq->wcid;
- struct mt76_queue *hwq = sq->q;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
- int n_frames = 1, limit;
- struct ieee80211_tx_rate tx_rate;
- bool ampdu;
- bool probe;
+ int n_frames = 1;
+ bool stop = false;
int idx;
if (test_bit(MT_WCID_FLAG_PS, &wcid->flags))
return 0;
- skb = mt76_txq_dequeue(phy, mtxq, false);
+ if (atomic_read(&wcid->non_aql_packets) >= MT_MAX_NON_AQL_PKT)
+ return 0;
+
+ skb = mt76_txq_dequeue(phy, mtxq);
if (!skb)
return 0;
@@ -440,62 +408,39 @@ mt76_txq_send_burst(struct mt76_phy *phy, struct mt76_sw_queue *sq,
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
info->control.rates, 1);
- tx_rate = info->control.rates[0];
-
- probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
- ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
- limit = ampdu ? 16 : 3;
-
- if (ampdu)
- mt76_check_agg_ssn(mtxq, skb);
-
- idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);
+ idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
if (idx < 0)
return idx;
do {
- bool cur_ampdu;
+ if (test_bit(MT76_STATE_PM, &phy->state) ||
+ test_bit(MT76_RESET, &phy->state))
+ return -EBUSY;
- if (probe)
+ if (stop)
break;
- if (test_bit(MT76_RESET, &phy->state))
- return -EBUSY;
+ if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+ break;
- skb = mt76_txq_dequeue(phy, mtxq, false);
+ skb = mt76_txq_dequeue(phy, mtxq);
if (!skb)
break;
info = IEEE80211_SKB_CB(skb);
- cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
-
- if (ampdu != cur_ampdu ||
- (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
- skb_queue_tail(&mtxq->retry_q, skb);
- break;
- }
+ if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
+ ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
+ info->control.rates, 1);
- info->control.rates[0] = tx_rate;
-
- if (cur_ampdu)
- mt76_check_agg_ssn(mtxq, skb);
-
- idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
- txq->sta);
+ idx = __mt76_tx_queue_skb(dev, qid, skb, wcid, txq->sta, &stop);
if (idx < 0)
- return idx;
+ break;
n_frames++;
- } while (n_frames < limit);
-
- if (!probe) {
- hwq->entry[idx].qid = sq - dev->q_tx;
- hwq->entry[idx].schedule = true;
- sq->swq_queued++;
- }
+ } while (1);
- dev->queue_ops->kick(dev, hwq);
+ dev->queue_ops->kick(dev, q);
return n_frames;
}
@@ -504,23 +449,23 @@ static int
mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
{
struct mt76_dev *dev = phy->dev;
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
- struct mt76_queue *hwq = sq->q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct ieee80211_txq *txq;
struct mt76_txq *mtxq;
struct mt76_wcid *wcid;
int ret = 0;
- spin_lock_bh(&hwq->lock);
+ spin_lock_bh(&q->lock);
while (1) {
- if (sq->swq_queued >= 4)
- break;
-
- if (test_bit(MT76_RESET, &phy->state)) {
+ if (test_bit(MT76_STATE_PM, &phy->state) ||
+ test_bit(MT76_RESET, &phy->state)) {
ret = -EBUSY;
break;
}
+ if (q->queued + MT_TXQ_FREE_THR >= q->ndesc)
+ break;
+
txq = ieee80211_next_txq(phy->hw, qid);
if (!txq)
break;
@@ -538,32 +483,26 @@ mt76_txq_schedule_list(struct mt76_phy *phy, enum mt76_txq_id qid)
u8 tid = txq->tid;
mtxq->send_bar = false;
- spin_unlock_bh(&hwq->lock);
+ spin_unlock_bh(&q->lock);
ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
- spin_lock_bh(&hwq->lock);
+ spin_lock_bh(&q->lock);
}
- ret += mt76_txq_send_burst(phy, sq, mtxq);
- ieee80211_return_txq(phy->hw, txq,
- !skb_queue_empty(&mtxq->retry_q));
+ ret += mt76_txq_send_burst(phy, q, mtxq);
+ ieee80211_return_txq(phy->hw, txq, false);
}
- spin_unlock_bh(&hwq->lock);
+ spin_unlock_bh(&q->lock);
return ret;
}
void mt76_txq_schedule(struct mt76_phy *phy, enum mt76_txq_id qid)
{
- struct mt76_dev *dev = phy->dev;
- struct mt76_sw_queue *sq = &dev->q_tx[qid];
int len;
if (qid >= 4)
return;
- if (sq->swq_queued >= 4)
- return;
-
rcu_read_lock();
do {
@@ -585,9 +524,9 @@ void mt76_txq_schedule_all(struct mt76_phy *phy)
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
-void mt76_tx_tasklet(unsigned long data)
+void mt76_tx_worker(struct mt76_worker *w)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
mt76_txq_schedule_all(&dev->phy);
if (dev->phy2)
@@ -612,8 +551,8 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
if (!txq)
continue;
+ hwq = dev->q_tx[mt76_txq_get_qid(txq)];
mtxq = (struct mt76_txq *)txq->drv_priv;
- hwq = mtxq->swq->q;
spin_lock_bh(&hwq->lock);
mtxq->send_bar = mtxq->aggr && send_bar;
@@ -630,38 +569,10 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
if (!test_bit(MT76_STATE_RUNNING, &phy->state))
return;
- tasklet_schedule(&dev->tx_tasklet);
+ mt76_worker_schedule(&dev->tx_worker);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
-void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
-{
- struct ieee80211_hw *hw;
- struct mt76_txq *mtxq;
- struct sk_buff *skb;
-
- if (!txq)
- return;
-
- mtxq = (struct mt76_txq *)txq->drv_priv;
-
- while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) {
- hw = mt76_tx_status_get_hw(dev, skb);
- ieee80211_free_txskb(hw, skb);
- }
-}
-EXPORT_SYMBOL_GPL(mt76_txq_remove);
-
-void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
-{
- struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
-
- skb_queue_head_init(&mtxq->retry_q);
-
- mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
-}
-EXPORT_SYMBOL_GPL(mt76_txq_init);
-
u8 mt76_ac_to_hwq(u8 ac)
{
static const u8 wmm_queue_map[] = {
@@ -678,13 +589,9 @@ u8 mt76_ac_to_hwq(u8 ac)
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
-int mt76_skb_adjust_pad(struct sk_buff *skb)
+int mt76_skb_adjust_pad(struct sk_buff *skb, int pad)
{
struct sk_buff *iter, *last = skb;
- u32 pad;
-
- /* Add zero pad of 4 - 7 bytes */
- pad = round_up(skb->len, 4) + 4 - skb->len;
/* First packet of an A-MSDU burst keeps track of the whole burst
* length, need to update length of it and the last packet.
@@ -706,3 +613,16 @@ int mt76_skb_adjust_pad(struct sk_buff *skb)
return 0;
}
EXPORT_SYMBOL_GPL(mt76_skb_adjust_pad);
+
+void mt76_queue_tx_complete(struct mt76_dev *dev, struct mt76_queue *q,
+ struct mt76_queue_entry *e)
+{
+ if (e->skb)
+ dev->drv->tx_complete_skb(dev, e);
+
+ spin_lock_bh(&q->lock);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ spin_unlock_bh(&q->lock);
+}
+EXPORT_SYMBOL_GPL(mt76_queue_tx_complete);
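The mt76_queue_tx_complete() helper added above centralizes the consumer-side bookkeeping (advancing q->tail and decrementing q->queued under q->lock) after the per-entry skb completion. A minimal sketch of the completion loop a bus backend can build on it; the function name is illustrative, only the helper comes from this patch:

static void example_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_queue_entry entry;

	while (q->queued > 0 && q->entry[q->tail].done) {
		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		/* frees entry.skb via drv->tx_complete_skb(), then
		 * advances q->tail / q->queued under q->lock
		 */
		mt76_queue_tx_complete(dev, q, &entry);
	}
}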
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dcab5993763a..7d3f0a2e5fa0 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -497,8 +497,8 @@ mt76u_get_next_rx_entry(struct mt76_queue *q)
spin_lock_irqsave(&q->lock, flags);
if (q->queued > 0) {
- urb = q->entry[q->head].urb;
- q->head = (q->head + 1) % q->ndesc;
+ urb = q->entry[q->tail].urb;
+ q->tail = (q->tail + 1) % q->ndesc;
q->queued--;
}
spin_unlock_irqrestore(&q->lock, flags);
@@ -616,16 +616,16 @@ static void mt76u_complete_rx(struct urb *urb)
default:
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
urb->status);
- /* fall through */
+ fallthrough;
case 0:
break;
}
spin_lock_irqsave(&q->lock, flags);
- if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
+ if (WARN_ONCE(q->entry[q->head].urb != urb, "rx urb mismatch"))
goto out;
- q->tail = (q->tail + 1) % q->ndesc;
+ q->head = (q->head + 1) % q->ndesc;
q->queued++;
tasklet_schedule(&dev->usb.rx_tasklet);
out:
@@ -792,43 +792,27 @@ int mt76u_resume_rx(struct mt76_dev *dev)
}
EXPORT_SYMBOL_GPL(mt76u_resume_rx);
-static void mt76u_tx_tasklet(unsigned long data)
+static void mt76u_tx_worker(struct mt76_worker *w)
{
- struct mt76_dev *dev = (struct mt76_dev *)data;
+ struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);
struct mt76_queue_entry entry;
- struct mt76_sw_queue *sq;
struct mt76_queue *q;
bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 n_dequeued = 0, n_sw_dequeued = 0;
-
- sq = &dev->q_tx[i];
- q = sq->q;
+ q = dev->q_tx[i];
- while (q->queued > n_dequeued) {
- if (!q->entry[q->head].done)
+ while (q->queued > 0) {
+ if (!q->entry[q->tail].done)
break;
- if (q->entry[q->head].schedule) {
- q->entry[q->head].schedule = false;
- n_sw_dequeued++;
- }
-
- entry = q->entry[q->head];
- q->entry[q->head].done = false;
- q->head = (q->head + 1) % q->ndesc;
- n_dequeued++;
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
- dev->drv->tx_complete_skb(dev, i, &entry);
+ mt76_queue_tx_complete(dev, q, &entry);
}
- spin_lock_bh(&q->lock);
-
- sq->swq_queued -= n_sw_dequeued;
- q->queued -= n_dequeued;
-
wake = q->stopped && q->queued < q->ndesc - 8;
if (wake)
q->stopped = false;
@@ -836,8 +820,6 @@ static void mt76u_tx_tasklet(unsigned long data)
if (!q->queued)
wake_up(&dev->tx_wait);
- spin_unlock_bh(&q->lock);
-
mt76_txq_schedule(&dev->phy, i);
if (dev->drv->tx_status_data &&
@@ -882,7 +864,7 @@ static void mt76u_complete_tx(struct urb *urb)
dev_err(dev->dev, "tx urb failed: %d\n", urb->status);
e->done = true;
- tasklet_schedule(&dev->tx_tasklet);
+ mt76_worker_schedule(&dev->tx_worker);
}
static int
@@ -909,11 +891,11 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
struct sk_buff *skb, struct mt76_wcid *wcid,
struct ieee80211_sta *sta)
{
- struct mt76_queue *q = dev->q_tx[qid].q;
+ struct mt76_queue *q = dev->q_tx[qid];
struct mt76_tx_info tx_info = {
.skb = skb,
};
- u16 idx = q->tail;
+ u16 idx = q->head;
int err;
if (q->queued == q->ndesc)
@@ -932,7 +914,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
q->entry[idx].urb, mt76u_complete_tx,
&q->entry[idx]);
- q->tail = (q->tail + 1) % q->ndesc;
+ q->head = (q->head + 1) % q->ndesc;
q->entry[idx].skb = tx_info.skb;
q->queued++;
@@ -944,7 +926,7 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
struct urb *urb;
int err;
- while (q->first != q->tail) {
+ while (q->first != q->head) {
urb = q->entry[q->first].urb;
trace_submit_urb(dev, urb);
@@ -987,10 +969,8 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
int i, j, err;
for (i = 0; i <= MT_TXQ_PSD; i++) {
- INIT_LIST_HEAD(&dev->q_tx[i].swq);
-
if (i >= IEEE80211_NUM_ACS) {
- dev->q_tx[i].q = dev->q_tx[0].q;
+ dev->q_tx[i] = dev->q_tx[0];
continue;
}
@@ -1000,7 +980,7 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
spin_lock_init(&q->lock);
q->hw_idx = mt76u_ac_to_hwq(dev, i);
- dev->q_tx[i].q = q;
+ dev->q_tx[i] = q;
q->entry = devm_kcalloc(dev->dev,
MT_NUM_TX_ENTRIES, sizeof(*q->entry),
@@ -1027,7 +1007,7 @@ static void mt76u_free_tx(struct mt76_dev *dev)
struct mt76_queue *q;
int j;
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
@@ -1040,6 +1020,8 @@ void mt76u_stop_tx(struct mt76_dev *dev)
{
int ret;
+ mt76_worker_disable(&dev->tx_worker);
+
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
HZ / 5);
if (!ret) {
@@ -1050,7 +1032,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
dev_err(dev->dev, "timed out waiting for pending tx\n");
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
@@ -1058,32 +1040,26 @@ void mt76u_stop_tx(struct mt76_dev *dev)
usb_kill_urb(q->entry[j].urb);
}
- tasklet_kill(&dev->tx_tasklet);
-
/* On device removal we might queue skb's, but mt76u_tx_kick()
* will fail to submit urb, cleanup those skb's manually.
*/
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- q = dev->q_tx[i].q;
+ q = dev->q_tx[i];
if (!q)
continue;
- /* Assure we are in sync with killed tasklet. */
- spin_lock_bh(&q->lock);
- while (q->queued) {
- entry = q->entry[q->head];
- q->head = (q->head + 1) % q->ndesc;
- q->queued--;
+ while (q->queued > 0) {
+ entry = q->entry[q->tail];
+ q->entry[q->tail].done = false;
- dev->drv->tx_complete_skb(dev, i, &entry);
- }
- spin_unlock_bh(&q->lock);
+ mt76_queue_tx_complete(dev, q, &entry);
}
}
cancel_work_sync(&dev->usb.stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
+ mt76_worker_enable(&dev->tx_worker);
+
mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
@@ -1133,8 +1109,8 @@ int mt76u_init(struct mt76_dev *dev,
mt76u_ops.rmw = ext ? mt76u_rmw_ext : mt76u_rmw;
mt76u_ops.write_copy = ext ? mt76u_copy_ext : mt76u_copy;
+ dev->tx_worker.fn = mt76u_tx_worker;
tasklet_init(&usb->rx_tasklet, mt76u_rx_tasklet, (unsigned long)dev);
- tasklet_init(&dev->tx_tasklet, mt76u_tx_tasklet, (unsigned long)dev);
INIT_WORK(&usb->stat_work, mt76u_tx_status_data);
usb->data_len = usb_maxpacket(udev, usb_sndctrlpipe(udev, 0), 1);
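The head/tail swaps in this file settle mt76's USB queues on a single ring convention: q->head is where the producer writes the next entry, q->tail is where the consumer reaps, and q->queued counts entries in flight. A sketch of the invariant, with hypothetical helper names:

static u16 example_ring_produce(struct mt76_queue *q)
{
	u16 idx = q->head;		/* submit path fills here */

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;
	return idx;
}

static u16 example_ring_consume(struct mt76_queue *q)
{
	u16 idx = q->tail;		/* completion path reaps here */

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;
	return idx;
}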
diff --git a/drivers/net/wireless/mediatek/mt76/util.c b/drivers/net/wireless/mediatek/mt76/util.c
index f53bb4ae5001..581964425468 100644
--- a/drivers/net/wireless/mediatek/mt76/util.c
+++ b/drivers/net/wireless/mediatek/mt76/util.c
@@ -110,4 +110,32 @@ int mt76_get_min_avg_rssi(struct mt76_dev *dev, bool ext_phy)
}
EXPORT_SYMBOL_GPL(mt76_get_min_avg_rssi);
+int __mt76_worker_fn(void *ptr)
+{
+ struct mt76_worker *w = ptr;
+
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (kthread_should_park()) {
+ kthread_parkme();
+ continue;
+ }
+
+ if (!test_and_clear_bit(MT76_WORKER_SCHEDULED, &w->state)) {
+ schedule();
+ continue;
+ }
+
+ set_bit(MT76_WORKER_RUNNING, &w->state);
+ set_current_state(TASK_RUNNING);
+ w->fn(w);
+ cond_resched();
+ clear_bit(MT76_WORKER_RUNNING, &w->state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__mt76_worker_fn);
+
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index fd1a68820e0a..1c363ea9ab9c 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -10,6 +10,19 @@
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <net/mac80211.h>
+
+struct mt76_worker
+{
+ struct task_struct *task;
+ void (*fn)(struct mt76_worker *);
+ unsigned long state;
+};
+
+enum {
+ MT76_WORKER_SCHEDULED,
+ MT76_WORKER_RUNNING,
+};
#define MT76_INCR(_var, _size) \
(_var = (((_var) + 1) % (_size)))
@@ -45,4 +58,67 @@ mt76_skb_set_moredata(struct sk_buff *skb, bool enable)
hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_MOREDATA);
}
+int __mt76_worker_fn(void *ptr);
+
+static inline int
+mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
+ void (*fn)(struct mt76_worker *),
+ const char *name)
+{
+ const char *dev_name = wiphy_name(hw->wiphy);
+ int ret;
+
+ if (fn)
+ w->fn = fn;
+ w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s",
+ name, dev_name);
+
+ ret = PTR_ERR_OR_ZERO(w->task);
+ if (ret) {
+ w->task = NULL;
+ return ret;
+ }
+
+ wake_up_process(w->task);
+
+ return 0;
+}
+
+static inline void mt76_worker_schedule(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ if (!test_and_set_bit(MT76_WORKER_SCHEDULED, &w->state) &&
+ !test_bit(MT76_WORKER_RUNNING, &w->state))
+ wake_up_process(w->task);
+}
+
+static inline void mt76_worker_disable(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_park(w->task);
+ WRITE_ONCE(w->state, 0);
+}
+
+static inline void mt76_worker_enable(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_unpark(w->task);
+ mt76_worker_schedule(w);
+}
+
+static inline void mt76_worker_teardown(struct mt76_worker *w)
+{
+ if (!w->task)
+ return;
+
+ kthread_stop(w->task);
+ w->task = NULL;
+}
+
#endif
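Taken together, the mt76_worker helpers replace the tx tasklet with a dedicated kthread that can be parked around resets and power-management transitions. A sketch of the intended lifecycle; the calling driver code is hypothetical, only the mt76_worker_* API comes from this patch:

static void example_tx_work(struct mt76_worker *w)
{
	struct mt76_dev *dev = container_of(w, struct mt76_dev, tx_worker);

	mt76_txq_schedule_all(&dev->phy);
}

static int example_init(struct ieee80211_hw *hw, struct mt76_dev *dev)
{
	/* spawns and wakes a "mt76-tx <phy>" kthread */
	int ret = mt76_worker_setup(hw, &dev->tx_worker, example_tx_work, "tx");

	if (ret)
		return ret;

	mt76_worker_schedule(&dev->tx_worker);	/* e.g. from a completion handler */
	mt76_worker_disable(&dev->tx_worker);	/* park across a hardware reset */
	mt76_worker_enable(&dev->tx_worker);	/* unpark and reschedule */
	mt76_worker_teardown(&dev->tx_worker);	/* stop the kthread on unload */
	return 0;
}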
diff --git a/drivers/net/wireless/mediatek/mt7601u/debugfs.c b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
index 300242bce799..20669eacb66e 100644
--- a/drivers/net/wireless/mediatek/mt7601u/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt7601u/debugfs.c
@@ -30,7 +30,7 @@ mt76_reg_get(void *data, u64 *val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n");
static int
-mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
+mt7601u_ampdu_stat_show(struct seq_file *file, void *data)
{
struct mt7601u_dev *dev = file->private;
int i, j;
@@ -73,21 +73,10 @@ mt7601u_ampdu_stat_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7601u_ampdu_stat_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7601u_ampdu_stat_read, inode->i_private);
-}
-
-static const struct file_operations fops_ampdu_stat = {
- .open = mt7601u_ampdu_stat_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7601u_ampdu_stat);
static int
-mt7601u_eeprom_param_read(struct seq_file *file, void *data)
+mt7601u_eeprom_param_show(struct seq_file *file, void *data)
{
struct mt7601u_dev *dev = file->private;
struct mt7601u_rate_power *rp = &dev->ee->power_rate_table;
@@ -131,18 +120,7 @@ mt7601u_eeprom_param_read(struct seq_file *file, void *data)
return 0;
}
-static int
-mt7601u_eeprom_param_open(struct inode *inode, struct file *f)
-{
- return single_open(f, mt7601u_eeprom_param_read, inode->i_private);
-}
-
-static const struct file_operations fops_eeprom_param = {
- .open = mt7601u_eeprom_param_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(mt7601u_eeprom_param);
void mt7601u_init_debugfs(struct mt7601u_dev *dev)
{
@@ -157,6 +135,6 @@ void mt7601u_init_debugfs(struct mt7601u_dev *dev)
debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
debugfs_create_file("regval", 0600, dir, dev, &fops_regval);
- debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
- debugfs_create_file("eeprom_param", 0400, dir, dev, &fops_eeprom_param);
+ debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7601u_ampdu_stat_fops);
+ debugfs_create_file("eeprom_param", 0400, dir, dev, &mt7601u_eeprom_param_fops);
}
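DEFINE_SHOW_ATTRIBUTE(name), from <linux/seq_file.h>, generates the single_open() open handler and the name##_fops table from a name##_show() function, which is why the functions above are renamed to *_show and the hand-rolled boilerplate deleted. A minimal sketch with a placeholder name:

static int foo_show(struct seq_file *m, void *data)
{
	seq_puts(m, "hello\n");
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(foo);	/* defines foo_open() and foo_fops */

/* usage: debugfs_create_file("foo", 0400, dir, dev, &foo_fops); */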
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index f6a0454abe04..09f931d4598c 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -196,7 +196,7 @@ static void mt7601u_complete_rx(struct urb *urb)
default:
dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
urb->status);
- /* fall through */
+ fallthrough;
case 0:
break;
}
@@ -241,7 +241,7 @@ static void mt7601u_complete_tx(struct urb *urb)
default:
dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
urb->status);
- /* fall through */
+ fallthrough;
case 0:
break;
}
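The comment-to-keyword conversions here and in the following files swap annotations the compiler ignores for the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which resolves to __attribute__((__fallthrough__)) where supported so -Wimplicit-fallthrough can verify intent. Minimal sketch:

static void example_complete(struct urb *urb)
{
	switch (urb->status) {
	case -ENOENT:
		return;
	default:
		pr_err("urb failed: %d\n", urb->status);
		fallthrough;	/* checked by the compiler, unlike a comment */
	case 0:
		break;
	}
}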
diff --git a/drivers/net/wireless/mediatek/mt7601u/mac.c b/drivers/net/wireless/mediatek/mt7601u/mac.c
index cad5e81fcf77..d2ee1aaa3c81 100644
--- a/drivers/net/wireless/mediatek/mt7601u/mac.c
+++ b/drivers/net/wireless/mediatek/mt7601u/mac.c
@@ -45,7 +45,7 @@ mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate)
return;
case MT_PHY_TYPE_HT_GF:
txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
txrate->flags |= IEEE80211_TX_RC_MCS;
txrate->idx = idx;
@@ -419,7 +419,7 @@ mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate)
return;
case MT_PHY_TYPE_HT_GF:
status->enc_flags |= RX_ENC_FLAG_HT_GF;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_HT:
status->encoding = RX_ENC_HT;
status->rate_idx = idx;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index d863ab4a66c9..28db24a2b5e5 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -787,7 +787,7 @@ mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate)
switch (phy_mode) {
case MT_PHY_TYPE_OFDM:
tx_rate += 4;
- /* fall through */
+ fallthrough;
case MT_PHY_TYPE_CCK:
reg = dev->rf_pa_mode[0];
break;
@@ -1210,7 +1210,7 @@ void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path)
/**
* mt7601u_set_tx_dac - set which tx DAC to use
* @dev: pointer to adapter structure
- * @path: DAC index, values are 0-based
+ * @dac: DAC index, values are 0-based
*/
void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac)
{
diff --git a/drivers/net/wireless/microchip/wilc1000/mon.c b/drivers/net/wireless/microchip/wilc1000/mon.c
index 358ac8601333..b5a1b65c087c 100644
--- a/drivers/net/wireless/microchip/wilc1000/mon.c
+++ b/drivers/net/wireless/microchip/wilc1000/mon.c
@@ -235,11 +235,10 @@ struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl,
if (register_netdevice(wl->monitor_dev)) {
netdev_err(real_dev, "register_netdevice failed\n");
+ free_netdev(wl->monitor_dev);
return NULL;
}
priv = netdev_priv(wl->monitor_dev);
- if (!priv)
- return NULL;
priv->real_ndev = real_dev;
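The wilc1000 fix above follows two general rules: the caller owns a net_device until register_netdevice() succeeds, and netdev_priv() returns embedded storage that can never be NULL (hence the dead check is dropped). A sketch of the pattern with illustrative names:

struct example_priv {
	struct net_device *real_ndev;
};

static struct net_device *example_create_mon(void)
{
	struct net_device *ndev;

	ndev = alloc_netdev(sizeof(struct example_priv), "mon%d",
			    NET_NAME_UNKNOWN, ether_setup);
	if (!ndev)
		return NULL;

	if (register_netdevice(ndev)) {	/* caller holds the RTNL */
		free_netdev(ndev);	/* not freed automatically on failure */
		return NULL;
	}

	return ndev;	/* netdev_priv(ndev) is embedded, never NULL */
}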
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 3ece7b0b0392..351ff909ab1c 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -149,9 +149,10 @@ static int wilc_sdio_probe(struct sdio_func *func,
wilc->dev = &func->dev;
wilc->rtc_clk = devm_clk_get(&func->card->dev, "rtc");
- if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER)
+ if (PTR_ERR_OR_ZERO(wilc->rtc_clk) == -EPROBE_DEFER) {
+ kfree(sdio_priv);
return -EPROBE_DEFER;
- else if (!IS_ERR(wilc->rtc_clk))
+ } else if (!IS_ERR(wilc->rtc_clk))
clk_prepare_enable(wilc->rtc_clk);
dev_info(&func->dev, "Driver Initializing success\n");
diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c
index f40d8c3c3d9e..f3ccbd2b1084 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/commands.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c
@@ -869,6 +869,7 @@ int qtnf_cmd_send_del_intf(struct qtnf_vif *vif)
default:
pr_warn("VIF%u.%u: unsupported iftype %d\n", vif->mac->macid,
vif->vifid, vif->wdev.iftype);
+ dev_kfree_skb(cmd_skb);
ret = -EINVAL;
goto out;
}
@@ -1924,6 +1925,7 @@ int qtnf_cmd_send_change_sta(struct qtnf_vif *vif, const u8 *mac,
break;
default:
pr_err("unsupported iftype %d\n", vif->wdev.iftype);
+ dev_kfree_skb(cmd_skb);
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index e013ebe3079c..bf6dbeb61842 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -15,7 +15,6 @@
#include "util.h"
#include "switchdev.h"
-#define QTNF_DMP_MAX_LEN 48
#define QTNF_PRIMARY_VIF_IDX 0
static bool slave_radar = true;
@@ -140,34 +139,13 @@ static void qtnf_netdev_get_stats64(struct net_device *ndev,
struct rtnl_link_stats64 *stats)
{
struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
- unsigned int start;
- int cpu;
netdev_stats_to_stats64(stats, &ndev->stats);
if (!vif->stats64)
return;
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats64;
- u64 rx_packets, rx_bytes;
- u64 tx_packets, tx_bytes;
-
- stats64 = per_cpu_ptr(vif->stats64, cpu);
-
- do {
- start = u64_stats_fetch_begin_irq(&stats64->syncp);
- rx_packets = stats64->rx_packets;
- rx_bytes = stats64->rx_bytes;
- tx_packets = stats64->tx_packets;
- tx_bytes = stats64->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, vif->stats64);
}
/* Netdev handler for transmission timeout.
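dev_fetch_sw_netstats() folds the per-CPU pcpu_sw_netstats counters, including the u64_stats syncp retry loop, into a rtnl_link_stats64, replacing the open-coded loop deleted above. A sketch of a ndo_get_stats64 handler built on it; the priv layout is illustrative:

struct example_priv {
	struct pcpu_sw_netstats __percpu *stats64;
};

static void example_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(ndev);

	netdev_stats_to_stats64(stats, &ndev->stats);
	if (priv->stats64)
		/* sums rx/tx packet and byte counters across all CPUs */
		dev_fetch_sw_netstats(stats, priv->stats64);
}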
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index eb67b66b846b..9a20c0f29078 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -1091,9 +1091,9 @@ fw_load_exit:
put_device(&pdev->dev);
}
-static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data)
+static void qtnf_pearl_reclaim_tasklet_fn(struct tasklet_struct *t)
{
- struct qtnf_pcie_pearl_state *ps = (void *)data;
+ struct qtnf_pcie_pearl_state *ps = from_tasklet(ps, t, base.reclaim_tq);
qtnf_pearl_data_tx_reclaim(ps);
qtnf_en_txdone_irq(ps);
@@ -1145,8 +1145,7 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
return ret;
}
- tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn,
- (unsigned long)ps);
+ tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
netif_napi_add(&bus->mux_dev, &bus->mux_napi,
qtnf_pcie_pearl_rx_poll, 10);
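tasklet_setup() passes the tasklet_struct itself to the callback, and from_tasklet() (a typed container_of()) recovers the enclosing state, so the tasklet must be embedded in that state struct rather than smuggled through an unsigned long. This is the same conversion applied to qtnfmac and rt2x00 below; a sketch with illustrative names:

struct example_state {
	struct tasklet_struct reclaim_tq;
};

static void example_reclaim(struct tasklet_struct *t)
{
	struct example_state *st = from_tasklet(st, t, reclaim_tq);

	/* reclaim tx descriptors for st here */
}

static void example_setup(struct example_state *st)
{
	tasklet_setup(&st->reclaim_tq, example_reclaim);
	tasklet_schedule(&st->reclaim_tq);
}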
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index d1b850aa4657..4b87d3151017 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -1105,9 +1105,9 @@ fw_load_exit:
put_device(&pdev->dev);
}
-static void qtnf_reclaim_tasklet_fn(unsigned long data)
+static void qtnf_reclaim_tasklet_fn(struct tasklet_struct *t)
{
- struct qtnf_pcie_topaz_state *ts = (void *)data;
+ struct qtnf_pcie_topaz_state *ts = from_tasklet(ts, t, base.reclaim_tq);
qtnf_topaz_data_tx_reclaim(ts);
}
@@ -1158,8 +1158,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
return ret;
}
- tasklet_init(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn,
- (unsigned long)ts);
+ tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
netif_napi_add(&bus->mux_dev, &bus->mux_napi,
qtnf_topaz_rx_poll, 10);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
index c1ac933349d1..8f860c14da58 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c
@@ -1291,7 +1291,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Fall through - this is a failed frame! */
+ fallthrough; /* this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
@@ -1319,9 +1319,10 @@ static inline void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-static void rt2400pci_txstatus_tasklet(unsigned long data)
+static void rt2400pci_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
u32 reg;
/*
@@ -1347,17 +1348,18 @@ static void rt2400pci_txstatus_tasklet(unsigned long data)
}
}
-static void rt2400pci_tbtt_tasklet(unsigned long data)
+static void rt2400pci_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
rt2x00lib_beacondone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
}
-static void rt2400pci_rxdone_tasklet(unsigned long data)
+static void rt2400pci_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
index 0859adebd860..e940443c52ad 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c
@@ -1419,7 +1419,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
break;
case 2: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Fall through - this is a failed frame! */
+ fallthrough; /* this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
@@ -1447,9 +1447,10 @@ static inline void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-static void rt2500pci_txstatus_tasklet(unsigned long data)
+static void rt2500pci_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
u32 reg;
/*
@@ -1475,17 +1476,18 @@ static void rt2500pci_txstatus_tasklet(unsigned long data)
}
}
-static void rt2500pci_tbtt_tasklet(unsigned long data)
+static void rt2500pci_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
rt2x00lib_beacondone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE);
}
-static void rt2500pci_rxdone_tasklet(unsigned long data)
+static void rt2500pci_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index a779fe771a55..fed6d21cd6ce 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -928,7 +928,7 @@ static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc,
switch (rt2x00_get_field32(status, TX_STA_FIFO_PHYMODE)) {
case RATE_MODE_HT_GREENFIELD:
flags |= IEEE80211_TX_RC_GREEN_FIELD;
- /* fall through */
+ fallthrough;
case RATE_MODE_HT_MIX:
flags |= IEEE80211_TX_RC_MCS;
break;
@@ -2567,7 +2567,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
break;
@@ -2576,7 +2576,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
break;
@@ -2768,10 +2768,10 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
break;
@@ -2780,10 +2780,10 @@ static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
break;
@@ -3005,10 +3005,10 @@ static void rt2800_config_channel_rf3853(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
break;
@@ -3017,10 +3017,10 @@ static void rt2800_config_channel_rf3853(struct rt2x00_dev *rt2x00dev,
switch (rt2x00dev->default_ant.rx_chain_num) {
case 3:
rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
- /* fallthrough */
+ fallthrough;
case 2:
rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
- /* fallthrough */
+ fallthrough;
case 1:
rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
break;
@@ -4216,14 +4216,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN,
rf->channel <= 14);
- /* fall-through */
+ fallthrough;
case 2:
/* Turn on secondary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN,
rf->channel > 14);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN,
rf->channel <= 14);
- /* fall-through */
+ fallthrough;
case 1:
/* Turn on primary PAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN,
@@ -4241,12 +4241,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
/* Turn on tertiary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
- /* fall-through */
+ fallthrough;
case 2:
/* Turn on secondary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
- /* fall-through */
+ fallthrough;
case 1:
/* Turn on primary LNAs */
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
@@ -5438,10 +5438,10 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
- /* fall through */
+ fallthrough;
case 1:
default:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1);
@@ -5451,10 +5451,10 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
switch (rt2x00dev->default_ant.tx_chain_num) {
case 3:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN, 1);
- /* fall through */
+ fallthrough;
case 2:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
- /* fall through */
+ fallthrough;
case 1:
default:
rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, 1);
@@ -10100,10 +10100,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
switch (rx_chains) {
case 3:
spec->ht.mcs.rx_mask[2] = 0xff;
- /* fall through */
+ fallthrough;
case 2:
spec->ht.mcs.rx_mask[1] = 0xff;
- /* fall through */
+ fallthrough;
case 1:
spec->ht.mcs.rx_mask[0] = 0xff;
spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
index 110bb391c372..862098f753d2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
@@ -210,18 +210,19 @@ static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-void rt2800mmio_pretbtt_tasklet(unsigned long data)
+void rt2800mmio_pretbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ pretbtt_tasklet);
rt2x00lib_pretbtt(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT);
}
EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet);
-void rt2800mmio_tbtt_tasklet(unsigned long data)
+void rt2800mmio_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
u32 reg;
@@ -254,9 +255,10 @@ void rt2800mmio_tbtt_tasklet(unsigned long data)
}
EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet);
-void rt2800mmio_rxdone_tasklet(unsigned long data)
+void rt2800mmio_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
@@ -264,9 +266,10 @@ void rt2800mmio_rxdone_tasklet(unsigned long data)
}
EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet);
-void rt2800mmio_autowake_tasklet(unsigned long data)
+void rt2800mmio_autowake_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ autowake_tasklet);
rt2800mmio_wakeup(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt2800mmio_enable_interrupt(rt2x00dev,
@@ -307,9 +310,10 @@ static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev)
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
}
-void rt2800mmio_txstatus_tasklet(unsigned long data)
+void rt2800mmio_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
rt2800_txdone(rt2x00dev, 16);
@@ -593,7 +597,6 @@ void rt2800mmio_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
index adcd9d54ac1c..05708950f24d 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800mmio.h
@@ -126,11 +126,11 @@ void rt2800mmio_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc);
/* Interrupt functions */
-void rt2800mmio_txstatus_tasklet(unsigned long data);
-void rt2800mmio_pretbtt_tasklet(unsigned long data);
-void rt2800mmio_tbtt_tasklet(unsigned long data);
-void rt2800mmio_rxdone_tasklet(unsigned long data);
-void rt2800mmio_autowake_tasklet(unsigned long data);
+void rt2800mmio_txstatus_tasklet(struct tasklet_struct *t);
+void rt2800mmio_pretbtt_tasklet(struct tasklet_struct *t);
+void rt2800mmio_tbtt_tasklet(struct tasklet_struct *t);
+void rt2800mmio_rxdone_tasklet(struct tasklet_struct *t);
+void rt2800mmio_autowake_tasklet(struct tasklet_struct *t);
irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance);
void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index 4cc64fe481a7..d08b251ec5a2 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -746,7 +746,6 @@ static void rt2800usb_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index ecc60d8cbb01..780be81863b6 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -518,11 +518,11 @@ struct rt2x00lib_ops {
/*
* TX status tasklet handler.
*/
- void (*txstatus_tasklet) (unsigned long data);
- void (*pretbtt_tasklet) (unsigned long data);
- void (*tbtt_tasklet) (unsigned long data);
- void (*rxdone_tasklet) (unsigned long data);
- void (*autowake_tasklet) (unsigned long data);
+ void (*txstatus_tasklet) (struct tasklet_struct *t);
+ void (*pretbtt_tasklet) (struct tasklet_struct *t);
+ void (*tbtt_tasklet) (struct tasklet_struct *t);
+ void (*rxdone_tasklet) (struct tasklet_struct *t);
+ void (*autowake_tasklet) (struct tasklet_struct *t);
/*
* Device init handlers.
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 8c6d3099b19d..b04f76551ca4 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -1167,9 +1167,8 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
*/
#define RT2X00_TASKLET_INIT(taskletname) \
if (rt2x00dev->ops->lib->taskletname) { \
- tasklet_init(&rt2x00dev->taskletname, \
- rt2x00dev->ops->lib->taskletname, \
- (unsigned long)rt2x00dev); \
+ tasklet_setup(&rt2x00dev->taskletname, \
+ rt2x00dev->ops->lib->taskletname); \
}
RT2X00_TASKLET_INIT(txstatus_tasklet);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index eefce76fc99b..02da5dd37ddd 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -2130,7 +2130,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
break;
case 6: /* Failure, excessive retries */
__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
- /* Fall through - this is a failed frame! */
+ fallthrough; /* this is a failed frame! */
default: /* Failure */
__set_bit(TXDONE_FAILURE, &txdesc.flags);
}
@@ -2190,34 +2190,38 @@ static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev,
spin_unlock_irq(&rt2x00dev->irqmask_lock);
}
-static void rt61pci_txstatus_tasklet(unsigned long data)
+static void rt61pci_txstatus_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ txstatus_tasklet);
+
rt61pci_txdone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE);
}
-static void rt61pci_tbtt_tasklet(unsigned long data)
+static void rt61pci_tbtt_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet);
rt2x00lib_beacondone(rt2x00dev);
if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE);
}
-static void rt61pci_rxdone_tasklet(unsigned long data)
+static void rt61pci_rxdone_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ rxdone_tasklet);
if (rt2x00mmio_rxdone(rt2x00dev))
tasklet_schedule(&rt2x00dev->rxdone_tasklet);
else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE);
}
-static void rt61pci_autowake_tasklet(unsigned long data)
+static void rt61pci_autowake_tasklet(struct tasklet_struct *t)
{
- struct rt2x00_dev *rt2x00dev = (struct rt2x00_dev *)data;
+ struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
+ autowake_tasklet);
rt61pci_wakeup(rt2x00dev);
rt2x00mmio_register_write(rt2x00dev,
M2H_CMD_DONE_CSR, 0xffffffff);
@@ -2953,7 +2957,6 @@ static void rt61pci_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index e908c303b677..e69793773d87 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -2373,7 +2373,6 @@ static void rt73usb_queue_init(struct data_queue *queue)
break;
case QID_ATIM:
- /* fallthrough */
default:
BUG();
break;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index ba3286f732cc..2477e18c7cae 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -260,20 +260,20 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;
- mapping = pci_map_single(priv->pdev,
- skb_tail_pointer(new_skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ mapping = dma_map_single(&priv->pdev->dev,
+ skb_tail_pointer(new_skb),
+ MAX_RX_SIZE, DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ if (dma_mapping_error(&priv->pdev->dev, mapping)) {
kfree_skb(new_skb);
dev_err(&priv->pdev->dev, "RX DMA map error\n");
goto done;
}
- pci_unmap_single(priv->pdev,
+ dma_unmap_single(&priv->pdev->dev,
*((dma_addr_t *)skb->cb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ MAX_RX_SIZE, DMA_FROM_DEVICE);
skb_put(skb, flags & 0xFFF);
rx_status.antenna = (flags2 >> 15) & 1;
@@ -355,8 +355,8 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
ring->idx = (ring->idx + 1) % ring->entries;
skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pdev->dev, le32_to_cpu(entry->tx_buf),
+ skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
@@ -473,10 +473,10 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
prio = skb_get_queue_mapping(skb);
ring = &priv->tx_ring[prio];
- mapping = pci_map_single(priv->pdev, skb->data,
- skb->len, PCI_DMA_TODEVICE);
+ mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
- if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ if (dma_mapping_error(&priv->pdev->dev, mapping)) {
kfree_skb(skb);
dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
return;
@@ -1004,8 +1004,9 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
else
priv->rx_ring_sz = sizeof(struct rtl8180_rx_desc);
- priv->rx_ring = pci_zalloc_consistent(priv->pdev, priv->rx_ring_sz * 32,
- &priv->rx_ring_dma);
+ priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev,
+ priv->rx_ring_sz * 32,
+ &priv->rx_ring_dma, GFP_KERNEL);
if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
wiphy_err(dev->wiphy, "Cannot allocate RX ring\n");
return -ENOMEM;
@@ -1018,20 +1019,23 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
dma_addr_t *mapping;
entry = priv->rx_ring + priv->rx_ring_sz*i;
if (!skb) {
- pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&priv->pdev->dev,
+ priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
return -ENOMEM;
}
priv->rx_buf[i] = skb;
mapping = (dma_addr_t *)skb->cb;
- *mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ *mapping = dma_map_single(&priv->pdev->dev,
+ skb_tail_pointer(skb), MAX_RX_SIZE,
+ DMA_FROM_DEVICE);
- if (pci_dma_mapping_error(priv->pdev, *mapping)) {
+ if (dma_mapping_error(&priv->pdev->dev, *mapping)) {
kfree_skb(skb);
- pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&priv->pdev->dev,
+ priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
return -ENOMEM;
}
@@ -1054,14 +1058,13 @@ static void rtl8180_free_rx_ring(struct ieee80211_hw *dev)
if (!skb)
continue;
- pci_unmap_single(priv->pdev,
- *((dma_addr_t *)skb->cb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&priv->pdev->dev, *((dma_addr_t *)skb->cb),
+ MAX_RX_SIZE, DMA_FROM_DEVICE);
kfree_skb(skb);
}
- pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
- priv->rx_ring, priv->rx_ring_dma);
+ dma_free_coherent(&priv->pdev->dev, priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
priv->rx_ring = NULL;
}
@@ -1073,8 +1076,8 @@ static int rtl8180_init_tx_ring(struct ieee80211_hw *dev,
dma_addr_t dma;
int i;
- ring = pci_zalloc_consistent(priv->pdev, sizeof(*ring) * entries,
- &dma);
+ ring = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ring) * entries,
+ &dma, GFP_KERNEL);
if (!ring || (unsigned long)ring & 0xFF) {
wiphy_err(dev->wiphy, "Cannot allocate TX ring (prio = %d)\n",
prio);
@@ -1103,14 +1106,15 @@ static void rtl8180_free_tx_ring(struct ieee80211_hw *dev, unsigned int prio)
struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(priv->pdev, le32_to_cpu(entry->tx_buf),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&priv->pdev->dev, le32_to_cpu(entry->tx_buf),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
- pci_free_consistent(priv->pdev, sizeof(*ring->desc)*ring->entries,
- ring->desc, ring->dma);
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(*ring->desc) * ring->entries, ring->desc,
+ ring->dma);
ring->desc = NULL;
}
@@ -1754,8 +1758,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
goto err_free_reg;
}
- if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
- (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+ if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) ||
+ (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
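The rtl8180 conversion is mechanical: each legacy pci_* DMA wrapper maps onto the generic dma_* API with &pdev->dev as the device, PCI_DMA_* directions replaced by DMA_*, and an explicit GFP_KERNEL for coherent allocations. A sketch of the streaming-mapping half with illustrative names:

static int example_map_tx(struct pci_dev *pdev, struct sk_buff *skb,
			  dma_addr_t *mapping)
{
	/* was: pci_map_single(pdev, ..., PCI_DMA_TODEVICE) */
	*mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);
	/* was: pci_dma_mapping_error(pdev, *mapping) */
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;
	return 0;
}

/* Coherent side: pci_zalloc_consistent(pdev, size, &dma) becomes
 * dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL), and
 * pci_free_consistent() becomes dma_free_coherent(); the mask setup
 * uses dma_set_mask()/dma_set_coherent_mask().
 */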
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 19efae462a24..5cd7ef3625c5 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -5795,7 +5795,6 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret) {
usb_unanchor_urb(urb);
- usb_free_urb(urb);
goto error;
}
@@ -5804,6 +5803,7 @@ static int rtl8xxxu_submit_int_urb(struct ieee80211_hw *hw)
rtl8xxxu_write32(priv, REG_USB_HIMR, val32);
error:
+ usb_free_urb(urb);
return ret;
}
@@ -6318,6 +6318,7 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
struct rtl8xxxu_priv *priv = hw->priv;
struct rtl8xxxu_rx_urb *rx_urb;
struct rtl8xxxu_tx_urb *tx_urb;
+ struct sk_buff *skb;
unsigned long flags;
int ret, i;
@@ -6368,6 +6369,13 @@ static int rtl8xxxu_start(struct ieee80211_hw *hw)
rx_urb->hw = hw;
ret = rtl8xxxu_submit_rx_urb(priv, rx_urb);
+ if (ret) {
+ if (ret != -ENOMEM) {
+ skb = (struct sk_buff *)rx_urb->urb.context;
+ dev_kfree_skb(skb);
+ }
+ rtl8xxxu_queue_rx_urb(priv, rx_urb);
+ }
}
schedule_delayed_work(&priv->ra_watchdog, 2 * HZ);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index a4489b9302d4..6e8bd99e8911 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -195,8 +195,8 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
} else {
if (get_rf_type(rtlphy) == RF_1T2R ||
get_rf_type(rtlphy) == RF_2T2R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "1T2R or 2T2R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "1T2R or 2T2R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0xFF;
ht_cap->mcs.rx_mask[4] = 0x01;
@@ -204,7 +204,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.rx_highest =
cpu_to_le16(MAX_BIT_RATE_40MHZ_MCS15);
} else if (get_rf_type(rtlphy) == RF_1T1R) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "1T1R\n");
ht_cap->mcs.rx_mask[0] = 0xFF;
ht_cap->mcs.rx_mask[1] = 0x00;
@@ -436,6 +436,10 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
}
}
+static void rtl_watchdog_wq_callback(struct work_struct *work);
+static void rtl_fwevt_wq_callback(struct work_struct *work);
+static void rtl_c2hcmd_wq_callback(struct work_struct *work);
+
static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -454,17 +458,14 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
}
INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
- (void *)rtl_watchdog_wq_callback);
+ rtl_watchdog_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
- (void *)rtl_ips_nic_off_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.ps_work,
- (void *)rtl_swlps_wq_callback);
+ rtl_ips_nic_off_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.ps_work, rtl_swlps_wq_callback);
INIT_DELAYED_WORK(&rtlpriv->works.ps_rfon_wq,
- (void *)rtl_swlps_rfon_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq,
- (void *)rtl_fwevt_wq_callback);
- INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq,
- (void *)rtl_c2hcmd_wq_callback);
+ rtl_swlps_rfon_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.fwevt_wq, rtl_fwevt_wq_callback);
+ INIT_DELAYED_WORK(&rtlpriv->works.c2hcmd_wq, rtl_c2hcmd_wq_callback);
}
void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
@@ -1324,7 +1325,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
mac->link_state = MAC80211_LINKING;
/* Dual mac */
@@ -1385,7 +1386,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
if (mac->act_scanning)
return false;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
"%s ACT_ADDBAREQ From :%pM\n",
is_tx ? "Tx" : "Rx", hdr->addr2);
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_DMESG, "req\n",
@@ -1400,8 +1401,8 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
rcu_read_lock();
sta = rtl_find_sta(hw, hdr->addr3);
if (sta == NULL) {
- RT_TRACE(rtlpriv, COMP_SEND | COMP_RECV,
- DBG_DMESG, "sta is NULL\n");
+ rtl_dbg(rtlpriv, COMP_SEND | COMP_RECV,
+ DBG_DMESG, "sta is NULL\n");
rcu_read_unlock();
return true;
}
@@ -1428,13 +1429,13 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
}
break;
case ACT_ADDBARSP:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "%s ACT_ADDBARSP From :%pM\n",
- is_tx ? "Tx" : "Rx", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "%s ACT_ADDBARSP From :%pM\n",
+ is_tx ? "Tx" : "Rx", hdr->addr2);
break;
case ACT_DELBA:
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "ACT_ADDBADEL From :%pM\n", hdr->addr2);
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "ACT_ADDBADEL From :%pM\n", hdr->addr2);
break;
}
break;
@@ -1455,7 +1456,7 @@ static void setup_special_tx(struct rtl_priv *rtlpriv, struct rtl_ps_ctl *ppsc,
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_special_packet_notify(
rtlpriv, type);
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
ppsc->last_delaylps_stamp_jiffies = jiffies;
}
@@ -1519,9 +1520,9 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
/* 68 : UDP BOOTP client
* 67 : UDP BOOTP server
*/
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
- DBG_DMESG, "dhcp %s !!\n",
- (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV),
+ DBG_DMESG, "dhcp %s !!\n",
+ (is_tx) ? "Tx" : "Rx");
if (is_tx)
setup_special_tx(rtlpriv, ppsc,
@@ -1540,12 +1541,12 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx,
rtlpriv->btcoexist.btc_info.in_4way = true;
rtlpriv->btcoexist.btc_info.in_4way_ts = jiffies;
- RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
- "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
+ rtl_dbg(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
+ "802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx");
if (is_tx) {
rtlpriv->ra.is_special_data = true;
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
ppsc->last_delaylps_stamp_jiffies = jiffies;
setup_special_tx(rtlpriv, ppsc, PACKET_EAPOL);
@@ -1583,12 +1584,12 @@ static void rtl_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
info = IEEE80211_SKB_CB(skb);
ieee80211_tx_info_clear_status(info);
if (ack) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: ack\n");
info->flags |= IEEE80211_TX_STAT_ACK;
} else {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
- "tx report: not ack\n");
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_LOUD,
+ "tx report: not ack\n");
info->flags &= ~IEEE80211_TX_STAT_ACK;
}
ieee80211_tx_status_irqsafe(hw, skb);
@@ -1626,8 +1627,8 @@ static u16 rtl_get_tx_report_sn(struct ieee80211_hw *hw,
tx_report->last_sent_time = jiffies;
tx_info->sn = sn;
tx_info->send_time = tx_report->last_sent_time;
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Send TX-Report sn=0x%X\n", sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Send TX-Report sn=0x%X\n", sn);
return sn;
}
@@ -1674,9 +1675,9 @@ void rtl_tx_report_handler(struct ieee80211_hw *hw, u8 *tmp_buf, u8 c2h_cmd_len)
break;
}
}
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
- "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
- st, sn, retry);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_DMESG,
+ "Recv TX-Report st=0x%02X sn=0x%X retry=0x%X\n",
+ st, sn, retry);
}
EXPORT_SYMBOL_GPL(rtl_tx_report_handler);
@@ -1689,9 +1690,9 @@ bool rtl_check_tx_report_acked(struct ieee80211_hw *hw)
return true;
if (time_before(tx_report->last_sent_time + 3 * HZ, jiffies)) {
- RT_TRACE(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
- "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
- tx_report->last_sent_sn, tx_report->last_recv_sn);
+ rtl_dbg(rtlpriv, COMP_TX_REPORT, DBG_WARNING,
+ "Check TX-Report timeout!! s_sn=0x%X r_sn=0x%X\n",
+ tx_report->last_sent_sn, tx_report->last_recv_sn);
return true; /* 3 sec. (timeout) seen as acked */
}
@@ -1707,8 +1708,8 @@ void rtl_wait_tx_report_acked(struct ieee80211_hw *hw, u32 wait_ms)
if (rtl_check_tx_report_acked(hw))
break;
usleep_range(1000, 2000);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "Wait 1ms (%d/%d) to disable key.\n", i, wait_ms);
}
}
@@ -1770,9 +1771,9 @@ int rtl_tx_agg_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
- *ssn);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d seq:%d\n", sta->addr, tid,
+ *ssn);
tid_data->agg.agg_state = RTL_AGG_START;
@@ -1788,8 +1789,8 @@ int rtl_tx_agg_stop(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1828,8 +1829,8 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
return -ENXIO;
tid_data = &sta_entry->tids[tid];
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_RECV, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
tid_data->agg.rx_agg_state = RTL_RX_AGG_START;
return 0;
@@ -1844,8 +1845,8 @@ int rtl_rx_agg_stop(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1865,8 +1866,8 @@ int rtl_tx_agg_oper(struct ieee80211_hw *hw,
if (sta == NULL)
return -EINVAL;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
- "on ra = %pM tid = %d\n", sta->addr, tid);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG,
+ "on ra = %pM tid = %d\n", sta->addr, tid);
if (unlikely(tid >= MAX_TID_COUNT))
return -EINVAL;
@@ -1886,9 +1887,9 @@ void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv)
btc_ops->btc_get_ampdu_cfg(rtlpriv, &reject_agg,
&ctrl_agg_size, &agg_size);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
- reject_agg, ctrl_agg_size, agg_size);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Set RX AMPDU: coex - reject=%d, ctrl_agg_size=%d, size=%d",
+ reject_agg, ctrl_agg_size, agg_size);
rtlpriv->hw->max_rx_aggregation_subframes =
(ctrl_agg_size ? agg_size : IEEE80211_MAX_AMPDU_BUF_HT);
@@ -1976,9 +1977,9 @@ void rtl_scan_list_expire(struct ieee80211_hw *hw)
list_del(&entry->list);
rtlpriv->scan_list.num--;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "BSSID=%pM is expire in scan list (total=%d)\n",
- entry->bssid, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "BSSID=%pM is expire in scan list (total=%d)\n",
+ entry->bssid, rtlpriv->scan_list.num);
kfree(entry);
}
@@ -2012,9 +2013,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
if (memcmp(entry->bssid, hdr->addr3, ETH_ALEN) == 0) {
list_del_init(&entry->list);
entry_found = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Update BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Update BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
break;
}
}
@@ -2028,9 +2029,9 @@ void rtl_collect_scan_list(struct ieee80211_hw *hw, struct sk_buff *skb)
memcpy(entry->bssid, hdr->addr3, ETH_ALEN);
rtlpriv->scan_list.num++;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Add BSSID=%pM to scan list (total=%d)\n",
- hdr->addr3, rtlpriv->scan_list.num);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Add BSSID=%pM to scan list (total=%d)\n",
+ hdr->addr3, rtlpriv->scan_list.num);
}
entry->age = jiffies;
@@ -2042,11 +2043,10 @@ label_err:
}
EXPORT_SYMBOL(rtl_collect_scan_list);
-void rtl_watchdog_wq_callback(void *data)
+static void rtl_watchdog_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- watchdog_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ watchdog_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -2147,9 +2147,9 @@ void rtl_watchdog_wq_callback(void *data)
if (rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod > 8 ||
rtlpriv->link_info.num_rx_inperiod > 2)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
else
- rtl_lps_enter(hw);
+ rtl_lps_enter(hw, true);
label_lps_done:
;
@@ -2190,8 +2190,8 @@ label_lps_done:
if ((rtlpriv->link_info.bcn_rx_inperiod +
rtlpriv->link_info.num_rx_inperiod) == 0) {
rtlpriv->link_info.roam_times++;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "AP off for %d s\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "AP off for %d s\n",
(rtlpriv->link_info.roam_times * 2));
/* if we can't recv beacon for 10s,
@@ -2239,10 +2239,10 @@ void rtl_watch_dog_timer_callback(struct timer_list *t)
jiffies + MSECS(RTL_WATCH_DOG_TIME));
}
-void rtl_fwevt_wq_callback(void *data)
+static void rtl_fwevt_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, fwevt_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ fwevt_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -2304,11 +2304,11 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
switch (cmd_id) {
case C2H_DBG:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "[C2H], C2H_DBG!!\n");
break;
case C2H_TXBF:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_TXBF!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_TXBF!!\n");
break;
case C2H_TX_REPORT:
rtl_tx_report_handler(hw, cmd_buf, cmd_len);
@@ -2318,20 +2318,20 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
hal_ops->c2h_ra_report_handler(hw, cmd_buf, cmd_len);
break;
case C2H_BT_INFO:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_INFO!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_INFO!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
case C2H_BT_MP:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], C2H_BT_MP!!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], C2H_BT_MP!!\n");
if (rtlpriv->cfg->ops->get_btc_status())
btc_ops->btc_btmpinfo_notify(rtlpriv, cmd_buf, cmd_len);
break;
default:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "[C2H], Unknown packet!! cmd_id(%#X)!\n", cmd_id);
break;
}
}
@@ -2355,8 +2355,8 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
if (!skb)
break;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
- *((u8 *)skb->cb));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "C2H rx_desc_shift=%d\n",
+ *((u8 *)skb->cb));
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_DMESG,
"C2H data: ", skb->data, skb->len);
@@ -2368,11 +2368,10 @@ void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec)
}
}
-void rtl_c2hcmd_wq_callback(void *data)
+static void rtl_c2hcmd_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- c2hcmd_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ c2hcmd_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
rtl_c2hcmd_launcher(hw, 1);
@@ -2443,7 +2442,7 @@ static struct sk_buff *rtl_make_smps_action(struct ieee80211_hw *hw,
case IEEE80211_SMPS_AUTOMATIC:/* 0 */
case IEEE80211_SMPS_NUM_MODES:/* 4 */
WARN_ON(1);
- /* fall through */
+ fallthrough;
case IEEE80211_SMPS_OFF:/* 1 */ /*MIMO_PS_NOLIMIT*/
action_frame->u.action.u.ht_smps.smps_control =
WLAN_HT_SMPS_CONTROL_DISABLED;/* 0 */
@@ -2701,29 +2700,29 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
(memcmp(mac->bssid, ap5_6, 3) == 0) ||
vendor == PEER_ATH) {
vendor = PEER_ATH;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ath find\n");
} else if ((memcmp(mac->bssid, ap4_4, 3) == 0) ||
(memcmp(mac->bssid, ap4_5, 3) == 0) ||
(memcmp(mac->bssid, ap4_1, 3) == 0) ||
(memcmp(mac->bssid, ap4_2, 3) == 0) ||
(memcmp(mac->bssid, ap4_3, 3) == 0) ||
vendor == PEER_RAL) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>ral find\n");
vendor = PEER_RAL;
} else if (memcmp(mac->bssid, ap6_1, 3) == 0 ||
vendor == PEER_CISCO) {
vendor = PEER_CISCO;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>cisco find\n");
} else if ((memcmp(mac->bssid, ap3_1, 3) == 0) ||
(memcmp(mac->bssid, ap3_2, 3) == 0) ||
(memcmp(mac->bssid, ap3_3, 3) == 0) ||
vendor == PEER_BROAD) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>broad find\n");
vendor = PEER_BROAD;
} else if (memcmp(mac->bssid, ap7_1, 3) == 0 ||
vendor == PEER_MARV) {
vendor = PEER_MARV;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "=>marv find\n");
}
mac->vendor = vendor;
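
The three delayed-work hunks above (rtl_watchdog_wq_callback, rtl_fwevt_wq_callback, rtl_c2hcmd_wq_callback) follow the standard kernel pattern: each callback now receives the embedded struct work_struct * directly and recovers its enclosing context with container_of(), instead of going through the driver-private container_of_dwork_rtl() helper on a void *. The callbacks also become static, presumably because they are now referenced only where the work items are initialized inside base.c; their extern declarations are dropped from base.h immediately below. A minimal user-space sketch of the recovery pattern follows — the struct definitions and container_of() here are simplified stand-ins, not the kernel versions:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins so the sketch compiles in user space; the real
 * struct work_struct, struct delayed_work and container_of()
 * come from the kernel headers. */
struct work_struct { int pending; };
struct delayed_work { struct work_struct work; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rtl_works {
	const char *hw_name;             /* stand-in for struct ieee80211_hw *hw */
	struct delayed_work watchdog_wq; /* embedded work item */
};

/* New-style callback: the workqueue core hands over the embedded
 * work_struct; since &works->watchdog_wq.work points inside
 * struct rtl_works, container_of() walks back to the enclosing object. */
static void rtl_watchdog_wq_callback(struct work_struct *work)
{
	struct rtl_works *rtlworks =
		container_of(work, struct rtl_works, watchdog_wq.work);

	printf("watchdog tick for %s\n", rtlworks->hw_name);
}

int main(void)
{
	struct rtl_works works = { .hw_name = "hw0" };

	rtl_watchdog_wq_callback(&works.watchdog_wq.work);
	return 0;
}
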
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index fa92e29fffda..0e4f8a8ae3a5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -108,9 +108,6 @@ int rtl_rx_agg_start(struct ieee80211_hw *hw,
int rtl_rx_agg_stop(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u16 tid);
void rtl_rx_ampdu_apply(struct rtl_priv *rtlpriv);
-void rtl_watchdog_wq_callback(void *data);
-void rtl_fwevt_wq_callback(void *data);
-void rtl_c2hcmd_wq_callback(void *data);
void rtl_c2hcmd_launcher(struct ieee80211_hw *hw, int exec);
void rtl_c2hcmd_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb);
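
One hunk in base.c above (rtl_make_smps_action) also replaces the /* fall through */ marker comment with the fallthrough; pseudo-keyword, making the intent visible to the compiler itself rather than only to -Wimplicit-fallthrough's comment heuristics. In the kernel the definition comes from <linux/compiler_attributes.h>; the guarded sketch below shows the usual shape of such a definition under that assumption, together with a switch mirroring the SMPS hunk:

/* Guarded fallback: use the statement attribute where available,
 * otherwise a no-op. This is the common portable idiom, assumed
 * here rather than copied from the kernel header. */
#ifndef __has_attribute
# define __has_attribute(x) 0
#endif
#if __has_attribute(__fallthrough__)
# define fallthrough __attribute__((__fallthrough__))
#else
# define fallthrough do {} while (0)
#endif

#include <stdio.h>

static const char *smps_name(int smps_mode)
{
	switch (smps_mode) {
	case 0:                 /* automatic: unexpected, warn ... */
		fprintf(stderr, "unexpected SMPS mode\n");
		fallthrough;    /* ... then handle like "off" */
	case 1:
		return "off";
	default:
		return "unknown";
	}
}

int main(void)
{
	printf("%s\n", smps_name(0));
	return 0;
}
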
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 658ff425c256..edcd3c879f7f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -54,8 +54,8 @@ static u8 btc8192e2ant_bt_rssi_state(struct btc_coexist *btcoexist,
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -118,8 +118,8 @@ static u8 btc8192e2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -183,26 +183,26 @@ static void btc8192e2ant_monitor_bt_enable_disable(struct btc_coexist
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is enabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is disabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
@@ -398,12 +398,12 @@ static void btc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -418,9 +418,9 @@ static void btc8192e2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -526,8 +526,8 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "No BT link exists!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "No BT link exists!!!\n");
return algorithm;
}
@@ -542,29 +542,29 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "HID only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "A2DP only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "PAN(HS) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "PAN(EDR) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR;
}
@@ -573,22 +573,22 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + A2DP ==> SCO\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"SCO + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"SCO + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
@@ -598,14 +598,14 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
if (stack_info->num_of_hid >= 2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"HID*2 + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"HID + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
@@ -613,29 +613,29 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -645,34 +645,34 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "SCO + HID + A2DP ==> HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + A2DP + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -682,15 +682,15 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -702,14 +702,14 @@ static u8 btc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "ErrorSCO+HID+A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "ErrorSCO+HID+A2DP+PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "SCO+HID+A2DP+PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "SCO+HID+A2DP+PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -731,10 +731,10 @@ static void btc8192e2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -747,9 +747,9 @@ static void btc8192e2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_bt_pwr_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
- dec_bt_pwr_lvl, h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+ dec_bt_pwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -759,15 +759,15 @@ static void btc8192e2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Dec BT power level = %d\n",
- force_exec ? "force to" : "", dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Dec BT power level = %d\n",
+ force_exec ? "force to" : "", dec_bt_pwr_lvl);
coex_dm->cur_dec_bt_pwr = dec_bt_pwr_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
}
btc8192e2ant_set_fw_dec_bt_pwr(btcoexist, coex_dm->cur_dec_bt_pwr);
@@ -785,9 +785,9 @@ static void btc8192e2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
@@ -799,17 +799,17 @@ static void btc8192e2ant_bt_auto_report(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
((enable_auto_report) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -825,16 +825,16 @@ static void btc8192e2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -854,8 +854,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -863,8 +863,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
* After initialization, we can use coex_dm->btRf0x1eBackup
*/
if (btcoexist->initialized) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -877,17 +877,17 @@ static void btc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""),
+ ((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -905,8 +905,8 @@ static void btc8192e2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -926,22 +926,22 @@ static void btc8192e2ant_dac_swing(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swing_on) ? "ON" : "OFF"), dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swing_on) ? "ON" : "OFF"), dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -961,8 +961,8 @@ static void btc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
/* BB AGC Gain Table */
if (agc_table_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB Agc Table On!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -970,8 +970,8 @@ static void btc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BB Agc Table Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -986,17 +986,17 @@ static void btc8192e2ant_agc_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- ((agc_table_en) ? "Enable" : "Disable"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ ((agc_table_en) ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en,
- coex_dm->cur_agc_table_en);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -1012,20 +1012,20 @@ static void btc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -1035,30 +1035,30 @@ static void btc8192e2ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
- (force_exec ? "force to" : ""), val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- val0x6c4, val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+ (force_exec ? "force to" : ""), val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1113,9 +1113,9 @@ static void btc8192e2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1125,18 +1125,18 @@ static void btc8192e2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreIgnoreWlanAct = %d ",
- coex_dm->pre_ignore_wlan_act);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bCurIgnoreWlanAct = %d!!\n",
- coex_dm->cur_ignore_wlan_act);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d ",
+ coex_dm->pre_ignore_wlan_act);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1166,11 +1166,11 @@ static void btc8192e2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1196,20 +1196,20 @@ static void btc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1337,8 +1337,8 @@ static void btc8192e2ant_set_switch_ss_type(struct btc_coexist *btcoexist,
u8 mimops = BTC_MIMO_PS_DYNAMIC;
u32 dis_ra_mask = 0x0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], REAL set SS Type = %d\n", ss_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], REAL set SS Type = %d\n", ss_type);
dis_ra_mask = btc8192e2ant_decide_ra_mask(btcoexist, ss_type,
coex_dm->cur_ra_mask_type);
@@ -1372,9 +1372,9 @@ static void btc8192e2ant_switch_ss_type(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Switch SS Type = %d\n",
- (force_exec ? "force to" : ""), new_ss_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Switch SS Type = %d\n",
+ (force_exec ? "force to" : ""), new_ss_type);
coex_dm->cur_ss_type = new_ss_type;
if (!force_exec) {
@@ -1456,8 +1456,8 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non-connected idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) ||
@@ -1491,8 +1491,8 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi connected + BT non connected-idle!!\n");
btc8192e2ant_switch_ss_type(btcoexist, NORMAL_EXEC, 2);
btc8192e2ant_coex_table_with_type(btcoexist,
@@ -1517,8 +1517,8 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi connected + BT connected-idle!!\n");
btc8192e2ant_switch_ss_type(btcoexist,
NORMAL_EXEC, 2);
@@ -1543,12 +1543,12 @@ static bool btc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Wifi Connected-Idle + BT Busy!!\n");
btc8192e2ant_switch_ss_type(btcoexist,
NORMAL_EXEC, 1);
@@ -1580,13 +1580,13 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
static int up, dn, m, n, wait_cnt;
u8 retry_cnt = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjust()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -1669,11 +1669,11 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_cnt = coex_sta->bt_retry_cnt;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], retry_cnt = %d\n", retry_cnt);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
- up, dn, m, n, wait_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_cnt = %d\n", retry_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+ up, dn, m, n, wait_cnt);
wait_cnt++;
/* no retry in the last 2-second duration */
if (retry_cnt == 0) {
@@ -1688,8 +1688,8 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
n = 3;
up = 0;
dn = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]Increase wifi duration!!\n");
}
} else if (retry_cnt <= 3) {
up--;
@@ -1711,8 +1711,8 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Reduce wifi duration for retry<3\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Reduce wifi duration for retry<3\n");
}
} else {
if (wait_cnt == 1)
@@ -1727,12 +1727,12 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
wait_cnt = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Decrease wifi duration for retryCounter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Decrease wifi duration for retryCounter>3!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
}
/* if current PsTdma not match with
@@ -1742,10 +1742,10 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type mismatch!!!, ");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "curPsTdma=%d, recordPsTdma=%d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type mismatch!!!, ");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "curPsTdma=%d, recordPsTdma=%d\n",
coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
@@ -1756,8 +1756,8 @@ static void btc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, coex_dm->tdma_adj_type);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -1962,8 +1962,8 @@ static void btc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
bt_rssi_state == BTC_RSSI_STATE_STAY_LOW) &&
(wifi_rssi_state == BTC_RSSI_STATE_LOW ||
wifi_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
long_dist = true;
}
if (long_dist) {
@@ -2464,105 +2464,105 @@ static void btc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 algorithm = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8192e2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8192e2ant_action_bt_inquiry(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (btc8192e2ant_is_common_action(btcoexist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant common\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
coex_dm->pre_algorithm,
coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8192E_2ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = SCO\n");
btc8192e2ant_action_sco(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
btc8192e2ant_action_sco_pan(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID\n");
btc8192e2ant_action_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = A2DP\n");
btc8192e2ant_action_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8192e2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN(EDR)\n");
btc8192e2ant_action_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HS mode\n");
btc8192e2ant_action_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN+A2DP\n");
btc8192e2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8192e2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = HID+A2DP\n");
btc8192e2ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Action 2-Ant, algorithm = unknown!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Action 2-Ant, algorithm = unknown!!\n");
/* btc8192e2ant_coex_all_off(btcoexist); */
break;
}
@@ -2577,8 +2577,8 @@ static void btc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
u16 u16tmp = 0;
u8 u8tmp = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
if (backup) {
/* backup rf 0x1e value */
@@ -2659,8 +2659,8 @@ void ex_btc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8192e2ant_init_coex_dm(btcoexist);
}
@@ -2876,13 +2876,13 @@ void ex_btc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8192e2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
}
}
@@ -2892,12 +2892,12 @@ void ex_btc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2907,11 +2907,11 @@ void ex_btc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_SCAN_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
@@ -2919,11 +2919,11 @@ void ex_btc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_ASSOCIATE_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -2940,11 +2940,11 @@ void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2964,10 +2964,10 @@ void ex_btc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2978,8 +2978,8 @@ void ex_btc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (type == BTC_PACKET_DHCP)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], DHCP Packet notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -2998,19 +2998,19 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3028,8 +3028,8 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and loss of the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bit1, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bit1, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3045,8 +3045,8 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3)) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "bit3, BT NOT ignore Wlan active!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "bit3, BT NOT ignore Wlan active!\n");
btc8192e2ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -3102,25 +3102,25 @@ void ex_btc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8192E_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8192E_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3145,7 +3145,7 @@ void ex_btc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btc8192e2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
ex_btc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3159,29 +3159,29 @@ void ex_btc8192e2ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "=======================Periodical=======================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "=======================Periodical=======================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "************************************************\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "************************************************\n");
}
if (!btcoexist->auto_report_2ant) {
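Every hunk in this patch follows the same mechanical pattern: an RT_TRACE(rtlpriv, component, level, fmt, ...) call becomes rtl_dbg() with identical arguments, and the continuation lines are re-indented to match the shorter name. A minimal user-space sketch of such a debug helper, assuming only what the hunks show (rtl_dbg keeps RT_TRACE's argument order); the struct, mask and level values, and the vprintf body are illustrative stand-ins, not rtlwifi's real definitions:

#include <stdarg.h>
#include <stdio.h>

struct rtl_priv { unsigned int dbg_comp; };	/* stand-in for the driver private data */
#define COMP_BT_COEXIST 0x1			/* illustrative component mask */
#define DBG_LOUD 4				/* illustrative verbosity level */

static void rtl_dbg(struct rtl_priv *rtlpriv, unsigned int comp, int level,
		    const char *fmt, ...)
{
	va_list args;

	/* filter by component mask and verbosity, then forward the varargs */
	if (!(rtlpriv->dbg_comp & comp) || level > DBG_LOUD)
		return;
	va_start(args, fmt);
	vprintf(fmt, args);	/* the kernel helper would route to printk instead */
	va_end(args);
}

int main(void)
{
	struct rtl_priv priv = { .dbg_comp = COMP_BT_COEXIST };

	/* call sites convert one-to-one, e.g.: */
	rtl_dbg(&priv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
	return 0;
}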
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 528e442f25a4..70492929d7e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -468,9 +468,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -496,20 +496,20 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -520,8 +520,8 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
(force_exec ? "force to" : ""),
val0x6c0, val0x6c4, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
@@ -636,9 +636,9 @@ halbtc8723b1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -648,15 +648,15 @@ static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
@@ -682,8 +682,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
if (ap_enable) {
if ((byte1 & BIT4) && !(byte1 & BIT5)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW for 1Ant AP mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW for 1Ant AP mode\n");
real_byte1 &= ~BIT4;
real_byte1 |= BIT5;
@@ -704,13 +704,13 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = real_byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
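The trace above prints the whole 5-byte PS-TDMA H2C command as 0x%x%08x: byte 0 raw, then bytes 1-4 packed into a single 32-bit value with the same shifts the driver uses. A standalone sketch of that packing (the sample byte values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t h2c[5] = { 0x61, 0x20, 0x03, 0x10, 0x50 };	/* made-up command bytes */
	uint32_t tail = (uint32_t)h2c[1] << 24 | h2c[2] << 16 |
			h2c[3] << 8 | h2c[4];			/* same packing as the trace */

	/* prints "[BTCoex], PS-TDMA H2C cmd =0x6120031050" for these bytes */
	printf("[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n", h2c[0], tail);
	return 0;
}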
@@ -731,22 +731,22 @@ static void halbtc8723b1ant_lps_rpwm(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -762,8 +762,8 @@ static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -861,16 +861,16 @@ static void halbtc8723b1ant_set_ant_path(struct btc_coexist *btcoexist,
0x49d);
cnt_bt_cal_chk++;
if (u8tmp & BIT(0)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], ########### BT is calibrating (wait cnt=%d) ###########\n",
- cnt_bt_cal_chk);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], ########### BT is calibrating (wait cnt=%d) ###########\n",
+ cnt_bt_cal_chk);
mdelay(50);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], ********** BT is NOT calibrating (wait cnt=%d)**********\n",
- cnt_bt_cal_chk);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], ********** BT is NOT calibrating (wait cnt=%d)**********\n",
+ cnt_bt_cal_chk);
break;
}
}
@@ -1426,8 +1426,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
s32 result;
u8 retry_count = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if ((wifi_status ==
BT_8723B_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN) ||
@@ -1451,8 +1451,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->ps_tdma_du_adj_type = 2;
@@ -1490,8 +1490,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration */
@@ -1523,8 +1523,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* once retry count > 3, to reduce
@@ -1548,8 +1548,8 @@ void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1690,10 +1690,10 @@ static void halbtc8723b1ant_monitor_bt_enable_disable(struct btc_coexist
bt_disabled = true;
}
if (coex_sta->bt_disabled != bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is from %s to %s!!\n",
- (coex_sta->bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (coex_sta->bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
coex_sta->bt_disabled = bt_disabled;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
@@ -2029,15 +2029,15 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
bool under_4way = false, ap_enable = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
&under_4way);
if (under_4way) {
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -2051,8 +2051,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
else
halbtc8723b1ant_action_wifi_connected_special_packet(
btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -2152,30 +2152,30 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u32 num_of_wifi_link = 0;
u32 wifi_bw;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
if (coex_sta->bt_whck_test) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under WHCK test!!!\n");
halbtc8723b1ant_action_bt_whck_test(btcoexist);
return;
}
@@ -2276,8 +2276,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2314,8 +2314,8 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u32 u32tmp = 0;
u8 u8tmpa = 0, u8tmpb = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 1Ant Init HW Config!!\n");
/* 0xf0[15:12] --> Chip Cut information */
coex_sta->cut_version =
@@ -2347,9 +2347,9 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
- u32tmp, u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
}
/**************************************************************
@@ -2363,8 +2363,8 @@ void ex_btc8723b1ant_power_on_setting(struct btc_coexist *btcoexist)
u16 u16tmp = 0x0;
u32 value;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "xxxxxxxxxxxxxxxx Execute 8723b 1-Ant PowerOn Setting xxxxxxxxxxxxxxxx!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "xxxxxxxxxxxxxxxx Execute 8723b 1-Ant PowerOn Setting xxxxxxxxxxxxxxxx!!\n");
btcoexist->stop_coex_dm = true;
@@ -2436,8 +2436,8 @@ void ex_btc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2718,8 +2718,8 @@ void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
@@ -2729,8 +2729,8 @@ void ex_btc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8723b1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8723b1ant_init_hw_config(btcoexist, false, false);
@@ -2747,12 +2747,12 @@ void ex_btc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2773,8 +2773,8 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
if (type == BTC_SCAN_START) {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
/* Force antenna setup for no scan result issue */
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
@@ -2783,13 +2783,13 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
- u32tmp, u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
&coex_sta->scan_ap_num);
@@ -2824,8 +2824,8 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected)
/* non-connected scan */
btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
@@ -2833,8 +2833,8 @@ void ex_btc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
/* wifi is connected */
btc8723b1ant_action_wifi_conn_scan(btcoexist);
} else if (BTC_SCAN_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected)
/* non-connected scan */
btc8723b1ant_action_wifi_not_conn(btcoexist);
@@ -2866,13 +2866,13 @@ void ex_btc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
FORCE_EXEC, false, false);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
coex_dm->arp_cnt = 0;
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_LINK_STATUS,
@@ -2896,12 +2896,12 @@ void ex_btc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2927,8 +2927,8 @@ void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (type == BTC_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
/* Force antenna setup for no scan result issue */
halbtc8723b1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_PTA,
@@ -2958,8 +2958,8 @@ void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->backup_ampdu_max_time =
btcoexist->btc_read_1byte(btcoexist, 0x456);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
coex_dm->arp_cnt = 0;
btcoexist->btc_write_1byte(btcoexist, 0x6cd, 0x0); /* CCK Tx */
@@ -2986,10 +2986,10 @@ void ex_btc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3014,12 +3014,12 @@ void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (type == BTC_PACKET_DHCP || type == BTC_PACKET_EAPOL ||
type == BTC_PACKET_ARP) {
if (type == BTC_PACKET_ARP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet ARP notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet ARP notify\n");
coex_dm->arp_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ARP Packet Count = %d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ARP Packet Count = %d\n",
coex_dm->arp_cnt);
if ((coex_dm->arp_cnt >= 10) && (!under_4way))
@@ -3031,13 +3031,13 @@ void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
coex_sta->wifi_is_high_pri_task = true;
} else {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet DHCP or EAPOL notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet DHCP or EAPOL notify\n");
}
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet [Type = %d] notify\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet [Type = %d] notify\n",
type);
}
@@ -3065,8 +3065,8 @@ void ex_btc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet(%d) notify\n", type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet(%d) notify\n", type);
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
}
}
@@ -3087,19 +3087,19 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
/* if 0xff, it means BT is under WHCK test */
@@ -3142,8 +3142,8 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
/* BT info is responded by BT FW and BT RF REG
* 0x3C != 0x15 => Need to switch BT TRx Mask
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
0x3c, 0x15);
@@ -3158,8 +3158,8 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and the info is lost.
*/
if (coex_sta->bt_info_ext & BIT1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3173,8 +3173,8 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (coex_sta->bt_info_ext & BIT3) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
halbtc8723b1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -3280,29 +3280,29 @@ void ex_btc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8723B_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
/* connection exists but is not busy */
} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
}
if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3322,16 +3322,16 @@ void ex_btc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
u32 u32tmp;
u8 u8tmpa, u8tmpb, u8tmpc;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF Status notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF Status notify\n");
if (type == BTC_RF_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned ON!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF is turned ON!!\n");
btcoexist->stop_coex_dm = false;
} else if (type == BTC_RF_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RF is turned OFF!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RF is turned OFF!!\n");
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
@@ -3347,9 +3347,9 @@ void ex_btc8723b1ant_rf_status_notify(struct btc_coexist *btcoexist, u8 type)
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x67);
u8tmpc = btcoexist->btc_read_1byte(btcoexist, 0x76e);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x, 0x76e=0x%x\n",
- u32tmp, u8tmpa, u8tmpb, u8tmpc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x67=0x%x, 0x76e=0x%x\n",
+ u32tmp, u8tmpa, u8tmpb, u8tmpc);
}
}
@@ -3357,7 +3357,7 @@ void ex_btc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -3379,11 +3379,11 @@ void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
halbtc8723b1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT,
FORCE_EXEC, false, true);
halbtc8723b1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
@@ -3401,8 +3401,8 @@ void ex_btc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
coex_sta->under_lps = false;
btcoexist->stop_coex_dm = true;
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8723b1ant_init_hw_config(btcoexist, false, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3414,8 +3414,8 @@ void ex_btc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], *****************Coex DM Reset****************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], *****************Coex DM Reset****************\n");
halbtc8723b1ant_init_hw_config(btcoexist, false, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3426,8 +3426,8 @@ void ex_btc8723b1ant_periodical(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (!btcoexist->auto_report_1ant) {
halbtc8723b1ant_query_bt_info(btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 9f7b9af5bdcd..fb57cc8b2e47 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -53,28 +53,28 @@ static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -83,12 +83,12 @@ static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -97,26 +97,26 @@ static u8 btc8723b2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -144,28 +144,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -176,12 +176,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -190,26 +190,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
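Both RSSI-state helpers above apply the same hysteresis rule: a Low state is promoted only once the RSSI clears the threshold plus BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT, and a High state is demoted only once the RSSI drops below the bare threshold, so readings that jitter inside the tolerance band keep their state (the "stay at Low/High" traces). A standalone two-level sketch of that rule, with illustrative threshold and tolerance values:

#include <stdio.h>

enum rssi_state { RSSI_LOW, RSSI_HIGH };

/* Promote only above thresh + tol; demote only below thresh. */
static enum rssi_state rssi_update(enum rssi_state prev, int rssi,
				   int thresh, int tol)
{
	if (prev == RSSI_LOW)
		return rssi >= thresh + tol ? RSSI_HIGH : RSSI_LOW;
	return rssi < thresh ? RSSI_LOW : RSSI_HIGH;
}

int main(void)
{
	enum rssi_state s = RSSI_LOW;
	int samples[] = { 40, 44, 47, 44, 40 };	/* made-up readings */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		s = rssi_update(s, samples[i], 42, 5);	/* thresh 42, tol 5 */
		printf("rssi=%d -> %s\n", samples[i],
		       s == RSSI_HIGH ? "High" : "Low");
	}
	return 0;
}

The middle readings never flip the state in either direction, which is exactly the jitter the driver is suppressing.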
@@ -277,12 +277,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
else
bt_link_info->slave_role = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -334,9 +334,9 @@ static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -446,8 +446,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No BT link exists!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -462,29 +462,29 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(HS) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(EDR) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR;
}
@@ -493,23 +493,23 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -517,35 +517,35 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID + A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex],A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex],A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -555,36 +555,36 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -594,15 +594,15 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -614,13 +614,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
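These branches key off how many BT profiles are active at once. The count itself is computed earlier in btc8723b2ant_action_algorithm(), outside the hunks shown here, so the summation below is a reconstruction under that assumption:

#include <stdbool.h>

struct btc_bt_link_info {
	bool sco_exist, hid_exist, a2dp_exist, pan_exist;
};

/* Presumed bookkeeping: each active profile contributes one to the count
 * that the "num_of_diff_profile == 1/2" branches above switch on. */
static int num_of_diff_profile(const struct btc_bt_link_info *info)
{
	return info->sco_exist + info->hid_exist +
	       info->a2dp_exist + info->pan_exist;
}

int main(void)
{
	struct btc_bt_link_info info = { .sco_exist = true, .hid_exist = true };

	return num_of_diff_profile(&info) == 2 ? 0 : 1;
}

With a single profile the algorithm maps directly to it; as the traces above show, most mixed cases involving SCO or HID collapse to the BT_8723B_2ANT_COEX_ALGO_PANEDR_HID handling.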
@@ -641,10 +641,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -657,8 +657,8 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_bt_pwr_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], decrease Bt Power Level : %u\n", dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], decrease Bt Power Level : %u\n", dec_bt_pwr_lvl);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -668,15 +668,15 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Dec BT power level = %u\n", dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Dec BT power level = %u\n", dec_bt_pwr_lvl);
coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PreDecBtPwrLvl=%d, CurDecBtPwrLvl=%d\n",
- coex_dm->pre_dec_bt_pwr_lvl,
- coex_dm->cur_dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PreDecBtPwrLvl=%d, CurDecBtPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr_lvl,
+ coex_dm->cur_dec_bt_pwr_lvl);
if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
return;
@@ -721,16 +721,16 @@ static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -759,9 +759,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf6; /* MCS5 or OFDM36 */
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -771,17 +771,17 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""), (low_penalty_ra ?
- "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""), (low_penalty_ra ?
+ "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -797,8 +797,8 @@ static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8) level;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -818,20 +818,20 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
- (force_exec ? "force to" : ""),
- (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""),
+ (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -851,20 +851,20 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
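Nearly every helper in these files repeats the force_exec pattern visible in the surrounding hunks: the new value is cached as cur_*, and unless the caller passes FORCE_EXEC the write is skipped when it matches the cached pre_* value, which is updated after each real write. A condensed sketch of that pattern with the register I/O stubbed out:

#include <stdbool.h>
#include <stdio.h>

static unsigned int pre_val, cur_val;

static void btc_write(unsigned int val)
{
	printf("write 0x6c0 = 0x%x\n", val);	/* stub for btc_write_4byte() */
}

static void set_coex_table(bool force_exec, unsigned int val)
{
	cur_val = val;
	if (!force_exec && pre_val == cur_val)
		return;			/* NORMAL_EXEC: skip a redundant write */
	btc_write(cur_val);
	pre_val = cur_val;		/* remember what the hardware now holds */
}

int main(void)
{
	set_coex_table(false, 0x55555555);	/* writes */
	set_coex_table(false, 0x55555555);	/* skipped: value unchanged */
	set_coex_table(true, 0x55555555);	/* forced: writes anyway */
	return 0;
}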
@@ -875,24 +875,24 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
- force_exec ? "force to" : "",
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ force_exec ? "force to" : "",
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -991,9 +991,9 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1030,16 +1030,16 @@ static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1070,11 +1070,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1220,10 +1220,10 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
coex_dm->switch_thres_offset;
bt_rssi_state = btc8723b2ant_bt_rssi_state(btcoexist, 2, tmp, 0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
@@ -1237,12 +1237,12 @@ static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
}
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1585,13 +1585,13 @@ static void btc8723b2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 23);
btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 3);
} else if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi link process + BT Inq/Page!!\n");
btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else if (wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT Inq/Page!!\n");
btc8723b2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else {
@@ -1620,9 +1620,9 @@ static void btc8723b2ant_action_wifi_link_process(struct btc_coexist
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x76e = 0x%x\n",
- u32tmp, u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x948 = 0x%x, 0x765 = 0x%x, 0x76e = 0x%x\n",
+ u32tmp, u8tmpa, u8tmpb);
}
static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
@@ -1645,8 +1645,8 @@ static bool btc8723b2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
/* office environment */
if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
(coex_sta->a2dp_exist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
btc8723b2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
btc8723b2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
@@ -1685,8 +1685,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non-connected idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
@@ -1709,8 +1709,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1734,8 +1734,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btc8723b2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
@@ -1759,12 +1759,12 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
common =
btc8723b2ant_action_wifi_idle_process(
@@ -1786,13 +1786,13 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
s32 result;
u8 retry_count = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjust()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -1901,11 +1901,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
(coex_sta->low_priority_rx) > 1250)
retry_count++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], retry_count = %d\n", retry_count);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
- up, dn, m, n, wait_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
result = 0;
wait_count++;
/* no retry in the last 2-second duration*/
@@ -1925,8 +1925,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
} /* <=3 retry in the last 2-second duration*/
} else if (retry_count <= 3) {
up--;
@@ -1957,8 +1957,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
}
} else {
/* retry count > 3, once retry count > 3, to reduce
@@ -1982,12 +1982,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1) {
if (tx_pause) {
if (coex_dm->cur_ps_tdma == 71) {
@@ -2736,17 +2736,17 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
}
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
/* if current PsTdma not match with the recorded one (scan, dhcp, ...),
* then we have to adjust it back to the previous recorded one.
*/
if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type mismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2756,8 +2756,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->ps_tdma_du_adj_type);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -3352,26 +3352,26 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
bool miracast_plus_bt = false;
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8723b2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8723b2ant_action_bt_inquiry(btcoexist);
return;
}
@@ -3381,8 +3381,8 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under Link Process !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], WiFi is under Link Process !!\n");
btc8723b2ant_action_wifi_link_process(btcoexist);
return;
}
@@ -3394,9 +3394,9 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if ((num_of_wifi_link >= 2) ||
(wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
- num_of_wifi_link, wifi_link_status);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+ num_of_wifi_link, wifi_link_status);
if (bt_link_info->bt_link_exist)
miracast_plus_bt = true;
@@ -3415,76 +3415,76 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
&miracast_plus_bt);
coex_dm->cur_algorithm = algorithm;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Algorithm = %d\n",
- coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
if (btc8723b2ant_is_common_action(btcoexist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant common\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8723B_2ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
btc8723b2ant_action_sco(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
btc8723b2ant_action_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
btc8723b2ant_action_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8723b2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
btc8723b2ant_action_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
btc8723b2ant_action_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
btc8723b2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8723b2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
btc8723b2ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
btc8723b2ant_coex_alloff(btcoexist);
break;
}
@@ -3531,8 +3531,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u8tmp = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
coex_dm->bt_rf0x1e_backup =
btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
@@ -3631,8 +3631,8 @@ void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8723b2ant_init_coex_dm(btcoexist);
}
@@ -3853,15 +3853,15 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8723b2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
ex_btc8723b2ant_init_hwconfig(btcoexist);
btc8723b2ant_init_coex_dm(btcoexist);
@@ -3874,12 +3874,12 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3895,16 +3895,16 @@ void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
if (BTC_SCAN_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_U1_AP_NUM,
&coex_sta->scan_ap_num);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x76e=0x%x\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], 0x948=0x%x, 0x765=0x%x, 0x76e=0x%x\n",
u32tmp, u8tmpa, u8tmpb);
}
@@ -3913,11 +3913,11 @@ void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_ASSOCIATE_START == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3930,11 +3930,11 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 ap_num = 0;
if (BTC_MEDIA_CONNECT == type)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist,
@@ -3961,10 +3961,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66=0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3975,8 +3975,8 @@ void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (type == BTC_PACKET_DHCP)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], DHCP Packet notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3995,24 +3995,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length=%d, hex data=[",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
if (i == 1)
bt_info = tmpbuf[i];
if (i == length - 1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmpbuf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmpbuf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmpbuf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmpbuf[i]);
}
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
return;
}
@@ -4043,8 +4043,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
* because BT is reset and loss of the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -4058,8 +4058,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if ((coex_sta->bt_info_ext & BIT3)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
false);
} else {
@@ -4120,26 +4120,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
/* connection exists but no busy */
} else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -4164,7 +4164,7 @@ void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Halt notify\n");
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -4175,11 +4175,11 @@ void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
if (pnp_state == BTC_WIFI_PNP_SLEEP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
/* Driver do not leave IPS/LPS when driver is going to sleep, so
* BTCoexistence think wifi is still under IPS/LPS
@@ -4190,8 +4190,8 @@ void ex_btc8723b2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
coex_sta->under_ips = false;
coex_sta->under_lps = false;
} else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
ex_btc8723b2ant_init_hwconfig(btcoexist);
btc8723b2ant_init_coex_dm(btcoexist);
btc8723b2ant_query_bt_info(btcoexist);
@@ -4203,8 +4203,8 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
struct btc_bt_link_info *bt_link_info = &btcoexist->bt_link_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (coex_sta->dis_ver_info_cnt <= 5) {
coex_sta->dis_ver_info_cnt += 1;
@@ -4212,8 +4212,8 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
/* Antenna config to set 0x765 = 0x0 (GNT_BT control by
* PTA) after initial
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set GNT_BT control by PTA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set GNT_BT control by PTA\n");
btc8723b2ant_set_ant_path(
btcoexist, BTC_ANT_WIFI_AT_MAIN, false, false);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index fa5b73f81c57..9f5e85be9764 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -55,28 +55,28 @@ static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -85,12 +85,12 @@ static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -99,26 +99,26 @@ static u8 btc8821a1ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -145,28 +145,28 @@ static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -177,12 +177,12 @@ static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -191,26 +191,26 @@ static u8 btc8821a1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -397,9 +397,9 @@ static void btc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -471,8 +471,8 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], No BT link exists!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -487,28 +487,28 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
}
}
@@ -516,56 +516,56 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -574,33 +574,33 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -609,14 +609,14 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -627,14 +627,14 @@ static u8 btc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -660,9 +660,9 @@ static void btc8821a1ant_set_sw_penalty_tx_rate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /* MCS5 or OFDM36 */
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -688,20 +688,20 @@ static void btc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -711,10 +711,10 @@ static void btc8821a1ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
- val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
coex_dm->cur_val_0x6c0 = val0x6c0;
coex_dm->cur_val_0x6c4 = val0x6c4;
coex_dm->cur_val_0x6c8 = val0x6c8;
@@ -786,9 +786,9 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -798,15 +798,15 @@ static void btc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
@@ -831,8 +831,8 @@ static void btc8821a1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
if (ap_enable) {
if (byte1 & BIT4 && !(byte1 & BIT5)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW for 1Ant AP mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW for 1Ant AP mode\n");
real_byte1 &= ~BIT4;
real_byte1 |= BIT5;
@@ -853,13 +853,13 @@ static void btc8821a1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = real_byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -878,22 +878,22 @@ static void btc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -909,8 +909,8 @@ static void btc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
btc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1010,13 +1010,13 @@ static void btc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ********** TDMA(off, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1254,50 +1254,50 @@ static bool btc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
btc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else {
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
}
common = false;
@@ -1743,15 +1743,15 @@ static void btc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool under_4way = false;
bool ap_enable = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
&under_4way);
if (under_4way) {
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -1764,8 +1764,8 @@ static void btc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
else
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -1834,58 +1834,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!btc8821a1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8821A_1ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = SCO\n");
btc8821a1ant_action_sco(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID\n");
btc8821a1ant_action_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP\n");
btc8821a1ant_action_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
btc8821a1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
btc8821a1ant_action_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HS mode\n");
btc8821a1ant_action_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
btc8821a1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
btc8821a1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
btc8821a1ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
/*btc8821a1ant_coex_all_off(btcoexist);*/
break;
}
@@ -1906,31 +1906,31 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u32 num_of_wifi_link = 0;
bool wifi_under_5g = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2001,8 +2001,8 @@ static void btc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2040,8 +2040,8 @@ static void btc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
u8 u1_tmp = 0;
bool wifi_under_5g = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (wifi_only)
return;
@@ -2096,8 +2096,8 @@ void ex_btc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2353,15 +2353,15 @@ void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8821a1ant_set_ant_path(btcoexist,
BTC_ANT_PATH_BT, false, true);
@@ -2370,8 +2370,8 @@ void ex_btc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
btc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
btc8821a1ant_init_hw_config(btcoexist, false, false);
@@ -2388,12 +2388,12 @@ void ex_btc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2412,23 +2412,23 @@ void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (type == BTC_SCAN_START) {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
/* Force antenna setup for no scan result issue */
btc8821a1ant_ps_tdma(btcoexist, FORCE_EXEC, false, 8);
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
if (coex_sta->bt_disabled)
@@ -2461,8 +2461,8 @@ void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) {
/* non-connected scan */
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2471,8 +2471,8 @@ void ex_btc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
btc8821a1ant_action_wifi_connected_scan(btcoexist);
}
} else if (BTC_SCAN_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) {
/* non-connected scan */
btc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2497,21 +2497,21 @@ void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (type == BTC_ASSOCIATE_START) {
coex_sta->wifi_is_high_pri_task = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
coex_dm->arp_cnt = 0;
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
coex_dm->arp_cnt = 0;
}
@@ -2536,12 +2536,12 @@ void ex_btc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2568,18 +2568,18 @@ void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
if (BTC_MEDIA_CONNECT == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
coex_dm->arp_cnt = 0;
}
@@ -2602,11 +2602,11 @@ void ex_btc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 |
- h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
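
For context on the print just above: the three wifi_chnl_info bytes are handed to the firmware as H2C command 0x66, and the debug line folds them into one integer purely for readable logging. A standalone sketch of that folding, with made-up byte values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical wifi_chnl_info payload carried by H2C 0x66 */
	uint8_t h2c_parameter[3] = { 0x01, 0x09, 0x00 };

	/* same fold as the rtl_dbg() call above: byte0..byte2, MSB first */
	printf("[BTCoex], FW write 0x66 = 0x%x\n",
	       h2c_parameter[0] << 16 |
	       h2c_parameter[1] << 8 |
	       h2c_parameter[2]);
	return 0;
}
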
@@ -2628,8 +2628,8 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2639,17 +2639,17 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
coex_sta->wifi_is_high_pri_task = true;
if (type == BTC_PACKET_ARP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet ARP notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet ARP notify\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet DHCP or EAPOL notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet DHCP or EAPOL notify\n");
}
} else {
coex_sta->wifi_is_high_pri_task = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], specific Packet [Type = %d] notify\n",
- type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], specific Packet [Type = %d] notify\n",
+ type);
}
coex_sta->special_pkt_period_cnt = 0;
@@ -2678,9 +2678,9 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
type == BTC_PACKET_ARP) {
if (type == BTC_PACKET_ARP) {
coex_dm->arp_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ARP Packet Count = %d\n",
- coex_dm->arp_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ARP Packet Count = %d\n",
+ coex_dm->arp_cnt);
if (coex_dm->arp_cnt >= 10)
/* if ARP PKT > 10 after connect, do not go to
* btc8821a1ant_act_wifi_conn_sp_pkt
@@ -2688,8 +2688,8 @@ void ex_btc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
return;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], special Packet(%d) notify\n", type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], special Packet(%d) notify\n", type);
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
}
}
@@ -2715,19 +2715,19 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
}
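
The dump loop above emits one byte per debug call, printing ", " after each byte and "]\n" after the last so the whole C2H payload reads as one bracketed list opened by the header print. The same idiom in self-contained form, with an invented payload:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented C2H BT-info payload, for illustration only */
	uint8_t tmp_buf[] = { 0x01, 0x02, 0x0f, 0x15 };
	int length = (int)sizeof(tmp_buf);
	int i;

	printf("[BTCoex], Bt info[0], length = %d, hex data = [", length);
	for (i = 0; i < length; i++) {
		if (i == length - 1)
			printf("0x%02x]\n", tmp_buf[i]);	/* close list */
		else
			printf("0x%02x, ", tmp_buf[i]);		/* separator */
	}
	return 0;
}
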
@@ -2749,8 +2749,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
/* BT info is responded by BT FW and BT RF REG 0x3C !=
* 0x15 => Need to switch BT TRx Mask
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x15\n");
btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
0x3c, 0x15);
}
@@ -2759,8 +2759,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and lost the info
*/
if (coex_sta->bt_info_ext & BIT1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected) {
@@ -2775,8 +2775,8 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8821a1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -2827,28 +2827,28 @@ void ex_btc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8821A_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
/* connection exists but is not busy */
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8821A_1ANT_B_ACL_BUSY) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2868,12 +2868,12 @@ void ex_btc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
bool wifi_under_5g = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Halt notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2897,18 +2897,18 @@ void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
btc8821a1ant_coex_under_5g(btcoexist);
return;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
/* BT should clear UnderIPS/UnderLPS state to avoid mismatch
* state after wakeup.
*/
@@ -2922,8 +2922,8 @@ void ex_btc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
btc8821a1ant_set_ant_path(btcoexist, BTC_ANT_PATH_BT, false,
true);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
btc8821a1ant_init_hw_config(btcoexist, false, false);
btc8821a1ant_init_coex_dm(btcoexist);
@@ -2939,33 +2939,33 @@ void ex_btc8821a1ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- stack_info->profile_notified ? "Yes" : "No",
- stack_info->hci_version);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_1ant,
- glcoex_ver_8821a_1ant,
- fw_ver, bt_patch_ver,
- bt_patch_ver);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ****************************************************************\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8821a_1ant,
+ glcoex_ver_8821a_1ant,
+ fw_ver, bt_patch_ver,
+ bt_patch_ver);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ****************************************************************\n");
}
if (!btcoexist->auto_report_1ant) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index e9e211fda264..e53789f11b08 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -54,28 +54,28 @@ static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
if (bt_rssi >=
rssi_thresh + BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
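
The classifier above (and its three-level sibling below) is a hysteresis filter: leaving the Low state requires the RSSI to clear the threshold plus BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT, while falling back only requires dropping below the bare threshold, so readings hovering near the boundary cannot flap the state every tick. A minimal two-level sketch of the same idea; the tolerance value and the sample numbers here are illustrative:

#include <stdio.h>

#define RSSI_TOL 6 /* stand-in for BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT */

enum rssi_state { RSSI_LOW, RSSI_STAY_LOW, RSSI_HIGH, RSSI_STAY_HIGH };

static enum rssi_state rssi_step(enum rssi_state prev, int rssi, int thresh)
{
	if (prev == RSSI_LOW || prev == RSSI_STAY_LOW)
		/* climbing out of Low needs threshold + tolerance */
		return rssi >= thresh + RSSI_TOL ? RSSI_HIGH : RSSI_STAY_LOW;
	/* falling out of High only needs to drop below the bare threshold */
	return rssi < thresh ? RSSI_LOW : RSSI_STAY_HIGH;
}

int main(void)
{
	enum rssi_state s = RSSI_STAY_LOW;
	int samples[] = { 40, 49, 47, 44, 41, 39 };
	int thresh = 42, i;

	for (i = 0; i < 6; i++) {
		s = rssi_step(s, samples[i], thresh);
		printf("rssi=%d -> state=%d\n", samples[i], s);
	}
	return 0;
}
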
@@ -85,12 +85,12 @@ static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
(rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -100,26 +100,26 @@ static u8 btc8821a2ant_bt_rssi_state(struct btc_coexist *btcoexist,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT Rssi state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -147,28 +147,28 @@ static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -180,12 +180,12 @@ static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
(rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -194,26 +194,26 @@ static u8 btc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -273,12 +273,12 @@ static void btc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
else
bt_link_info->slave_role = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -330,9 +330,9 @@ static void btc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -437,7 +437,7 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
"[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -453,28 +453,28 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], A2DP only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(HS) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(HS) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], PAN(EDR) only\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], PAN(EDR) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
}
}
@@ -482,58 +482,58 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], HID + A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], A2DP + PAN(EDR)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -543,33 +543,33 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
@@ -578,15 +578,15 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -598,14 +598,14 @@ static u8 btc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
}
}
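
The decision tree above maps the set of active BT profiles (SCO/HID/A2DP/PAN, plus whether PAN rides a BT hotspot, bt_hs_on) onto a single coex algorithm, with SCO dominating every mix it appears in. A condensed sketch of the visible single- and double-profile rules; it deliberately covers only those cases, and the enum values are illustrative:

#include <stdio.h>

enum algo { ALGO_UNDEF, ALGO_SCO, ALGO_HID, ALGO_A2DP, ALGO_PANHS,
	    ALGO_PANEDR, ALGO_HID_A2DP, ALGO_PANEDR_HID, ALGO_A2DP_PANHS,
	    ALGO_PANEDR_A2DP };

struct links { int sco, hid, a2dp, pan, hs_on; };

/* Condensed from the 1- and 2-profile branches above; the 3- and
 * 4-profile branches in the driver extend the same pattern. */
static enum algo pick(struct links l)
{
	int n = l.sco + l.hid + l.a2dp + l.pan;

	if (l.sco)
		return ALGO_SCO;	/* SCO wins every mix shown above */
	if (n == 1) {
		if (l.hid)
			return ALGO_HID;
		if (l.a2dp)
			return ALGO_A2DP;
		return l.hs_on ? ALGO_PANHS : ALGO_PANEDR;
	}
	if (l.hid && l.a2dp)
		return ALGO_HID_A2DP;
	if (l.hid && l.pan)
		return l.hs_on ? ALGO_HID : ALGO_PANEDR_HID;
	/* remaining 2-profile case: PAN + A2DP */
	return l.hs_on ? ALGO_A2DP_PANHS : ALGO_PANEDR_A2DP;
}

int main(void)
{
	struct links hid_pan = { .hid = 1, .pan = 1, .hs_on = 0 };

	printf("HID + PAN(EDR) -> algo %d\n", pick(hid_pan));
	return 0;
}
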
@@ -625,10 +625,10 @@ static void btc8821a2ant_set_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -641,9 +641,9 @@ static void btc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_bt_pwr_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], decrease Bt Power Level : %u, FW write 0x62 = 0x%x\n",
- dec_bt_pwr_lvl, h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], decrease Bt Power Level : %u, FW write 0x62 = 0x%x\n",
+ dec_bt_pwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -653,15 +653,15 @@ static void btc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s Dec BT power level = %u\n",
- (force_exec ? "force to" : ""), dec_bt_pwr_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s Dec BT power level = %u\n",
+ (force_exec ? "force to" : ""), dec_bt_pwr_lvl);
coex_dm->cur_dec_bt_pwr_lvl = dec_bt_pwr_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_dec_bt_pwr_lvl = %d, cur_dec_bt_pwr_lvl = %d\n",
- coex_dm->pre_dec_bt_pwr_lvl,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_dec_bt_pwr_lvl = %d, cur_dec_bt_pwr_lvl = %d\n",
+ coex_dm->pre_dec_bt_pwr_lvl,
coex_dm->cur_dec_bt_pwr_lvl);
if (coex_dm->pre_dec_bt_pwr_lvl == coex_dm->cur_dec_bt_pwr_lvl)
@@ -677,16 +677,16 @@ static void btc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -719,9 +719,9 @@ static void btc8821a2ant_set_sw_penalty_tx_rate_adaptive(
h2c_parameter[5] = 0xa0;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -731,17 +731,17 @@ static void btc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""),
- ((low_penalty_ra) ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""),
+ ((low_penalty_ra) ? "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -758,8 +758,8 @@ static void btc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 val = (u8)level;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
}
@@ -779,21 +779,21 @@ static void btc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swing_on) ? "ON" : "OFF"),
- dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swing_on) ? "ON" : "OFF"),
+ dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl ==
@@ -814,20 +814,20 @@ static void btc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
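
Note that set_coex_table() never touches hardware directly: every register access goes through btc_write_4byte()/btc_write_1byte() callbacks on the btc_coexist object, which is how the shared coexistence logic stays chip-agnostic. A reduced sketch of that ops-table pattern; only the two callback names and the four register offsets come from the code above, the rest is hypothetical:

#include <stdint.h>
#include <stdio.h>

struct btc_coexist {
	void (*btc_write_4byte)(struct btc_coexist *btc, uint32_t reg,
				uint32_t val);
	void (*btc_write_1byte)(struct btc_coexist *btc, uint32_t reg,
				uint8_t val);
};

static void demo_write_4byte(struct btc_coexist *btc, uint32_t reg,
			     uint32_t val)
{
	(void)btc;
	printf("write32 reg 0x%x = 0x%x\n", reg, val);
}

static void demo_write_1byte(struct btc_coexist *btc, uint32_t reg,
			     uint8_t val)
{
	(void)btc;
	printf("write8  reg 0x%x = 0x%x\n", reg, val);
}

static void set_coex_table(struct btc_coexist *btc, uint32_t val0x6c0,
			   uint32_t val0x6c4, uint32_t val0x6c8,
			   uint8_t val0x6cc)
{
	/* same four writes as the function above, via the ops table */
	btc->btc_write_4byte(btc, 0x6c0, val0x6c0);
	btc->btc_write_4byte(btc, 0x6c4, val0x6c4);
	btc->btc_write_4byte(btc, 0x6c8, val0x6c8);
	btc->btc_write_1byte(btc, 0x6cc, val0x6cc);
}

int main(void)
{
	struct btc_coexist btc = {
		.btc_write_4byte = demo_write_4byte,
		.btc_write_1byte = demo_write_1byte,
	};

	/* example values only; real tables are chosen per coex type */
	set_coex_table(&btc, 0x55555555, 0x5a5a5a5a, 0xffffff, 0x3);
	return 0;
}
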
@@ -837,28 +837,28 @@ static void btc8821a2ant_coex_table(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c0,
- coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8,
- coex_dm->pre_val0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c0,
- coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8,
- coex_dm->cur_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c0,
+ coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8,
+ coex_dm->pre_val0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c0,
+ coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8,
+ coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -967,9 +967,9 @@ static void btc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
}
@@ -1006,15 +1006,15 @@ static void btc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
@@ -1045,13 +1045,13 @@ static void btc8821a2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
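
The 5-byte PS-TDMA payload does not fit a single %x, so the log above splits it as "0x%x%08x": byte 0 printed on its own, bytes 1 through 4 folded into a zero-padded 32-bit word. In standalone form, with invented parameter bytes:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical PS TDMA parameters (H2C 0x60 payload) */
	uint8_t h2c_parameter[5] = { 0x61, 0x1a, 0x1a, 0x03, 0x10 };

	printf("[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
	       h2c_parameter[0],
	       (unsigned int)(h2c_parameter[1] << 24 |
			      h2c_parameter[2] << 16 |
			      h2c_parameter[3] << 8 |
			      h2c_parameter[4]));
	return 0;
}
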
@@ -1137,20 +1137,20 @@ static void btc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
type = type + 100;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], %s turn %s PS TDMA, type = %d\n",
- (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
- type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+ (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+ type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1472,18 +1472,18 @@ static void btc8821a2ant_action_bt_inquiry(struct btc_coexist *btcoexist)
btc8821a2ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE, 0x0, 0x0);
if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi link process + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi link process + BT Inq/Page!!\n");
btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else if (wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT Inq/Page!!\n");
btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 15);
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 22);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi no-link + BT Inq/Page!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi no-link + BT Inq/Page!!\n");
btc8821a2ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 0);
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
}
@@ -1509,8 +1509,8 @@ static void btc8821a2ant_action_wifi_link_process(struct btc_coexist *btcoexist)
u8tmpa = btcoexist->btc_read_1byte(btcoexist, 0x765);
u8tmpb = btcoexist->btc_read_1byte(btcoexist, 0x76e);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 0x765=0x%x, 0x76e=0x%x\n", u8tmpa, u8tmpb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 0x765=0x%x, 0x76e=0x%x\n", u8tmpa, u8tmpb);
}
static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
@@ -1531,8 +1531,8 @@ static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
/* define the office environment */
if (BTC_RSSI_HIGH(wifi_rssi_state1) && (coex_sta->hid_exist) &&
(coex_sta->a2dp_exist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT HID+A2DP exist!!\n");
btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
@@ -1550,8 +1550,8 @@ static bool btc8821a2ant_action_wifi_idle_process(struct btc_coexist *btcoexist)
return true;
} else if (coex_sta->pan_exist) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi idle process for BT PAN exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi idle process for BT PAN exist!!\n");
btc8821a2ant_dac_swing(btcoexist, NORMAL_EXEC, true, 0x6);
btc8821a2ant_dec_bt_pwr(btcoexist, NORMAL_EXEC, 0);
@@ -1592,8 +1592,8 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC, false, false,
0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi non-connected idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
@@ -1620,8 +1620,8 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1650,8 +1650,8 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btc8821a2ant_limited_rx(btcoexist, NORMAL_EXEC,
false, false, 0x8);
@@ -1679,12 +1679,12 @@ static bool btc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
common =
btc8821a2ant_action_wifi_idle_process(
btcoexist);
@@ -1707,13 +1707,13 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
int result;
u8 retry_count = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], TdmaDurationAdjust()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -1801,11 +1801,11 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], retry_count = %d\n", retry_count);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
- (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+ (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
result = 0;
wait_count++;
@@ -1826,8 +1826,8 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Increase wifi duration!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration */
@@ -1856,8 +1856,8 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -1878,12 +1878,12 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], max Interval = %d\n", max_interval);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1) {
if (tx_pause) {
@@ -2591,9 +2591,9 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->ps_tdma_du_adj_type) {
bool scan = false, link = false, roam = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
- coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+ coex_dm->cur_ps_tdma, coex_dm->ps_tdma_du_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2603,8 +2603,8 @@ static void btc8821a2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->ps_tdma_du_adj_type);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
}
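
TdmaDurationAdjust() is the main piece of adaptive logic running through these hunks: every 2-second tick it reads the BT retry count from BT_Info byte 2, counts clean intervals in up and lossy ones in dn, widens the WiFi TDMA slot after enough consecutive clean ticks (result = 1) and narrows it as soon as retries exceed 3 (result = -1). The sketch below reconstructs only the two branches visible above; the intermediate retry_count <= 3 branch and the per-interval PS-TDMA case table are in the full driver source, and the threshold of 3 clean ticks is illustrative:

#include <stdio.h>

/* Schematic of the up/dn bookkeeping visible in the hunks above. */
static int adjust(int retry_count, int *up, int *dn, int *wait_count)
{
	int result = 0;

	(*wait_count)++;
	if (retry_count == 0) {		/* clean 2 s interval */
		(*up)++;
		if (*dn > 0)
			(*dn)--;
		if (*up >= 3) {		/* enough clean ticks in a row */
			*up = 0;
			*dn = 0;
			result = 1;	/* increase wifi duration */
		}
	} else if (retry_count > 3) {	/* heavy BT retries */
		*up = 0;
		*dn = 0;
		*wait_count = 0;
		result = -1;		/* decrease wifi duration */
	}
	return result;
}

int main(void)
{
	int up = 0, dn = 0, wait = 0, i;
	int retries[] = { 0, 0, 0, 5, 0 };

	for (i = 0; i < 5; i++)
		printf("retry=%d -> result=%d\n", retries[i],
		       adjust(retries[i], &up, &dn, &wait));
	return 0;
}
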
@@ -3389,31 +3389,31 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Manual control!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Manual control!!!\n");
return;
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
btc8821a2ant_coex_under_5g(btcoexist);
return;
}
if (coex_sta->under_ips) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], wifi is under IPS !!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8821a2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8821a2ant_action_bt_inquiry(btcoexist);
return;
}
@@ -3423,8 +3423,8 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], WiFi is under Link Process !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], WiFi is under Link Process !!\n");
btc8821a2ant_action_wifi_link_process(btcoexist);
return;
}
@@ -3436,9 +3436,9 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if ((num_of_wifi_link >= 2) ||
(wifi_link_status & WIFI_P2P_GO_CONNECTED)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
- num_of_wifi_link, wifi_link_status);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "############# [BTCoex], Multi-Port num_of_wifi_link = %d, wifi_link_status = 0x%x\n",
+ num_of_wifi_link, wifi_link_status);
if (bt_link_info->bt_link_exist)
miracast_plus_bt = true;
@@ -3457,75 +3457,75 @@ static void btc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
&miracast_plus_bt);
coex_dm->cur_algorithm = algorithm;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (btc8821a2ant_is_common_action(btcoexist)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant common\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = true;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8821A_2ANT_COEX_ALGO_SCO:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = SCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
btc8821a2ant_action_sco(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
btc8821a2ant_action_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
btc8821a2ant_action_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8821a2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
btc8821a2ant_action_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANHS:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
btc8821a2ant_action_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
btc8821a2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8821a2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
btc8821a2ant_action_hid_a2dp(btcoexist);
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
btc8821a2ant_coex_all_off(btcoexist);
break;
}
@@ -3561,8 +3561,8 @@ void ex_btc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
struct rtl_priv *rtlpriv = btcoexist->adapter;
u8 u1tmp = 0;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], 2Ant Init HW Config!!\n");
/* backup rf 0x1e value */
coex_dm->bt_rf0x1e_backup =
@@ -3629,8 +3629,8 @@ void ex_btc8821a2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Coex Mechanism Init!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8821a2ant_init_coex_dm(btcoexist);
}
@@ -3840,15 +3840,15 @@ void ex_btc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_IPS_ENTER == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS ENTER notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8821a2ant_wifi_off_hw_cfg(btcoexist);
btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8821a2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], IPS LEAVE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
ex_btc8821a2ant_init_hwconfig(btcoexist);
btc8821a2ant_init_coex_dm(btcoexist);
@@ -3861,12 +3861,12 @@ void ex_btc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_LPS_ENABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS ENABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], LPS DISABLE notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3876,11 +3876,11 @@ void ex_btc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_SCAN_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN START notify\n");
} else if (BTC_SCAN_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], SCAN FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], SCAN FINISH notify\n");
}
}
@@ -3889,11 +3889,11 @@ void ex_btc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (BTC_ASSOCIATE_START == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT START notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT START notify\n");
} else if (BTC_ASSOCIATE_FINISH == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], CONNECT FINISH notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], CONNECT FINISH notify\n");
}
}
@@ -3907,11 +3907,11 @@ void ex_btc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 ap_num = 0;
if (BTC_MEDIA_CONNECT == type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA connect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], MEDIA disconnect notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], MEDIA disconnect notify\n");
}
/* only 2.4G we need to inform bt the chnl mask */
@@ -3937,11 +3937,11 @@ void ex_btc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 |
- h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3952,8 +3952,8 @@ void ex_btc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
struct rtl_priv *rtlpriv = btcoexist->adapter;
if (type == BTC_PACKET_DHCP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], DHCP Packet notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], DHCP Packet notify\n");
}
}
@@ -3976,25 +3976,25 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x]\n", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "0x%02x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "0x%02x, ", tmp_buf[i]);
}
}
if (btcoexist->manual_control) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
return;
}
@@ -4016,8 +4016,8 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
/* BT info is responded by BT FW and BT RF REG 0x3C !=
* 0x01 => Need to switch BT TRx Mask
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x01\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Switch BT TRx Mask since BT RF REG 0x3C != 0x01\n");
btcoexist->btc_set_bt_reg(btcoexist, BTC_BT_REG_RF,
0x3c, 0x01);
}
@@ -4039,31 +4039,31 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if (!btcoexist->manual_control && !wifi_under_5g) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info = 0x%x!!\n",
- coex_sta->bt_info_ext);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info = 0x%x!!\n",
+ coex_sta->bt_info_ext);
if ((coex_sta->bt_info_ext & BIT(3))) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3=1, wifi_connected=%d\n",
- wifi_connected);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3=1, wifi_connected=%d\n",
+ wifi_connected);
if (wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8821a2ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BT ext info bit3=0, wifi_connected=%d\n",
- wifi_connected);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BT ext info bit3=0, wifi_connected=%d\n",
+ wifi_connected);
/* BT already NOT ignore Wlan active, do nothing
* here.
*/
if (!wifi_connected) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_LOUD,
"[BTCoex], BT ext info bit3 check, set BT to ignore Wlan active!!\n");
btc8821a2ant_ignore_wlan_act(
btcoexist, FORCE_EXEC, true);
@@ -4117,26 +4117,26 @@ void ex_btc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8821A_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_2ANT_B_CONNECTION) {
/* connection exists but no busy */
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_CON_IDLE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8821A_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8821A_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_SCO_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8821A_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_ACL_BUSY;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8821A_2ANT_BT_STATUS_MAX;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((coex_dm->bt_status == BT_8821A_2ANT_BT_STATUS_ACL_BUSY) ||
@@ -4161,8 +4161,8 @@ void ex_btc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Halt notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Halt notify\n");
btc8821a2ant_wifi_off_hw_cfg(btcoexist);
btc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -4173,14 +4173,14 @@ void ex_btc8821a2ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD, "[BTCoex], Pnp notify\n");
if (pnp_state == BTC_WIFI_PNP_SLEEP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to SLEEP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to SLEEP\n");
} else if (pnp_state == BTC_WIFI_PNP_WAKE_UP) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Pnp notify to WAKE UP\n");
ex_btc8821a2ant_init_hwconfig(btcoexist);
btc8821a2ant_init_coex_dm(btcoexist);
btc8821a2ant_query_bt_info(btcoexist);
@@ -4191,8 +4191,8 @@ void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
{
struct rtl_priv *rtlpriv = btcoexist->adapter;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], ==========================Periodical===========================\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], ==========================Periodical===========================\n");
if (coex_sta->dis_ver_info_cnt <= 5) {
coex_sta->dis_ver_info_cnt += 1;
@@ -4200,8 +4200,8 @@ void ex_btc8821a2ant_periodical(struct btc_coexist *btcoexist)
/* Antenna config to set 0x765 = 0x0 (GNT_BT control by
* PTA) after initial
*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Set GNT_BT control by PTA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Set GNT_BT control by PTA\n");
btc8821a2ant_set_ant_path(btcoexist,
BTC_ANT_WIFI_AT_MAIN, false, false);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index 2b140c1e8e8d..2c05369b79e4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -129,8 +129,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
if (rtlphy->current_channel != 0)
chnl = rtlphy->current_channel;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "%s:%d\n", __func__, chnl);
return chnl;
}
@@ -250,16 +250,16 @@ bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code,
if (!wait_ms)
return true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo wait req_num=%d wait=%ld\n", req_num, wait_ms);
if (in_interrupt())
return false;
if (wait_for_completion_timeout(&btcoexist->bt_mp_comp,
msecs_to_jiffies(wait_ms)) == 0) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "btmpinfo wait (req_num=%d) timeout\n", req_num);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "btmpinfo wait (req_num=%d) timeout\n", req_num);
return false; /* timeout */
}
@@ -278,14 +278,15 @@ static void halbtc_leave_lps(struct btc_coexist *btcoexist)
&ap_enable);
if (ap_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "%s()<--dont leave lps under AP mode\n", __func__);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "%s()<--dont leave lps under AP mode\n", __func__);
return;
}
btcoexist->bt_info.bt_ctrl_lps = true;
btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_leave(rtlpriv->mac80211.hw, false);
}
static void halbtc_enter_lps(struct btc_coexist *btcoexist)
@@ -299,14 +300,15 @@ static void halbtc_enter_lps(struct btc_coexist *btcoexist)
&ap_enable);
if (ap_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "%s()<--dont enter lps under AP mode\n", __func__);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "%s()<--dont enter lps under AP mode\n", __func__);
return;
}
btcoexist->bt_info.bt_ctrl_lps = true;
btcoexist->bt_info.bt_lps_on = true;
- rtl_lps_enter(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_enter(rtlpriv->mac80211.hw, false);
}
static void halbtc_normal_lps(struct btc_coexist *btcoexist)
@@ -317,7 +319,8 @@ static void halbtc_normal_lps(struct btc_coexist *btcoexist)
if (btcoexist->bt_info.bt_ctrl_lps) {
btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_leave(rtlpriv->mac80211.hw, false);
btcoexist->bt_info.bt_ctrl_lps = false;
}
}
@@ -328,7 +331,8 @@ static void halbtc_pre_normal_lps(struct btc_coexist *btcoexist)
if (btcoexist->bt_info.bt_ctrl_lps) {
btcoexist->bt_info.bt_lps_on = false;
- rtl_lps_leave(rtlpriv->mac80211.hw);
+ /* FIXME: Context is unclear. Is it allowed to block? */
+ rtl_lps_leave(rtlpriv->mac80211.hw, false);
}
}
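
rtl_lps_enter()/rtl_lps_leave() now take a second boolean argument. A minimal sketch of the prototypes implied by the call sites above — the parameter name is an assumption, since the declaration is not part of this hunk:

/*
 * Hypothetical prototypes inferred from the new call sites; the real
 * parameter name is not visible in this diff.
 */
void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block);
void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block);

The btcoex callbacks above may run in atomic context (hence the FIXME notes) and pass false, so the LPS transition can be deferred; process-context callers such as rtl_op_suspend() in core.c below pass true.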
@@ -1368,11 +1372,11 @@ bool exhalbtc_bind_bt_coex_withadapter(void *adapter)
btcoexist->board_info.tfbga_package = true;
if (btcoexist->board_info.tfbga_package)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Package Type = TFBGA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Package Type = TFBGA\n");
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[BTCoex], Package Type = Non-TFBGA\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[BTCoex], Package Type = Non-TFBGA\n");
btcoexist->board_info.rfe_type = rtl_get_hwpg_rfe_type(rtlpriv);
btcoexist->board_info.ant_div_cfg = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
index b8c4536af6c0..4641999f3fe9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c
@@ -191,7 +191,7 @@ void rtl_btc_init_hw_config(struct rtl_priv *rtlpriv)
u8 bt_exist;
bt_exist = rtl_get_hwpg_bt_exist(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
"%s, bt_exist is %d\n", __func__, bt_exist);
if (!btcoexist)
@@ -383,8 +383,8 @@ void rtl_btc_btmpinfo_notify(struct rtl_priv *rtlpriv, u8 *tmp_buf, u8 length)
break;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "btmpinfo complete req_num=%d\n", seq);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "btmpinfo complete req_num=%d\n", seq);
complete(&btcoexist->bt_mp_comp);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index bf0e0bb1f99b..7aa28da39409 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -43,14 +43,14 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[WCAMI], target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The Key ID is %d\n", entry_no);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE %x: %x\n",
- rtlpriv->cfg->maps[RWCAM], target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[WCAMI], target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The Key ID is %d\n", entry_no);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE %x: %x\n",
+ rtlpriv->cfg->maps[RWCAM], target_command);
} else if (entry_i == 1) {
@@ -64,10 +64,10 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
} else {
@@ -83,15 +83,15 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
target_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A4: %x\n", target_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "WRITE A0: %x\n", target_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A4: %x\n", target_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "WRITE A0: %x\n", target_command);
}
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "after set key, usconfig:%x\n", us_config);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "after set key, usconfig:%x\n", us_config);
}
u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
@@ -101,14 +101,14 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 us_config;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
- ul_entry_idx, ul_key_id, ul_enc_alg,
- ul_default_key, mac_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, ulUseDK=%x MacAddr %pM\n",
+ ul_entry_idx, ul_key_id, ul_enc_alg,
+ ul_default_key, mac_addr);
if (ul_key_id == TOTAL_CAM_ENTRY) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ulKeyId exceed!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ulKeyId exceed!\n");
return 0;
}
@@ -120,7 +120,7 @@ u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
(u8 *)key_content, us_config);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "end\n");
return 1;
@@ -133,7 +133,7 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
u32 ul_command;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "key_idx:%d\n", ul_key_id);
ul_command = ul_key_id * CAM_CONTENT_COUNT;
ul_command = ul_command | BIT(31) | BIT(16);
@@ -141,10 +141,10 @@ int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, 0);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
return 0;
@@ -195,10 +195,10 @@ void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
EXPORT_SYMBOL(rtl_cam_mark_invalid);
@@ -245,12 +245,10 @@ void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A4: %x\n",
- ul_content);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "rtl_cam_empty_entry(): WRITE A0: %x\n",
- ul_command);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A4: %x\n", __func__, ul_content);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "%s: WRITE A0: %x\n", __func__, ul_command);
}
}
@@ -313,8 +311,8 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
/* Remove from HW Security CAM */
eth_zero_addr(rtlpriv->sec.hwsec_cam_sta_addr[i]);
rtlpriv->sec.hwsec_cam_bitmap &= ~(BIT(0) << i);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "&&&&&&&&&del entry %d\n", i);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "&&&&&&&&&del entry %d\n", i);
}
}
return;
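
Several of the cam.c conversions above also replace hard-coded function names in format strings with "%s"/__func__. A sketch of the pattern, taken from rtl_cam_delete_one_entry():

/* __func__ keeps the message accurate if the function is renamed and
 * lets the compiler share identical format strings across call sites.
 */
rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
	"%s: WRITE A0: %x\n", __func__, ul_command);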
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 4dd82c6052f0..a7259dbc953d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -76,8 +76,8 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
struct rtl_priv *rtlpriv = rtl_priv(hw);
int err;
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "Firmware callback routine entered!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
@@ -214,8 +214,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
u8 retry_limit = 0x30;
if (mac->vif) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "vif has been set!! mac->vif = 0x%p\n", mac->vif);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "vif has been set!! mac->vif = 0x%p\n", mac->vif);
return -EOPNOTSUPP;
}
@@ -227,19 +227,19 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
mac->p2p = P2P_ROLE_CLIENT;
- /*fall through*/
+ fallthrough;
case NL80211_IFTYPE_STATION:
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_STATION\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_STATION\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
rtlpriv->cfg->maps[RTL_IBSS_INT_MASKS]);
}
break;
case NL80211_IFTYPE_ADHOC:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_ADHOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_ADHOC\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -254,10 +254,10 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_P2P_GO:
mac->p2p = P2P_ROLE_GO;
- /*fall through*/
+ fallthrough;
case NL80211_IFTYPE_AP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_AP\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_AP\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -271,8 +271,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
retry_limit = 0x07;
break;
case NL80211_IFTYPE_MESH_POINT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "NL80211_IFTYPE_MESH_POINT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "NL80211_IFTYPE_MESH_POINT\n");
mac->link_state = MAC80211_LINKED;
rtlpriv->cfg->ops->set_bcn_reg(hw);
@@ -293,8 +293,8 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
}
if (mac->p2p) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p role %x\n", vif->type);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p role %x\n", vif->type);
mac->basic_rates = 0xff0;/*disable cck rate for p2p*/
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
(u8 *)(&mac->basic_rates));
@@ -360,8 +360,8 @@ static int rtl_op_change_interface(struct ieee80211_hw *hw,
vif->type = new_type;
vif->p2p = p2p;
ret = rtl_op_add_interface(hw, vif);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "p2p %x\n", p2p);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "p2p %x\n", p2p);
return ret;
}
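
The comment-style /*fall through*/ annotations above become the fallthrough pseudo-keyword (include/linux/compiler_attributes.h), which lets -Wimplicit-fallthrough verify that each fall-through is intentional. A minimal sketch of the idiom:

switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
	mac->p2p = P2P_ROLE_CLIENT;
	fallthrough;	/* a P2P client is also a station */
case NL80211_IFTYPE_STATION:
	/* station-mode setup shared with the P2P client case */
	break;
default:
	break;
}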
@@ -435,8 +435,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
memset(mask, 0, MAX_WOL_BIT_MASK_SIZE);
if (patterns[i].pattern_len < 0 ||
patterns[i].pattern_len > MAX_WOL_PATTERN_SIZE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_WARNING,
- "Pattern[%d] is too long\n", i);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_WARNING,
+ "Pattern[%d] is too long\n", i);
continue;
}
pattern_os = patterns[i].pattern;
@@ -515,8 +515,8 @@ static void _rtl_add_wowlan_patterns(struct ieee80211_hw *hw,
"pattern to hw\n", content, len);
/* 3. calculate crc */
rtl_pattern.crc = _calculate_wol_pattern_crc(content, len);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "CRC_Remainder = 0x%x\n", rtl_pattern.crc);
/* 4. write crc & mask_for_hw to hw */
rtlpriv->cfg->ops->add_wowlan_pattern(hw, &rtl_pattern, i);
@@ -531,7 +531,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
if (WARN_ON(!wow))
return -EINVAL;
@@ -544,7 +544,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
rtlhal->driver_is_goingto_unload = true;
rtlhal->enter_pnp_sleep = true;
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
rtl_op_stop(hw);
device_set_wakeup_enable(wiphy_dev(hw->wiphy), true);
return 0;
@@ -557,7 +557,7 @@ static int rtl_op_resume(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
time64_t now;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "\n");
rtlhal->driver_is_goingto_unload = false;
rtlhal->enter_pnp_sleep = false;
rtlhal->wake_from_pnp_sleep = true;
@@ -588,8 +588,8 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&rtlpriv->locks.conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /* BIT(2)*/
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n");
}
/*For IPS */
@@ -632,9 +632,9 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
+ hw->conf.long_frame_max_tx_count);
/* brought up everything changes (changed == ~0) indicates first
* open, so use our default value instead of that of wiphy.
*/
@@ -809,13 +809,13 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_ALLMULTI) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive multicast frame\n");
} else {
mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
rtlpriv->cfg->maps[MAC_RCR_AB]);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive multicast frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive multicast frame\n");
}
update_rcr = true;
}
@@ -823,12 +823,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_FCSFAIL) {
if (*new_flags & FIF_FCSFAIL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive FCS error frame\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive FCS error frame\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive FCS error frame\n");
}
if (!update_rcr)
update_rcr = true;
@@ -855,12 +855,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (*new_flags & FIF_CONTROL) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive control frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive control frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive control frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -869,12 +869,12 @@ static void rtl_op_configure_filter(struct ieee80211_hw *hw,
if (changed_flags & FIF_OTHER_BSS) {
if (*new_flags & FIF_OTHER_BSS) {
mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Enable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Enable receive other BSS's frame.\n");
} else {
mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "Disable receive other BSS's frame.\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "Disable receive other BSS's frame.\n");
}
if (!update_rcr)
update_rcr = true;
@@ -923,7 +923,7 @@ static int rtl_op_sta_add(struct ieee80211_hw *hw,
sta->supp_rates[0] &= 0xfffffff0;
memcpy(sta_entry->mac_addr, sta->addr, ETH_ALEN);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
"Add sta addr is %pM\n", sta->addr);
rtlpriv->cfg->ops->update_rate_tbl(hw, sta, 0, true);
}
@@ -939,8 +939,8 @@ static int rtl_op_sta_remove(struct ieee80211_hw *hw,
struct rtl_sta_info *sta_entry;
if (sta) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "Remove sta addr is %pM\n", sta->addr);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "Remove sta addr is %pM\n", sta->addr);
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
sta_entry->wireless_mode = 0;
sta_entry->ratr_index = 0;
@@ -988,8 +988,8 @@ static int rtl_op_conf_tx(struct ieee80211_hw *hw,
int aci;
if (queue >= AC_MAX) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "queue number %d is incorrect!\n", queue);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "queue number %d is incorrect!\n", queue);
return -EINVAL;
}
@@ -1034,8 +1034,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
(changed & BSS_CHANGED_BEACON_ENABLED &&
bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 0) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_BEACON_ENABLED\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_BEACON_ENABLED\n");
/*start hw beacon interrupt. */
/*rtlpriv->cfg->ops->set_bcn_reg(hw); */
@@ -1052,8 +1052,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if ((changed & BSS_CHANGED_BEACON_ENABLED &&
!bss_conf->enable_beacon)) {
if (mac->beacon_enabled == 1) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "ADHOC DISABLE BEACON\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "ADHOC DISABLE BEACON\n");
mac->beacon_enabled = 0;
rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
@@ -1062,8 +1062,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
if (changed & BSS_CHANGED_BEACON_INT) {
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
- "BSS_CHANGED_BEACON_INT\n");
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_TRACE,
+ "BSS_CHANGED_BEACON_INT\n");
mac->beacon_interval = bss_conf->beacon_int;
rtlpriv->cfg->ops->set_bcn_intv(hw);
}
@@ -1102,8 +1102,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
goto out;
}
- RT_TRACE(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
- "send PS STATIC frame\n");
+ rtl_dbg(rtlpriv, COMP_EASY_CONCURRENT, DBG_LOUD,
+ "send PS STATIC frame\n");
if (rtlpriv->dm.supp_phymode_switch) {
if (sta->ht_cap.ht_supported)
rtl_send_smps_action(hw, sta,
@@ -1143,15 +1143,15 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
HW_VAR_KEEP_ALIVE,
(u8 *)(&keep_alive));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_ASSOC\n");
} else {
struct cfg80211_bss *bss = NULL;
mstatus = RT_MEDIA_DISCONNECT;
if (mac->link_state == MAC80211_LINKED)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
if (ppsc->p2p_ps_info.p2p_ps_mode > P2P_PS_NONE)
rtl_p2p_ps_cmd(hw, P2P_PS_DISABLE);
mac->link_state = MAC80211_NOLINK;
@@ -1161,14 +1161,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
IEEE80211_BSS_TYPE_ESS,
IEEE80211_PRIVACY_OFF);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid = %pMF\n", mac->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid = %pMF\n", mac->bssid);
if (bss) {
cfg80211_unlink_bss(hw->wiphy, bss);
cfg80211_put_bss(hw->wiphy, bss);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "cfg80211_unlink !!\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "cfg80211_unlink !!\n");
}
eth_zero_addr(mac->bssid);
@@ -1179,8 +1179,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->chk_switch_dmdp)
rtlpriv->cfg->ops->chk_switch_dmdp(hw);
}
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "BSS_CHANGED_UN_ASSOC\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "BSS_CHANGED_UN_ASSOC\n");
}
rtlpriv->cfg->ops->set_network_type(hw, vif->type);
/* For FW LPS:
@@ -1198,14 +1198,14 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_CTS_PROT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_CTS_PROT\n");
mac->use_cts_protect = bss_conf->use_cts_prot;
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
+ "BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
bss_conf->use_short_preamble);
mac->short_preamble = bss_conf->use_short_preamble;
@@ -1214,8 +1214,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_SLOT) {
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_ERP_SLOT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_ERP_SLOT\n");
if (bss_conf->use_short_slot)
mac->slot_time = RTL_SLOT_TIME_9;
@@ -1229,8 +1229,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_HT) {
struct ieee80211_sta *sta = NULL;
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "BSS_CHANGED_HT\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "BSS_CHANGED_HT\n");
rcu_read_lock();
sta = ieee80211_find_sta(vif, (u8 *)bss_conf->bssid);
@@ -1261,8 +1261,8 @@ static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
(u8 *)bss_conf->bssid);
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
- "bssid: %pM\n", bss_conf->bssid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_DMESG,
+ "bssid: %pM\n", bss_conf->bssid);
mac->vendor = PEER_UNKNOWN;
memcpy(mac->bssid, bss_conf->bssid, ETH_ALEN);
@@ -1393,27 +1393,27 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_TX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
return rtl_tx_agg_start(hw, vif, sta, tid, ssn);
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid);
return rtl_tx_agg_stop(hw, vif, sta, tid);
case IEEE80211_AMPDU_TX_OPERATIONAL:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid);
rtl_tx_agg_oper(hw, sta, tid);
break;
case IEEE80211_AMPDU_RX_START:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_START:TID:%d\n", tid);
return rtl_rx_agg_start(hw, sta, tid);
case IEEE80211_AMPDU_RX_STOP:
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
- "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_TRACE,
+ "IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid);
return rtl_rx_agg_stop(hw, sta, tid);
default:
pr_err("IEEE80211_AMPDU_ERR!!!!:\n");
@@ -1429,7 +1429,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = true;
if (rtlpriv->link_info.higher_busytraffic) {
mac->skip_scan = true;
@@ -1448,7 +1448,7 @@ static void rtl_op_sw_scan_start(struct ieee80211_hw *hw,
}
if (mac->link_state == MAC80211_LINKED) {
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, true);
mac->link_state = MAC80211_LINKED_SCANNING;
} else {
rtl_ips_nic_on(hw);
@@ -1467,7 +1467,7 @@ static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD, "\n");
mac->act_scanning = false;
mac->skip_scan = false;
@@ -1517,8 +1517,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->btcoexist.btc_info.in_4way = false;
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not open hw encryption\n");
return -ENOSPC; /*User disabled HW-crypto */
}
/* To support IBSS, use sw-crypto for GTK */
@@ -1526,10 +1526,10 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
vif->type == NL80211_IFTYPE_MESH_POINT) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
return -ENOSPC;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "%s hardware based encryption for keyidx: %d, mac: %pM\n",
- cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
- sta ? sta->addr : bcast_addr);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "%s hardware based encryption for keyidx: %d, mac: %pM\n",
+ cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
+ sta ? sta->addr : bcast_addr);
rtlpriv->sec.being_setkey = true;
rtl_ips_nic_on(hw);
mutex_lock(&rtlpriv->locks.conf_mutex);
@@ -1538,28 +1538,28 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
key_type = WEP40_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP40\n");
break;
case WLAN_CIPHER_SUITE_WEP104:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:WEP104\n");
key_type = WEP104_ENCRYPTION;
break;
case WLAN_CIPHER_SUITE_TKIP:
key_type = TKIP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:TKIP\n");
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = AESCCMP_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CCMP\n");
break;
case WLAN_CIPHER_SUITE_AES_CMAC:
/* HW doesn't support CMAC encryption,
* use software CMAC encryption
*/
key_type = AESCMAC_ENCRYPTION;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "HW don't support CMAC encryption, use software CMAC encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "alg:CMAC\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "HW don't support CMAC encryption, use software CMAC encryption\n");
err = -EOPNOTSUPP;
goto out_unlock;
default:
@@ -1605,9 +1605,9 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type == WEP104_ENCRYPTION))
wep_only = true;
rtlpriv->sec.pairwise_enc_algorithm = key_type;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
- key_type);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set enable_hw_sec, key_type:%x(OPEN:0 WEP40:1 TKIP:2 AES:4 WEP104:5)\n",
+ key_type);
rtlpriv->cfg->ops->enable_hw_sec(hw);
}
}
@@ -1615,8 +1615,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
if (wep_only) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set WEP(group/pairwise) key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set WEP(group/pairwise) key\n");
/* Pairwise key with an assigned MAC address. */
rtlpriv->sec.pairwise_enc_algorithm = key_type;
rtlpriv->sec.group_enc_algorithm = key_type;
@@ -1626,8 +1626,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
eth_zero_addr(mac_addr);
} else if (group_key) { /* group key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
/* group key */
rtlpriv->sec.group_enc_algorithm = key_type;
/*set local buf about group key. */
@@ -1636,8 +1636,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
rtlpriv->sec.key_len[key_idx] = key->keylen;
memcpy(mac_addr, bcast_addr, ETH_ALEN);
} else { /* pairwise key */
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set pairwise key\n");
if (!sta) {
WARN_ONCE(true,
"rtlwifi: pairwise key without mac_addr\n");
@@ -1669,8 +1669,8 @@ static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
break;
case DISABLE_KEY:
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "disable key delete one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "disable key delete one entry\n");
/*set local buf about wep key. */
if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
@@ -1718,9 +1718,9 @@ static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
rtlpriv->rfkill.rfkill_state = radio_state;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "wireless radio switch turned %s\n",
- radio_state ? "on" : "off");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "wireless radio switch turned %s\n",
+ radio_state ? "on" : "off");
blocked = !rtlpriv->rfkill.rfkill_state;
wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
@@ -1765,26 +1765,27 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
do {
cfg_cmd = pwrcfgcmd[ary_idx];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
- GET_PWR_CFG_OFFSET(cfg_cmd),
- GET_PWR_CFG_CUT_MASK(cfg_cmd),
- GET_PWR_CFG_FAB_MASK(cfg_cmd),
- GET_PWR_CFG_INTF_MASK(cfg_cmd),
- GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
- GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: offset(%#x),cut_msk(%#x), famsk(%#x), interface_msk(%#x), base(%#x), cmd(%#x), msk(%#x), value(%#x)\n",
+ __func__,
+ GET_PWR_CFG_OFFSET(cfg_cmd),
+ GET_PWR_CFG_CUT_MASK(cfg_cmd),
+ GET_PWR_CFG_FAB_MASK(cfg_cmd),
+ GET_PWR_CFG_INTF_MASK(cfg_cmd),
+ GET_PWR_CFG_BASE(cfg_cmd), GET_PWR_CFG_CMD(cfg_cmd),
+ GET_PWR_CFG_MASK(cfg_cmd), GET_PWR_CFG_VALUE(cfg_cmd));
if ((GET_PWR_CFG_FAB_MASK(cfg_cmd)&faversion) &&
(GET_PWR_CFG_CUT_MASK(cfg_cmd)&cut_version) &&
(GET_PWR_CFG_INTF_MASK(cfg_cmd)&interface_type)) {
switch (GET_PWR_CFG_CMD(cfg_cmd)) {
case PWR_CMD_READ:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_READ\n");
break;
case PWR_CMD_WRITE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "%s(): PWR_CMD_WRITE\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s(): PWR_CMD_WRITE\n", __func__);
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
/*Read the value from system register*/
@@ -1797,7 +1798,7 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
rtl_write_byte(rtlpriv, offset, value);
break;
case PWR_CMD_POLLING:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"rtl_hal_pwrseqcmdparsing(): PWR_CMD_POLLING\n");
polling_bit = false;
offset = GET_PWR_CFG_OFFSET(cfg_cmd);
@@ -1818,8 +1819,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
} while (!polling_bit);
break;
case PWR_CMD_DELAY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_DELAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_DELAY\n", __func__);
if (GET_PWR_CFG_VALUE(cfg_cmd) ==
PWRSEQ_DELAY_US)
udelay(GET_PWR_CFG_OFFSET(cfg_cmd));
@@ -1827,8 +1828,8 @@ bool rtl_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
mdelay(GET_PWR_CFG_OFFSET(cfg_cmd));
break;
case PWR_CMD_END:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "rtl_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "%s: PWR_CMD_END\n", __func__);
return true;
default:
WARN_ONCE(true,
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
index 55db71c766fe..901cdfe3723c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
@@ -8,26 +8,6 @@
#include <linux/vmalloc.h>
#ifdef CONFIG_RTLWIFI_DEBUG
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...)
-{
- if (unlikely((comp & rtlpriv->cfg->mod_params->debug_mask) &&
- level <= rtlpriv->cfg->mod_params->debug_level)) {
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- pr_info(":<%lx> %pV", in_interrupt(), &vaf);
-
- va_end(args);
- }
-}
-EXPORT_SYMBOL_GPL(_rtl_dbg_trace);
-
void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...)
{
@@ -404,8 +384,8 @@ static ssize_t rtl_debugfs_set_write_rfreg(struct file *filp,
&path, &addr, &bitmask, &data);
if (num != 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "Format is <path> <addr> <mask> <data>\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Format is <path> <addr> <mask> <data>\n");
return count;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.h b/drivers/net/wireless/realtek/rtlwifi/debug.h
index 69f169d4d4ae..1c0bcf8ec1a9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/debug.h
+++ b/drivers/net/wireless/realtek/rtlwifi/debug.h
@@ -149,10 +149,6 @@ enum dbgp_flag_e {
struct rtl_priv;
__printf(4, 5)
-void _rtl_dbg_trace(struct rtl_priv *rtlpriv, u64 comp, int level,
- const char *fmt, ...);
-
-__printf(4, 5)
void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *fmt, ...);
@@ -160,8 +156,8 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
const char *titlestring,
const void *hexdata, int hexdatalen);
-#define RT_TRACE(rtlpriv, comp, level, fmt, ...) \
- _rtl_dbg_trace(rtlpriv, comp, level, \
+#define rtl_dbg(rtlpriv, comp, level, fmt, ...) \
+ _rtl_dbg_print(rtlpriv, comp, level, \
fmt, ##__VA_ARGS__)
#define RTPRINT(rtlpriv, dbgtype, dbgflag, fmt, ...) \
@@ -177,9 +173,9 @@ void _rtl_dbg_print_data(struct rtl_priv *rtlpriv, u64 comp, int level,
struct rtl_priv;
__printf(4, 5)
-static inline void RT_TRACE(struct rtl_priv *rtlpriv,
- u64 comp, int level,
- const char *fmt, ...)
+static inline void rtl_dbg(struct rtl_priv *rtlpriv,
+ u64 comp, int level,
+ const char *fmt, ...)
{
}
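
With RT_TRACE removed, every call site routes through the rtl_dbg() macro shown above: _rtl_dbg_print() when CONFIG_RTLWIFI_DEBUG is set, an empty static inline otherwise. A usage sketch — the mask/level gate is taken from the removed _rtl_dbg_trace() body and is presumed unchanged in _rtl_dbg_print():

/* Emitted only if COMP_SEC is set in the module's debug_mask and
 * DBG_LOUD is within debug_level; compiles away entirely when
 * CONFIG_RTLWIFI_DEBUG is disabled.
 */
rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, "WRITE %x: %x\n",
	rtlpriv->cfg->maps[WCAMI], target_content);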
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index cef9f2a9303b..2e945554ed6d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -120,8 +120,8 @@ void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
const u32 efuse_len =
rtlpriv->cfg->maps[EFUSE_REAL_CONTENT_SIZE];
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
- address, value);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "Addr=%x Data =%x\n",
+ address, value);
if (address < efuse_len) {
rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
@@ -211,9 +211,9 @@ void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
u8 efuse_usage;
if ((_offset + _size_byte) > rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "read_efuse(): Invalid offset(%#x) with read bytes(%#x)!!\n",
- _offset, _size_byte);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: Invalid offset(%#x) with read bytes(%#x)!!\n",
+ __func__, _offset, _size_byte);
return;
}
@@ -376,9 +376,9 @@ bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
(EFUSE_MAX_SIZE - rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN]))
result = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_shadow_update_chk(): totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
- totalbytes, hdr_num, words_need, efuse_used);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "%s: totalbytes(%#x), hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
+ __func__, totalbytes, hdr_num, words_need, efuse_used);
return result;
}
@@ -416,7 +416,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
u8 word_en = 0x0F;
u8 first_pg = false;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
if (!efuse_shadow_update_chk(hw)) {
efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
@@ -424,8 +424,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse out of capacity!!\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse out of capacity!!\n");
return false;
}
efuse_power_switch(hw, true, true);
@@ -464,8 +464,8 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
tmpdata)) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "PG section(%#x) fail!!\n", offset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "PG section(%#x) fail!!\n", offset);
break;
}
}
@@ -478,7 +478,7 @@ bool efuse_shadow_update(struct ieee80211_hw *hw)
&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD, "\n");
return true;
}
@@ -616,8 +616,8 @@ static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmpidx = 0;
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "Addr = %x Data=%x\n", addr, data);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "Addr = %x Data=%x\n", addr, data);
rtl_write_byte(rtlpriv,
rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
@@ -996,8 +996,8 @@ static int efuse_pg_packet_write(struct ieee80211_hw *hw,
if (efuse_addr >= (EFUSE_MAX_SIZE -
rtlpriv->cfg->maps[EFUSE_OOB_PROTECT_BYTES_LEN])) {
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "efuse_addr(%#x) Out of size!!\n", efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "efuse_addr(%#x) Out of size!!\n", efuse_addr);
}
return true;
@@ -1037,8 +1037,8 @@ static u8 enable_efuse_data_write(struct ieee80211_hw *hw,
u8 tmpdata[8];
memset(tmpdata, 0xff, PGPKT_DATA_SIZE);
- RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
- "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
+ rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
+ "word_en = %x efuse_addr=%x\n", word_en, efuse_addr);
if (!(word_en & BIT(0))) {
tmpaddr = start_addr;
@@ -1240,11 +1240,11 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != params[0]) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1255,30 +1255,30 @@ int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
rtlefuse->eeprom_did = *(u16 *)&hwinfo[params[2]];
rtlefuse->eeprom_svid = *(u16 *)&hwinfo[params[3]];
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[params[4]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[params[5] + i];
*((u16 *)(&rtlefuse->dev_addr[i])) = usvalue;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
rtlefuse->eeprom_channelplan = *&hwinfo[params[6]];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[params[7]];
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *&hwinfo[params[8]];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan to world wide 13 */
rtlefuse->channel_plan = params[9];
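
Note also that where a format string used to hard-code the function name (e.g. "read_efuse(): ..."), the converted messages print it via "%s" and __func__, so the text stays correct if the function is ever renamed. A one-line sketch of the pattern (hypothetical message, not from this patch):

	rtl_dbg(rtlpriv, COMP_EFUSE, DBG_LOUD,
		"%s: offset %#x out of range\n", __func__, offset);
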
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 25335bd2873b..3776495fd9d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -204,8 +204,8 @@ static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -254,8 +254,8 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
return;
if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "PCI(Bridge) UNKNOWN\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "PCI(Bridge) UNKNOWN\n");
return;
}
@@ -271,10 +271,10 @@ static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
u_pcibridge_aspmsetting);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PlatformEnableASPM(): Write reg[%x] = %x\n",
- (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
- u_pcibridge_aspmsetting);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PlatformEnableASPM(): Write reg[%x] = %x\n",
+ (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
+ u_pcibridge_aspmsetting);
udelay(50);
@@ -331,11 +331,11 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
list) {
tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "pcipriv->ndis_adapter.funcnumber %x\n",
pcipriv->ndis_adapter.funcnumber);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "tpcipriv->ndis_adapter.funcnumber %x\n",
tpcipriv->ndis_adapter.funcnumber);
if (pcipriv->ndis_adapter.busnumber ==
@@ -350,8 +350,8 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "find_buddy_priv %d\n", find_buddy_priv);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "find_buddy_priv %d\n", find_buddy_priv);
if (find_buddy_priv)
*buddy_priv = tpriv;
@@ -388,8 +388,8 @@ static void rtl_pci_parse_configuration(struct pci_dev *pdev,
pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
- pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
+ pcipriv->ndis_adapter.linkctrl_reg);
pci_read_config_byte(pdev, 0x98, &tmp);
tmp |= BIT(4);
@@ -547,21 +547,20 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
ring->idx = (ring->idx + 1) % ring->entries;
skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->
- get_desc(hw, (u8 *)entry, true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
/* remove early mode header */
if (rtlpriv->rtlhal.earlymode_enable)
skb_pull(skb, EM_HDR_LEN);
- RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
- "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
- ring->idx,
- skb_queue_len(&ring->queue),
- *(u16 *)(skb->data + 22));
+ rtl_dbg(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
+ "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
+ ring->idx,
+ skb_queue_len(&ring->queue),
+ *(u16 *)(skb->data + 22));
if (prio == TXCMD_QUEUE) {
dev_kfree_skb(skb);
@@ -608,10 +607,10 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
}
if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
- prio, ring->idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
+ prio, ring->idx,
+ skb_queue_len(&ring->queue));
ieee80211_wake_queue(hw, skb_get_queue_mapping(skb));
}
@@ -622,7 +621,7 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
rtlpriv->link_info.num_rx_inperiod > 2)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
@@ -646,10 +645,10 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
remap:
/* just set skb->cb to mapping addr for pci_unmap_single use */
*((dma_addr_t *)skb->cb) =
- pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ dma_map_single(&rtlpci->pdev->dev, skb_tail_pointer(skb),
+ rtlpci->rxbuffersize, DMA_FROM_DEVICE);
bufferaddress = *((dma_addr_t *)skb->cb);
- if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
+ if (dma_mapping_error(&rtlpci->pdev->dev, bufferaddress))
return 0;
rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
@@ -773,8 +772,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
* AAAAAAttention !!!
* We can NOT access 'skb' before 'pci_unmap_single'
*/
- pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev, *((dma_addr_t *)skb->cb),
+ rtlpci->rxbuffersize, DMA_FROM_DEVICE);
/* get a new skb - if fail, old one will be reused */
new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
@@ -801,9 +800,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
skb_reserve(skb, stats.rx_drvinfo_size +
stats.rx_bufshift);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "skb->end - skb->tail = %d, len is %d\n",
- skb->end - skb->tail, len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "skb->end - skb->tail = %d, len is %d\n",
+ skb->end - skb->tail, len);
dev_kfree_skb_any(skb);
goto new_trx_end;
}
@@ -875,7 +874,7 @@ new_trx_end:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
rtlpriv->link_info.num_rx_inperiod > 2)
- rtl_lps_leave(hw);
+ rtl_lps_leave(hw, false);
skb = new_skb;
no_new:
if (rtlpriv->use_new_trx_flow) {
@@ -925,67 +924,67 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
/*<1> beacon related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon ok interrupt!\n");
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_TBDER]))
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "beacon err interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "beacon err interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BDOK])
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BCNINT]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "prepare beacon for interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "prepare beacon for interrupt!\n");
tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
}
/*<2> Tx related */
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Manage ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Manage ok interrupt!\n");
_rtl_pci_tx_isr(hw, MGNT_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "HIGH_QUEUE ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "HIGH_QUEUE ok interrupt!\n");
_rtl_pci_tx_isr(hw, HIGH_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BK Tx OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BK Tx OK interrupt!\n");
_rtl_pci_tx_isr(hw, BK_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "BE TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "BE TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, BE_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "VI TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "VI TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VI_QUEUE);
}
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "Vo TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "Vo TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, VO_QUEUE);
}
@@ -993,8 +992,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.intd & rtlpriv->cfg->maps[RTL_IMR_H2CDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "H2C TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "H2C TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, H2C_QUEUE);
}
}
@@ -1003,34 +1002,34 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
rtlpriv->link_info.num_tx_inperiod++;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "CMD TX OK interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "CMD TX OK interrupt!\n");
_rtl_pci_tx_isr(hw, TXCMD_QUEUE);
}
}
/*<3> Rx related */
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "rx descriptor unavailable!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "rx descriptor unavailable!\n");
_rtl_pci_rx_interrupt(hw);
}
if (unlikely(intvec.intb & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
_rtl_pci_rx_interrupt(hw);
}
/*<4> fw related*/
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8723AE) {
if (intvec.inta & rtlpriv->cfg->maps[RTL_IMR_C2HCMD]) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "firmware interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "firmware interrupt!\n");
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.fwevt_wq, 0);
}
@@ -1046,8 +1045,8 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlhal->hw_type == HARDWARE_TYPE_RTL8723BE) {
if (unlikely(intvec.inta &
rtlpriv->cfg->maps[RTL_IMR_HSISR_IND])) {
- RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
- "hsisr interrupt!\n");
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_TRACE,
+ "hsisr interrupt!\n");
_rtl_pci_hs_interrupt(hw);
}
}
@@ -1061,16 +1060,18 @@ done:
return ret;
}
-static void _rtl_pci_irq_tasklet(unsigned long data)
+static void _rtl_pci_irq_tasklet(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct rtl_priv *rtlpriv = from_tasklet(rtlpriv, t, works.irq_tasklet);
+ struct ieee80211_hw *hw = rtlpriv->hw;
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
+static void _rtl_pci_prepare_bcn_tasklet(struct tasklet_struct *t)
{
- struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *rtlpriv = from_tasklet(rtlpriv, t,
+ works.irq_prepare_bcn_tasklet);
+ struct ieee80211_hw *hw = rtlpriv->hw;
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl8192_tx_ring *ring = NULL;
@@ -1092,10 +1093,10 @@ static void _rtl_pci_prepare_bcn_tasklet(unsigned long data)
else
entry = (u8 *)(&ring->desc[ring->idx]);
if (pskb) {
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw, (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- pskb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ pskb->len, DMA_TO_DEVICE);
kfree_skb(pskb);
}
@@ -1194,12 +1195,9 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
rtlpci->acm_method = EACMWAY2_SW;
/*task */
- tasklet_init(&rtlpriv->works.irq_tasklet,
- _rtl_pci_irq_tasklet,
- (unsigned long)hw);
- tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
- _rtl_pci_prepare_bcn_tasklet,
- (unsigned long)hw);
+ tasklet_setup(&rtlpriv->works.irq_tasklet, _rtl_pci_irq_tasklet);
+ tasklet_setup(&rtlpriv->works.irq_prepare_bcn_tasklet,
+ _rtl_pci_prepare_bcn_tasklet);
INIT_WORK(&rtlpriv->works.lps_change_work,
rtl_lps_change_work_callback);
}
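
The two tasklets now use the tasklet_setup()/from_tasklet() API instead of smuggling the hw pointer through an unsigned long cookie. A self-contained sketch of the pattern (hypothetical struct and field names, not the driver's):

	#include <linux/interrupt.h>

	struct demo_dev {
		struct tasklet_struct tl;
		int pending;
	};

	static void demo_tasklet_fn(struct tasklet_struct *t)
	{
		/* from_tasklet() is container_of() in disguise: recover
		 * the enclosing object from the embedded tasklet_struct.
		 */
		struct demo_dev *dev = from_tasklet(dev, t, tl);

		dev->pending = 0;
	}

	static void demo_init(struct demo_dev *dev)
	{
		/* No data cookie: the callback derives its context from
		 * the tasklet_struct itself.
		 */
		tasklet_setup(&dev->tl, demo_tasklet_fn);
	}
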
@@ -1218,9 +1216,9 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
/* alloc tx buffer desc for new trx flow*/
if (rtlpriv->use_new_trx_flow) {
buffer_desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*buffer_desc) * entries,
- &buffer_desc_dma);
+ dma_alloc_coherent(&rtlpci->pdev->dev,
+ sizeof(*buffer_desc) * entries,
+ &buffer_desc_dma, GFP_KERNEL);
if (!buffer_desc || (unsigned long)buffer_desc & 0xFF) {
pr_err("Cannot allocate TX ring (prio = %d)\n",
@@ -1236,8 +1234,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
}
/* alloc dma for this ring */
- desc = pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*desc) * entries, &desc_dma);
+ desc = dma_alloc_coherent(&rtlpci->pdev->dev, sizeof(*desc) * entries,
+ &desc_dma, GFP_KERNEL);
if (!desc || (unsigned long)desc & 0xFF) {
pr_err("Cannot allocate TX ring (prio = %d)\n", prio);
@@ -1251,8 +1249,8 @@ static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
rtlpci->tx_ring[prio].entries = entries;
skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
- prio, desc);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
+ prio, desc);
/* init every desc in this ring */
if (!rtlpriv->use_new_trx_flow) {
@@ -1280,11 +1278,10 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
struct rtl_rx_buffer_desc *entry = NULL;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].buffer_desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- buffer_desc) *
- rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ dma_alloc_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
+ rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma, GFP_KERNEL);
if (!rtlpci->rx_ring[rxring_idx].buffer_desc ||
(ulong)rtlpci->rx_ring[rxring_idx].buffer_desc & 0xFF) {
pr_err("Cannot allocate RX ring\n");
@@ -1304,10 +1301,10 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
u8 tmp_one = 1;
/* alloc dma for this ring */
rtlpci->rx_ring[rxring_idx].desc =
- pci_zalloc_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- desc) * rtlpci->rxringcount,
- &rtlpci->rx_ring[rxring_idx].dma);
+ dma_alloc_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount,
+ &rtlpci->rx_ring[rxring_idx].dma, GFP_KERNEL);
if (!rtlpci->rx_ring[rxring_idx].desc ||
(unsigned long)rtlpci->rx_ring[rxring_idx].desc & 0xFF) {
pr_err("Cannot allocate RX ring\n");
@@ -1347,24 +1344,23 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
else
entry = (u8 *)(&ring->desc[ring->idx]);
- pci_unmap_single(rtlpci->pdev,
+ dma_unmap_single(&rtlpci->pdev->dev,
rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
- true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
/* free dma of this ring */
- pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->desc) * ring->entries,
- ring->desc, ring->dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*ring->desc) * ring->entries, ring->desc,
+ ring->dma);
ring->desc = NULL;
if (rtlpriv->use_new_trx_flow) {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*ring->buffer_desc) * ring->entries,
- ring->buffer_desc, ring->buffer_desc_dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*ring->buffer_desc) * ring->entries,
+ ring->buffer_desc, ring->buffer_desc_dma);
ring->buffer_desc = NULL;
}
}
@@ -1381,25 +1377,25 @@ static void _rtl_pci_free_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
if (!skb)
continue;
- pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
- rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev, *((dma_addr_t *)skb->cb),
+ rtlpci->rxbuffersize, DMA_FROM_DEVICE);
kfree_skb(skb);
}
/* free dma of this ring */
if (rtlpriv->use_new_trx_flow) {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].
- buffer_desc) * rtlpci->rxringcount,
- rtlpci->rx_ring[rxring_idx].buffer_desc,
- rtlpci->rx_ring[rxring_idx].dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].buffer_desc) *
+ rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].buffer_desc,
+ rtlpci->rx_ring[rxring_idx].dma);
rtlpci->rx_ring[rxring_idx].buffer_desc = NULL;
} else {
- pci_free_consistent(rtlpci->pdev,
- sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
- rtlpci->rxringcount,
- rtlpci->rx_ring[rxring_idx].desc,
- rtlpci->rx_ring[rxring_idx].dma);
+ dma_free_coherent(&rtlpci->pdev->dev,
+ sizeof(*rtlpci->rx_ring[rxring_idx].desc) *
+ rtlpci->rxringcount,
+ rtlpci->rx_ring[rxring_idx].desc,
+ rtlpci->rx_ring[rxring_idx].dma);
rtlpci->rx_ring[rxring_idx].desc = NULL;
}
}
@@ -1527,13 +1523,10 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
else
entry = (u8 *)(&ring->desc[ring->idx]);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->
- get_desc(hw, (u8 *)
- entry,
- true,
- HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb_irq(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
@@ -1649,10 +1642,10 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
true, HW_DESC_OWN);
if (own == 1 && hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
- hw_queue, ring->idx, idx,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ hw_queue, ring->idx, idx,
+ skb_queue_len(&ring->queue));
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
flags);
@@ -1662,8 +1655,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if (rtlpriv->cfg->ops->get_available_desc &&
rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "get_available_desc fail\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "get_available_desc fail\n");
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
return skb->len;
}
@@ -1686,8 +1679,8 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
hw_queue != BEACON_QUEUE) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%x\n",
hw_queue, ring->idx, idx,
skb_queue_len(&ring->queue));
@@ -1794,8 +1787,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
err = rtlpriv->cfg->ops->hw_init(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Failed to config hardware!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Failed to config hardware!\n");
kfree(rtlpriv->btcoexist.btc_context);
kfree(rtlpriv->btcoexist.wifi_only_context);
return err;
@@ -1804,7 +1797,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
&rtlmac->retry_long);
rtlpriv->cfg->ops->enable_interrupt(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
rtl_init_rx_config(hw);
@@ -1815,7 +1808,7 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
rtlpci->up_first_time = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%s OK\n", __func__);
return 0;
}
@@ -1909,71 +1902,71 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
deviceid == RTL_PCI_8171_DID) {
switch (revisionid) {
case RTL_PCI_REVISION_ID_8192PCIE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192 PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192 PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
return false;
case RTL_PCI_REVISION_ID_8192SE:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192SE is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192SE is found - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
+ venderid, deviceid);
rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
break;
}
} else if (deviceid == RTL_PCI_8723AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8723AE PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8723AE PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192CET_DID ||
deviceid == RTL_PCI_8192CE_DID ||
deviceid == RTL_PCI_8191CE_DID ||
deviceid == RTL_PCI_8188CE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192C PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192C PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8192DE_DID ||
deviceid == RTL_PCI_8192DE_DID2) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "8192D PCI-E is found - vid/did=%x/%x\n",
- venderid, deviceid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "8192D PCI-E is found - vid/did=%x/%x\n",
+ venderid, deviceid);
} else if (deviceid == RTL_PCI_8188EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8188EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8188EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8188EE\n");
} else if (deviceid == RTL_PCI_8723BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8723BE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8723BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8723BE\n");
} else if (deviceid == RTL_PCI_8192EE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8192EE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8192EE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8192EE\n");
} else if (deviceid == RTL_PCI_8821AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8821AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8821AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8821AE\n");
} else if (deviceid == RTL_PCI_8812AE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8812AE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8812AE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8812AE\n");
} else if (deviceid == RTL_PCI_8822BE_DID) {
rtlhal->hw_type = HARDWARE_TYPE_RTL8822BE;
rtlhal->bandset = BAND_ON_BOTH;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find adapter, Hardware type is 8822BE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find adapter, Hardware type is 8822BE\n");
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Err: Unknown device - vid/did=%x/%x\n",
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Err: Unknown device - vid/did=%x/%x\n",
venderid, deviceid);
rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
@@ -1982,17 +1975,17 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
if (revisionid == 0 || revisionid == 1) {
if (revisionid == 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC0\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC0\n");
rtlhal->interfaceindex = 0;
} else if (revisionid == 1) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Find 92DE MAC1\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Find 92DE MAC1\n");
rtlhal->interfaceindex = 1;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
venderid, deviceid, revisionid);
rtlhal->interfaceindex = 0;
}
@@ -2026,9 +2019,9 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
pcipriv->ndis_adapter.pcibridge_vendor = tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Pci Bridge Vendor is found index: %d\n",
- tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Pci Bridge Vendor is found index: %d\n",
+ tmp);
break;
}
}
@@ -2056,22 +2049,22 @@ static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
- pcipriv->ndis_adapter.busnumber,
- pcipriv->ndis_adapter.devnumber,
- pcipriv->ndis_adapter.funcnumber,
- pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
+ pcipriv->ndis_adapter.busnumber,
+ pcipriv->ndis_adapter.devnumber,
+ pcipriv->ndis_adapter.funcnumber,
+ pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
- pcipriv->ndis_adapter.pcibridge_busnum,
- pcipriv->ndis_adapter.pcibridge_devnum,
- pcipriv->ndis_adapter.pcibridge_funcnum,
- pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
- pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
- pcipriv->ndis_adapter.pcibridge_linkctrlreg,
- pcipriv->ndis_adapter.amd_l1_patch);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
+ pcipriv->ndis_adapter.pcibridge_busnum,
+ pcipriv->ndis_adapter.pcibridge_devnum,
+ pcipriv->ndis_adapter.pcibridge_funcnum,
+ pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
+ pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
+ pcipriv->ndis_adapter.pcibridge_linkctrlreg,
+ pcipriv->ndis_adapter.amd_l1_patch);
rtl_pci_parse_configuration(pdev, hw);
list_add_tail(&rtlpriv->list, &rtlpriv->glb_var->glb_priv_list);
@@ -2099,8 +2092,8 @@ static int rtl_pci_intr_mode_msi(struct ieee80211_hw *hw)
rtlpci->using_msi = true;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "MSI Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "MSI Interrupt Mode!\n");
return 0;
}
@@ -2117,8 +2110,8 @@ static int rtl_pci_intr_mode_legacy(struct ieee80211_hw *hw)
return ret;
rtlpci->using_msi = false;
- RT_TRACE(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
- "Pin-based Interrupt Mode!\n");
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_INTR, DBG_DMESG,
+ "Pin-based Interrupt Mode!\n");
return 0;
}
@@ -2172,8 +2165,8 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
if (((struct rtl_hal_cfg *)id->driver_data)->mod_params->dma64 &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
WARN_ONCE(true,
"Unable to obtain 64bit DMA for consistent allocations\n");
err = -ENOMEM;
@@ -2181,8 +2174,8 @@ int rtl_pci_probe(struct pci_dev *pdev,
}
platform_enable_dma64(pdev, true);
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
WARN_ONCE(true,
"rtlwifi: Unable to obtain 32bit DMA for consistent allocations\n");
err = -ENOMEM;
@@ -2245,10 +2238,10 @@ int rtl_pci_probe(struct pci_dev *pdev,
goto fail2;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
- pmem_start, pmem_len, pmem_flags,
- rtlpriv->io.pci_mem_start);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
+ pmem_start, pmem_len, pmem_flags,
+ rtlpriv->io.pci_mem_start);
/* Disable Clk Request */
pci_write_config_byte(pdev, 0x81, 0);
@@ -2310,9 +2303,9 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpci = rtl_pcidev(pcipriv);
err = rtl_pci_intr_mode_decide(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "%s: failed to register IRQ handler\n",
- wiphy_name(hw->wiphy));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "%s: failed to register IRQ handler\n",
+ wiphy_name(hw->wiphy));
goto fail3;
}
rtlpci->irq_alloc = 1;
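
All of the pci.c hunks above follow the same mechanical mapping from the legacy PCI DMA wrappers to the generic DMA API: pci_set_dma_mask()/pci_set_consistent_dma_mask() become dma_set_mask()/dma_set_coherent_mask() on &pdev->dev, pci_zalloc_consistent() becomes dma_alloc_coherent(..., GFP_KERNEL), pci_(un)map_single() becomes dma_(un)map_single(), and PCI_DMA_TODEVICE/PCI_DMA_FROMDEVICE become DMA_TO_DEVICE/DMA_FROM_DEVICE. A compact sketch of the converted flow (hypothetical sizes and names, assuming a valid struct pci_dev *pdev):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	static int demo_dma_setup(struct pci_dev *pdev, void *buf, size_t len)
	{
		dma_addr_t ring_dma, buf_dma;
		void *ring;

		/* The patch keeps the streaming and coherent masks as two
		 * separate calls, mirroring the old pci_* pair.
		 */
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
		    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
			return -ENODEV;

		/* dma_alloc_coherent() returns zeroed memory on modern
		 * kernels, matching the old pci_zalloc_consistent().
		 */
		ring = dma_alloc_coherent(&pdev->dev, 4096, &ring_dma,
					  GFP_KERNEL);
		if (!ring)
			return -ENOMEM;

		/* Streaming mapping for a per-packet buffer. */
		buf_dma = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, buf_dma)) {
			dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
			return -ENOMEM;
		}

		/* ... hardware consumes buf_dma / ring_dma here ... */

		dma_unmap_single(&pdev->dev, buf_dma, len, DMA_TO_DEVICE);
		dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
		return 0;
	}
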
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 90f92728e16a..f99882255d48 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -19,8 +19,8 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
rtlpriv->intf_ops->reset_trx_ring(hw);
if (is_hal_stop(rtlhal))
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Driver is already down!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Driver is already down!\n");
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
@@ -80,9 +80,9 @@ static bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
if (ppsc->rfchange_inprogress) {
spin_unlock(&rtlpriv->locks.rf_ps_lock);
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "RF Change in progress! Wait to set..state_toset(%d).\n",
- state_toset);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "RF Change in progress! Wait to set..state_toset(%d).\n",
+ state_toset);
/* Set RF after the previous action is done. */
while (ppsc->rfchange_inprogress) {
@@ -179,10 +179,10 @@ static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
ppsc->swrf_processing = false;
}
-void rtl_ips_nic_off_wq_callback(void *data)
+void rtl_ips_nic_off_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ ips_nic_off_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
@@ -191,8 +191,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
enum rf_pwrstate rtstate;
if (mac->opmode != NL80211_IFTYPE_STATION) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "not station return\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "not station return\n");
return;
}
@@ -228,8 +228,8 @@ void rtl_ips_nic_off_wq_callback(void *data)
!ppsc->swrf_processing &&
(mac->link_state == MAC80211_NOLINK) &&
!mac->act_scanning) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "IPSEnter(): Turn off RF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "IPSEnter(): Turn off RF\n");
ppsc->inactive_pwrstate = ERFOFF;
ppsc->in_powersavemode = true;
@@ -307,8 +307,8 @@ static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
ppsc->last_delaylps_stamp_jiffies);
if (ps_timediff < 2000) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Delay enter Fw LPS for DHCP, ARP, or EAPOL exchanging state\n");
return false;
}
@@ -353,9 +353,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
if ((ppsc->fwctrl_lps) && ppsc->report_linked) {
if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS leave ps_mode:%x\n",
- FW_PS_ACTIVE_MODE);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS leave ps_mode:%x\n",
+ FW_PS_ACTIVE_MODE);
enter_fwlps = false;
ppsc->pwr_mode = FW_PS_ACTIVE_MODE;
ppsc->smart_ps = 0;
@@ -368,9 +368,9 @@ void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
} else {
if (rtl_get_fwlps_doze(hw)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "FW LPS enter ps_mode:%x\n",
- ppsc->fwctrl_psmode);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "FW LPS enter ps_mode:%x\n",
+ ppsc->fwctrl_psmode);
if (rtlpriv->cfg->ops->get_btc_status())
rtlpriv->btcoexist.btc_ops->btc_lps_notify(rtlpriv, rt_psmode);
enter_fwlps = true;
@@ -420,8 +420,8 @@ static void rtl_lps_enter_core(struct ieee80211_hw *hw)
* bt_ccoexist may ask to enter lps.
* In normal case, this constraint move to rtl_lps_set_psmode().
*/
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Enter 802.11 power save mode...\n");
rtl_lps_set_psmode(hw, EAUTOPS);
mutex_unlock(&rtlpriv->locks.lps_mutex);
@@ -449,8 +449,8 @@ static void rtl_lps_leave_core(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Busy Traffic,Leave 802.11 power save..\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Busy Traffic,Leave 802.11 power save..\n");
rtl_lps_set_psmode(hw, EACTIVE);
}
@@ -534,8 +534,8 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
queue_delayed_work(rtlpriv->works.rtl_wq,
&rtlpriv->works.ps_work, MSECS(5));
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
@@ -562,10 +562,10 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
mutex_unlock(&rtlpriv->locks.lps_mutex);
}
-void rtl_swlps_rfon_wq_callback(void *data)
+void rtl_swlps_rfon_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks =
- container_of_dwork_rtl(data, struct rtl_works, ps_rfon_wq);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ ps_rfon_wq.work);
struct ieee80211_hw *hw = rtlworks->hw;
rtl_swlps_rf_awake(hw);
@@ -630,9 +630,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
/* this print should always be dtim_conter = 0 &
* sleep = dtim_period, that meaons, we should
* awake before every dtim */
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "dtim_counter:%x will sleep :%d beacon_intv\n",
- rtlpriv->psc.dtim_counter, sleep_intv);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "dtim_counter:%x will sleep :%d beacon_intv\n",
+ rtlpriv->psc.dtim_counter, sleep_intv);
/* we tested that 40ms is enough for sw & hw sw delay */
queue_delayed_work(rtlpriv->works.rtl_wq, &rtlpriv->works.ps_rfon_wq,
@@ -653,33 +653,32 @@ void rtl_lps_change_work_callback(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
-void rtl_lps_enter(struct ieee80211_hw *hw)
+void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- if (!in_interrupt())
+ if (may_block)
return rtl_lps_enter_core(hw);
rtlpriv->enter_ps = true;
schedule_work(&rtlpriv->works.lps_change_work);
}
EXPORT_SYMBOL_GPL(rtl_lps_enter);
-void rtl_lps_leave(struct ieee80211_hw *hw)
+void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- if (!in_interrupt())
+ if (may_block)
return rtl_lps_leave_core(hw);
rtlpriv->enter_ps = false;
schedule_work(&rtlpriv->works.lps_change_work);
}
EXPORT_SYMBOL_GPL(rtl_lps_leave);
-void rtl_swlps_wq_callback(void *data)
+void rtl_swlps_wq_callback(struct work_struct *work)
{
- struct rtl_works *rtlworks = container_of_dwork_rtl(data,
- struct rtl_works,
- ps_work);
+ struct rtl_works *rtlworks = container_of(work, struct rtl_works,
+ ps_work.work);
struct ieee80211_hw *hw = rtlworks->hw;
struct rtl_priv *rtlpriv = rtl_priv(hw);
bool ps = false;
@@ -744,9 +743,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
if (ie[0] == 12) {
find_p2p_ps_ie = true;
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -757,8 +756,8 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
noa_index = ie[3];
if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
P2P_PS_NONE || noa_index != p2pinfo->noa_index) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "update NOA ie.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "update NOA ie.\n");
p2pinfo->noa_index = noa_index;
p2pinfo->opp_ps = (ie[4] >> 7);
p2pinfo->ctwindow = ie[4] & 0x7F;
@@ -829,7 +828,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
if (ie == NULL)
return;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
noa_len = le16_to_cpu(*(__le16 *)&ie[1]);
@@ -837,13 +836,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
return;
if (ie[0] == 12) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "find NOA IE.\n");
RT_PRINT_DATA(rtlpriv, COMP_FW, DBG_LOUD, "noa ie ",
ie, noa_len);
if ((noa_len - 2) % 13 != 0) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P notice of absence: invalid length.%d\n",
- noa_len);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P notice of absence: invalid length.%d\n",
+ noa_len);
return;
} else {
noa_num = (noa_len - 2) / 13;
@@ -901,7 +900,7 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
struct rtl_ps_ctl *rtlps = rtl_psc(rtl_priv(hw));
struct rtl_p2p_ps_info *p2pinfo = &(rtlpriv->psc.p2p_ps_info);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n" , p2p_ps_state);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, " p2p state %x\n", p2p_ps_state);
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
p2pinfo->p2p_ps_state = p2p_ps_state;
@@ -953,18 +952,18 @@ void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "ctwindow %x oppps %x\n",
- p2pinfo->ctwindow , p2pinfo->opp_ps);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "count %x duration %x index %x interval %x start time %x noa num %x\n",
- p2pinfo->noa_count_type[0],
- p2pinfo->noa_duration[0],
- p2pinfo->noa_index,
- p2pinfo->noa_interval[0],
- p2pinfo->noa_start_time[0],
- p2pinfo->noa_num);
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "ctwindow %x oppps %x\n",
+ p2pinfo->ctwindow, p2pinfo->opp_ps);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "count %x duration %x index %x interval %x start time %x noa num %x\n",
+ p2pinfo->noa_count_type[0],
+ p2pinfo->noa_duration[0],
+ p2pinfo->noa_index,
+ p2pinfo->noa_interval[0],
+ p2pinfo->noa_start_time[0],
+ p2pinfo->noa_num);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "end\n");
}
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
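
The signature change to rtl_lps_enter()/rtl_lps_leave() replaces the old in_interrupt() heuristic with an explicit contract: a caller that may sleep passes may_block = true and takes the mutex-protected *_core() path directly, while an atomic caller passes false and the transition is deferred to lps_change_work. Illustrative call sites under the new contract (the atomic case matches the _rtl_pci_tx_isr() hunks above; the blocking case is a hypothetical process-context caller):

	/* hard-irq / tasklet path: must not sleep, defer to the
	 * workqueue via lps_change_work.
	 */
	rtl_lps_leave(hw, false);

	/* process context that is allowed to sleep: enter directly. */
	rtl_lps_enter(hw, true);
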
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.h b/drivers/net/wireless/realtek/rtlwifi/ps.h
index aaa2ed2bbe16..b37a929def82 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.h
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.h
@@ -10,15 +10,15 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
void rtl_ips_nic_off(struct ieee80211_hw *hw);
void rtl_ips_nic_on(struct ieee80211_hw *hw);
-void rtl_ips_nic_off_wq_callback(void *data);
-void rtl_lps_enter(struct ieee80211_hw *hw);
-void rtl_lps_leave(struct ieee80211_hw *hw);
+void rtl_ips_nic_off_wq_callback(struct work_struct *work);
+void rtl_lps_enter(struct ieee80211_hw *hw, bool may_block);
+void rtl_lps_leave(struct ieee80211_hw *hw, bool may_block);
void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode);
void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len);
-void rtl_swlps_wq_callback(void *data);
-void rtl_swlps_rfon_wq_callback(void *data);
+void rtl_swlps_wq_callback(struct work_struct *work);
+void rtl_swlps_rfon_wq_callback(struct work_struct *work);
void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
void rtl_p2p_ps_cmd(struct ieee80211_hw *hw , u8 p2p_ps_state);
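
The prototype changes in ps.h mirror the standard work-callback conversion: each callback now receives the struct work_struct * itself and recovers its context with container_of() on the delayed_work's .work member, rather than trusting a void * cookie. A reduced sketch (hypothetical container layout, not the driver's rtl_works):

	#include <linux/workqueue.h>

	struct demo_works {
		void *hw;			/* owning device context */
		struct delayed_work off_wq;
	};

	static void demo_off_cb(struct work_struct *work)
	{
		/* A delayed_work embeds a work_struct in its .work
		 * member, so the container is recovered through that
		 * field.
		 */
		struct demo_works *w =
			container_of(work, struct demo_works, off_wq.work);

		(void)w->hw;	/* use the recovered context */
	}
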
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 8be31e0ad878..4cf8face0bbd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -393,13 +393,13 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.country_code =
channel_plan_to_country_code(rtlpriv->efuse.channel_plan);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
- rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM regdomain: 0x%0x country code: %d\n",
+ rtlpriv->efuse.channel_plan, rtlpriv->regd.country_code);
if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
- RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
- "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_DMESG,
+ "rtl: EEPROM indicates invalid country code, world wide 13 should be used\n");
rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
}
@@ -414,9 +414,9 @@ int rtl_regd_init(struct ieee80211_hw *hw,
rtlpriv->regd.alpha2[1] = '0';
}
- RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
- "rtl: Country alpha2 being used: %c%c\n",
- rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_TRACE,
+ "rtl: Country alpha2 being used: %c%c\n",
+ rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]);
_rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
@@ -428,7 +428,7 @@ void rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_REGD, DBG_LOUD, "\n");
_rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index 1ffa188a65c9..d10c14c694da 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -415,16 +415,16 @@ static void rtl88e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(15)|BIT(14), 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -459,8 +459,8 @@ static void rtl88e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_dig->cur_cck_cca_thres = cur_cck_cca_thresh;
dm_dig->pre_cck_cca_thres = dm_dig->cur_cck_cca_thres;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCK cca thresh hold =%x\n", dm_dig->cur_cck_cca_thres);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCK cca thresh hold =%x\n", dm_dig->cur_cck_cca_thres);
}
static void rtl88e_dm_dig(struct ieee80211_hw *hw)
@@ -520,7 +520,7 @@ static void rtl88e_dm_dig(struct ieee80211_hw *hw)
} else {
dm_dig->rx_gain_max = dm_dig_max;
dig_dynamic_min = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
@@ -624,8 +624,8 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -637,47 +637,47 @@ static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x0)\n");
} else if ((undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr = 0x10)\n");
} else if (undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl !=
rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl88e_phy_set_txpower_level(hw, rtlphy->current_channel);
}
@@ -690,8 +690,8 @@ void rtl88e_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_dig = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
dm_dig->cur_igvalue, dm_dig->pre_igvalue,
dm_dig->back_val);
@@ -881,17 +881,17 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
/*Initilization (7 steps in total) */
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "dm_txpower_track_cb_therm\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "%s\n", __func__);
thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER,
0xfc00);
if (!thermalvalue)
return;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
/*1. Query OFDM Default Setting: Path A*/
ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) &
@@ -900,8 +900,8 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8)i;
rtldm->swing_idx_ofdm_base[RF90_PATH_A] = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index = 0x%x\n",
ROFDM0_XATXIQIMBALANCE,
ele_d, ofdm_index_old[0]);
break;
@@ -915,24 +915,24 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
if (memcmp(&temp_cck, &cck_tbl_ch14[i][2], 4) == 0) {
cck_index_old = (u8)i;
rtldm->swing_idx_cck_base = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
} else {
if (memcmp(&temp_cck, &cck_tbl_ch1_13[i][2], 4) == 0) {
cck_index_old = (u8)i;
rtldm->swing_idx_cck_base = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -987,11 +987,11 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
/* 6 If necessary, do LCK.*/
if (delta_lck >= 8) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -1072,7 +1072,7 @@ static void dm_txpower_track_cb_therm(struct ieee80211_hw *hw)
if (rtldm->txpower_track_control)
rtldm->thermalvalue = thermalvalue;
rtldm->txpowercount = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
}
static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw)
@@ -1087,9 +1087,9 @@ static void rtl88e_dm_init_txpower_tracking(struct ieee80211_hw *hw)
rtlpriv->dm.swing_idx_ofdm[RF90_PATH_A] = 12;
rtlpriv->dm.swing_idx_ofdm_cur = 12;
rtlpriv->dm.swing_flag_ofdm = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "rtlpriv->dm.txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "rtlpriv->dm.txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
@@ -1102,13 +1102,13 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17)|BIT(16),
0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 88E Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 88E Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking !!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
dm_txpower_track_cb_therm(hw);
rtlpriv->dm.tm_trigger = 0;
}
@@ -1138,14 +1138,14 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
@@ -1180,14 +1180,14 @@ static void rtl88e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
- rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -1224,8 +1224,8 @@ static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw,
u32 default_ant, optional_ant;
if (pfat_table->rx_idle_ant != ant) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "need to update rx idle ant\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "need to update rx idle ant\n");
if (ant == MAIN_ANT) {
default_ant =
(pfat_table->rx_idle_ant == CG_TRX_HW_ANTDIV) ?
@@ -1260,8 +1260,8 @@ static void rtl88e_dm_update_rx_idle_ant(struct ieee80211_hw *hw,
}
}
pfat_table->rx_idle_ant = ant;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n",
- (ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "RxIdleAnt %s\n",
+ (ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"));
}
static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw,
@@ -1280,9 +1280,9 @@ static void rtl88e_dm_update_tx_ant(struct ieee80211_hw *hw,
pfat_table->antsel_a[mac_id] = target_ant & BIT(0);
pfat_table->antsel_b[mac_id] = (target_ant & BIT(1)) >> 1;
pfat_table->antsel_c[mac_id] = (target_ant & BIT(2)) >> 2;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "txfrominfo target ant %s\n",
(ant == MAIN_ANT) ? ("MAIN_ANT") : ("AUX_ANT"));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "antsel_tr_mux = 3'b%d%d%d\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "antsel_tr_mux = 3'b%d%d%d\n",
pfat_table->antsel_c[mac_id],
pfat_table->antsel_b[mac_id],
pfat_table->antsel_a[mac_id]);
@@ -1464,15 +1464,15 @@ static void rtl88e_dm_hw_ant_div(struct ieee80211_hw *hw)
target_ant = (main_rssi == aux_rssi) ?
pfat_table->rx_idle_ant : ((main_rssi >= aux_rssi) ?
MAIN_ANT : AUX_ANT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"main_ant_sum %d main_ant_cnt %d\n",
pfat_table->main_ant_sum[i],
pfat_table->main_ant_cnt[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "aux_ant_sum %d aux_ant_cnt %d\n",
- pfat_table->aux_ant_sum[i], pfat_table->aux_ant_cnt[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "main_rssi %d aux_rssi%d\n",
- main_rssi, aux_rssi);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "aux_ant_sum %d aux_ant_cnt %d\n",
+ pfat_table->aux_ant_sum[i], pfat_table->aux_ant_cnt[i]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "main_rssi %d aux_rssi%d\n",
+ main_rssi, aux_rssi);
local_max_rssi = (main_rssi > aux_rssi) ? main_rssi : aux_rssi;
if ((local_max_rssi > ant_div_max_rssi) && (local_max_rssi < 40))
ant_div_max_rssi = local_max_rssi;
@@ -1699,10 +1699,10 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
struct fast_ant_training *pfat_table = &rtldm->fat_table;
if (mac->link_state < MAC80211_LINKED) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
if (pfat_table->becomelinked) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "need to turn off HW AntDiv\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "need to turn off HW AntDiv\n");
rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 0);
rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
BIT(15), 0);
@@ -1716,8 +1716,8 @@ static void rtl88e_dm_antenna_diversity(struct ieee80211_hw *hw)
return;
} else {
if (!pfat_table->becomelinked) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Need to turn on HW AntDiv\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Need to turn on HW AntDiv\n");
rtl_set_bbreg(hw, DM_REG_IGI_A_11N, BIT(7), 1);
rtl_set_bbreg(hw, DM_REG_CCK_ANTDIV_PARA1_11N,
BIT(15), 1);
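The dm.c changes above are purely mechanical: every RT_TRACE(rtlpriv, comp, level, fmt, ...) becomes rtl_dbg(rtlpriv, comp, level, fmt, ...), with continuation lines re-aligned under the opening parenthesis. As a sketch of why the rename matters (illustrative only -- the real definitions live in rtlwifi's debug.h and may differ in detail), the lower-case helper keeps printf-style format checking while compiling to nothing when CONFIG_RTLWIFI_DEBUG is off:

/* Illustrative sketch, not the verbatim rtlwifi definition. */
#ifdef CONFIG_RTLWIFI_DEBUG
__printf(4, 5)
void _rtl_dbg_print(struct rtl_priv *rtlpriv, u64 comp, int level,
		    const char *fmt, ...);
#define rtl_dbg(rtlpriv, comp, level, fmt, ...)			\
	_rtl_dbg_print(rtlpriv, comp, level, fmt, ##__VA_ARGS__)
#else
/* "if (0)" preserves format-string checking at zero runtime cost. */
#define rtl_dbg(rtlpriv, comp, level, fmt, ...)			\
	do {							\
		if (0)						\
			pr_debug(fmt, ##__VA_ARGS__);		\
	} while (0)
#endif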
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
index fc7b9ad7e5d0..7252bc621211 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
@@ -40,7 +40,7 @@ static void _rtl88e_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -123,14 +123,14 @@ int rtl88e_download_fw(struct ieee80211_hw *hw,
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "normal Firmware SIZE %d\n", fwsize);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "normal Firmware SIZE %d\n", fwsize);
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x), Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x), Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
@@ -181,22 +181,22 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -238,17 +238,17 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -256,24 +256,24 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read = _rtl88e_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -309,8 +309,8 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -320,16 +320,16 @@ static void _rtl88e_fill_h2c_command(struct ieee80211_hw *hw,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl88e_fill_h2c_cmd(struct ieee80211_hw *hw,
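_rtl88e_fill_h2c_command, instrumented throughout the hunks above, implements the host-to-card mailbox handshake: take the h2c lock, pick the next HMEBOX register, and poll until the firmware has drained the previous message before writing a new element id. Reduced to its core, and using fw_read_box() and write_element_id() as stand-in names for the calls visible above, the wait loop is:

/* Reduced sketch of the mailbox wait above; fw_read_box() and
 * write_element_id() are stand-in names, not driver functions. */
u32 wait_limit = 100;
bool isfw_read = fw_read_box(hw, boxnum);

while (!isfw_read) {
	if (--wait_limit == 0)
		break;			/* firmware never drained the box */
	udelay(10);
	isfw_read = fw_read_box(hw, boxnum);
}
if (isfw_read)
	write_element_id(box_reg, element_id);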
@@ -359,8 +359,8 @@ void rtl88e_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp & (~BIT(2))));
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "8051Reset88E(): 8051 reset success\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "8051Reset88E(): 8051 reset success\n");
}
@@ -370,7 +370,7 @@ void rtl88e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[H2C_88E_PWEMODE_LENGTH] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
u8 rlbm, power_state = 0;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
set_h2ccmd_pwrmode_parm_mode(u1_h2c_set_pwrmode, ((mode) ? 1 : 0));
rlbm = 0;/*YJ, temp, 120316. FW now not support RLBM=2.*/
@@ -610,15 +610,15 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n", u1rsvdpageloc, 3);
rtl88e_fill_h2c_cmd(hw, H2C_88E_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
/*Should check FW support p2p or not.*/
@@ -643,11 +643,11 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -703,11 +703,11 @@ void rtl88e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
index 70716631de85..63f9ea21962f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/hw.c
@@ -75,11 +75,10 @@ static void _rtl88ee_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw,
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
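Alongside the trace conversion, _rtl88ee_return_beacon_queue_skb above migrates from the legacy PCI DMA wrappers to the generic DMA API: pci_unmap_single(rtlpci->pdev, ..., PCI_DMA_TODEVICE) becomes dma_unmap_single(&rtlpci->pdev->dev, ..., DMA_TO_DEVICE). The removed wrappers were thin shims over the generic calls, approximately:

/* Approximate shape of the legacy shim being replaced (from the old
 * pci-dma-compat layer; reproduced from memory, details may differ). */
static inline void
pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
		 size_t size, int direction)
{
	dma_unmap_single(&hwdev->dev, dma_addr, size,
			 (enum dma_data_direction)direction);
}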
@@ -140,9 +139,9 @@ static void _rtl88ee_set_fw_clock_on(struct ieee80211_hw *hw,
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_88E;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -400,8 +399,8 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -445,9 +444,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -459,9 +458,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -500,9 +499,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break; }
case HW_VAR_AC_PARAM:{
@@ -536,9 +535,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -559,9 +558,9 @@ void rtl88ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break; }
case HW_VAR_RCR:
@@ -775,22 +774,22 @@ static bool _rtl88ee_llt_table_init(struct ieee80211_hw *hw)
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl88ee_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
status = _rtl88ee_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl88ee_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
status = _rtl88ee_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
return true;
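The `true != status` comparisons dropped above are a style cleanup: kernel code tests a bool directly with `if (!status)`. The LLT writes themselves chain the TX packet-buffer pages; schematically, from the loop bounds visible above:

/* Link List Table layout built by _rtl88ee_llt_table_init():
 *
 *   0 -> 1 -> ... -> txpktbuf_bndy - 1 -> 0xFF         (terminated chain)
 *   txpktbuf_bndy -> ... -> maxpage -> txpktbuf_bndy   (circular ring)
 *
 * Each _rtl88ee_llt_write(hw, page, next) records "next" as the
 * successor of "page"; 0xFF marks end-of-chain.
 */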
@@ -834,8 +833,8 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8188EE_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
return false;
}
@@ -869,9 +868,9 @@ static bool _rtl88ee_init_mac(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, MSR, 0x00);
if (!rtlhal->mac_func_enable) {
- if (_rtl88ee_llt_table_init(hw) == false) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "LLT table init fail\n");
+ if (!_rtl88ee_llt_table_init(hw)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "LLT table init fail\n");
return false;
}
}
@@ -1002,14 +1001,14 @@ void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1024,8 +1023,8 @@ void rtl88ee_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1068,7 +1067,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
}
rtstatus = _rtl88ee_init_mac(hw);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_info("Init MAC failed\n");
err = 1;
goto exit;
@@ -1076,8 +1075,8 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
err = rtl88e_download_fw(hw, false);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1130,9 +1129,9 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
rtl88e_phy_set_rfpath_switch(hw, false);
rtlpriv->dm.fat_table.rx_idle_ant = AUX_ANT;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rx idle ant %s\n",
- (rtlpriv->dm.fat_table.rx_idle_ant == MAIN_ANT) ?
- ("MAIN_ANT") : ("AUX_ANT"));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "rx idle ant %s\n",
+ (rtlpriv->dm.fat_table.rx_idle_ant == MAIN_ANT) ?
+ ("MAIN_ANT") : ("AUX_ANT"));
if (rtlphy->iqk_initialized) {
rtl88e_phy_iq_calibrate(hw, true);
@@ -1148,7 +1147,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(4))) {
@@ -1157,7 +1156,7 @@ int rtl88ee_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "under 1.5V\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "under 1.5V\n");
}
rtl_write_byte(rtlpriv, REG_NAV_CTRL+2, ((30000+127)/128));
rtl88e_dm_init(hw);
@@ -1185,9 +1184,9 @@ static enum version_8188e _rtl88ee_read_chip_version(struct ieee80211_hw *hw)
}
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1203,26 +1202,26 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -1248,9 +1247,9 @@ static int _rtl88ee_set_media_status(struct ieee80211_hw *hw,
_rtl88ee_resume_tx_beacon(hw);
_rtl88ee_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1370,7 +1369,7 @@ static void _rtl88ee_poweroff_adapter(struct ieee80211_hw *hw)
rtlhal->mac_func_enable = false;
rtlpriv->intf_ops->enable_aspm(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
u1b_tmp = rtl_read_byte(rtlpriv, REG_TX_RPT_CTRL);
rtl_write_byte(rtlpriv, REG_TX_RPT_CTRL, u1b_tmp & (~BIT(1)));
@@ -1427,7 +1426,7 @@ void rtl88ee_card_disable(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
enum nl80211_iftype opmode;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8188ee card disable\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8188ee card disable\n");
mac->link_state = MAC80211_NOLINK;
opmode = NL80211_IFTYPE_UNSPECIFIED;
@@ -1486,8 +1485,8 @@ void rtl88ee_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
/*rtl88ee_disable_interrupt(hw);*/
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
/*rtl88ee_enable_interrupt(hw);*/
@@ -1499,8 +1498,8 @@ void rtl88ee_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1559,15 +1558,15 @@ static void read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcnt = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM88E():PROMContent[0x%x]=0x%x\n",
- (eeaddr+1), hwinfo[eeaddr+1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM88E():PROMContent[0x%x]=0x%x\n",
+ (eeaddr + 1), hwinfo[eeaddr + 1]);
if (0xFF == hwinfo[eeaddr+1]) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/* 2.4G default value */
set_24g_base(pwrinfo24g, rfpath);
@@ -1826,8 +1825,8 @@ static void _rtl88ee_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
@@ -1925,8 +1924,8 @@ static void _rtl88ee_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1943,18 +1942,18 @@ void rtl88ee_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl88ee_read_adapter_info(hw);
} else {
@@ -2049,8 +2048,8 @@ static void rtl88ee_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2169,17 +2168,17 @@ static void rtl88ee_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4]);
rtl88e_fill_h2c_cmd(hw, H2C_88E_RA_MASK, 5, rate_mask);
_rtl88ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
@@ -2236,16 +2235,16 @@ bool rtl88ee_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u4tmp & BIT(31)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if ((!ppsc->hwradiooff) &&
(e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2295,7 +2294,7 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2354,27 +2353,27 @@ void rtl88ee_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
index 4ef6d5907521..006b979da1c6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/led.c
@@ -19,8 +19,8 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -35,8 +35,8 @@ void rtl88ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -47,8 +47,8 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -72,8 +72,8 @@ void rtl88ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, (ledcfg | BIT(3)));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
@@ -123,7 +123,7 @@ void rtl88ee_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n",
+ ledaction);
_rtl88ee_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index d13983ec09ad..9be032e8ec95 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -16,7 +16,12 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask);
+static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i = ffs(bitmask);
+
+ return i ? i - 1 : 32;
+}
static bool _rtl88e_phy_bb8188e_config_parafile(struct ieee80211_hw *hw);
static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
static bool phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
@@ -43,15 +48,15 @@ u32 rtl88e_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl88e_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
- regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue);
return returnvalue;
@@ -63,9 +68,9 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -75,9 +80,9 @@ void rtl88e_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
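rtl88e_phy_query_bb_reg and rtl88e_phy_set_bb_reg, whose tracing is converted above, are the usual masked read-modify-write register accessors; the shift comes from the lowest set bit of the mask via the new ffs()-based helper at the top of this file. A condensed sketch (assuming a nonzero mask; the driver's set path is partially elided above):

/* Condensed sketch of the BB accessors above, mask assumed nonzero. */
static u32 bb_read_masked(struct rtl_priv *rtlpriv, u32 addr, u32 mask)
{
	return (rtl_read_dword(rtlpriv, addr) & mask) >> (ffs(mask) - 1);
}

static void bb_write_masked(struct rtl_priv *rtlpriv, u32 addr,
			    u32 mask, u32 data)
{
	u32 reg = rtl_read_dword(rtlpriv, addr);

	reg = (reg & ~mask) | ((data << (ffs(mask) - 1)) & mask);
	rtl_write_dword(rtlpriv, addr, reg);
}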
@@ -86,9 +91,9 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -99,9 +104,9 @@ u32 rtl88e_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -112,9 +117,9 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -133,9 +138,9 @@ void rtl88e_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
@@ -179,9 +184,9 @@ static u32 _rtl88e_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
@@ -203,20 +208,9 @@ static void _rtl88e_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-static u32 _rtl88e_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
bool rtl88e_phy_mac_config(struct ieee80211_hw *hw)
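The bit-shift helper refactor above is behavior-preserving, including the degenerate bitmask == 0 case: the removed loop fell through with i == 32, and ffs() returns 0 for an all-zero argument, which the new `i ? i - 1 : 32` maps to the same sentinel. A few worked values:

/* Equivalence of the removed loop and the ffs()-based helper:
 *
 *   bitmask       loop result   ffs(bitmask)   i ? i - 1 : 32
 *   0x000000ff         0              1               0
 *   0x00ff0000        16             17              16
 *   0xffffffff         0              1               0
 *   0x00000000        32              0              32
 */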
@@ -381,11 +375,11 @@ static bool _rtl88e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8188EMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8188EMACPHY_Array\n");
arraylength = RTL8188EEMAC_1T_ARRAYLEN;
ptrarray = RTL8188EEMAC_1T_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Img:RTL8188EEMAC_1T_ARRAY LEN %d\n", arraylength);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img:RTL8188EEMAC_1T_ARRAY LEN %d\n", arraylength);
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
return true;
@@ -487,9 +481,9 @@ static void handle_branch2(struct ieee80211_hw *hw, u16 arraylen,
READ_NEXT_PAIR(v1, v2, i);
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
- array_table[i], array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
+ array_table[i], array_table[i + 1]);
}
}
@@ -521,52 +515,52 @@ static void store_pwrindex_rate_offset(struct ieee80211_hw *hw,
if (regaddr == RTXAGC_A_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[count][0] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][0]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][0]);
}
if (regaddr == RTXAGC_A_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[count][1] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][1]);
}
if (regaddr == RTXAGC_A_CCK1_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[count][6] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][6]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][6]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
rtlphy->mcs_txpwrlevel_origoffset[count][7] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][7]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][7]);
}
if (regaddr == RTXAGC_A_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[count][2] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][2]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][2]);
}
if (regaddr == RTXAGC_A_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[count][3] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][3]);
}
if (regaddr == RTXAGC_A_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[count][4] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][4]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][4]);
}
if (regaddr == RTXAGC_A_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[count][5] = data;
@@ -574,66 +568,66 @@ static void store_pwrindex_rate_offset(struct ieee80211_hw *hw,
count++;
rtlphy->pwrgroup_cnt = count;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][5]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][5]);
}
if (regaddr == RTXAGC_B_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[count][8] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][8]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][8]);
}
if (regaddr == RTXAGC_B_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[count][9] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][9]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][9]);
}
if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[count][14] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][14]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][14]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
rtlphy->mcs_txpwrlevel_origoffset[count][15] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][15]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][15]);
}
if (regaddr == RTXAGC_B_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[count][10] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][10]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][10]);
}
if (regaddr == RTXAGC_B_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[count][11] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][11]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][11]);
}
if (regaddr == RTXAGC_B_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[count][12] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][12]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][12]);
}
if (regaddr == RTXAGC_B_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[count][13] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- count,
- rtlphy->mcs_txpwrlevel_origoffset[count][13]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ count,
+ rtlphy->mcs_txpwrlevel_origoffset[count][13]);
if (get_rf_type(rtlphy) != RF_1T1R) {
count++;
rtlphy->pwrgroup_cnt = count;
@@ -696,8 +690,8 @@ static bool phy_config_bb_with_pghdr(struct ieee80211_hw *hw, u8 configtype)
}
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -769,9 +763,9 @@ bool rtl88e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_arraylen = RTL8188EE_RADIOA_1TARRAYLEN;
radioa_array_table = RTL8188EE_RADIOA_1TARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8188EE_RADIOA_1TARRAY %d\n", radioa_arraylen);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
process_path_a(hw, radioa_arraylen, radioa_array_table);
@@ -798,21 +792,21 @@ void rtl88e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void _rtl88e_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
@@ -1081,10 +1075,10 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -1138,7 +1132,7 @@ void rtl88e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl88e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -1155,8 +1149,8 @@ void rtl88e_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl88e_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -1169,8 +1163,8 @@ void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -1188,7 +1182,7 @@ void rtl88e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -1208,13 +1202,13 @@ u8 rtl88e_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl88e_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem current channel %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
+ rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -1315,9 +1309,9 @@ static bool _rtl88e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1581,7 +1575,7 @@ static void _rtl88e_phy_path_adda_on(struct ieee80211_hw *hw,
u32 i;
pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
+ if (!is2t) {
pathon = 0x0bdb25a0;
rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
} else {
@@ -1749,8 +1743,8 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl88e_phy_path_a_iqk(hw, is2t);
if (patha_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Tx IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
@@ -1762,22 +1756,22 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl88e_phy_path_a_rx_iqk(hw, is2t);
if (patha_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Rx IQK Success!!\n");
result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
break;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path a RX iqk fail!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path a RX iqk fail!!!\n");
}
}
if (0 == patha_ok)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A IQK Success!!\n");
if (is2t) {
_rtl88e_phy_path_a_standby(hw);
_rtl88e_phy_path_adda_on(hw, adda_reg, false, is2t);
@@ -1828,7 +1822,7 @@ static void _rtl88e_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "88ee IQK Finish!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "88ee IQK Finish!!\n");
}
static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
@@ -1874,7 +1868,7 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
@@ -1883,7 +1877,7 @@ static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (is_hal_stop(rtlhal)) {
u8 u1btmp;
@@ -2074,24 +2068,24 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2102,7 +2096,7 @@ bool rtl88e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl88e_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -2112,9 +2106,9 @@ static void rtl88e_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -2128,14 +2122,14 @@ static void rtl88e_phy_set_io(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl88ee_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -2180,19 +2174,18 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus &&
(initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl88ee_phy_set_rf_on(hw);
}
@@ -2213,27 +2206,27 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -2256,34 +2249,34 @@ static bool _rtl88ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+			 "Set ERFSLEEP awake:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl88ee_phy_set_rf_sleep(hw);
break;
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
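The hunks above (and throughout this series) are a mechanical RT_TRACE -> rtl_dbg rename that keeps the (rtlpriv, component, level, fmt, ...) signature and the message strings. As a rough sketch of what such a component- and level-gated debug macro can look like — the real definition lives in rtlwifi's debug.h; the my_ prefix and the dbg.* field names here are assumptions for illustration:

	#include <linux/printk.h>

	/* Illustrative only: emit the message when the component bit is
	 * enabled and the message level does not exceed the configured
	 * verbosity. */
	#define my_rtl_dbg(rtlpriv, comp, level, fmt, ...)		\
	do {								\
		if (((rtlpriv)->dbg.global_debugcomponents & (comp)) &&	\
		    (rtlpriv)->dbg.global_debuglevel >= (level))	\
			pr_debug(fmt, ##__VA_ARGS__);			\
	} while (0)

Wrapping the check in do { } while (0) keeps the macro safe inside unbraced if/else arms, which matters for call sites like the switch cases converted above.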
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
index c376817a1bf4..24dc7011b7b2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/rf.c
@@ -474,13 +474,13 @@ static bool _rtl88e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
index a5d2d6ece8db..b9775eec4c54 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c
@@ -410,9 +410,9 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
else
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -515,11 +515,11 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_88e));
@@ -533,9 +533,9 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
rtl88ee_insert_emcontent(ptcb_desc,
(__le32 *)(skb->data));
}
@@ -631,7 +631,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
"Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
@@ -662,7 +662,7 @@ void rtl88ee_tx_fill_desc(struct ieee80211_hw *hw,
}
rtl88e_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -674,16 +674,15 @@ void rtl88ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
@@ -733,7 +732,7 @@ void rtl88ee_set_desc(struct ieee80211_hw *hw, u8 *pdesc8,
{
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
set_tx_desc_own(pdesc, 1);
@@ -774,7 +773,7 @@ u64 rtl88ee_get_desc(struct ieee80211_hw *hw,
u32 ret = 0;
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
ret = get_tx_desc_own(pdesc);
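Besides the logging rename, trx.c migrates from the legacy pci_map_single()/pci_dma_mapping_error() wrappers to the generic DMA API, which takes the underlying struct device and DMA_TO_DEVICE instead of the pci_dev and PCI_DMA_TODEVICE. A minimal sketch of the converted pattern (the helper name is illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>
	#include <linux/skbuff.h>

	/* Map an skb for device reads; on failure the descriptor must not
	 * be armed, mirroring the early return in rtl88ee_tx_fill_desc(). */
	static int map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
			      dma_addr_t *mapping)
	{
		*mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *mapping))
			return -ENOMEM;
		return 0;
	}

The error check must use the same struct device that performed the mapping; comparing the raw dma_addr_t against 0 is not portable.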
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 06fc9b5cdd8f..265a1a336304 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -242,16 +242,16 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
@@ -408,10 +408,10 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
rtl92c_dm_write_dig(hw);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "curmultista_cstate = %x dig_ext_port_stage %x\n",
- dm_digtable->curmultista_cstate,
- dm_digtable->dig_ext_port_stage);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "curmultista_cstate = %x dig_ext_port_stage %x\n",
+ dm_digtable->curmultista_cstate,
+ dm_digtable->dig_ext_port_stage);
}
static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
@@ -419,9 +419,9 @@ static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "presta_cstate = %x, cursta_cstate = %x\n",
- dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "presta_cstate = %x, cursta_cstate = %x\n",
+ dm_digtable->presta_cstate, dm_digtable->cursta_cstate);
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
dm_digtable->cursta_cstate == DIG_STA_CONNECT) {
@@ -537,10 +537,10 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
- dm_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->back_val);
if (rtlpriv->rtlhal.interface == INTF_USB &&
!dm_digtable->dig_enable_flag) {
@@ -559,12 +559,12 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_WARNING,
- "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
- dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
- dm_digtable->rssi_val_min, dm_digtable->back_val,
- dm_digtable->rx_gain_max, dm_digtable->rx_gain_min,
- dm_digtable->large_fa_hit, dm_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_WARNING,
+ "dig values 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->rssi_val_min, dm_digtable->back_val,
+ dm_digtable->rx_gain_max, dm_digtable->rx_gain_min,
+ dm_digtable->large_fa_hit, dm_digtable->forbidden_igi);
}
EXPORT_SYMBOL(rtl92c_dm_write_dig);
@@ -713,15 +713,15 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
u8 ofdm_min_index = 6, rf;
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "rtl92c_dm_txpower_tracking_callback_thermalmeter\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "%s\n", __func__);
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
rtl92c_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
@@ -738,10 +738,10 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]);
break;
}
}
@@ -754,11 +754,11 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
if (ele_d == (ofdmswing_table[i] &
MASKOFDM_D)) {
ofdm_index_old[1] = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial pathB ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
break;
}
}
@@ -774,12 +774,12 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
4) == 0) {
cck_index_old = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch 14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
} else {
@@ -789,12 +789,12 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
4) == 0) {
cck_index_old = (u8) i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index=0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -823,11 +823,11 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
if (delta_lck > 1) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -846,16 +846,16 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
}
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index);
}
if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
@@ -946,14 +946,14 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
cck_index = 0;
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, OFDM_B_index=0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, cck_index=0x%x\n",
- ofdm_index[0], cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, cck_index=0x%x\n",
+ ofdm_index[0], cck_index);
}
}
@@ -1111,7 +1111,7 @@ static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
rtlpriv->dm.thermalvalue = thermalvalue;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
@@ -1123,9 +1123,9 @@ static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -1149,13 +1149,13 @@ static void rtl92c_dm_check_txpower_tracking_thermal_meter(
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
0x60);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 92S Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 92S Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
rtl92c_dm_txpower_tracking_directcall(hw);
rtlpriv->dm.tm_trigger = 0;
}
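The hunk above also shows the driver's two-pass thermal scheme: one watchdog tick arms the RF thermal meter, the next consumes the reading and runs the compensation. Condensed into a sketch (identifiers as in the hunk; the helper itself is illustrative):

	static void check_thermal_meter(struct ieee80211_hw *hw)
	{
		struct rtl_priv *rtlpriv = rtl_priv(hw);

		if (!rtlpriv->dm.tm_trigger) {
			/* pass 1: start a measurement; it needs time to settle */
			rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER,
				      RFREG_OFFSET_MASK, 0x60);
			rtlpriv->dm.tm_trigger = 1;
			return;
		}
		/* pass 2: the reading is stable, apply TX power tracking */
		rtl92c_dm_txpower_tracking_directcall(hw);
		rtlpriv->dm.tm_trigger = 0;
	}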
@@ -1276,29 +1276,29 @@ static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
if (((mac->link_state == MAC80211_NOLINK)) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
dm_pstable->rssi_val_min = 0;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
+		rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Not connected to any\n");
}
if (mac->link_state == MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+			rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
} else {
dm_pstable->rssi_val_min = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+			rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
} else {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+		rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
/* Power Saving for 92C */
@@ -1350,8 +1350,8 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -1362,42 +1362,42 @@ void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL2;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+			 "TXHIGHPWRLEVEL_LEVEL2 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
if (rtlpriv->dm.dynamic_txhighpower_lvl ==
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
index 86b1b88cc4ed..b618f07f29b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
@@ -54,7 +54,7 @@ static void _rtl92c_write_fw(struct ieee80211_hw *hw,
bool is_version_b;
u8 *bufferptr = (u8 *)buffer;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+	rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
is_version_b = IS_NORMAL_CHIP(version);
if (is_version_b) {
u32 pagenums, remainsize;
@@ -143,10 +143,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
rtlhal->fw_version = le16_to_cpu(pfwheader->version);
rtlhal->fw_subversion = pfwheader->subversion;
@@ -198,21 +198,21 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -254,8 +254,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -263,9 +263,9 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -273,24 +273,24 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+			"Write H2C register BOX[%d] fail!!!!! FW did not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -358,8 +358,8 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -369,16 +369,16 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -428,7 +428,7 @@ void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[3] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
@@ -636,16 +636,16 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n",
u1rsvdpageloc, 3);
rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
EXPORT_SYMBOL(rtl92c_set_fw_rsvdpagepkt);
@@ -717,13 +717,13 @@ void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -751,12 +751,12 @@ void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
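_rtl92c_fill_h2c_command() serializes H2C writers with a plain in-progress flag under a spinlock, polling with the lock dropped so the current owner can clear it. The acquire side reduces to roughly this (field names follow the driver; the helper is a sketch, and the real code also bails out after about 1000 polls):

	static void h2c_acquire(struct rtl_priv *rtlpriv)
	{
		struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
		unsigned long flags;

		spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flags);
		while (rtlhal->h2c_setinprogress) {
			spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flags);
			udelay(100);	/* owner runs with the lock free */
			spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flags);
		}
		rtlhal->h2c_setinprogress = true;	/* we own the boxes now */
		spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flags);
	}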
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
index 661249d618c0..3d29c8dbb255 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/phy_common.c
@@ -14,15 +14,15 @@ u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -34,9 +34,9 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -46,9 +46,9 @@ void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
EXPORT_SYMBOL(rtl92c_phy_set_bb_reg);
@@ -112,9 +112,9 @@ u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb,
- retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb,
+ retvalue);
return retvalue;
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_read);
@@ -137,21 +137,17 @@ void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset,
- data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr);
}
EXPORT_SYMBOL(_rtl92c_phy_rf_serial_write);
u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
EXPORT_SYMBOL(_rtl92c_phy_calculate_bit_shift);
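The _rtl92c_phy_calculate_bit_shift() rewrite above trades the open-coded scan for ffs(), which returns the 1-based index of the lowest set bit (0 when no bit is set). The two forms agree on every 32-bit mask, including zero:

	#include <linux/bitops.h>

	static u32 bit_shift(u32 bitmask)
	{
		u32 i = ffs(bitmask);	/* 1-based, 0 when bitmask == 0 */

		return i ? i - 1 : 32;
	}

	/* bit_shift(0x000fffff) == 0, bit_shift(0xffffff00) == 8, and
	 * bit_shift(0) == 32 -- the removed loop likewise fell through
	 * with i == 32 when no bit matched. */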
@@ -192,7 +188,7 @@ bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
}
if (rtlphy->rf_type == RF_1T2R) {
_rtl92c_phy_bb_config_1t(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
}
if (rtlefuse->autoload_failflag == false) {
rtlphy->pwrgroup_cnt = 0;
@@ -227,145 +223,145 @@ void _rtl92c_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
if (regaddr == RTXAGC_A_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][0]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][0]);
}
if (regaddr == RTXAGC_A_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][1]);
}
if (regaddr == RTXAGC_A_CCK1_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][6]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][6]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][7]);
}
if (regaddr == RTXAGC_A_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][2]);
}
if (regaddr == RTXAGC_A_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][3]);
}
if (regaddr == RTXAGC_A_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][4]);
}
if (regaddr == RTXAGC_A_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][5]);
}
if (regaddr == RTXAGC_B_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][8]);
}
if (regaddr == RTXAGC_B_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][9]);
}
if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][14]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][15]);
}
if (regaddr == RTXAGC_B_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][10]);
}
if (regaddr == RTXAGC_B_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][11]);
}
if (regaddr == RTXAGC_B_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][12]);
}
if (regaddr == RTXAGC_B_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][13]);
rtlphy->pwrgroup_cnt++;
@@ -387,21 +383,21 @@ void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+		"Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
@@ -588,9 +584,9 @@ bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
else
ofdmtxpwridx = 0;
- RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
- "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
- power_indbm, ccktxpwridx, ofdmtxpwridx);
+ rtl_dbg(rtlpriv, COMP_TXAGC, DBG_TRACE,
+ "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+ power_indbm, ccktxpwridx, ofdmtxpwridx);
for (idx = 0; idx < 14; idx++) {
for (rf_path = 0; rf_path < 2; rf_path++) {
rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
@@ -675,8 +671,8 @@ void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtlpriv->cfg->ops->phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -690,8 +686,8 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -709,7 +705,7 @@ void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
EXPORT_SYMBOL(rtl92c_phy_sw_chnl_callback);
@@ -730,12 +726,12 @@ u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92c_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -884,9 +880,9 @@ bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
_rtl92c_phy_sw_rf_seting(hw, channel);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1103,7 +1099,7 @@ static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
u32 i;
pathon = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (false == is2t) {
+ if (!is2t) {
pathon = 0x0bdb25a0;
rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
} else {
@@ -1220,10 +1216,9 @@ static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
0x522, 0x550, 0x551, 0x040
};
const u32 retrycount = 2;
- u32 bbvalue;
if (t == 0) {
- bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
+ rtl_get_bbreg(hw, 0x800, MASKDWORD);
_rtl92c_phy_save_adda_registers(hw, adda_reg,
rtlphy->adda_backup, 16);
@@ -1522,24 +1517,24 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1550,7 +1545,7 @@ bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92c_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
EXPORT_SYMBOL(rtl92c_phy_set_io_cmd);
@@ -1561,9 +1556,9 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -1576,14 +1571,14 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
rtl92c_dm_write_dig(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
EXPORT_SYMBOL(rtl92c_phy_set_io);
@@ -1622,8 +1617,8 @@ void _rtl92c_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Switch RF timeout !!!.\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
index a3e2c8a60967..34a730a0d81e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/dm.c
@@ -28,8 +28,8 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -40,43 +40,43 @@ void rtl92ce_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
}
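rtl92ce_dm_dynamic_txpower() selects the level through a ladder with dead bands: the -3 and -5 offsets below the thresholds keep the level from flapping when the PWDB hovers near a boundary. Condensed sketch (constants are the driver's; the helper is illustrative, and both near-field bands map to LEVEL1 on this chip):

	static u8 pick_dtp_level(long pwdb, u8 cur_lvl)
	{
		if (pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2)
			return TXHIGHPWRLEVEL_LEVEL1;
		if (pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1 &&
		    pwdb < TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)
			return TXHIGHPWRLEVEL_LEVEL1;
		if (pwdb < TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)
			return TXHIGHPWRLEVEL_NORMAL;
		return cur_lvl;	/* inside a dead band: keep current level */
	}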
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
index 6402a9e09be7..bb5a0c4aec93 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/hw.c
@@ -183,8 +183,8 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -223,9 +223,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -238,9 +238,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *val;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -287,9 +287,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -326,9 +326,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -349,9 +349,9 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -613,22 +613,22 @@ static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl92ce_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
status = _rtl92ce_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl92ce_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
status = _rtl92ce_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
return true;
@@ -690,15 +690,15 @@ static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
udelay(2);
retry = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
- rtl_read_dword(rtlpriv, 0xEC), bytetmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv, 0xEC), bytetmp);
while ((bytetmp & BIT(0)) && retry < 1000) {
retry++;
udelay(50);
bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
- rtl_read_dword(rtlpriv, 0xEC), bytetmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "reg0xec:%x:%x\n",
+ rtl_read_dword(rtlpriv, 0xEC), bytetmp);
udelay(50);
}
@@ -880,14 +880,14 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -902,8 +902,8 @@ void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -946,8 +946,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
err = rtl92c_download_fw(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1013,12 +1013,12 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(1)) && is92c) {
rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path B\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path B\n");
}
if (!(tmp_u1b & BIT(4))) {
@@ -1027,7 +1027,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl92c_dm_init(hw);
exit:
@@ -1122,8 +1122,8 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1141,30 +1141,30 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Mesh Point!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Mesh Point!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1190,9 +1190,9 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
_rtl92ce_resume_tx_beacon(hw);
_rtl92ce_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1393,8 +1393,8 @@ void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl92ce_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl92ce_enable_interrupt(hw);
@@ -1406,8 +1406,8 @@ void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
- add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1714,8 +1714,8 @@ static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1732,18 +1732,18 @@ void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92ce_read_adapter_info(hw);
} else {
@@ -1839,8 +1839,8 @@ static void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -1962,14 +1962,14 @@ static void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %5phC\n",
- ratr_index, ratr_bitmap, rate_mask);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, rate_mask);
rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
@@ -2031,15 +2031,15 @@ bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2090,7 +2090,7 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2150,24 +2150,24 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
entry_id);
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
-
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
"Pairwise Key content",
@@ -2175,8 +2175,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
rtlpriv->sec.
key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -2184,8 +2184,8 @@ void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
rtlpriv->sec.
key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
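
All of the hw.c hunks above are the same mechanical rename: RT_TRACE() becomes rtl_dbg() with identical arguments, and the continuation lines are re-indented to the new name's width. A minimal sketch of the assumed wrapper follows; the macro body is hypothetical and only illustrates that the (rtlpriv, comp, level, fmt, ...) signature is unchanged, which is what makes the conversion a pure rename.

#include <linux/printk.h>

/* Hypothetical stand-in for the in-tree rtl_dbg() -- illustration
 * only.  Component/level filtering is elided here; the point is that
 * the (rtlpriv, comp, level, fmt, ...) argument order matches
 * RT_TRACE(), so every conversion in this series is a pure rename. */
#define rtl_dbg(rtlpriv, comp, level, fmt, ...) \
	pr_debug(fmt, ##__VA_ARGS__)
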
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
index d6933d36ada2..57132278eb5c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/led.c
@@ -19,8 +19,8 @@ void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -47,8 +47,8 @@ void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -119,7 +119,7 @@ void rtl92ce_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n",
+ ledaction);
_rtl92ce_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
index f6574f31fa3b..04735da11168 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/phy.c
@@ -25,9 +25,9 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value, readback_value, bitshift;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -44,9 +44,9 @@ u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -99,9 +99,9 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -132,9 +132,9 @@ void rtl92ce_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
@@ -144,10 +144,10 @@ static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
arraylength = MAC_2T_ARRAYLENGTH;
ptrarray = RTL8192CEMAC_2T_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CEMAC_2T_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CEMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -180,20 +180,20 @@ bool _rtl92ce_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
agctab_array_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -221,8 +221,8 @@ bool _rtl92ce_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -243,21 +243,21 @@ bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_array_table = RTL8192CERADIOA_2TARRAY;
radiob_arraylen = RADIOB_2TARRAYLENGTH;
radiob_array_table = RTL8192CE_RADIOB_2TARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CERADIOA_2TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CE_RADIOB_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CERADIOA_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CE_RADIOB_2TARRAY\n");
} else {
radioa_arraylen = RADIOA_1TARRAYLENGTH;
radioa_array_table = RTL8192CE_RADIOA_1TARRAY;
radiob_arraylen = RADIOB_1TARRAYLENGTH;
radiob_array_table = RTL8192CE_RADIOB_1TARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CE_RADIOA_1TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CE_RADIOB_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CE_RADIOA_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CE_RADIOB_1TARRAY\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -293,9 +293,9 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -348,7 +348,7 @@ void rtl92ce_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl92ce_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
void _rtl92ce_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
@@ -418,8 +418,8 @@ static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Switch RF timeout !!!\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
@@ -446,18 +446,17 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ce_phy_set_rf_on(hw);
}
@@ -472,8 +471,8 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFOFF:{
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -498,27 +497,27 @@ static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92ce_phy_set_rf_sleep(hw);
break;
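
The ERFOFF/ERFSLEEP hunks above all touch the same drain-before-doze loop. A condensed sketch of that pattern is below; wait_for_tx_drain() is a hypothetical helper, while the ring/queue accessors and MAX_DOZE_WAITING_TIMES_9x are the identifiers visible in the hunks (assumed to come from the driver's pci.h).

#include <linux/delay.h>
#include <linux/skbuff.h>

/* Hypothetical condensation of the doze-wait loop edited above:
 * poll each TX ring until it drains, giving up (but still dozing)
 * after MAX_DOZE_WAITING_TIMES_9x * 10us per busy ring. */
static void wait_for_tx_drain(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	u8 queue_id = 0, i = 0;

	while (queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT) {
		struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[queue_id];

		if (skb_queue_len(&ring->queue) == 0) {
			queue_id++;	/* ring drained, move on */
			continue;
		}
		if (i >= MAX_DOZE_WAITING_TIMES_9x) {
			rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
				"TcbBusyQueue[%d] still busy, doze anyway\n",
				queue_id);
			break;		/* give up waiting on this ring */
		}
		udelay(10);
		i++;
	}
}
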
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
index 713859488744..8508a711d46a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/rf.c
@@ -470,13 +470,13 @@ static bool _rtl92ce_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
index 8fc3cb824066..c0635309a92d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c
@@ -361,15 +361,14 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
bool lastseg = ((hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 bw_40 = 0;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
rcu_read_lock();
@@ -477,8 +476,8 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -516,7 +515,7 @@ void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -528,16 +527,15 @@ void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
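
Besides the logging rename, the trx.c hunks above migrate from the deprecated PCI DMA wrappers to the generic DMA API. A minimal sketch of the equivalence follows; map_tx_buf() is a hypothetical helper, and the commented call pairs are the exact before/after substitution made in the hunks.

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* map_tx_buf() is a hypothetical helper, not a driver function. */
static int map_tx_buf(struct pci_dev *pdev, struct sk_buff *skb,
		      dma_addr_t *mapping)
{
	/* was: pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE) */
	*mapping = dma_map_single(&pdev->dev, skb->data, skb->len,
				  DMA_TO_DEVICE);

	/* was: pci_dma_mapping_error(pdev, *mapping) */
	if (dma_mapping_error(&pdev->dev, *mapping))
		return -ENOMEM;

	return 0;
}
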
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
index 9d1167ff3b50..9823872692b1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/dm.c
@@ -25,8 +25,8 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -37,42 +37,42 @@ void rtl92cu_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
rtlphy->current_channel);
rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
if (rtlpriv->dm.dynamic_txhighpower_lvl ==
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 0ae9cfc65272..6312fddd9c00 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -386,8 +386,8 @@ static void _rtl92cu_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n",
- rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "RT Customized ID: 0x%02X\n",
+ rtlhal->oem_id);
}
void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
@@ -403,11 +403,11 @@ void rtl92cu_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
rtlefuse->epromtype = (tmp_u1b & BOOT_FROM_EEPROM) ?
EEPROM_93C46 : EEPROM_BOOT_EFUSE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from %s\n",
- tmp_u1b & BOOT_FROM_EEPROM ? "EERROM" : "EFUSE");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from %s\n",
+ tmp_u1b & BOOT_FROM_EEPROM ? "EERROM" : "EFUSE");
rtlefuse->autoload_failflag = (tmp_u1b & EEPROM_EN) ? false : true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload %s\n",
- tmp_u1b & EEPROM_EN ? "OK!!" : "ERR!!");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload %s\n",
+ tmp_u1b & EEPROM_EN ? "OK!!" : "ERR!!");
_rtl92cu_read_adapter_info(hw);
_rtl92cu_hal_customized_behavior(hw);
return;
@@ -424,8 +424,8 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
do {
if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Autoload Done!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Autoload Done!\n");
break;
}
if (pollingcount++ > 100) {
@@ -443,9 +443,9 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
if (0 == (value8 & LDV12_EN)) {
value8 |= LDV12_EN;
rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, value8);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- " power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x\n",
- value8);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ " power-on :REG_LDOV12D_CTRL Reg0x21:0x%02x\n",
+ value8);
udelay(100);
value8 = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
value8 &= ~ISO_MD2PP;
@@ -828,7 +828,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
? WMM_CHIP_B_TX_PAGE_BOUNDARY
: WMM_CHIP_A_TX_PAGE_BOUNDARY;
}
- if (false == rtl92c_init_llt_table(hw, boundary)) {
+ if (!rtl92c_init_llt_table(hw, boundary)) {
pr_err("Failed to init LLT Table!\n");
return -EINVAL;
}
@@ -860,13 +860,13 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value = 0x0;
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open sw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open sw encryption\n");
return;
}
sec_reg_value = SCR_TXENCENABLE | SCR_RXDECENABLE;
@@ -877,8 +877,8 @@ void rtl92cu_enable_hw_security_config(struct ieee80211_hw *hw)
if (IS_NORMAL_CHIP(rtlhal->version))
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
- sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
+ sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -958,8 +958,8 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
}
err = rtl92c_download_fw(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1280,32 +1280,32 @@ static int _rtl92cu_set_media_status(struct ieee80211_hw *hw,
_rtl92cu_resume_tx_beacon(hw);
_rtl92cu_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS:No such media status(%x)\n",
- type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS:No such media status(%x)\n",
+ type);
}
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1438,9 +1438,9 @@ void rtl92cu_set_beacon_related_registers(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, REG_TCR, value32);
value32 |= TSFRST;
rtl_write_dword(rtlpriv, REG_TCR, value32);
- RT_TRACE(rtlpriv, COMP_INIT|COMP_BEACON, DBG_LOUD,
- "SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
- value32);
+ rtl_dbg(rtlpriv, COMP_INIT | COMP_BEACON, DBG_LOUD,
+ "SetBeaconRelatedRegisters8192CUsb(): Set TCR(%x)\n",
+ value32);
/* TODO: Modify later (Find the right parameters)
* NOTE: Fix test chip's bug (about contention windows's randomness) */
if ((mac->opmode == NL80211_IFTYPE_ADHOC) ||
@@ -1458,8 +1458,8 @@ void rtl92cu_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n",
- bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG, "beacon_interval:%d\n",
+ bcn_interval);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
}
@@ -1599,7 +1599,7 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
rtl_write_byte(rtlpriv, REG_R2T_SIFS+1, val[0]);
rtl_write_byte(rtlpriv, REG_T2T_SIFS+1, val[0]);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD, "HW_VAR_SIFS\n");
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD, "HW_VAR_SIFS\n");
break;
}
case HW_VAR_SLOT_TIME:{
@@ -1607,8 +1607,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u8 QOS_MODE = 1;
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
if (QOS_MODE) {
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -1672,9 +1672,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
0xf8) |
min_spacing_to_set);
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
}
@@ -1687,9 +1687,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set &= 0x1f;
mac->min_space_cfg &= 0x07;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
break;
@@ -1721,9 +1721,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(REG_AGGLEN_LMT + index),
p_regtoset[index]);
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -1740,9 +1740,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u4b_ac_param |= (u32) ((cw_max & 0xF) <<
AC_PARAM_ECW_MAX_OFFSET);
u4b_ac_param |= (u32) tx_op << AC_PARAM_TXOP_OFFSET;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "queue:%x, ac_param:%x\n",
- e_aci, u4b_ac_param);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "queue:%x, ac_param:%x\n",
+ e_aci, u4b_ac_param);
switch (e_aci) {
case AC1_BK:
rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
@@ -1770,8 +1770,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_RCR:{
rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
mac->rx_conf = ((u32 *) (val))[0];
- RT_TRACE(rtlpriv, COMP_RECV, DBG_DMESG,
- "### Set RCR(0x%08x) ###\n", mac->rx_conf);
+ rtl_dbg(rtlpriv, COMP_RECV, DBG_DMESG,
+ "### Set RCR(0x%08x) ###\n", mac->rx_conf);
break;
}
case HW_VAR_RETRY_LIMIT:{
@@ -1780,9 +1780,9 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_word(rtlpriv, REG_RL,
retry_limit << RETRY_LIMIT_SHORT_SHIFT |
retry_limit << RETRY_LIMIT_LONG_SHIFT);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_DMESG,
- "Set HW_VAR_RETRY_LIMIT(0x%08x)\n",
- retry_limit);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_DMESG,
+ "Set HW_VAR_RETRY_LIMIT(0x%08x)\n",
+ retry_limit);
break;
}
case HW_VAR_DUAL_TSF_RST:
@@ -1987,8 +1987,8 @@ static void rtl92cu_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2121,14 +2121,14 @@ static void rtl92cu_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %5phC\n",
- ratr_index, ratr_bitmap, rate_mask);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, rate_mask);
memcpy(rtlpriv->rate_mask, rate_mask, 5);
/* rtl92c_fill_h2c_cmd() does USB I/O and will result in a
* "scheduled while atomic" if called directly */
@@ -2194,8 +2194,8 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
u1tmp = rtl_read_byte(rtlpriv, REG_HSISR);
e_rfpowerstate_toset = (u1tmp & BIT(7)) ?
ERFOFF : ERFON;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "pwrdown, 0x5c(BIT7)=%02x\n", u1tmp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "pwrdown, 0x5c(BIT7)=%02x\n", u1tmp);
} else {
rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
rtl_read_byte(rtlpriv,
@@ -2203,26 +2203,26 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
e_rfpowerstate_toset = (u1tmp & BIT(3)) ?
ERFON : ERFOFF;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "GPIO_IN=%02x\n", u1tmp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "GPIO_IN=%02x\n", u1tmp);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "N-SS RF =%x\n",
- e_rfpowerstate_toset);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "N-SS RF =%x\n",
+ e_rfpowerstate_toset);
}
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
ppsc->hwradiooff = false;
actuallyset = true;
} else if ((!ppsc->hwradiooff) && (e_rfpowerstate_toset ==
ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "GPIOChangeRF - HW Radio OFF\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "GPIOChangeRF - HW Radio OFF\n");
ppsc->hwradiooff = true;
actuallyset = true;
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "pHalData->bHwRadioOff and eRfPowerStateToSet do not match: pHalData->bHwRadioOff %x, eRfPowerStateToSet %x\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "pHalData->bHwRadioOff and eRfPowerStateToSet do not match: pHalData->bHwRadioOff %x, eRfPowerStateToSet %x\n",
ppsc->hwradiooff, e_rfpowerstate_toset);
}
if (actuallyset) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
index cc13a4a8f856..1488f52a2d2f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/led.c
@@ -23,8 +23,8 @@ void rtl92cu_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -49,8 +49,8 @@ void rtl92cu_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -113,6 +113,6 @@ void rtl92cu_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
_rtl92cu_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
index b4b67341dc83..2890a495a23e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c
@@ -91,24 +91,24 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
versionid = "UNKNOWN";
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: %s\n", versionid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: %s\n", versionid);
if (IS_92C_SERIAL(rtlhal->version))
rtlphy->rf_type =
(IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
else
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n",
- rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n",
+ rtlphy->rf_type == RF_2T2R ? "RF_2T2R" : "RF_1T1R");
if (get_rf_type(rtlphy) == RF_1T1R)
rtlpriv->dm.rfpath_rxenable[0] = true;
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
}
/**
@@ -158,14 +158,14 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
for (i = 0; i < (boundary - 1); i++) {
rst = rtl92c_llt_write(hw, i , i + 1);
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #1 fail\n", __func__);
return rst;
}
}
/* end of list */
rst = rtl92c_llt_write(hw, (boundary - 1), 0xFF);
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #2 fail\n", __func__);
return rst;
}
@@ -176,14 +176,14 @@ bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
*/
for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
rst = rtl92c_llt_write(hw, i, (i + 1));
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #3 fail\n", __func__);
return rst;
}
}
/* Let last entry point to the start entry of ring buffer */
rst = rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary);
- if (true != rst) {
+ if (!rst) {
pr_err("===> %s #4 fail\n", __func__);
return rst;
}
@@ -215,7 +215,7 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
@@ -269,30 +269,30 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry\n");
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY length is %d\n",
rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
"Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->sec.
key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
@@ -300,8 +300,8 @@ void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
rtlpriv->sec.
key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
@@ -383,27 +383,27 @@ int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
value = NT_NO_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
value = NT_LINK_AD_HOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
value = NT_LINK_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
value = NT_AS_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Set Network type to AP!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Network type %d not supported!\n", type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Network type %d not supported!\n", type);
return -EOPNOTSUPP;
}
rtl_write_byte(rtlpriv, MSR, value);
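
The mac.c hunks above also drop the "true != rst" / "false == ..." comparisons in favour of plain logical negation. For a C bool the two forms are identical; a self-contained illustration (write_ok() is a hypothetical stand-in for rtl92c_llt_write()):

#include <stdbool.h>

/* Hypothetical stand-in for rtl92c_llt_write(). */
static bool write_ok(void) { return true; }

static int demo(void)
{
	bool rst = write_ok();

	if (true != rst)	/* old form removed by the hunks */
		return -1;
	if (!rst)		/* new form, identical behaviour */
		return -1;
	return 0;
}
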
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
index 9cd028cb2239..a8d9fe269f31 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/phy.c
@@ -22,9 +22,9 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value, readback_value, bitshift;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
original_value = _rtl92c_phy_rf_serial_read(hw,
rfpath, regaddr);
@@ -34,9 +34,9 @@ u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
}
bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -48,9 +48,9 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
if (rtlphy->rf_mode != RF_OP_BY_FW) {
if (bitmask != RFREG_OFFSET_MASK) {
original_value = _rtl92c_phy_rf_serial_read(hw,
@@ -74,9 +74,9 @@ void rtl92cu_phy_set_rf_reg(struct ieee80211_hw *hw,
}
_rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw)
@@ -121,10 +121,10 @@ bool _rtl92cu_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_ARRAY\n");
arraylength = rtlphy->hwparam_tables[MAC_REG].length ;
ptrarray = rtlphy->hwparam_tables[MAC_REG].pdata;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CUMAC_2T_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Img:RTL8192CUMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -158,20 +158,20 @@ bool _rtl92cu_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
agctab_array_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGARRAY[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -198,8 +198,8 @@ bool _rtl92cu_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg[i + 2]);
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -220,21 +220,21 @@ bool rtl92cu_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_array_table = rtlphy->hwparam_tables[RADIOA_2T].pdata;
radiob_arraylen = rtlphy->hwparam_tables[RADIOB_2T].length;
radiob_array_table = rtlphy->hwparam_tables[RADIOB_2T].pdata;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CURADIOA_2TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CU_RADIOB_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CURADIOA_2TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CU_RADIOB_2TARRAY\n");
} else {
radioa_arraylen = rtlphy->hwparam_tables[RADIOA_1T].length;
radioa_array_table = rtlphy->hwparam_tables[RADIOA_1T].pdata;
radiob_arraylen = rtlphy->hwparam_tables[RADIOB_1T].length;
radiob_array_table = rtlphy->hwparam_tables[RADIOB_1T].pdata;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_A:RTL8192CU_RADIOA_1TARRAY\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio_B:RTL8192CU_RADIOB_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_A:RTL8192CU_RADIOA_1TARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio_B:RTL8192CU_RADIOB_1TARRAY\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
for (i = 0; i < radioa_arraylen; i = i + 2) {
@@ -269,9 +269,9 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
return;
@@ -319,7 +319,7 @@ void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl92cu_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
void rtl92cu_bb_block_on(struct ieee80211_hw *hw)
@@ -390,17 +390,17 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
init_count++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (init_count < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ce_phy_set_rf_on(hw);
}
@@ -421,26 +421,26 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1,
+ queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -463,25 +463,25 @@ static bool _rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92c_phy_set_rf_sleep(hw);
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
index d259794a308b..288033f02266 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/rf.c
@@ -431,12 +431,12 @@ static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
break;
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
goto phy_rf_cfg_fail;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
phy_rf_cfg_fail:
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
index fc526477740f..1ad0cf37f60b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c
@@ -82,8 +82,8 @@ static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB Chip-B & WMM Setting.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB Chip-B & WMM Setting.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 2;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -92,8 +92,8 @@ static void twooutepmapping(struct ieee80211_hw *hw, bool is_chip8,
ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
} else { /* typical setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB typical Setting.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB typical Setting.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 3;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 2;
@@ -110,8 +110,8 @@ static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (bwificfg) { /* for WMM */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB 3EP Setting for WMM.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB 3EP Setting for WMM.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 5;
ep_map->ep_mapping[RTL_TXQ_BK] = 3;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -120,8 +120,8 @@ static void threeoutepmapping(struct ieee80211_hw *hw, bool bwificfg,
ep_map->ep_mapping[RTL_TXQ_BCN] = 2;
ep_map->ep_mapping[RTL_TXQ_HI] = 2;
} else { /* typical setting */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB 3EP Setting for typical.....\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB 3EP Setting for typical.....\n");
ep_map->ep_mapping[RTL_TXQ_BE] = 5;
ep_map->ep_mapping[RTL_TXQ_BK] = 5;
ep_map->ep_mapping[RTL_TXQ_VI] = 3;
@@ -248,24 +248,24 @@ static enum rtl_desc_qsel _rtl8192cu_mq_to_descq(struct ieee80211_hw *hw,
switch (mac80211_queue_index) {
case 0: /* VO */
qsel = QSLT_VO;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "VO queue, set qsel = 0x%x\n", QSLT_VO);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "VO queue, set qsel = 0x%x\n", QSLT_VO);
break;
case 1: /* VI */
qsel = QSLT_VI;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "VI queue, set qsel = 0x%x\n", QSLT_VI);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "VI queue, set qsel = 0x%x\n", QSLT_VI);
break;
case 3: /* BK */
qsel = QSLT_BK;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "BK queue, set qsel = 0x%x\n", QSLT_BK);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "BK queue, set qsel = 0x%x\n", QSLT_BK);
break;
case 2: /* BE */
default:
qsel = QSLT_BE;
- RT_TRACE(rtlpriv, COMP_USB, DBG_DMESG,
- "BE queue, set qsel = 0x%x\n", QSLT_BE);
+ rtl_dbg(rtlpriv, COMP_USB, DBG_DMESG,
+ "BE queue, set qsel = 0x%x\n", QSLT_BE);
break;
}
out:
@@ -398,18 +398,18 @@ static void _rtl_rx_process(struct ieee80211_hw *hw, struct sk_buff *skb)
fc = hdr->frame_control;
bv = ieee80211_is_probe_resp(fc);
if (bv)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Got probe response frame\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Got probe response frame\n");
if (ieee80211_is_beacon(fc))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got beacon frame\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Got beacon frame\n");
if (ieee80211_is_data(fc))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Got data frame\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:0x%02X\n",
- fc,
- (u32)hdr->addr1[0], (u32)hdr->addr1[1],
- (u32)hdr->addr1[2], (u32)hdr->addr1[3],
- (u32)hdr->addr1[4], (u32)hdr->addr1[5]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Got data frame\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Fram: fc = 0x%X addr1 = 0x%02X:0x%02X:0x%02X:0x%02X:0x%02X:0x%02X\n",
+ fc,
+ (u32)hdr->addr1[0], (u32)hdr->addr1[1],
+ (u32)hdr->addr1[2], (u32)hdr->addr1[3],
+ (u32)hdr->addr1[4], (u32)hdr->addr1[5]);
ieee80211_rx(hw, skb);
}
@@ -570,8 +570,8 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_use_rate(txdesc, tcb_desc->use_driver_rate ? 1 : 0);
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
set_tx_desc_rdg_enable(txdesc, 1);
set_tx_desc_htc(txdesc, 1);
}
@@ -597,7 +597,7 @@ void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(txdesc, 1);
_rtl_fill_usb_tx_desc(txdesc);
_rtl_tx_desc_checksum(txdesc);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
}
void rtl92cu_fill_fake_txdesc(struct ieee80211_hw *hw, u8 *pdesc8,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
index 71f3b6b5d7bd..b3f25a228532 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
@@ -194,21 +194,21 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
- falsealm_cnt->cnt_fast_fsync_fail,
- falsealm_cnt->cnt_sb_search_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail,
- falsealm_cnt->cnt_mcs_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail,
- falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
+ falsealm_cnt->cnt_fast_fsync_fail,
+ falsealm_cnt->cnt_sb_search_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
}
static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
@@ -221,33 +221,33 @@ static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
de_digtable->min_undec_pwdb_for_dm = 0;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
de_digtable->min_undec_pwdb_for_dm =
rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
rtlpriv->dm.UNDEC_SM_PWDB);
} else {
de_digtable->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
}
} else {
de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
- de_digtable->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ de_digtable->min_undec_pwdb_for_dm);
}
static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -287,14 +287,14 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
}
de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
- de_digtable->cursta_cstate == DIG_STA_CONNECT ?
- "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
- de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
- "Low RSSI " : "High RSSI ");
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
- IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
+ de_digtable->cursta_cstate == DIG_STA_CONNECT ?
+ "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
+ de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
+ "Low RSSI " : "High RSSI ");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
+ IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
}
@@ -303,12 +303,12 @@ void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- de_digtable->cur_igvalue, de_digtable->pre_igvalue,
- de_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ de_digtable->cur_igvalue, de_digtable->pre_igvalue,
+ de_digtable->back_val);
if (de_digtable->dig_enable_flag == false) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
de_digtable->pre_igvalue = 0x17;
return;
}
@@ -327,21 +327,21 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
(rtlpriv->mac80211.vendor == PEER_CISCO)) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
if (de_digtable->last_min_undec_pwdb_for_dm >= 50
&& de_digtable->min_undec_pwdb_for_dm < 50) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode Off\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode Off\n");
} else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
de_digtable->min_undec_pwdb_for_dm > 55) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode On\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode On\n");
}
} else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
}
}
@@ -352,7 +352,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
u8 value_igi = de_digtable->cur_igvalue;
struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
if (rtlpriv->rtlhal.earlymode_enable) {
rtl92d_early_mode_enabled(rtlpriv);
de_digtable->last_min_undec_pwdb_for_dm =
@@ -371,7 +371,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
/* Not STA mode return tmp */
if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
return;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
/* Decide the current status and if modify initial gain or not */
if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
de_digtable->cursta_cstate = DIG_STA_CONNECT;
@@ -387,17 +387,17 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
value_igi++;
else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
value_igi += 2;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
/* deal with abnormally large false alarm */
if (falsealm_cnt->cnt_all > 10000) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG(): Abnormally false alarm case\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG(): Abnormally false alarm case\n");
de_digtable->large_fa_hit++;
if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
@@ -435,12 +435,12 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
}
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
if (value_igi > DM_DIG_MAX)
value_igi = DM_DIG_MAX;
@@ -450,7 +450,7 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
rtl92d_dm_write_dig(hw);
if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
rtl92d_dm_cck_packet_detection_thresh(hw);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
}
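rtl92d_dm_dig() above follows the usual dynamic-initial-gain shape: derive an IGI adjustment from the false-alarm count, clamp it between DM_DIG_MIN and DM_DIG_MAX, then write it back via rtl92d_dm_write_dig(). Only part of the threshold chain is visible in these hunks, so the following condensed sketch takes the thresholds as parameters and is not the driver's exact logic:

/* Condensed sketch; th0 < th1 < th2 stand in for DM_DIG_FA_TH0..TH2,
 * and lo/hi for DM_DIG_MIN/DM_DIG_MAX.  Not the driver's exact chain. */
static int adjust_igi(int igi, unsigned int fa_cnt,
		      unsigned int th0, unsigned int th1, unsigned int th2,
		      int lo, int hi)
{
	if (fa_cnt < th0)
		igi--;		/* quiet band: open the gain back up */
	else if (fa_cnt >= th2)
		igi += 2;	/* very noisy: back off hard */
	else if (fa_cnt >= th1)
		igi++;		/* noisy: back off */

	if (igi > hi)
		igi = hi;
	else if (igi < lo)
		igi = lo;
	return igi;
}

int main(void)
{
	return adjust_igi(32, 600, 200, 300, 400, 24, 44) == 34 ? 0 : 1;
}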
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
@@ -477,8 +477,8 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
}
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
return;
@@ -487,49 +487,49 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "IBSS Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "IBSS Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb =
rtlpriv->dm.UNDEC_SM_PWDB;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (rtlhal->current_bandtype == BAND_ON_5G) {
if (undec_sm_pwdb >= 0x33) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
- RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ "5G:TxHighPwrLevel_Level2 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb < 0x33)
&& (undec_sm_pwdb >= 0x2b)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ "5G:TxHighPwrLevel_Level1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb < 0x2b) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_HIPWR, DBG_LOUD,
- "5G:TxHighPwrLevel_Normal\n");
+ rtl_dbg(rtlpriv, COMP_HIPWR, DBG_LOUD,
+ "5G:TxHighPwrLevel_Normal\n");
}
} else {
if (undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL2;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else
if ((undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3))
@@ -538,20 +538,20 @@ static void rtl92d_dm_dynamic_txpower(struct ieee80211_hw *hw)
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl =
TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
}
if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
}
rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
@@ -666,8 +666,8 @@ static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
rtlpriv->dm.thermalvalue_rxgain)]) << 12;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===> Rx Gain %x\n", u4tmp);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===> Rx Gain %x\n", u4tmp);
for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
(rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
@@ -695,11 +695,11 @@ static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) {
*cck_index_old = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- *cck_index_old,
- rtlpriv->dm.cck_inch14);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ *cck_index_old,
+ rtlpriv->dm.cck_inch14);
break;
}
}
@@ -821,12 +821,12 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
};
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue,
+ rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
rtl92d_phy_ap_calibrate(hw, (thermalvalue -
rtlefuse->eeprom_thermalmeter));
@@ -846,10 +846,10 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[0] = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]);
break;
}
}
@@ -860,11 +860,11 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
if (ele_d ==
(ofdmswing_table[i] & MASKOFDM_D)) {
ofdm_index_old[1] = (u8)i;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
break;
}
}
@@ -889,8 +889,8 @@ static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
for (i = 0; i < rf; i++)
rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
rtlpriv->dm.cck_index = cck_index_old;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
}
old_index_done:
for (i = 0; i < rf; i++)
@@ -934,11 +934,11 @@ old_index_done:
(thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
(thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
(rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck,
+ delta_iqk);
if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
rtl92d_phy_lc_calibrate(hw);
@@ -974,16 +974,16 @@ old_index_done:
index_mapping_internal_pa);
}
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.ofdm_index[1],
+ rtlpriv->dm.cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ rtlpriv->dm.ofdm_index[0],
+ rtlpriv->dm.cck_index);
}
for (i = 0; i < rf; i++) {
if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1)
@@ -1003,14 +1003,14 @@ old_index_done:
}
}
if (is2t) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1],
+ cck_index);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
- ofdm_index[0], cck_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
+ ofdm_index[0], cck_index);
}
ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0];
@@ -1050,11 +1050,11 @@ old_index_done:
BIT(24), 0x00);
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
- rtlhal->interfaceindex,
- val_x, val_y, ele_a, ele_c, ele_d,
- val_x, val_y);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
+ rtlhal->interfaceindex,
+ val_x, val_y, ele_a, ele_c, ele_d,
+ val_x, val_y);
if (cck_index >= CCK_TABLE_SIZE)
cck_index = CCK_TABLE_SIZE - 1;
@@ -1134,17 +1134,17 @@ old_index_done:
rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
BIT(28), 0x00);
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
- val_x, val_y, ele_a, ele_c,
- ele_d, val_x, val_y);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
+ val_x, val_y, ele_a, ele_c,
+ ele_d, val_x, val_y);
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- rtl_get_bbreg(hw, 0xc80, MASKDWORD),
- rtl_get_bbreg(hw, 0xc94, MASKDWORD),
- rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- RFREG_OFFSET_MASK));
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
+ rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, MASKDWORD),
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
+ RFREG_OFFSET_MASK));
check_delta:
if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) {
@@ -1161,7 +1161,7 @@ check_delta:
rtlpriv->dm.thermalvalue = thermalvalue;
exit:
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
}
static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -1171,9 +1171,9 @@ static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
rtlpriv->dm.txpower_track_control = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
@@ -1186,13 +1186,13 @@ void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
BIT(16), 0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 92S Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 92S Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
rtlpriv->dm.tm_trigger = 0;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index 2064813f9381..9ddb8478784b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -47,7 +47,7 @@ static void _rtl92d_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
rtl_fill_dummy(bufferptr, &size);
pagenums = size / FW_8192D_PAGE_SIZE;
@@ -104,8 +104,8 @@ void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
}
WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n");
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "=====> 8051 reset success (%d)\n", delay);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "=====> 8051 reset success (%d)\n", delay);
}
static int _rtl92d_fw_init(struct ieee80211_hw *hw)
@@ -114,27 +114,27 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u32 counter;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, "FW already have download\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW already downloaded\n");
/* polling for FW ready */
counter = 0;
do {
if (rtlhal->interfaceindex == 0) {
if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
MAC0_READY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC0_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC0_READY));
return 0;
}
udelay(5);
} else {
if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
MAC1_READY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC1_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC1_READY));
return 0;
}
udelay(5);
@@ -142,17 +142,17 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw)
} while (counter++ < POLLING_READY_TIMEOUT_COUNT);
if (rtlhal->interfaceindex == 0) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC0_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC0_READY));
} else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC1_READY));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC1_READY));
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
- rtl_read_dword(rtlpriv, REG_MCUFWDL));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
+ rtl_read_dword(rtlpriv, REG_MCUFWDL));
return -1;
}
@@ -177,13 +177,13 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
pfwdata = rtlhal->pfirmware;
rtlhal->fw_version = (u16) GET_FIRMWARE_HDR_VERSION(pfwheader);
rtlhal->fw_subversion = (u16) GET_FIRMWARE_HDR_SUB_VER(pfwheader);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n",
- rtlhal->fw_version, rtlhal->fw_subversion,
- GET_FIRMWARE_HDR_SIGNATURE(pfwheader));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n",
+ rtlhal->fw_version, rtlhal->fw_subversion,
+ GET_FIRMWARE_HDR_SIGNATURE(pfwheader));
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Shift 32 bytes for FW header!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Shift 32 bytes for FW header!!\n");
pfwdata = pfwdata + 32;
fwsize = fwsize - 32;
}
@@ -214,8 +214,8 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
else if (!fwdl_in_process)
break;
else
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Wait for another mac download fw\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Wait for another mac download fw\n");
}
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
value = rtl_read_byte(rtlpriv, 0x1f);
@@ -286,25 +286,25 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
u8 idx;
if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Return as RF is off!!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Return as RF is off!!!\n");
return;
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d)\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d)\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -353,30 +353,30 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
udelay(10);
isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
boxcontent[0] &= ~(BIT(7));
@@ -430,14 +430,14 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
rtlhal->last_hmeboxnum = boxnum + 1;
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
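The _rtl92d_fill_h2c_command() hunks all decorate the same bounded-wait shape: poll a firmware-owned flag, give up after a fixed number of iterations rather than spin forever, and log when bailing out. A compilable sketch of that pattern, with hypothetical stand-ins (fw_consumed_box(), a limit of 100) replacing the driver's register reads and udelay(10):

#include <stdbool.h>
#include <stdio.h>

static int polls_until_ready = 3;	/* fake firmware: ready on the 3rd poll */

static bool fw_consumed_box(void)	/* hypothetical readiness check */
{
	return --polls_until_ready <= 0;
}

static bool wait_for_fw(int limit)
{
	while (!fw_consumed_box()) {
		if (--limit == 0) {
			printf("Waiting too long for FW\n");
			return false;	/* caller breaks out, as the driver does */
		}
		/* the driver sleeps here with udelay(10) between polls */
	}
	return true;
}

int main(void)
{
	return wait_for_fw(100) ? 0 : 1;
}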
void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -653,15 +653,15 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
dlok = true;
}
if (dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE", u1rsvdpageloc, 3);
rtl92d_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!\n");
}
void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 146fe144f5f5..f849291cc587 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -204,8 +204,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME: {
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
for (e_aci = 0; e_aci < AC_MAX; e_aci++)
rtlpriv->cfg->ops->set_hw_reg(hw,
@@ -235,9 +235,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
min_spacing_to_set);
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
}
@@ -249,9 +249,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *val;
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
break;
@@ -284,9 +284,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
| (factor_toset);
}
rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -318,9 +318,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -340,9 +340,9 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -563,13 +563,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* 18. LLT_table_init(Adapter); */
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
status = _rtl92de_llt_write(hw, i, i + 1);
- if (true != status)
+ if (!status)
return status;
}
/* end of list */
status = _rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
- if (true != status)
+ if (!status)
return status;
/* Make the other pages as ring buffer */
@@ -578,13 +578,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* Otherwise used as local loopback buffer. */
for (i = txpktbuf_bndy; i < maxpage; i++) {
status = _rtl92de_llt_write(hw, i, (i + 1));
- if (true != status)
+ if (!status)
return status;
}
/* Let last entry point to the start entry of ring buffer */
status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy);
- if (true != status)
+ if (!status)
return status;
return true;
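Besides the logging rename, hw.c picks up one small cleanup: the four "if (true != status)" tests in _rtl92de_llt_table_init() become "if (!status)". For a C99 bool the two branch identically; the negated form is the idiomatic kernel spelling and the one checkpatch.pl nudges toward. A trivial standalone illustration, where llt_write_ok() is a made-up stand-in for _rtl92de_llt_write():

#include <stdbool.h>
#include <stdio.h>

static bool llt_write_ok(int entry)	/* made-up stand-in */
{
	return entry >= 0;
}

int main(void)
{
	bool status = llt_write_ok(-1);

	/* "if (true != status)" and "if (!status)" take the same branch
	 * for a C99 bool; only the spelling changes. */
	if (!status)
		printf("LLT write failed\n");
	return 0;
}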
@@ -851,13 +851,13 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
@@ -867,8 +867,8 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
}
sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -902,8 +902,8 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
err = rtl92d_download_fw(hw);
spin_unlock_irqrestore(&globalmutex_for_power_and_efuse, flags);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW..\n");
return 1;
}
rtlhal->last_hmeboxnum = 0;
@@ -914,8 +914,8 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x605, tmp_u1b);
if (rtlhal->earlymode_enable) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EarlyMode Enabled!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EarlyMode Enabled!!!\n");
tmp_u1b = rtl_read_byte(rtlpriv, 0x4d0);
tmp_u1b = tmp_u1b | 0x1f;
@@ -1033,10 +1033,10 @@ static enum version_8192d _rtl92de_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
if (!(value32 & 0x000f0000)) {
version = VERSION_TEST_CHIP_92D_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
} else {
version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
}
return version;
}
@@ -1060,9 +1060,9 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
_rtl92de_resume_tx_beacon(hw);
_rtl92de_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
- type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
+ type);
}
bcnfunc_enable = rtl_read_byte(rtlpriv, REG_BCN_CTRL);
switch (type) {
@@ -1070,27 +1070,27 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
bcnfunc_enable &= 0xF7;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
bcnfunc_enable |= 0x08;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
bcnfunc_enable &= 0xF7;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
bcnfunc_enable |= 0x08;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1156,8 +1156,8 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
if (!rtlphy->iqk_matrix[indexforchannel].iqk_done) {
- RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
- "Do IQK for channel:%d\n", channel);
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
+ "Do IQK for channel:%d\n", channel);
rtl92d_phy_iq_calibrate(hw);
}
}
@@ -1255,9 +1255,9 @@ static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
/* is set as 0x18, they had ever met auto load fail problem. */
rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "In PowerOff,reg0x%x=%X\n",
- REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL));
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "In PowerOff,reg0x%x=%X\n",
+ REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL));
/* r. Note: for PCIe interface, PON will not turn */
/* off m-bias and BandGap in PCIe suspend mode. */
@@ -1270,7 +1270,7 @@ static void _rtl92de_poweroff_adapter(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&globalmutex_power, flags);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n");
}
void rtl92de_card_disable(struct ieee80211_hw *hw)
@@ -1328,7 +1328,7 @@ void rtl92de_card_disable(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xff);
udelay(50);
rtl_write_byte(rtlpriv, REG_CR, 0x0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n");
if (rtl92d_phy_check_poweroff(hw))
_rtl92de_poweroff_adapter(hw);
return;
@@ -1370,8 +1370,8 @@ void rtl92de_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl92de_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl92de_enable_interrupt(hw);
@@ -1383,8 +1383,8 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
- add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
if (rm_msr)
@@ -1560,10 +1560,10 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
!((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6);
rtlefuse->internal_pa_5g[1] =
!((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Is D cut,Internal PA0 %d Internal PA1 %d\n",
- rtlefuse->internal_pa_5g[0],
- rtlefuse->internal_pa_5g[1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Is D cut,Internal PA0 %d Internal PA1 %d\n",
+ rtlefuse->internal_pa_5g[0],
+ rtlefuse->internal_pa_5g[1]);
}
rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
@@ -1612,15 +1612,15 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
rtlefuse->delta_lck = tempval[1] - 1;
if (rtlefuse->eeprom_c9 == 0xFF)
rtlefuse->eeprom_c9 = 0x00;
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
- rtlefuse->delta_iqk, rtlefuse->delta_lck);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
+ rtlefuse->delta_iqk, rtlefuse->delta_lck);
for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
@@ -1655,12 +1655,12 @@ static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
if (macphy_crvalue & BIT(3)) {
rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode SINGLEMAC_SINGLEPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode SINGLEMAC_SINGLEPHY\n");
} else {
rtlhal->macphymode = DUALMAC_DUALPHY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode DUALMAC_DUALPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode DUALMAC_DUALPHY\n");
}
}
@@ -1687,15 +1687,15 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
switch (chipvalue) {
case 0xAA55:
chipver |= CHIP_92D_C_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
break;
case 0x9966:
chipver |= CHIP_92D_D_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
break;
case 0xCC33:
chipver |= CHIP_92D_E_CUT;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
break;
default:
chipver |= CHIP_92D_D_CUT;
@@ -1737,7 +1737,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
}
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
rtlefuse->dev_addr);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
_rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
/* Read Channel Plan */
@@ -1771,14 +1771,14 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
rtlefuse->autoload_status = tmp_u1b;
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92de_read_adapter_info(hw);
@@ -1866,8 +1866,8 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
(shortgi_rate << 4) | (shortgi_rate);
}
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -1998,9 +1998,9 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
value[1] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x value0:%x value1:%x\n",
- ratr_bitmap, value[0], value[1]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x value0:%x value1:%x\n",
+ ratr_bitmap, value[0], value[1]);
rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *) value);
if (macid != 0)
sta_entry->ratr_index = ratr_index;
@@ -2059,14 +2059,14 @@ bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
actuallyset = true;
@@ -2110,7 +2110,7 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 idx;
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
rtl_cam_empty_entry(hw, cam_offset + idx);
@@ -2164,38 +2164,38 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
}
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY length is %d\n",
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The insert KEY is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
"Pairwise Key content",
rtlpriv->sec.pairwise_key,
rtlpriv->
sec.key_len[PAIRWISE_KEYIDX]);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->
sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
rtlefuse->dev_addr,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 2b76a025deb8..93d1c6a610c3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -19,8 +19,8 @@ void rtl92de_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -56,8 +56,8 @@ void rtl92de_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -128,7 +128,7 @@ void rtl92de_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
_rtl92ce_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index 4b672199c81d..e34d33e73e52 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -162,14 +162,9 @@ static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
static u32 _rtl92d_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
+ u32 i = ffs(bitmask);
- return i;
+ return i ? i - 1 : 32;
}
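This is the one hunk in phy.c that changes code rather than log formatting: the open-coded scan for the lowest set bit becomes a call to ffs(), which the kernel provides through <linux/bitops.h>. ffs() returns the 1-based index of the least significant set bit, and 0 when no bit is set, so "i ? i - 1 : 32" reproduces the old loop exactly, including the 32 it fell out with for a zero bitmask. A userspace check of the equivalence, using the libc ffs() from <strings.h>:

#include <assert.h>
#include <strings.h>	/* userspace ffs(); the kernel's comes from <linux/bitops.h> */

static unsigned int bit_shift_loop(unsigned int bitmask)	/* old code */
{
	unsigned int i;

	for (i = 0; i <= 31; i++) {
		if ((bitmask >> i) & 0x1)
			break;
	}
	return i;	/* falls out as 32 when no bit is set */
}

static unsigned int bit_shift_ffs(unsigned int bitmask)	/* new code */
{
	unsigned int i = ffs(bitmask);

	return i ? i - 1 : 32;
}

int main(void)
{
	assert(bit_shift_ffs(0x0000c000) == 14);	/* the CCK FA mask above */
	assert(bit_shift_ffs(0x0000c000) == bit_shift_loop(0x0000c000));
	assert(bit_shift_ffs(0) == bit_shift_loop(0));	/* both yield 32 */
	return 0;
}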
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
@@ -178,8 +173,8 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
if (rtlhal->during_mac1init_radioa || rtlhal->during_mac0init_radiob) {
u8 dbi_direct = 0;
@@ -196,9 +191,9 @@ u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
}
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -210,9 +205,9 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
u8 dbi_direct = 0;
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (rtlhal->during_mac1init_radioa)
dbi_direct = BIT(3);
else if (rtlhal->during_mac0init_radiob)
@@ -233,9 +228,9 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl92de_write_dword_dbi(hw, (u16) regaddr, data, dbi_direct);
else
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
@@ -279,8 +274,8 @@ static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
@@ -298,8 +293,8 @@ static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
/* T65 RF */
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -308,17 +303,17 @@ u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
bitshift = _rtl92d_phy_calculate_bit_shift(bitmask);
readback_value = (original_value & bitmask) >> bitshift;
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -329,9 +324,9 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
if (bitmask == 0)
return;
spin_lock(&rtlpriv->locks.rf_lock);
@@ -346,9 +341,9 @@ void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
_rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
}
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
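Both the RF query and set paths above perform a read-modify-write of the serial RF register while holding rtlpriv->locks.rf_lock, so two writers can never interleave between the read and the write-back. A compact userspace sketch of that discipline, with a pthread mutex standing in for the spinlock and a simplified full-register mask (the driver tests against RFREG_OFFSET_MASK):

#include <pthread.h>
#include <stdint.h>

#define FULL_MASK 0xffffffffu	/* simplified stand-in for RFREG_OFFSET_MASK */

static pthread_mutex_t rf_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t rf_shadow;	/* stand-in for the serial RF register */

static unsigned int bit_shift(uint32_t mask)
{
	return mask ? (unsigned int)__builtin_ctz(mask) : 32;
}

/* Update only the field selected by bitmask, atomically w.r.t. rf_lock. */
static void set_rf_field(uint32_t bitmask, uint32_t data)
{
	pthread_mutex_lock(&rf_lock);
	if (bitmask != FULL_MASK) {
		uint32_t orig = rf_shadow;			/* read   */
		data = (orig & ~bitmask) |
		       (data << bit_shift(bitmask));		/* modify */
	}
	rf_shadow = data;					/* write  */
	pthread_mutex_unlock(&rf_lock);
}

int main(void)
{
	set_rf_field(0x00000f00, 0x5);	/* writes 0x500 into bits 8..11 */
	return rf_shadow == 0x500 ? 0 : 1;
}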
bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
@@ -358,10 +353,10 @@ bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
arraylength = MAC_2T_ARRAYLENGTH;
ptrarray = rtl8192de_mac_2tarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Img:Rtl819XMAC_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Img:Rtl819XMAC_Array\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
@@ -519,36 +514,36 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
if (rtlhal->interfaceindex == 0) {
agctab_arraylen = AGCTAB_ARRAYLENGTH;
agctab_array_table = rtl8192de_agctab_array;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:MAC0, Rtl819XAGCTAB_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC0, Rtl819XAGCTAB_Array\n");
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
agctab_arraylen = AGCTAB_2G_ARRAYLENGTH;
agctab_array_table = rtl8192de_agctab_2garray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n");
} else {
agctab_5garraylen = AGCTAB_5G_ARRAYLENGTH;
agctab_5garray_table = rtl8192de_agctab_5garray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n");
}
}
phy_reg_arraylen = PHY_REG_2T_ARRAYLENGTH;
phy_regarray_table = rtl8192de_phy_reg_2tarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> phy:Rtl819XPHY_REG_Array_PG\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:Rtl819XPHY_REG_Array_PG\n");
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_reg_arraylen; i = i + 2) {
rtl_addr_delay(phy_regarray_table[i]);
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
if (rtlhal->interfaceindex == 0) {
@@ -559,13 +554,13 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
/* Add 1us delay between BB/RF register
* setting. */
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Normal Chip, MAC0, load Rtl819XAGCTAB_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Normal Chip, MAC0, load Rtl819XAGCTAB_Array\n");
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
@@ -575,13 +570,13 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
/* Add 1us delay between BB/RF register
* setting. */
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The Rtl819XAGCTAB_Array_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Load Rtl819XAGCTAB_2GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Load Rtl819XAGCTAB_2GArray\n");
} else {
for (i = 0; i < agctab_5garraylen; i = i + 2) {
rtl_set_bbreg(hw,
@@ -591,13 +586,13 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
/* Add 1us delay between BB/RF registeri
* setting. */
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The Rtl819XAGCTAB_5GArray_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
- agctab_5garray_table[i],
- agctab_5garray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The Rtl819XAGCTAB_5GArray_Table[0] is %u Rtl819XPHY_REGArray[1] is %u\n",
+ agctab_5garray_table[i],
+ agctab_5garray_table[i + 1]);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Load Rtl819XAGCTAB_5GArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Load Rtl819XAGCTAB_5GArray\n");
}
}
}
@@ -648,10 +643,10 @@ static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
return;
rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
- rtlphy->pwrgroup_cnt, index,
- rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
+ rtlphy->pwrgroup_cnt, index,
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
if (index == 13)
rtlphy->pwrgroup_cnt++;
}
@@ -675,8 +670,8 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg[i + 2]);
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -688,7 +683,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
bool rtstatus = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
rtstatus = _rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
if (!rtstatus) {
@@ -698,7 +693,7 @@ static bool _rtl92d_phy_bb_config(struct ieee80211_hw *hw)
/* if (rtlphy->rf_type == RF_1T2R) {
* _rtl92c_phy_bb_config_1t(hw);
- * RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
+ * rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
*} */
if (rtlefuse->autoload_failflag == false) {
@@ -777,18 +772,18 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radiob_arraylen = RADIOB_2T_INT_PA_ARRAYLENGTH;
radiob_array_table = rtl8192de_radiob_2t_int_paarray;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_1TArray\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_1TArray\n");
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
/* this only happens when DMDP, mac0 start on 2.4G,
* mac1 start on 5G, mac 0 has to set phy0&phy1
* pathA or mac1 has to set phy0&phy1 pathA */
if ((content == radiob_txt) && (rfpath == RF90_PATH_A)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " ===> althougth Path A, we load radiob.txt\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> althougth Path A, we load radiob.txt\n");
radioa_arraylen = radiob_arraylen;
radioa_array_table = radiob_array_table;
}
@@ -828,19 +823,19 @@ void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
(u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[3] =
(u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
@@ -938,14 +933,14 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
if (rtlphy->set_bwmode_inprogress)
return;
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "FALSE driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "FALSE driver sleep or unload\n");
return;
}
rtlphy->set_bwmode_inprogress = true;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
switch (rtlphy->current_chan_bw) {
@@ -1001,7 +996,7 @@ void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
}
rtl92d_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
static void _rtl92d_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
@@ -1018,7 +1013,7 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 value8;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
rtlhal->bandset = band;
rtlhal->current_bandtype = band;
if (IS_92D_SINGLEPHY(rtlhal->version))
@@ -1028,13 +1023,13 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
/* reconfig BB/RF according to wireless mode */
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* BB & RF Config */
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
if (rtlhal->interfaceindex == 1)
_rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
} else {
/* 5G band */
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
if (rtlhal->interfaceindex == 1)
_rtl92d_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
@@ -1062,7 +1057,7 @@ static void rtl92d_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
0 ? REG_MAC0 : REG_MAC1), value8);
}
mdelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
}
static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
@@ -1074,9 +1069,9 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
u8 group, i;
unsigned long flag = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
/* fc area 0xd2c */
@@ -1097,14 +1092,14 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 1);
} else {
/* G band. */
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Load RF IMR parameters for G band. IMR already setting %d\n",
- rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. IMR already setting %d\n",
+ rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
if (!rtlpriv->rtlhal.load_imrandiqk_setting_for2g) {
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Load RF IMR parameters for G band. %d\n",
- rfpath);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. %d\n",
+ rfpath);
rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RFPGA0_RFMOD, BIT(25) | BIT(24), 0);
rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
@@ -1122,7 +1117,7 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
}
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
@@ -1132,7 +1127,7 @@ static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
/*----Store original RFENV control type----*/
switch (rfpath) {
case RF90_PATH_A:
@@ -1158,7 +1153,7 @@ static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
/*Set 0 to 12 bits for 8255 */
rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
udelay(1);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
@@ -1168,7 +1163,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
struct rtl_phy *rtlphy = &(rtlpriv->phy);
struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
/*----Restore RFENV control type----*/
switch (rfpath) {
case RF90_PATH_A:
@@ -1181,7 +1176,7 @@ static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
*pu4_regval);
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
}
static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
@@ -1195,10 +1190,10 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
bool need_pwr_down = false, internal_pa = false;
u32 u4regvalue, mask = 0x1C000, value = 0, u4tmp, u4tmp2;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
/* config path A for 5G */
if (rtlhal->current_bandtype == BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
u4tmp = curveindex_5g[channel - 1];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
@@ -1246,14 +1241,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK,
rf_reg_pram_c_5g[index][i]);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
- rf_reg_for_c_cut_5g[i],
- rf_reg_pram_c_5g[index][i],
- path, index,
- rtl_get_rfreg(hw, (enum radio_path)path,
- rf_reg_for_c_cut_5g[i],
- RFREG_OFFSET_MASK));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_5g[i],
+ rf_reg_pram_c_5g[index][i],
+ path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK));
}
if (need_pwr_down)
_rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
@@ -1285,11 +1280,11 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rf_for_c_cut_5g_internal_pa[i],
RFREG_OFFSET_MASK,
rf_pram_c_5g_int_pa[index][i]);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "offset 0x%x value 0x%x path %d index %d\n",
- rf_for_c_cut_5g_internal_pa[i],
- rf_pram_c_5g_int_pa[index][i],
- rfpath, index);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "offset 0x%x value 0x%x path %d index %d\n",
+ rf_for_c_cut_5g_internal_pa[i],
+ rf_pram_c_5g_int_pa[index][i],
+ rfpath, index);
}
} else {
rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
@@ -1297,7 +1292,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
}
}
} else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
u4tmp = curveindex_2g[channel - 1];
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
@@ -1333,14 +1328,14 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK,
rf_reg_param_for_c_cut_2g
[index][i]);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "offset 0x%x value 0x%x mak 0x%x path %d index %d readback 0x%x\n",
- rf_reg_for_c_cut_2g[i],
- rf_reg_param_for_c_cut_2g[index][i],
- rf_reg_mask_for_c_cut_2g[i], path, index,
- rtl_get_rfreg(hw, (enum radio_path)path,
- rf_reg_for_c_cut_2g[i],
- RFREG_OFFSET_MASK));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x mak 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_2g[i],
+ rf_reg_param_for_c_cut_2g[index][i],
+ rf_reg_mask_for_c_cut_2g[i], path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK));
}
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
@@ -1354,7 +1349,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
if (rtlhal->during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
@@ -2358,8 +2353,8 @@ void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw)
rtlphy->iqk_matrix[indexforchannel].iqk_done =
true;
- RT_TRACE(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
- "IQK OK indexforchannel %d\n", indexforchannel);
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
+ "IQK OK indexforchannel %d\n", indexforchannel);
}
}
@@ -2370,26 +2365,26 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
u8 indexforchannel;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel);
/*------Do IQK for normal chip and test chip 5G band------- */
indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
- indexforchannel,
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
+ indexforchannel,
rtlphy->iqk_matrix[indexforchannel].iqk_done);
if (0 && !rtlphy->iqk_matrix[indexforchannel].iqk_done &&
rtlphy->need_iqk) {
/* Re Do IQK. */
- RT_TRACE(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
- "Do IQK Matrix reg for channel:%d....\n", channel);
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
+ "Do IQK Matrix reg for channel:%d....\n", channel);
rtl92d_phy_iq_calibrate(hw);
} else {
/* Just load the value. */
/* 2G band just load once. */
if (((!rtlhal->load_imrandiqk_setting_for2g) &&
indexforchannel == 0) || indexforchannel > 0) {
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Just Read IQK Matrix reg for channel:%d....\n",
- channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Just Read IQK Matrix reg for channel:%d....\n",
+ channel);
if ((rtlphy->iqk_matrix[indexforchannel].
value[0] != NULL)
/*&&(regea4 != 0) */)
@@ -2413,7 +2408,7 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
}
}
rtlphy->need_iqk = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
@@ -2477,7 +2472,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
u32 u4tmp = 0, u4regvalue = 0;
bool bneed_powerdown_radio = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "band type = %d\n",
rtlpriv->rtlhal.current_bandtype);
RTPRINT(rtlpriv, FINIT, INIT_IQK, "channel = %d\n", channel);
@@ -2522,7 +2517,7 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
if (rtlpriv->rtlhal.during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
@@ -2695,11 +2690,11 @@ void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
u8 i;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "settings regs %d default regs %d\n",
- (int)(sizeof(rtlphy->iqk_matrix) /
- sizeof(struct iqk_matrix_regs)),
- IQK_MATRIX_REG_NUM);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_REG_NUM);
/* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
rtlphy->iqk_matrix[i].value[0][0] = 0x100;
@@ -2844,8 +2839,8 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
return 0;
}
while (rtlphy->lck_inprogress && timecount < timeout) {
@@ -2886,8 +2881,8 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
channel = 1;
rtlphy->sw_chnl_stage = 0;
rtlphy->sw_chnl_step = 0;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
do {
if (!rtlphy->sw_chnl_inprogress)
@@ -2904,7 +2899,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
rtlphy->sw_chnl_inprogress = false;
return 1;
}
@@ -2915,9 +2910,9 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
struct dig_t *de_digtable = &rtlpriv->dm_digtable;
struct rtl_phy *rtlphy = &(rtlpriv->phy);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -2935,8 +2930,8 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
+ rtlphy->current_io_type);
}
bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
@@ -2945,19 +2940,19 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &(rtlpriv->phy);
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan\n");
postprocessing = true;
break;
default:
@@ -2973,7 +2968,7 @@ bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92d_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
return true;
}
@@ -3030,8 +3025,8 @@ static void _rtl92d_phy_set_rfsleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Fail !!! Switch RF timeout\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Fail !!! Switch RF timeout\n");
return;
}
/* e. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function */
@@ -3065,18 +3060,18 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 initializecount = 0;
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "awake, sleeped:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies),
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "awake, slept:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies),
rtlpriv->psc.state_inap);
ppsc->last_awake_jiffies = jiffies;
_rtl92d_phy_set_rfon(hw);
@@ -3091,8 +3086,8 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFOFF:
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -3116,35 +3111,35 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else if (rtlpci->pdev->current_state != PCI_D0) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 but lower power state!\n",
- i + 1, queue_id);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] !=0 but lower power state!\n",
+ i + 1, queue_id);
break;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x, queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "Set rfsleep awaked:%d ms\n",
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "Set rfsleep awakened:%d ms\n",
jiffies_to_msecs(jiffies - ppsc->last_awake_jiffies));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "sleep awaked:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies),
- rtlpriv->psc.state_inap);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "sleep awakened:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92d_phy_set_rfsleep(hw);
break;
@@ -3167,18 +3162,18 @@ void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
switch (rtlhal->macphymode) {
case DUALMAC_DUALPHY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_DUALPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_DUALPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF3);
break;
case SINGLEMAC_SINGLEPHY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF4);
break;
case DUALMAC_SINGLEPHY:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_SINGLEPHY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_SINGLEPHY\n");
rtl_write_byte(rtlpriv, offset, 0xF1);
break;
}
@@ -3346,7 +3341,7 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 rfpath, i;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
/* r_select_5G for path_A/B 0 for 2.4G, 1 for 5G */
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* r_select_5G for path_A/B,0x878 */
@@ -3494,8 +3489,8 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
BIT(13), 0x3);
} else {
rtl92d_phy_enable_anotherphy(hw, false);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MAC1 use DBI to update 0x888\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MAC1 use DBI to update 0x888\n");
/* 0x888 */
rtl92de_write_dword_dbi(hw, RFPGA0_ADDALLOCKEN,
rtl92de_read_dword_dbi(hw,
@@ -3520,9 +3515,9 @@ void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
RFREG_OFFSET_MASK);
}
for (i = 0; i < 2; i++)
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[i]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n");
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 915a36f7af5e..83787fd293de 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -23,9 +23,9 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) |
BIT(11), 0x01);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "20M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "20M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
}
break;
@@ -35,9 +35,9 @@ void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
((rtlphy->rfreg_chnlval[rfpath] & 0xfffff3ff));
rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11),
0x00);
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "40M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "40M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
}
break;
default:
@@ -391,11 +391,11 @@ bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
rtlhal->during_mac0init_radiob = false;
rtlhal->during_mac1init_radioa = false;
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
/* MAC0 Need PHY1 load radio_b.txt . Driver use DBI to write. */
u1btmp = rtl_read_byte(rtlpriv, mac_reg);
if (!(u1btmp & mac_on_bit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
/* Enable BB and RF power */
rtl92de_write_dword_dbi(hw, REG_SYS_ISO_CTRL,
rtl92de_read_dword_dbi(hw, REG_SYS_ISO_CTRL, direct) |
@@ -405,7 +405,7 @@ bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
* and radio_b.txt has been load. */
bresult = false;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
return bresult;
}
@@ -421,17 +421,17 @@ void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0)
rtlhal->during_mac0init_radiob = false;
rtlhal->during_mac1init_radioa = false;
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
/* check MAC0 enable or not again now, if
* enabled, not power down radio A. */
u1btmp = rtl_read_byte(rtlpriv, mac_reg);
if (!(u1btmp & mac_on_bit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
/* power down RF radio A according to YuNan's advice. */
rtl92de_write_dword_dbi(hw, RFPGA0_XA_LSSIPARAMETER,
0x00000000, direct);
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
}
bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
@@ -573,8 +573,8 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
break;
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
goto phy_rf_cfg_fail;
}
@@ -588,7 +588,7 @@ bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw)
rtl92d_phy_powerdown_anotherphy(hw, false);
else if (need_pwrdown_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
return rtstatus;
phy_rf_cfg_fail:
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index ab5b05ef168e..8944712274b5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -508,11 +508,11 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_92d));
@@ -526,9 +526,9 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_LOUD,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
_rtl92de_insert_emcontent(ptcb_desc,
(u8 *)(skb->data));
}
@@ -625,8 +625,8 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -652,7 +652,7 @@ void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_pkt_id(pdesc, 8);
}
set_tx_desc_more_frag(pdesc, (lastseg ? 0 : 1));
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -664,15 +664,15 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
u8 fw_queue = QSLT_BEACON;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
__le32 *pdesc = (__le32 *)pdesc8;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
index 551aa86825ed..997ff115b9ab 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/dm.c
@@ -86,16 +86,16 @@ static void rtl92ee_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92ee_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
@@ -174,7 +174,7 @@ static void rtl92ee_dm_dig(struct ieee80211_hw *hw)
} else {
dm_dig->rx_gain_max = dm_dig_max;
dig_min_0 = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
@@ -334,34 +334,34 @@ static void rtl92ee_dm_find_minimum_rssi(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
rtl_dm_dig->min_undec_pwdb_for_dm = 0;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.entry_min_undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "MinUndecoratedPWDBForDM =%d\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
static void rtl92ee_dm_check_rssi_monitor(struct ieee80211_hw *hw)
@@ -687,8 +687,8 @@ static void rtl92ee_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
if (rtlpriv->cfg->ops->get_btc_status()) {
if (!rtlpriv->btcoexist.btc_ops->
btc_is_bt_disabled(rtlpriv)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
return;
}
}
@@ -718,11 +718,11 @@ static void rtl92ee_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
(rtldm->cfo_ave_pre - cfo_ave) :
(cfo_ave - rtldm->cfo_ave_pre);
- if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
- rtldm->large_cfo_hit = 1;
+ if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) {
+ rtldm->large_cfo_hit = true;
return;
}
- rtldm->large_cfo_hit = 0;
+ rtldm->large_cfo_hit = false;
rtldm->cfo_ave_pre = cfo_ave;
@@ -842,8 +842,8 @@ static bool _rtl92ee_dm_ra_state_check(struct ieee80211_hw *hw,
low_rssithresh_for_ra += go_up_gap;
break;
default:
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "wrong rssi level setting %d !\n", *ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "wrong rssi level setting %d !\n", *ratr_state);
break;
}
@@ -872,14 +872,14 @@ static void rtl92ee_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
index 05462422d247..88b7a715f4c5 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
@@ -36,7 +36,7 @@ static void _rtl92ee_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -118,21 +118,21 @@ int rtl92ee_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
rtlhal->fw_subversion = pfwheader->subversion;
pfwdata = (u8 *)rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "normal Firmware SIZE %d\n" , fwsize);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "normal Firmware SIZE %d\n", fwsize);
if (IS_FW_HEADER_EXIST(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x),Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
} else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware no Header, Signature(%#x)\n",
- pfwheader->signature);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware no Header, Signature(%#x)\n",
+ pfwheader->signature);
}
if (rtlhal->mac_func_enable) {
@@ -180,12 +180,12 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (ppsc->dot11_psmode != EACTIVE ||
ppsc->inactive_pwrstate == ERFOFF) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "FillH2CCommand8192E(): Return because RF is off!!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "FillH2CCommand8192E(): Return because RF is off!!!\n");
return;
}
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
/* 1. Prevent race condition in setting H2C cmd.
* (copy from MgntActSet_RF_State().)
@@ -193,17 +193,17 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -240,8 +240,8 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -263,18 +263,18 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Waiting too long for FW read clear HMEBox(%d)!!!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!!!\n",
+ boxnum);
break;
}
udelay(10);
isfw_read =
_rtl92ee_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
}
@@ -282,18 +282,18 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
* H2C cmd, break and give up this H2C.
*/
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Write H2C reg BOX[%d] fail,Fw don't read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C reg BOX[%d] fail,Fw don't read.\n",
+ boxnum);
break;
}
/* 4. Fill the H2C cmd into box */
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -329,8 +329,8 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -340,16 +340,16 @@ static void _rtl92ee_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD ,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD , "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl92ee_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -388,8 +388,8 @@ void rtl92ee_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD ,
- " _8051Reset92E(): 8051 reset success .\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " _8051Reset92E(): 8051 reset success .\n");
}
void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
@@ -408,8 +408,8 @@ void rtl92ee_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
if (bt_ctrl_lps)
mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
- mode, bt_ctrl_lps);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
switch (mode) {
case FW_PS_MIN_MODE:
@@ -750,15 +750,15 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD ,
"H2C_RSVDPAGE:\n", u1rsvdpageloc, 3);
rtl92ee_fill_h2c_cmd(hw, H2C_92E_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
}
@@ -783,11 +783,11 @@ void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -838,11 +838,11 @@ void rtl92ee_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD , "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 53011c2a44f5..88fa2e593fef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -119,9 +119,9 @@ static void _rtl92ee_set_fw_clock_on(struct ieee80211_hw *hw,
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_92E;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! PSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! PSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -319,8 +319,8 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -390,8 +390,8 @@ static void _rtl92ee_download_rsvd_page(struct ieee80211_hw *hw)
} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
if (!(bcnvalid_reg & BIT(0)))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Download RSVD page failed!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Download RSVD page failed!\n");
/* Enable Bcn */
_rtl92ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
@@ -447,8 +447,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_TRACE,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_TRACE,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -494,8 +494,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(REG_AGGLEN_LMT + i),
reg[i]);
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR:%#x\n", fac);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR:%#x\n", fac);
}
}
break;
@@ -528,9 +528,9 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -545,16 +545,16 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
}
break;
@@ -665,8 +665,8 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -771,8 +771,8 @@ static bool _rtl92ee_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
PWR_INTF_PCI_MSK,
RTL8192E_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as rtl_hal_pwrseqcmdparsing\n");
return false;
}
@@ -794,9 +794,9 @@ static bool _rtl92ee_init_mac(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_CR, 0x2ff);
if (!rtlhal->mac_func_enable) {
- if (_rtl92ee_llt_table_init(hw) == false) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "LLT table init fail\n");
+ if (!_rtl92ee_llt_table_init(hw)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "LLT table init fail\n");
return false;
}
}
@@ -1107,14 +1107,14 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value;
u8 tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1130,8 +1130,8 @@ void rtl92ee_enable_hw_security_config(struct ieee80211_hw *hw)
tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1153,8 +1153,8 @@ static bool _rtl8192ee_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
*/
tmp = rtl_read_byte(rtlpriv, REG_BACKDOOR_DBI_DATA + 3);
if ((tmp & BIT(0)) || (tmp & BIT(1))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "CheckPcieDMAHang8192EE(): true!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CheckPcieDMAHang8192EE(): true!!\n");
return true;
}
return false;
@@ -1167,8 +1167,8 @@ static void _rtl8192ee_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
bool release_mac_rx_pause;
u8 backup_pcie_dma_pause;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "ResetPcieInterfaceDMA8192EE()\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "ResetPcieInterfaceDMA8192EE()\n");
/* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
* released by SD1 Alan.
@@ -1281,7 +1281,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
u8 tmp_u1b, u1byte;
u32 tmp_u4b;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, " Rtl8192EE hw init\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, " Rtl8192EE hw init\n");
rtlpriv->rtlhal.being_init_adapter = true;
rtlpriv->intf_ops->disable_aspm(hw);
@@ -1295,7 +1295,7 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
}
if (_rtl8192ee_check_pcie_dma_hang(rtlpriv)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "92ee dma hang!\n");
_rtl8192ee_reset_pcie_interface_dma(rtlpriv,
rtlhal->mac_func_enable);
rtlhal->mac_func_enable = false;
@@ -1324,8 +1324,8 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, 0x8000);
err = rtl92ee_download_fw(hw, false);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
rtlhal->fw_ready = false;
return err;
@@ -1401,12 +1401,12 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
efuse_one_byte_read(hw, 0x1FA, &tmp_u1b);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path A\n");
}
if ((!(tmp_u1b & BIT(1))) && (rtlphy->rf_type == RF_2T2R)) {
rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path B\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "PA BIAS path B\n");
}
rtl_write_byte(rtlpriv, REG_NAV_UPPER, ((30000 + 127) / 128));
@@ -1421,8 +1421,8 @@ int rtl92ee_hw_init(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, 0x4fc, 0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "end of Rtl8192EE hw init %x\n", err);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "end of Rtl8192EE hw init %x\n", err);
return 0;
}
@@ -1441,9 +1441,9 @@ static enum version_8192e _rtl92ee_read_chip_version(struct ieee80211_hw *hw)
else
version = (enum version_8192e)VERSION_NORMAL_CHIP_2T2R_8192E;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1459,26 +1459,26 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -1503,9 +1503,9 @@ static int _rtl92ee_set_media_status(struct ieee80211_hw *hw,
_rtl92ee_resume_tx_beacon(hw);
_rtl92ee_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1611,7 +1611,7 @@ static void _rtl92ee_poweroff_adapter(struct ieee80211_hw *hw)
rtlhal->mac_func_enable = false;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "POWER OFF adapter\n");
/* Run LPS WL RFOFF flow */
rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK,
@@ -1651,7 +1651,7 @@ void rtl92ee_card_disable(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
enum nl80211_iftype opmode;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8192ee card disable\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "RTL8192ee card disable\n");
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
@@ -1710,8 +1710,8 @@ void rtl92ee_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
}
@@ -1721,8 +1721,8 @@ void rtl92ee_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1788,15 +1788,15 @@ static void _rtl8192ee_read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 rf, addr = EEPROM_TX_PWR_INX, group, i = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM92E(): PROMContent[0x%x]=0x%x\n",
- (addr + 1), hwinfo[addr + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM92E(): PROMContent[0x%x]=0x%x\n",
+ (addr + 1), hwinfo[addr + 1]);
if (0xFF == hwinfo[addr+1]) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (rf = 0 ; rf < MAX_RF_PATH ; rf++) {
/* 2.4G default value */
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
@@ -2113,8 +2113,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
if (rtlefuse->eeprom_oemid == 0xFF)
rtlefuse->eeprom_oemid = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid);
/* set channel plan from efuse */
rtlefuse->channel_plan = rtlefuse->eeprom_channelplan;
/*tx power*/
@@ -2134,8 +2134,8 @@ static void _rtl92ee_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */
rtlhal->board_type = rtlefuse->board_type;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "board_type = 0x%x\n", rtlefuse->board_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "board_type = 0x%x\n", rtlefuse->board_type);
/*parse xtal*/
rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_92E];
if (hwinfo[EEPROM_XTAL_92E] == 0xFF)
@@ -2172,8 +2172,8 @@ static void _rtl92ee_hal_customized_behavior(struct ieee80211_hw *hw)
rtlpriv->ledctl.led_opendrain = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw)
@@ -2191,18 +2191,18 @@ void rtl92ee_read_eeprom_info(struct ieee80211_hw *hw)
rtlpriv->dm.rfpath_rxenable[0] = true;
rtlpriv->dm.rfpath_rxenable[1] = true;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92ee_read_adapter_info(hw);
} else {
@@ -2361,8 +2361,8 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
ratr_index = _rtl92ee_mrate_idx_to_arfr_id(hw, ratr_index);
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[0] = macid;
@@ -2372,11 +2372,11 @@ static void rtl92ee_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[4] = (u8)((ratr_bitmap & 0x0000ff00) >> 8);
rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3], rate_mask[4],
- rate_mask[5], rate_mask[6]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap, rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3], rate_mask[4],
+ rate_mask[5], rate_mask[6]);
rtl92ee_fill_h2c_cmd(hw, H2C_92E_RA_MASK, 7, rate_mask);
_rtl92ee_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
@@ -2438,7 +2438,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2466,8 +2466,8 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2498,27 +2498,27 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_MESH_POINT)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
@@ -2603,7 +2603,7 @@ void rtl92ee_allow_all_destaddr(struct ieee80211_hw *hw,
if (write_into_reg)
rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
- "receive_config=0x%08X, write_into_reg=%d\n",
- rtlpci->receive_config, write_into_reg);
+ rtl_dbg(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+ "receive_config=0x%08X, write_into_reg=%d\n",
+ rtlpci->receive_config, write_into_reg);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
index 78202ad4036e..fb4ea3a8481f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/led.c
@@ -19,8 +19,8 @@ void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u32 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -38,8 +38,8 @@ void rtl92ee_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -50,8 +50,8 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -68,8 +68,8 @@ void rtl92ee_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
@@ -118,6 +118,6 @@ void rtl92ee_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_TRACE, "ledaction %d,\n", ledaction);
_rtl92ee_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
index bb291b951f4d..cc0bcaf13e96 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/phy.c
@@ -43,15 +43,15 @@ u32 rtl92ee_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92ee_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -62,9 +62,9 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -74,9 +74,9 @@ void rtl92ee_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -85,9 +85,9 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -97,9 +97,9 @@ u32 rtl92ee_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x),rfpath(%#x),bitmask(%#x),original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x),rfpath(%#x),bitmask(%#x),original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -111,9 +111,9 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- addr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ addr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -127,9 +127,9 @@ void rtl92ee_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- addr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ addr, bitmask, data, rfpath);
}
static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
@@ -172,9 +172,9 @@ static u32 _rtl92ee_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
@@ -196,20 +196,16 @@ static void _rtl92ee_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
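+ /* 3-wire write word: register address in bits 27:20, data in bits 19:0 */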
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n", rfpath,
- pphyreg->rf3wire_offset, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n", rfpath,
+ pphyreg->rf3wire_offset, data_and_addr);
}
static u32 _rtl92ee_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
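+ /* ffs() is 1-based and returns 0 when no bit is set; return 32 for an empty mask, as the old loop did */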
+ return i ? i - 1 : 32;
}
bool rtl92ee_phy_mac_config(struct ieee80211_hw *hw)
@@ -400,8 +396,8 @@ static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d\n", path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d\n", path);
return;
}
@@ -420,14 +416,14 @@ static void _rtl92ee_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d\n", band);
}
}
@@ -440,8 +436,8 @@ static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
u8 value = 0;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d\n", path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d\n", path);
return 0;
}
@@ -460,14 +456,14 @@ static u8 _rtl92ee_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in 2.4G,Rf %d,%dTx\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d()\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d()\n", band);
}
return value;
}
@@ -606,8 +602,8 @@ static void phy_convert_txpwr_dbm_to_rel_val(struct ieee80211_hw *hw)
0, 3, base);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "<==phy_convert_txpwr_dbm_to_rel_val()\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "<==%s\n", __func__);
}
static void _rtl92ee_phy_txpower_by_rate_configuration(struct ieee80211_hw *hw)
@@ -659,11 +655,11 @@ static bool _rtl92ee_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8192EMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl8192EMACPHY_Array\n");
arraylength = RTL8192EE_MAC_ARRAY_LEN;
ptrarray = RTL8192EE_MAC_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Img:RTL8192EE_MAC_ARRAY LEN %d\n" , arraylength);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img:RTL8192EE_MAC_ARRAY LEN %d\n", arraylength);
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
return true;
@@ -776,10 +772,10 @@ static bool phy_config_bb_with_hdr_file(struct ieee80211_hw *hw,
}
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl818EEPHY_REGArray[1] is %x\n",
- array[i],
- array[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl8192EEPHY_REGArray[1] is %x\n",
+ array[i],
+ array[i + 1]);
}
}
return true;
@@ -843,17 +839,17 @@ static void _rtl92ee_store_tx_power_by_rate(struct ieee80211_hw *hw,
u8 section = _rtl92ee_get_rate_section_index(regaddr);
if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
return;
}
if (rfpath > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR,
- "Invalid RfPath %d\n", rfpath);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR,
+ "Invalid RfPath %d\n", rfpath);
return;
}
if (txnum > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
return;
}
@@ -888,8 +884,8 @@ static bool phy_config_bb_with_pghdrfile(struct ieee80211_hw *hw,
}
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -914,9 +910,9 @@ bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
case RF90_PATH_A:
len = RTL8192EE_RADIOA_ARRAY_LEN;
array = RTL8192EE_RADIOA_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8192EE_RADIOA_ARRAY %d\n" , len);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8192EE_RADIOA_ARRAY %d\n", len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
for (i = 0; i < len; i = i + 2) {
v1 = array[i];
v2 = array[i+1];
@@ -961,9 +957,9 @@ bool rtl92ee_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
case RF90_PATH_B:
len = RTL8192EE_RADIOB_ARRAY_LEN;
array = RTL8192EE_RADIOB_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8192EE_RADIOB_ARRAY %d\n" , len);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_B:RTL8192EE_RADIOB_ARRAY %d\n", len);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
for (i = 0; i < len; i = i + 2) {
v1 = array[i];
v2 = array[i+1];
@@ -1025,21 +1021,21 @@ void rtl92ee_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void phy_init_bb_rf_register_def(struct ieee80211_hw *hw)
@@ -1236,8 +1232,8 @@ static u8 _rtl92ee_get_txpower_index(struct ieee80211_hw *hw,
if (channel < 1 || channel > 14) {
index = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_DMESG,
- "Illegal channel!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_DMESG,
+ "Illegal channel!!\n");
}
if (IS_CCK_RATE((s8)rate))
@@ -1395,8 +1391,8 @@ static void _rtl92ee_set_txpower_index(struct ieee80211_hw *hw, u8 pwr_idx,
pwr_idx);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
break;
}
} else if (rfpath == RF90_PATH_B) {
@@ -1514,12 +1510,12 @@ static void _rtl92ee_set_txpower_index(struct ieee80211_hw *hw, u8 pwr_idx,
pwr_idx);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
}
}
@@ -1578,8 +1574,8 @@ static void phy_set_txpower_index_by_rate_section(struct ieee80211_hw *hw,
rtlphy->current_chan_bw,
channel, ht_rates2t, 8);
} else
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR,
- "Invalid RateSection %d\n", section);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR,
+ "Invalid RateSection %d\n", section);
}
void rtl92ee_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
@@ -1665,10 +1661,10 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -1722,7 +1718,7 @@ void rtl92ee_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl92ee_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -1739,8 +1735,8 @@ void rtl92ee_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92ee_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -1753,8 +1749,8 @@ void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -1772,7 +1768,7 @@ void rtl92ee_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -1792,13 +1788,13 @@ u8 rtl92ee_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl92ee_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem current channel %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
+ rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -1900,9 +1896,9 @@ static bool _rtl92ee_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2248,7 +2244,7 @@ static u8 _rtl92ee_phy_path_b_rx_iqk(struct ieee80211_hw *hw, bool config_pathb)
(((reg_ecc & 0x03FF0000) >> 16) != 0x36))
result |= 0x02;
else
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD, "Path B Rx IQK fail!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "Path B Rx IQK fail!!\n");
return result;
}
@@ -2545,8 +2541,8 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
patha_ok = _rtl92ee_phy_path_a_iqk(hw, is2t);
if (patha_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Tx IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw,
RTX_POWER_BEFORE_IQK_A,
MASKDWORD) & 0x3FF0000)
@@ -2556,17 +2552,17 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Tx IQK Fail!!, ret = 0x%x\n",
- patha_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Tx IQK Fail!!, ret = 0x%x\n",
+ patha_ok);
}
for (i = 0 ; i < retrycount ; i++) {
patha_ok = _rtl92ee_phy_path_a_rx_iqk(hw, is2t);
if (patha_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Rx IQK Success!!\n");
result[t][2] = (rtl_get_bbreg(hw,
RRX_POWER_BEFORE_IQK_A_2,
MASKDWORD) & 0x3FF0000)
@@ -2577,14 +2573,14 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A Rx IQK Fail!!, ret = 0x%x\n",
- patha_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A Rx IQK Fail!!, ret = 0x%x\n",
+ patha_ok);
}
if (0x00 == patha_ok)
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path A IQK failed!!, ret = 0\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path A IQK failed!!, ret = 0\n");
if (is2t) {
_rtl92ee_phy_path_a_standby(hw);
/* Turn Path B ADDA on */
@@ -2598,8 +2594,8 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0 ; i < retrycount ; i++) {
pathb_ok = _rtl92ee_phy_path_b_iqk(hw);
if (pathb_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw,
RTX_POWER_BEFORE_IQK_B,
MASKDWORD) & 0x3FF0000)
@@ -2610,16 +2606,16 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Tx IQK Fail!!, ret = 0x%x\n",
- pathb_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Tx IQK Fail!!, ret = 0x%x\n",
+ pathb_ok);
}
for (i = 0 ; i < retrycount ; i++) {
pathb_ok = _rtl92ee_phy_path_b_rx_iqk(hw, is2t);
if (pathb_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Rx IQK Success!!\n");
result[t][6] = (rtl_get_bbreg(hw,
RRX_POWER_BEFORE_IQK_B_2,
MASKDWORD) & 0x3FF0000)
@@ -2630,18 +2626,18 @@ static void _rtl92ee_phy_iq_calibrate(struct ieee80211_hw *hw,
>> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B Rx IQK Fail!!, ret = 0x%x\n",
- pathb_ok);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B Rx IQK Fail!!, ret = 0x%x\n",
+ pathb_ok);
}
if (0x00 == pathb_ok)
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "Path B IQK failed!!, ret = 0\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "Path B IQK failed!!, ret = 0\n");
}
/* Back to BB mode, load original value */
- RT_TRACE(rtlpriv, COMP_RF, DBG_LOUD,
- "IQK:Back to BB mode, load original value!\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "IQK:Back to BB mode, load original value!\n");
rtl_set_bbreg(hw, RFPGA0_IQK, MASKDWORD, 0);
if (t != 0) {
@@ -2724,7 +2720,7 @@ static void _rtl92ee_phy_set_rfpath_switch(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_INIT , DBG_LOUD , "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (is_hal_stop(rtlhal)) {
u8 u1btmp;
@@ -2953,24 +2949,24 @@ bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2981,7 +2977,7 @@ bool rtl92ee_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl92ee_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -2991,14 +2987,14 @@ static void rtl92ee_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct dig_t *dm_dig = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
rtl92ee_dm_write_dig(hw, rtlphy->initgain_backup.xaagccore1);
rtl92ee_dm_write_cck_cca_thres(hw, rtlphy->initgain_backup.cca);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE , "no set txpower\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "no set txpower\n");
rtl92ee_phy_set_txpower_level(hw, rtlphy->current_channel);
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
@@ -3009,14 +3005,14 @@ static void rtl92ee_phy_set_io(struct ieee80211_hw *hw)
rtl92ee_dm_write_cck_cca_thres(hw, 0x40);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl92ee_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -3062,16 +3058,16 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeping:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON sleeping:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl92ee_phy_set_rf_on(hw);
}
@@ -3089,27 +3085,27 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -3132,32 +3128,32 @@ static bool _rtl92ee_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl92ee_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
index 6b8ef680dc57..bbe632d56b19 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/rf.c
@@ -118,12 +118,12 @@ static bool _rtl92ee_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index dc7b515bdc85..eef7a041e80d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -363,9 +363,9 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
else
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -468,9 +468,9 @@ u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw, u8 queue_index)
write_point = (u16)(tmp_4byte & 0x7ff);
if (write_point != rtlpci->rx_ring[queue_index].next_rx_rp) {
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_DMESG,
- "!!!write point is 0x%x, reg 0x3B4 value is 0x%x\n",
- write_point, tmp_4byte);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_DMESG,
+ "!!!write point is 0x%x, reg 0x3B4 value is 0x%x\n",
+ write_point, tmp_4byte);
tmp_4byte = rtl_read_dword(rtlpriv, REG_RXQ_TXBD_IDX);
read_point = (u16)((tmp_4byte>>16) & 0x7ff);
write_point = (u16)(tmp_4byte & 0x7ff);
@@ -675,11 +675,11 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
skb_push(skb, EM_HDR_LEN);
memset(skb->data, 0, EM_HDR_LEN);
}
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
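+ /* generic DMA API replaces the pci_* wrappers; DMA_TO_DEVICE marks the skb data as read-only for the device */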
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
@@ -697,9 +697,9 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc,
USB_HWDESC_HEADER_LEN + EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 bytes. pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
_rtl92ee_insert_emcontent(ptcb_desc,
(u8 *)(skb->data));
}
@@ -798,8 +798,8 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
}
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -824,7 +824,7 @@ void rtl92ee_tx_fill_desc(struct ieee80211_hw *hw,
is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -834,15 +834,14 @@ void rtl92ee_tx_fill_cmddesc(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
u8 fw_queue = QSLT_BEACON;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 txdesc_len = 40;
__le32 *pdesc = (__le32 *)pdesc8;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, txdesc_len);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
index a6e4384ceea1..5fce3db52cd9 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/dm.c
@@ -144,10 +144,10 @@ static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermal meter 0x%x\n",
- thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermal meter 0x%x\n",
+ thermalvalue,
+ rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
if (thermalvalue) {
rtlpriv->dm.thermalvalue = thermalvalue;
@@ -158,8 +158,8 @@ static void _rtl92s_dm_txpowertracking_callback_thermalmeter(
(rtlpriv->efuse.thermalmeter[0] << 8) |
(thermalvalue << 16));
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Write to FW Thermal Val = 0x%x\n", fw_cmd);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Write to FW Thermal Val = 0x%x\n", fw_cmd);
rtl_write_dword(rtlpriv, WFM5, fw_cmd);
rtl92s_phy_chk_fwcmd_iodone(hw);
@@ -264,10 +264,10 @@ static void _rtl92s_dm_refresh_rateadaptive_mask(struct ieee80211_hw *hw)
}
if (ra->pre_ratr_state != ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
- rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
- ra->pre_ratr_state, ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld RSSI_LEVEL = %d PreState = %d, CurState = %d\n",
+ rtlpriv->dm.undec_sm_pwdb, ra->ratr_state,
+ ra->pre_ratr_state, ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -576,8 +576,8 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TX_HIGHPWR_LEVEL_NORMAL;
@@ -588,21 +588,21 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb = rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
txpwr_threshold_lv2 = TX_POWER_NEAR_FIELD_THRESH_LVL2;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
index 47a5b95ca2b9..f570495af044 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/fw.c
@@ -39,8 +39,8 @@ static bool _rtl92s_firmware_enable_cpu(struct ieee80211_hw *hw)
do {
cpustatus = rtl_read_byte(rtlpriv, TCR);
if (cpustatus & IMEM_RDY) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "IMEM Ready after CPU has refilled\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "IMEM Ready after CPU has refilled\n");
break;
}
@@ -195,8 +195,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
short pollingcnt = 1000;
bool rtstatus = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "LoadStaus(%d)\n", loadfw_status);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "LoadStatus(%d)\n", loadfw_status);
firmware->fwstatus = (enum fw_status)loadfw_status;
@@ -256,9 +256,9 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
goto status_check_fail;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "DMEM code download success, cpustatus(%#x)\n",
- cpustatus);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "DMEM code download success, cpustatus(%#x)\n",
+ cpustatus);
/* Prevent Delay too much and being scheduled out */
/* Polling Load Firmware ready */
@@ -270,9 +270,9 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
udelay(40);
} while (pollingcnt--);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Polling Load Firmware ready, cpustatus(%x)\n",
- cpustatus);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Polling Load Firmware ready, cpustatus(%x)\n",
+ cpustatus);
if (((cpustatus & LOAD_FW_READY) != LOAD_FW_READY) ||
(pollingcnt <= 0)) {
@@ -290,8 +290,8 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, RCR, (tmpu4b | RCR_APPFCS |
RCR_APP_ICV | RCR_APP_MIC));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Current RCR settings(%#x)\n", tmpu4b);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Current RCR settings(%#x)\n", tmpu4b);
/* Set to normal mode. */
rtl_write_byte(rtlpriv, LBKMD_SEL, LBK_NORMAL);
@@ -304,9 +304,9 @@ static bool _rtl92s_firmware_checkready(struct ieee80211_hw *hw,
}
status_check_fail:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "loadfw_status(%d), rtstatus(%x)\n",
- loadfw_status, rtstatus);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "loadfw_status(%d), rtstatus(%x)\n",
+ loadfw_status, rtstatus);
return rtstatus;
}
@@ -337,11 +337,11 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
firmware->firmwareversion = byte(pfwheader->version, 0);
firmware->pfwheader->fwpriv.hci_sel = 1;/* pcie */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "signature:%x, version:%x, size:%x, imemsize:%x, sram size:%x\n",
- pfwheader->signature,
- pfwheader->version, pfwheader->dmem_size,
- pfwheader->img_imem_size, pfwheader->img_sram_size);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "signature:%x, version:%x, size:%x, imemsize:%x, sram size:%x\n",
+ pfwheader->signature,
+ pfwheader->version, pfwheader->dmem_size,
+ pfwheader->img_imem_size, pfwheader->img_sram_size);
/* 2. Retrieve IMEM image. */
if ((pfwheader->img_imem_size == 0) || (pfwheader->img_imem_size >
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
index 81313e0ca834..47fabce5c235 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/hw.c
@@ -111,8 +111,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, SLOT_TIME, val[0]);
@@ -156,9 +156,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -172,9 +172,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -215,9 +215,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(factorlevel[17] << 4));
rtl_write_byte(rtlpriv, AGGLEN_LMT_H, regtoset);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -253,9 +253,9 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -276,8 +276,8 @@ void rtl92se_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "HW_VAR_ACM_CTRL Write 0x%X\n", acm_ctrl);
rtl_write_byte(rtlpriv, ACMHWCTRL, acm_ctrl);
break;
}
@@ -417,14 +417,14 @@ void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value = 0x0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -435,8 +435,8 @@ void rtl92se_enable_hw_security_config(struct ieee80211_hw *hw)
sec_reg_value |= SCR_RXUSEDK;
}
- RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
- sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD, "The SECR-value %x\n",
+ sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -870,10 +870,10 @@ static void _rtl92se_macconfig_after_fwdownload(struct ieee80211_hw *hw)
/* Change Program timing */
rtl_write_byte(rtlpriv, REG_EFUSE_CTRL + 3, 0x72);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "EFUSE CONFIG OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "EFUSE CONFIG OK\n");
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
}
@@ -960,9 +960,8 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
/* 2. download firmware */
rtstatus = rtl92s_download_fw(hw);
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now... "
- "Please copy FW into /lib/firmware/rtlwifi\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now... Please copy FW into /lib/firmware/rtlwifi\n");
err = 1;
goto exit;
}
@@ -1014,7 +1013,7 @@ int rtl92se_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, RF_CTRL, 0x07);
if (!rtl92s_phy_rf_config(hw)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "RF Config failed\n");
err = rtstatus;
goto exit;
}
@@ -1147,23 +1146,23 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= (MSR_LINK_NONE << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= (MSR_LINK_ADHOC << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= (MSR_LINK_MASTER << MSR_LINK_SHIFT);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not supported!\n", type);
@@ -1606,8 +1605,8 @@ void rtl92se_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
- add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD, "add_msr:%x, rm_msr:%x\n",
+ add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1671,11 +1670,11 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
eeprom_id = *((u16 *)&hwinfo[0]);
if (eeprom_id != RTL8190_EEPROM_ID) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "EEPROM ID(%#x) is invalid!!\n", eeprom_id);
rtlefuse->autoload_failflag = true;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
}
@@ -1692,16 +1691,16 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
rtlefuse->eeprom_smid = *(u16 *)&hwinfo[EEPROM_SMID];
rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROMId = 0x%4x\n", eeprom_id);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROMId = 0x%4x\n", eeprom_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM VID = 0x%4x\n", rtlefuse->eeprom_vid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM DID = 0x%4x\n", rtlefuse->eeprom_did);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SVID = 0x%4x\n", rtlefuse->eeprom_svid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EEPROM SMID = 0x%4x\n", rtlefuse->eeprom_smid);
for (i = 0; i < 6; i += 2) {
usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
@@ -1711,7 +1710,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
for (i = 0; i < 6; i++)
rtl_write_byte(rtlpriv, MACIDR0 + i, rtlefuse->dev_addr[i]);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
/* Get Tx Power Level by Channel */
/* Read Tx power of Channel 1 ~ 14 from EEPROM. */
@@ -1906,7 +1905,7 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
* index diff of legacy to HT OFDM rate. */
tempval = hwinfo[EEPROM_RFIND_POWERDIFF] & 0xff;
rtlefuse->eeprom_txpowerdiff = tempval;
- rtlefuse->legacy_httxpowerdiff =
+ rtlefuse->legacy_ht_txpowerdiff =
rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][0];
RTPRINT(rtlpriv, FINIT, INIT_TXPOWER,
@@ -1964,15 +1963,15 @@ static void _rtl92se_read_adapter_info(struct ieee80211_hw *hw)
tempval = rtl_read_byte(rtlpriv, 0x07);
if (!(tempval & BIT(0))) {
rtlefuse->b1x1_recvcombine = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "RF_TYPE=1T2R but only 1SS\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RF_TYPE=1T2R but only 1SS\n");
}
}
rtlefuse->b1ss_support = rtlefuse->b1x1_recvcombine;
rtlefuse->eeprom_oemid = *&hwinfo[EEPROM_CUSTOMID];
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
- rtlefuse->eeprom_oemid);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "EEPROM Customer ID: 0x%2x\n",
+ rtlefuse->eeprom_oemid);
/* set channel paln to world wide 13 */
rtlefuse->channel_plan = COUNTRY_CODE_WORLD_WIDE_13;
@@ -1987,15 +1986,15 @@ void rtl92se_read_eeprom_info(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, EPROM_CMD);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl92se_read_adapter_info(hw);
} else {
@@ -2101,8 +2100,8 @@ static void rtl92se_update_hal_rate_table(struct ieee80211_hw *hw,
else
rtl92s_phy_set_fw_cmd(hw, FW_CMD_RA_REFRESH_BG);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, ARFR0));
}
static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2256,8 +2255,8 @@ static void rtl92se_update_hal_rate_mask(struct ieee80211_hw *hw,
mask |= (bmulticast ? 1 : 0) << 9 | (macid & 0x1f) << 4 | (band & 0xf);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_TRACE, "mask = %x, bitmap = %x\n",
- mask, ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_TRACE, "mask = %x, bitmap = %x\n",
+ mask, ratr_bitmap);
rtl_write_dword(rtlpriv, 0x2c4, ratr_bitmap);
rtl_write_dword(rtlpriv, WFM5, (FW_RA_UPDATE_MASK | (mask << 8)));
@@ -2332,15 +2331,15 @@ bool rtl92se_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
rfpwr_toset = _rtl92se_rf_onoff_detect(hw);
if ((ppsc->hwradiooff) && (rfpwr_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "RFKILL-HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "RFKILL-HW Radio ON, RF ON\n");
rfpwr_toset = ERFON;
ppsc->hwradiooff = false;
actuallyset = true;
} else if ((!ppsc->hwradiooff) && (rfpwr_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF,
- DBG_DMESG, "RFKILL-HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF,
+ DBG_DMESG, "RFKILL-HW Radio OFF, RF OFF\n");
rfpwr_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2404,7 +2403,7 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2463,26 +2462,26 @@ void rtl92se_set_key(struct ieee80211_hw *hw, u32 key_index, u8 *p_macaddr,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
index 2d18bc1ee480..ecbf425f679f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/led.c
@@ -27,8 +27,8 @@ void rtl92se_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- LEDCFG, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
@@ -57,8 +57,8 @@ void rtl92se_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtlpriv = rtl_priv(hw);
if (!rtlpriv || rtlpriv->max_fw_size)
return;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
- LEDCFG, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "LedAddr:%X ledpin=%d\n",
+ LEDCFG, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, LEDCFG);
@@ -119,7 +119,7 @@ void rtl92se_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d\n", ledaction);
_rtl92se_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index d5c0eb462315..63283d9e7485 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -16,14 +16,9 @@
static u32 _rtl92s_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
+ u32 i = ffs(bitmask);
- return i;
+ return i ? i - 1 : 32;
}
u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
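The hunk above replaces an open-coded scan for the lowest set bit with the kernel's ffs(). ffs() is 1-based and returns 0 for a zero mask, so the `i ? i - 1 : 32` fixup reproduces the old loop exactly, which fell off the end with i == 32 when no bit was set. A self-contained equivalent:

#include <linux/bitops.h>	/* ffs() */
#include <linux/types.h>

/* Index of the lowest set bit of @bitmask, e.g. 0x00f0 -> 4.
 * Returns 32 for a zero mask, matching the old 0..31 scan loop
 * falling through. */
static u32 calculate_bit_shift(u32 bitmask)
{
	u32 i = ffs(bitmask);

	return i ? i - 1 : 32;
}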
@@ -31,15 +26,15 @@ u32 rtl92s_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue = 0, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl92s_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
@@ -51,9 +46,9 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -63,9 +58,9 @@ void rtl92s_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
@@ -123,8 +118,8 @@ static u32 _rtl92s_phy_rf_serial_read(struct ieee80211_hw *hw,
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSI_READBACK_DATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
@@ -146,8 +141,8 @@ static void _rtl92s_phy_rf_serial_write(struct ieee80211_hw *hw,
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
@@ -157,8 +152,8 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -170,9 +165,9 @@ u32 rtl92s_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -187,9 +182,9 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
if (!((rtlphy->rf_pathmap >> rfpath) & 0x1))
return;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -204,9 +199,9 @@ void rtl92s_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
@@ -239,9 +234,9 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u8 reg_bw_opmode;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (rtlphy->set_bwmode_inprogress)
return;
@@ -296,7 +291,7 @@ void rtl92s_phy_set_bw_mode(struct ieee80211_hw *hw,
rtl92s_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
}
static bool _rtl92s_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
@@ -434,8 +429,8 @@ u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw)
u32 delay;
bool ret;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "switch to channel%d\n",
+ rtlphy->current_channel);
if (rtlphy->sw_chnl_inprogress)
return 0;
@@ -471,7 +466,7 @@ u8 rtl92s_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
return 1;
}
@@ -530,20 +525,19 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 initializecount = 0;
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "awake, sleeped:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies),
- rtlpriv->psc.state_inap);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "awake, slept:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies),
+ rtlpriv->psc.state_inap);
ppsc->last_awake_jiffies = jiffies;
rtl_write_word(rtlpriv, CMDR, 0x37FC);
rtl_write_byte(rtlpriv, TXPAUSE, 0x00);
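The reworded power-state messages above all report elapsed time the same way: snapshot the jiffies counter at one transition and convert the delta at the next. Sketched standalone (the timestamp variable here is a stand-in for ppsc->last_sleep_jiffies):

#include <linux/jiffies.h>
#include <linux/printk.h>

static unsigned long last_sleep_jiffies;	/* set when entering sleep */

static void report_wake_latency(void)
{
	/* Subtracting raw jiffies values is wrap-safe for deltas;
	 * jiffies_to_msecs() hides the HZ-dependent tick length. */
	pr_debug("slept %u ms\n",
		 jiffies_to_msecs(jiffies - last_sleep_jiffies));
}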
@@ -560,8 +554,8 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
}
case ERFOFF:{
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -586,34 +580,34 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] = %d before doze!\n",
- i + 1, queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] = %d before doze!\n",
+ i + 1, queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFOFF: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "sleep awaked:%d ms state_inap:%x\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies),
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "sleep awaked:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92se_phy_set_rf_sleep(hw);
@@ -968,7 +962,7 @@ u8 rtl92s_phy_config_rf(struct ieee80211_hw *hw, enum radio_path rfpath)
radio_b_tblen = RADIOB_ARRAYLENGTH;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
rtstatus = true;
switch (rfpath) {
@@ -1088,20 +1082,20 @@ void rtl92s_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
ROFDM0_XCAGCCORE1, MASKBYTE0);
rtlphy->default_initialgain[3] = rtl_get_bbreg(hw,
ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x)\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
/* read framesync */
rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
@@ -1163,10 +1157,10 @@ void rtl92s_phy_set_txpower(struct ieee80211_hw *hw, u8 channel)
_rtl92s_phy_get_txpower_index(hw, channel, &cckpowerlevel[0],
&ofdmpowerlevel[0]);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Channel-%d, cckPowerLevel (A / B) = 0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
- channel, cckpowerlevel[0], cckpowerlevel[1],
- ofdmpowerlevel[0], ofdmpowerlevel[1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Channel-%d, cckPowerLevel (A / B) = 0x%x / 0x%x, ofdmPowerLevel (A / B) = 0x%x / 0x%x\n",
+ channel, cckpowerlevel[0], cckpowerlevel[1],
+ ofdmpowerlevel[0], ofdmpowerlevel[1]);
_rtl92s_phy_ccxpower_indexcheck(hw, channel, &cckpowerlevel[0],
&ofdmpowerlevel[0]);
@@ -1224,17 +1218,17 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
skip:
switch (rtlhal->current_fwcmd_io) {
case FW_CMD_RA_RESET:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_RESET\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_RESET\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_RESET);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_ACTIVE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_ACTIVE\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_ACTIVE\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_ACTIVE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_N:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_REFRESH_N\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_RA_REFRESH_N\n");
input = FW_RA_REFRESH;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
@@ -1242,29 +1236,29 @@ skip:
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_BG:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "FW_CMD_RA_REFRESH_BG\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG,
+ "FW_CMD_RA_REFRESH_BG\n");
rtl_write_dword(rtlpriv, WFM5, FW_RA_REFRESH);
rtl92s_phy_chk_fwcmd_iodone(hw);
rtl_write_dword(rtlpriv, WFM5, FW_RA_DISABLE_RSSI_MASK);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_N_COMB:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "FW_CMD_RA_REFRESH_N_COMB\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG,
+ "FW_CMD_RA_REFRESH_N_COMB\n");
input = FW_RA_IOT_N_COMB;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_RA_REFRESH_BG_COMB:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG,
- "FW_CMD_RA_REFRESH_BG_COMB\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG,
+ "FW_CMD_RA_REFRESH_BG_COMB\n");
input = FW_RA_IOT_BG_COMB;
rtl_write_dword(rtlpriv, WFM5, input);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_IQK_ENABLE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_IQK_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_IQK_ENABLE\n");
rtl_write_dword(rtlpriv, WFM5, FW_IQK_ENABLE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
@@ -1299,7 +1293,7 @@ skip:
rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
break;
case FW_CMD_LPS_ENTER:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_ENTER\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_ENTER\n");
current_aid = rtlpriv->mac80211.assoc_id;
rtl_write_dword(rtlpriv, WFM5, (FW_LPS_ENTER |
((current_aid | 0xc000) << 8)));
@@ -1308,18 +1302,18 @@ skip:
* turbo mode until driver leave LPS */
break;
case FW_CMD_LPS_LEAVE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_LEAVE\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_LPS_LEAVE\n");
rtl_write_dword(rtlpriv, WFM5, FW_LPS_LEAVE);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_ADD_A2_ENTRY:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_ADD_A2_ENTRY\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "FW_CMD_ADD_A2_ENTRY\n");
rtl_write_dword(rtlpriv, WFM5, FW_ADD_A2_ENTRY);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
case FW_CMD_CTRL_DM_BY_DRIVER:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "FW_CMD_CTRL_DM_BY_DRIVER\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "FW_CMD_CTRL_DM_BY_DRIVER\n");
rtl_write_dword(rtlpriv, WFM5, FW_CTRL_DM_BY_DRIVER);
rtl92s_phy_chk_fwcmd_iodone(hw);
break;
@@ -1344,9 +1338,9 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
u16 fw_cmdmap = FW_CMD_IO_QUERY(rtlpriv);
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
- fw_cmdio, rtlhal->set_fwcmd_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set FW Cmd(%#x), set_fwcmd_inprogress(%d)\n",
+ fw_cmdio, rtlhal->set_fwcmd_inprogress);
do {
/* We re-map to combined FW CMD ones if firmware version */
@@ -1383,30 +1377,30 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
* DM map table in the future. */
switch (fw_cmdio) {
case FW_CMD_RA_INIT:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "RA init!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "RA init!!\n");
fw_cmdmap |= FW_RA_INIT_CTL;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
/* Clear control flag to sync with FW. */
FW_CMD_IO_CLR(rtlpriv, FW_RA_INIT_CTL);
break;
case FW_CMD_DIG_DISABLE:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set DIG disable!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set DIG disable!!\n");
fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
case FW_CMD_DIG_ENABLE:
case FW_CMD_DIG_RESUME:
if (!(rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE)) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set DIG enable or resume!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set DIG enable or resume!!\n");
fw_cmdmap |= (FW_DIG_ENABLE_CTL | FW_SS_CTL);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
}
break;
case FW_CMD_DIG_HALT:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set DIG halt!!\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set DIG halt!!\n");
fw_cmdmap &= ~(FW_DIG_ENABLE_CTL | FW_SS_CTL);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
@@ -1421,9 +1415,9 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
fw_param |= ((thermalval << 24) |
(rtlefuse->thermalmeter[0] << 16));
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Set TxPwr tracking!! FwCmdMap(%#x), FwParam(%#x)\n",
- fw_cmdmap, fw_param);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Set TxPwr tracking!! FwCmdMap(%#x), FwParam(%#x)\n",
+ fw_cmdmap, fw_param);
FW_CMD_PARA_SET(rtlpriv, fw_param);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
@@ -1443,9 +1437,9 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
/* Clear FW parameter in terms of RA parts. */
fw_param &= FW_RA_PARAM_CLR;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "[FW CMD] [New Version] Set RA/IOT Comb in n mode!! FwCmdMap(%#x), FwParam(%#x)\n",
- fw_cmdmap, fw_param);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "[FW CMD] [New Version] Set RA/IOT Comb in n mode!! FwCmdMap(%#x), FwParam(%#x)\n",
+ fw_cmdmap, fw_param);
FW_CMD_PARA_SET(rtlpriv, fw_param);
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
@@ -1531,8 +1525,8 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
break;
case FW_CMD_PAPE_CONTROL:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "[FW CMD] Set PAPE Control\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "[FW CMD] Set PAPE Control\n");
fw_cmdmap &= ~FW_PAPE_CTL_BY_SW_HW;
FW_CMD_IO_SET(rtlpriv, fw_cmdmap);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index a37855f57e76..5a493602aaf2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -25,7 +25,7 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
/* We only care about the path A for legacy. */
if (rtlefuse->eeprom_version < 2) {
- pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_httxpowerdiff & 0xf);
+ pwrbase0 = pwrlevel[0] + (rtlefuse->legacy_ht_txpowerdiff & 0xf);
} else {
legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff
[RF90_PATH_A][chnl - 1];
@@ -95,13 +95,13 @@ static void _rtl92s_get_powerbase(struct ieee80211_hw *hw, u8 *p_pwrlevel,
}
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "40MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
- p_final_pwridx[0], p_final_pwridx[1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "40MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
+ p_final_pwridx[0], p_final_pwridx[1]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "20MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
- p_final_pwridx[0], p_final_pwridx[1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "20MHz finalpwr_idx (A / B) = 0x%x / 0x%x\n",
+ p_final_pwridx[0], p_final_pwridx[1]);
}
}
@@ -124,9 +124,9 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
if (ant_pwr_diff < -8)
ant_pwr_diff = -8;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Antenna Diff from RF-B to RF-A = %d (0x%x)\n",
- ant_pwr_diff, ant_pwr_diff & 0xf);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Antenna Diff from RF-B to RF-A = %d (0x%x)\n",
+ ant_pwr_diff, ant_pwr_diff & 0xf);
ant_pwr_diff &= 0xf;
}
@@ -143,8 +143,8 @@ static void _rtl92s_set_antennadiff(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, RFPGA0_TXGAINSTAGE, (BXBTXAGC | BXCTXAGC | BXDTXAGC),
u4reg_val);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Write BCD-Diff(0x%x) = 0x%x\n",
- RFPGA0_TXGAINSTAGE, u4reg_val);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Write BCD-Diff(0x%x) = 0x%x\n",
+ RFPGA0_TXGAINSTAGE, u4reg_val);
}
static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
@@ -169,8 +169,8 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
writeval = rtlphy->mcs_offset[chnlgroup][index] +
((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "RTK better performance, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "RTK better performance, writeval = 0x%x\n", writeval);
break;
case 1:
/* Realtek regulatory increase power diff defined
@@ -178,9 +178,9 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
writeval = ((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Realtek regulatory, 40MHz, writeval = 0x%x\n",
- writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Realtek regulatory, 40MHz, writeval = 0x%x\n",
+ writeval);
} else {
chnlgroup = 0;
@@ -199,16 +199,16 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
+ ((index < 2) ?
pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Realtek regulatory, 20MHz, writeval = 0x%x\n",
- writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Realtek regulatory, 20MHz, writeval = 0x%x\n",
+ writeval);
}
break;
case 2:
/* Better regulatory don't increase any power diff */
writeval = ((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Better regulatory, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Better regulatory, writeval = 0x%x\n", writeval);
break;
case 3:
/* Customer defined power diff. increase power diff
@@ -216,15 +216,15 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
chnlgroup = 0;
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "customer's limit, 40MHz = 0x%x\n",
- rtlefuse->pwrgroup_ht40
- [RF90_PATH_A][chnl - 1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "customer's limit, 40MHz = 0x%x\n",
+ rtlefuse->pwrgroup_ht40
+ [RF90_PATH_A][chnl - 1]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "customer's limit, 20MHz = 0x%x\n",
- rtlefuse->pwrgroup_ht20
- [RF90_PATH_A][chnl - 1]);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "customer's limit, 20MHz = 0x%x\n",
+ rtlefuse->pwrgroup_ht20
+ [RF90_PATH_A][chnl - 1]);
}
for (i = 0; i < 4; i++) {
@@ -256,20 +256,20 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
(pwrdiff_limit[2] << 16) |
(pwrdiff_limit[1] << 8) |
(pwrdiff_limit[0]);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Customer's limit = 0x%x\n", customer_limit);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Customer's limit = 0x%x\n", customer_limit);
writeval = customer_limit + ((index < 2) ?
pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Customer, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Customer, writeval = 0x%x\n", writeval);
break;
default:
chnlgroup = 0;
writeval = rtlphy->mcs_offset[chnlgroup][index] +
((index < 2) ? pwrbase0 : pwrbase1);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "RTK better performance, writeval = 0x%x\n", writeval);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "RTK better performance, writeval = 0x%x\n", writeval);
break;
}
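All branches of the regulatory switch above reduce to the same shape: a per-channel-group offset (zero or a customer limit in the plain regulatory cases) added to a CCK or OFDM power base chosen by rate-table index. Condensed — the index < 2 split for the legacy entries is kept from the surrounding code:

#include <linux/types.h>

/* One per-rate TX power word: regulatory/MCS offset plus the
 * matching power base.  In this driver's rate-table layout the
 * first two indexes are the legacy/CCK entries. */
static u32 tx_power_writeval(u32 offset, u32 pwrbase0, u32 pwrbase1,
			     u8 index)
{
	return offset + ((index < 2) ? pwrbase0 : pwrbase1);
}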
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 7a54497b7df2..6d352a3161b8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -65,8 +65,8 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
struct rt_firmware *pfirmware = NULL;
char *fw_name = "rtlwifi/rtl8192sefw.bin";
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "Firmware callback routine entered!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
pr_err("Firmware %s not available\n", fw_name);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
index 9eaa5348b556..38034102aacb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c
@@ -328,13 +328,13 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
bool firstseg = (!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG)));
bool lastseg = (!(hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)));
- dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 bw_40 = 0;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
if (mac->opmode == NL80211_IFTYPE_STATION) {
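Beyond the logging rename, this hunk moves the TX path off the deprecated pci_map_single()/PCI_DMA_TODEVICE wrappers onto the generic DMA API, keyed by the underlying struct device. The full pattern, including the unmap that a real TX-completion path needs:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a driver buffer for CPU-to-device DMA.  The returned handle
 * must be checked: dma_map_single() can fail, e.g. when IOMMU
 * space is exhausted. */
static int map_tx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *mapping)
{
	*mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *mapping))
		return -ENOMEM;
	return 0;
}

/* Every successful mapping needs a matching unmap with the same
 * size and direction once the device is done with the buffer. */
static void unmap_tx_buffer(struct device *dev, dma_addr_t mapping,
			    size_t len)
{
	dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
}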
@@ -488,7 +488,7 @@ void rtl92se_tx_fill_desc(struct ieee80211_hw *hw,
/* DWORD 8 */
set_tx_desc_tx_buffer_address(pdesc, mapping);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
@@ -500,12 +500,12 @@ void rtl92se_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
struct rtl_tcb_desc *tcb_desc = (struct rtl_tcb_desc *)(skb->cb);
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
/* Clear all status */
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
index c61a92df9d73..8ada31380efa 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/dm.c
@@ -82,16 +82,16 @@ static void rtl8723e_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all);
}
static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
@@ -150,9 +150,9 @@ static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 -
dm_digtable->back_val;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "rssi_val_min = %x back_val %x\n",
- dm_digtable->rssi_val_min, dm_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "rssi_val_min = %x back_val %x\n",
+ dm_digtable->rssi_val_min, dm_digtable->back_val);
rtl8723e_dm_write_dig(hw);
}
@@ -201,10 +201,10 @@ static void rtl8723e_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
rtl8723e_dm_write_dig(hw);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "curmultista_cstate = %x dig_ext_port_stage %x\n",
- dm_digtable->curmultista_cstate,
- dm_digtable->dig_ext_port_stage);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "curmultista_cstate = %x dig_ext_port_stage %x\n",
+ dm_digtable->curmultista_cstate,
+ dm_digtable->dig_ext_port_stage);
}
static void rtl8723e_dm_initial_gain_sta(struct ieee80211_hw *hw)
@@ -212,10 +212,10 @@ static void rtl8723e_dm_initial_gain_sta(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "presta_cstate = %x, cursta_cstate = %x\n",
- dm_digtable->presta_cstate,
- dm_digtable->cursta_cstate);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "presta_cstate = %x, cursta_cstate = %x\n",
+ dm_digtable->presta_cstate,
+ dm_digtable->cursta_cstate);
if (dm_digtable->presta_cstate == dm_digtable->cursta_cstate ||
dm_digtable->cursta_cstate == DIG_STA_BEFORE_CONNECT ||
@@ -296,8 +296,8 @@ static void rtl8723e_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCKPDStage=%x\n", dm_digtable->cur_cck_pd_state);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCKPDStage=%x\n", dm_digtable->cur_cck_pd_state);
}
@@ -354,8 +354,8 @@ static void rtl8723e_dm_dynamic_txpower(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Not connected to any\n");
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
@@ -367,47 +367,47 @@ static void rtl8723e_dm_dynamic_txpower(struct ieee80211_hw *hw)
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ undec_sm_pwdb);
} else {
undec_sm_pwdb =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
} else {
undec_sm_pwdb =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ undec_sm_pwdb);
}
if (undec_sm_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n");
} else if ((undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
(undec_sm_pwdb >=
TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n");
} else if (undec_sm_pwdb <
(TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "TXHIGHPWRLEVEL_NORMAL\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "TXHIGHPWRLEVEL_NORMAL\n");
}
if (rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PHY_SetTxPowerLevel8192S() Channel = %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PHY_SetTxPowerLevel8192S() Channel = %d\n",
+ rtlphy->current_channel);
rtl8723e_phy_set_txpower_level(hw, rtlphy->current_channel);
}
@@ -419,10 +419,10 @@ void rtl8723e_dm_write_dig(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
- dm_digtable->back_val);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
+ dm_digtable->back_val);
if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
@@ -521,9 +521,9 @@ static void rtl8723e_dm_initialize_txpower_tracking_thermalmeter(
rtlpriv->dm.txpower_tracking = true;
rtlpriv->dm.txpower_trackinginit = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
static void rtl8723e_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
@@ -561,14 +561,14 @@ static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- " driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- " driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ " driver does not control rate adaptive mask\n");
return;
}
@@ -612,14 +612,14 @@ static void rtl8723e_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
- rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -716,31 +716,31 @@ static void rtl8723e_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
if (((mac->link_state == MAC80211_NOLINK)) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
dm_pstable->rssi_val_min = 0;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state == MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
} else {
dm_pstable->rssi_val_min =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "STA Default Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "STA Default Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
} else {
dm_pstable->rssi_val_min =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
- "AP Ext Port PWDB = 0x%lx\n",
- dm_pstable->rssi_val_min);
+ rtl_dbg(rtlpriv, DBG_LOUD, DBG_LOUD,
+ "AP Ext Port PWDB = 0x%lx\n",
+ dm_pstable->rssi_val_min);
}
rtl8723e_dm_rf_saving(hw, false);
@@ -820,21 +820,21 @@ void rtl8723e_dm_bt_coexist(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 tmp_byte = 0;
if (!rtlpriv->btcoexist.bt_coexistence) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[DM]{BT], BT not exist!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[DM]{BT], BT not exist!!\n");
return;
}
if (!rtlpriv->btcoexist.init_set) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[DM][BT], rtl8723e_dm_bt_coexist()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[DM][BT], %s\n", __func__);
rtl8723e_dm_init_bt_coexist(hw);
}
tmp_byte = rtl_read_byte(rtlpriv, 0x40);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "[DM][BT], 0x40 is 0x%x\n", tmp_byte);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], bt_dm_coexist start\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "[DM][BT], 0x40 is 0x%x\n", tmp_byte);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[DM][BT], bt_dm_coexist start\n");
rtl8723e_dm_bt_coexist_8723(hw);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
index 33481232fad0..d1b50a80c191 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
@@ -43,22 +43,22 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -110,9 +110,9 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -121,24 +121,24 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
isfw_read = _rtl8723e_check_fw_read_last_h2c(hw,
boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -217,16 +217,16 @@ static void _rtl8723e_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl8723e_fill_h2c_cmd(struct ieee80211_hw *hw,
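The H2C path shown above hands commands to firmware through mailbox registers and then polls for the firmware to consume them. The loops share one idiom — poll, delay, and bail out after a bounded number of tries rather than hang. A generic sketch (read_flag() stands in for the real register read):

#include <linux/delay.h>
#include <linux/types.h>

/* Bounded busy-wait on a hardware/firmware condition.  Returns
 * false if the flag never came up, so the caller can log the
 * timeout (as the hunks above do) instead of spinning forever. */
static bool poll_fw_ready(bool (*read_flag)(void), unsigned int max_tries)
{
	unsigned int tries;

	for (tries = 0; !read_flag(); tries++) {
		if (tries > max_tries)
			return false;
		udelay(100);	/* same 100 us step as the H2C wait */
	}
	return true;
}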
@@ -252,7 +252,7 @@ void rtl8723e_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
u8 u1_h2c_set_pwrmode[3] = { 0 };
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "FW LPS mode = %d\n", mode);
SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode,
@@ -458,16 +458,16 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n",
u1rsvdpageloc, 3);
rtl8723e_fill_h2c_cmd(hw, H2C_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
void rtl8723e_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
@@ -501,11 +501,11 @@ void rtl8723e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -564,11 +564,11 @@ void rtl8723e_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
index 3ac31ec26517..6c4fedc3ed63 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_bt_coexist.c
@@ -102,12 +102,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to High\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at Low\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh) {
@@ -116,18 +116,18 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 thresh error!!\n");
return rtlpriv->btcoexist.bt_pre_rssi_state;
}
@@ -144,12 +144,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at Low\n");
}
} else if ((rtlpriv->btcoexist.bt_pre_rssi_state ==
BT_RSSI_STATE_MEDIUM) ||
@@ -164,8 +164,8 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to High\n");
} else if (undecoratedsmoothed_pwdb < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpriv->btcoexist.cstate |=
@@ -174,12 +174,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at Medium\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh1) {
@@ -190,12 +190,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state1(struct ieee80211_hw *hw,
~BT_COEX_STATE_WIFI_RSSI_1_HIGH;
rtlpriv->btcoexist.cstate &=
~BT_COEX_STATE_WIFI_RSSI_1_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI_1 state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI_1 state stay at High\n");
}
}
}
@@ -230,12 +230,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
|= BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to High\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at Low\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh) {
@@ -244,18 +244,18 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
|= BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI thresh error!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI thresh error!!\n");
return rtlpriv->btcoexist.bt_pre_rssi_state;
}
if ((rtlpriv->btcoexist.bt_pre_rssi_state ==
@@ -271,12 +271,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at Low\n");
}
} else if ((rtlpriv->btcoexist.bt_pre_rssi_state ==
BT_RSSI_STATE_MEDIUM) ||
@@ -291,8 +291,8 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to High\n");
} else if (undecoratedsmoothed_pwdb < rssi_thresh) {
bt_rssi_state = BT_RSSI_STATE_LOW;
rtlpriv->btcoexist.cstate
@@ -301,12 +301,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Low\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_MEDIUM;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at Medium\n");
}
} else {
if (undecoratedsmoothed_pwdb < rssi_thresh1) {
@@ -317,12 +317,12 @@ u8 rtl8723e_dm_bt_check_coex_rssi_state(struct ieee80211_hw *hw,
&= ~BT_COEX_STATE_WIFI_RSSI_HIGH;
rtlpriv->btcoexist.cstate
&= ~BT_COEX_STATE_WIFI_RSSI_LOW;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state switch to Medium\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state switch to Medium\n");
} else {
bt_rssi_state = BT_RSSI_STATE_STAY_HIGH;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], RSSI state stay at High\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], RSSI state stay at High\n");
}
}
}
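Both RSSI classifiers above are threshold state machines with hysteresis: switching up requires clearing the threshold plus a tolerance, while the "stay at" states keep the previous answer near the boundary. The two-level case condenses to:

enum rssi_state { RSSI_LOW, RSSI_HIGH };

/* Two-level classifier with hysteresis, condensed from the state
 * machine above: climbing to HIGH needs thresh + margin, dropping
 * back to LOW only needs to fall below thresh.  The asymmetric
 * bands keep the state from flapping around the threshold. */
static enum rssi_state classify_rssi(enum rssi_state prev, long pwdb,
				     long thresh, long margin)
{
	if (prev == RSSI_LOW)
		return pwdb >= thresh + margin ? RSSI_HIGH : RSSI_LOW;

	return pwdb < thresh ? RSSI_LOW : RSSI_HIGH;
}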
@@ -342,9 +342,9 @@ long rtl8723e_dm_bt_get_rx_ss(struct ieee80211_hw *hw)
undecoratedsmoothed_pwdb
= rtlpriv->dm.entry_min_undec_sm_pwdb;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_get_rx_ss() = %ld\n",
- undecoratedsmoothed_pwdb);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s = %ld\n", __func__,
+ undecoratedsmoothed_pwdb);
return undecoratedsmoothed_pwdb;
}
@@ -367,10 +367,10 @@ void rtl8723e_dm_bt_balance(struct ieee80211_hw *hw,
}
rtlpriv->btcoexist.balance_on = balance_on;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
- balance_on ? "ON" : "OFF", ms0, ms1, h2c_parameter[0]<<16 |
- h2c_parameter[1]<<8 | h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[DM][BT], Balance=[%s:%dms:%dms], write 0xc=0x%x\n",
+ balance_on ? "ON" : "OFF", ms0, ms1, h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 | h2c_parameter[2]);
rtl8723e_fill_h2c_cmd(hw, 0xc, 3, h2c_parameter);
}
@@ -381,8 +381,8 @@ void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (type == BT_AGCTABLE_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]AGCTable Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]AGCTable Off!\n");
rtl_write_dword(rtlpriv, 0xc78, 0x641c0001);
rtl_write_dword(rtlpriv, 0xc78, 0x631d0001);
rtl_write_dword(rtlpriv, 0xc78, 0x621e0001);
@@ -400,8 +400,8 @@ void rtl8723e_dm_bt_agc_table(struct ieee80211_hw *hw, u8 type)
rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A,
RF_RX_G1, 0xfffff, 0x30355);
} else if (type == BT_AGCTABLE_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]AGCTable On!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]AGCTable On!\n");
rtl_write_dword(rtlpriv, 0xc78, 0x4e1c0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4d1d0001);
rtl_write_dword(rtlpriv, 0xc78, 0x4c1e0001);
@@ -428,12 +428,12 @@ void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (type == BT_BB_BACKOFF_OFF) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]BBBackOffLevel Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]BBBackOffLevel Off!\n");
rtl_write_dword(rtlpriv, 0xc04, 0x3a05611);
} else if (type == BT_BB_BACKOFF_ON) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BT]BBBackOffLevel On!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BT]BBBackOffLevel On!\n");
rtl_write_dword(rtlpriv, 0xc04, 0x3a07611);
rtlpriv->btcoexist.sw_coexist_all_off = false;
}
@@ -442,14 +442,14 @@ void rtl8723e_dm_bt_bb_back_off_level(struct ieee80211_hw *hw, u8 type)
void rtl8723e_dm_bt_fw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_fw_coex_all_off()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "rtl8723e_dm_bt_fw_coex_all_off()\n");
if (rtlpriv->btcoexist.fw_coexist_all_off)
return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_fw_coex_all_off(), real Do\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "rtl8723e_dm_bt_fw_coex_all_off(), real Do\n");
rtl8723e_dm_bt_fw_coex_all_off_8723a(hw);
rtlpriv->btcoexist.fw_coexist_all_off = true;
}
@@ -458,14 +458,14 @@ void rtl8723e_dm_bt_sw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_sw_coex_all_off()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s\n", __func__);
if (rtlpriv->btcoexist.sw_coexist_all_off)
return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_sw_coex_all_off(), real Do\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s, real Do\n", __func__);
rtl8723e_dm_bt_sw_coex_all_off_8723a(hw);
rtlpriv->btcoexist.sw_coexist_all_off = true;
}
@@ -474,13 +474,13 @@ void rtl8723e_dm_bt_hw_coex_all_off(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_hw_coex_all_off()\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s\n", __func__);
if (rtlpriv->btcoexist.hw_coexist_all_off)
return;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "rtl8723e_dm_bt_hw_coex_all_off(), real Do\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "%s, real Do\n", __func__);
rtl8723e_dm_bt_hw_coex_all_off_8723a(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 652d8ff9cccb..53af0d209b11 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -20,7 +20,7 @@ void rtl8723e_dm_bt_turn_off_bt_coexist_before_enter_lps(struct ieee80211_hw *hw
return;
if (ppsc->inactiveps) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BT][DM], Before enter IPS, turn off all Coexist DM\n");
rtlpriv->btcoexist.cstate = 0;
rtlpriv->btcoexist.previous_state = 0;
@@ -68,9 +68,10 @@ void rtl_8723e_bt_wifi_media_status_notify(struct ieee80211_hw *hw,
else
h2c_parameter[2] = 0x20;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], FW write 0x19=0x%x\n",
- h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], FW write 0x19=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
rtl8723e_fill_h2c_cmd(hw, 0x19, 3, h2c_parameter);
}
@@ -98,7 +99,7 @@ static void rtl8723e_dm_bt_set_fw_3a(struct ieee80211_hw *hw,
h2c_parameter[2] = byte3;
h2c_parameter[3] = byte4;
h2c_parameter[4] = byte5;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], FW write 0x3a(4bytes)=0x%x%8x\n",
h2c_parameter[0], h2c_parameter[1]<<24 |
h2c_parameter[2]<<16 | h2c_parameter[3]<<8 |
@@ -111,7 +112,7 @@ static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Need to decrease bt power\n");
rtlpriv->btcoexist.cstate |=
BT_COEX_STATE_DEC_BT_POWER;
@@ -130,12 +131,12 @@ static bool rtl8723e_dm_bt_is_same_coexist_state(struct ieee80211_hw *hw)
rtlpriv->btcoexist.cstate) &&
(rtlpriv->btcoexist.previous_state_h ==
rtlpriv->btcoexist.cstate_h)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], Coexist state do not change!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[DM][BT], Coexist state do not change!!\n");
return true;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[DM][BT], Coexist state changed!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[DM][BT], Coexist state changed!!\n");
return false;
}
}
@@ -146,16 +147,16 @@ static void rtl8723e_dm_bt_set_coex_table(struct ieee80211_hw *hw,
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "set coex table, set 0x6c0=0x%x\n", val_0x6c0);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "set coex table, set 0x6c0=0x%x\n", val_0x6c0);
rtl_write_dword(rtlpriv, 0x6c0, val_0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "set coex table, set 0x6c8=0x%x\n", val_0x6c8);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "set coex table, set 0x6c8=0x%x\n", val_0x6c8);
rtl_write_dword(rtlpriv, 0x6c8, val_0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "set coex table, set 0x6cc=0x%x\n", val_0x6cc);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "set coex table, set 0x6cc=0x%x\n", val_0x6cc);
rtl_write_byte(rtlpriv, 0x6cc, val_0x6cc);
}
@@ -164,12 +165,12 @@ static void rtl8723e_dm_bt_set_hw_pta_mode(struct ieee80211_hw *hw, bool b_mode)
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (BT_PTA_MODE_ON == b_mode) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode on\n");
/* Enable GPIO 0/1/2/3/8 pins for bt */
rtl_write_byte(rtlpriv, 0x40, 0x20);
rtlpriv->btcoexist.hw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode off\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE, "PTA mode off\n");
rtl_write_byte(rtlpriv, 0x40, 0x0);
}
}
@@ -181,15 +182,15 @@ static void rtl8723e_dm_bt_set_sw_rf_rx_lpf_corner(struct ieee80211_hw *hw,
if (BT_RF_RX_LPF_CORNER_SHRINK == type) {
/* Shrink RF Rx LPF corner, 0x1e[7:4]=1111 ==> [11:4] */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "Shrink RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "Shrink RF Rx LPF corner!!\n");
rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e,
0xfffff, 0xf0ff7);
rtlpriv->btcoexist.sw_coexist_all_off = false;
} else if (BT_RF_RX_LPF_CORNER_RESUME == type) {
/*Resume RF Rx LPF corner*/
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "Resume RF Rx LPF corner!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "Resume RF Rx LPF corner!!\n");
rtl8723e_phy_set_rf_reg(hw, RF90_PATH_A, 0x1e, 0xfffff,
rtlpriv->btcoexist.bt_rfreg_origin_1e);
}
@@ -204,12 +205,12 @@ static void dm_bt_set_sw_penalty_tx_rate_adapt(struct ieee80211_hw *hw,
tmp_u1 = rtl_read_byte(rtlpriv, 0x4fd);
tmp_u1 |= BIT(0);
if (BT_TX_RATE_ADAPTIVE_LOW_PENALTY == ra_type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"Tx rate adaptive, set low penalty!!\n");
tmp_u1 &= ~BIT(2);
rtlpriv->btcoexist.sw_coexist_all_off = false;
} else if (BT_TX_RATE_ADAPTIVE_NORMAL == ra_type) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"Tx rate adaptive, set normal!!\n");
tmp_u1 |= BIT(2);
}
@@ -279,14 +280,14 @@ static bool rtl8723e_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
if (!rtl8723e_dm_bt_is_wifi_busy(hw) &&
!rtlpriv->btcoexist.bt_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi idle + Bt idle, bt coex mechanism always off!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi idle + Bt idle, bt coex mechanism always off!!\n");
rtl8723e_dm_bt_btdm_structure_reload_all_off(hw, &btdm8723);
b_common = true;
} else if (rtl8723e_dm_bt_is_wifi_busy(hw) &&
!rtlpriv->btcoexist.bt_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi non-idle + Bt disabled/idle!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi non-idle + Bt disabled/idle!!\n");
btdm8723.low_penalty_rate_adaptive = true;
btdm8723.rf_rx_lpf_shrink = false;
btdm8723.reject_aggre_pkt = false;
@@ -307,14 +308,14 @@ static bool rtl8723e_dm_bt_is_2_ant_common_action(struct ieee80211_hw *hw)
b_common = true;
} else if (rtlpriv->btcoexist.bt_busy) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Bt non-idle!\n");
if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Wifi connection exist\n");
b_common = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"No Wifi connection!\n");
btdm8723.rf_rx_lpf_shrink = true;
btdm8723.low_penalty_rate_adaptive = false;
@@ -359,14 +360,14 @@ static void rtl8723e_dm_bt_set_sw_full_time_dac_swing(
struct rtl_priv *rtlpriv = rtl_priv(hw);
if (sw_dac_swing_on) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], SwDacSwing = 0x%x\n", sw_dac_swing_lvl);
rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000,
sw_dac_swing_lvl);
rtlpriv->btcoexist.sw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], SwDacSwing Off!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], SwDacSwing Off!\n");
rtl8723_phy_set_bb_reg(hw, 0x880, 0xff000000, 0xc0);
}
}
@@ -384,9 +385,9 @@ static void rtl8723e_dm_bt_set_fw_dec_bt_pwr(
rtlpriv->btcoexist.fw_coexist_all_off = false;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], decrease Bt Power : %s, write 0x21=0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x21, 1, h2c_parameter);
}
@@ -404,10 +405,10 @@ static void rtl8723e_dm_bt_set_fw_2_ant_hid(struct ieee80211_hw *hw,
if (b_dac_swing_on)
h2c_parameter[0] |= BIT(1); /* Dac Swing default enable */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n",
- (b_enable ? "ON!!" : "OFF!!"), (b_dac_swing_on ? "ON" : "OFF"),
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], turn 2-Ant+HID mode %s, DACSwing:%s, write 0x15=0x%x\n",
+ (b_enable ? "ON!!" : "OFF!!"), (b_dac_swing_on ? "ON" : "OFF"),
+ h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x15, 1, h2c_parameter);
}
@@ -424,56 +425,56 @@ static void rtl8723e_dm_bt_set_fw_tdma_ctrl(struct ieee80211_hw *hw,
h2c_parameter1[0] = 0;
if (b_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], set BT PTA update manager to trigger update!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], set BT PTA update manager to trigger update!!\n");
h2c_parameter1[0] |= BIT(0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], turn TDMA mode ON!!\n");
h2c_parameter[0] |= BIT(0); /* function enable */
if (TDMA_1ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_1ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_1ANT\n");
h2c_parameter[0] |= BIT(1);
} else if (TDMA_2ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_2ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_2ANT\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], Unknown Ant\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], Unknown Ant\n");
}
if (TDMA_NAV_OFF == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_NAV_OFF\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_NAV_OFF\n");
} else if (TDMA_NAV_ON == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TDMA_NAV_ON\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TDMA_NAV_ON\n");
h2c_parameter[0] |= BIT(2);
}
if (TDMA_DAC_SWING_OFF == dac_swing_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], TDMA_DAC_SWING_OFF\n");
} else if (TDMA_DAC_SWING_ON == dac_swing_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], TDMA_DAC_SWING_ON\n");
h2c_parameter[0] |= BIT(4);
}
rtlpriv->btcoexist.fw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], set BT PTA update manager to no update!!\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], turn TDMA mode OFF!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], set BT PTA update manager to no update!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], turn TDMA mode OFF!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], FW2AntTDMA, write 0x26=0x%x\n",
- h2c_parameter1[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], FW2AntTDMA, write 0x26=0x%x\n",
+ h2c_parameter1[0]);
rtl8723e_fill_h2c_cmd(hw, 0x26, 1, h2c_parameter1);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], FW2AntTDMA, write 0x14=0x%x\n",
h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x14, 1, h2c_parameter);
@@ -486,18 +487,18 @@ static void rtl8723e_dm_bt_set_fw_ignore_wlan_act(struct ieee80211_hw *hw,
u8 h2c_parameter[1] = {0};
if (b_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], BT Ignore Wlan_Act !!\n");
h2c_parameter[0] |= BIT(0); /* function enable */
rtlpriv->btcoexist.fw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], BT don't ignore Wlan_Act !!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n",
- h2c_parameter[0]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, write 0x25=0x%x\n",
+ h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x25, 1, h2c_parameter);
}
@@ -513,43 +514,43 @@ static void rtl8723e_dm_bt_set_fw_tra_tdma_ctrl(struct ieee80211_hw *hw,
/* Only 8723 B cut should do this */
if (IS_VENDOR_8723_A_CUT(rtlhal->version)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], not 8723B cut, don't set Traditional TDMA!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], not 8723B cut, don't set Traditional TDMA!!\n");
return;
}
if (b_enable) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], turn TTDMA mode ON!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], turn TTDMA mode ON!!\n");
h2c_parameter[0] |= BIT(0); /* function enable */
if (TDMA_1ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_1ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_1ANT\n");
h2c_parameter[0] |= BIT(1);
} else if (TDMA_2ANT == ant_num) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_2ANT\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_2ANT\n");
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], Unknown Ant\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], Unknown Ant\n");
}
if (TDMA_NAV_OFF == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_NAV_OFF\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_NAV_OFF\n");
} else if (TDMA_NAV_ON == nav_en) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex], TTDMA_NAV_ON\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex], TTDMA_NAV_ON\n");
h2c_parameter[1] |= BIT(0);
}
rtlpriv->btcoexist.fw_coexist_all_off = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], turn TTDMA mode OFF!!\n");
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], FW Traditional TDMA, write 0x33=0x%x\n",
h2c_parameter[0] << 8 | h2c_parameter[1]);
@@ -563,9 +564,9 @@ static void rtl8723e_dm_bt_set_fw_dac_swing_level(struct ieee80211_hw *hw,
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = dac_swing_lvl;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x29=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x29, 1, h2c_parameter);
@@ -582,9 +583,9 @@ static void rtl8723e_dm_bt_set_fw_bt_hid_info(struct ieee80211_hw *hw,
h2c_parameter[0] |= BIT(0);
rtlpriv->btcoexist.fw_coexist_all_off = false;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set BT HID information=0x%x\n", b_enable);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x24=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x24, 1, h2c_parameter);
@@ -597,9 +598,9 @@ static void rtl8723e_dm_bt_set_fw_bt_retry_index(struct ieee80211_hw *hw,
u8 h2c_parameter[1] = {0};
h2c_parameter[0] = retry_index;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set BT Retry Index=%d\n", retry_index);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x23=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x23, 1, h2c_parameter);
@@ -614,12 +615,12 @@ static void rtl8723e_dm_bt_set_fw_wlan_act(struct ieee80211_hw *hw,
h2c_parameter_hi[0] = wlan_act_hi;
h2c_parameter_lo[0] = wlan_act_lo;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], Set WLAN_ACT Hi:Lo=0x%x/0x%x\n",
wlan_act_hi, wlan_act_lo);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x22=0x%x\n", h2c_parameter_hi[0]);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], write 0x11=0x%x\n", h2c_parameter_lo[0]);
/* WLAN_ACT = High duration, unit:ms */
@@ -646,107 +647,107 @@ void rtl8723e_dm_bt_set_bt_dm(struct ieee80211_hw *hw,
/* check new setting is different with the old one, */
/* if all the same, don't do the setting again. */
if (memcmp(btdm_8723, btdm, sizeof(struct btdm_8723)) == 0) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], the same coexist setting, return!!\n");
return;
} else { /* save the new coexist setting */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], UPDATE TO NEW COEX SETTING!!\n");
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new bAllOff=0x%x/ 0x%x\n",
btdm_8723->all_off, btdm->all_off);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new agc_table_en=0x%x/ 0x%x\n",
btdm_8723->agc_table_en, btdm->agc_table_en);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new adc_back_off_on=0x%x/ 0x%x\n",
- btdm_8723->adc_back_off_on,
- btdm->adc_back_off_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x\n",
- btdm_8723->b2_ant_hid_en, btdm->b2_ant_hid_en);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x\n",
- btdm_8723->low_penalty_rate_adaptive,
- btdm->low_penalty_rate_adaptive);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x\n",
- btdm_8723->rf_rx_lpf_shrink,
- btdm->rf_rx_lpf_shrink);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x\n",
- btdm_8723->reject_aggre_pkt,
- btdm->reject_aggre_pkt);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdma_on=0x%x/ 0x%x\n",
- btdm_8723->tdma_on, btdm->tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdmaAnt=0x%x/ 0x%x\n",
- btdm_8723->tdma_ant, btdm->tdma_ant);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdmaNav=0x%x/ 0x%x\n",
- btdm_8723->tdma_nav, btdm->tdma_nav);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x\n",
- btdm_8723->tdma_dac_swing, btdm->tdma_dac_swing);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x\n",
- btdm_8723->fw_dac_swing_lvl,
- btdm->fw_dac_swing_lvl);
-
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x\n",
- btdm_8723->tra_tdma_on, btdm->tra_tdma_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x\n",
- btdm_8723->tra_tdma_ant, btdm->tra_tdma_ant);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new traTdmaNav=0x%x/ 0x%x\n",
- btdm_8723->tra_tdma_nav, btdm->tra_tdma_nav);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x\n",
- btdm_8723->ps_tdma_on, btdm->ps_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new adc_back_off_on=0x%x/ 0x%x\n",
+ btdm_8723->adc_back_off_on,
+ btdm->adc_back_off_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new b2_ant_hid_en=0x%x/ 0x%x\n",
+ btdm_8723->b2_ant_hid_en, btdm->b2_ant_hid_en);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bLowPenaltyRateAdaptive=0x%x/ 0x%x\n",
+ btdm_8723->low_penalty_rate_adaptive,
+ btdm->low_penalty_rate_adaptive);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bRfRxLpfShrink=0x%x/ 0x%x\n",
+ btdm_8723->rf_rx_lpf_shrink,
+ btdm->rf_rx_lpf_shrink);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bRejectAggrePkt=0x%x/ 0x%x\n",
+ btdm_8723->reject_aggre_pkt,
+ btdm->reject_aggre_pkt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdma_on=0x%x/ 0x%x\n",
+ btdm_8723->tdma_on, btdm->tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdmaAnt=0x%x/ 0x%x\n",
+ btdm_8723->tdma_ant, btdm->tdma_ant);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdmaNav=0x%x/ 0x%x\n",
+ btdm_8723->tdma_nav, btdm->tdma_nav);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new tdma_dac_swing=0x%x/ 0x%x\n",
+ btdm_8723->tdma_dac_swing, btdm->tdma_dac_swing);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new fw_dac_swing_lvl=0x%x/ 0x%x\n",
+ btdm_8723->fw_dac_swing_lvl,
+ btdm->fw_dac_swing_lvl);
+
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bTraTdmaOn=0x%x/ 0x%x\n",
+ btdm_8723->tra_tdma_on, btdm->tra_tdma_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new traTdmaAnt=0x%x/ 0x%x\n",
+ btdm_8723->tra_tdma_ant, btdm->tra_tdma_ant);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new traTdmaNav=0x%x/ 0x%x\n",
+ btdm_8723->tra_tdma_nav, btdm->tra_tdma_nav);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new bPsTdmaOn=0x%x/ 0x%x\n",
+ btdm_8723->ps_tdma_on, btdm->ps_tdma_on);
for (i = 0; i < 5; i++) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x\n",
- btdm_8723->ps_tdma_byte[i],
- btdm->ps_tdma_byte[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new psTdmaByte[i]=0x%x/ 0x%x\n",
+ btdm_8723->ps_tdma_byte[i],
+ btdm->ps_tdma_byte[i]);
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new bIgnoreWlanAct=0x%x/ 0x%x\n",
btdm_8723->ignore_wlan_act,
btdm->ignore_wlan_act);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new bPtaOn=0x%x/ 0x%x\n",
btdm_8723->pta_on, btdm->pta_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new val_0x6c0=0x%x/ 0x%x\n",
btdm_8723->val_0x6c0, btdm->val_0x6c0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new val_0x6c8=0x%x/ 0x%x\n",
btdm_8723->val_0x6c8, btdm->val_0x6c8);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], original/new val_0x6cc=0x%x/ 0x%x\n",
btdm_8723->val_0x6cc, btdm->val_0x6cc);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new sw_dac_swing_on=0x%x/ 0x%x\n",
- btdm_8723->sw_dac_swing_on,
- btdm->sw_dac_swing_on);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x\n",
- btdm_8723->sw_dac_swing_lvl,
- btdm->sw_dac_swing_lvl);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new wlanActHi=0x%x/ 0x%x\n",
- btdm_8723->wlan_act_hi, btdm->wlan_act_hi);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new wlanActLo=0x%x/ 0x%x\n",
- btdm_8723->wlan_act_lo, btdm->wlan_act_lo);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], original/new btRetryIndex=0x%x/ 0x%x\n",
- btdm_8723->bt_retry_index, btdm->bt_retry_index);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new sw_dac_swing_on=0x%x/ 0x%x\n",
+ btdm_8723->sw_dac_swing_on,
+ btdm->sw_dac_swing_on);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new sw_dac_swing_lvl=0x%x/ 0x%x\n",
+ btdm_8723->sw_dac_swing_lvl,
+ btdm->sw_dac_swing_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new wlanActHi=0x%x/ 0x%x\n",
+ btdm_8723->wlan_act_hi, btdm->wlan_act_hi);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new wlanActLo=0x%x/ 0x%x\n",
+ btdm_8723->wlan_act_lo, btdm->wlan_act_lo);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], original/new btRetryIndex=0x%x/ 0x%x\n",
+ btdm_8723->bt_retry_index, btdm->bt_retry_index);
memcpy(btdm_8723, btdm, sizeof(struct btdm_8723));
}
@@ -756,14 +757,14 @@ void rtl8723e_dm_bt_set_bt_dm(struct ieee80211_hw *hw,
*/
if (rtlpriv->btcoexist.hold_for_bt_operation) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], set to ignore wlanAct for BT OP!!\n");
rtl8723e_dm_bt_set_fw_ignore_wlan_act(hw, true);
return;
}
if (btdm->all_off) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], disable all coexist mechanism !!\n");
rtl8723e_btdm_coex_all_off(hw);
return;
@@ -929,34 +930,34 @@ static u8 rtl8723e_dm_bt_bt_tx_rx_counter_level(struct ieee80211_hw *hw)
bt_tx_rx_cnt = rtl8723e_dm_bt_tx_rx_couter_h(hw)
+ rtl8723e_dm_bt_tx_rx_couter_l(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt);
rtlpriv->btcoexist.cstate_h &= ~
(BT_COEX_STATE_BT_CNT_LEVEL_0 | BT_COEX_STATE_BT_CNT_LEVEL_1|
BT_COEX_STATE_BT_CNT_LEVEL_2);
if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_3) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 3\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 3\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_3;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_3;
} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 2\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 2\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_2;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_2;
} else if (bt_tx_rx_cnt >= BT_TXRX_CNT_THRES_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 1\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 1\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_1;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_1;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters at level 0\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters at level 0\n");
bt_tx_rx_cnt_lvl = BT_TXRX_CNT_LEVEL_0;
rtlpriv->btcoexist.cstate_h |=
BT_COEX_STATE_BT_CNT_LEVEL_0;
@@ -979,11 +980,11 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.reject_aggre_pkt = false;
bt_tx_rx_cnt_lvl = rtl8723e_dm_bt_bt_tx_rx_counter_level(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
/* coex table */
btdm8723.val_0x6c0 = 0x55555555;
btdm8723.val_0x6c8 = 0xffff;
@@ -997,24 +998,24 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
/* fw mechanism */
btdm8723.ps_tdma_on = true;
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1022,8 +1023,8 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "HT20 or Legacy\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "HT20 or Legacy\n");
bt_rssi_state =
rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
bt_rssi_state1 =
@@ -1037,14 +1038,14 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
/* sw mechanism */
if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi high\n");
btdm8723.agc_table_en = true;
btdm8723.adc_back_off_on = true;
btdm8723.sw_dac_swing_on = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi low\n");
btdm8723.agc_table_en = false;
btdm8723.adc_back_off_on = false;
btdm8723.sw_dac_swing_on = false;
@@ -1054,30 +1055,30 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.ps_tdma_on = true;
if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 high\n");
/* only rssi high we need to do this, */
/* when rssi low, the value will modified by fw */
rtl_write_byte(rtlpriv, 0x883, 0x40);
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x83;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters>= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters>= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x83;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1085,27 +1086,27 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 low\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x2;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1120,13 +1121,13 @@ static void rtl8723e_dm_bt_2_ant_hid_sco_esco(struct ieee80211_hw *hw)
/* Always ignore WlanAct if bHid|bSCOBusy|bSCOeSCO */
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
- hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
if ((hal_coex_8723.bt_inq_page_start_time) ||
(BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
btdm8723.ps_tdma_on = true;
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
@@ -1157,11 +1158,11 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
bt_tx_rx_cnt_lvl = rtl8723e_dm_bt_bt_tx_rx_counter_level(hw);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters = %d\n", bt_tx_rx_cnt_lvl);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG, "HT40\n");
bt_rssi_state =
rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 37, 0);
@@ -1179,27 +1180,27 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_on = true;
if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi high\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1207,11 +1208,11 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi low\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
@@ -1219,16 +1220,16 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl ==
BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x0;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1237,8 +1238,8 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
}
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "HT20 or Legacy\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "HT20 or Legacy\n");
bt_rssi_state =
rtl8723e_dm_bt_check_coex_rssi_state(hw, 2, 47, 0);
bt_rssi_state1 =
@@ -1252,14 +1253,14 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
/* sw mechanism */
if ((bt_rssi_state == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi high\n");
btdm8723.agc_table_en = true;
btdm8723.adc_back_off_on = true;
btdm8723.sw_dac_swing_on = false;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi low\n");
btdm8723.agc_table_en = false;
btdm8723.adc_back_off_on = false;
btdm8723.sw_dac_swing_on = false;
@@ -1269,30 +1270,30 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_on = true;
if ((bt_rssi_state1 == BT_RSSI_STATE_HIGH) ||
(bt_rssi_state1 == BT_RSSI_STATE_STAY_HIGH)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 high\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 high\n");
/* only rssi high we need to do this, */
/* when rssi low, the value will modified by fw */
rtl_write_byte(rtlpriv, 0x883, 0x40);
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x81;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1300,27 +1301,27 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
btdm8723.ps_tdma_byte[4] = 0x80;
}
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Wifi rssi-1 low\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Wifi rssi-1 low\n");
if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
btdm8723.ps_tdma_byte[2] = 0x5;
btdm8723.ps_tdma_byte[3] = 0x0;
btdm8723.ps_tdma_byte[4] = 0x80;
} else if (bt_tx_rx_cnt_lvl == BT_TXRX_CNT_LEVEL_1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters >= 1200 && < 1400\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xa;
btdm8723.ps_tdma_byte[2] = 0xa;
btdm8723.ps_tdma_byte[3] = 0x0;
btdm8723.ps_tdma_byte[4] = 0x80;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT TxRx Counters < 1200\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT TxRx Counters < 1200\n");
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0xf;
btdm8723.ps_tdma_byte[2] = 0xf;
@@ -1333,14 +1334,14 @@ static void rtl8723e_dm_bt_2_ant_ftp_a2dp(struct ieee80211_hw *hw)
if (rtl8723e_dm_bt_need_to_dec_bt_pwr(hw))
btdm8723.dec_bt_pwr = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
- hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT btInqPageStartTime = 0x%x, btTxRxCntLvl = %d\n",
+ hal_coex_8723.bt_inq_page_start_time, bt_tx_rx_cnt_lvl);
if ((hal_coex_8723.bt_inq_page_start_time) ||
(BT_TXRX_CNT_LEVEL_3 == bt_tx_rx_cnt_lvl)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], Set BT inquiry / page scan 0x3a setting\n");
btdm8723.ps_tdma_on = true;
btdm8723.ps_tdma_byte[0] = 0xa3;
btdm8723.ps_tdma_byte[1] = 0x5;
@@ -1366,20 +1367,20 @@ static void rtl8723e_dm_bt_inq_page_monitor(struct ieee80211_hw *hw)
rtlpriv->btcoexist.cstate |=
BT_COEX_STATE_BT_INQ_PAGE;
hal_coex_8723.bt_inq_page_start_time = cur_time;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT Inquiry/page is started at time : 0x%x\n",
- hal_coex_8723.bt_inq_page_start_time);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT Inquiry/page is started at time : 0x%x\n",
+ hal_coex_8723.bt_inq_page_start_time);
}
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n",
- hal_coex_8723.bt_inq_page_start_time, cur_time);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BT Inquiry/page started time : 0x%x, cur_time : 0x%x\n",
+ hal_coex_8723.bt_inq_page_start_time, cur_time);
if (hal_coex_8723.bt_inq_page_start_time) {
if ((((long)cur_time -
(long)hal_coex_8723.bt_inq_page_start_time) / HZ)
>= 10) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], BT Inquiry/page >= 10sec!!!\n");
hal_coex_8723.bt_inq_page_start_time = 0;
rtlpriv->btcoexist.cstate &=
@@ -1406,14 +1407,14 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 bt_info_original;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex] Get bt info by fw!!\n");
_rtl8723_dm_bt_check_wifi_state(hw);
if (hal_coex_8723.c2h_bt_info_req_sent) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "[BTCoex] c2h for bt_info not rcvd yet!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "[BTCoex] c2h for bt_info not rcvd yet!!\n");
}
bt_info_original = hal_coex_8723.c2h_bt_info_original;
@@ -1426,8 +1427,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
if (rtl8723e_dm_bt_is_2_ant_common_action(hw)) {
rtlpriv->btcoexist.bt_profile_case = BT_COEX_MECH_COMMON;
rtlpriv->btcoexist.bt_profile_action = BT_COEX_MECH_COMMON;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "Action 2-Ant common.\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "Action 2-Ant common.\n");
} else {
if ((bt_info_original & BTINFO_B_HID) ||
(bt_info_original & BTINFO_B_SCO_BUSY) ||
@@ -1438,8 +1439,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
BT_COEX_MECH_HID_SCO_ESCO;
rtlpriv->btcoexist.bt_profile_action =
BT_COEX_MECH_HID_SCO_ESCO;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BTInfo: bHid|bSCOBusy|bSCOeSCO\n");
rtl8723e_dm_bt_2_ant_hid_sco_esco(hw);
} else if ((bt_info_original & BTINFO_B_FTP) ||
(bt_info_original & BTINFO_B_A2DP)) {
@@ -1449,8 +1450,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
BT_COEX_MECH_FTP_A2DP;
rtlpriv->btcoexist.bt_profile_action =
BT_COEX_MECH_FTP_A2DP;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "BTInfo: bFTP|bA2DP\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "BTInfo: bFTP|bA2DP\n");
rtl8723e_dm_bt_2_ant_ftp_a2dp(hw);
} else {
rtlpriv->btcoexist.cstate |=
@@ -1459,8 +1460,8 @@ static void _rtl8723e_dm_bt_coexist_2_ant(struct ieee80211_hw *hw)
BT_COEX_MECH_NONE;
rtlpriv->btcoexist.bt_profile_action =
BT_COEX_MECH_NONE;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], BTInfo: undefined case!!!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], BTInfo: undefined case!!!!\n");
rtl8723e_dm_bt_2_ant_hid_sco_esco(hw);
}
}
@@ -1513,7 +1514,7 @@ static void rtl8723e_dm_bt_query_bt_information(struct ieee80211_hw *hw)
h2c_parameter[0] |= BIT(0);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"Query Bt information, write 0x38=0x%x\n", h2c_parameter[0]);
rtl8723e_fill_h2c_cmd(hw, 0x38, 1, h2c_parameter);
@@ -1548,10 +1549,10 @@ static void rtl8723e_dm_bt_bt_hw_counters_monitor(struct ieee80211_hw *hw)
hal_coex_8723.low_priority_tx = reg_lp_tx;
hal_coex_8723.low_priority_rx = reg_lp_rx;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"High Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
reg_hp_tx_rx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Low Priority Tx/Rx (reg 0x%x)=%x(%d)/%x(%d)\n",
reg_lp_tx_rx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
rtlpriv->btcoexist.lps_counter = 0;
@@ -1584,26 +1585,26 @@ static void rtl8723e_dm_bt_bt_enable_disable_check(struct ieee80211_hw *hw)
if (bt_alife) {
rtlpriv->btcoexist.bt_active_zero_cnt = 0;
rtlpriv->btcoexist.cur_bt_disabled = false;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "8723A BT is enabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "8723A BT is enabled !!\n");
} else {
rtlpriv->btcoexist.bt_active_zero_cnt++;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "8723A bt all counters=0, %d times!!\n",
- rtlpriv->btcoexist.bt_active_zero_cnt);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "8723A bt all counters=0, %d times!!\n",
+ rtlpriv->btcoexist.bt_active_zero_cnt);
if (rtlpriv->btcoexist.bt_active_zero_cnt >= 2) {
rtlpriv->btcoexist.cur_bt_disabled = true;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "8723A BT is disabled !!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "8723A BT is disabled !!\n");
}
}
if (rtlpriv->btcoexist.pre_bt_disabled !=
rtlpriv->btcoexist.cur_bt_disabled) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST,
- DBG_TRACE, "8723A BT is from %s to %s!!\n",
- (rtlpriv->btcoexist.pre_bt_disabled ?
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST,
+ DBG_TRACE, "8723A BT is from %s to %s!!\n",
+ (rtlpriv->btcoexist.pre_bt_disabled ?
"disabled" : "enabled"),
- (rtlpriv->btcoexist.cur_bt_disabled ?
+ (rtlpriv->btcoexist.cur_bt_disabled ?
"disabled" : "enabled"));
rtlpriv->btcoexist.pre_bt_disabled
= rtlpriv->btcoexist.cur_bt_disabled;
@@ -1620,22 +1621,22 @@ void rtl8723e_dm_bt_coexist_8723(struct ieee80211_hw *hw)
rtl8723e_dm_bt_bt_enable_disable_check(hw);
if (rtlpriv->btcoexist.bt_ant_num == ANT_X2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTCoex], 2 Ant mechanism\n");
_rtl8723e_dm_bt_coexist_2_ant(hw);
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
"[BTCoex], 1 Ant mechanism\n");
_rtl8723e_dm_bt_coexist_1_ant(hw);
}
if (!rtl8723e_dm_bt_is_same_coexist_state(hw)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
- "[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
- rtlpriv->btcoexist.previous_state_h,
- rtlpriv->btcoexist.previous_state,
- rtlpriv->btcoexist.cstate_h,
- rtlpriv->btcoexist.cstate);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ "[BTCoex], Coexist State[bitMap] change from 0x%x%8x to 0x%x%8x\n",
+ rtlpriv->btcoexist.previous_state_h,
+ rtlpriv->btcoexist.previous_state,
+ rtlpriv->btcoexist.cstate_h,
+ rtlpriv->btcoexist.cstate);
rtlpriv->btcoexist.previous_state
= rtlpriv->btcoexist.cstate;
rtlpriv->btcoexist.previous_state_h
@@ -1658,14 +1659,14 @@ static void rtl8723e_dm_bt_parse_bt_info(struct ieee80211_hw *hw,
else if (i == 1)
hal_coex_8723.bt_retry_cnt = tmp_buf[i];
if (i == len-1)
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "0x%2x]", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "0x%2x]", tmp_buf[i]);
else
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "0x%2x, ", tmp_buf[i]);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "0x%2x, ", tmp_buf[i]);
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"BT info bt_info (Data)= 0x%x\n",
hal_coex_8723.c2h_bt_info_original);
bt_info = hal_coex_8723.c2h_bt_info_original;
@@ -1677,12 +1678,12 @@ static void rtl8723e_dm_bt_parse_bt_info(struct ieee80211_hw *hw,
if (bt_info & BTINFO_B_CONNECTION) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTC2H], BTInfo: bConnect=true\n");
rtlpriv->btcoexist.bt_busy = true;
rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_BT_IDLE;
} else {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"[BTC2H], BTInfo: bConnect=false\n");
rtlpriv->btcoexist.bt_busy = false;
rtlpriv->btcoexist.cstate |= BT_COEX_STATE_BT_IDLE;
@@ -1697,14 +1698,14 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
u8 u1b_tmp = 0;
memset(&c2h_event, 0, sizeof(c2h_event));
u1b_tmp = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL);
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
"&&&&&&: REG_C2HEVT_MSG_NORMAL is 0x%x\n", u1b_tmp);
c2h_event.cmd_id = u1b_tmp & 0xF;
c2h_event.cmd_len = (u1b_tmp & 0xF0) >> 4;
c2h_event.cmd_seq = rtl_read_byte(rtlpriv, REG_C2HEVT_MSG_NORMAL + 1);
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
- c2h_event.cmd_id , c2h_event.cmd_len, c2h_event.cmd_seq);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "cmd_id: %d, cmd_len: %d, cmd_seq: %d\n",
+ c2h_event.cmd_id, c2h_event.cmd_len, c2h_event.cmd_seq);
u1b_tmp = rtl_read_byte(rtlpriv, 0x01AF);
if (u1b_tmp == C2H_EVT_HOST_CLOSE) {
return;
@@ -1714,8 +1715,8 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
}
ptmp_buf = kzalloc(c2h_event.cmd_len, GFP_KERNEL);
if (ptmp_buf == NULL) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "malloc cmd buf failed\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "malloc cmd buf failed\n");
return;
}
@@ -1733,13 +1734,13 @@ void rtl_8723e_c2h_command_handle(struct ieee80211_hw *hw)
break;
case C2H_V0_BT_INFO:
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
"BT info Byte[0] (ID) is 0x%x\n",
c2h_event.cmd_id);
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
"BT info Byte[1] (Seq) is 0x%x\n",
c2h_event.cmd_seq);
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
"BT info Byte[2] (Data)= 0x%x\n", ptmp_buf[0]);
rtl8723e_dm_bt_parse_bt_info(hw, ptmp_buf, c2h_event.cmd_len);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
index 7a46c6a9deae..a36dc6e726d2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hw.c
@@ -122,8 +122,8 @@ void rtl8723e_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -187,8 +187,8 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -227,9 +227,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -242,9 +242,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *((u8 *)val);
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -289,9 +289,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
p_regtoset[index]);
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
break;
}
@@ -328,9 +328,9 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -345,16 +345,16 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break;
}
@@ -526,8 +526,8 @@ void rtl8723e_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -703,8 +703,8 @@ static bool _rtl8712e_init_mac(struct ieee80211_hw *hw)
} while (tmpu2b != 0xc290 && retry < 100);
if (retry >= 100) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "InitMAC(): ePHY configure fail!!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "InitMAC(): ePHY configure fail!!!\n");
return false;
}
@@ -878,14 +878,14 @@ void rtl8723e_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -900,8 +900,8 @@ void rtl8723e_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
@@ -942,8 +942,8 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
err = rtl8723_download_fw(hw, false, FW_8723A_POLLING_TIMEOUT_COUNT);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1009,7 +1009,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
tmp_u1b = efuse_read_1byte(hw, 0x1FA);
if (!(tmp_u1b & BIT(0))) {
rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "PA BIAS path A\n");
}
if (!(tmp_u1b & BIT(4))) {
@@ -1018,7 +1018,7 @@ int rtl8723e_hw_init(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
udelay(10);
rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl8723e_dm_init(hw);
exit:
@@ -1069,16 +1069,16 @@ static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw)
}
switch (version) {
case VERSION_TEST_UMC_CHIP_8723:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: VERSION_TEST_UMC_CHIP_8723.\n");
- break;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: VERSION_TEST_UMC_CHIP_8723.\n");
+ break;
case VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_A_CUT.\n");
break;
case VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Chip Version ID: VERSION_NORMAL_UMC_CHIP_8723_1T1R_B_CUT.\n");
break;
default:
pr_err("Chip Version ID: Unknown. Bug?\n");
@@ -1088,7 +1088,7 @@ static enum version_8723e _rtl8723e_read_chip_version(struct ieee80211_hw *hw)
if (IS_8723_SERIES(version))
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Chip RF Type: %s\n",
(rtlphy->rf_type == RF_2T2R) ? "RF_2T2R" : "RF_1T1R");
return version;
@@ -1103,30 +1103,30 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
u8 mode = MSR_NOLINK;
rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_LOUD,
"clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"Set Network type to AP!\n");
break;
default:
@@ -1153,9 +1153,9 @@ static int _rtl8723e_set_media_status(struct ieee80211_hw *hw,
_rtl8723e_resume_tx_beacon(hw);
_rtl8723e_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1350,8 +1350,8 @@ void rtl8723e_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl8723e_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl8723e_enable_interrupt(hw);
@@ -1363,8 +1363,8 @@ void rtl8723e_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1782,8 +1782,8 @@ static void _rtl8723e_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw)
@@ -1806,19 +1806,19 @@ void rtl8723e_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl8723e_read_adapter_info(hw, false);
} else {
@@ -1914,8 +1914,8 @@ static void rtl8723e_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
@@ -2036,17 +2036,17 @@ static void rtl8723e_update_hal_rate_mask(struct ieee80211_hw *hw,
}
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[4] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4]);
rtl8723e_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
}
@@ -2111,15 +2111,15 @@ bool rtl8723e_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2170,7 +2170,7 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2198,8 +2198,8 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2229,26 +2229,26 @@ void rtl8723e_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
@@ -2288,9 +2288,9 @@ static void rtl8723e_bt_var_init(struct ieee80211_hw *hw)
rtlpriv->btcoexist.bt_radio_shared_type =
rtlpriv->btcoexist.eeprom_bt_radio_shared;
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BT Coexistence = 0x%x\n",
- rtlpriv->btcoexist.bt_coexistence);
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BT Coexistence = 0x%x\n",
+ rtlpriv->btcoexist.bt_coexistence);
if (rtlpriv->btcoexist.bt_coexistence) {
rtlpriv->btcoexist.bt_busy_traffic = false;
@@ -2301,47 +2301,47 @@ static void rtl8723e_bt_var_init(struct ieee80211_hw *hw)
rtlpriv->btcoexist.previous_state = 0;
if (rtlpriv->btcoexist.bt_ant_num == ANT_X2) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_Ant_Num = Antx2\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_Ant_Num = Antx2\n");
} else if (rtlpriv->btcoexist.bt_ant_num == ANT_X1) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_Ant_Num = Antx1\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_Ant_Num = Antx1\n");
}
switch (rtlpriv->btcoexist.bt_coexist_type) {
case BT_2WIRE:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_2Wire\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_2Wire\n");
break;
case BT_ISSC_3WIRE:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_ISSC_3Wire\n");
break;
case BT_ACCEL:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_ACCEL\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_ACCEL\n");
break;
case BT_CSR_BC4:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_CSR_BC4\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_CSR_BC4\n");
break;
case BT_CSR_BC8:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_CSR_BC8\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_CSR_BC8\n");
break;
case BT_RTL8756:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = BT_RTL8756\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = BT_RTL8756\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_CoexistType = Unknown\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_CoexistType = Unknown\n");
break;
}
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BlueTooth BT_Ant_isolation = %d\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BlueTooth BT_Ant_isolation = %d\n",
rtlpriv->btcoexist.bt_ant_isolation);
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
- "BT_RadioSharedType = 0x%x\n",
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_TRACE,
+ "BT_RadioSharedType = 0x%x\n",
rtlpriv->btcoexist.bt_radio_shared_type);
rtlpriv->btcoexist.bt_active_zero_cnt = 0;
rtlpriv->btcoexist.cur_bt_disabled = false;
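
Note that many lines in the hunks above change only their indentation: rtl_dbg( is one column narrower than RT_TRACE(, so every wrapped argument line shifts left by one column to preserve the kernel's open-parenthesis continuation alignment that checkpatch expects. A two-line sketch of the resulting layout:

	rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
		"RT Customized ID: 0x%02X\n", rtlhal->oem_id);	/* args line up under '(' */
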
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
index 5e503dbc463b..7fab02e01a8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/led.c
@@ -19,8 +19,8 @@ void rtl8723e_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -48,8 +48,8 @@ void rtl8723e_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -128,7 +128,7 @@ void rtl8723e_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
+ ledaction);
_rtl8723e_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
index 772aecedf0b4..fa0eed434d4f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c
@@ -38,9 +38,9 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
u32 original_value = 0, readback_value, bitshift;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -54,9 +54,9 @@ u32 rtl8723e_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -69,9 +69,9 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 original_value = 0, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -99,9 +99,9 @@ void rtl8723e_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
}
@@ -185,30 +185,30 @@ static bool _rtl8723e_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
bool rtstatus;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
rtstatus = _rtl8723e_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Write BB Reg Fail!!\n");
return false;
}
if (rtlphy->rf_type == RF_1T2R) {
_rtl8723e_phy_bb_config_1t(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Config to 1T!!\n");
}
if (rtlefuse->autoload_failflag == false) {
rtlphy->pwrgroup_cnt = 0;
rtstatus = _rtl8723e_phy_config_bb_with_pgheaderfile(hw,
BASEBAND_CONFIG_PHY_REG);
}
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("BB_PG Reg Fail!!\n");
return false;
}
rtstatus =
_rtl8723e_phy_config_bb_with_headerfile(hw, BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("AGC Table Fail\n");
return false;
}
@@ -226,12 +226,12 @@ static bool _rtl8723e_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl723MACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl723MACPHY_Array\n");
arraylength = RTL8723E_MACARRAYLENGTH;
ptrarray = RTL8723EMAC_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Img:RTL8192CEMAC_2T_ARRAY\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Img:RTL8192CEMAC_2T_ARRAY\n");
for (i = 0; i < arraylength; i = i + 2)
rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
return true;
@@ -267,20 +267,20 @@ static bool _rtl8723e_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
phy_regarray_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- phy_regarray_table[i],
- phy_regarray_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
}
} else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
for (i = 0; i < agctab_arraylen; i = i + 2) {
rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
agctab_array_table[i + 1]);
udelay(1);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
- agctab_array_table[i],
- agctab_array_table[i + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "The agctab_array_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
}
}
return true;
@@ -296,146 +296,146 @@ static void store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
if (regaddr == RTXAGC_A_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][0]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][0]);
}
if (regaddr == RTXAGC_A_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
pwrgroup_cnt][1]);
}
if (regaddr == RTXAGC_A_CCK1_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][6]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][6]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][7]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][7]);
}
if (regaddr == RTXAGC_A_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][2]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][2]);
}
if (regaddr == RTXAGC_A_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][3]);
}
if (regaddr == RTXAGC_A_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][4]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][4]);
}
if (regaddr == RTXAGC_A_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][5]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][5]);
}
if (regaddr == RTXAGC_B_RATE18_06) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][8]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][8]);
}
if (regaddr == RTXAGC_B_RATE54_24) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][9]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][9]);
}
if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][14]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][14]);
}
if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][15]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][15]);
}
if (regaddr == RTXAGC_B_MCS03_MCS00) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][10]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][10]);
}
if (regaddr == RTXAGC_B_MCS07_MCS04) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][11]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][11]);
}
if (regaddr == RTXAGC_B_MCS11_MCS08) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][12]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][12]);
}
if (regaddr == RTXAGC_B_MCS15_MCS12) {
rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
- rtlphy->pwrgroup_cnt,
- rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
- pwrgroup_cnt][13]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
+ rtlphy->pwrgroup_cnt,
+ rtlphy->mcs_txpwrlevel_origoffset
+ [rtlphy->pwrgroup_cnt][13]);
rtlphy->pwrgroup_cnt++;
}
@@ -473,8 +473,8 @@ static bool _rtl8723e_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
phy_regarray_table_pg[i + 2]);
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -534,21 +534,21 @@ void rtl8723e_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8) rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
void rtl8723e_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
@@ -622,7 +622,7 @@ void rtl8723e_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
u8 cckpowerlevel[2], ofdmpowerlevel[2];
- if (rtlefuse->txpwr_fromeprom == false)
+ if (!rtlefuse->txpwr_fromeprom)
return;
_rtl8723e_get_txpower_index(hw, channel,
&cckpowerlevel[0], &ofdmpowerlevel[0]);
@@ -650,9 +650,9 @@ bool rtl8723e_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
else
ofdmtxpwridx = 0;
- RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
- "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
- power_indbm, ccktxpwridx, ofdmtxpwridx);
+ rtl_dbg(rtlpriv, COMP_TXAGC, DBG_TRACE,
+ "%lx dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
+ power_indbm, ccktxpwridx, ofdmtxpwridx);
for (idx = 0; idx < 14; idx++) {
for (rf_path = 0; rf_path < 2; rf_path++) {
rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
@@ -734,10 +734,10 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -791,7 +791,7 @@ void rtl8723e_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl8723e_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
void rtl8723e_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -808,8 +808,8 @@ void rtl8723e_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723e_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -822,8 +822,8 @@ void rtl8723e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -841,7 +841,7 @@ void rtl8723e_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -861,12 +861,12 @@ u8 rtl8723e_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723e_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem\n");
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -991,9 +991,9 @@ static bool _rtl8723e_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
_rtl8723e_phy_sw_rf_seting(hw, channel);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -1444,24 +1444,24 @@ bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -1472,7 +1472,7 @@ bool rtl8723e_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl8723e_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -1482,9 +1482,9 @@ static void rtl8723e_phy_set_io(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -1497,14 +1497,14 @@ static void rtl8723e_phy_set_io(struct ieee80211_hw *hw)
rtl8723e_dm_write_dig(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl8723e_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -1541,8 +1541,8 @@ static void _rtl8723e_phy_set_rf_sleep(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "Switch RF timeout !!!.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "Switch RF timeout !!!.\n");
return;
}
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
@@ -1569,18 +1569,17 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl8723e_phy_set_rf_on(hw);
}
@@ -1594,8 +1593,8 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
break;
case ERFOFF:
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -1619,33 +1618,33 @@ static bool _rtl8723e_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl8723e_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
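
Beyond the rename, the phy.c hunks above also fix the message typo "sleeped" to "slept" and reflow the jiffies arithmetic onto fewer lines. What is being logged is the standard jiffies bookkeeping for radio power state; a small fragment, assuming kernel context where the global jiffies counter and jiffies_to_msecs() come from <linux/jiffies.h>:

	#include <linux/jiffies.h>

	/* How long the RF has been asleep, in milliseconds.
	 * ppsc->last_sleep_jiffies was recorded when the radio dozed off. */
	unsigned int slept_ms =
		jiffies_to_msecs(jiffies - ppsc->last_sleep_jiffies);
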
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
index 9058527a7f94..b8ed80c84266 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/rf.c
@@ -49,7 +49,7 @@ void rtl8723e_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (rtlefuse->eeprom_regulatory != 0)
turbo_scanoff = true;
- if (mac->act_scanning == true) {
+ if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
@@ -479,13 +479,13 @@ static bool _rtl8723e_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
break;
}
- if (rtstatus != true) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ if (!rtstatus) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
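
rf.c also picks up the boolean-style cleanups seen earlier: explicit comparisons against true and false are dropped, since checkpatch warns about them and bool tests read better bare. A sketch, where do_config() and use_turbo_scan_power() are hypothetical stand-ins:

	bool rtstatus = do_config();	/* hypothetical helper */

	if (!rtstatus)			/* not: if (rtstatus != true) */
		return false;

	if (mac->act_scanning)		/* not: if (mac->act_scanning == true) */
		use_turbo_scan_power();	/* hypothetical helper */
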
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
index a04ce15d5538..e3ee91b7ea8d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c
@@ -362,14 +362,13 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
bool lastseg = ((hdr->frame_control &
cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
u8 bw_40 = 0;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
if (mac->opmode == NL80211_IFTYPE_STATION) {
@@ -477,8 +476,8 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -517,7 +516,7 @@ void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -529,16 +528,15 @@ void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
__le16 fc = hdr->frame_control;
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
@@ -591,7 +589,7 @@ void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc8,
{
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
set_tx_desc_own(pdesc, 1);
@@ -632,7 +630,7 @@ u64 rtl8723e_get_desc(struct ieee80211_hw *hw,
u32 ret = 0;
__le32 *pdesc = (__le32 *)pdesc8;
- if (istx == true) {
+ if (istx) {
switch (desc_name) {
case HW_DESC_OWN:
ret = get_tx_desc_own(pdesc);
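
The trx.c hunks above do more than rename the debug macro: they migrate the TX buffer mapping from the legacy PCI DMA wrappers to the generic DMA API. pci_map_single(pdev, ...) is a thin wrapper around dma_map_single(&pdev->dev, ...), and the direction constants map one-to-one (PCI_DMA_TODEVICE becomes DMA_TO_DEVICE); the pci_* wrappers were being removed from the kernel, so drivers were converted to call the generic API directly. The essential idiom, condensed from the hunks above (kernel context assumed, <linux/dma-mapping.h>):

	dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);

	/* A mapping can fail (e.g. IOMMU exhaustion); check before use. */
	if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
		rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n");
		return;
	}

	/* ... program 'mapping' into the TX descriptor, hand off to HW ... */

	/* Later, once the hardware is done with the buffer: */
	dma_unmap_single(&rtlpci->pdev->dev, mapping, skb->len, DMA_TO_DEVICE);
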
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
index c9b3d9d09c48..c3c990cc032f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c
@@ -225,9 +225,9 @@ static void rtl8723be_dm_init_txpower_tracking(struct ieee80211_hw *hw)
rtlpriv->dm.delta_power_index_last[RF90_PATH_A] = 0;
rtlpriv->dm.power_index_offset[RF90_PATH_A] = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- " rtlpriv->dm.txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "rtlpriv->dm.txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
}
static void rtl8723be_dm_init_dynamic_atc_switch(struct ieee80211_hw *hw)
@@ -265,33 +265,33 @@ static void rtl8723be_dm_find_minimum_rssi(struct ieee80211_hw *hw)
if ((mac->link_state < MAC80211_LINKED) &&
(rtlpriv->dm.entry_min_undec_sm_pwdb == 0)) {
rtl_dm_dig->min_undec_pwdb_for_dm = 0;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
}
if (mac->link_state >= MAC80211_LINKED) {
if (mac->opmode == NL80211_IFTYPE_AP ||
mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.entry_min_undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
static void rtl8723be_dm_check_rssi_monitor(struct ieee80211_hw *hw)
@@ -421,7 +421,7 @@ static void rtl8723be_dm_dig(struct ieee80211_hw *hw)
} else {
dm_digtable->rx_gain_max = dm_dig_max;
dig_min_0 = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "no link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
@@ -576,18 +576,18 @@ static void rtl8723be_dm_false_alarm_counter_statistics(
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
rtl_set_bbreg(hw, DM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail,
- falsealm_cnt->cnt_mcs_fail);
-
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail,
- falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_parity_fail = %d, cnt_rate_illegal = %d, cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
}
static void rtl8723be_dm_dynamic_txpower(struct ieee80211_hw *hw)
@@ -747,18 +747,18 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
/*Initilization ( 7 steps in total )*/
rtlpriv->dm.txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "rtl8723be_dm_txpower_tracking_callback_thermalmeter\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "%s\n", __func__);
thermalvalue = (u8)rtl_get_rfreg(hw,
RF90_PATH_A, RF_T_METER, 0xfc00);
if (!rtlpriv->dm.txpower_track_control || thermalvalue == 0 ||
rtlefuse->eeprom_thermalmeter == 0xFF)
return;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue, rtldm->thermalvalue,
- rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue, rtldm->thermalvalue,
+ rtlefuse->eeprom_thermalmeter);
/*3 Initialize ThermalValues of RFCalibrateInfo*/
if (!rtldm->thermalvalue) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -792,10 +792,10 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
(thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
(rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, rtlpriv->dm.thermalvalue,
+ rtlefuse->eeprom_thermalmeter, delta, delta_lck, delta_iqk);
/* 6 If necessary, do LCK.*/
if (delta_lck >= IQK_THRESHOLD) {
rtlpriv->dm.thermalvalue_lck = thermalvalue;
@@ -876,7 +876,7 @@ static void rtl8723be_dm_txpower_tracking_callback_thermalmeter(
}
rtldm->txpowercount = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "end\n");
}
@@ -890,13 +890,13 @@ void rtl8723be_dm_check_txpower_tracking(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) | BIT(16),
0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 8723be Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8723be Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking !!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
rtl8723be_dm_txpower_tracking_callback_thermalmeter(hw);
rtlpriv->dm.tm_trigger = 0;
}
@@ -914,14 +914,14 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
@@ -949,14 +949,14 @@ static void rtl8723be_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -1073,8 +1073,8 @@ static void rtl8723be_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_digtable->pre_cck_cca_thres = dm_digtable->cur_cck_cca_thres;
dm_digtable->cur_cck_cca_thres = cur_cck_cca_thresh;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
}
static void rtl8723be_dm_dynamic_edcca(struct ieee80211_hw *hw)
@@ -1121,8 +1121,8 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
}
if (rtlpriv->cfg->ops->get_btc_status()) {
if (!rtlpriv->btcoexist.btc_ops->btc_is_bt_disabled(rtlpriv)) {
- RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
- "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
+ rtl_dbg(rtlpriv, COMP_BT_COEXIST, DBG_LOUD,
+ "odm_DynamicATCSwitch(): Disable CFO tracking for BT!!\n");
return;
}
}
@@ -1152,11 +1152,11 @@ static void rtl8723be_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
(rtldm->cfo_ave_pre - cfo_ave) :
(cfo_ave - rtldm->cfo_ave_pre);
- if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
- rtldm->large_cfo_hit = 1;
+ if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) {
+ rtldm->large_cfo_hit = true;
return;
} else
- rtldm->large_cfo_hit = 0;
+ rtldm->large_cfo_hit = false;
rtldm->cfo_ave_pre = cfo_ave;
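
Two further cleanups appear in the dm.c hunks above: the hard-coded function name in the power-tracking log string is replaced with "%s", __func__, so the message can never drift out of sync if the function is renamed, and the 0/1 assignments to large_cfo_hit become proper true/false since the field is treated as a bool. The __func__ idiom:

	/* Preferred: the compiler supplies the name; renames stay in sync. */
	rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "%s\n", __func__);

	/* Fragile: silently wrong after the function is renamed. */
	rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
		"rtl8723be_dm_txpower_tracking_callback_thermalmeter\n");
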
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
index aa56058af56e..b3e6c91e26c0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
@@ -41,22 +41,22 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
unsigned long flag;
u8 idx;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -107,9 +107,9 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
while (!isfw_read) {
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -118,24 +118,24 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
isfw_read = _rtl8723be_check_fw_read_last_h2c(hw,
boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -182,16 +182,16 @@ static void _rtl8723be_fill_h2c_command(struct ieee80211_hw *hw, u8 element_id,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl8723be_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
@@ -229,8 +229,8 @@ void rtl8723be_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
if (bt_ctrl_lps)
mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
- mode, bt_ctrl_lps);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
switch (mode) {
case FW_PS_MIN_MODE:
@@ -572,15 +572,15 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
b_dlok = true;
if (b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG, "H2C_RSVDPAGE:\n",
u1rsvdpageloc, sizeof(u1rsvdpageloc));
rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RSVDPAGE,
sizeof(u1rsvdpageloc), u1rsvdpageloc);
} else
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
/*Should check FW support p2p or not.*/
@@ -607,11 +607,11 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -668,11 +668,11 @@ void rtl8723be_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw,
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
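
The fw.c hunks above are a mechanical conversion from RT_TRACE() to rtl_dbg(): the component mask (COMP_*), verbosity level (DBG_*) and format arguments are carried over unchanged, and only the macro name and the continuation-line indentation move. For orientation, a minimal sketch of the shape such a component- and level-filtered debug macro takes; the field names and the expansion below are illustrative assumptions, not the rtlwifi definition:

	/* Sketch only: the general shape of a filtered debug print in the
	 * style of rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, fmt, ...).
	 * The dbg.* field names here are assumptions for illustration.
	 */
	#define rtl_dbg(rtlpriv, comp, level, fmt, ...)				\
	do {									\
		if (((comp) & (rtlpriv)->dbg.global_debugcomponents) &&	\
		    (level) <= (rtlpriv)->dbg.global_debuglevel)		\
			pr_debug(fmt, ##__VA_ARGS__);				\
	} while (0)
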
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index 979e5bfe5f45..0748aedce2ad 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -37,11 +37,10 @@ static void _rtl8723be_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw,
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
@@ -146,9 +145,9 @@ static void _rtl8723be_set_fw_clock_on(struct ieee80211_hw *hw, u8 rpwm_val,
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set pHalData->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -331,8 +330,8 @@ void rtl8723be_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HAL_DEF_WOWLAN:
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -436,8 +435,8 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -479,9 +478,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -494,9 +493,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *((u8 *)val);
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -534,9 +533,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
}
}
break;
@@ -571,9 +570,9 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -588,16 +587,16 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
}
break;
@@ -705,8 +704,8 @@ void rtl8723be_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -821,8 +820,8 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8723_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init MAC Fail as power on failure\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init MAC Fail as power on failure\n");
return false;
}
@@ -859,7 +858,7 @@ static bool _rtl8723be_init_mac(struct ieee80211_hw *hw)
rtl_write_word(rtlpriv, REG_CR, 0x2ff);
if (!rtlhal->mac_func_enable) {
- if (_rtl8723be_llt_table_init(hw) == false)
+ if (!_rtl8723be_llt_table_init(hw))
return false;
}
@@ -1121,14 +1120,14 @@ void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 sec_reg_value;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1143,8 +1142,8 @@ void rtl8723be_enable_hw_security_config(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1208,8 +1207,8 @@ static bool _rtl8723be_check_pcie_dma_hang(struct rtl_priv *rtlpriv)
*/
tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3);
if ((tmp & BIT(0)) || (tmp & BIT(1))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "CheckPcieDMAHang8723BE(): true!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CheckPcieDMAHang8723BE(): true!!\n");
return true;
}
return false;
@@ -1222,8 +1221,8 @@ static void _rtl8723be_reset_pcie_interface_dma(struct rtl_priv *rtlpriv,
bool release_mac_rx_pause;
u8 backup_pcie_dma_pause;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "ResetPcieInterfaceDMA8723BE()\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "ResetPcieInterfaceDMA8723BE()\n");
/* Revise Note: Follow the document "PCIe RX DMA Hang Reset Flow_v03"
* released by SD1 Alan.
@@ -1375,8 +1374,8 @@ int rtl8723be_hw_init(struct ieee80211_hw *hw)
err = rtl8723_download_fw(hw, true, FW_8723B_POLLING_TIMEOUT_COUNT);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now..\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now..\n");
err = 1;
goto exit;
}
@@ -1460,7 +1459,7 @@ static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG1);
if ((value32 & (CHIP_8723B)) != CHIP_8723B)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "unknown chip version\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "unknown chip version\n");
else
version = (enum version_8723e)CHIP_8723B;
@@ -1476,9 +1475,9 @@ static enum version_8723e _rtl8723be_read_chip_version(struct ieee80211_hw *hw)
if (((value32 & EXT_VENDOR_ID) >> 18) == 0x01)
version = (enum version_8723e)(version | CHIP_VENDOR_SMIC);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
- "RF_2T2R" : "RF_1T1R");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
+ "RF_2T2R" : "RF_1T1R");
return version;
}
@@ -1494,26 +1493,26 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
mode = MSR_NOLINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
mode = MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
mode = MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
mode = MSR_AP;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -1538,9 +1537,9 @@ static int _rtl8723be_set_media_status(struct ieee80211_hw *hw,
_rtl8723be_resume_tx_beacon(hw);
_rtl8723be_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- mode);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ mode);
}
rtl_write_byte(rtlpriv, MSR, bt_msr | mode);
@@ -1702,8 +1701,8 @@ void rtl8723be_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl8723be_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl8723be_enable_interrupt(hw);
@@ -1715,8 +1714,8 @@ void rtl8723be_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -1747,15 +1746,15 @@ static void _rtl8723be_read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 path, addr = EEPROM_TX_PWR_INX, group, cnt = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM8723BE(): PROMContent[0x%x]=0x%x\n",
- (addr + 1), hwinfo[addr + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM8723BE(): PROMContent[0x%x]=0x%x\n",
+ (addr + 1), hwinfo[addr + 1]);
if (0xFF == hwinfo[addr + 1]) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (path = 0; path < MAX_RF_PATH; path++) {
/* 2.4G default value */
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
@@ -2099,8 +2098,8 @@ static void _rtl8723be_read_adapter_info(struct ieee80211_hw *hw,
rtlefuse->board_type |= BIT(2); /* ODM_BOARD_BT */
rtlhal->board_type = rtlefuse->board_type;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "board_type = 0x%x\n", rtlefuse->board_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "board_type = 0x%x\n", rtlefuse->board_type);
rtlhal->package_type = _rtl8723be_read_package_type(hw);
@@ -2237,8 +2236,8 @@ static void _rtl8723be_hal_customized_behavior(struct ieee80211_hw *hw)
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}
void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
@@ -2255,18 +2254,18 @@ void rtl8723be_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl8723be_read_adapter_info(hw, false);
} else {
@@ -2417,8 +2416,8 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
sta_entry->ratr_index = ratr_index;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "ratr_bitmap :%x\n", ratr_bitmap);
*(u32 *)&rate_mask = (ratr_bitmap & 0x0fffffff) |
(ratr_index << 28);
rate_mask[0] = macid;
@@ -2431,13 +2430,13 @@ static void rtl8723be_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
- rate_mask[2], rate_mask[3],
- rate_mask[4], rate_mask[5],
- rate_mask[6]);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
+ rate_mask[2], rate_mask[3],
+ rate_mask[4], rate_mask[5],
+ rate_mask[6]);
rtl8723be_fill_h2c_cmd(hw, H2C_8723B_RA_MASK, 7, rate_mask);
_rtl8723be_set_bcn_ctrl_reg(hw, BIT(3), 0);
}
@@ -2500,15 +2499,15 @@ bool rtl8723be_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -2559,7 +2558,7 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -2587,8 +2586,8 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -2618,26 +2617,26 @@ void rtl8723be_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
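
Besides the rtl_dbg() conversion, hw.c moves the beacon-queue teardown off the deprecated PCI DMA compatibility wrappers and onto the generic DMA API, and replaces the `== false` comparison with the idiomatic `!` test. The wrapper and the generic call are equivalent; the compat layer simply forwarded to the dma_* functions on the device embedded in the struct pci_dev, as this minimal sketch shows:

	/* Deprecated PCI DMA wrapper ... */
	pci_unmap_single(pdev, addr, len, PCI_DMA_TODEVICE);

	/* ... and the generic DMA API call it forwarded to: */
	dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
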
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
index 525f2c47da5b..3954624ab314 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/led.c
@@ -19,8 +19,8 @@ void rtl8723be_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -47,8 +47,8 @@ void rtl8723be_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -127,6 +127,6 @@ void rtl8723be_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n", ledaction);
_rtl8723be_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 9528ac3f3b87..f09f55b0468a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -34,9 +34,9 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -46,9 +46,9 @@ u32 rtl8723be_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -59,9 +59,9 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, path);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -77,9 +77,9 @@ void rtl8723be_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path path,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, path);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, path);
}
@@ -158,18 +158,18 @@ static bool _rtl8723be_check_positive(struct ieee80211_hw *hw,
rtlhal->type_alna << 16 |
rtlhal->type_apa << 24;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
- cond1, cond2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
- driver1, driver2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1, cond2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1, driver2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Board, Package) = (0x%X, 0x%X)\n",
- rtlhal->board_type, rtlhal->package_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "(Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "(Board, Package) = (0x%X, 0x%X)\n",
+ rtlhal->board_type, rtlhal->package_type);
/*============== Value Defined Check ===============*/
/*QFN Type [15:12] and Cut Version [27:24] need to do value check*/
@@ -283,9 +283,9 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n",
- path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n",
+ path);
return;
}
@@ -304,15 +304,15 @@ static void _rtl8723be_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_24g[path][txnum][3] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
- band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_SetTxPowerByRateBase()\n",
+ band);
}
}
@@ -325,9 +325,9 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 value = 0;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
- path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
+ path);
return 0;
}
@@ -346,15 +346,15 @@ static u8 _rtl8723be_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_24g[path][txnum][3];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
- band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_GetTxPowerByRateBase()\n",
+ band);
}
return value;
@@ -477,8 +477,8 @@ static void _rtl8723be_phy_convert_txpower_dbm_to_relative_value(
&rtlphy->tx_power_by_rate_offset[BAND_ON_2_4G][rfpath][RF_2TX][7],
0, 3, base);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "<===_rtl8723be_phy_convert_txpower_dbm_to_relative_value()\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "<===%s\n", __func__);
}
static void phy_txpower_by_rate_config(struct ieee80211_hw *hw)
@@ -588,7 +588,7 @@ static bool _rtl8723be_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read rtl8723beMACPHY_Array\n");
return rtl8723be_phy_config_with_headerfile(hw,
RTL8723BEMAC_1T_ARRAY, RTL8723BEMAC_1T_ARRAYLEN,
@@ -684,16 +684,16 @@ static void _rtl8723be_store_tx_power_by_rate(struct ieee80211_hw *hw,
u8 rate_section = _rtl8723be_get_rate_section_index(regaddr);
if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid Band %d\n", band);
return;
}
if (rfpath > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR,
- "Invalid RfPath %d\n", rfpath);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR,
+ "Invalid RfPath %d\n", rfpath);
return;
}
if (txnum > MAX_RF_PATH - 1) {
- RT_TRACE(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
+ rtl_dbg(rtlpriv, FPHY, PHY_TXPWR, "Invalid TxNum %d\n", txnum);
return;
}
@@ -734,8 +734,8 @@ static bool _rtl8723be_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
}
} else {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
}
return true;
}
@@ -747,7 +747,7 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
bool ret = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
ret = rtl8723be_phy_config_with_headerfile(hw,
@@ -762,8 +762,8 @@ bool rtl8723be_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
case RF90_PATH_C:
break;
case RF90_PATH_D:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpath);
break;
}
return ret;
@@ -783,21 +783,21 @@ void rtl8723be_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static u8 _rtl8723be_phy_get_ratesection_intxpower_byrate(enum radio_path path,
@@ -950,16 +950,16 @@ static u8 _rtl8723be_get_txpower_index(struct ieee80211_hw *hw, u8 path,
if (channel > 14 || channel < 1) {
index = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Illegal channel!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Illegal channel!\n");
}
if (RX_HAL_IS_CCK_RATE(rate))
txpower = rtlefuse->txpwrlevel_cck[path][index];
else if (DESC92C_RATE6M <= rate)
txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "invalid rate\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "invalid rate\n");
if (DESC92C_RATE6M <= rate && rate <= DESC92C_RATE54M &&
!RX_HAL_IS_CCK_RATE(rate))
@@ -1099,11 +1099,11 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Rate!!\n");
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid RFPath!!\n");
}
}
@@ -1187,10 +1187,10 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 reg_bw_opmode;
u8 reg_prsr_rsc;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" : "40MHz");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
if (is_hal_stop(rtlhal)) {
rtlphy->set_bwmode_inprogress = false;
@@ -1244,7 +1244,7 @@ void rtl8723be_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
}
rtl8723be_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -1261,8 +1261,8 @@ void rtl8723be_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723be_phy_set_bw_mode_callback(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "false driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -1275,8 +1275,8 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u32 delay = 0;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
do {
@@ -1296,7 +1296,7 @@ void rtl8723be_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
break;
} while (true);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -1316,13 +1316,13 @@ u8 rtl8723be_phy_sw_chnl(struct ieee80211_hw *hw)
rtlphy->sw_chnl_step = 0;
if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
rtl8723be_phy_sw_chnl_callback(hw);
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false schedule workitem current channel %d\n",
- rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false schedule workitem current channel %d\n",
+ rtlphy->current_channel);
rtlphy->sw_chnl_inprogress = false;
} else {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
rtlphy->sw_chnl_inprogress = false;
}
return 1;
@@ -1428,9 +1428,9 @@ static bool _rtl8723be_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- currentcmd->cmdid);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ currentcmd->cmdid);
break;
}
@@ -2058,7 +2058,7 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl8723be_phy_path_a_iqk(hw);
if (patha_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Path A Tx IQK Success!!\n");
result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
0x3FF0000) >> 16;
@@ -2066,36 +2066,36 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
break;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Tx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Tx IQK Fail!!\n");
}
}
/* path A RX IQK */
for (i = 0; i < retrycount; i++) {
patha_ok = _rtl8723be_phy_path_a_rx_iqk(hw);
if (patha_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Rx IQK Success!!\n");
result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
0x3FF0000) >> 16;
result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
0x3FF0000) >> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path A Rx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path A Rx IQK Fail!!\n");
}
if (0x00 == patha_ok)
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Path A IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Path A IQK Fail!!\n");
if (is2t) {
/* path B TX IQK */
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl8723be_phy_path_b_iqk(hw);
if (pathb_ok == 0x01) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Tx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Tx IQK Success!!\n");
result[t][4] = (rtl_get_bbreg(hw, 0xe94,
MASKDWORD) &
0x3FF0000) >> 16;
@@ -2104,15 +2104,15 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Tx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Tx IQK Fail!!\n");
}
/* path B RX IQK */
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl8723be_phy_path_b_rx_iqk(hw);
if (pathb_ok == 0x03) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Rx IQK Success!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Rx IQK Success!!\n");
result[t][6] = (rtl_get_bbreg(hw, 0xea4,
MASKDWORD) &
0x3FF0000) >> 16;
@@ -2121,8 +2121,8 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
0x3FF0000) >> 16;
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Path B Rx IQK Fail!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Path B Rx IQK Fail!!\n");
}
}
@@ -2150,7 +2150,7 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x01008c00);
rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x01008c00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "8723be IQK Finish!!\n");
}
static u8 _get_right_chnl_place_for_iqk(u8 chnl)
@@ -2224,14 +2224,14 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
bool bmain, bool is2t)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (bmain) /* left antenna */
rtl_set_bbreg(hw, 0x92C, MASKDWORD, 0x1);
@@ -2418,24 +2418,24 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool b_postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
b_postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
b_postprocessing = true;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", iotype);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", iotype);
break;
}
} while (false);
@@ -2446,7 +2446,7 @@ bool rtl8723be_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl8723be_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -2456,9 +2456,9 @@ static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
dm_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
@@ -2472,14 +2472,14 @@ static void rtl8723be_phy_set_io(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RCCK0_CCA, 0xff0000, 0x40);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ rtlphy->current_io_type);
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl8723be_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -2522,16 +2522,16 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 initializecount = 0;
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl8723be_phy_set_rf_on(hw);
}
@@ -2555,27 +2555,27 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
@@ -2599,34 +2599,34 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFSLEEP awaked:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->last_awake_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFSLEEP awaked:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies));
ppsc->last_sleep_jiffies = jiffies;
_rtl8723be_phy_set_rf_sleep(hw);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", rfpwr_state);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", rfpwr_state);
bresult = false;
break;
}
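
Two further cleanups appear in the phy.c hunks: hard-coded function names inside format strings are replaced with "%s", __func__, so the log text can no longer drift out of sync with the code when a function is renamed, and the "sleeped" typo in the ERFON message becomes "slept". The __func__ pattern, taken directly from the hunk above:

	/* Before: the name is a string literal and goes stale on rename. */
	rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
		"<===_rtl8723be_phy_convert_txpower_dbm_to_relative_value()\n");

	/* After: __func__ always expands to the enclosing function's name. */
	rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE, "<===%s\n", __func__);
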
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index af72e489e31c..8a856fb42b8d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -478,12 +478,12 @@ static bool _rtl8723be_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
index b8081e196cdf..559ab78687c3 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c
@@ -340,9 +340,9 @@ bool rtl8723be_rx_query_desc(struct ieee80211_hw *hw,
else
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -442,10 +442,10 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_8723be));
@@ -459,9 +459,9 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
_rtl8723be_insert_emcontent(ptcb_desc,
(__le32 *)(skb->data));
}
@@ -551,8 +551,8 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
/* from being overwritten by retried packet rate.*/
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -583,7 +583,7 @@ void rtl8723be_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_bmc(pdesc, 1);
}
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
@@ -595,13 +595,12 @@ void rtl8723be_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
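
The trx.c hunks convert the TX buffer mappings the same way, keeping the failure check intact: a streaming DMA mapping can fail, so the returned handle must be validated with dma_mapping_error() before it is handed to hardware, never compared against 0. Reduced to its essentials (pdev stands in for rtlpci->pdev here):

	dma_addr_t mapping = dma_map_single(&pdev->dev, skb->data,
					    skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, mapping))
		return;	/* 'mapping' is not a valid bus address; do not use it */
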
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
index 37036e653e56..36c00b89ccae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/fw_common.c
@@ -41,7 +41,7 @@ void rtl8723_write_fw(struct ieee80211_hw *hw,
u32 page_nums, remain_size;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -63,7 +63,7 @@ void rtl8723_write_fw(struct ieee80211_hw *hw,
page = page_nums;
rtl_fw_page_write(hw, page, (bufferptr + offset), remain_size);
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW write done.\n");
}
EXPORT_SYMBOL_GPL(rtl8723_write_fw);
@@ -109,8 +109,8 @@ void rtl8723be_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- " _8051Reset8723be(): 8051 reset success .\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "_8051Reset8723be(): 8051 reset success .\n");
}
EXPORT_SYMBOL_GPL(rtl8723be_firmware_selfreset);
@@ -143,9 +143,9 @@ int rtl8723_fw_free_to_go(struct ieee80211_hw *hw, bool is_8723be,
do {
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
if (value32 & WINTINI_RDY) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
- "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
- value32);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE,
+ "Polling FW ready success!! REG_MCUFWDL:0x%08x .\n",
+ value32);
err = 0;
goto exit;
}
@@ -188,10 +188,10 @@ int rtl8723_download_fw(struct ieee80211_hw *hw,
else
max_page = 8;
if (rtlpriv->cfg->ops->is_fw_header(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "Firmware Version(%d), Signature(%#x), Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (int)sizeof(struct rtlwifi_firmware_header));
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware Version(%d), Signature(%#x), Size(%d)\n",
+ pfwheader->version, pfwheader->signature,
+ (int)sizeof(struct rtlwifi_firmware_header));
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
index debecc623a01..47b6c1aa36b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723com/phy_common.c
@@ -14,15 +14,15 @@ u32 rtl8723_phy_query_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n", regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = rtl8723_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
- regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n", bitmask,
+ regaddr, originalvalue);
return returnvalue;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_query_bb_reg);
@@ -33,9 +33,9 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask,
- data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n", regaddr, bitmask,
+ data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -45,21 +45,17 @@ void rtl8723_phy_set_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_set_bb_reg);
u32 rtl8723_phy_calculate_bit_shift(u32 bitmask)
{
- u32 i;
+ u32 i = ffs(bitmask);
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ return i ? i - 1 : 32;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_calculate_bit_shift);
@@ -105,9 +101,9 @@ u32 rtl8723_phy_rf_serial_read(struct ieee80211_hw *hw,
else
retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
BLSSIREADBACKDATA);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFR-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFR-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
return retvalue;
}
EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_read);
@@ -130,10 +126,10 @@ void rtl8723_phy_rf_serial_write(struct ieee80211_hw *hw,
newoffset = offset;
data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset,
- data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset,
+ data_and_addr);
}
EXPORT_SYMBOL_GPL(rtl8723_phy_rf_serial_write);
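
The one behavioral-looking change in phy_common.c is the rewrite of rtl8723_phy_calculate_bit_shift() around ffs(). ffs() returns the 1-based index of the least significant set bit, and 0 when no bit is set, so `i ? i - 1 : 32` reproduces the old loop exactly: the loop returned the 0-based index of the first set bit, or fell through to 32 for an empty mask. A quick check of the equivalence:

	/* bitmask = 0x00ff0000: old loop breaks at i = 16;
	 *                       ffs() gives 17, and 17 - 1 = 16.
	 * bitmask = 0x1:        old loop breaks at i = 0;
	 *                       ffs() gives 1, and 1 - 1 = 0.
	 * bitmask = 0:          old loop falls through with i = 32;
	 *                       ffs() gives 0, so the ternary returns 32.
	 */
	u32 calculate_bit_shift(u32 bitmask)
	{
		u32 i = ffs(bitmask);

		return i ? i - 1 : 32;
	}
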
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 97a30ccf0b27..f6bff0ebd6b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -437,26 +437,26 @@ static void rtl8821ae_dm_find_minimum_rssi(struct ieee80211_hw *hw)
mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.entry_min_undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
} else {
rtl_dm_dig->min_undec_pwdb_for_dm =
rtlpriv->dm.entry_min_undec_sm_pwdb;
- RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "MinUndecoratedPWDBForDM =%d\n",
- rtl_dm_dig->min_undec_pwdb_for_dm);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "MinUndecoratedPWDBForDM =%d\n",
+ rtl_dm_dig->min_undec_pwdb_for_dm);
}
static void rtl8812ae_dm_rssi_dump_to_register(struct ieee80211_hw *hw)
@@ -626,11 +626,11 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
u8 dm_dig_max, dm_dig_min, offset;
u8 current_igi = dm_digtable->cur_igvalue;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "\n");
if (mac->act_scanning) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Return: In Scan Progress\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Return: In Scan Progress\n");
return;
}
@@ -666,10 +666,10 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
dm_digtable->rx_gain_max =
dm_digtable->rssi_val_min + offset;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x\n",
- dm_digtable->rssi_val_min,
- dm_digtable->rx_gain_max);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_digtable->rssi_val_min=0x%x,dm_digtable->rx_gain_max = 0x%x\n",
+ dm_digtable->rssi_val_min,
+ dm_digtable->rx_gain_max);
if (rtlpriv->dm.one_entry_only) {
offset = 0;
@@ -682,22 +682,21 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
dig_min_0 =
dm_digtable->rssi_val_min - offset;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "bOneEntryOnly=TRUE, dig_min_0=0x%x\n",
- dig_min_0);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "bOneEntryOnly=TRUE, dig_min_0=0x%x\n",
+ dig_min_0);
} else {
dig_min_0 = dm_dig_min;
}
} else {
dm_digtable->rx_gain_max = dm_dig_max;
dig_min_0 = dm_dig_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "No Link\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "No Link\n");
}
if (rtlpriv->falsealm_cnt.cnt_all > 10000) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Abnormally false alarm case.\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Abnormally false alarm case.\n");
if (dm_digtable->large_fa_hit != 3)
dm_digtable->large_fa_hit++;
@@ -728,23 +727,23 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
dig_min_0;
dm_digtable->rx_gain_min =
dig_min_0;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Normal Case: At Lower Bound\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Normal Case: At Lower Bound\n");
} else {
dm_digtable->forbidden_igi--;
dm_digtable->rx_gain_min =
(dm_digtable->forbidden_igi + 1);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Normal Case: Approach Lower Bound\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Normal Case: Approach Lower Bound\n");
}
} else {
dm_digtable->large_fa_hit = 0;
}
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "pDM_DigTable->LargeFAHit=%d\n",
- dm_digtable->large_fa_hit);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "pDM_DigTable->LargeFAHit=%d\n",
+ dm_digtable->large_fa_hit);
if (rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10)
dm_digtable->rx_gain_min = dm_dig_min;
@@ -754,15 +753,15 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
/*Adjust initial gain by false alarm*/
if (mac->link_state >= MAC80211_LINKED) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "DIG AfterLink\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "DIG AfterLink\n");
if (first_connect) {
if (dm_digtable->rssi_val_min <= dig_max_of_min)
current_igi = dm_digtable->rssi_val_min;
else
current_igi = dig_max_of_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "First Connect\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "First Connect\n");
} else {
if (rtlpriv->falsealm_cnt.cnt_all > DM_DIG_FA_TH2)
current_igi = current_igi + 4;
@@ -774,17 +773,17 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if ((rtlpriv->dm.dbginfo.num_qry_beacon_pkt < 10) &&
(rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)) {
current_igi = dm_digtable->rx_gain_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Beacon is less than 10 and FA is less than 768, IGI GOES TO 0x1E!!!!!!!!!!!!\n");
}
}
} else {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "DIG BeforeLink\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "DIG BeforeLink\n");
if (first_disconnect) {
current_igi = dm_digtable->rx_gain_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "First DisConnect\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "First DisConnect\n");
} else {
/* 2012.03.30 LukeLee: enable DIG before
* link but with very high thresholds
@@ -799,11 +798,11 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if (current_igi >= 0x3e)
current_igi = 0x3e;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "England DIG\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "England DIG\n");
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "DIG End Adjust IGI\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "DIG End Adjust IGI\n");
/* Check initial gain by upper/lower bound*/
if (current_igi > dm_digtable->rx_gain_max)
@@ -811,13 +810,13 @@ static void rtl8821ae_dm_dig(struct ieee80211_hw *hw)
if (current_igi < dm_digtable->rx_gain_min)
current_igi = dm_digtable->rx_gain_min;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "rx_gain_max=0x%x, rx_gain_min=0x%x\n",
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "rx_gain_max=0x%x, rx_gain_min=0x%x\n",
dm_digtable->rx_gain_max, dm_digtable->rx_gain_min);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "CurIGValue=0x%x\n", current_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "TotalFA=%d\n", rtlpriv->falsealm_cnt.cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "CurIGValue=0x%x\n", current_igi);
rtl8821ae_dm_write_dig(hw, current_igi);
dm_digtable->media_connect_0 =
@@ -880,12 +879,12 @@ static void rtl8821ae_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0);
rtl_set_bbreg(hw, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Cck_fail=%d\n",
- falsealm_cnt->cnt_cck_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "cnt_ofdm_fail=%d\n",
- falsealm_cnt->cnt_ofdm_fail);
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "Total False Alarm=%d\n",
- falsealm_cnt->cnt_all);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Cnt_Cck_fail=%d\n",
+ falsealm_cnt->cnt_cck_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "cnt_ofdm_fail=%d\n",
+ falsealm_cnt->cnt_ofdm_fail);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Total False Alarm=%d\n",
+ falsealm_cnt->cnt_all);
}
static void rtl8812ae_dm_check_txpower_tracking_thermalmeter(
@@ -896,13 +895,13 @@ static void rtl8812ae_dm_check_txpower_tracking_thermalmeter(
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E,
BIT(17) | BIT(16), 0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 8812 Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8812 Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
rtl8812ae_dm_txpower_tracking_callback_thermalmeter(hw);
}
@@ -981,8 +980,8 @@ void rtl8821ae_dm_update_init_rate(struct ieee80211_hw *hw, u8 rate)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 p = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Get C2H Command! Rate=0x%x\n", rate);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Get C2H Command! Rate=0x%x\n", rate);
rtldm->tx_rate = rate;
@@ -1145,9 +1144,9 @@ u8 rtl8821ae_hw_rate_to_mrate(struct ieee80211_hw *hw, u8 rate)
ret_rate = MGN_VHT2SS_MCS9;
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "HwRateToMRate8812(): Non supported Rate [%x]!!!\n",
- rate);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "HwRateToMRate8812(): Non supported Rate [%x]!!!\n",
+ rate);
break;
}
return ret_rate;
@@ -1187,8 +1186,8 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
tx_rate =
rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s\n", __func__);
/*20130429 Mimic Modify High Rate BBSwing Limit.*/
if (tx_rate != 0xFF) {
/*CCK*/
@@ -1259,13 +1258,13 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
else
pwr_tracking_limit = 24;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxRate=0x%x, PwrTrackingLimit=%d\n",
- tx_rate, pwr_tracking_limit);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxRate=0x%x, PwrTrackingLimit=%d\n",
+ tx_rate, pwr_tracking_limit);
if (method == BBSWING) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>rtl8812ae_dm_txpwr_track_set_pwr\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s\n", __func__);
if (rf_path == RF90_PATH_A) {
u32 tmp;
@@ -1276,10 +1275,10 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
pwr_tracking_limit :
rtldm->ofdm_index[RF90_PATH_A];
tmp = final_swing_idx[RF90_PATH_A];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
- rtldm->ofdm_index[RF90_PATH_A],
- final_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A],
+ final_swing_idx[RF90_PATH_A]);
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[tmp]);
@@ -1292,20 +1291,20 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
pwr_tracking_limit :
rtldm->ofdm_index[RF90_PATH_B];
tmp = final_swing_idx[RF90_PATH_B];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n",
- rtldm->ofdm_index[RF90_PATH_B],
- final_swing_idx[RF90_PATH_B]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_B]=%d, pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_B]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_B],
+ final_swing_idx[RF90_PATH_B]);
rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000,
txscaling_tbl[tmp]);
}
} else if (method == MIX_MODE) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->DefaultOfdmIndex=%d, pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
- rtldm->default_ofdm_index,
- rtldm->absolute_ofdm_swing_idx[rf_path],
- rf_path);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->DefaultOfdmIndex=%d, pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index,
+ rtldm->absolute_ofdm_swing_idx[rf_path],
+ rf_path);
final_ofdm_swing_index = rtldm->default_ofdm_index +
rtldm->absolute_ofdm_swing_idx[rf_path];
@@ -1333,10 +1332,10 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtlphy->current_channel,
RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Over BBSwing Limit ,PwrTrackingLimit = %d ,Remnant TxAGC Value = %d\n",
- pwr_tracking_limit,
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Over BBSwing Limit ,PwrTrackingLimit = %d ,Remnant TxAGC Value = %d\n",
+ pwr_tracking_limit,
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else if (final_ofdm_swing_index < 0) {
rtldm->remnant_cck_idx = final_ofdm_swing_index;
/* CCK Follow the same compensate value as Path A*/
@@ -1352,15 +1351,15 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else {
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[(u8)final_ofdm_swing_index]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Compensate with BBSwing, Final_OFDM_Swing_Index = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Compensate with BBSwing, Final_OFDM_Swing_Index = %d\n",
final_ofdm_swing_index);
/*If TxAGC has changed, reset TxAGC again*/
if (rtldm->modify_txagc_flag_path_a) {
@@ -1372,9 +1371,9 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtlphy->current_channel, RF90_PATH_A);
rtldm->modify_txagc_flag_path_a = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "******Path_A pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
}
}
}
@@ -1395,9 +1394,9 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_B);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
- pwr_tracking_limit,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
+ pwr_tracking_limit,
rtldm->remnant_ofdm_swing_idx[rf_path]);
} else if (final_ofdm_swing_index < 0) {
rtldm->remnant_ofdm_swing_idx[rf_path] =
@@ -1412,15 +1411,15 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_B);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else {
rtl_set_bbreg(hw, RB_TXSCALE, 0xFFE00000,
txscaling_tbl[(u8)final_ofdm_swing_index]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
final_ofdm_swing_index);
/*If TxAGC has changed, reset TxAGC again*/
if (rtldm->modify_txagc_flag_path_b) {
@@ -1433,8 +1432,8 @@ void rtl8812ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtldm->modify_txagc_flag_path_b =
false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_B pDM_Odm->Modify_TxAGC_Flag = FALSE\n");
}
}
}
@@ -1474,18 +1473,18 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
- rtldm->swing_idx_cck_base,
- rtldm->swing_idx_ofdm_base[RF90_PATH_A],
- rtldm->default_ofdm_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->BbSwingIdxCckBase: %d, pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ rtldm->swing_idx_cck_base,
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index);
thermal_value = (u8)rtl_get_rfreg(hw, RF90_PATH_A,
/*0x42: RF Reg[15:10] 88E*/
RF_T_METER_8812A, 0xfc00);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
if (!rtldm->txpower_track_control ||
rtlefuse->eeprom_thermalmeter == 0 ||
rtlefuse->eeprom_thermalmeter == 0xFF)
@@ -1494,8 +1493,8 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/* 3. Initialize ThermalValues of RFCalibrateInfo*/
if (rtlhal->reloadtxpowerindex)
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
/*4. Calculate average thermal meter*/
rtldm->thermalvalue_avg[rtldm->thermalvalue_avg_index] = thermal_value;
@@ -1514,9 +1513,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
if (thermal_value_avg_count) {
thermal_value = (u8)(thermal_value_avg /
thermal_value_avg_count);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
}
/*5. Calculate delta, delta_LCK, delta_IQK.
@@ -1533,17 +1532,17 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
(thermal_value - rtldm->thermalvalue_iqk) :
(rtldm->thermalvalue_iqk - thermal_value);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
- delta, delta_lck, delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk);
/* 6. If necessary, do LCK.
* Delta temperature is equal to or larger than 20 centigrade.
*/
if (delta_lck >= IQK_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_LCK(%d) >= Threshold_IQK(%d)\n",
- delta_lck, IQK_THRESHOLD);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD);
rtldm->thermalvalue_lck = thermal_value;
rtl8821ae_phy_lc_calibrate(hw);
}
@@ -1564,9 +1563,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tup_a[%d] = %d\n",
- delta, delta_swing_table_idx_tup_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
rtldm->delta_power_index[RF90_PATH_A] =
@@ -1576,13 +1575,13 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
delta_swing_table_idx_tup_a[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tup_b[%d] = %d\n",
- delta, delta_swing_table_idx_tup_b[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tup_b[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_b[delta]);
rtldm->delta_power_index_last[RF90_PATH_B] =
rtldm->delta_power_index[RF90_PATH_B];
rtldm->delta_power_index[RF90_PATH_B] =
@@ -1592,13 +1591,13 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
delta_swing_table_idx_tup_b[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tdown_a[%d] = %d\n",
- delta, delta_swing_table_idx_tdown_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
@@ -1608,13 +1607,13 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] =
-1 * delta_swing_table_idx_tdown_a[delta];
/* Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
- delta, delta_swing_table_idx_tdown_b[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "deltaSwingTableIdx_TDOWN_B[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_b[delta]);
rtldm->delta_power_index_last[RF90_PATH_B] =
rtldm->delta_power_index[RF90_PATH_B];
@@ -1625,15 +1624,15 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
-1 * delta_swing_table_idx_tdown_b[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_B] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_B]);
}
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "============================= [Path-%c]Calculating PowerIndexOffset =============================\n",
- (p == RF90_PATH_A ? 'A' : 'B'));
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "============================= [Path-%c]Calculating PowerIndexOffset =============================\n",
+ (p == RF90_PATH_A ? 'A' : 'B'));
if (rtldm->delta_power_index[p] ==
rtldm->delta_power_index_last[p])
@@ -1647,12 +1646,12 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/* Power Index Diff between 2
* times Power Tracking
*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "[Path-%c] PowerIndexOffset(%d) =DeltaPowerIndex(%d) -DeltaPowerIndexLast(%d)\n",
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->power_index_offset[p],
- rtldm->delta_power_index[p] ,
- rtldm->delta_power_index_last[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "[Path-%c] PowerIndexOffset(%d) =DeltaPowerIndex(%d) -DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->power_index_offset[p],
+ rtldm->delta_power_index[p],
+ rtldm->delta_power_index_last[p]);
rtldm->ofdm_index[p] =
rtldm->swing_idx_ofdm_base[p] +
@@ -1666,17 +1665,17 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
/****Print BB Swing Base and Index Offset */
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_cck,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_cck,
rtldm->swing_idx_cck_base,
rtldm->power_index_offset[p]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_ofdm[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p],
- rtldm->power_index_offset[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]);
/*7.1 Handle boundary conditions of index.*/
@@ -1685,32 +1684,32 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
else if (rtldm->ofdm_index[p] < ofdm_min_index)
rtldm->ofdm_index[p] = ofdm_min_index;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "\n\n====================================================================================\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "\n\n====================================================================================\n");
if (rtldm->cck_index > TXSCALE_TABLE_SIZE - 1)
rtldm->cck_index = TXSCALE_TABLE_SIZE - 1;
else if (rtldm->cck_index < 0)
rtldm->cck_index = 0;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The thermal meter is unchanged or TxPowerTracking OFF(%d): ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
- rtldm->txpower_track_control,
- thermal_value,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The thermal meter is unchanged or TxPowerTracking OFF(%d): ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
rtldm->power_index_offset[p] = 0;
}
/*Print Swing base & current*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [CCK] Swing Current Index: %d,Swing Base Index: %d\n",
- rtldm->cck_index, rtldm->swing_idx_cck_base);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [CCK] Swing Current Index: %d,Swing Base Index: %d\n",
+ rtldm->cck_index, rtldm->swing_idx_cck_base);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [OFDM] Swing Current Index: %d,Swing Base Index[%c]: %d\n",
- rtldm->ofdm_index[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [OFDM] Swing Current Index: %d,Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p]);
}
if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
@@ -1727,52 +1726,52 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
*tx power in tx agc for 88E.
*/
if (thermal_value > rtldm->thermalvalue) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d,EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Increasing(B): delta_pi: %d ,delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_B],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Increasing(A): delta_pi: %d , delta_t: %d, Now_t: %d,EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Increasing(B): delta_pi: %d ,delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
} else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
-
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_B],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Decreasing(B): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_B],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
}
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) higher than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "**********Enter POWER Tracking MIX_MODE**********\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "**********Enter POWER Tracking MIX_MODE**********\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE,
p, 0);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) lower than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "**********Enter POWER Tracking MIX_MODE**********\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "**********Enter POWER Tracking MIX_MODE**********\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8812A; p++)
rtl8812ae_dm_txpwr_track_set_pwr(hw, MIX_MODE,
p, index_for_channel);
@@ -1783,9 +1782,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->swing_idx_ofdm_base[p] =
rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
@@ -1794,8 +1793,8 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
if (delta_iqk >= IQK_THRESHOLD)
rtl8812ae_do_iqk(hw, delta_iqk, thermal_value, 8);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "<===rtl8812ae_dm_txpower_tracking_callback_thermalmeter\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "<===%s\n", __func__);
}
static void rtl8821ae_get_delta_swing_table(struct ieee80211_hw *hw,
@@ -1865,7 +1864,7 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
if (rtldm->tx_rate != 0xFF)
tx_rate = rtl8821ae_hw_rate_to_mrate(hw, rtldm->tx_rate);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "===>%s\n", __func__);
if (tx_rate != 0xFF) { /* Mimic Modify High Rate BBSwing Limit.*/
/*CCK*/
@@ -1908,33 +1907,33 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
else
pwr_tracking_limit = 24;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxRate=0x%x, PwrTrackingLimit=%d\n",
- tx_rate, pwr_tracking_limit);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxRate=0x%x, PwrTrackingLimit=%d\n",
+ tx_rate, pwr_tracking_limit);
if (method == BBSWING) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>%s\n", __func__);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s\n", __func__);
if (rf_path == RF90_PATH_A) {
final_swing_idx[RF90_PATH_A] =
(rtldm->ofdm_index[RF90_PATH_A] >
pwr_tracking_limit) ?
pwr_tracking_limit :
rtldm->ofdm_index[RF90_PATH_A];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
- rtldm->ofdm_index[RF90_PATH_A],
- final_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.OFDM_index[ODM_RF_PATH_A]=%d,pDM_Odm->RealBbSwingIdx[ODM_RF_PATH_A]=%d\n",
+ rtldm->ofdm_index[RF90_PATH_A],
+ final_swing_idx[RF90_PATH_A]);
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[final_swing_idx[RF90_PATH_A]]);
}
} else if (method == MIX_MODE) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->DefaultOfdmIndex=%d,pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
- rtldm->default_ofdm_index,
- rtldm->absolute_ofdm_swing_idx[rf_path],
- rf_path);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->DefaultOfdmIndex=%d,pDM_Odm->Absolute_OFDMSwingIdx[RFPath]=%d, RF_Path = %d\n",
+ rtldm->default_ofdm_index,
+ rtldm->absolute_ofdm_swing_idx[rf_path],
+ rf_path);
final_ofdm_swing_index =
rtldm->default_ofdm_index +
@@ -1961,10 +1960,10 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtlphy->current_channel,
RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
" ******Path_A Over BBSwing Limit , PwrTrackingLimit = %d , Remnant TxAGC Value = %d\n",
- pwr_tracking_limit,
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ pwr_tracking_limit,
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else if (final_ofdm_swing_index < 0) {
rtldm->remnant_cck_idx = final_ofdm_swing_index;
/* CCK Follow the same compensate value as Path A*/
@@ -1980,16 +1979,16 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtl8821ae_phy_set_txpower_level_by_path(hw,
rtlphy->current_channel, RF90_PATH_A);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
- rtldm->remnant_ofdm_swing_idx[rf_path]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Lower then BBSwing lower bound 0 , Remnant TxAGC Value = %d\n",
+ rtldm->remnant_ofdm_swing_idx[rf_path]);
} else {
rtl_set_bbreg(hw, RA_TXSCALE, 0xFFE00000,
txscaling_tbl[(u8)final_ofdm_swing_index]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Path_A Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
- final_ofdm_swing_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Path_A Compensate with BBSwing ,Final_OFDM_Swing_Index = %d\n",
+ final_ofdm_swing_index);
/*If TxAGC has changed, reset TxAGC again*/
if (rtldm->modify_txagc_flag_path_a) {
rtldm->remnant_cck_idx = 0;
@@ -2001,9 +2000,9 @@ void rtl8821ae_dm_txpwr_track_set_pwr(struct ieee80211_hw *hw,
rtldm->modify_txagc_flag_path_a = false;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "******Path_A pDM_Odm->Modify_TxAGC_Flag= FALSE\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "******Path_A pDM_Odm->Modify_TxAGC_Flag= FALSE\n");
}
}
}
@@ -2042,12 +2041,12 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->txpower_trackinginit = true;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
- __func__,
- rtldm->swing_idx_cck_base,
- rtldm->swing_idx_ofdm_base[RF90_PATH_A],
- rtldm->default_ofdm_index);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===>%s,\n pDM_Odm->BbSwingIdxCckBase: %d,pDM_Odm->BbSwingIdxOfdmBase[A]:%d, pDM_Odm->DefaultOfdmIndex: %d\n",
+ __func__,
+ rtldm->swing_idx_cck_base,
+ rtldm->swing_idx_ofdm_base[RF90_PATH_A],
+ rtldm->default_ofdm_index);
/*0x42: RF Reg[15:10] 88E*/
thermal_value = (u8)rtl_get_rfreg(hw,
RF90_PATH_A, RF_T_METER_8812A, 0xfc00);
@@ -2059,8 +2058,8 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
/* 3. Initialize ThermalValues of RFCalibrateInfo*/
if (rtlhal->reloadtxpowerindex) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
}
/*4. Calculate average thermal meter*/
@@ -2080,9 +2079,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
if (thermal_value_avg_count) {
thermal_value = (u8)(thermal_value_avg /
thermal_value_avg_count);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "AVG Thermal Meter = 0x%X, EFUSE Thermal Base = 0x%X\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
}
/*5. Calculate delta, delta_LCK, delta_IQK.
@@ -2099,16 +2098,16 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
(thermal_value - rtldm->thermalvalue_iqk) :
(rtldm->thermalvalue_iqk - thermal_value);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
- delta, delta_lck, delta_iqk);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "(delta, delta_LCK, delta_IQK) = (%d, %d, %d)\n",
+ delta, delta_lck, delta_iqk);
/* 6. If necessary, do LCK. */
/*Delta temperature is equal to or larger than 20 centigrade.*/
if (delta_lck >= IQK_THRESHOLD) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_LCK(%d) >= Threshold_IQK(%d)\n",
- delta_lck, IQK_THRESHOLD);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_LCK(%d) >= Threshold_IQK(%d)\n",
+ delta_lck, IQK_THRESHOLD);
rtldm->thermalvalue_lck = thermal_value;
rtl8821ae_phy_lc_calibrate(hw);
}
@@ -2129,9 +2128,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
/*7.1 The Final Power Index = BaseIndex + PowerIndexOffset*/
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tup_a[%d] = %d\n",
- delta, delta_swing_table_idx_tup_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tup_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tup_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
rtldm->delta_power_index[RF90_PATH_A] =
@@ -2141,13 +2140,13 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
delta_swing_table_idx_tup_a[delta];
/*Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is higher and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "delta_swing_table_idx_tdown_a[%d] = %d\n",
- delta, delta_swing_table_idx_tdown_a[delta]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "delta_swing_table_idx_tdown_a[%d] = %d\n",
+ delta, delta_swing_table_idx_tdown_a[delta]);
rtldm->delta_power_index_last[RF90_PATH_A] =
rtldm->delta_power_index[RF90_PATH_A];
@@ -2157,15 +2156,15 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->absolute_ofdm_swing_idx[RF90_PATH_A] =
-1 * delta_swing_table_idx_tdown_a[delta];
/* Record delta swing for mix mode power tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
- rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "******Temp is lower and pDM_Odm->Absolute_OFDMSwingIdx[ODM_RF_PATH_A] = %d\n",
+ rtldm->absolute_ofdm_swing_idx[RF90_PATH_A]);
}
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "\n\n================================ [Path-%c]Calculating PowerIndexOffset ================================\n",
- (p == RF90_PATH_A ? 'A' : 'B'));
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "\n\n================================ [Path-%c]Calculating PowerIndexOffset ================================\n",
+ (p == RF90_PATH_A ? 'A' : 'B'));
/*If Thermal value changes but lookup table value
* still the same
*/
@@ -2179,9 +2178,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->delta_power_index_last[p];
/*Power Index Diff between 2 times Power Tracking*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
- (p == RF90_PATH_A ? 'A' : 'B'),
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "[Path-%c] PowerIndexOffset(%d) = DeltaPowerIndex(%d) - DeltaPowerIndexLast(%d)\n",
+ (p == RF90_PATH_A ? 'A' : 'B'),
rtldm->power_index_offset[p],
rtldm->delta_power_index[p] ,
rtldm->delta_power_index_last[p]);
@@ -2198,17 +2197,17 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
/*********Print BB Swing Base and Index Offset********/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_cck,
- rtldm->swing_idx_cck_base,
- rtldm->power_index_offset[p]);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
- rtldm->swing_idx_ofdm[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p],
- rtldm->power_index_offset[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'CCK' final index(%d) = BaseIndex(%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_cck,
+ rtldm->swing_idx_cck_base,
+ rtldm->power_index_offset[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The 'OFDM' final index(%d) = BaseIndex[%c](%d) + PowerIndexOffset(%d)\n",
+ rtldm->swing_idx_ofdm[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p],
+ rtldm->power_index_offset[p]);
/*7.1 Handle boundary conditions of index.*/
@@ -2217,32 +2216,32 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
else if (rtldm->ofdm_index[p] < ofdm_min_index)
rtldm->ofdm_index[p] = ofdm_min_index;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "\n\n========================================================================================================\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "\n\n========================================================================================================\n");
if (rtldm->cck_index > TXSCALE_TABLE_SIZE - 1)
rtldm->cck_index = TXSCALE_TABLE_SIZE - 1;
else if (rtldm->cck_index < 0)
rtldm->cck_index = 0;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "The thermal meter is unchanged or TxPowerTracking OFF(%d):ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
- rtldm->txpower_track_control,
- thermal_value,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "The thermal meter is unchanged or TxPowerTracking OFF(%d):ThermalValue: %d , pDM_Odm->RFCalibrateInfo.ThermalValue: %d\n",
+ rtldm->txpower_track_control,
+ thermal_value,
+ rtldm->thermalvalue);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->power_index_offset[p] = 0;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
- /*Print Swing base & current*/
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [CCK] Swing Current Index: %d, Swing Base Index: %d\n",
+ /*Print Swing base & current*/
rtldm->cck_index, rtldm->swing_idx_cck_base);
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
- rtldm->ofdm_index[p],
- (p == RF90_PATH_A ? 'A' : 'B'),
- rtldm->swing_idx_ofdm_base[p]);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPowerTracking: [OFDM] Swing Current Index: %d, Swing Base Index[%c]: %d\n",
+ rtldm->ofdm_index[p],
+ (p == RF90_PATH_A ? 'A' : 'B'),
+ rtldm->swing_idx_ofdm_base[p]);
}
if ((rtldm->power_index_offset[RF90_PATH_A] != 0 ||
@@ -2259,38 +2258,38 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
* set tx power in tx agc for 88E.
*/
if (thermal_value > rtldm->thermalvalue) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Increasing(A): delta_pi: %d , delta_t: %d,Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Increasing(A): delta_pi: %d , delta_t: %d,Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
} else if (thermal_value < rtldm->thermalvalue) { /*Low temperature*/
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
- rtldm->power_index_offset[RF90_PATH_A],
- delta, thermal_value,
- rtlefuse->eeprom_thermalmeter,
- rtldm->thermalvalue);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature Decreasing(A): delta_pi: %d , delta_t: %d, Now_t: %d, EFUSE_t: %d, Last_t: %d\n",
+ rtldm->power_index_offset[RF90_PATH_A],
+ delta, thermal_value,
+ rtlefuse->eeprom_thermalmeter,
+ rtldm->thermalvalue);
}
if (thermal_value > rtlefuse->eeprom_thermalmeter) {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) higher than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) higher than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "****Enter POWER Tracking MIX_MODE****\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "****Enter POWER Tracking MIX_MODE****\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtl8821ae_dm_txpwr_track_set_pwr(hw,
MIX_MODE, p, index_for_channel);
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Temperature(%d) lower than PG value(%d)\n",
- thermal_value, rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Temperature(%d) lower than PG value(%d)\n",
+ thermal_value, rtlefuse->eeprom_thermalmeter);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "*****Enter POWER Tracking MIX_MODE*****\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "*****Enter POWER Tracking MIX_MODE*****\n");
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtl8812ae_dm_txpwr_track_set_pwr(hw,
MIX_MODE, p, index_for_channel);
@@ -2300,9 +2299,9 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
for (p = RF90_PATH_A; p < MAX_PATH_NUM_8821A; p++)
rtldm->swing_idx_ofdm_base[p] = rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue = %d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
@@ -2323,7 +2322,7 @@ void rtl8821ae_dm_txpower_tracking_callback_thermalmeter(
}
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===%s\n", __func__);
}
void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
@@ -2332,13 +2331,13 @@ void rtl8821ae_dm_check_txpower_tracking_thermalmeter(struct ieee80211_hw *hw)
if (!rtlpriv->dm.tm_trigger) {
rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER_88E, BIT(17)|BIT(16),
0x03);
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 8821ae Thermal Meter!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 8821ae Thermal Meter!!\n");
rtlpriv->dm.tm_trigger = 1;
return;
} else {
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking !!\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking !!\n");
rtl8821ae_dm_txpower_tracking_callback_thermalmeter(hw);
rtlpriv->dm.tm_trigger = 0;
@@ -2357,14 +2356,14 @@ static void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
struct ieee80211_sta *sta = NULL;
if (is_hal_stop(rtlhal)) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver is going to unload\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver is going to unload\n");
return;
}
if (!rtlpriv->dm.useramask) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "driver does not control rate adaptive mask\n");
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "driver does not control rate adaptive mask\n");
return;
}
@@ -2392,14 +2391,14 @@ static void rtl8821ae_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
p_ra->ratr_state = DM_RATR_STA_LOW;
if (p_ra->pre_ratr_state != p_ra->ratr_state) {
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI = %ld\n",
- rtlpriv->dm.undec_sm_pwdb);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "RSSI_LEVEL = %d\n", p_ra->ratr_state);
- RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
- "PreState = %d, CurState = %d\n",
- p_ra->pre_ratr_state, p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI = %ld\n",
+ rtlpriv->dm.undec_sm_pwdb);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "RSSI_LEVEL = %d\n", p_ra->ratr_state);
+ rtl_dbg(rtlpriv, COMP_RATE, DBG_LOUD,
+ "PreState = %d, CurState = %d\n",
+ p_ra->pre_ratr_state, p_ra->ratr_state);
rcu_read_lock();
sta = rtl_find_sta(hw, mac->bssid);
@@ -2454,22 +2453,22 @@ static void rtl8821ae_dm_edca_choose_traffic_idx(
if (b_bias_on_rx) {
if (cur_tx_bytes > (cur_rx_bytes*4)) {
*pb_is_cur_rdl_state = false;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Uplink Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Uplink Traffic\n");
} else {
*pb_is_cur_rdl_state = true;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Balance Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Balance Traffic\n");
}
} else {
if (cur_rx_bytes > (cur_tx_bytes*4)) {
*pb_is_cur_rdl_state = true;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Downlink Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Downlink Traffic\n");
} else {
*pb_is_cur_rdl_state = false;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Balance Traffic\n");
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Balance Traffic\n");
}
}
return;
@@ -2492,11 +2491,11 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
bool b_bias_on_rx = false;
bool b_edca_turbo_on = false;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "rtl8821ae_dm_check_edca_turbo=====>\n");
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "Original BE PARAM: 0x%x\n",
- rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N));
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "%s=====>\n", __func__);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "Original BE PARAM: 0x%x\n",
+ rtl_read_dword(rtlpriv, DM_REG_EDCA_BE_11N));
if (rtlpriv->dm.dbginfo.num_non_be_pkt > 0x100)
rtlpriv->dm.is_any_nonbepkts = true;
@@ -2528,20 +2527,20 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
}
}
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x\n",
- rtlpriv->dm.is_any_nonbepkts,
- rtlpriv->dm.disable_framebursting);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "bIsAnyNonBEPkts : 0x%x bDisableFrameBursting : 0x%x\n",
+ rtlpriv->dm.is_any_nonbepkts,
+ rtlpriv->dm.disable_framebursting);
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n",
- b_edca_turbo_on, b_bias_on_rx);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "bEdcaTurboOn : 0x%x bBiasOnRx : 0x%x\n",
+ b_edca_turbo_on, b_bias_on_rx);
if (b_edca_turbo_on) {
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "curTxOkCnt : 0x%llx\n", cur_tx_ok_cnt);
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "curRxOkCnt : 0x%llx\n", cur_rx_ok_cnt);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "curTxOkCnt : 0x%llx\n", cur_tx_ok_cnt);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "curRxOkCnt : 0x%llx\n", cur_rx_ok_cnt);
if (b_bias_on_rx)
rtl8821ae_dm_edca_choose_traffic_idx(hw, cur_tx_ok_cnt,
cur_rx_ok_cnt, true, pb_is_cur_rdl_state);
@@ -2553,14 +2552,14 @@ static void rtl8821ae_dm_check_edca_turbo(struct ieee80211_hw *hw)
rtl_write_dword(rtlpriv, DM_REG_EDCA_BE_11N, edca_be);
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "EDCA Turbo on: EDCA_BE:0x%x\n", edca_be);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "EDCA Turbo on: EDCA_BE:0x%x\n", edca_be);
rtlpriv->dm.current_turbo_edca = true;
- RT_TRACE(rtlpriv, COMP_TURBO, DBG_LOUD,
- "EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x\n",
- edca_be_dl, edca_be_ul, edca_be);
+ rtl_dbg(rtlpriv, COMP_TURBO, DBG_LOUD,
+ "EDCA_BE_DL : 0x%x EDCA_BE_UL : 0x%x EDCA_BE : 0x%x\n",
+ edca_be_dl, edca_be_ul, edca_be);
} else {
if (rtlpriv->dm.current_turbo_edca) {
u8 tmp = AC0_BE;
@@ -2606,8 +2605,8 @@ static void rtl8821ae_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
dm_digtable->pre_cck_cca_thres = dm_digtable->cur_cck_cca_thres;
dm_digtable->cur_cck_cca_thres = cur_cck_cca_thresh;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
- "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_TRACE,
+ "CCK cca thresh hold =%x\n", dm_digtable->cur_cck_cca_thres);
}
static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
@@ -2626,9 +2625,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
rtldm->atc_status = ATC_STATUS_ON;
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "No link!!\n");
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "atc_status = %d\n", rtldm->atc_status);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "No link!!\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "atc_status = %d\n", rtldm->atc_status);
if (rtldm->crystal_cap != rtlpriv->efuse.crystalcap) {
rtldm->crystal_cap = rtlpriv->efuse.crystalcap;
@@ -2643,8 +2642,8 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
0xfff000, (crystal_cap |
(crystal_cap << 6)));
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "crystal_cap = 0x%x\n",
- rtldm->crystal_cap);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "crystal_cap = 0x%x\n",
+ rtldm->crystal_cap);
} else{
/*1. Calculate CFO for path-A & path-B*/
cfo_khz_a = (int)(rtldm->cfo_tail[0] * 3125) / 1280;
@@ -2653,15 +2652,15 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
/*2.No new packet*/
if (packet_count == rtldm->packet_count_pre) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "packet counter doesn't change\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "packet counter doesn't change\n");
return;
}
rtldm->packet_count_pre = packet_count;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "packet counter = %d\n",
- rtldm->packet_count);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "packet counter = %d\n",
+ rtldm->packet_count);
/*3.Average CFO*/
if (rtlpriv->phy.rf_type == RF_1T1R)
@@ -2669,22 +2668,22 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
else
cfo_ave = (cfo_khz_a + cfo_khz_b) >> 1;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n",
- cfo_khz_a, cfo_khz_b, cfo_ave);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cfo_khz_a = %dkHz, cfo_khz_b = %dkHz, cfo_ave = %dkHz\n",
+ cfo_khz_a, cfo_khz_b, cfo_ave);
/*4.Avoid abnormal large CFO*/
cfo_ave_diff = (rtldm->cfo_ave_pre >= cfo_ave) ?
(rtldm->cfo_ave_pre - cfo_ave) :
(cfo_ave - rtldm->cfo_ave_pre);
- if (cfo_ave_diff > 20 && rtldm->large_cfo_hit == 0) {
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "first large CFO hit\n");
- rtldm->large_cfo_hit = 1;
+ if (cfo_ave_diff > 20 && !rtldm->large_cfo_hit) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "first large CFO hit\n");
+ rtldm->large_cfo_hit = true;
return;
} else
- rtldm->large_cfo_hit = 0;
+ rtldm->large_cfo_hit = false;
rtldm->cfo_ave_pre = cfo_ave;
@@ -2701,9 +2700,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
rtldm->cfo_threshold = CFO_THRESHOLD_XTAL;
}
}
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Dynamic threshold = %d\n",
- rtldm->cfo_threshold);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Dynamic threshold = %d\n",
+ rtldm->cfo_threshold);
/* 2.Calculate Xtal offset*/
if (cfo_ave > rtldm->cfo_threshold && rtldm->crystal_cap < 0x3f)
@@ -2711,9 +2710,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
else if ((cfo_ave < -rtlpriv->dm.cfo_threshold) &&
rtlpriv->dm.crystal_cap > 0)
adjust_xtal = ((cfo_ave + CFO_THRESHOLD_XTAL) >> 2) - 1;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "Crystal cap = 0x%x, Crystal cap offset = %d\n",
- rtldm->crystal_cap, adjust_xtal);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Crystal cap = 0x%x, Crystal cap offset = %d\n",
+ rtldm->crystal_cap, adjust_xtal);
/*3.Adjudt Crystal Cap.*/
if (adjust_xtal != 0) {
@@ -2735,9 +2734,9 @@ static void rtl8821ae_dm_dynamic_atc_switch(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, REG_MAC_PHY_CTRL,
0xfff000, (crystal_cap |
(crystal_cap << 6)));
- RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
- "New crystal cap = 0x%x\n",
- rtldm->crystal_cap);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "New crystal cap = 0x%x\n",
+ rtldm->crystal_cap);
}
}
}
@@ -2781,7 +2780,7 @@ void rtl8821ae_dm_watchdog(struct ieee80211_hw *hw)
spin_unlock(&rtlpriv->locks.rf_ps_lock);
rtlpriv->dm.dbginfo.num_qry_beacon_pkt = 0;
- RT_TRACE(rtlpriv, COMP_DIG, DBG_DMESG, "\n");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_DMESG, "\n");
}
void rtl8821ae_dm_set_tx_ant_by_tx_info(struct ieee80211_hw *hw,
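Every hunk in this patch is the same mechanical substitution: RT_TRACE and rtl_dbg take an identical argument list (driver private data, a COMP_* component bitmask, a DBG_* verbosity level, then a printf-style format and its arguments), so each call site changes only the macro name, re-aligns its continuation lines to the one-character-shorter name (which is why otherwise-unchanged format strings appear in the hunks at all), and in a few places also swaps a hard-coded function name in the format string for __func__. Below is a minimal self-contained sketch of the calling convention the hunks rely on; dbg_print(), rtl_priv_stub, and the mask values are illustrative stand-ins, not the driver's real definitions.

#include <stdarg.h>
#include <stdio.h>

/* Stand-in component bit and level; the driver's live in its debug header. */
#define COMP_DIG	0x00000004
#define DBG_LOUD	4

struct rtl_priv_stub {
	unsigned int dbg_comp;	/* enabled COMP_* bits */
	int dbg_level;		/* verbosity threshold */
};

/* A filter-then-print helper of roughly this shape (an assumption for
 * illustration): drop the message unless its component bit is enabled
 * and its level is within the configured threshold.
 */
static void dbg_print(struct rtl_priv_stub *priv, unsigned int comp,
		      int level, const char *fmt, ...)
{
	va_list args;

	if (!(priv->dbg_comp & comp) || level > priv->dbg_level)
		return;
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

/* New spelling; the old RT_TRACE took these same arguments, which is
 * what makes the rename in this patch a pure text substitution.
 */
#define rtl_dbg(priv, comp, level, fmt, ...) \
	dbg_print(priv, comp, level, fmt, ##__VA_ARGS__)

int main(void)
{
	struct rtl_priv_stub priv = { .dbg_comp = COMP_DIG,
				      .dbg_level = DBG_LOUD };

	rtl_dbg(&priv, COMP_DIG, DBG_LOUD, "TotalFA=%d\n", 768);
	return 0;
}

Filtering on a component bitmask plus a level threshold is the pattern that lets one runtime debug setting silence whole subsystems (COMP_DIG, COMP_POWER_TRACKING, COMP_TURBO, COMP_FW in the hunks here) without recompiling the driver.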
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
index fe32d397d287..1ae56e15ca7f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
@@ -39,7 +39,7 @@ static void _rtl8821ae_write_fw(struct ieee80211_hw *hw,
u32 pagenums, remainsize;
u32 page, offset;
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "FW size is %d bytes,\n", size);
rtl_fill_dummy(bufferptr, &size);
@@ -75,9 +75,9 @@ static int _rtl8821ae_fw_free_to_go(struct ieee80211_hw *hw)
(!(value32 & FWDL_CHKSUM_RPT)));
if (counter >= FW_8821AE_POLLING_TIMEOUT_COUNT) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "chksum report fail! REG_MCUFWDL:0x%08x .\n",
- value32);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "chksum report fail! REG_MCUFWDL:0x%08x .\n",
+ value32);
goto exit;
}
value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
@@ -154,15 +154,15 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
fwsize = rtlhal->fwsize;
}
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "%s Firmware SIZE %d\n",
- buse_wake_on_wlan_fw ? "Wowlan" : "Normal", fwsize);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "%s Firmware SIZE %d\n",
+ buse_wake_on_wlan_fw ? "Wowlan" : "Normal", fwsize);
if (IS_FW_HEADER_EXIST_8812(pfwheader) ||
IS_FW_HEADER_EXIST_8821(pfwheader)) {
- RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
- "Firmware Version(%d), Signature(%#x)\n",
- pfwheader->version, pfwheader->signature);
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Firmware Version(%d), Signature(%#x)\n",
+ pfwheader->version, pfwheader->signature);
pfwdata = pfwdata + sizeof(struct rtlwifi_firmware_header);
fwsize = fwsize - sizeof(struct rtlwifi_firmware_header);
@@ -180,11 +180,11 @@ int rtl8821ae_download_fw(struct ieee80211_hw *hw, bool buse_wake_on_wlan_fw)
err = _rtl8821ae_fw_free_to_go(hw);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
- "Firmware is not ready to run!\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_DMESG,
+ "Firmware is not ready to run!\n");
} else {
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD,
- "Firmware is ready to run!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD,
+ "Firmware is ready to run!\n");
}
return 0;
@@ -199,13 +199,13 @@ void rtl8821ae_set_fw_related_for_wowlan(struct ieee80211_hw *hw,
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
/* 1. Before WoWLAN or After WOWLAN we need to re-download Fw. */
if (rtl8821ae_download_fw(hw, used_wowlan_fw)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Re-Download Firmware failed!!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Re-Download Firmware failed!!\n");
rtlhal->fw_ready = false;
return;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Re-Download Firmware Success !!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Re-Download Firmware Success !!\n");
rtlhal->fw_ready = true;
/* 2. Re-Init the variables about Fw related setting. */
@@ -249,22 +249,22 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
unsigned long flag = 0;
u8 idx = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
while (true) {
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
if (rtlhal->h2c_setinprogress) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d).\n",
- element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d).\n",
+ element_id);
while (rtlhal->h2c_setinprogress) {
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
flag);
h2c_waitcounter++;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
udelay(100);
if (h2c_waitcounter > 1000)
@@ -300,8 +300,8 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
box_extreg = REG_HMEBOX_EXT_3;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", boxnum);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", boxnum);
break;
}
@@ -324,9 +324,9 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
/*wait until Fw read*/
wait_h2c_limmit--;
if (wait_h2c_limmit == 0) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting too long for FW read clear HMEBox(%d)!\n",
+ boxnum);
break;
}
@@ -335,25 +335,25 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
isfw_read =
_rtl8821ae_check_fw_read_last_h2c(hw, boxnum);
u1b_tmp = rtl_read_byte(rtlpriv, 0x130);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
- boxnum, u1b_tmp);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Waiting for FW read clear HMEBox(%d)!!! 0x130 = %2x\n",
+ boxnum, u1b_tmp);
}
}
if (!isfw_read) {
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
+ boxnum);
break;
}
memset(boxcontent, 0, sizeof(boxcontent));
memset(boxextcontent, 0, sizeof(boxextcontent));
boxcontent[0] = element_id;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
switch (cmd_len) {
case 1:
@@ -389,8 +389,8 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
}
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", cmd_len);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", cmd_len);
break;
}
@@ -400,16 +400,16 @@ static void _rtl8821ae_fill_h2c_command(struct ieee80211_hw *hw,
if (rtlhal->last_hmeboxnum == 4)
rtlhal->last_hmeboxnum = 0;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
}
spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
rtlhal->h2c_setinprogress = false;
spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
}
void rtl8821ae_fill_h2c_cmd(struct ieee80211_hw *hw,
@@ -458,8 +458,8 @@ void rtl8821ae_firmware_selfreset(struct ieee80211_hw *hw)
u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN+1);
rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN+1, (u1b_tmp | BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "_8051Reset8812ae(): 8051 reset success .\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "_8051Reset8812ae(): 8051 reset success .\n");
}
void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
@@ -478,8 +478,8 @@ void rtl8821ae_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
if (bt_ctrl_lps)
mode = (bt_lps_on ? FW_PS_MIN_MODE : FW_PS_ACTIVE_MODE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
- mode, bt_ctrl_lps);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, "FW LPS mode = %d (coex:%d)\n",
+ mode, bt_ctrl_lps);
switch (mode) {
case FW_PS_MIN_MODE:
@@ -590,7 +590,7 @@ void rtl8821ae_set_fw_wowlan_mode(struct ieee80211_hw *hw, bool func_en)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
u8 fw_wowlan_info[H2C_8821AE_WOWLAN_LENGTH] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "enable(%d)\n", func_en);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "enable(%d)\n", func_en);
SET_8812_H2CCMD_WOWLAN_FUNC_ENABLE(fw_wowlan_info,
(func_en ? true : false));
@@ -624,9 +624,9 @@ void rtl8821ae_set_fw_remote_wake_ctrl_cmd(struct ieee80211_hw *hw,
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
u8 remote_wake_ctrl_parm[H2C_8821AE_REMOTE_WAKE_CTRL_LEN] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "enable=%d, ARP offload=%d, GTK offload=%d\n",
- enable, ppsc->arp_offload_enable, ppsc->gtk_offload_enable);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "enable=%d, ARP offload=%d, GTK offload=%d\n",
+ enable, ppsc->arp_offload_enable, ppsc->gtk_offload_enable);
SET_8812_H2CCMD_REMOTE_WAKECTRL_ENABLE(remote_wake_ctrl_parm, enable);
SET_8812_H2CCMD_REMOTE_WAKE_CTRL_ARP_OFFLOAD_EN(remote_wake_ctrl_parm,
@@ -651,7 +651,7 @@ void rtl8821ae_set_fw_keep_alive_cmd(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 keep_alive_info[H2C_8821AE_KEEP_ALIVE_CTRL_LENGTH] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Enable(%d)\n", func_en);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Enable(%d)\n", func_en);
SET_8812_H2CCMD_KEEP_ALIVE_ENABLE(keep_alive_info, func_en);
/* 1: the period is controlled by driver, 0: by Fw default */
@@ -690,9 +690,9 @@ void rtl8821ae_set_fw_global_info_cmd(struct ieee80211_hw *hw)
struct rtl_security *sec = &rtlpriv->sec;
u8 remote_wakeup_sec_info[H2C_8821AE_AOAC_GLOBAL_INFO_LEN] = {0};
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "PairwiseEncAlgorithm=%d, GroupEncAlgorithm=%d\n",
- sec->pairwise_enc_algorithm, sec->group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "PairwiseEncAlgorithm=%d, GroupEncAlgorithm=%d\n",
+ sec->pairwise_enc_algorithm, sec->group_enc_algorithm);
SET_8812_H2CCMD_AOAC_GLOBAL_INFO_PAIRWISE_ENC_ALG(
remote_wakeup_sec_info,
@@ -1646,8 +1646,8 @@ out:
}
if (!b_dlok)
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
void rtl8821ae_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
@@ -1771,8 +1771,8 @@ out:
b_dlok = true;
if (!b_dl_finished && b_dlok) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Set RSVD page location to Fw.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Set RSVD page location to Fw.\n");
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
"H2C_RSVDPAGE:\n", u1rsvdpageloc, 5);
rtl8821ae_fill_h2c_cmd(hw, H2C_8821AE_RSVDPAGE,
@@ -1788,8 +1788,8 @@ out:
}
if (!b_dlok) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set RSVD page location to Fw FAIL!!!!!!.\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set RSVD page location to Fw FAIL!!!!!!.\n");
}
}
@@ -1815,11 +1815,11 @@ void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
switch (p2p_ps_state) {
case P2P_PS_DISABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_DISABLE\n");
memset(p2p_ps_offload, 0, sizeof(*p2p_ps_offload));
break;
case P2P_PS_ENABLE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_ENABLE\n");
/* update CTWindow value. */
if (p2pinfo->ctwindow > 0) {
p2p_ps_offload->ctwindow_en = 1;
@@ -1873,11 +1873,11 @@ void rtl8821ae_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state)
}
break;
case P2P_PS_SCAN:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN\n");
p2p_ps_offload->discovery = 1;
break;
case P2P_PS_SCAN_DONE:
- RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_LOUD, "P2P_PS_SCAN_DONE\n");
p2p_ps_offload->discovery = 0;
p2pinfo->p2p_ps_state = P2P_PS_ENABLE;
break;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index 198d419ebb9c..33ffc24d3675 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -33,11 +33,10 @@ static void _rtl8821ae_return_beacon_queue_skb(struct ieee80211_hw *hw)
struct rtl_tx_desc *entry = &ring->desc[ring->idx];
struct sk_buff *skb = __skb_dequeue(&ring->queue);
- pci_unmap_single(rtlpci->pdev,
- rtlpriv->cfg->ops->get_desc(
- hw,
- (u8 *)entry, true, HW_DESC_TXBUFF_ADDR),
- skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtlpci->pdev->dev,
+ rtlpriv->cfg->ops->get_desc(hw, (u8 *)entry,
+ true, HW_DESC_TXBUFF_ADDR),
+ skb->len, DMA_TO_DEVICE);
kfree_skb(skb);
ring->idx = (ring->idx + 1) % ring->entries;
}
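Besides the logging rename, this hunk moves off the legacy PCI DMA wrappers: pci_unmap_single() on the pci_dev becomes dma_unmap_single() on the underlying struct device, and the PCI_DMA_TODEVICE direction flag becomes DMA_TO_DEVICE. The wrappers were thin aliases over the generic DMA API (and were later removed from the kernel entirely), so the conversion is mechanical:

	/* Legacy wrapper (removed in later kernels): */
	pci_unmap_single(rtlpci->pdev, dma_addr, skb->len, PCI_DMA_TODEVICE);

	/* Generic DMA API equivalent used by the new code: */
	dma_unmap_single(&rtlpci->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);

The same correspondence holds for the other wrappers, e.g. pci_map_single() maps to dma_map_single() and PCI_DMA_FROMDEVICE to DMA_FROM_DEVICE.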
@@ -143,9 +142,9 @@ change_done:
if (content & IMR_CPWM) {
rtl_write_word(rtlpriv, isr_regaddr, 0x0100);
rtlhal->fw_ps_state = FW_PS_STATE_RF_ON_8821AE;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Receive CPWM INT!!! Set rtlhal->FwPSState = %X\n",
- rtlhal->fw_ps_state);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Receive CPWM INT!!! Set rtlhal->FwPSState = %X\n",
+ rtlhal->fw_ps_state);
}
}
@@ -330,8 +329,8 @@ static void _rtl8821ae_download_rsvd_page(struct ieee80211_hw *hw,
} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
if (!(bcnvalid_reg & BIT(0)))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Download RSVD page failed!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Download RSVD page failed!\n");
if (bcnvalid_reg & BIT(0) && rtlhal->enter_pnp_sleep) {
rtl_write_byte(rtlpriv, REG_TDECTRL + 2, bcnvalid_reg | BIT(0));
_rtl8821ae_return_beacon_queue_skb(hw);
@@ -365,8 +364,8 @@ static void _rtl8821ae_download_rsvd_page(struct ieee80211_hw *hw,
} while (!(bcnvalid_reg & BIT(0)) && dlbcn_count < 5);
if (!(bcnvalid_reg & BIT(0)))
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "2 Download RSVD page failed!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "2 Download RSVD page failed!\n");
}
}
@@ -458,8 +457,8 @@ void rtl8821ae_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*((bool *)(val)) = false;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -511,8 +510,8 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
@@ -558,9 +557,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
*val = min_spacing_to_set;
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -572,9 +571,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
density_to_set = *((u8 *)val);
mac->min_space_cfg |= (density_to_set << 3);
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
mac->min_space_cfg);
@@ -632,9 +631,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl |= ACMHW_VOQEN;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
- acm);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
break;
}
} else {
@@ -649,16 +648,16 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
acm_ctrl &= (~ACMHW_VOQEN);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n",
- e_aci);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n",
+ e_aci);
break;
}
}
- RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
- "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
- acm_ctrl);
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
break; }
case HW_VAR_RCR:
@@ -761,9 +760,9 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
u32 us_nav_upper = *(u32 *)val;
if (us_nav_upper > HAL_92C_NAV_UPPER_UNIT * 0xFF) {
- RT_TRACE(rtlpriv, COMP_INIT , DBG_WARNING,
- "The setting value (0x%08X us) of NAV_UPPER is larger than (%d * 0xFF)!!!\n",
- us_nav_upper, HAL_92C_NAV_UPPER_UNIT);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING,
+ "The setting value (0x%08X us) of NAV_UPPER is larger than (%d * 0xFF)!!!\n",
+ us_nav_upper, HAL_92C_NAV_UPPER_UNIT);
break;
}
rtl_write_byte(rtlpriv, REG_NAV_UPPER,
@@ -779,8 +778,8 @@ void rtl8821ae_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
array);
break; }
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", variable);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", variable);
break;
}
}
@@ -910,16 +909,16 @@ static bool _rtl8821ae_init_mac(struct ieee80211_hw *hw)
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_ALL_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8812_NIC_ENABLE_FLOW)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "init 8812 MAC Fail as power on failure\n");
- return false;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "init 8812 MAC Fail as power on failure\n");
+ return false;
}
} else {
/* HW Power on sequence */
if (!rtl_hal_pwrseqcmdparsing(rtlpriv, PWR_CUT_A_MSK,
PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK,
RTL8821A_NIC_ENABLE_FLOW)){
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"init 8821 MAC Fail as power on failure\n");
return false;
}
@@ -1161,14 +1160,14 @@ void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw)
u8 sec_reg_value;
u8 tmp;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "not open hw encryption\n");
return;
}
@@ -1184,8 +1183,8 @@ void rtl8821ae_enable_hw_security_config(struct ieee80211_hw *hw)
tmp = rtl_read_byte(rtlpriv, REG_CR + 1);
rtl_write_byte(rtlpriv, REG_CR + 1, tmp | BIT(1));
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "The SECR-value %x\n", sec_reg_value);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "The SECR-value %x\n", sec_reg_value);
rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
}
@@ -1207,10 +1206,10 @@ static void rtl8821ae_macid_initialize_mediastatus(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->set_hw_reg(hw,
HW_VAR_H2C_FW_MEDIASTATUSRPT, media_rpt);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Initialize MacId media status: from %d to %d\n",
- MAC_ID_STATIC_FOR_BROADCAST_MULTICAST,
- MAC_ID_STATIC_FOR_BT_CLIENT_END);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Initialize MacId media status: from %d to %d\n",
+ MAC_ID_STATIC_FOR_BROADCAST_MULTICAST,
+ MAC_ID_STATIC_FOR_BT_CLIENT_END);
}
static bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw)
@@ -1229,8 +1228,8 @@ static bool _rtl8821ae_check_pcie_dma_hang(struct ieee80211_hw *hw)
/* read reg 0x350 Bit[24] if 1 : TX hang */
tmp = rtl_read_byte(rtlpriv, REG_DBI_CTRL + 3);
if ((tmp & BIT(0)) || (tmp & BIT(1))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "CheckPcieDMAHang8821AE(): true! Reset PCIE DMA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "CheckPcieDMAHang8821AE(): true! Reset PCIE DMA!\n");
return true;
} else {
return false;
@@ -1247,7 +1246,7 @@ static bool _rtl8821ae_reset_pcie_interface_dma(struct ieee80211_hw *hw,
bool release_mac_rx_pause;
u8 backup_pcie_dma_pause;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
/* 1. Disable register write lock. 0x1c[1] = 0 */
tmp = rtl_read_byte(rtlpriv, REG_RSV_CTRL);
@@ -1346,8 +1345,8 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
fw_reason = rtl_read_byte(rtlpriv, REG_MCUTST_WOWLAN);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "WOL Read 0x1c7 = %02X\n",
- fw_reason);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "WOL Read 0x1c7 = %02X\n",
+ fw_reason);
ppsc->wakeup_reason = 0;
@@ -1356,63 +1355,63 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
switch (fw_reason) {
case FW_WOW_V2_PTK_UPDATE_EVENT:
ppsc->wakeup_reason = WOL_REASON_PTK_UPDATE;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a WOL PTK Key update event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a WOL PTK Key update event!\n");
break;
case FW_WOW_V2_GTK_UPDATE_EVENT:
ppsc->wakeup_reason = WOL_REASON_GTK_UPDATE;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a WOL GTK Key update event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a WOL GTK Key update event!\n");
break;
case FW_WOW_V2_DISASSOC_EVENT:
ppsc->wakeup_reason = WOL_REASON_DISASSOC;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a disassociation event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a disassociation event!\n");
break;
case FW_WOW_V2_DEAUTH_EVENT:
ppsc->wakeup_reason = WOL_REASON_DEAUTH;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a deauth event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a deauth event!\n");
break;
case FW_WOW_V2_FW_DISCONNECT_EVENT:
ppsc->wakeup_reason = WOL_REASON_AP_LOST;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a Fw disconnect decision (AP lost) event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a Fw disconnect decision (AP lost) event!\n");
break;
case FW_WOW_V2_MAGIC_PKT_EVENT:
ppsc->wakeup_reason = WOL_REASON_MAGIC_PKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a magic packet event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a magic packet event!\n");
break;
case FW_WOW_V2_UNICAST_PKT_EVENT:
ppsc->wakeup_reason = WOL_REASON_UNICAST_PKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an unicast packet event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an unicast packet event!\n");
break;
case FW_WOW_V2_PATTERN_PKT_EVENT:
ppsc->wakeup_reason = WOL_REASON_PATTERN_PKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's a pattern match event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's a pattern match event!\n");
break;
case FW_WOW_V2_RTD3_SSID_MATCH_EVENT:
ppsc->wakeup_reason = WOL_REASON_RTD3_SSID_MATCH;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an RTD3 Ssid match event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an RTD3 Ssid match event!\n");
break;
case FW_WOW_V2_REALWOW_V2_WAKEUPPKT:
ppsc->wakeup_reason = WOL_REASON_REALWOW_V2_WAKEUPPKT;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an RealWoW wake packet event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an RealWoW wake packet event!\n");
break;
case FW_WOW_V2_REALWOW_V2_ACKLOST:
ppsc->wakeup_reason = WOL_REASON_REALWOW_V2_ACKLOST;
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "It's an RealWoW ack lost event!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "It's an RealWoW ack lost event!\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_DMESG,
- "WOL Read 0x1c7 = %02X, Unknown reason!\n",
- fw_reason);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "WOL Read 0x1c7 = %02X, Unknown reason!\n",
+ fw_reason);
break;
}
}
@@ -1484,9 +1483,9 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
rtlpriv->cfg->ops->get_hw_reg(hw, HAL_DEF_WOWLAN,
(u8 *)(&support_remote_wakeup));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n",
- boundary, npq_rqpn_value, rqpn_val);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "boundary=%#X, NPQ_RQPNValue=%#X, RQPNValue=%#X\n",
+ boundary, npq_rqpn_value, rqpn_val);
/* stop PCIe DMA
* 1. 0x301[7:0] = 0xFE */
@@ -1500,12 +1499,12 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
tmp16 = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
count++;
if ((count % 200) == 0) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Tx queue is not empty for 20ms!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue is not empty for 20ms!\n");
}
if (count >= 1000) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait for Tx FIFO empty timeout!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait for Tx FIFO empty timeout!\n");
break;
}
}
@@ -1521,8 +1520,8 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
udelay(100);
count++;
if (count >= 500) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait for TX State Machine ready timeout !!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait for TX State Machine ready timeout !!\n");
break;
}
}
@@ -1540,9 +1539,9 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
count++;
} while (!(tmp & BIT(1)) && count < 100);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait until Rx DMA Idle. count=%d REG[0x286]=0x%x\n",
- count, tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait until Rx DMA Idle. count=%d REG[0x286]=0x%x\n",
+ count, tmp);
/* reset BB
* 7. 0x02 [0] = 0 */
@@ -1599,8 +1598,8 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
/* init LLT
* 17. init LLT */
if (!_rtl8821ae_init_llt_table(hw, boundary)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING,
- "Failed to init LLT table!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING,
+ "Failed to init LLT table!\n");
return false;
}
@@ -1620,7 +1619,7 @@ static bool _rtl8821ae_dynamic_rqpn(struct ieee80211_hw *hw, u32 boundary,
tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
rtl_write_byte(rtlpriv, REG_RXDMA_CONTROL, (tmp&~BIT(2)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "End.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "End.\n");
return ret;
}
@@ -1655,12 +1654,12 @@ static void _rtl8821ae_enable_l1off(struct ieee80211_hw *hw)
u8 tmp = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x160);
if (!(tmp & (BIT(2) | BIT(3)))) {
- RT_TRACE(rtlpriv, COMP_POWER | COMP_INIT, DBG_LOUD,
- "0x160(%#x)return!!\n", tmp);
+ rtl_dbg(rtlpriv, COMP_POWER | COMP_INIT, DBG_LOUD,
+ "0x160(%#x)return!!\n", tmp);
return;
}
@@ -1670,7 +1669,7 @@ static void _rtl8821ae_enable_l1off(struct ieee80211_hw *hw)
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x718);
_rtl8821ae_dbi_write(rtlpriv, 0x718, tmp | BIT(5));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
}
static void _rtl8821ae_enable_ltr(struct ieee80211_hw *hw)
@@ -1678,13 +1677,13 @@ static void _rtl8821ae_enable_ltr(struct ieee80211_hw *hw)
u8 tmp = 0;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "--->\n");
/* Check 0x98[10] */
tmp = _rtl8821ae_dbi_read(rtlpriv, 0x99);
if (!(tmp & BIT(2))) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "<---0x99(%#x) return!!\n", tmp);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "<---0x99(%#x) return!!\n", tmp);
return;
}
@@ -1701,7 +1700,7 @@ static void _rtl8821ae_enable_ltr(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x7a4, (tmp & (~BIT(0))));
rtl_write_byte(rtlpriv, 0x7a4, (tmp | BIT(0)));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<---\n");
}
static bool _rtl8821ae_wowlan_initialize_adapter(struct ieee80211_hw *hw)
@@ -1724,14 +1723,14 @@ static bool _rtl8821ae_wowlan_initialize_adapter(struct ieee80211_hw *hw)
/* Release Pcie Interface Rx DMA to allow wake packet DMA. */
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xFE);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Enable PCIE Rx DMA.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Enable PCIE Rx DMA.\n");
/* Check wake up event.
* We should check wake packet bit before disable wowlan by H2C or
* Fw will clear the bit. */
tmp = rtl_read_byte(rtlpriv, REG_FTISR + 3);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Read REG_FTISR 0x13f = %#X\n", tmp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Read REG_FTISR 0x13f = %#X\n", tmp);
/* Set the WoWLAN related function control disable. */
rtl8821ae_set_fw_wowlan_mode(hw, false);
@@ -1796,7 +1795,7 @@ static void _rtl8821ae_poweroff_adapter(struct ieee80211_hw *hw)
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
/* Combo (PCIe + USB) Card and PCIe-MF Card */
/* 1. Run LPS WL RFOFF flow */
- /* RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ /* rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"=====>CardDisableRTL8812E,RTL8821A_NIC_LPS_ENTER_FLOW\n");
*/
rtl_hal_pwrseqcmdparsing(rtlpriv,
@@ -1862,8 +1861,8 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
tmp_u1b = rtl_read_byte(rtlpriv, REG_CR);
if (tmp_u1b != 0 && tmp_u1b != 0xEA) {
rtlhal->mac_func_enable = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "MAC has already power on.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MAC has already power on.\n");
} else {
rtlhal->mac_func_enable = false;
rtlhal->fw_ps_state = FW_PS_STATE_ALL_ON_8821AE;
@@ -1895,7 +1894,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
}
rtstatus = _rtl8821ae_init_mac(hw);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Init MAC failed\n");
err = 1;
return err;
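One non-logging cleanup rides along here: "if (rtstatus != true)" becomes "if (!rtstatus)". For a C99 bool the two are equivalent, and checkpatch.pl warns that explicit comparison against true/false is error prone; a trivial demonstration:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		bool rtstatus = false;

		/* Both tests evaluate identically for a bool; the negated
		 * form is the kernel's preferred style. */
		printf("%d %d\n", rtstatus != true, !rtstatus);	/* "1 1" */
		return 0;
	}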
@@ -1907,8 +1906,8 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
err = rtl8821ae_download_fw(hw, false);
if (err) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Failed to download FW. Init HW without FW now\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW now\n");
err = 1;
rtlhal->fw_ready = false;
return err;
@@ -1987,7 +1986,7 @@ int rtl8821ae_hw_init(struct ieee80211_hw *hw)
rtl8821ae_dm_init(hw);
rtl8821ae_macid_initialize_mediastatus(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rtl8821ae_hw_init() <====\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "%s() <====\n", __func__);
return err;
}
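Several messages that hard-coded a function name in the format string ("rtl8821ae_hw_init() <====") are switched to the "%s", __func__ idiom: the predefined identifier expands to the enclosing function's name, so the log text can never drift out of sync after a rename. A small demonstration with a toy function name:

	#include <stdio.h>

	static void rtl8821ae_hw_init_demo(void)
	{
		/* __func__ is a predefined identifier (C99), not a macro. */
		printf("%s() <====\n", __func__);
	}

	int main(void)
	{
		rtl8821ae_hw_init_demo();  /* prints "rtl8821ae_hw_init_demo() <====" */
		return 0;
	}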
@@ -2000,16 +1999,16 @@ static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
u32 value32;
value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "ReadChipVersion8812A 0xF0 = 0x%x\n", value32);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "ReadChipVersion8812A 0xF0 = 0x%x\n", value32);
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
rtlphy->rf_type = RF_2T2R;
else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE)
rtlphy->rf_type = RF_1T1R;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "RF_Type is %x!!\n", rtlphy->rf_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RF_Type is %x!!\n", rtlphy->rf_type);
if (value32 & TRP_VAUX_EN) {
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
@@ -2049,44 +2048,44 @@ static enum version_8821ae _rtl8821ae_read_chip_version(struct ieee80211_hw *hw)
switch (version) {
case VERSION_TEST_CHIP_1T1R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_TEST_CHIP_1T1R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_TEST_CHIP_1T1R_8812\n");
break;
case VERSION_TEST_CHIP_2T2R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_TEST_CHIP_2T2R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_TEST_CHIP_2T2R_8812\n");
break;
case VERSION_NORMAL_TSMC_CHIP_1T1R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID:VERSION_NORMAL_TSMC_CHIP_1T1R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID:VERSION_NORMAL_TSMC_CHIP_1T1R_8812\n");
break;
case VERSION_NORMAL_TSMC_CHIP_2T2R_8812:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812\n");
break;
case VERSION_NORMAL_TSMC_CHIP_1T1R_8812_C_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_1T1R_8812 C CUT\n");
break;
case VERSION_NORMAL_TSMC_CHIP_2T2R_8812_C_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_2T2R_8812 C CUT\n");
break;
case VERSION_TEST_CHIP_8821:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_TEST_CHIP_8821\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_TEST_CHIP_8821\n");
break;
case VERSION_NORMAL_TSMC_CHIP_8821:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 A CUT\n");
break;
case VERSION_NORMAL_TSMC_CHIP_8821_B_CUT:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: VERSION_NORMAL_TSMC_CHIP_8821 B CUT\n");
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Chip Version ID: Unknown (0x%X)\n", version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Chip Version ID: Unknown (0x%X)\n", version);
break;
}
@@ -2102,7 +2101,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
bt_msr &= 0xfc;
rtl_write_dword(rtlpriv, REG_BCN_CTRL, 0);
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_LOUD,
"clear 0x550 when set HW_VAR_MEDIA_STATUS\n");
if (type == NL80211_IFTYPE_UNSPECIFIED ||
@@ -2114,33 +2113,33 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw,
_rtl8821ae_resume_tx_beacon(hw);
_rtl8821ae_disable_bcn_sub_func(hw);
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
- type);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x).\n",
+ type);
}
switch (type) {
case NL80211_IFTYPE_UNSPECIFIED:
bt_msr |= MSR_NOLINK;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to NO LINK!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
break;
case NL80211_IFTYPE_ADHOC:
bt_msr |= MSR_ADHOC;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to Ad Hoc!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
break;
case NL80211_IFTYPE_STATION:
bt_msr |= MSR_INFRA;
ledaction = LED_CTL_LINK;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to STA!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
break;
case NL80211_IFTYPE_AP:
bt_msr |= MSR_AP;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Set Network type to AP!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
break;
default:
pr_err("Network type %d not support!\n", type);
@@ -2183,7 +2182,7 @@ int rtl8821ae_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "rtl8821ae_set_network_type!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "%s!\n", __func__);
if (_rtl8821ae_set_media_status(hw, type))
return -EOPNOTSUPP;
@@ -2283,16 +2282,16 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
* offset 0x34 from the Function Header */
pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
do {
pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr);
cap_id = cap_hdr & 0xFF;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "in pci configuration, cap_pointer%x = %x\n",
- cap_pointer, cap_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "in pci configuration, cap_pointer%x = %x\n",
+ cap_pointer, cap_id);
if (cap_id == 0x01) {
break;
@@ -2322,17 +2321,17 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
/* Read it back to check */
pci_read_config_byte(rtlpci->pdev, cap_pointer + 5,
&pmcs_reg);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Clear PME status 0x%2x to 0x%2x\n",
- cap_pointer + 5, pmcs_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Clear PME status 0x%2x to 0x%2x\n",
+ cap_pointer + 5, pmcs_reg);
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "PME status(0x%2x) = 0x%2x\n",
- cap_pointer + 5, pmcs_reg);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "PME status(0x%2x) = 0x%2x\n",
+ cap_pointer + 5, pmcs_reg);
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING,
- "Cannot find PME Capability\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING,
+ "Cannot find PME Capability\n");
}
}
@@ -2354,13 +2353,13 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
if (!(support_remote_wakeup && mac->opmode == NL80211_IFTYPE_STATION)
|| !rtlhal->enter_pnp_sleep) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Normal Power off\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Normal Power off\n");
mac->link_state = MAC80211_NOLINK;
opmode = NL80211_IFTYPE_UNSPECIFIED;
_rtl8821ae_set_media_status(hw, opmode);
_rtl8821ae_poweroff_adapter(hw);
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Wowlan Supported.\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Wowlan Supported.\n");
/* 3 <1> Prepare for configuring wowlan related information */
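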
/* Clear Fw WoWLAN event. */
rtl_write_byte(rtlpriv, REG_MCUTST_WOWLAN, 0x0);
@@ -2410,9 +2409,9 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
udelay(10);
tmp = rtl_read_byte(rtlpriv, REG_RXDMA_CONTROL);
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait Rx DMA Finished before host sleep. count=%d\n",
- count);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait Rx DMA Finished before host sleep. count=%d\n",
+ count);
/* reset trx ring */
rtlpriv->intf_ops->reset_trx_ring(hw);
@@ -2438,7 +2437,7 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
/* Stop Pcie Interface Tx DMA. */
rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 1, 0xff);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Stop PCIE Tx DMA.\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Stop PCIE Tx DMA.\n");
/* Wait for TxDMA idle. */
count = 0;
@@ -2447,9 +2446,9 @@ void rtl8821ae_card_disable(struct ieee80211_hw *hw)
udelay(10);
count++;
} while ((tmp != 0) && (count < 100));
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Wait Tx DMA Finished before host sleep. count=%d\n",
- count);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Wait Tx DMA Finished before host sleep. count=%d\n",
+ count);
if (rtlhal->hw_rof_enable) {
printk("hw_rof_enable\n");
@@ -2501,8 +2500,8 @@ void rtl8821ae_set_beacon_interval(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
u16 bcn_interval = mac->beacon_interval;
- RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
- "beacon_interval:%d\n", bcn_interval);
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
rtl8821ae_disable_interrupt(hw);
rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
rtl8821ae_enable_interrupt(hw);
@@ -2514,8 +2513,8 @@ void rtl8821ae_update_interrupt_mask(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
- "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "add_msr:%x, rm_msr:%x\n", add_msr, rm_msr);
if (add_msr)
rtlpci->irq_mask[0] |= add_msr;
@@ -2586,15 +2585,15 @@ static void _rtl8821ae_read_power_value_fromprom(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 rfpath, eeaddr = EEPROM_TX_PWR_INX, group, txcount = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "hal_ReadPowerValueFromPROM8821ae(): hwinfo[0x%x]=0x%x\n",
- (eeaddr + 1), hwinfo[eeaddr + 1]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "hal_ReadPowerValueFromPROM8821ae(): hwinfo[0x%x]=0x%x\n",
+ (eeaddr + 1), hwinfo[eeaddr + 1]);
if (hwinfo[eeaddr + 1] == 0xFF) /*YJ,add,120316*/
autoload_fail = true;
if (autoload_fail) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "auto load fail : Use Default value!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "auto load fail : Use Default value!\n");
for (rfpath = 0 ; rfpath < MAX_RF_PATH ; rfpath++) {
/*2.4G default value*/
for (group = 0 ; group < MAX_CHNL_GROUP_24G; group++) {
@@ -3048,8 +3047,8 @@ static void _rtl8821ae_read_rfe_type(struct ieee80211_hw *hw, u8 *hwinfo,
rtlhal->rfe_type = 0x04;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "RFE Type: 0x%2x\n", rtlhal->rfe_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "RFE Type: 0x%2x\n", rtlhal->rfe_type);
}
static void _rtl8812ae_read_bt_coexist_info_from_hwpg(struct ieee80211_hw *hw,
@@ -3153,8 +3152,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
rtlefuse->board_type |= ODM_BOARD_BT;
rtlhal->board_type = rtlefuse->board_type;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "board_type = 0x%x\n", rtlefuse->board_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "board_type = 0x%x\n", rtlefuse->board_type);
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
if (rtlefuse->eeprom_channelplan == 0xff)
@@ -3176,8 +3175,8 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
}
rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
if (!rtlefuse->autoload_failflag) {
rtlefuse->antenna_div_cfg =
@@ -3197,7 +3196,7 @@ static void _rtl8821ae_read_adapter_info(struct ieee80211_hw *hw, bool b_pseudo_
rtlefuse->antenna_div_type = 0;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"SWAS: bHwAntDiv = %x, TRxAntDivType = %x\n",
rtlefuse->antenna_div_cfg, rtlefuse->antenna_div_type);
@@ -3246,8 +3245,8 @@ exit:
default:
break;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "RT Customized ID: 0x%02X\n", rtlhal->oem_id);
}*/
void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
@@ -3264,20 +3263,20 @@ void rtl8821ae_read_eeprom_info(struct ieee80211_hw *hw)
else
rtlpriv->dm.rfpath_rxenable[0] =
rtlpriv->dm.rfpath_rxenable[1] = true;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
- rtlhal->version);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "VersionID = 0x%4x\n",
+ rtlhal->version);
tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
if (tmp_u1b & BIT(4)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
rtlefuse->epromtype = EEPROM_93C46;
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
}
if (tmp_u1b & BIT(5)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
rtlefuse->autoload_failflag = false;
_rtl8821ae_read_adapter_info(hw, false);
} else {
@@ -3378,8 +3377,8 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
}
static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate)
@@ -3524,8 +3523,8 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
sta_entry = (struct rtl_sta_info *)sta->drv_priv;
wirelessmode = sta_entry->wireless_mode;
- RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD,
- "wireless mode = 0x%x\n", wirelessmode);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_LOUD,
+ "wireless mode = 0x%x\n", wirelessmode);
if (mac->opmode == NL80211_IFTYPE_STATION ||
mac->opmode == NL80211_IFTYPE_MESH_POINT) {
curtxbw_40mhz = mac->bw_40;
@@ -3675,8 +3674,8 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode,
ratr_bitmap);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_LOUD,
- "ratr_bitmap :%x\n", ratr_bitmap);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_LOUD,
+ "ratr_bitmap :%x\n", ratr_bitmap);
/* *(u32 *)& rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
(ratr_index << 28)); */
@@ -3692,10 +3691,10 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
rate_mask[5] = (u8)((ratr_bitmap & 0x00ff0000) >> 16);
rate_mask[6] = (u8)((ratr_bitmap & 0xff000000) >> 24);
- RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
- "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
- ratr_index, ratr_bitmap,
- rate_mask[0], rate_mask[1],
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %x:%x:%x:%x:%x:%x:%x\n",
+ ratr_index, ratr_bitmap,
+ rate_mask[0], rate_mask[1],
rate_mask[2], rate_mask[3],
rate_mask[4], rate_mask[5],
rate_mask[6]);
@@ -3710,7 +3709,7 @@ void rtl8821ae_update_hal_rate_tbl(struct ieee80211_hw *hw,
if (rtlpriv->dm.useramask)
rtl8821ae_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
else
- /*RT_TRACE(rtlpriv, COMP_RATR,DBG_LOUD,
+ /*rtl_dbg(rtlpriv, COMP_RATR,DBG_LOUD,
"rtl8821ae_update_hal_rate_tbl() Error! 8821ae FW RA Only\n");*/
rtl8821ae_update_hal_rate_table(hw, sta);
}
@@ -3782,16 +3781,16 @@ bool rtl8821ae_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
e_rfpowerstate_toset = (u1tmp & BIT(1)) ? ERFON : ERFOFF;
if ((ppsc->hwradiooff) && (e_rfpowerstate_toset == ERFON)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
e_rfpowerstate_toset = ERFON;
ppsc->hwradiooff = false;
b_actuallyset = true;
} else if ((!ppsc->hwradiooff)
&& (e_rfpowerstate_toset == ERFOFF)) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
e_rfpowerstate_toset = ERFOFF;
ppsc->hwradiooff = true;
@@ -3841,7 +3840,7 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
u8 cam_offset = 0;
u8 clear_number = 5;
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
for (idx = 0; idx < clear_number; idx++) {
rtl_cam_mark_invalid(hw, cam_offset + idx);
@@ -3868,8 +3867,8 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
enc_algo = CAM_AES;
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", enc_algo);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", enc_algo);
enc_algo = CAM_TKIP;
break;
}
@@ -3898,26 +3897,26 @@ void rtl8821ae_set_key(struct ieee80211_hw *hw, u32 key_index,
}
if (rtlpriv->sec.key_len[key_index] == 0) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
if (mac->opmode == NL80211_IFTYPE_AP)
rtl_cam_del_entry(hw, p_macaddr);
rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
if (is_pairwise) {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
rtl_cam_add_one_entry(hw, macaddr, key_index,
entry_id, enc_algo,
CAM_CONFIG_NO_USEDK,
rtlpriv->sec.key_buf[key_index]);
} else {
- RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
if (mac->opmode == NL80211_IFTYPE_ADHOC) {
rtl_cam_add_one_entry(hw,
@@ -3982,7 +3981,7 @@ void rtl8821ae_allow_all_destaddr(struct ieee80211_hw *hw,
if (write_into_reg)
rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
- RT_TRACE(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_TURBO | COMP_INIT, DBG_LOUD,
"receive_config=0x%08X, write_into_reg=%d\n",
rtlpci->receive_config, write_into_reg);
}
@@ -4035,9 +4034,9 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
cam |= BIT(26);
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "WRITE entry[%d] 0x%x: %x\n", addr,
- REG_PKTBUF_DBG_DATA_L, cam);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "WRITE entry[%d] 0x%x: %x\n", addr,
+ REG_PKTBUF_DBG_DATA_L, cam);
/* Write to Rx packet buffer. */
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01);
@@ -4045,18 +4044,18 @@ void rtl8821ae_add_wowlan_pattern(struct ieee80211_hw *hw,
cam = rtl_pattern->mask[addr - 2];
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_L, cam);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "WRITE entry[%d] 0x%x: %x\n", addr,
- REG_PKTBUF_DBG_DATA_L, cam);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "WRITE entry[%d] 0x%x: %x\n", addr,
+ REG_PKTBUF_DBG_DATA_L, cam);
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0x0f01);
} else if (addr == 3 || addr == 5) {/* WKFM[127:0] */
cam = rtl_pattern->mask[addr - 2];
rtl_write_dword(rtlpriv, REG_PKTBUF_DBG_DATA_H, cam);
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
- "WRITE entry[%d] 0x%x: %x\n", addr,
- REG_PKTBUF_DBG_DATA_H, cam);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
+ "WRITE entry[%d] 0x%x: %x\n", addr,
+ REG_PKTBUF_DBG_DATA_H, cam);
rtl_write_word(rtlpriv, REG_RXPKTBUF_CTRL, 0xf001);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
index dd7553e80ab1..7d6fb134c10f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/led.c
@@ -20,8 +20,8 @@ void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
u8 ledcfg;
struct rtl_priv *rtlpriv = rtl_priv(hw);
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
switch (pled->ledpin) {
case LED_PIN_GPIO0:
@@ -37,8 +37,8 @@ void rtl8821ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg & 0x10);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = true;
@@ -64,9 +64,9 @@ void rtl8812ae_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "In SwLedOn, LedAddr:%X LEDPIN=%d\n",
- ledreg, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "In SwLedOn, LedAddr:%X LEDPIN=%d\n",
+ ledreg, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, ledreg);
ledcfg |= BIT(5); /*Set 0x4c[21]*/
@@ -81,8 +81,8 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
struct rtl_priv *rtlpriv = rtl_priv(hw);
u8 ledcfg;
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin);
ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
@@ -109,8 +109,8 @@ void rtl8821ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
rtl_write_byte(rtlpriv, REG_LEDCFG1, ledcfg|BIT(3));
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
- "switch case %#x not processed\n", pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
+ "switch case %#x not processed\n", pled->ledpin);
break;
}
pled->ledon = false;
@@ -135,9 +135,9 @@ void rtl8812ae_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
break;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
- "In SwLedOff,LedAddr:%X LEDPIN=%d\n",
- ledreg, pled->ledpin);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD,
+ "In SwLedOff,LedAddr:%X LEDPIN=%d\n",
+ ledreg, pled->ledpin);
/*Open-drain arrangement for controlling the LED*/
if (rtlpriv->ledctl.led_opendrain) {
u8 ledcfg = rtl_read_byte(rtlpriv, ledreg);
@@ -207,7 +207,7 @@ void rtl8821ae_led_control(struct ieee80211_hw *hw,
ledaction == LED_CTL_POWER_ON)) {
return;
}
- RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
- ledaction);
+ rtl_dbg(rtlpriv, COMP_LED, DBG_LOUD, "ledaction %d,\n",
+ ledaction);
_rtl8821ae_sw_led_control(hw, ledaction);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index b8a2b2326902..f41a7643b9c4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -27,7 +27,12 @@ static u32 _rtl8821ae_phy_rf_serial_read(struct ieee80211_hw *hw,
static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 offset,
u32 data);
-static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask);
+static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
+{
+ u32 i = ffs(bitmask);
+
+ return i ? i - 1 : 32;
+}
static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw);
/*static bool _rtl8812ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);*/
static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
@@ -96,16 +101,16 @@ u32 rtl8821ae_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 returnvalue, originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x)\n",
- regaddr, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
originalvalue = rtl_read_dword(rtlpriv, regaddr);
bitshift = _rtl8821ae_phy_calculate_bit_shift(bitmask);
returnvalue = (originalvalue & bitmask) >> bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
- bitmask, regaddr, originalvalue);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
return returnvalue;
}
@@ -115,9 +120,9 @@ void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 originalvalue, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
if (bitmask != MASKDWORD) {
originalvalue = rtl_read_dword(rtlpriv, regaddr);
@@ -128,9 +133,9 @@ void rtl8821ae_phy_set_bb_reg(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, regaddr, data);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x)\n",
- regaddr, bitmask, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
}
u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
@@ -140,9 +145,9 @@ u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, readback_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -152,9 +157,9 @@ u32 rtl8821ae_phy_query_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
return readback_value;
}
@@ -166,9 +171,9 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
u32 original_value, bitshift;
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
spin_lock(&rtlpriv->locks.rf_lock);
@@ -183,8 +188,8 @@ void rtl8821ae_phy_set_rf_reg(struct ieee80211_hw *hw,
spin_unlock(&rtlpriv->locks.rf_lock);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
regaddr, bitmask, data, rfpath);
}
@@ -267,20 +272,9 @@ static void _rtl8821ae_phy_rf_serial_write(struct ieee80211_hw *hw,
data_and_addr = ((newoffset << 20) |
(data & 0x000fffff)) & 0x0fffffff;
rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
- "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-static u32 _rtl8821ae_phy_calculate_bit_shift(u32 bitmask)
-{
- u32 i;
-
- for (i = 0; i <= 31; i++) {
- if (((bitmask >> i) & 0x1) == 1)
- break;
- }
- return i;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
}
bool rtl8821ae_phy_mac_config(struct ieee80211_hw *hw)
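This hunk is more than a rename: _rtl8821ae_phy_calculate_bit_shift(), which turns a register bitmask into a shift count, is rewritten on top of ffs() (find first set) and moved up to replace its forward declaration. The semantics match the old 0..31 scan, including the degenerate all-zero mask, because ffs(0) is defined to return 0. A userspace equivalence check using the POSIX ffs() from <strings.h> (the kernel provides its own ffs()):

	#include <stdio.h>
	#include <strings.h>	/* POSIX ffs(); the kernel has its own */

	static unsigned int shift_old(unsigned int mask)	/* removed loop version */
	{
		unsigned int i;

		for (i = 0; i <= 31; i++)
			if ((mask >> i) & 0x1)
				break;
		return i;	/* falls out as 32 when no bit is set */
	}

	static unsigned int shift_new(unsigned int mask)	/* ffs() version */
	{
		unsigned int i = ffs(mask);

		return i ? i - 1 : 32;
	}

	int main(void)
	{
		unsigned int masks[] = { 0x0, 0x1, 0xf0, 0x00c00000, 0xffffffff };
		unsigned int i;

		for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
			printf("%#010x -> %u %u\n", masks[i],
			       shift_old(masks[i]), shift_new(masks[i]));
		return 0;
	}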
@@ -370,7 +364,7 @@ static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
break;
}
- /* fall through */
+ fallthrough;
case 0:
case 2:
default:
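The one change in this hunk that is not a logging rename replaces the bare /* fall through */ comment with the fallthrough pseudo-keyword. The kernel defines it centrally (mapping to __attribute__((__fallthrough__)) where the compiler supports it), which lets -Wimplicit-fallthrough verify the annotation instead of trusting a comment the compiler cannot see. A minimal sketch outside the kernel, with hypothetical case bodies standing in for the RFE register writes:

	#include <stdio.h>

	#if defined(__has_attribute)
	# if __has_attribute(__fallthrough__)
	#  define fallthrough	__attribute__((__fallthrough__))
	# endif
	#endif
	#ifndef fallthrough
	# define fallthrough	do {} while (0)	/* no-op fallback */
	#endif

	static void configure_rfe(int rfe_type)
	{
		switch (rfe_type) {
		case 3:
			printf("inverted polarity setup\n");
			fallthrough;	/* deliberate: shares the default path */
		case 0:
		case 2:
		default:
			printf("default RFE setup\n");
			break;
		}
	}

	int main(void)
	{
		configure_rfe(3);
		return 0;
	}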
@@ -450,10 +444,10 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
u32 out = 0x200;
const s8 auto_temp = -1;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "===> PHY_GetTXBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n",
- (int)swing_2g, (int)swing_5g,
- (int)rtlefuse->autoload_failflag);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "===> PHY_GetTXBBSwing_8812A, bbSwing_2G: %d, bbSwing_5G: %d,autoload_failflag=%d.\n",
+ (int)swing_2g, (int)swing_5g,
+ (int)rtlefuse->autoload_failflag);
if (rtlefuse->autoload_failflag) {
if (band == BAND_ON_2_4G) {
@@ -531,9 +525,9 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
swing_a = (swing & 0x3) >> 0; /* 0xC6/C7[1:0] */
swing_b = (swing & 0xC) >> 2; /* 0xC6/C7[3:2] */
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "===> PHY_GetTXBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
- swing_a, swing_b);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "===> PHY_GetTXBBSwing_8812A, swingA: 0x%X, swingB: 0x%X\n",
+ swing_a, swing_b);
/* 3 Path-A */
if (swing_a == 0x0) {
@@ -589,8 +583,8 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
}
}
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "<=== PHY_GetTXBBSwing_8812A, out = 0x%X\n", out);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "<=== PHY_GetTXBBSwing_8812A, out = 0x%X\n", out);
return out;
}
@@ -652,23 +646,23 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
count = 0;
reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Reg41A value %d\n", reg_41a);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Reg41A value %d\n", reg_41a);
reg_41a &= 0x30;
while ((reg_41a != 0x30) && (count < 50)) {
udelay(50);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "Delay 50us\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "Delay 50us\n");
reg_41a = rtl_read_word(rtlpriv, REG_TXPKT_EMPTY);
reg_41a &= 0x30;
count++;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "Reg41A value %d\n", reg_41a);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Reg41A value %d\n", reg_41a);
}
if (count != 0)
- RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
- "PHY_SwitchWirelessBand8812(): Switch to 5G Band. Count = %d reg41A=0x%x\n",
- count, reg_41a);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "PHY_SwitchWirelessBand8812(): Switch to 5G Band. Count = %d reg41A=0x%x\n",
+ count, reg_41a);
/* 2012/02/01, Sinda add registry to switch workaround
without long-run verification for scan issue. */
@@ -693,9 +687,9 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
- "==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n",
- rtlpriv->dm.ofdm_index[RF90_PATH_A]);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "==>PHY_SwitchWirelessBand8812() BAND_ON_5G settings OFDM index 0x%x\n",
+ rtlpriv->dm.ofdm_index[RF90_PATH_A]);
}
if ((rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) ||
@@ -722,8 +716,8 @@ void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
rtl8821ae_dm_clear_txpower_tracking_state(hw);
}
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "<==rtl8821ae_phy_switch_wirelessband():Switch Band OK.\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "<==%s():Switch Band OK.\n", __func__);
return;
}
@@ -756,18 +750,18 @@ static bool _rtl8821ae_check_positive(struct ieee80211_hw *hw,
rtlhal->type_alna << 16 |
rtlhal->type_apa << 24;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
- cond1, cond2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
- driver1, driver2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (cond1, cond2) = (0x%X 0x%X)\n",
+ cond1, cond2);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "===> [8812A] CheckPositive (driver1, driver2) = (0x%X 0x%X)\n",
+ driver1, driver2);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- " (Board, Package) = (0x%X, 0x%X)\n",
- rtlhal->board_type, rtlhal->package_type);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ " (Platform, Interface) = (0x%X, 0x%X)\n", 0x04, intf);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ " (Board, Package) = (0x%X, 0x%X)\n",
+ rtlhal->board_type, rtlhal->package_type);
/*============== Value Defined Check ===============*/
/*QFN Type [15:12] and Cut Version [27:24] need to do value check*/
@@ -918,7 +912,7 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Rf Path %d in phy_SetTxPowerByRatBase()\n", path);
return;
}
@@ -944,9 +938,9 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_24g[path][txnum][5] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G,Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else if (band == BAND_ON_5G) {
@@ -967,13 +961,13 @@ static void _rtl8821ae_phy_set_txpower_by_rate_base(struct ieee80211_hw *hw,
rtlphy->txpwr_by_rate_base_5g[path][txnum][4] = value;
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_SetTxPowerByRateBase()\n",
rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Invalid Band %d in PHY_SetTxPowerByRateBase()\n", band);
}
}
@@ -987,9 +981,9 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
u8 value = 0;
if (path > RF90_PATH_D) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
- path);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Rf Path %d in PHY_GetTxPowerByRateBase()\n",
+ path);
return 0;
}
@@ -1014,9 +1008,9 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_24g[path][txnum][5];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 2.4G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else if (band == BAND_ON_5G) {
@@ -1037,14 +1031,14 @@ static u8 _rtl8821ae_phy_get_txpower_by_rate_base(struct ieee80211_hw *hw,
value = rtlphy->txpwr_by_rate_base_5g[path][txnum][4];
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
- rate_section, path, txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid RateSection %d in Band 5G, Rf Path %d, %dTx in PHY_GetTxPowerByRateBase()\n",
+ rate_section, path, txnum);
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Invalid Band %d in PHY_GetTxPowerByRateBase()\n", band);
}
return value;
@@ -1144,7 +1138,7 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
[bw][rate_section][channel][RF90_PATH_A];
if (temp_pwrlmt == MAX_POWER_INDEX) {
if (bw == 0 || bw == 1) { /*5G 20M 40M VHT and HT can cross reference*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"No power limit table of the specified band %d, bandwidth %d, ratesection %d, channel %d, rf path %d\n",
1, bw, rate_section, channel, RF90_PATH_A);
if (rate_section == 2) {
@@ -1161,7 +1155,9 @@ static void _rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(struct ieee8
rtlphy->txpwr_limit_5g[regulation][bw][3][channel][RF90_PATH_A];
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "use other value %d\n", temp_pwrlmt);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "use other value %d\n",
+ temp_pwrlmt);
}
}
}
@@ -1218,7 +1214,7 @@ static u8 _rtl8812ae_phy_get_txpower_by_rate_base_index(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Wrong rate 0x%x to obtain index in 2.4G in PHY_GetTxPowerByRateBaseIndex()\n",
rate);
break;
@@ -1285,7 +1281,7 @@ static u8 _rtl8812ae_phy_get_txpower_by_rate_base_index(struct ieee80211_hw *hw,
break;
default:
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
"Wrong rate 0x%x to obtain index in 5G in PHY_GetTxPowerByRateBaseIndex()\n",
rate);
break;
@@ -1306,7 +1302,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
s8 temp_value = 0, temp_pwrlmt = 0;
u8 rf_path = 0;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"=====> _rtl8812ae_phy_convert_txpower_limit_to_power_index()\n");
_rtl8812ae_phy_cross_reference_ht_and_vht_txpower_limit(hw);
@@ -1355,7 +1351,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
temp_value;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"TxPwrLimit_2_4G[regulation %d][bw %d][rateSection %d][channel %d] = %d\n(TxPwrLimit in dBm %d - BW40PwrLmt2_4G[channel %d][rfpath %d] %d)\n",
regulation, bw, rate_section, channel,
rtlphy->txpwr_limit_2_4g[regulation][bw]
@@ -1420,7 +1416,7 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
[rf_path] = temp_value;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
"TxPwrLimit_5G[regulation %d][bw %d][rateSection %d][channel %d] =%d\n(TxPwrLimit in dBm %d - BW40PwrLmt5G[chnl group %d][rfpath %d] %d)\n",
regulation, bw, rate_section,
channel, rtlphy->txpwr_limit_5g[regulation]
@@ -1431,8 +1427,8 @@ static void _rtl8812ae_phy_convert_txpower_limit_to_power_index(struct ieee80211
}
}
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "<===== _rtl8812ae_phy_convert_txpower_limit_to_power_index()\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "<===== %s()\n", __func__);
}
static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
@@ -1441,8 +1437,8 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 i, j, k, l, m;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "=====> _rtl8821ae_phy_init_txpower_limit()!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "=====>`%s()!\n", __func__);
for (i = 0; i < MAX_REGULATION_NUM; ++i) {
for (j = 0; j < MAX_2_4G_BANDWIDTH_NUM; ++j)
@@ -1463,8 +1459,8 @@ static void _rtl8821ae_phy_init_txpower_limit(struct ieee80211_hw *hw)
= MAX_POWER_INDEX;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "<===== _rtl8821ae_phy_init_txpower_limit()!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "<===== %s()!\n", __func__);
}
static void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee80211_hw *hw)
@@ -1574,7 +1570,7 @@ static void _rtl8821ae_phy_convert_txpower_dbm_to_relative_value(struct ieee8021
0, 3, base);
}
- RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_TRACE,
"<===_rtl8821ae_phy_convert_txpower_dbm_to_relative_value()\n");
}
@@ -1630,13 +1626,13 @@ static s8 _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(struct ieee80211_hw *hw,
channel_index = i;
}
} else
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s\n",
- band, __func__);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, "Invalid Band %d in %s\n",
+ band, __func__);
if (channel_index == -1)
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Channel %d of Band %d in %s\n", channel,
- band, __func__);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Channel %d of Band %d in %s\n", channel,
+ band, __func__);
return channel_index;
}
@@ -1655,9 +1651,9 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
if (!_rtl8812ae_get_integer_from_string((char *)pchannel, &channel) ||
!_rtl8812ae_get_integer_from_string((char *)ppower_limit,
&power_limit)) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Illegal index of pwr_lmt table [chnl %d][val %d]\n",
- channel, power_limit);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Illegal index of pwr_lmt table [chnl %d][val %d]\n",
+ channel, power_limit);
}
power_limit = power_limit > MAX_POWER_INDEX ?
@@ -1717,10 +1713,10 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
[rate_section][channel_index][RF90_PATH_A] =
power_limit;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "2.4G [regula %d][bw %d][sec %d][chnl %d][val %d]\n",
- regulation, bandwidth, rate_section, channel_index,
- rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "2.4G [regula %d][bw %d][sec %d][chnl %d][val %d]\n",
+ regulation, bandwidth, rate_section, channel_index,
+ rtlphy->txpwr_limit_2_4g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A]);
} else if (_rtl8812ae_eq_n_byte(pband, (u8 *)("5G"), 2)) {
ret = _rtl8812ae_phy_get_chnl_idx_of_txpwr_lmt(hw,
@@ -1740,14 +1736,14 @@ static void _rtl8812ae_phy_set_txpower_limit(struct ieee80211_hw *hw, u8 *pregul
rtlphy->txpwr_limit_5g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A] = power_limit;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "5G: [regul %d][bw %d][sec %d][chnl %d][val %d]\n",
- regulation, bandwidth, rate_section, channel,
- rtlphy->txpwr_limit_5g[regulation][bandwidth]
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "5G: [regul %d][bw %d][sec %d][chnl %d][val %d]\n",
+ regulation, bandwidth, rate_section, channel,
+ rtlphy->txpwr_limit_5g[regulation][bandwidth]
[rate_section][channel_index][RF90_PATH_A]);
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Cannot recognize the band info in %s\n", pband);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Cannot recognize the band info in %s\n", pband);
return;
}
}
@@ -1779,8 +1775,7 @@ static void _rtl8821ae_phy_read_and_config_txpwr_lmt(struct ieee80211_hw *hw)
array = RTL8821AE_TXPWR_LMT;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
for (i = 0; i < array_len; i += 7) {
u8 *regulation = array[i];
@@ -1812,7 +1807,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_PHY_REG);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("Write BB Reg Fail!!\n");
return false;
}
@@ -1821,7 +1816,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_pgheaderfile(hw,
BASEBAND_CONFIG_PHY_REG);
}
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("BB_PG Reg Fail!!\n");
return false;
}
@@ -1835,7 +1830,7 @@ static bool _rtl8821ae_phy_bb8821a_config_parafile(struct ieee80211_hw *hw)
rtstatus = _rtl8821ae_phy_config_bb_with_headerfile(hw,
BASEBAND_CONFIG_AGC_TAB);
- if (rtstatus != true) {
+ if (!rtstatus) {
pr_err("AGC Table Fail\n");
return false;
}
@@ -1903,7 +1898,7 @@ static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
u32 arraylength;
u32 *ptrarray;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read MAC_REG_Array\n");
if (rtlhal->hw_type == HARDWARE_TYPE_RTL8821AE) {
arraylength = RTL8821AE_MAC_1T_ARRAYLEN;
ptrarray = RTL8821AE_MAC_REG_ARRAY;
@@ -1911,8 +1906,8 @@ static bool _rtl8821ae_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
arraylength = RTL8812AE_MAC_1T_ARRAYLEN;
ptrarray = RTL8812AE_MAC_REG_ARRAY;
}
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Img: MAC_REG_ARRAY LEN %d\n", arraylength);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Img: MAC_REG_ARRAY LEN %d\n", arraylength);
return __rtl8821ae_phy_config_with_headerfile(hw,
ptrarray, arraylength, rtl_write_byte_with_val32);
@@ -1978,22 +1973,22 @@ static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr);
if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
band = BAND_ON_2_4G;
}
if (rfpath >= MAX_RF_PATH) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
rfpath = MAX_RF_PATH - 1;
}
if (txnum >= MAX_RF_PATH) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
txnum = MAX_RF_PATH - 1;
}
rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
- band, rfpath, txnum, rate_section,
- rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
+ band, rfpath, txnum, rate_section,
+ rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section]);
}
static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
@@ -2015,8 +2010,8 @@ static bool _rtl8821ae_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
}
if (configtype != BASEBAND_CONFIG_PHY_REG) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "configtype != BaseBand_Config_PHY_REG\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
return true;
}
for (i = 0; i < arraylen; i += 6) {
@@ -2082,9 +2077,9 @@ bool rtl8812ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_array_table_a = RTL8812AE_RADIOA_ARRAY;
radioa_arraylen_b = RTL8812AE_RADIOB_1TARRAYLEN;
radioa_array_table_b = RTL8812AE_RADIOB_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen_a);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2113,9 +2108,9 @@ bool rtl8821ae_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
radioa_arraylen = RTL8821AE_RADIOA_1TARRAYLEN;
radioa_array_table = RTL8821AE_RADIOA_ARRAY;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Radio_A:RTL8821AE_RADIOA_ARRAY %d\n", radioa_arraylen);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Radio No %x\n", rfpath);
switch (rfpath) {
case RF90_PATH_A:
return __rtl8821ae_phy_config_with_headerfile(hw,
@@ -2146,21 +2141,21 @@ void rtl8821ae_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
rtlphy->default_initialgain[3] =
(u8)rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
rtlphy->framesync = (u8)rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR3, MASKBYTE0);
rtlphy->framesync_c34 = rtl_get_bbreg(hw,
ROFDM0_RXDETECTOR2, MASKDWORD);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
}
static void phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
@@ -2427,14 +2422,14 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
rate_section = 5;
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
"Wrong rate 0x%x\n", rate);
break;
}
if (band_temp == BAND_ON_5G && rate_section == 0)
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Wrong rate 0x%x: No CCK in 5G Band\n", rate);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Wrong rate 0x%x: No CCK in 5G Band\n", rate);
/*workaround for wrong index combination to obtain tx power limit,
OFDM only exists in BW 20M*/
@@ -2459,10 +2454,10 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
if (band_temp == -1 || regulation == -1 || bandwidth_temp == -1 ||
rate_section == -1 || channel_temp == -1) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnl %d]\n",
- band_temp, regulation, bandwidth_temp, rf_path,
- rate_section, channel_temp);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Wrong index value to access power limit table [band %d][regulation %d][bandwidth %d][rf_path %d][rate_section %d][chnl %d]\n",
+ band_temp, regulation, bandwidth_temp, rf_path,
+ rate_section, channel_temp);
return MAX_POWER_INDEX;
}
@@ -2496,8 +2491,8 @@ static s8 _rtl8812ae_phy_get_txpower_limit(struct ieee80211_hw *hw,
rtlphy->txpwr_limit_5g[regu][chnl]
[sec][chnl][rf_path];
} else {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "No power limit table of the specified band\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "No power limit table of the specified band\n");
}
return power_limit;
}
@@ -2605,7 +2600,7 @@ static s8 _rtl8821ae_phy_get_txpower_by_rate(struct ieee80211_hw *hw,
else
tx_pwr_diff = tx_pwr_diff > limit ? limit : tx_pwr_diff;
}
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Maximum power by rate %d, final power by rate %d\n",
limit, tx_pwr_diff);
}
@@ -2628,7 +2623,7 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
(channel > 14 || channel < 1)) ||
((rtlhal->current_bandtype == BAND_ON_5G) && (channel <= 14))) {
index = 0;
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Illegal channel!!\n");
}
@@ -2639,7 +2634,7 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
else if (DESC_RATE6M <= rate)
txpower = rtlefuse->txpwrlevel_ht40_1s[path][index];
else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "invalid rate\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "invalid rate\n");
if (DESC_RATE6M <= rate && rate <= DESC_RATE54M &&
!RTL8821AE_RX_HAL_IS_CCK_RATE(rate))
@@ -2673,8 +2668,8 @@ static u8 _rtl8821ae_get_txpower_index(struct ieee80211_hw *hw, u8 path,
if (DESC_RATE6M <= rate)
txpower = rtlefuse->txpwr_5g_bw40base[path][index];
else
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_WARNING,
- "INVALID Rate.\n");
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_WARNING,
+ "INVALID Rate.\n");
if (DESC_RATE6M <= rate && rate <= DESC_RATE54M &&
!RTL8821AE_RX_HAL_IS_CCK_RATE(rate))
@@ -2940,7 +2935,7 @@ static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw,
MASKBYTE3, power_index);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
"Invalid Rate!!\n");
break;
}
@@ -3139,13 +3134,13 @@ static void _rtl8821ae_phy_set_txpower_index(struct ieee80211_hw *hw,
MASKBYTE3, power_index);
break;
default:
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid Rate!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid Rate!!\n");
break;
}
} else {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Invalid RFPath!!\n");
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Invalid RFPath!!\n");
}
}
@@ -3352,7 +3347,7 @@ static void _rtl8821ae_phy_set_reg_bw(struct rtl_priv *rtlpriv, u8 bw)
rtl_write_word(rtlpriv, REG_TRXPTCL_CTL, tmp & 0xFF7F);
break;
default:
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "unknown Bandwidth: 0x%x\n", bw);
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, "unknown Bandwidth: 0x%x\n", bw);
break;
}
}
@@ -3403,11 +3398,11 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
u8 sub_chnl = 0;
u8 l1pk_val = 0;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "Switch to %s bandwidth\n",
- (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
- "20MHz" :
- (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ?
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "Switch to %s bandwidth\n",
+ (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" :
+ (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40 ?
"40MHz" : "80MHz")));
_rtl8821ae_phy_set_reg_bw(rtlpriv, rtlphy->current_chan_bw);
@@ -3477,7 +3472,7 @@ void rtl8821ae_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
rtl8821ae_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
rtlphy->set_bwmode_inprogress = false;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD, "\n");
}
void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
@@ -3494,8 +3489,8 @@ void rtl8821ae_phy_set_bw_mode(struct ieee80211_hw *hw,
if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
rtl8821ae_phy_set_bw_mode_callback(hw);
else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "FALSE driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "FALSE driver sleep or unload\n");
rtlphy->set_bwmode_inprogress = false;
rtlphy->current_chan_bw = tmp_bw;
}
@@ -3510,8 +3505,8 @@ void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
u8 path;
u32 data;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d\n", rtlphy->current_channel);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
if (is_hal_stop(rtlhal))
return;
@@ -3553,7 +3548,7 @@ void rtl8821ae_phy_sw_chnl_callback(struct ieee80211_hw *hw)
}
}
}
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
}
u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
@@ -3570,8 +3565,8 @@ u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
return 0;
if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
- RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
- "sw_chnl_inprogress false driver sleep or unload\n");
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
return 0;
}
while (rtlphy->lck_inprogress && timecount < timeout) {
@@ -3588,16 +3583,16 @@ u8 rtl8821ae_phy_sw_chnl(struct ieee80211_hw *hw)
if (channel == 0)
channel = 1;
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
- "switch to channel%d, band type is %d\n",
- rtlphy->current_channel, rtlhal->current_bandtype);
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d, band type is %d\n",
+ rtlphy->current_channel, rtlhal->current_bandtype);
rtl8821ae_phy_sw_chnl_callback(hw);
rtl8821ae_dm_clear_txpower_tracking_state(hw);
rtl8821ae_phy_set_txpower_level(hw, rtlphy->current_channel);
- RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "\n");
rtlphy->sw_chnl_inprogress = false;
return 1;
}
@@ -3638,7 +3633,7 @@ static void _rtl8821ae_iqk_backup_macbb(struct ieee80211_hw *hw,
for (i = 0; i < mac_bb_num; i++)
macbb_backup[i] = rtl_read_dword(rtlpriv, backup_macbb_reg[i]);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupMacBB Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "BackupMacBB Success!!!!\n");
}
static void _rtl8821ae_iqk_backup_afe(struct ieee80211_hw *hw, u32 *afe_backup,
@@ -3651,7 +3646,7 @@ static void _rtl8821ae_iqk_backup_afe(struct ieee80211_hw *hw, u32 *afe_backup,
/*Save AFE Parameters */
for (i = 0; i < afe_num; i++)
afe_backup[i] = rtl_read_dword(rtlpriv, backup_afe_REG[i]);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupAFE Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "BackupAFE Success!!!!\n");
}
static void _rtl8821ae_iqk_backup_rf(struct ieee80211_hw *hw, u32 *rfa_backup,
@@ -3669,7 +3664,7 @@ static void _rtl8821ae_iqk_backup_rf(struct ieee80211_hw *hw, u32 *rfa_backup,
rfb_backup[i] = rtl_get_rfreg(hw, RF90_PATH_B, backup_rf_reg[i],
BMASKDWORD);
}
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "BackupRF Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "BackupRF Success!!!!\n");
}
static void _rtl8821ae_iqk_configure_mac(
@@ -3698,13 +3693,13 @@ static void _rtl8821ae_iqk_tx_fill_iqc(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
rtl_set_bbreg(hw, 0xccc, 0x000007ff, tx_y);
rtl_set_bbreg(hw, 0xcd4, 0x000007ff, tx_x);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
- tx_x, tx_y);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "0xcd4 = %x;;0xccc = %x ====>fill to IQC\n",
- rtl_get_bbreg(hw, 0xcd4, 0x000007ff),
- rtl_get_bbreg(hw, 0xccc, 0x000007ff));
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "TX_X = %x;;TX_Y = %x =====> fill to IQC\n",
+ tx_x, tx_y);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "0xcd4 = %x;;0xccc = %x ====>fill to IQC\n",
+ rtl_get_bbreg(hw, 0xcd4, 0x000007ff),
+ rtl_get_bbreg(hw, 0xccc, 0x000007ff));
break;
default:
break;
@@ -3720,12 +3715,12 @@ static void _rtl8821ae_iqk_rx_fill_iqc(struct ieee80211_hw *hw,
rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */
rtl_set_bbreg(hw, 0xc10, 0x000003ff, rx_x>>1);
rtl_set_bbreg(hw, 0xc10, 0x03ff0000, rx_y>>1);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "rx_x = %x;;rx_y = %x ====>fill to IQC\n",
- rx_x>>1, rx_y>>1);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "0xc10 = %x ====>fill to IQC\n",
- rtl_read_dword(rtlpriv, 0xc10));
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "rx_x = %x;;rx_y = %x ====>fill to IQC\n",
+ rx_x >> 1, rx_y >> 1);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "0xc10 = %x ====>fill to IQC\n",
+ rtl_read_dword(rtlpriv, 0xc10));
break;
default:
break;
@@ -3750,9 +3745,9 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
int i, k, vdf_y[3], vdf_x[3],
ii, dx = 0, dy = 0, tx_finish = 0, rx_finish = 0;
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "BandWidth = %d.\n",
- rtlphy->current_chan_bw);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "BandWidth = %d.\n",
+ rtlphy->current_chan_bw);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80)
vdf_enable = true;
@@ -3856,7 +3851,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
rtl_write_dword(rtlpriv, 0xc8c, 0x00163e96);
if (vdf_enable == 1) {
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "VDF_enable\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "VDF_enable\n");
for (k = 0; k <= 2; k++) {
switch (k) {
case 0:
@@ -3870,9 +3865,9 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0);
break;
case 2:
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
"vdf_y[1] = %x;;;vdf_y[0] = %x\n", vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
"vdf_x[1] = %x;;;vdf_x[0] = %x\n", vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff);
tx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
tx_dt[cal] = ((16*tx_dt[cal])*10000/15708);
@@ -3992,7 +3987,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
if (vdf_enable == 1) {
rtl_set_bbreg(hw, 0xce8, BIT(31), 0x0); /* TX VDF Disable */
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RXVDF Start\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "RXVDF Start\n");
for (k = 0; k <= 2; k++) {
/* ====== RX mode TXK (RXK Step 1) ====== */
rtl_set_bbreg(hw, 0x82c, BIT(31), 0x0); /* [31] = 0 --> Page C */
@@ -4029,14 +4024,17 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
break;
case 2:
{
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "VDF_Y[1] = %x;;;VDF_Y[0] = %x\n",
- vdf_y[1]>>21 & 0x00007ff, vdf_y[0]>>21 & 0x00007ff);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "VDF_X[1] = %x;;;VDF_X[0] = %x\n",
- vdf_x[1]>>21 & 0x00007ff, vdf_x[0]>>21 & 0x00007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "VDF_Y[1] = %x;;;VDF_Y[0] = %x\n",
+ vdf_y[1] >> 21 & 0x00007ff,
+ vdf_y[0] >> 21 & 0x00007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "VDF_X[1] = %x;;;VDF_X[0] = %x\n",
+ vdf_x[1] >> 21 & 0x00007ff,
+ vdf_x[0] >> 21 & 0x00007ff);
rx_dt[cal] = (vdf_y[1]>>20)-(vdf_y[0]>>20);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "Rx_dt = %d\n", rx_dt[cal]);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "Rx_dt = %d\n",
+ rx_dt[cal]);
rx_dt[cal] = ((16*rx_dt[cal])*10000/13823);
rx_dt[cal] = (rx_dt[cal] >> 1)+(rx_dt[cal] & BIT(0));
rtl_write_dword(rtlpriv, 0xc80, 0x18008c20);/* TX_TONE_idx[9:0], TxK_Mask[29] TX_Tone = 16 */
@@ -4098,10 +4096,10 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
tx_x0_rxk[cal] = tx_x0[cal];
tx_y0_rxk[cal] = tx_y0[cal];
tx0iqkok = true;
- RT_TRACE(rtlpriv,
- COMP_IQK,
- DBG_LOUD,
- "RXK Step 1 fail\n");
+ rtl_dbg(rtlpriv,
+ COMP_IQK,
+ DBG_LOUD,
+ "RXK Step 1 fail\n");
}
/* ====== RX IQK ====== */
@@ -4257,8 +4255,8 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
tx_x0_rxk[cal] = tx_x0[cal];
tx_y0_rxk[cal] = tx_y0[cal];
tx0iqkok = true;
- RT_TRACE(rtlpriv, COMP_IQK,
- DBG_LOUD, "1");
+ rtl_dbg(rtlpriv, COMP_IQK,
+ DBG_LOUD, "1");
}
/* ====== RX IQK ====== */
@@ -4352,20 +4350,20 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
/* FillIQK Result */
switch (path) {
case RF90_PATH_A:
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "========Path_A =======\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "========Path_A =======\n");
if (tx_average == 0)
break;
for (i = 0; i < tx_average; i++) {
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i,
- (tx_x0_rxk[i])>>21&0x000007ff, i,
- (tx_y0_rxk[i])>>21&0x000007ff);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i,
- (tx_x0[i])>>21&0x000007ff, i,
- (tx_y0[i])>>21&0x000007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "TX_X0_RXK[%d] = %x ;; TX_Y0_RXK[%d] = %x\n", i,
+ (tx_x0_rxk[i]) >> 21 & 0x000007ff, i,
+ (tx_y0_rxk[i]) >> 21 & 0x000007ff);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "TX_X0[%d] = %x ;; TX_Y0[%d] = %x\n", i,
+ (tx_x0[i]) >> 21 & 0x000007ff, i,
+ (tx_y0[i]) >> 21 & 0x000007ff);
}
for (i = 0; i < tx_average; i++) {
for (ii = i+1; ii < tx_average; ii++) {
@@ -4393,7 +4391,7 @@ static void _rtl8821ae_iqk_tx(struct ieee80211_hw *hw, enum radio_path path)
break;
for (i = 0; i < rx_average; i++)
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
"RX_X0[%d] = %x ;; RX_Y0[%d] = %x\n", i,
(rx_x0[i])>>21&0x000007ff, i,
(rx_y0[i])>>21&0x000007ff);
@@ -4439,8 +4437,8 @@ static void _rtl8821ae_iqk_restore_rf(struct ieee80211_hw *hw,
switch (path) {
case RF90_PATH_A:
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "RestoreRF Path A Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "RestoreRF Path A Success!!!!\n");
break;
default:
break;
@@ -4468,7 +4466,7 @@ static void _rtl8821ae_iqk_restore_afe(struct ieee80211_hw *hw,
rtl_write_dword(rtlpriv, 0xcc4, 0x20040000);
rtl_write_dword(rtlpriv, 0xcc8, 0x20000000);
rtl_write_dword(rtlpriv, 0xcb8, 0x0);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreAFE Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreAFE Success!!!!\n");
}
static void _rtl8821ae_iqk_restore_macbb(struct ieee80211_hw *hw,
@@ -4483,7 +4481,7 @@ static void _rtl8821ae_iqk_restore_macbb(struct ieee80211_hw *hw,
/* Reload MacBB Parameters */
for (i = 0; i < macbb_num; i++)
rtl_write_dword(rtlpriv, backup_macbb_reg[i], macbb_backup[i]);
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreMacBB Success!!!!\n");
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD, "RestoreMacBB Success!!!!\n");
}
#undef MACBB_REG_NUM
@@ -4531,7 +4529,7 @@ static void _rtl8821ae_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool main)
struct rtl_priv *rtlpriv = rtl_priv(hw);
/* struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); */
/* struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); */
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
if (main)
rtl_set_bbreg(hw, RA_RFE_PINMUX + 4, BIT(29) | BIT(28), 0x1);
@@ -4579,11 +4577,11 @@ void rtl8821ae_reset_iqk_result(struct ieee80211_hw *hw)
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 i;
- RT_TRACE(rtlpriv, COMP_IQK, DBG_LOUD,
- "rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n",
- (int)(sizeof(rtlphy->iqk_matrix) /
- sizeof(struct iqk_matrix_regs)),
- IQK_MATRIX_SETTINGS_NUM);
+ rtl_dbg(rtlpriv, COMP_IQK, DBG_LOUD,
+ "rtl8812ae_dm_reset_iqk_result:: settings regs %d default regs %d\n",
+ (int)(sizeof(rtlphy->iqk_matrix) /
+ sizeof(struct iqk_matrix_regs)),
+ IQK_MATRIX_SETTINGS_NUM);
for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
rtlphy->iqk_matrix[i].value[0][0] = 0x100;
@@ -4630,20 +4628,20 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
struct rtl_phy *rtlphy = &rtlpriv->phy;
bool postprocessing = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
do {
switch (iotype) {
case IO_CMD_RESUME_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan.\n");
postprocessing = true;
break;
case IO_CMD_PAUSE_BAND0_DM_BY_SCAN:
case IO_CMD_PAUSE_BAND1_DM_BY_SCAN:
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan.\n");
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan.\n");
postprocessing = true;
break;
default:
@@ -4659,7 +4657,7 @@ bool rtl8821ae_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
return false;
}
rtl8821ae_phy_set_io(hw);
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "IO Type(%#x)\n", iotype);
return true;
}
@@ -4669,9 +4667,9 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
struct rtl_phy *rtlphy = &rtlpriv->phy;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
switch (rtlphy->current_io_type) {
case IO_CMD_RESUME_DM_BY_SCAN:
if (rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
@@ -4696,8 +4694,8 @@ static void rtl8821ae_phy_set_io(struct ieee80211_hw *hw)
break;
}
rtlphy->set_io_inprogress = false;
- RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
- "(%#x)\n", rtlphy->current_io_type);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "(%#x)\n", rtlphy->current_io_type);
}
static void rtl8821ae_phy_set_rf_on(struct ieee80211_hw *hw)
@@ -4731,18 +4729,17 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
do {
initializecount++;
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic enable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
RT_CLEAR_PS_LEVEL(ppsc,
RT_RF_OFF_LEVL_HALT_NIC);
} else {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "Set ERFON sleeped:%d ms\n",
- jiffies_to_msecs(jiffies -
- ppsc->
- last_sleep_jiffies));
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "Set ERFON slept:%d ms\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies));
ppsc->last_awake_jiffies = jiffies;
rtl8821ae_phy_set_rf_on(hw);
}
@@ -4763,27 +4760,27 @@ static bool _rtl8821ae_phy_set_rf_power_state(struct ieee80211_hw *hw,
queue_id++;
continue;
} else {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
- (i + 1), queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "eRf Off/Sleep: %d times TcbBusyQueue[%d] =%d before doze!\n",
+ (i + 1), queue_id,
+ skb_queue_len(&ring->queue));
udelay(10);
i++;
}
if (i >= MAX_DOZE_WAITING_TIMES_9x) {
- RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
- "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
- MAX_DOZE_WAITING_TIMES_9x,
- queue_id,
- skb_queue_len(&ring->queue));
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "\n ERFSLEEP: %d times TcbBusyQueue[%d] = %d !\n",
+ MAX_DOZE_WAITING_TIMES_9x,
+ queue_id,
+ skb_queue_len(&ring->queue));
break;
}
}
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
- RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
- "IPS Set eRf nic disable\n");
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
rtl_ps_disable_nic(hw);
RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
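
Every hunk above is the same mechanical conversion: RT_TRACE(...) becomes rtl_dbg(...) with continuation lines re-indented to the opening parenthesis, and hard-coded function names in format strings replaced by %s/__func__ so a later rename cannot leave stale text in the logs. As orientation only, a minimal sketch of what the rename amounts to; the backend helper name _rtl_dbg_trace and the config guard are assumptions for illustration, not a quote of debug.h:

    /* Illustrative sketch, not the real debug.h definition. */
    #ifdef CONFIG_RTLWIFI_DEBUG
    #define rtl_dbg(rtlpriv, comp, level, fmt, ...)            \
            _rtl_dbg_trace(rtlpriv, comp, level, fmt, ##__VA_ARGS__)
    #else
    #define rtl_dbg(rtlpriv, comp, level, fmt, ...)            \
            do {} while (0)
    #endif

Because the macro keeps RT_TRACE's argument order, the conversion is a pure rename plus re-indent, which is why the hunks are so uniform.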
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
index a6e56872e063..e339f2383e6d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/rf.c
@@ -428,13 +428,13 @@ static bool _rtl8821ae_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
}
if (!rtstatus) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
- "Radio[%d] Fail!!\n", rfpath);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
return false;
}
}
/*put arrays in dm.c*/
- RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "\n");
return rtstatus;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
index cd809c992245..9d6f8dcbf2d6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c
@@ -391,7 +391,7 @@ static bool rtl8821ae_get_rxdesc_is_ht(struct ieee80211_hw *hw, __le32 *pdesc)
rx_rate = get_rx_desc_rxmcs(pdesc);
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
if ((rx_rate >= DESC_RATEMCS0) && (rx_rate <= DESC_RATEMCS15))
return true;
@@ -405,7 +405,7 @@ static bool rtl8821ae_get_rxdesc_is_vht(struct ieee80211_hw *hw, __le32 *pdesc)
rx_rate = get_rx_desc_rxmcs(pdesc);
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD, "rx_rate=0x%02x.\n", rx_rate);
if (rx_rate >= DESC_RATEVHT1SS_MCS0)
return true;
@@ -461,12 +461,12 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
status->vht_nss = rtl8821ae_get_rx_vht_nss(hw, pdesc);
status->is_cck = RTL8821AE_RX_HAL_IS_CCK_RATE(status->rate);
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "rx_packet_bw=%s,is_ht %d, is_vht %d, vht_nss=%d,is_short_gi %d.\n",
- (status->rx_packet_bw == 2) ? "80M" :
- (status->rx_packet_bw == 1) ? "40M" : "20M",
- status->is_ht, status->is_vht, status->vht_nss,
- status->is_short_gi);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "rx_packet_bw=%s,is_ht %d, is_vht %d, vht_nss=%d,is_short_gi %d.\n",
+ (status->rx_packet_bw == 2) ? "80M" :
+ (status->rx_packet_bw == 1) ? "40M" : "20M",
+ status->is_ht, status->is_vht, status->vht_nss,
+ status->is_short_gi);
if (get_rx_status_desc_rpt_sel(pdesc))
status->packet_report_type = C2H_PACKET;
@@ -483,9 +483,9 @@ bool rtl8821ae_rx_query_desc(struct ieee80211_hw *hw,
wake_match = 0;
if (wake_match)
- RT_TRACE(rtlpriv, COMP_RXDESC, DBG_LOUD,
- "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
- wake_match);
+ rtl_dbg(rtlpriv, COMP_RXDESC, DBG_LOUD,
+ "GGGGGGGGGGGGGet Wakeup Packet!! WakeMatch=%d\n",
+ wake_match);
rx_status->freq = hw->conf.chandef.chan->center_freq;
rx_status->band = hw->conf.chandef.chan->band;
@@ -558,9 +558,10 @@ static u8 rtl8821ae_bw_mapping(struct ieee80211_hw *hw,
struct rtl_phy *rtlphy = &rtlpriv->phy;
u8 bw_setting_of_desc = 0;
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "rtl8821ae_bw_mapping, current_chan_bw %d, packet_bw %d\n",
- rtlphy->current_chan_bw, ptcb_desc->packet_bw);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "%s, current_chan_bw %d, packet_bw %d\n",
+ __func__,
+ rtlphy->current_chan_bw, ptcb_desc->packet_bw);
if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_80)
@@ -602,8 +603,9 @@ static u8 rtl8821ae_sc_mapping(struct ieee80211_hw *hw,
sc_setting_of_desc =
VHT_DATA_SC_40_UPPER_OF_80MHZ;
else
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_LOUD,
+ "%s: Not Correct Primary40MHz Setting\n",
+ __func__);
} else {
if ((mac->cur_40_prime_sc ==
HAL_PRIME_CHNL_OFFSET_LOWER) &&
@@ -630,8 +632,9 @@ static u8 rtl8821ae_sc_mapping(struct ieee80211_hw *hw,
sc_setting_of_desc =
VHT_DATA_SC_20_UPPERST_OF_80MHZ;
else
- RT_TRACE(rtlpriv, COMP_SEND, DBG_LOUD,
- "rtl8821ae_sc_mapping: Not Correct Primary40MHz Setting\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_LOUD,
+ "%s: Not Correct Primary40MHz Setting\n",
+ __func__);
}
} else if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
if (ptcb_desc->packet_bw == HT_CHANNEL_WIDTH_20_40) {
@@ -690,11 +693,11 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
memset(skb->data, 0, EM_HDR_LEN);
}
buf_len = skb->len;
- mapping = pci_map_single(rtlpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ mapping = dma_map_single(&rtlpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, sizeof(struct tx_desc_8821ae));
@@ -708,9 +711,9 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
set_tx_desc_offset(pdesc, USB_HWDESC_HEADER_LEN +
EM_HDR_LEN);
if (ptcb_desc->empkt_num) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Insert 8 byte.pTcb->EMPktNum:%d\n",
- ptcb_desc->empkt_num);
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Insert 8 byte.pTcb->EMPktNum:%d\n",
+ ptcb_desc->empkt_num);
rtl8821ae_insert_emcontent(ptcb_desc,
(__le32 *)skb->data);
}
@@ -789,8 +792,8 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
if (ieee80211_is_data_qos(fc)) {
if (mac->rdg_en) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "Enable RDG function.\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function.\n");
set_tx_desc_rdg_enable(pdesc, 1);
set_tx_desc_htc(pdesc, 1);
}
@@ -822,7 +825,7 @@ void rtl8821ae_tx_fill_desc(struct ieee80211_hw *hw,
}
rtl8821ae_dm_set_tx_ant_by_tx_info(hw, pdesc8, ptcb_desc->mac_id);
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "\n");
}
void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
@@ -834,13 +837,12 @@ void rtl8821ae_tx_fill_cmddesc(struct ieee80211_hw *hw,
u8 fw_queue = QSLT_BEACON;
__le32 *pdesc = (__le32 *)pdesc8;
- dma_addr_t mapping = pci_map_single(rtlpci->pdev,
- skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping = dma_map_single(&rtlpci->pdev->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
- if (pci_dma_mapping_error(rtlpci->pdev, mapping)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
- "DMA mapping error\n");
+ if (dma_mapping_error(&rtlpci->pdev->dev, mapping)) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "DMA mapping error\n");
return;
}
clear_pci_tx_desc_content(pdesc, TX_DESC_SIZE);
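
The trx.c hunks also move off the deprecated pci_map_single()/pci_dma_mapping_error() wrappers onto the generic DMA API, keyed by &pdev->dev rather than the pci_dev itself. A minimal sketch of the pattern; example_map_tx() is an illustrative name, not driver code:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Map a frame for device reads; on error the returned handle must
     * never reach a descriptor. `dev` is &pdev->dev. */
    static int example_map_tx(struct device *dev, struct sk_buff *skb,
                              dma_addr_t *mapping)
    {
            *mapping = dma_map_single(dev, skb->data, skb->len,
                                      DMA_TO_DEVICE);
            if (dma_mapping_error(dev, *mapping))
                    return -ENOMEM;
            return 0;
    }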
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index d05e709536ea..06e073defad6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -259,15 +259,15 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
? USB_HIGH_SPEED_BULK_SIZE
: USB_FULL_SPEED_BULK_SIZE;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
- rtlusb->max_bulk_out_size);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "USB Max Bulk-out Size=%d\n",
+ rtlusb->max_bulk_out_size);
for (i = 0; i < __RTL_TXQ_NUM; i++) {
u32 ep_num = rtlusb->ep_map.ep_mapping[i];
if (!ep_num) {
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "Invalid endpoint map setting!\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "Invalid endpoint map setting!\n");
return -EINVAL;
}
}
@@ -289,7 +289,7 @@ static int _rtl_usb_init_tx(struct ieee80211_hw *hw)
return 0;
}
-static void _rtl_rx_work(unsigned long param);
+static void _rtl_rx_work(struct tasklet_struct *t);
static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
{
@@ -310,8 +310,8 @@ static int _rtl_usb_init_rx(struct ieee80211_hw *hw)
init_usb_anchor(&rtlusb->rx_cleanup_urbs);
skb_queue_head_init(&rtlusb->rx_queue);
- rtlusb->rx_work_tasklet.func = _rtl_rx_work;
- rtlusb->rx_work_tasklet.data = (unsigned long)rtlusb;
+ tasklet_setup(&rtlusb->rx_work_tasklet, _rtl_rx_work);
return 0;
}
@@ -337,10 +337,10 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
else if (usb_endpoint_dir_out(pep_desc))
rtlusb->out_ep_nums++;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
- "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
- pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
- pep_desc->bInterval);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+ "USB EP(0x%02x), MaxPacketSize=%d, Interval=%d\n",
+ pep_desc->bEndpointAddress, pep_desc->wMaxPacketSize,
+ pep_desc->bInterval);
}
if (rtlusb->in_ep_nums < rtlpriv->cfg->usb_interface_cfg->in_ep_num) {
pr_err("Too few input end points found\n");
@@ -528,9 +528,9 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
#define __RX_SKB_MAX_QUEUED 64
-static void _rtl_rx_work(unsigned long param)
+static void _rtl_rx_work(struct tasklet_struct *t)
{
- struct rtl_usb *rtlusb = (struct rtl_usb *)param;
+ struct rtl_usb *rtlusb = from_tasklet(rtlusb, t, rx_work_tasklet);
struct ieee80211_hw *hw = usb_get_intfdata(rtlusb->intf);
struct sk_buff *skb;
@@ -933,7 +933,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw,
memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
if (ieee80211_is_auth(fc)) {
- RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
}
if (rtlpriv->psc.sw_ps_enabled) {
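
The usb.c changes are the new-style tasklet conversion: tasklet_setup() registers a callback that receives the tasklet itself, and from_tasklet() (a container_of() wrapper) recovers the embedding structure, replacing the old unsigned long data cookie. A self-contained sketch with illustrative names:

    #include <linux/interrupt.h>

    struct example_usb {
            struct tasklet_struct rx_work_tasklet;
            /* ... receive queue, etc. ... */
    };

    /* New-style callback: the tasklet pointer comes in; from_tasklet()
     * walks back to the owning structure. */
    static void example_rx_work(struct tasklet_struct *t)
    {
            struct example_usb *eu = from_tasklet(eu, t, rx_work_tasklet);

            /* ... drain eu's receive queue ... */
    }

    static void example_init_rx(struct example_usb *eu)
    {
            tasklet_setup(&eu->rx_work_tasklet, example_rx_work);
    }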
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 13421cf2d201..fdccfd29fd61 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1966,7 +1966,6 @@ struct rtl_efuse {
u8 txpwr_safetyflag; /* Band edge enable flag */
u16 eeprom_txpowerdiff;
- u8 legacy_httxpowerdiff; /* Legacy to HT rate power diff */
u8 antenna_txpwdiff[3];
u8 eeprom_regulatory;
@@ -2936,9 +2935,6 @@ enum bt_radio_shared {
#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \
(ppsc->cur_ps_level |= _ps_flg)
-#define container_of_dwork_rtl(x, y, z) \
- container_of(to_delayed_work(x), y, z)
-
#define FILL_OCTET_STRING(_os, _octet, _len) \
(_os).octet = (u8 *)(_octet); \
(_os).length = (_len);
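
With container_of_dwork_rtl() gone from wifi.h, callers open-code the same two-step lookup: to_delayed_work() maps the work_struct to its delayed_work, then container_of() maps that to the private structure. A sketch with illustrative struct and field names:

    #include <linux/workqueue.h>

    struct example_priv {
            struct delayed_work watchdog_wq;
    };

    static void example_watchdog(struct work_struct *work)
    {
            struct example_priv *priv =
                    container_of(to_delayed_work(work), struct example_priv,
                                 watchdog_wq);

            /* ... periodic work on priv, then optionally re-arm ... */
    }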
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index f769c982cc91..3852c4f0ac0b 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -229,7 +229,8 @@ static int rtw_debugfs_get_rsvd_page(struct seq_file *m, void *v)
if (!buf)
return -ENOMEM;
- ret = rtw_dump_drv_rsvd_page(rtwdev, offset, buf_size, (u32 *)buf);
+ ret = rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, offset,
+ buf_size, (u32 *)buf);
if (ret) {
rtw_err(rtwdev, "failed to dump rsvd page\n");
vfree(buf);
@@ -427,12 +428,11 @@ static int rtw_debug_get_mac_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- u32 val;
u32 page = debugfs_priv->cb_data;
int i, n;
int max = 0xff;
- val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ rtw_read32(rtwdev, debugfs_priv->cb_data);
for (n = 0; n <= max; ) {
seq_printf(m, "\n%8.8x ", n + page);
for (i = 0; i < 4 && n <= max; i++, n += 4)
@@ -447,12 +447,11 @@ static int rtw_debug_get_bb_page(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- u32 val;
u32 page = debugfs_priv->cb_data;
int i, n;
int max = 0xff;
- val = rtw_read32(rtwdev, debugfs_priv->cb_data);
+ rtw_read32(rtwdev, debugfs_priv->cb_data);
for (n = 0; n <= max; ) {
seq_printf(m, "\n%8.8x ", n + page);
for (i = 0; i < 4 && n <= max; i++, n += 4)
@@ -545,6 +544,28 @@ static void rtw_print_rate(struct seq_file *m, u8 rate)
}
}
+#define case_REGD(src) \
+ case RTW_REGD_##src: return #src
+
+static const char *rtw_get_regd_string(u8 regd)
+{
+ switch (regd) {
+ case_REGD(FCC);
+ case_REGD(MKK);
+ case_REGD(ETSI);
+ case_REGD(IC);
+ case_REGD(KCC);
+ case_REGD(ACMA);
+ case_REGD(CHILE);
+ case_REGD(UKRAINE);
+ case_REGD(MEXICO);
+ case_REGD(CN);
+ case_REGD(WW);
+ default:
+ return "Unknown";
+ }
+}
+
static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
{
struct rtw_debugfs_priv *debugfs_priv = m->private;
@@ -556,6 +577,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
u8 ch = hal->current_channel;
u8 regd = rtwdev->regd.txpwr_regd;
+ seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd));
seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n",
"path", "rate", "pwr", "", "base", "", "byr", "lmt", "rem");
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 63b00bc19000..042015bc8055 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -193,6 +193,15 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
}
EXPORT_SYMBOL(rtw_fw_c2h_cmd_rx_irqsafe);
+void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev)
+{
+ if (rtw_read8(rtwdev, REG_MCU_TST_CFG) == VAL_FW_TRIGGER)
+ rtw_fw_recovery(rtwdev);
+ else
+ rtw_warn(rtwdev, "unhandled firmware c2h interrupt\n");
+}
+EXPORT_SYMBOL(rtw_fw_c2h_cmd_isr);
+
static void rtw_fw_send_h2c_command(struct rtw_dev *rtwdev,
u8 *h2c)
{
@@ -1404,29 +1413,16 @@ free:
return ret;
}
-int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
- u32 offset, u32 size, u32 *buf)
+static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
+ u32 *buf, u32 residue, u16 start_pg)
{
- struct rtw_fifo_conf *fifo = &rtwdev->fifo;
- u32 residue, i;
- u16 start_pg;
+ u32 i;
u16 idx = 0;
u16 ctl;
u8 rcr;
- if (size & 0x3) {
- rtw_warn(rtwdev, "should be 4-byte aligned\n");
- return -EINVAL;
- }
-
- offset += fifo->rsvd_boundary << TX_PAGE_SIZE_SHIFT;
- residue = offset & (FIFO_PAGE_SIZE - 1);
- start_pg = offset >> FIFO_PAGE_SIZE_SHIFT;
- start_pg += RSVD_PAGE_START_ADDR;
-
rcr = rtw_read8(rtwdev, REG_RCR + 2);
ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
-
/* disable rx clock gate */
rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
@@ -1448,6 +1444,64 @@ int rtw_dump_drv_rsvd_page(struct rtw_dev *rtwdev,
out:
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
rtw_write8(rtwdev, REG_RCR + 2, rcr);
+}
+
+static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
+ u32 offset, u32 size, u32 *buf)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ u32 start_pg, residue;
+
+ if (sel >= RTW_FW_FIFO_MAX) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "wrong fw fifo sel\n");
+ return;
+ }
+ if (sel == RTW_FW_FIFO_SEL_RSVD_PAGE)
+ offset += rtwdev->fifo.rsvd_boundary << TX_PAGE_SIZE_SHIFT;
+ residue = offset & (FIFO_PAGE_SIZE - 1);
+ start_pg = (offset >> FIFO_PAGE_SIZE_SHIFT) + chip->fw_fifo_addr[sel];
+
+ rtw_fw_read_fifo_page(rtwdev, offset, size, buf, residue, start_pg);
+}
+
+static bool rtw_fw_dump_check_size(struct rtw_dev *rtwdev,
+ enum rtw_fw_fifo_sel sel,
+ u32 start_addr, u32 size)
+{
+ switch (sel) {
+ case RTW_FW_FIFO_SEL_TX:
+ case RTW_FW_FIFO_SEL_RX:
+ if ((start_addr + size) > rtwdev->chip->fw_fifo_addr[sel])
+ return false;
+ fallthrough;
+ default:
+ return true;
+ }
+}
+
+int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
+ u32 *buffer)
+{
+ if (!rtwdev->chip->fw_fifo_addr) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "chip not support dump fw fifo\n");
+ return -ENOTSUPP;
+ }
+
+ if (size == 0 || !buffer)
+ return -EINVAL;
+
+ if (size & 0x3) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "not 4byte alignment\n");
+ return -EINVAL;
+ }
+
+ if (!rtw_fw_dump_check_size(rtwdev, fifo_sel, addr, size)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw fifo dump size overflow\n");
+ return -EINVAL;
+ }
+
+ rtw_fw_read_fifo(rtwdev, fifo_sel, addr, size, buffer);
+
return 0;
}
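
rtw_fw_dump_fifo() centralizes the validation that the old rtw_dump_drv_rsvd_page() did inline: the chip must publish fw_fifo_addr, the size must be non-zero and 4-byte aligned, and TX/RX reads must fit the fifo bounds. A hedged sketch of a caller, mirroring the debugfs user above; the buffer size is an arbitrary choice for illustration:

    /* Dump the first 64 bytes of the reserved page fifo; returns 0 or
     * a negative errno from the checks described above. */
    static int example_dump_rsvd_head(struct rtw_dev *rtwdev)
    {
            u32 buf[16];    /* 64 bytes, a multiple of 4 as required */

            return rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RSVD_PAGE, 0,
                                    sizeof(buf), buf);
    }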
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 686dcd3bbda6..08644540d259 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -16,7 +16,6 @@
#define FIFO_PAGE_SIZE_SHIFT 12
#define FIFO_PAGE_SIZE 4096
-#define RSVD_PAGE_START_ADDR 0x780
#define FIFO_DUMP_ADDR 0x8000
#define DLFW_PAGE_SIZE_SHIFT_LEGACY 12
@@ -508,6 +507,20 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_NLO_LOC_NLO_INFO(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define GET_FW_DUMP_LEN(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(15, 0))
+#define GET_FW_DUMP_SEQ(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(22, 16))
+#define GET_FW_DUMP_MORE(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), BIT(23))
+#define GET_FW_DUMP_VERSION(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x00), GENMASK(31, 24))
+#define GET_FW_DUMP_TLV_TYPE(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(15, 0))
+#define GET_FW_DUMP_TLV_LEN(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x01), GENMASK(31, 16))
+#define GET_FW_DUMP_TLV_VAL(_header) \
+ le32_get_bits(*((__le32 *)(_header) + 0x02), GENMASK(31, 0))
static inline struct rtw_c2h_cmd *get_c2h_from_skb(struct sk_buff *skb)
{
u32 pkt_offset;
@@ -564,5 +577,8 @@ void rtw_fw_update_pkt_probe_req(struct rtw_dev *rtwdev,
struct cfg80211_ssid *ssid);
void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable);
void rtw_fw_h2c_cmd_dbg(struct rtw_dev *rtwdev, u8 *h2c);
+void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
+int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
+ u32 *buffer);
#endif
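
The GET_FW_DUMP_* accessors are le32_get_bits() field extractions over the little-endian dump header used for firmware crash reporting: word 0 carries length, sequence, a more-fragments bit, and a version; word 1 carries the TLV type and length. A sketch of a consumer:

    #include <linux/bitfield.h>
    #include <linux/printk.h>

    static void example_parse_dump_hdr(const __le32 *hdr)
    {
            u32 len = GET_FW_DUMP_LEN(hdr);
            u32 seq = GET_FW_DUMP_SEQ(hdr);
            u32 more = GET_FW_DUMP_MORE(hdr);

            pr_debug("fw dump fragment: len=%u seq=%u more=%u\n",
                     len, seq, more);
    }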
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 19b9b7ab016b..59028b121b00 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -114,18 +114,13 @@ static int rtw_mac_pre_system_cfg(struct rtw_dev *rtwdev)
static bool do_pwr_poll_cmd(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target)
{
- u32 cnt;
+ u32 val;
target &= mask;
- for (cnt = 0; cnt < RTW_PWR_POLLING_CNT; cnt++) {
- if ((rtw_read8(rtwdev, addr) & mask) == target)
- return true;
-
- udelay(50);
- }
-
- return false;
+ return read_poll_timeout_atomic(rtw_read8, val, (val & mask) == target,
+ 50, 50 * RTW_PWR_POLLING_CNT, false,
+ rtwdev, addr) == 0;
}
static int rtw_pwr_cmd_polling(struct rtw_dev *rtwdev,
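
do_pwr_poll_cmd() collapses its open-coded retry loop into read_poll_timeout_atomic() from <linux/iopoll.h>: op(args...) is re-read every delay_us microseconds until the condition holds or timeout_us expires, and the macro returns 0 on success or -ETIMEDOUT. The same shape in isolation, with an illustrative wrapper name:

    #include <linux/iopoll.h>

    /* Poll `addr` until the masked bits are all set: 50us period, 5ms
     * budget; rtw_read8()'s (rtwdev, addr) ride in the trailing args. */
    static bool example_poll_bits(struct rtw_dev *rtwdev, u32 addr, u8 mask)
    {
            u8 val;

            return read_poll_timeout_atomic(rtw_read8, val,
                                            (val & mask) == mask,
                                            50, 5000, false,
                                            rtwdev, addr) == 0;
    }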
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 6b199152abcf..c92fba2fa480 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -358,13 +358,10 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_leave_lps_deep(rtwdev);
if (changed & BSS_CHANGED_ASSOC) {
- enum rtw_net_type net_type;
-
+ rtw_vif_assoc_changed(rtwvif, conf);
if (conf->assoc) {
rtw_coex_connect_notify(rtwdev, COEX_ASSOCIATE_FINISH);
- net_type = RTW_NET_MGD_LINKED;
- rtwvif->aid = conf->aid;
rtw_fw_download_rsvd_page(rtwdev);
rtw_send_rsvd_page_h2c(rtwdev);
rtw_coex_media_status_notify(rtwdev, conf->assoc);
@@ -372,12 +369,9 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_bf_assoc(rtwdev, vif, conf);
} else {
rtw_leave_lps(rtwdev);
- net_type = RTW_NET_NO_LINK;
- rtwvif->aid = 0;
rtw_bf_disassoc(rtwdev, vif, conf);
}
- rtwvif->net_type = net_type;
config |= PORT_SET_NET_TYPE;
config |= PORT_SET_AID;
}
@@ -429,56 +423,17 @@ static int rtw_ops_conf_tx(struct ieee80211_hw *hw,
return 0;
}
-static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
-{
- unsigned long mac_id;
-
- mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
- if (mac_id < RTW_MAX_MAC_ID_NUM)
- set_bit(mac_id, rtwdev->mac_id_map);
-
- return mac_id;
-}
-
-static void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
-{
- clear_bit(mac_id, rtwdev->mac_id_map);
-}
-
static int rtw_ops_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
- int i;
int ret = 0;
mutex_lock(&rtwdev->mutex);
-
- si->mac_id = rtw_acquire_macid(rtwdev);
- if (si->mac_id >= RTW_MAX_MAC_ID_NUM) {
- ret = -ENOSPC;
- goto out;
- }
-
- si->sta = sta;
- si->vif = vif;
- si->init_ra_lv = 1;
- ewma_rssi_init(&si->avg_rssi);
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- rtw_txq_init(rtwdev, sta->txq[i]);
-
- rtw_update_sta_info(rtwdev, si);
- rtw_fw_media_status_report(rtwdev, si->mac_id, true);
-
- rtwdev->sta_cnt++;
-
- rtw_info(rtwdev, "sta %pM joined with macid %d\n",
- sta->addr, si->mac_id);
-
-out:
+ ret = rtw_sta_add(rtwdev, sta, vif);
mutex_unlock(&rtwdev->mutex);
+
return ret;
}
@@ -487,25 +442,11 @@ static int rtw_ops_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct rtw_dev *rtwdev = hw->priv;
- struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
- int i;
mutex_lock(&rtwdev->mutex);
-
- rtw_release_macid(rtwdev, si->mac_id);
- rtw_fw_media_status_report(rtwdev, si->mac_id, false);
-
- for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
- rtw_txq_cleanup(rtwdev, sta->txq[i]);
-
- kfree(si->mask);
-
- rtwdev->sta_cnt--;
-
- rtw_info(rtwdev, "sta %pM with macid %d left\n",
- sta->addr, si->mac_id);
-
+ rtw_sta_remove(rtwdev, sta, true);
mutex_unlock(&rtwdev->mutex);
+
return 0;
}
@@ -845,6 +786,17 @@ static void rtw_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
}
#endif
+static void rtw_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct rtw_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ clear_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ mutex_unlock(&rtwdev->mutex);
+}
+
const struct ieee80211_ops rtw_ops = {
.tx = rtw_ops_tx,
.wake_tx_queue = rtw_ops_wake_tx_queue,
@@ -871,6 +823,7 @@ const struct ieee80211_ops rtw_ops = {
.set_bitrate_mask = rtw_ops_set_bitrate_mask,
.set_antenna = rtw_ops_set_antenna,
.get_antenna = rtw_ops_get_antenna,
+ .reconfig_complete = rtw_reconfig_complete,
#ifdef CONFIG_PM
.suspend = rtw_ops_suspend,
.resume = rtw_ops_resume,
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 54044abf30d7..565efd880624 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -259,6 +259,198 @@ static void rtw_c2h_work(struct work_struct *work)
}
}
+static u8 rtw_acquire_macid(struct rtw_dev *rtwdev)
+{
+ unsigned long mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, RTW_MAX_MAC_ID_NUM);
+ if (mac_id < RTW_MAX_MAC_ID_NUM)
+ set_bit(mac_id, rtwdev->mac_id_map);
+
+ return mac_id;
+}
+
+int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
+
+ si->mac_id = rtw_acquire_macid(rtwdev);
+ if (si->mac_id >= RTW_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+
+ si->sta = sta;
+ si->vif = vif;
+ si->init_ra_lv = 1;
+ ewma_rssi_init(&si->avg_rssi);
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_init(rtwdev, sta->txq[i]);
+
+ rtw_update_sta_info(rtwdev, si);
+ rtw_fw_media_status_report(rtwdev, si->mac_id, true);
+
+ rtwdev->sta_cnt++;
+ rtw_info(rtwdev, "sta %pM joined with macid %d\n",
+ sta->addr, si->mac_id);
+
+ return 0;
+}
+
+void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ bool fw_exist)
+{
+ struct rtw_sta_info *si = (struct rtw_sta_info *)sta->drv_priv;
+ int i;
+
+ rtw_release_macid(rtwdev, si->mac_id);
+ if (fw_exist)
+ rtw_fw_media_status_report(rtwdev, si->mac_id, false);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw_txq_cleanup(rtwdev, sta->txq[i]);
+
+ kfree(si->mask);
+
+ rtwdev->sta_cnt--;
+ rtw_info(rtwdev, "sta %pM with macid %d left\n",
+ sta->addr, si->mac_id);
+}
+
+static bool rtw_fw_dump_crash_log(struct rtw_dev *rtwdev)
+{
+ u32 size = rtwdev->chip->fw_rxff_size;
+ u32 *buf;
+ u8 seq;
+ bool ret = true;
+
+ buf = vmalloc(size);
+ if (!buf)
+ goto exit;
+
+ if (rtw_fw_dump_fifo(rtwdev, RTW_FW_FIFO_SEL_RXBUF_FW, 0, size, buf)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "dump fw fifo fail\n");
+ goto free_buf;
+ }
+
+ if (GET_FW_DUMP_LEN(buf) == 0) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's length is 0\n");
+ goto free_buf;
+ }
+
+ seq = GET_FW_DUMP_SEQ(buf);
+ if (seq > 0 && seq != (rtwdev->fw.prev_dump_seq + 1)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW,
+ "fw crash dump's seq is wrong: %d\n", seq);
+ goto free_buf;
+ }
+ if (seq == 0 &&
+ (GET_FW_DUMP_TLV_TYPE(buf) != FW_CD_TYPE ||
+ GET_FW_DUMP_TLV_LEN(buf) != FW_CD_LEN ||
+ GET_FW_DUMP_TLV_VAL(buf) != FW_CD_VAL)) {
+ rtw_dbg(rtwdev, RTW_DBG_FW, "fw crash dump's tlv is wrong\n");
+ goto free_buf;
+ }
+
+ print_hex_dump_bytes("rtw88 fw dump: ", DUMP_PREFIX_OFFSET, buf, size);
+
+ if (GET_FW_DUMP_MORE(buf) == 1) {
+ rtwdev->fw.prev_dump_seq = seq;
+ ret = false;
+ }
+
+free_buf:
+ vfree(buf);
+exit:
+ rtw_write8(rtwdev, REG_MCU_TST_CFG, 0);
+
+ return ret;
+}
+
+void rtw_vif_assoc_changed(struct rtw_vif *rtwvif,
+ struct ieee80211_bss_conf *conf)
+{
+ if (conf && conf->assoc) {
+ rtwvif->aid = conf->aid;
+ rtwvif->net_type = RTW_NET_MGD_LINKED;
+ } else {
+ rtwvif->aid = 0;
+ rtwvif->net_type = RTW_NET_NO_LINK;
+ }
+}
+
+static void rtw_reset_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+ struct rtw_sec_desc *sec = &rtwdev->sec;
+
+ rtw_sec_clear_cam(rtwdev, sec, key->hw_key_idx);
+}
+
+static void rtw_reset_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+
+ if (rtwdev->sta_cnt == 0) {
+ rtw_warn(rtwdev, "sta count before reset should not be 0\n");
+ return;
+ }
+ rtw_sta_remove(rtwdev, sta, false);
+}
+
+static void rtw_reset_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct rtw_dev *rtwdev = (struct rtw_dev *)data;
+ struct rtw_vif *rtwvif = (struct rtw_vif *)vif->drv_priv;
+
+ rtw_bf_disassoc(rtwdev, vif, NULL);
+ rtw_vif_assoc_changed(rtwvif, NULL);
+ rtw_txq_cleanup(rtwdev, vif->txq);
+}
+
+void rtw_fw_recovery(struct rtw_dev *rtwdev)
+{
+ if (!test_bit(RTW_FLAG_RESTARTING, rtwdev->flags))
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->fw_recovery_work);
+}
+
+static void rtw_fw_recovery_work(struct work_struct *work)
+{
+ struct rtw_dev *rtwdev = container_of(work, struct rtw_dev,
+ fw_recovery_work);
+
+	/* A false return from rtw_fw_dump_crash_log() indicates that there
+	 * is still more log to dump. The driver sets 0x1cf[7:0] = 0x1 to ask
+	 * the firmware to dump the remaining part of the log, and the
+	 * firmware will trigger an IMR_C2HCMD interrupt to inform the driver
+	 * that the log is ready.
+	 */
+ if (!rtw_fw_dump_crash_log(rtwdev)) {
+ rtw_write8(rtwdev, REG_HRCV_MSG, 1);
+ return;
+ }
+ rtwdev->fw.prev_dump_seq = 0;
+
+ WARN(1, "firmware crash, start reset and recover\n");
+
+ mutex_lock(&rtwdev->mutex);
+
+ set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ rcu_read_lock();
+ rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev);
+ rcu_read_unlock();
+ rtw_iterate_stas_atomic(rtwdev, rtw_reset_sta_iter, rtwdev);
+ rtw_iterate_vifs_atomic(rtwdev, rtw_reset_vif_iter, rtwdev);
+ rtw_enter_ips(rtwdev);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ ieee80211_restart_hw(rtwdev->hw);
+}
+
struct rtw_txq_ba_iter_data {
};
@@ -474,10 +666,10 @@ static u8 hw_bw_cap_to_bitamp(u8 bw_cap)
case EFUSE_HW_CAP_IGNORE:
case EFUSE_HW_CAP_SUPP_BW80:
bw |= BIT(RTW_CHANNEL_WIDTH_80);
- /* fall through */
+ fallthrough;
case EFUSE_HW_CAP_SUPP_BW40:
bw |= BIT(RTW_CHANNEL_WIDTH_40);
- /* fall through */
+ fallthrough;
default:
bw |= BIT(RTW_CHANNEL_WIDTH_20);
break;
@@ -1422,8 +1614,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
timer_setup(&rtwdev->tx_report.purge_timer,
rtw_tx_report_purge_timer, 0);
- tasklet_init(&rtwdev->tx_tasklet, rtw_tx_tasklet,
- (unsigned long)rtwdev);
+ tasklet_setup(&rtwdev->tx_tasklet, rtw_tx_tasklet);
INIT_DELAYED_WORK(&rtwdev->watch_dog_work, rtw_watch_dog_work);
INIT_DELAYED_WORK(&coex->bt_relink_work, rtw_coex_bt_relink_work);
@@ -1432,6 +1623,7 @@ int rtw_core_init(struct rtw_dev *rtwdev)
INIT_DELAYED_WORK(&coex->wl_remain_work, rtw_coex_wl_remain_work);
INIT_DELAYED_WORK(&coex->bt_remain_work, rtw_coex_bt_remain_work);
INIT_WORK(&rtwdev->c2h_work, rtw_c2h_work);
+ INIT_WORK(&rtwdev->fw_recovery_work, rtw_fw_recovery_work);
INIT_WORK(&rtwdev->ba_work, rtw_txq_ba_work);
skb_queue_head_init(&rtwdev->c2h_queue);
skb_queue_head_init(&rtwdev->coex.queue);
@@ -1473,6 +1665,9 @@ int rtw_core_init(struct rtw_dev *rtwdev)
ret = rtw_load_firmware(rtwdev, RTW_WOWLAN_FW);
if (ret) {
rtw_warn(rtwdev, "no wow firmware loaded\n");
+ wait_for_completion(&rtwdev->fw.completion);
+ if (rtwdev->fw.firmware)
+ release_firmware(rtwdev->fw.firmware);
return ret;
}
}
@@ -1487,6 +1682,8 @@ void rtw_core_deinit(struct rtw_dev *rtwdev)
struct rtw_rsvd_page *rsvd_pkt, *tmp;
unsigned long flags;
+ rtw_wait_firmware_completion(rtwdev);
+
if (fw->firmware)
release_firmware(fw->firmware);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 276b5d381467..ffb02e614217 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -359,6 +359,7 @@ enum rtw_flags {
RTW_FLAG_DIG_DISABLE,
RTW_FLAG_BUSY_TRAFFIC,
RTW_FLAG_WOWLAN,
+ RTW_FLAG_RESTARTING,
NUM_OF_RTW_FLAGS,
};
@@ -1082,6 +1083,17 @@ enum rtw_wlan_cpu {
RTW_WCPU_11N,
};
+enum rtw_fw_fifo_sel {
+ RTW_FW_FIFO_SEL_TX,
+ RTW_FW_FIFO_SEL_RX,
+ RTW_FW_FIFO_SEL_RSVD_PAGE,
+ RTW_FW_FIFO_SEL_REPORT,
+ RTW_FW_FIFO_SEL_LLT,
+ RTW_FW_FIFO_SEL_RXBUF_FW,
+
+ RTW_FW_FIFO_MAX,
+};
+
/* hardware configuration for each IC */
struct rtw_chip_info {
struct rtw_chip_ops *ops;
@@ -1098,6 +1110,7 @@ struct rtw_chip_info {
u32 ptct_efuse_size;
u32 txff_size;
u32 rxff_size;
+ u32 fw_rxff_size;
u8 band;
u8 page_size;
u8 csi_buf_pg_num;
@@ -1108,6 +1121,8 @@ struct rtw_chip_info {
bool rx_ldpc;
u8 max_power_index;
+ u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
+
bool ht_supported;
bool vht_supported;
u8 lps_deep_mode_supported;
@@ -1606,6 +1621,9 @@ struct rtw_fifo_conf {
const struct rtw_rqpn *rqpn;
};
+#define FW_CD_TYPE 0xffff
+#define FW_CD_LEN 4
+#define FW_CD_VAL 0xaabbccdd
struct rtw_fw_state {
const struct firmware *firmware;
struct rtw_dev *rtwdev;
@@ -1614,6 +1632,7 @@ struct rtw_fw_state {
u8 sub_version;
u8 sub_index;
u16 h2c_version;
+ u8 prev_dump_seq;
};
struct rtw_hal {
@@ -1699,6 +1718,7 @@ struct rtw_dev {
/* c2h cmd queue & handler work */
struct sk_buff_head c2h_queue;
struct work_struct c2h_work;
+ struct work_struct fw_recovery_work;
/* used to protect txqs list */
spinlock_t txq_lock;
@@ -1799,6 +1819,11 @@ static inline bool rtw_chip_has_rx_ldpc(struct rtw_dev *rtwdev)
return rtwdev->chip->rx_ldpc;
}
+static inline void rtw_release_macid(struct rtw_dev *rtwdev, u8 mac_id)
+{
+ clear_bit(mac_id, rtwdev->mac_id_map);
+}
+
void rtw_get_channel_params(struct cfg80211_chan_def *chandef,
struct rtw_channel_params *ch_param);
bool check_hw_ready(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 target);
@@ -1821,5 +1846,12 @@ void rtw_core_deinit(struct rtw_dev *rtwdev);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
void rtw_unregister_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw);
u16 rtw_desc_to_bitrate(u8 desc_rate);
+void rtw_vif_assoc_changed(struct rtw_vif *rtwvif,
+ struct ieee80211_bss_conf *conf);
+int rtw_sta_add(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif);
+void rtw_sta_remove(struct rtw_dev *rtwdev, struct ieee80211_sta *sta,
+ bool fw_exist);
+void rtw_fw_recovery(struct rtw_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 3413973bc475..676d861aaf99 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -109,7 +109,7 @@ static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
tx_data = rtw_pci_get_tx_data(skb);
dma = tx_data->dma;
- pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -125,7 +125,7 @@ static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
/* free the ring itself */
- pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
+ dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
tx_ring->r.head = NULL;
}
@@ -144,7 +144,7 @@ static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
continue;
dma = *((dma_addr_t *)skb->cb);
- pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(skb);
rx_ring->buf[i] = NULL;
}
@@ -159,7 +159,7 @@ static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
- pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
+ dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}
static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
@@ -194,7 +194,7 @@ static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
return -EINVAL;
}
- head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
if (!head) {
rtw_err(rtwdev, "failed to allocate tx ring\n");
return -ENOMEM;
@@ -223,8 +223,8 @@ static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
if (!skb)
return -EINVAL;
- dma = pci_map_single(pdev, skb->data, buf_sz, PCI_DMA_FROMDEVICE);
- if (pci_dma_mapping_error(pdev, dma))
+ dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma))
return -EBUSY;
*((dma_addr_t *)skb->cb) = dma;
@@ -272,7 +272,7 @@ static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
return -EINVAL;
}
- head = pci_zalloc_consistent(pdev, ring_sz, &dma);
+ head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
if (!head) {
rtw_err(rtwdev, "failed to allocate rx ring\n");
return -ENOMEM;
@@ -311,11 +311,11 @@ err_out:
if (!skb)
continue;
dma = *((dma_addr_t *)skb->cb);
- pci_unmap_single(pdev, dma, buf_sz, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
rx_ring->buf[i] = NULL;
}
- pci_free_consistent(pdev, ring_sz, head, dma);
+ dma_free_coherent(&pdev->dev, ring_sz, head, dma);
rtw_err(rtwdev, "failed to init rx buffer\n");
@@ -389,6 +389,7 @@ static int rtw_pci_init(struct rtw_dev *rtwdev)
IMR_VODOK |
IMR_ROK |
IMR_BCNDMAINT_E |
+ IMR_C2HCMD |
0;
rtwpci->irq_mask[1] = IMR_TXFOVW |
0;
@@ -675,8 +676,7 @@ static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
tx_data = rtw_pci_get_tx_data(prev);
dma = tx_data->dma;
- pci_unmap_single(rtwpci->pdev, dma, prev->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
dev_kfree_skb_any(prev);
}
@@ -755,9 +755,9 @@ static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
memset(pkt_desc, 0, tx_pkt_desc_sz);
pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
rtw_tx_fill_tx_desc(pkt_info, skb);
- dma = pci_map_single(rtwpci->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(rtwpci->pdev, dma))
+ dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&rtwpci->pdev->dev, dma))
return -EBUSY;
/* after this we got dma mapped, there is no way back */
@@ -896,8 +896,8 @@ static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
break;
}
tx_data = rtw_pci_get_tx_data(skb);
- pci_unmap_single(rtwpci->pdev, tx_data->dma, skb->len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
+ DMA_TO_DEVICE);
/* just free command packets from host to card */
if (hw_queue == RTW_TX_QUEUE_H2C) {
@@ -1080,6 +1080,8 @@ static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
if (irq_status[0] & IMR_ROK)
rtw_pci_rx_isr(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU);
+ if (unlikely(irq_status[0] & IMR_C2HCMD))
+ rtw_fw_c2h_cmd_isr(rtwdev);
/* all of the jobs for this interrupt have been done */
rtw_pci_enable_interrupt(rtwdev, rtwpci);
@@ -1599,6 +1601,8 @@ void rtw_pci_shutdown(struct pci_dev *pdev)
if (chip->ops->shutdown)
chip->ops->shutdown(rtwdev);
+
+ pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);
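
The pci.c hunks above are a mechanical move from the deprecated pci_map_single()/pci_free_consistent() wrappers to the generic DMA API; the one behavioral change is that dma_alloc_coherent() is passed GFP_KERNEL explicitly, where pci_zalloc_consistent() hard-coded GFP_ATOMIC. The streaming-DMA pattern being adopted, as a self-contained sketch (demo_* names are illustrative):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>
	#include <linux/skbuff.h>

	static int demo_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				   dma_addr_t *dma)
	{
		*dma = dma_map_single(&pdev->dev, skb->data, skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, *dma))
			return -EBUSY;	/* never use an unchecked handle */
		return 0;
	}

	static void demo_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				      dma_addr_t dma)
	{
		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
	}
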
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 024c2bc275cb..ca17aa9cf7dc 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -9,8 +9,8 @@
#define RTK_BEQ_TX_DESC_NUM 256
#define RTK_MAX_RX_DESC_NUM 512
-/* 8K + rx desc size */
-#define RTK_PCI_RX_BUF_SIZE (8192 + 24)
+/* 11K + rx desc size */
+#define RTK_PCI_RX_BUF_SIZE (11454 + 24)
#define RTK_PCI_CTRL 0x300
#define BIT_RST_TRXDMA_INTF BIT(20)
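
The enlarged RX buffer is presumably sized for the largest VHT MPDU: 11454 bytes is the maximum VHT MPDU length defined by 802.11, and the extra 24 bytes keep room for the RX descriptor, as the adjacent comment notes.
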
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 8d93f3159746..5cd9cc42648e 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -147,12 +147,13 @@ void rtw_phy_dig_write(struct rtw_dev *rtwdev, u8 igi)
{
struct rtw_chip_info *chip = rtwdev->chip;
struct rtw_hal *hal = &rtwdev->hal;
- const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
u32 addr, mask;
u8 path;
- if (dig_cck)
+ if (chip->dig_cck) {
+ const struct rtw_hw_reg *dig_cck = &chip->dig_cck[0];
rtw_write32_mask(rtwdev, dig_cck->addr, dig_cck->mask, igi >> 1);
+ }
for (path = 0; path < hal->rf_path_num; path++) {
addr = chip->dig[path].addr;
@@ -1522,7 +1523,7 @@ static u8 rtw_get_channel_group(u8 channel)
switch (channel) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case 1:
case 2:
case 36:
@@ -1668,7 +1669,7 @@ static u8 rtw_phy_get_2g_tx_power_index(struct rtw_dev *rtwdev,
switch (bandwidth) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case RTW_CHANNEL_WIDTH_20:
tx_power += pwr_idx_2g->ht_1s_diff.bw20 * factor;
if (above_2ss)
@@ -1712,7 +1713,7 @@ static u8 rtw_phy_get_5g_tx_power_index(struct rtw_dev *rtwdev,
switch (bandwidth) {
default:
WARN_ON(1);
- /* fall through */
+ fallthrough;
case RTW_CHANNEL_WIDTH_20:
tx_power += pwr_idx_5g->ht_1s_diff.bw20 * factor;
if (above_2ss)
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index 8f468d6b5f78..86b94c008a27 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -126,6 +126,9 @@
BIT_WINTINI_RDY | BIT_RAM_DL_SEL)
#define FW_READY_MASK 0xffff
+#define REG_MCU_TST_CFG 0x84
+#define VAL_FW_TRIGGER 0x1
+
#define REG_EFUSE_ACCESS 0x00CF
#define EFUSE_ACCESS_ON 0x69
#define EFUSE_ACCESS_OFF 0x00
@@ -616,6 +619,8 @@
#define BIT_ANAPAR_BTPS BIT(22)
#define REG_RSTB_SEL 0x1c38
+#define REG_HRCV_MSG 0x1cf
+
#define REG_IGN_GNTBT4 0x4160
#define RF_MODE 0x00
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index d8863d8a5468..da2e7415be8f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -68,7 +68,7 @@ static const u32 rtw8821c_txscale_tbl[] = {
0x2d3, 0x2fe, 0x32b, 0x35c, 0x38e, 0x3c4, 0x3fe
};
-static const u8 rtw8821c_get_swing_index(struct rtw_dev *rtwdev)
+static u8 rtw8821c_get_swing_index(struct rtw_dev *rtwdev)
{
u8 i = 0;
u32 swing, table_value;
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 351cd055a295..22d0dd640ac9 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -1009,12 +1009,12 @@ static int rtw8822b_set_antenna(struct rtw_dev *rtwdev,
antenna_tx, antenna_rx);
if (!rtw8822b_check_rf_path(antenna_tx)) {
- rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
if (!rtw8822b_check_rf_path(antenna_rx)) {
- rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -2442,6 +2442,7 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.ptct_efuse_size = 96,
.txff_size = 262144,
.rxff_size = 24576,
+ .fw_rxff_size = 12288,
.txgi_factor = 1,
.is_pwr_by_rate_dec = true,
.max_power_index = 0x3f,
@@ -2504,6 +2505,8 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822b),
.coex_info_hw_regs = coex_info_hw_regs_8822b,
+
+ .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680},
};
EXPORT_SYMBOL(rtw8822b_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index 426808413baa..e37300e98517 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -154,25 +154,16 @@ static void rtw8822c_rf_minmax_cmp(struct rtw_dev *rtwdev, u32 value,
}
}
-static void swap_u32(u32 *v1, u32 *v2)
-{
- u32 tmp;
-
- tmp = *v1;
- *v1 = *v2;
- *v2 = tmp;
-}
-
static void __rtw8822c_dac_iq_sort(struct rtw_dev *rtwdev, u32 *v1, u32 *v2)
{
if (*v1 >= 0x200 && *v2 >= 0x200) {
if (*v1 > *v2)
- swap_u32(v1, v2);
+ swap(*v1, *v2);
} else if (*v1 < 0x200 && *v2 < 0x200) {
if (*v1 > *v2)
- swap_u32(v1, v2);
+ swap(*v1, *v2);
} else if (*v1 < 0x200 && *v2 >= 0x200) {
- swap_u32(v1, v2);
+ swap(*v1, *v2);
}
}
@@ -2014,7 +2005,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupport tx path 0x%x\n", antenna_tx);
+ rtw_info(rtwdev, "unsupported tx path 0x%x\n", antenna_tx);
return -EINVAL;
}
@@ -2024,7 +2015,7 @@ static int rtw8822c_set_antenna(struct rtw_dev *rtwdev,
case BB_PATH_AB:
break;
default:
- rtw_info(rtwdev, "unsupport rx path 0x%x\n", antenna_rx);
+ rtw_info(rtwdev, "unsupported rx path 0x%x\n", antenna_rx);
return -EINVAL;
}
@@ -4303,6 +4294,7 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.ptct_efuse_size = 124,
.txff_size = 262144,
.rxff_size = 24576,
+ .fw_rxff_size = 12288,
.txgi_factor = 2,
.is_pwr_by_rate_dec = false,
.max_power_index = 0x7f,
@@ -4373,6 +4365,8 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.coex_info_hw_regs_num = ARRAY_SIZE(coex_info_hw_regs_8822c),
.coex_info_hw_regs = coex_info_hw_regs_8822c,
+
+ .fw_fifo_addr = {0x780, 0x700, 0x780, 0x660, 0x650, 0x680},
};
EXPORT_SYMBOL(rtw8822c_hw_spec);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
index 08d01a7bb1bf..3a204a7533df 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c_table.c
@@ -23889,7 +23889,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 0, 11, 60, },
{ 8, 0, 0, 0, 11, 72, },
{ 9, 0, 0, 0, 11, 60, },
- { 0, 0, 0, 0, 12, 52, },
+ { 0, 0, 0, 0, 12, 44, },
{ 2, 0, 0, 0, 12, 60, },
{ 1, 0, 0, 0, 12, 68, },
{ 3, 0, 0, 0, 12, 52, },
@@ -23899,7 +23899,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 0, 12, 60, },
{ 8, 0, 0, 0, 12, 52, },
{ 9, 0, 0, 0, 12, 60, },
- { 0, 0, 0, 0, 13, 48, },
+ { 0, 0, 0, 0, 13, 40, },
{ 2, 0, 0, 0, 13, 60, },
{ 1, 0, 0, 0, 13, 68, },
{ 3, 0, 0, 0, 13, 48, },
@@ -24029,7 +24029,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 1, 11, 60, },
{ 8, 0, 0, 1, 11, 52, },
{ 9, 0, 0, 1, 11, 60, },
- { 0, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 12, 32, },
{ 2, 0, 0, 1, 12, 60, },
{ 1, 0, 0, 1, 12, 76, },
{ 3, 0, 0, 1, 12, 40, },
@@ -24039,7 +24039,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 1, 12, 60, },
{ 8, 0, 0, 1, 12, 40, },
{ 9, 0, 0, 1, 12, 60, },
- { 0, 0, 0, 1, 13, 28, },
+ { 0, 0, 0, 1, 13, 20, },
{ 2, 0, 0, 1, 13, 60, },
{ 1, 0, 0, 1, 13, 76, },
{ 3, 0, 0, 1, 13, 28, },
@@ -24169,7 +24169,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 2, 11, 60, },
{ 8, 0, 0, 2, 11, 52, },
{ 9, 0, 0, 2, 11, 60, },
- { 0, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 12, 32, },
{ 2, 0, 0, 2, 12, 60, },
{ 1, 0, 0, 2, 12, 76, },
{ 3, 0, 0, 2, 12, 40, },
@@ -24179,7 +24179,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 2, 12, 60, },
{ 8, 0, 0, 2, 12, 40, },
{ 9, 0, 0, 2, 12, 60, },
- { 0, 0, 0, 2, 13, 28, },
+ { 0, 0, 0, 2, 13, 20, },
{ 2, 0, 0, 2, 13, 60, },
{ 1, 0, 0, 2, 13, 76, },
{ 3, 0, 0, 2, 13, 28, },
@@ -24309,7 +24309,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 3, 11, 36, },
{ 8, 0, 0, 3, 11, 52, },
{ 9, 0, 0, 3, 11, 36, },
- { 0, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 12, 32, },
{ 2, 0, 0, 3, 12, 36, },
{ 1, 0, 0, 3, 12, 66, },
{ 3, 0, 0, 3, 12, 40, },
@@ -24319,7 +24319,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type0[] = {
{ 7, 0, 0, 3, 12, 36, },
{ 8, 0, 0, 3, 12, 40, },
{ 9, 0, 0, 3, 12, 36, },
- { 0, 0, 0, 3, 13, 28, },
+ { 0, 0, 0, 3, 13, 20, },
{ 2, 0, 0, 3, 13, 36, },
{ 1, 0, 0, 3, 13, 66, },
{ 3, 0, 0, 3, 13, 28, },
@@ -25844,7 +25844,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 0, 11, 60, },
{ 8, 0, 0, 0, 11, 72, },
{ 9, 0, 0, 0, 11, 60, },
- { 0, 0, 0, 0, 12, 52, },
+ { 0, 0, 0, 0, 12, 44, },
{ 2, 0, 0, 0, 12, 60, },
{ 1, 0, 0, 0, 12, 68, },
{ 3, 0, 0, 0, 12, 52, },
@@ -25854,7 +25854,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 0, 12, 60, },
{ 8, 0, 0, 0, 12, 52, },
{ 9, 0, 0, 0, 12, 60, },
- { 0, 0, 0, 0, 13, 48, },
+ { 0, 0, 0, 0, 13, 40, },
{ 2, 0, 0, 0, 13, 60, },
{ 1, 0, 0, 0, 13, 68, },
{ 3, 0, 0, 0, 13, 48, },
@@ -25984,7 +25984,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 1, 11, 60, },
{ 8, 0, 0, 1, 11, 52, },
{ 9, 0, 0, 1, 11, 60, },
- { 0, 0, 0, 1, 12, 40, },
+ { 0, 0, 0, 1, 12, 32, },
{ 2, 0, 0, 1, 12, 60, },
{ 1, 0, 0, 1, 12, 76, },
{ 3, 0, 0, 1, 12, 40, },
@@ -25994,7 +25994,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 1, 12, 60, },
{ 8, 0, 0, 1, 12, 40, },
{ 9, 0, 0, 1, 12, 60, },
- { 0, 0, 0, 1, 13, 28, },
+ { 0, 0, 0, 1, 13, 20, },
{ 2, 0, 0, 1, 13, 60, },
{ 1, 0, 0, 1, 13, 76, },
{ 3, 0, 0, 1, 13, 28, },
@@ -26124,7 +26124,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 2, 11, 60, },
{ 8, 0, 0, 2, 11, 52, },
{ 9, 0, 0, 2, 11, 60, },
- { 0, 0, 0, 2, 12, 40, },
+ { 0, 0, 0, 2, 12, 32, },
{ 2, 0, 0, 2, 12, 60, },
{ 1, 0, 0, 2, 12, 76, },
{ 3, 0, 0, 2, 12, 40, },
@@ -26134,7 +26134,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 2, 12, 60, },
{ 8, 0, 0, 2, 12, 40, },
{ 9, 0, 0, 2, 12, 60, },
- { 0, 0, 0, 2, 13, 28, },
+ { 0, 0, 0, 2, 13, 20, },
{ 2, 0, 0, 2, 13, 60, },
{ 1, 0, 0, 2, 13, 76, },
{ 3, 0, 0, 2, 13, 28, },
@@ -26264,7 +26264,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 3, 11, 36, },
{ 8, 0, 0, 3, 11, 52, },
{ 9, 0, 0, 3, 11, 36, },
- { 0, 0, 0, 3, 12, 40, },
+ { 0, 0, 0, 3, 12, 32, },
{ 2, 0, 0, 3, 12, 36, },
{ 1, 0, 0, 3, 12, 66, },
{ 3, 0, 0, 3, 12, 40, },
@@ -26274,7 +26274,7 @@ static const struct rtw_txpwr_lmt_cfg_pair rtw8822c_txpwr_lmt_type5[] = {
{ 7, 0, 0, 3, 12, 36, },
{ 8, 0, 0, 3, 12, 40, },
{ 9, 0, 0, 3, 12, 36, },
- { 0, 0, 0, 3, 13, 28, },
+ { 0, 0, 0, 3, 13, 20, },
{ 2, 0, 0, 3, 13, 36, },
{ 1, 0, 0, 3, 13, 66, },
{ 3, 0, 0, 3, 13, 28, },
diff --git a/drivers/net/wireless/realtek/rtw88/tx.c b/drivers/net/wireless/realtek/rtw88/tx.c
index 7fcc992b01a8..ca8072177ae3 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.c
+++ b/drivers/net/wireless/realtek/rtw88/tx.c
@@ -587,9 +587,9 @@ static void rtw_txq_push(struct rtw_dev *rtwdev,
rcu_read_unlock();
}
-void rtw_tx_tasklet(unsigned long data)
+void rtw_tx_tasklet(struct tasklet_struct *t)
{
- struct rtw_dev *rtwdev = (void *)data;
+ struct rtw_dev *rtwdev = from_tasklet(rtwdev, t, tx_tasklet);
struct rtw_txq *rtwtxq, *tmp;
spin_lock_bh(&rtwdev->txq_lock);
diff --git a/drivers/net/wireless/realtek/rtw88/tx.h b/drivers/net/wireless/realtek/rtw88/tx.h
index cfe84eef5923..6673dbcaa21c 100644
--- a/drivers/net/wireless/realtek/rtw88/tx.h
+++ b/drivers/net/wireless/realtek/rtw88/tx.h
@@ -94,7 +94,7 @@ void rtw_tx(struct rtw_dev *rtwdev,
struct sk_buff *skb);
void rtw_txq_init(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
void rtw_txq_cleanup(struct rtw_dev *rtwdev, struct ieee80211_txq *txq);
-void rtw_tx_tasklet(unsigned long data);
+void rtw_tx_tasklet(struct tasklet_struct *t);
void rtw_tx_pkt_info_update(struct rtw_dev *rtwdev,
struct rtw_tx_pkt_info *pkt_info,
struct ieee80211_sta *sta,
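
The rtw_tx_tasklet() change above follows the tree-wide tasklet API conversion: callbacks now receive the tasklet_struct itself and recover their parent object with from_tasklet() (a container_of() wrapper) instead of casting an unsigned long. The pattern in isolation, with illustrative demo_* names:

	#include <linux/interrupt.h>

	struct demo_dev {
		struct tasklet_struct tx_tasklet;
	};

	static void demo_tx_tasklet(struct tasklet_struct *t)
	{
		struct demo_dev *dev = from_tasklet(dev, t, tx_tasklet);

		(void)dev;	/* drain dev's tx queues here */
	}

	static void demo_init(struct demo_dev *dev)
	{
		tasklet_setup(&dev->tx_tasklet, demo_tx_tasklet);
	}
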
diff --git a/drivers/net/wireless/realtek/rtw88/util.h b/drivers/net/wireless/realtek/rtw88/util.h
index 41c10e7144df..0c23b5069be0 100644
--- a/drivers/net/wireless/realtek/rtw88/util.h
+++ b/drivers/net/wireless/realtek/rtw88/util.h
@@ -17,6 +17,8 @@ struct rtw_dev;
ieee80211_iterate_stations_atomic(rtwdev->hw, iterator, data)
#define rtw_iterate_keys(rtwdev, vif, iterator, data) \
ieee80211_iter_keys(rtwdev->hw, vif, iterator, data)
+#define rtw_iterate_keys_rcu(rtwdev, vif, iterator, data) \
+ ieee80211_iter_keys_rcu((rtwdev)->hw, vif, iterator, data)
static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
{
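
The new rtw_iterate_keys_rcu() wrapper exists because plain ieee80211_iter_keys() demands heavier locking (RTNL-level) than the firmware-restart path relies on, while ieee80211_iter_keys_rcu() only requires the RCU read lock. Usage as in the recovery worker earlier in this diff, sketched:

	rcu_read_lock();
	rtw_iterate_keys_rcu(rtwdev, NULL, rtw_reset_key_iter, rtwdev);
	rcu_read_unlock();
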
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 8852a1832951..75b5d545b49e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3112,7 +3112,7 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
retval = rndis_query_oid(usbdev,
RNDIS_OID_802_11_NETWORK_TYPES_SUPPORTED,
&networks_supported, &len);
- if (retval >= 0) {
+ if (!retval) {
n = le32_to_cpu(networks_supported.num_items);
if (n > 8)
n = 8;
@@ -3137,7 +3137,7 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
retval = rndis_query_oid(usbdev,
RNDIS_OID_802_11_CAPABILITY,
&caps, &len);
- if (retval >= 0) {
+ if (!retval) {
netdev_dbg(usbdev->net, "RNDIS_OID_802_11_CAPABILITY -> len %d, "
"ver %d, pmkids %d, auth-encr-pairs %d\n",
le32_to_cpu(caps.length),
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index c8ba148f8c6c..a0c5d02ae88c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2018 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index 3644d7d99463..2d49c5b5eefb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_debugfs.c b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
index c71b41e45423..24a417ea2ae7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_debugfs.c
+++ b/drivers/net/wireless/rsi/rsi_91x_debugfs.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index 6f8d5f9a9f7e..3f7e3cfb6f00 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 5c0adb0efc5d..16025300cddb 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -731,7 +731,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
/**
* rsi_get_connected_channel() - This function is used to get the current
* connected channel number.
- * @adapter: Pointer to the adapter structure.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: Current connected AP's channel number is returned.
*/
@@ -855,7 +855,7 @@ static void rsi_mac80211_bss_info_changed(struct ieee80211_hw *hw,
/**
* rsi_mac80211_conf_filter() - This function configure the device's RX filter.
* @hw: Pointer to the ieee80211_hw structure.
- * @changed: Changed flags set.
+ * @changed_flags: Changed flags set.
* @total_flags: Total initial flags set.
* @multicast: Multicast.
*
@@ -936,6 +936,7 @@ static int rsi_mac80211_conf_tx(struct ieee80211_hw *hw,
* @hw: Pointer to the ieee80211_hw structure.
* @vif: Pointer to the ieee80211_vif structure.
* @key: Pointer to the ieee80211_key_conf structure.
+ * @sta: Pointer to the ieee80211_sta structure.
*
* Return: status: 0 on success, negative error codes on failure.
*/
@@ -1237,6 +1238,7 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
* @common: Pointer to the driver private structure.
* @bssid: pointer to the bssid.
* @rssi: RSSI value.
+ * @vif: Pointer to the ieee80211_vif structure.
*/
static void rsi_perform_cqm(struct rsi_common *common,
u8 *bssid,
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 29d83049c5f5..9a3d2439a8e7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -148,6 +148,7 @@ static struct sk_buff *rsi_prepare_skb(struct rsi_common *common,
/**
* rsi_read_pkt() - This function reads frames from the card.
* @common: Pointer to the driver private structure.
+ * @rx_pkt: Received pkt.
* @rcv_pkt_len: Received pkt length. In case of USB it is 0.
*
* Return: 0 on success, -1 on failure.
@@ -279,7 +280,7 @@ void rsi_set_bt_context(void *priv, void *bt_context)
/**
* rsi_91x_init() - This function initializes os interface operations.
- * @void: Void.
+ * @oper_mode: One of DEV_OPMODE_*.
*
* Return: Pointer to the adapter structure on success, NULL on failure .
*/
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 9cc8a335d519..33c76d39a8e9 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -477,7 +477,6 @@ static int rsi_load_radio_caps(struct rsi_common *common)
* @common: Pointer to the driver private structure.
* @msg: Pointer to received packet.
* @msg_len: Length of the received packet.
- * @type: Type of received packet.
*
* Return: 0 on success, -1 on failure.
*/
@@ -528,6 +527,8 @@ static int rsi_mgmt_pkt_to_core(struct rsi_common *common,
* @bssid: bssid.
* @qos_enable: Qos is enabled.
* @aid: Aid (unique for all STA).
+ * @sta_id: station id.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: status: 0 on success, corresponding negative error code on failure.
*/
@@ -603,6 +604,7 @@ int rsi_hal_send_sta_notify_frame(struct rsi_common *common, enum opmode opmode,
* @ssn: ssn.
* @buf_size: buffer size.
* @event: notification about station connection.
+ * @sta_id: station id.
*
* Return: 0 on success, corresponding negative error code on failure.
*/
@@ -699,7 +701,10 @@ static int rsi_program_bb_rf(struct rsi_common *common)
/**
* rsi_set_vap_capabilities() - This function send vap capability to firmware.
* @common: Pointer to the driver private structure.
- * @opmode: Operating mode of device.
+ * @mode: Operating mode of device.
+ * @mac_addr: MAC address
+ * @vap_id: VAP id
+ * @vap_status: VAP status - ADD, DELETE or UPDATE
*
* Return: 0 on success, corresponding negative error code on failure.
*/
@@ -780,6 +785,8 @@ int rsi_set_vap_capabilities(struct rsi_common *common,
* @key_type: Type of key: GROUP/PAIRWISE.
* @key_id: Key index.
* @cipher: Type of cipher used.
+ * @sta_id: Station id.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: 0 on success, -1 on failure.
*/
@@ -1045,6 +1052,7 @@ static int rsi_send_reset_mac(struct rsi_common *common)
/**
* rsi_band_check() - This function programs the band
* @common: Pointer to the driver private structure.
+ * @curchan: Pointer to the current channel structure.
*
* Return: 0 on success, corresponding error code on failure.
*/
@@ -1165,7 +1173,6 @@ int rsi_set_channel(struct rsi_common *common,
* rsi_send_radio_params_update() - This function sends the radio
* parameters update to device
* @common: Pointer to the driver private structure.
- * @channel: Channel value to be set.
*
* Return: 0 on success, corresponding error code on failure.
*/
@@ -1289,6 +1296,9 @@ static bool rsi_map_rates(u16 rate, int *offset)
* rsi_send_auto_rate_request() - This function is to set rates for connection
* and send autorate request to firmware.
* @common: Pointer to the driver private structure.
+ * @sta: mac80211 station.
+ * @sta_id: station id.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: 0 on success, corresponding error code on failure.
*/
@@ -1439,10 +1449,15 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
* help of sta notify params by sending an internal
* management frame to firmware.
* @common: Pointer to the driver private structure.
+ * @opmode: Operating mode of device.
* @status: Bss status type.
- * @bssid: Bssid.
+ * @addr: BSSID or station address.
* @qos_enable: Qos is enabled.
* @aid: Aid (unique for all STAs).
+ * @sta: mac80211 station.
+ * @sta_id: station id.
+ * @assoc_cap: capabilities.
+ * @vif: Pointer to the ieee80211_vif structure.
*
* Return: None.
*/
@@ -1535,9 +1550,9 @@ static int rsi_eeprom_read(struct rsi_common *common)
* This function sends a frame to block/unblock
* data queues in the firmware
*
- * @param common Pointer to the driver private structure.
- * @param block event - block if true, unblock if false
- * @return 0 on success, -1 on failure.
+ * @common: Pointer to the driver private structure.
+ * @block_event: block if true, unblock if false
+ *
+ * Return: 0 on success, -1 on failure.
*/
int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
{
@@ -1581,7 +1596,7 @@ int rsi_send_block_unblock_frame(struct rsi_common *common, bool block_event)
* @common: Pointer to the driver private structure.
* @rx_filter_word: Flags of filter packets
*
- * @Return: 0 on success, -1 on failure.
+ * Return: 0 on success, -1 on failure.
*/
int rsi_send_rx_filter_frame(struct rsi_common *common, u16 rx_filter_word)
{
diff --git a/drivers/net/wireless/rsi/rsi_91x_ps.c b/drivers/net/wireless/rsi/rsi_91x_ps.c
index 01472fac8b9a..fdaa5a7260dd 100644
--- a/drivers/net/wireless/rsi/rsi_91x_ps.c
+++ b/drivers/net/wireless/rsi/rsi_91x_ps.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index a04ff75c409f..a7b8684143f4 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
@@ -799,7 +799,7 @@ static int rsi_sdio_host_intf_write_pkt(struct rsi_hw *adapter,
/**
* rsi_sdio_host_intf_read_pkt() - This function reads the packet
- from the device.
+ * from the device.
* @adapter: Pointer to the adapter data structure.
* @pkt: Pointer to the packet data to be read from the the device.
* @length: Length of the data to be read from the device.
@@ -832,11 +832,10 @@ int rsi_sdio_host_intf_read_pkt(struct rsi_hw *adapter,
* rsi_init_sdio_interface() - This function does init specific to SDIO.
*
* @adapter: Pointer to the adapter data structure.
- * @pkt: Pointer to the packet data to be read from the the device.
+ * @pfunction: Pointer to the sdio_func structure.
*
* Return: 0 on success, -1 on failure.
*/
-
static int rsi_init_sdio_interface(struct rsi_hw *adapter,
struct sdio_func *pfunction)
{
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 449f6d23c5e3..7825c9a889d3 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -1,4 +1,4 @@
-/**
+/*
* Copyright (c) 2014 Redpine Signals Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index c86f31dcc981..d9b6147bbb52 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -1028,14 +1028,12 @@ static int wsm_find_complete_indication(struct cw1200_common *priv,
static int wsm_ba_timeout_indication(struct cw1200_common *priv,
struct wsm_buf *buf)
{
- u32 dummy;
u8 tid;
- u8 dummy2;
u8 addr[ETH_ALEN];
- dummy = WSM_GET32(buf);
+ WSM_GET32(buf);
tid = WSM_GET8(buf);
- dummy2 = WSM_GET8(buf);
+ WSM_GET8(buf);
WSM_GET(buf, addr, ETH_ALEN);
pr_info("BlockACK timeout, tid %d, addr %pM\n",
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 480a8d084878..136a0d3b23c9 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -558,7 +558,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
out:
dev_kfree_skb(skb);
if (ret)
- wl1251_warning("cmd buld null data failed: %d", ret);
+ wl1251_warning("cmd build null data failed: %d", ret);
return ret;
}
diff --git a/drivers/net/wireless/ti/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h
index e03f8321ea60..890176c915ab 100644
--- a/drivers/net/wireless/ti/wl1251/reg.h
+++ b/drivers/net/wireless/ti/wl1251/reg.h
@@ -217,7 +217,7 @@ enum wl12xx_acx_int_reg {
Halt eCPU - 32bit RW
------------------------------------------
0 HALT_ECPU Halt Embedded CPU - This bit is the
- compliment of bit 1 (MDATA2) in the SOR_CFG register.
+ complement of bit 1 (MDATA2) in the SOR_CFG register.
During a hardware reset, this bit holds
the inverse of MDATA2.
When downloading firmware from the host,
diff --git a/drivers/net/wireless/ti/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h
index 247f558ba630..8ff018808020 100644
--- a/drivers/net/wireless/ti/wl12xx/reg.h
+++ b/drivers/net/wireless/ti/wl12xx/reg.h
@@ -139,7 +139,7 @@
Halt eCPU - 32bit RW
------------------------------------------
0 HALT_ECPU Halt Embedded CPU - This bit is the
- compliment of bit 1 (MDATA2) in the SOR_CFG register.
+ complement of bit 1 (MDATA2) in the SOR_CFG register.
During a hardware reset, this bit holds
the inverse of MDATA2.
When downloading firmware from the host,
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 6ef8fc9ae627..32a2e27cc561 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -825,7 +825,7 @@ out:
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
- * @len: length of the buffer
+ * @buf_len: length of the buffer
* @answer: is answer needed
*/
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
@@ -855,7 +855,8 @@ EXPORT_SYMBOL_GPL(wl1271_cmd_test);
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
- * @len: length of buf
+ * @cmd_len: length of command
+ * @res_len: length of payload
*/
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
size_t cmd_len, size_t res_len)
@@ -1080,7 +1081,7 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
out:
dev_kfree_skb(skb);
if (ret)
- wl1271_warning("cmd buld null data failed %d", ret);
+ wl1271_warning("cmd build null data failed %d", ret);
return ret;
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 48adb1876ab9..cce8d75d8b81 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -122,13 +122,6 @@ static void chip_op_handler(struct wl1271 *wl, unsigned long value,
pm_runtime_put_autosuspend(wl->dev);
}
-
-static inline void no_write_handler(struct wl1271 *wl,
- unsigned long value,
- unsigned long param)
-{
-}
-
#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \
min_val, max_val, write_handler_locked, \
write_handler_arg) \
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index fc3bb0d2ab8d..b143293e694f 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -78,13 +78,13 @@ static ssize_t sub## _ ##name## _read(struct file *file, \
struct wl1271 *wl = file->private_data; \
struct struct_type *stats = wl->stats.fw_stats; \
char buf[DEBUGFS_FORMAT_BUFFER_SIZE] = ""; \
- int res, i; \
+ int i; \
\
wl1271_debugfs_update_stats(wl); \
\
for (i = 0; i < len; i++) \
- res = snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
- buf, i, stats->sub.name[i]); \
+ snprintf(buf, sizeof(buf), "%s[%d] = %d\n", \
+ buf, i, stats->sub.name[i]); \
\
return wl1271_format_buffer(userbuf, count, ppos, "%s", buf); \
} \
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index d2bbd5108f7e..6863fd552d5e 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -30,7 +30,6 @@
#include "sysfs.h"
#define WL1271_BOOT_RETRIES 3
-#define WL1271_SUSPEND_SLEEP 100
#define WL1271_WAKEUP_TIMEOUT 500
static char *fwlog_param;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 686161db8706..026e88b80bfc 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -134,8 +134,8 @@ static const struct {
/**
* iw_valid_channel - validate channel in regulatory domain
- * @reg_comain - regulatory domain
- * @channel - channel to validate
+ * @reg_domain: regulatory domain
+ * @channel: channel to validate
*
* Returns 0 if invalid in the specified regulatory domain, non-zero if valid.
*/
@@ -154,7 +154,7 @@ static int iw_valid_channel(int reg_domain, int channel)
/**
* iw_default_channel - get default channel for a regulatory domain
- * @reg_comain - regulatory domain
+ * @reg_domain: regulatory domain
*
* Returns the default channel for a regulatory domain
*/
@@ -237,6 +237,7 @@ static int wl3501_get_flash_mac_addr(struct wl3501_card *this)
/**
* wl3501_set_to_wla - Move 'size' bytes from PC to card
+ * @this: Card
* @dest: Card addressing space
* @src: PC addressing space
* @size: Bytes to move
@@ -259,6 +260,7 @@ static void wl3501_set_to_wla(struct wl3501_card *this, u16 dest, void *src,
/**
* wl3501_get_from_wla - Move 'size' bytes from card to PC
+ * @this: Card
* @src: Card addressing space
* @dest: PC addressing space
* @size: Bytes to move
@@ -455,7 +457,7 @@ out:
/**
* wl3501_send_pkt - Send a packet.
- * @this - card
+ * @this: Card
*
* Send a packet.
*
@@ -720,7 +722,7 @@ static void wl3501_mgmt_scan_confirm(struct wl3501_card *this, u16 addr)
/**
* wl3501_block_interrupt - Mask interrupt from SUTRO
- * @this - card
+ * @this: Card
*
* Mask interrupt from SUTRO. (i.e. SUTRO cannot interrupt the HOST)
* Return: 1 if interrupt is originally enabled
@@ -737,7 +739,7 @@ static int wl3501_block_interrupt(struct wl3501_card *this)
/**
* wl3501_unblock_interrupt - Enable interrupt from SUTRO
- * @this - card
+ * @this: Card
*
* Enable interrupt from SUTRO. (i.e. SUTRO can interrupt the HOST)
* Return: 1 if interrupt is originally enabled
@@ -1110,8 +1112,8 @@ static inline void wl3501_ack_interrupt(struct wl3501_card *this)
/**
* wl3501_interrupt - Hardware interrupt from card.
- * @irq - Interrupt number
- * @dev_id - net_device
+ * @irq: Interrupt number
+ * @dev_id: net_device
*
* We must acknowledge the interrupt as soon as possible, and block the
* interrupt from the same card immediately to prevent re-entry.
@@ -1247,7 +1249,7 @@ static int wl3501_close(struct net_device *dev)
/**
* wl3501_reset - Reset the SUTRO.
- * @dev - network device
+ * @dev: network device
*
* It is almost the same as wl3501_open(). In fact, we may just wl3501_close()
* and wl3501_open() again, but I wouldn't like to free_irq() when the driver
@@ -1410,7 +1412,7 @@ static struct iw_statistics *wl3501_get_wireless_stats(struct net_device *dev)
/**
* wl3501_detach - deletes a driver "instance"
- * @link - FILL_IN
+ * @link: FILL_IN
*
* This deletes a driver "instance". The device is de-registered with Card
* Services. If it has been released, all local data structures are freed.
@@ -1431,9 +1433,7 @@ static void wl3501_detach(struct pcmcia_device *link)
wl3501_release(link);
unregister_netdev(dev);
-
- if (link->priv)
- free_netdev(link->priv);
+ free_netdev(dev);
}
static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 41641fc2be74..718c4ee865ba 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -1652,15 +1652,11 @@ static int zd1201_set_maxassoc(struct net_device *dev,
struct iw_request_info *info, struct iw_param *rrq, char *extra)
{
struct zd1201 *zd = netdev_priv(dev);
- int err;
if (!zd->ap)
return -EOPNOTSUPP;
- err = zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value);
- if (err)
- return err;
- return 0;
+ return zd1201_setconfig16(zd, ZD1201_RID_CNFMAXASSOCSTATIONS, rrq->value);
}
static int zd1201_get_maxassoc(struct net_device *dev,
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
index 0af4b1986e48..3bb51dc8d035 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_chip.c
@@ -1375,8 +1375,8 @@ static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
/**
* zd_rx_rate - report zd-rate
- * @rx_frame - received frame
- * @rx_status - rx_status as given by the device
+ * @rx_frame: received frame
+ * @status: rx_status as given by the device
*
* This function converts the rate as encoded in the received packet to the
* zd-rate, we are using on other places in the driver.
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index a9999d10ae81..3ef8533205f9 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -416,11 +416,10 @@ int zd_restore_settings(struct zd_mac *mac)
/**
* zd_mac_tx_status - reports tx status of a packet if required
- * @hw - a &struct ieee80211_hw pointer
- * @skb - a sk-buffer
- * @flags: extra flags to set in the TX status info
+ * @hw: a &struct ieee80211_hw pointer
+ * @skb: a sk-buffer
* @ackssi: ACK signal strength
- * @success - True for successful transmission of the frame
+ * @tx_status: success and/or retry
*
* This information calls ieee80211_tx_status_irqsafe() if required by the
* control information. It copies the control information into the status
@@ -477,7 +476,7 @@ static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
/**
* zd_mac_tx_failed - callback for failed frames
- * @dev: the mac80211 wireless device
+ * @urb: pointer to the urb structure
*
* This function is called if a frame couldn't be successfully
* transferred. The first frame from the tx queue, will be selected and
@@ -913,9 +912,9 @@ static int fill_ctrlset(struct zd_mac *mac,
/**
* zd_op_tx - transmits a network frame to the device
*
- * @dev: mac80211 hardware device
- * @skb: socket buffer
+ * @hw: a &struct ieee80211_hw pointer
* @control: the control structure
+ * @skb: socket buffer
*
* This function transmit an IEEE 802.11 network frame to the device. The
* control block of the skbuff will be initialized. If necessary the incoming
@@ -946,7 +945,7 @@ fail:
/**
* filter_ack - filters incoming packets for acknowledgements
- * @dev: the mac80211 device
+ * @hw: a &struct ieee80211_hw pointer
* @rx_hdr: received header
* @stats: the status for the received packet
*
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index 65b5985ad402..66367ab7e4c1 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -378,7 +378,6 @@ static inline void handle_regs_int(struct urb *urb)
int len;
u16 int_num;
- ZD_ASSERT(in_interrupt());
spin_lock_irqsave(&intr->lock, flags);
int_num = le16_to_cpu(*(__le16 *)(urb->transfer_buffer+2));
@@ -1140,9 +1139,9 @@ static void zd_rx_idle_timer_handler(struct work_struct *work)
zd_usb_reset_rx(usb);
}
-static void zd_usb_reset_rx_idle_timer_tasklet(unsigned long param)
+static void zd_usb_reset_rx_idle_timer_tasklet(struct tasklet_struct *t)
{
- struct zd_usb *usb = (struct zd_usb *)param;
+ struct zd_usb *usb = from_tasklet(usb, t, rx.reset_timer_tasklet);
zd_usb_reset_rx_idle_timer(usb);
}
@@ -1178,8 +1177,9 @@ static inline void init_usb_rx(struct zd_usb *usb)
}
ZD_ASSERT(rx->fragment_length == 0);
INIT_DELAYED_WORK(&rx->idle_work, zd_rx_idle_timer_handler);
- rx->reset_timer_tasklet.func = zd_usb_reset_rx_idle_timer_tasklet;
- rx->reset_timer_tasklet.data = (unsigned long)usb;
+ rx->reset_timer_tasklet.func = (void (*))
+ zd_usb_reset_rx_idle_timer_tasklet;
+ rx->reset_timer_tasklet.data = (unsigned long)&rx->reset_timer_tasklet;
}
static inline void init_usb_tx(struct zd_usb *usb)
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index f5bb7ace2ff5..84f2983bf384 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -210,7 +210,7 @@ static void pn533_usb_abort_cmd(struct pn533 *dev, gfp_t flags)
usb_kill_urb(phy->in_urb);
}
-/* ACR122 specific structs and fucntions */
+/* ACR122 specific structs and functions */
/* ACS ACR122 pn533 frame definitions */
#define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \
diff --git a/drivers/nfc/s3fwrn5/Kconfig b/drivers/nfc/s3fwrn5/Kconfig
index af9d18690afe..3f8b6da58280 100644
--- a/drivers/nfc/s3fwrn5/Kconfig
+++ b/drivers/nfc/s3fwrn5/Kconfig
@@ -2,6 +2,7 @@
config NFC_S3FWRN5
tristate
select CRYPTO
+ select CRYPTO_HASH
help
Core driver for Samsung S3FWRN5 NFC chip. Contains core utilities
of chip. It's intended to be used by PHYs to avoid duplicating lots
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 69857f080704..ec930ee2c847 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -348,7 +348,7 @@ static int s3fwrn5_fw_get_base_addr(
}
static inline bool
-s3fwrn5_fw_is_custom(struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
+s3fwrn5_fw_is_custom(const struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
{
return !!bootinfo->hw_version[2];
}
@@ -399,7 +399,7 @@ err:
return ret;
}
-bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version)
+bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version)
{
struct s3fwrn5_fw_version *new = (void *) &fw_info->fw.version;
struct s3fwrn5_fw_version *old = (void *) &version;
diff --git a/drivers/nfc/s3fwrn5/firmware.h b/drivers/nfc/s3fwrn5/firmware.h
index cf1a83a5a525..3c83e6730d30 100644
--- a/drivers/nfc/s3fwrn5/firmware.h
+++ b/drivers/nfc/s3fwrn5/firmware.h
@@ -91,7 +91,7 @@ struct s3fwrn5_fw_info {
void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name);
int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info);
-bool s3fwrn5_fw_check_version(struct s3fwrn5_fw_info *fw_info, u32 version);
+bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version);
int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info);
void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info);
diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c
index b4eb926d220a..dc995286be84 100644
--- a/drivers/nfc/s3fwrn5/i2c.c
+++ b/drivers/nfc/s3fwrn5/i2c.c
@@ -164,7 +164,6 @@ out:
static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
{
struct s3fwrn5_i2c_phy *phy = phy_id;
- int ret = 0;
if (!phy || !phy->ndev) {
WARN_ON_ONCE(1);
@@ -179,10 +178,9 @@ static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id)
switch (phy->mode) {
case S3FWRN5_MODE_NCI:
case S3FWRN5_MODE_FW:
- ret = s3fwrn5_i2c_read(phy);
+ s3fwrn5_i2c_read(phy);
break;
case S3FWRN5_MODE_COLD:
- ret = -EREMOTEIO;
break;
}
@@ -200,13 +198,21 @@ static int s3fwrn5_i2c_parse_dt(struct i2c_client *client)
if (!np)
return -ENODEV;
- phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
- if (!gpio_is_valid(phy->gpio_en))
- return -ENODEV;
+ phy->gpio_en = of_get_named_gpio(np, "en-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_en)) {
+ /* Also support the deprecated property */
+ phy->gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_en))
+ return -ENODEV;
+ }
- phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
- if (!gpio_is_valid(phy->gpio_fw_wake))
- return -ENODEV;
+ phy->gpio_fw_wake = of_get_named_gpio(np, "wake-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_fw_wake)) {
+ /* Also support the deprecated property */
+ phy->gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0);
+ if (!gpio_is_valid(phy->gpio_fw_wake))
+ return -ENODEV;
+ }
return 0;
}
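
The parse function now prefers the generic property names and only falls back to the deprecated "s3fwrn5,"-prefixed ones, so existing device trees keep probing. The fallback, reduced to a hedged helper (name hypothetical):

```c
#include <linux/of_gpio.h>

static int s3fwrn5_get_gpio(struct device_node *np,
			    const char *name, const char *deprecated)
{
	int gpio = of_get_named_gpio(np, name, 0);

	/* Fall back to the deprecated property for old DTBs. */
	if (!gpio_is_valid(gpio))
		gpio = of_get_named_gpio(np, deprecated, 0);

	return gpio_is_valid(gpio) ? gpio : -ENODEV;
}
```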
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index f25f1ec5f9e9..807eae04c1e3 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -331,8 +331,7 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
- skb->len - 2, GFP_KERNEL);
+ transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
if (!transaction)
return -ENOMEM;
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index 6586378cacb0..c8bdf078d111 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -315,8 +315,7 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG)
return -EPROTO;
- transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
- skb->len - 2, GFP_KERNEL);
+ transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);
if (!transaction)
return -ENOMEM;
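
Both NFC hunks drop the same redundant cast: devm_kzalloc() returns void *, which C converts implicitly to any object-pointer type, so the cast only added noise and could mask a type mismatch. The idiomatic form:

```c
struct nfc_evt_transaction *transaction;

transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL);	/* no cast */
if (!transaction)
	return -ENOMEM;
```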
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index d91618641be6..18450437d5d5 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -74,13 +74,6 @@ config OF_NET
depends on NETDEVICES
def_bool y
-config OF_MDIO
- def_tristate PHYLIB
- depends on PHYLIB
- select FIXED_PHY
- help
- OpenFirmware MDIO bus (Ethernet PHY) accessors
-
config OF_RESERVED_MEM
bool
depends on OF_EARLY_FLATTREE
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 663a4af0cccd..6e1e5212f058 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -9,7 +9,6 @@ obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_UNITTEST) += unittest.o
-obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
obj-$(CONFIG_OF_OVERLAY) += overlay.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ea44fea99813..161a23631472 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1869,6 +1869,7 @@ int of_remove_property(struct device_node *np, struct property *prop)
return rc;
}
+EXPORT_SYMBOL_GPL(of_remove_property);
int __of_update_property(struct device_node *np, struct property *newprop,
struct property **oldpropp)
diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c
index 7711651ff19e..4700ffbdfced 100644
--- a/drivers/ptp/ptp_ines.c
+++ b/drivers/ptp/ptp_ines.c
@@ -93,9 +93,6 @@ MODULE_LICENSE("GPL");
#define TC_E2E_PTP_V2 2
#define TC_P2P_PTP_V2 3
-#define OFF_PTP_CLOCK_ID 20
-#define OFF_PTP_PORT_NUM 28
-
#define PHY_SPEED_10 0
#define PHY_SPEED_100 1
#define PHY_SPEED_1000 2
@@ -443,57 +440,41 @@ static void ines_link_state(struct mii_timestamper *mii_ts,
static bool ines_match(struct sk_buff *skb, unsigned int ptp_class,
struct ines_timestamp *ts, struct device *dev)
{
- u8 *msgtype, *data = skb_mac_header(skb);
- unsigned int offset = 0;
- __be16 *portn, *seqid;
- __be64 *clkid;
+ struct ptp_header *hdr;
+ u16 portn, seqid;
+ u8 msgtype;
+ u64 clkid;
if (unlikely(ptp_class & PTP_CLASS_V1))
return false;
- if (ptp_class & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
-
- switch (ptp_class & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return false;
- }
-
- if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
return false;
- msgtype = data + offset;
- clkid = (__be64 *)(data + offset + OFF_PTP_CLOCK_ID);
- portn = (__be16 *)(data + offset + OFF_PTP_PORT_NUM);
- seqid = (__be16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ clkid = be64_to_cpup((__be64 *)&hdr->source_port_identity.clock_identity.id[0]);
+ portn = be16_to_cpu(hdr->source_port_identity.port_number);
+ seqid = be16_to_cpu(hdr->sequence_id);
- if (tag_to_msgtype(ts->tag & 0x7) != (*msgtype & 0xf)) {
+ if (tag_to_msgtype(ts->tag & 0x7) != msgtype) {
dev_dbg(dev, "msgtype mismatch ts %hhu != skb %hhu\n",
- tag_to_msgtype(ts->tag & 0x7), *msgtype & 0xf);
+ tag_to_msgtype(ts->tag & 0x7), msgtype);
return false;
}
- if (cpu_to_be64(ts->clkid) != *clkid) {
+ if (ts->clkid != clkid) {
dev_dbg(dev, "clkid mismatch ts %llx != skb %llx\n",
- cpu_to_be64(ts->clkid), *clkid);
+ ts->clkid, clkid);
return false;
}
- if (ts->portnum != ntohs(*portn)) {
+ if (ts->portnum != portn) {
dev_dbg(dev, "portn mismatch ts %hu != skb %hu\n",
- ts->portnum, ntohs(*portn));
+ ts->portnum, portn);
return false;
}
- if (ts->seqid != ntohs(*seqid)) {
+ if (ts->seqid != seqid) {
dev_dbg(dev, "seqid mismatch ts %hu != skb %hu\n",
- ts->seqid, ntohs(*seqid));
+ ts->seqid, seqid);
return false;
}
@@ -663,8 +644,7 @@ static void ines_txtstamp(struct mii_timestamper *mii_ts,
spin_unlock_irqrestore(&port->lock, flags);
- if (old_skb)
- kfree_skb(old_skb);
+ kfree_skb(old_skb);
schedule_delayed_work(&port->ts_work, 1);
}
@@ -694,35 +674,16 @@ static void ines_txtstamp_work(struct work_struct *work)
static bool is_sync_pdelay_resp(struct sk_buff *skb, int type)
{
- u8 *data = skb->data, *msgtype;
- unsigned int offset = 0;
-
- if (type & PTP_CLASS_VLAN)
- offset += VLAN_HLEN;
+ struct ptp_header *hdr;
+ u8 msgtype;
- switch (type & PTP_CLASS_PMASK) {
- case PTP_CLASS_IPV4:
- offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
- break;
- case PTP_CLASS_IPV6:
- offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
- break;
- case PTP_CLASS_L2:
- offset += ETH_HLEN;
- break;
- default:
- return 0;
- }
-
- if (type & PTP_CLASS_V1)
- offset += OFF_PTP_CONTROL;
-
- if (skb->len < offset + 1)
- return 0;
+ hdr = ptp_parse_header(skb, type);
+ if (!hdr)
+ return false;
- msgtype = data + offset;
+ msgtype = ptp_get_msgtype(hdr, type);
- switch ((*msgtype & 0xf)) {
+ switch ((msgtype & 0xf)) {
case SYNC:
case PDELAY_RESP:
return true;
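
Both conversions in ptp_ines.c replace the open-coded VLAN/IPv4/IPv6/L2 offset arithmetic (and its manual bounds checks) with the shared PTP parser from <linux/ptp_classify.h>. A sketch of the helper pair, function name hypothetical:

```c
#include <linux/ptp_classify.h>

static bool my_skb_is_ptp_sync(struct sk_buff *skb)
{
	unsigned int type = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (type == PTP_CLASS_NONE)
		return false;

	/* Handles VLAN tags and all three encapsulations internally. */
	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return false;

	return ptp_get_msgtype(hdr, type) == 0x0;	/* SYNC */
}
```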
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index c09c16be0edf..beb5f74944cd 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -72,6 +72,10 @@ static void set_fipers(struct ptp_qoriq *ptp_qoriq)
set_alarm(ptp_qoriq);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
}
int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event)
@@ -366,6 +370,7 @@ static u32 ptp_qoriq_nominal_freq(u32 clk_src)
* "fsl,tmr-add"
* "fsl,tmr-fiper1"
* "fsl,tmr-fiper2"
+ * "fsl,tmr-fiper3" (required only for DPAA2 and ENETC hardware)
* "fsl,max-adj"
*
* Return 0 if success
@@ -412,6 +417,7 @@ static int ptp_qoriq_auto_config(struct ptp_qoriq *ptp_qoriq,
ptp_qoriq->tmr_add = freq_comp;
ptp_qoriq->tmr_fiper1 = DEFAULT_FIPER1_PERIOD - ptp_qoriq->tclk_period;
ptp_qoriq->tmr_fiper2 = DEFAULT_FIPER2_PERIOD - ptp_qoriq->tclk_period;
+ ptp_qoriq->tmr_fiper3 = DEFAULT_FIPER3_PERIOD - ptp_qoriq->tclk_period;
/* max_adj = 1000000000 * (freq_ratio - 1.0) - 1
* freq_ratio = reference_clock_freq / nominal_freq
@@ -446,6 +452,10 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
else
ptp_qoriq->extts_fifo_support = false;
+ if (of_device_is_compatible(node, "fsl,dpaa2-ptp") ||
+ of_device_is_compatible(node, "fsl,enetc-ptp"))
+ ptp_qoriq->fiper3_support = true;
+
if (of_property_read_u32(node,
"fsl,tclk-period", &ptp_qoriq->tclk_period) ||
of_property_read_u32(node,
@@ -457,7 +467,10 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
of_property_read_u32(node,
"fsl,tmr-fiper2", &ptp_qoriq->tmr_fiper2) ||
of_property_read_u32(node,
- "fsl,max-adj", &ptp_qoriq->caps.max_adj)) {
+ "fsl,max-adj", &ptp_qoriq->caps.max_adj) ||
+ (ptp_qoriq->fiper3_support &&
+ of_property_read_u32(node, "fsl,tmr-fiper3",
+ &ptp_qoriq->tmr_fiper3))) {
pr_warn("device tree node missing required elements, try automatic configuration\n");
if (ptp_qoriq_auto_config(ptp_qoriq, node))
@@ -502,6 +515,11 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
ptp_qoriq->write(&regs->ctrl_regs->tmr_prsc, ptp_qoriq->tmr_prsc);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper1, ptp_qoriq->tmr_fiper1);
ptp_qoriq->write(&regs->fiper_regs->tmr_fiper2, ptp_qoriq->tmr_fiper2);
+
+ if (ptp_qoriq->fiper3_support)
+ ptp_qoriq->write(&regs->fiper_regs->tmr_fiper3,
+ ptp_qoriq->tmr_fiper3);
+
set_alarm(ptp_qoriq);
ptp_qoriq->write(&regs->ctrl_regs->tmr_ctrl,
tmr_ctrl|FIPERST|RTPE|TE|FRD);
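
FIPER3 exists only on DPAA2 and ENETC timers, so the new device-tree property is required exactly when the compatible string says the register is present. The gating, reduced to a sketch (function and parameter names hypothetical):

```c
#include <linux/of.h>

static int read_fiper3(struct device_node *node, u32 *tmr_fiper3,
		       bool *fiper3_support)
{
	*fiper3_support = of_device_is_compatible(node, "fsl,dpaa2-ptp") ||
			  of_device_is_compatible(node, "fsl,enetc-ptp");

	/* Treat the property as required only when the hardware has it. */
	if (*fiper3_support &&
	    of_property_read_u32(node, "fsl,tmr-fiper3", tmr_fiper3))
		return -EINVAL;

	return 0;
}
```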
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c314e9495c1b..38017c4a31e9 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -65,6 +65,8 @@ int chsc_error_from_response(int response)
case 0x0100:
case 0x0102:
return -ENOMEM;
+ case 0x0108: /* "HW limit exceeded" for the op 0x003d */
+ return -EUSERS;
default:
return -EIO;
}
@@ -1114,7 +1116,7 @@ int chsc_enable_facility(int operation_code)
return ret;
}
-int __init chsc_get_cssid(int idx)
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid)
{
struct {
struct chsc_header request;
@@ -1125,7 +1127,8 @@ int __init chsc_get_cssid(int idx)
u32 reserved2[3];
struct {
u8 cssid;
- u32 : 24;
+ u8 iid;
+ u32 : 16;
} list[0];
} *sdcal_area;
int ret;
@@ -1151,8 +1154,10 @@ int __init chsc_get_cssid(int idx)
}
if ((addr_t) &sdcal_area->list[idx] <
- (addr_t) &sdcal_area->response + sdcal_area->response.length)
- ret = sdcal_area->list[idx].cssid;
+ (addr_t) &sdcal_area->response + sdcal_area->response.length) {
+ *cssid = sdcal_area->list[idx].cssid;
+ *iid = sdcal_area->list[idx].iid;
+ }
else
ret = -ENODEV;
exit:
@@ -1340,6 +1345,7 @@ EXPORT_SYMBOL_GPL(chsc_scm_info);
* chsc_pnso() - Perform Network-Subchannel Operation
* @schid: id of the subchannel on which PNSO is performed
* @pnso_area: request and response block for the operation
+ * @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
@@ -1347,10 +1353,8 @@ EXPORT_SYMBOL_GPL(chsc_scm_info);
*
* Returns 0 on success.
*/
-int chsc_pnso(struct subchannel_id schid,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc)
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc)
{
memset(pnso_area, 0, sizeof(*pnso_area));
pnso_area->request.length = 0x0030;
@@ -1359,7 +1363,7 @@ int chsc_pnso(struct subchannel_id schid,
pnso_area->ssid = schid.ssid;
pnso_area->sch = schid.sch_no;
pnso_area->cssid = schid.cssid;
- pnso_area->oc = 0; /* Store-network-bridging-information list */
+ pnso_area->oc = oc;
pnso_area->resume_token = resume_token;
pnso_area->n = (cnc != 0);
if (chsc(pnso_area))
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 7ecf7e4c402e..c2b83b68bc57 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -205,12 +205,10 @@ struct chsc_scm_info {
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token);
-int chsc_pnso(struct subchannel_id schid,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc);
+int chsc_pnso(struct subchannel_id schid, struct chsc_pnso_area *pnso_area,
+ u8 oc, struct chsc_pnso_resume_token resume_token, int cnc);
-int __init chsc_get_cssid(int idx);
+int __init chsc_get_cssid_iid(int idx, u8 *cssid, u8 *iid);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index aca022239b33..1981eb62d329 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -854,7 +854,7 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid =
- (css->cssid < 0) ? 0 : css->cssid;
+ css->id_valid ? css->cssid : 0;
} else {
css->global_pgid.pgid_high.cpu_addr = stap();
}
@@ -877,7 +877,7 @@ static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
{
struct channel_subsystem *css = to_css(dev);
- if (css->cssid < 0)
+ if (!css->id_valid)
return -EINVAL;
return sprintf(buf, "%x\n", css->cssid);
@@ -975,7 +975,12 @@ static int __init setup_css(int nr)
css->device.dma_mask = &css->device.coherent_dma_mask;
mutex_init(&css->mutex);
- css->cssid = chsc_get_cssid(nr);
+ ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
+ if (!ret) {
+ css->id_valid = true;
+ pr_info("Partition identifier %01x.%01x\n", css->cssid,
+ css->iid);
+ }
css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
ret = device_register(&css->device);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 8d832900a63d..3f322ea0f498 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -115,7 +115,9 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {
- int cssid;
+ u8 cssid;
+ u8 iid;
+ bool id_valid; /* cssid,iid */
struct channel_path *chps[__MAX_CHPID + 1];
struct device device;
struct pgid global_pgid;
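
With cssid shrunk from int to u8 there is no room for a -1 sentinel, so validity moves into an explicit id_valid flag. A reduced sketch of the accessor pattern this enables (types hypothetical):

```c
struct ids {
	u8 cssid;
	u8 iid;
	bool id_valid;
};

static int ids_get_cssid(const struct ids *ids, u8 *cssid)
{
	if (!ids->id_valid)
		return -ENODEV;

	*cssid = ids->cssid;
	return 0;
}
```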
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 963fcc9054c6..0fe7b2f2e7f5 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -714,6 +714,7 @@ EXPORT_SYMBOL_GPL(ccw_device_get_schid);
* ccw_device_pnso() - Perform Network-Subchannel Operation
* @cdev: device on which PNSO is performed
* @pnso_area: request and response block for the operation
+ * @oc: Operation Code
* @resume_token: resume token for multiblock response
* @cnc: Boolean change-notification control
*
@@ -722,17 +723,101 @@ EXPORT_SYMBOL_GPL(ccw_device_get_schid);
* Returns 0 on success.
*/
int ccw_device_pnso(struct ccw_device *cdev,
- struct chsc_pnso_area *pnso_area,
- struct chsc_pnso_resume_token resume_token,
- int cnc)
+ struct chsc_pnso_area *pnso_area, u8 oc,
+ struct chsc_pnso_resume_token resume_token, int cnc)
{
struct subchannel_id schid;
ccw_device_get_schid(cdev, &schid);
- return chsc_pnso(schid, pnso_area, resume_token, cnc);
+ return chsc_pnso(schid, pnso_area, oc, resume_token, cnc);
}
EXPORT_SYMBOL_GPL(ccw_device_pnso);
+/**
+ * ccw_device_get_cssid() - obtain Channel Subsystem ID
+ * @cdev: device to obtain the CSSID for
+ * @cssid: The resulting Channel Subsystem ID
+ */
+int ccw_device_get_cssid(struct ccw_device *cdev, u8 *cssid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *cssid = css->cssid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_cssid);
+
+/**
+ * ccw_device_get_iid() - obtain MIF-image ID
+ * @cdev: device to obtain the MIF-image ID for
+ * @iid: The resulting MIF-image ID
+ */
+int ccw_device_get_iid(struct ccw_device *cdev, u8 *iid)
+{
+ struct device *sch_dev = cdev->dev.parent;
+ struct channel_subsystem *css = to_css(sch_dev->parent);
+
+ if (css->id_valid)
+ *iid = css->iid;
+ return css->id_valid ? 0 : -ENODEV;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_iid);
+
+/**
+ * ccw_device_get_chpid() - obtain Channel Path ID
+ * @cdev: device to obtain the Channel Path ID for
+ * @chp_idx: Index of the channel path
+ * @chpid: The resulting Channel Path ID
+ */
+int ccw_device_get_chpid(struct ccw_device *cdev, int chp_idx, u8 *chpid)
+{
+ struct subchannel *sch = to_subchannel(cdev->dev.parent);
+ int mask;
+
+ if ((chp_idx < 0) || (chp_idx > 7))
+ return -EINVAL;
+ mask = 0x80 >> chp_idx;
+ if (!(sch->schib.pmcw.pim & mask))
+ return -ENODEV;
+
+ *chpid = sch->schib.pmcw.chpid[chp_idx];
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chpid);
+
+/**
+ * ccw_device_get_chid() - obtain Channel ID associated with specified CHPID
+ * @cdev: device to obtain the Channel ID for
+ * @chp_idx: Index of the channel path
+ * @chid: The resulting Channel ID
+ */
+int ccw_device_get_chid(struct ccw_device *cdev, int chp_idx, u16 *chid)
+{
+ struct chp_id cssid_chpid;
+ struct channel_path *chp;
+ int rc;
+
+ chp_id_init(&cssid_chpid);
+ rc = ccw_device_get_chpid(cdev, chp_idx, &cssid_chpid.id);
+ if (rc)
+ return rc;
+ chp = chpid_to_chp(cssid_chpid);
+ if (!chp)
+ return -ENODEV;
+
+ mutex_lock(&chp->lock);
+ if (chp->desc_fmt1.flags & 0x10)
+ *chid = chp->desc_fmt1.chid;
+ else
+ rc = -ENODEV;
+ mutex_unlock(&chp->lock);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(ccw_device_get_chid);
+
/*
* Allocate zeroed dma coherent 31 bit addressable memory using
* the subchannels dma pool. Maximal size of allocation supported
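
Together the four new exports let a CCW client collect its addressing identifiers in one pass, as the qeth changes later in this diff do. A hedged usage sketch (function name hypothetical):

```c
static int my_collect_ids(struct ccw_device *cdev)
{
	u8 cssid, iid, chpid;
	u16 chid;

	if (ccw_device_get_cssid(cdev, &cssid) ||
	    ccw_device_get_iid(cdev, &iid) ||
	    ccw_device_get_chpid(cdev, 0, &chpid) ||
	    ccw_device_get_chid(cdev, 0, &chid))
		return -ENODEV;

	/* e.g. assemble a network-interface token from these IDs */
	return 0;
}
```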
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig
index 53120e68796e..bf236d474538 100644
--- a/drivers/s390/net/Kconfig
+++ b/drivers/s390/net/Kconfig
@@ -107,7 +107,7 @@ config QETH_OSX
config CCWGROUP
tristate
- default (LCS || CTCM || QETH)
+ default (LCS || CTCM || QETH || SMC)
config ISM
tristate "Support for ISM vPCI Adapter"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 225737295cb4..d98c486724d4 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -159,7 +159,6 @@ extern const char *ctc_ch_state_names[];
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg);
void ctcm_purge_skb_queue(struct sk_buff_head *q);
-void fsm_action_nop(fsm_instance *fi, int event, void *arg);
/*
* ----- non-static actions for ctcm channel statemachine -----
diff --git a/drivers/s390/net/ctcm_mpc.h b/drivers/s390/net/ctcm_mpc.h
index 441d7b211f0f..da41b26f76d1 100644
--- a/drivers/s390/net/ctcm_mpc.h
+++ b/drivers/s390/net/ctcm_mpc.h
@@ -228,7 +228,6 @@ static inline void ctcmpc_dump32(char *buf, int len)
ctcmpc_dumpit(buf, 32);
}
-int ctcmpc_open(struct net_device *);
void ctcm_ccw_check_rc(struct channel *, int, char *);
void mpc_group_ready(unsigned long adev);
void mpc_channel_action(struct channel *ch, int direction, int action);
diff --git a/drivers/s390/net/ism.h b/drivers/s390/net/ism.h
index 1901e9c80ed8..38fe90c2597d 100644
--- a/drivers/s390/net/ism.h
+++ b/drivers/s390/net/ism.h
@@ -16,6 +16,7 @@
#define ISM_DMB_WORD_OFFSET 1
#define ISM_DMB_BIT_OFFSET (ISM_DMB_WORD_OFFSET * 32)
#define ISM_NR_DMBS 1920
+#define ISM_IDENT_MASK 0x00FFFF
#define ISM_REG_SBA 0x1
#define ISM_REG_IEQ 0x2
@@ -206,6 +207,12 @@ struct ism_dev {
#define ISM_CREATE_REQ(dmb, idx, sf, offset) \
((dmb) | (idx) << 24 | (sf) << 23 | (offset))
+struct ism_systemeid {
+ u8 seid_string[24];
+ u8 serial_number[4];
+ u8 type[4];
+};
+
static inline void __ism_read_cmd(struct ism_dev *ism, void *data,
unsigned long offset, unsigned long len)
{
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 5fbe9eae84d1..fe96ca3c88a5 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -13,6 +13,8 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/processor.h>
#include <net/smc.h>
#include <asm/debug.h>
@@ -387,6 +389,42 @@ static int ism_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
return 0;
}
+static struct ism_systemeid SYSTEM_EID = {
+ .seid_string = "IBM-SYSZ-IBMSEID00000000",
+ .serial_number = "0000",
+ .type = "0000",
+};
+
+static void ism_create_system_eid(void)
+{
+ struct cpuid id;
+ u16 ident_tail;
+ char tmp[5];
+
+ get_cpu_id(&id);
+ ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
+ snprintf(tmp, 5, "%04X", ident_tail);
+ memcpy(&SYSTEM_EID.serial_number, tmp, 4);
+ snprintf(tmp, 5, "%04X", id.machine);
+ memcpy(&SYSTEM_EID.type, tmp, 4);
+}
+
+static void ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
+{
+ *eid = &SYSTEM_EID.seid_string[0];
+}
+
+static u16 ism_get_chid(struct smcd_dev *smcd)
+{
+ struct ism_dev *ismdev;
+
+ ismdev = (struct ism_dev *)smcd->priv;
+ if (!ismdev || !ismdev->pdev)
+ return 0;
+
+ return to_zpci(ismdev->pdev)->pchid;
+}
+
static void ism_handle_event(struct ism_dev *ism)
{
struct smcd_event *entry;
@@ -443,6 +481,8 @@ static const struct smcd_ops ism_ops = {
.reset_vlan_required = ism_reset_vlan_required,
.signal_event = ism_signal_ieq,
.move_data = ism_move,
+ .get_system_eid = ism_get_system_eid,
+ .get_chid = ism_get_chid,
};
static int ism_dev_init(struct ism_dev *ism)
@@ -471,6 +511,10 @@ static int ism_dev_init(struct ism_dev *ism)
if (ret)
goto unreg_ieq;
+ if (!ism_add_vlan_id(ism->smcd, ISM_RESERVED_VLANID))
+ /* hardware is V2 capable */
+ ism_create_system_eid();
+
ret = smcd_register_dev(ism->smcd);
if (ret)
goto unreg_ieq;
@@ -550,6 +594,9 @@ static void ism_dev_exit(struct ism_dev *ism)
struct pci_dev *pdev = ism->pdev;
smcd_unregister_dev(ism->smcd);
+ if (SYSTEM_EID.serial_number[0] != '0' ||
+ SYSTEM_EID.type[0] != '0')
+ ism_del_vlan_id(ism->smcd, ISM_RESERVED_VLANID);
unregister_ieq(ism);
unregister_sba(ism);
free_irq(pci_irq_vector(pdev, 0), ism);
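
ism_create_system_eid() fills fixed-width ASCII fields that carry no NUL terminator, while snprintf() always writes one; hence the 5-byte scratch buffer. The field fill, reduced:

```c
static void fill_serial(u8 serial_number[4], u16 ident_tail)
{
	char tmp[5];	/* 4 hex digits + NUL written by snprintf() */

	snprintf(tmp, sizeof(tmp), "%04X", ident_tail);
	memcpy(serial_number, tmp, 4);	/* copy the digits, drop the NUL */
}
```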
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index ecfd6d152e86..f73b4756ed5e 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -177,8 +177,8 @@ struct qeth_vnicc_info {
/**
* some more defs
*/
-#define QETH_TX_TIMEOUT 100 * HZ
-#define QETH_RCD_TIMEOUT 60 * HZ
+#define QETH_TX_TIMEOUT (100 * HZ)
+#define QETH_RCD_TIMEOUT (60 * HZ)
#define QETH_RECLAIM_WORK_TIME HZ
#define QETH_MAX_PORTNO 15
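
The added parentheses are standard macro hygiene: without them the expansion can bind wrongly inside a larger expression. Hypothetical illustration:

```c
#define BAD_TIMEOUT	100 * HZ	/* unparenthesized */
#define GOOD_TIMEOUT	(100 * HZ)

/* "x % BAD_TIMEOUT"  expands to "(x % 100) * HZ" -- wrong binding;
 * "x % GOOD_TIMEOUT" expands to "x % (100 * HZ)" -- as intended.
 */
```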
@@ -195,8 +195,8 @@ struct qeth_vnicc_info {
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
#define QETH_IN_BUF_COUNT_HSDEFAULT 128
-#define QETH_IN_BUF_COUNT_MIN 8
-#define QETH_IN_BUF_COUNT_MAX 128
+#define QETH_IN_BUF_COUNT_MIN 8U
+#define QETH_IN_BUF_COUNT_MAX 128U
#define QETH_MAX_BUFFER_ELEMENTS(card) ((card)->qdio.in_buf_size >> 12)
#define QETH_IN_BUF_REQUEUE_THRESHOLD(card) \
((card)->qdio.in_buf_pool.buf_count / 2)
@@ -278,6 +278,26 @@ struct qeth_hdr {
} hdr;
} __attribute__ ((packed));
+#define QETH_QIB_PQUE_ORDER_RR 0
+#define QETH_QIB_PQUE_UNITS_SBAL 2
+#define QETH_QIB_PQUE_PRIO_DEFAULT 4
+
+struct qeth_qib_parms {
+ char pcit_magic[4];
+ u32 pcit_a;
+ u32 pcit_b;
+ u32 pcit_c;
+ char blkt_magic[4];
+ u32 blkt_total;
+ u32 blkt_inter_packet;
+ u32 blkt_inter_packet_jumbo;
+ char pque_magic[4];
+ u8 pque_order;
+ u8 pque_units;
+ u16 reserved;
+ u32 pque_priority[4];
+};
+
/*TCP Segmentation Offload header*/
struct qeth_hdr_ext_tso {
__u16 hdr_tot_len;
@@ -420,12 +440,6 @@ struct qeth_qdio_out_buffer {
struct qeth_card;
-enum qeth_out_q_states {
- QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED,
- QETH_OUT_Q_LOCKED_FLUSH,
-};
-
#define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val))
#define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1)
@@ -486,12 +500,13 @@ struct qeth_qdio_out_q {
struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
struct qdio_outbuf_state *bufstates; /* convenience pointer */
struct qeth_out_q_stats stats;
+ spinlock_t lock;
+ unsigned int priority;
u8 next_buf_to_fill;
u8 max_elements;
u8 queue_no;
u8 do_pack;
struct qeth_card *card;
- atomic_t state;
/*
* number of buffers that are currently filled (PRIMED)
* -> these buffers are hardware-owned
@@ -544,7 +559,7 @@ struct qeth_qdio_info {
int in_buf_size;
/* output */
- int no_out_queues;
+ unsigned int no_out_queues;
struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
struct qdio_outbuf_state *out_bufstates;
@@ -680,14 +695,27 @@ struct qeth_card_blkt {
int inter_packet_jumbo;
};
+enum qeth_pnso_mode {
+ QETH_PNSO_NONE,
+ QETH_PNSO_BRIDGEPORT,
+ QETH_PNSO_ADDR_INFO,
+};
+
#define QETH_BROADCAST_WITH_ECHO 0x01
#define QETH_BROADCAST_WITHOUT_ECHO 0x02
struct qeth_card_info {
unsigned short unit_addr2;
unsigned short cula;
- u8 chpid;
__u16 func_level;
char mcl_level[QETH_MCL_LENGTH + 1];
+ /* doubleword below corresponds to net_if_token */
+ u16 ddev_devno;
+ u8 cssid;
+ u8 iid;
+ u8 ssid;
+ u8 chpid;
+ u16 chid;
+ u8 ids_valid:1; /* cssid,iid,chid */
u8 dev_addr_is_registered:1;
u8 open_when_online:1;
u8 promisc_mode:1;
@@ -696,6 +724,7 @@ struct qeth_card_info {
/* no bitfield, we take a pointer on these two: */
u8 has_lp2lp_cso_v6;
u8 has_lp2lp_cso_v4;
+ enum qeth_pnso_mode pnso_mode;
enum qeth_card_types type;
enum qeth_link_types link_type;
int broadcast_capable;
@@ -745,7 +774,7 @@ struct qeth_discipline {
const struct device_type *devtype;
int (*setup) (struct ccwgroup_device *);
void (*remove) (struct ccwgroup_device *);
- int (*set_online)(struct qeth_card *card);
+ int (*set_online)(struct qeth_card *card, bool carrier_ok);
void (*set_offline)(struct qeth_card *card);
int (*do_ioctl)(struct net_device *dev, struct ifreq *rq, int cmd);
int (*control_event_handler)(struct qeth_card *card,
@@ -780,6 +809,9 @@ struct qeth_switch_info {
struct qeth_priv {
unsigned int rx_copybreak;
+ unsigned int tx_wanted_queues;
+ u32 brport_hw_features;
+ u32 brport_features;
};
#define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
@@ -804,12 +836,16 @@ struct qeth_card {
struct workqueue_struct *event_wq;
struct workqueue_struct *cmd_wq;
wait_queue_head_t wait_q;
+
+ struct mutex ip_lock;
+ /* protected by ip_lock: */
DECLARE_HASHTABLE(ip_htable, 4);
+ struct qeth_ipato ipato;
+
DECLARE_HASHTABLE(local_addrs4, 4);
DECLARE_HASHTABLE(local_addrs6, 4);
spinlock_t local_addrs4_lock;
spinlock_t local_addrs6_lock;
- struct mutex ip_lock;
DECLARE_HASHTABLE(rx_mode_addrs, 4);
struct work_struct rx_mode_work;
struct work_struct kernel_thread_starter;
@@ -817,13 +853,12 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct qeth_ipato ipato;
struct list_head cmd_waiter_list;
/* QDIO buffer handling */
struct qeth_qdio_info qdio;
int read_or_write_problem;
struct qeth_osn_info osn_info;
- struct qeth_discipline *discipline;
+ const struct qeth_discipline *discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
struct qdio_ssqd_desc ssqd;
@@ -857,8 +892,20 @@ struct qeth_trap_id {
__u16 devno;
} __packed;
-/*some helper functions*/
-#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline bool qeth_uses_tx_prio_queueing(struct qeth_card *card)
+{
+ return card->qdio.do_prio_queueing != QETH_NO_PRIO_QUEUEING;
+}
+
+static inline unsigned int qeth_tx_actual_queues(struct qeth_card *card)
+{
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ if (qeth_uses_tx_prio_queueing(card))
+ return min(card->dev->num_tx_queues, card->qdio.no_out_queues);
+
+ return min(priv->tx_wanted_queues, card->qdio.no_out_queues);
+}
static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
{
@@ -1001,8 +1048,8 @@ static inline int qeth_send_simple_setassparms_v6(struct qeth_card *card,
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb);
-extern struct qeth_discipline qeth_l2_discipline;
-extern struct qeth_discipline qeth_l3_discipline;
+extern const struct qeth_discipline qeth_l2_discipline;
+extern const struct qeth_discipline qeth_l3_discipline;
extern const struct ethtool_ops qeth_ethtool_ops;
extern const struct ethtool_ops qeth_osn_ethtool_ops;
extern const struct attribute_group *qeth_generic_attr_groups[];
@@ -1022,13 +1069,11 @@ extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
struct net_device *qeth_clone_netdev(struct net_device *orig);
struct qeth_card *qeth_get_card_by_busid(char *bus_id);
-void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
+void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
+ int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
-int qeth_stop_channel(struct qeth_channel *channel);
int qeth_set_offline(struct qeth_card *card, bool resetting);
-void qeth_print_status_message(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
@@ -1052,12 +1097,7 @@ void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason);
void qeth_put_cmd(struct qeth_cmd_buffer *iob);
int qeth_schedule_recovery(struct qeth_card *card);
-void qeth_flush_local_addrs(struct qeth_card *card);
int qeth_poll(struct napi_struct *napi, int budget);
-void qeth_clear_ipacmd_list(struct qeth_card *);
-int qeth_qdio_clear_card(struct qeth_card *, int);
-void qeth_clear_working_pool_list(struct qeth_card *);
-void qeth_drain_output_queues(struct qeth_card *card);
void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable);
int qeth_setadpparms_change_macaddr(struct qeth_card *);
void qeth_tx_timeout(struct net_device *, unsigned int txqueue);
@@ -1081,9 +1121,7 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
-void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
-int qeth_setup_netdev(struct qeth_card *card);
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 6a7398251423..93c9b30ab17a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/io.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/mii.h>
@@ -35,7 +36,6 @@
#include <asm/ebcdic.h>
#include <asm/chpid.h>
-#include <asm/io.h>
#include <asm/sysinfo.h>
#include <asm/diag.h>
#include <asm/cio.h>
@@ -201,7 +201,7 @@ int qeth_threads_running(struct qeth_card *card, unsigned long threads)
}
EXPORT_SYMBOL_GPL(qeth_threads_running);
-void qeth_clear_working_pool_list(struct qeth_card *card)
+static void qeth_clear_working_pool_list(struct qeth_card *card)
{
struct qeth_buffer_pool_entry *pool_entry, *tmp;
struct qeth_qdio_q *queue = card->qdio.in_q;
@@ -209,14 +209,12 @@ void qeth_clear_working_pool_list(struct qeth_card *card)
QETH_CARD_TEXT(card, 5, "clwrklst");
list_for_each_entry_safe(pool_entry, tmp,
- &card->qdio.in_buf_pool.entry_list, list){
- list_del(&pool_entry->list);
- }
+ &card->qdio.in_buf_pool.entry_list, list)
+ list_del(&pool_entry->list);
for (i = 0; i < ARRAY_SIZE(queue->bufs); i++)
queue->bufs[i].pool_entry = NULL;
}
-EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
static void qeth_free_pool_entry(struct qeth_buffer_pool_entry *entry)
{
@@ -482,6 +480,7 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
atomic_read(&c->state) ==
QETH_QDIO_BUF_HANDLED_DELAYED) {
struct qeth_qdio_out_buffer *f = c;
+
QETH_CARD_TEXT(f->q->card, 5, "fp");
QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
/* release here to avoid interleaving between
@@ -508,7 +507,6 @@ static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
}
}
-
static void qeth_qdio_handle_aob(struct qeth_card *card,
unsigned long phys_aob_addr)
{
@@ -658,12 +656,11 @@ static void qeth_flush_local_addrs6(struct qeth_card *card)
spin_unlock_irq(&card->local_addrs6_lock);
}
-void qeth_flush_local_addrs(struct qeth_card *card)
+static void qeth_flush_local_addrs(struct qeth_card *card)
{
qeth_flush_local_addrs4(card);
qeth_flush_local_addrs6(card);
}
-EXPORT_SYMBOL_GPL(qeth_flush_local_addrs);
static void qeth_add_local_addrs4(struct qeth_card *card,
struct qeth_ipacmd_local_addrs4 *cmd)
@@ -886,6 +883,7 @@ static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
{
const char *ipa_name;
int com = cmd->hdr.command;
+
ipa_name = qeth_get_ipa_cmd_name(com);
if (rc)
@@ -917,12 +915,12 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
dev_err(&card->gdev->dev,
"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
- QETH_CARD_IFNAME(card));
+ netdev_name(card->dev));
schedule_work(&card->close_dev_work);
} else {
dev_warn(&card->gdev->dev,
"The link for interface %s on CHPID 0x%X failed\n",
- QETH_CARD_IFNAME(card), card->info.chpid);
+ netdev_name(card->dev), card->info.chpid);
qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
netif_carrier_off(card->dev);
}
@@ -930,7 +928,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
case IPA_CMD_STARTLAN:
dev_info(&card->gdev->dev,
"The link for %s on CHPID 0x%X has been restored\n",
- QETH_CARD_IFNAME(card), card->info.chpid);
+ netdev_name(card->dev), card->info.chpid);
if (card->info.hwtrap)
card->info.hwtrap = 2;
qeth_schedule_recovery(card);
@@ -965,7 +963,7 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
}
}
-void qeth_clear_ipacmd_list(struct qeth_card *card)
+static void qeth_clear_ipacmd_list(struct qeth_card *card)
{
struct qeth_cmd_buffer *iob;
unsigned long flags;
@@ -977,7 +975,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card)
qeth_notify_cmd(iob, -ECANCELED);
spin_unlock_irqrestore(&card->lock, flags);
}
-EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
static int qeth_check_idx_response(struct qeth_card *card,
unsigned char *buffer)
@@ -1256,7 +1253,7 @@ static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
return 0;
}
QETH_CARD_TEXT(card, 2, "DGENCHK");
- return -EIO;
+ return -EIO;
}
return 0;
}
@@ -1502,7 +1499,7 @@ static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
}
}
-void qeth_drain_output_queues(struct qeth_card *card)
+static void qeth_drain_output_queues(struct qeth_card *card)
{
int i;
@@ -1513,25 +1510,13 @@ void qeth_drain_output_queues(struct qeth_card *card)
qeth_drain_output_queue(card->qdio.out_qs[i], false);
}
}
-EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
-static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
+static void qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
unsigned int max = single ? 1 : card->dev->num_tx_queues;
- unsigned int count;
- int rc;
-
- count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;
-
- rtnl_lock();
- rc = netif_set_real_num_tx_queues(card->dev, count);
- rtnl_unlock();
-
- if (rc)
- return rc;
if (card->qdio.no_out_queues == max)
- return 0;
+ return;
if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
qeth_free_qdio_queues(card);
@@ -1540,14 +1525,12 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
card->qdio.no_out_queues = max;
- return 0;
}
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
struct ccw_device *ccwdev;
struct channel_path_desc_fmt0 *chp_dsc;
- int rc = 0;
QETH_CARD_TEXT(card, 2, "chp_desc");
@@ -1560,12 +1543,12 @@ static int qeth_update_from_chp_desc(struct qeth_card *card)
if (IS_OSD(card) || IS_OSX(card))
/* CHPP field bit 6 == 1 -> single queue */
- rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
+ qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);
kfree(chp_dsc);
QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
- return rc;
+ return 0;
}
static void qeth_init_qdio_info(struct qeth_card *card)
@@ -1617,7 +1600,7 @@ static void qeth_start_kernel_thread(struct work_struct *work)
struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
- QETH_CARD_TEXT(card , 2, "strthrd");
+ QETH_CARD_TEXT(card, 2, "strthrd");
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
@@ -1754,7 +1737,7 @@ static int qeth_halt_channel(struct qeth_card *card,
return 0;
}
-int qeth_stop_channel(struct qeth_channel *channel)
+static int qeth_stop_channel(struct qeth_channel *channel)
{
struct ccw_device *cdev = channel->ccwdev;
int rc;
@@ -1772,7 +1755,6 @@ int qeth_stop_channel(struct qeth_channel *channel)
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_stop_channel);
static int qeth_start_channel(struct qeth_channel *channel)
{
@@ -1842,7 +1824,7 @@ static int qeth_clear_halt_card(struct qeth_card *card, int halt)
return qeth_clear_channels(card);
}
-int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
+static int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
int rc = 0;
@@ -1870,7 +1852,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QETH_CARD_TEXT_(card, 3, "2err%d", rc);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
@@ -2311,12 +2292,10 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
u8 port = ((u8)card->dev->dev_port) | 0x80;
struct ccw1 *ccw = __ccw_from_cmd(iob);
- struct ccw_dev_id dev_id;
qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
iob->data);
qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
- ccw_device_get_id(CARD_DDEV(card), &dev_id);
iob->finalize = qeth_idx_finalize_cmd;
port |= QETH_IDX_ACT_INVAL_FRAME;
@@ -2325,7 +2304,7 @@ static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
&card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
&card->info.func_level, 2);
- memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &card->info.ddev_devno, 2);
memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
@@ -2599,7 +2578,6 @@ static int qeth_ulp_setup(struct qeth_card *card)
{
__u16 temp;
struct qeth_cmd_buffer *iob;
- struct ccw_dev_id dev_id;
QETH_CARD_TEXT(card, 2, "ulpsetup");
@@ -2614,8 +2592,7 @@ static int qeth_ulp_setup(struct qeth_card *card)
memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
&card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);
- ccw_device_get_id(CARD_DDEV(card), &dev_id);
- memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
+ memcpy(QETH_ULP_SETUP_CUA(iob->data), &card->info.ddev_devno, 2);
temp = (card->info.cula << 8) + card->info.unit_addr2;
memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
@@ -2702,9 +2679,11 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
card->qdio.out_qs[i] = queue;
queue->card = card;
queue->queue_no = i;
+ spin_lock_init(&queue->lock);
timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
+ queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
/* give outbound qeth_qdio_buffers their qdio_buffers */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
@@ -2765,30 +2744,44 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
}
}
-static void qeth_create_qib_param_field(struct qeth_card *card,
- char *param_field)
+static void qeth_fill_qib_parms(struct qeth_card *card,
+ struct qeth_qib_parms *parms)
{
+ struct qeth_qdio_out_q *queue;
+ unsigned int i;
- param_field[0] = _ascebc['P'];
- param_field[1] = _ascebc['C'];
- param_field[2] = _ascebc['I'];
- param_field[3] = _ascebc['T'];
- *((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
- *((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
- *((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
-}
+ parms->pcit_magic[0] = 'P';
+ parms->pcit_magic[1] = 'C';
+ parms->pcit_magic[2] = 'I';
+ parms->pcit_magic[3] = 'T';
+ ASCEBC(parms->pcit_magic, sizeof(parms->pcit_magic));
+ parms->pcit_a = QETH_PCI_THRESHOLD_A(card);
+ parms->pcit_b = QETH_PCI_THRESHOLD_B(card);
+ parms->pcit_c = QETH_PCI_TIMER_VALUE(card);
+
+ parms->blkt_magic[0] = 'B';
+ parms->blkt_magic[1] = 'L';
+ parms->blkt_magic[2] = 'K';
+ parms->blkt_magic[3] = 'T';
+ ASCEBC(parms->blkt_magic, sizeof(parms->blkt_magic));
+ parms->blkt_total = card->info.blkt.time_total;
+ parms->blkt_inter_packet = card->info.blkt.inter_packet;
+ parms->blkt_inter_packet_jumbo = card->info.blkt.inter_packet_jumbo;
+
+ /* Prio-queueing implicitly uses the default priorities: */
+ if (qeth_uses_tx_prio_queueing(card) || card->qdio.no_out_queues == 1)
+ return;
-static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
- char *param_field)
-{
- param_field[16] = _ascebc['B'];
- param_field[17] = _ascebc['L'];
- param_field[18] = _ascebc['K'];
- param_field[19] = _ascebc['T'];
- *((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
- *((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
- *((unsigned int *) (&param_field[28])) =
- card->info.blkt.inter_packet_jumbo;
+ parms->pque_magic[0] = 'P';
+ parms->pque_magic[1] = 'Q';
+ parms->pque_magic[2] = 'U';
+ parms->pque_magic[3] = 'E';
+ ASCEBC(parms->pque_magic, sizeof(parms->pque_magic));
+ parms->pque_order = QETH_QIB_PQUE_ORDER_RR;
+ parms->pque_units = QETH_QIB_PQUE_UNITS_SBAL;
+
+ qeth_for_each_output_queue(card, queue, i)
+ parms->pque_priority[i] = queue->priority;
}
static int qeth_qdio_activate(struct qeth_card *card)
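
qeth_fill_qib_parms() swaps byte-offset pokes into a raw parameter area for the qeth_qib_parms layout declared earlier in this diff; ASCEBC() converts the ASCII magics to EBCDIC in place, replacing the _ascebc[] table lookups. The general technique, sketched under the assumption that the struct layout matches the channel-defined format:

```c
#include <asm/ebcdic.h>

static void fill_param_field(void *param_field)
{
	struct qeth_qib_parms *p = param_field;

	memcpy(p->pcit_magic, "PCIT", sizeof(p->pcit_magic));
	ASCEBC(p->pcit_magic, sizeof(p->pcit_magic));
	p->pcit_a = 1024;	/* illustrative threshold value */
}
```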
@@ -2870,7 +2863,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
return 0;
}
-void qeth_print_status_message(struct qeth_card *card)
+static void qeth_print_status_message(struct qeth_card *card)
{
switch (card->info.type) {
case QETH_CARD_TYPE_OSD:
@@ -2911,7 +2904,6 @@ void qeth_print_status_message(struct qeth_card *card)
(card->info.mcl_level[0]) ? ")" : "",
qeth_get_cardname_short(card));
}
-EXPORT_SYMBOL_GPL(qeth_print_status_message);
static void qeth_initialize_working_pool_list(struct qeth_card *card)
{
@@ -3068,7 +3060,6 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
atomic_set(&queue->used_buffers, 0);
atomic_set(&queue->set_pci_flags_count, 0);
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
}
return 0;
@@ -3425,7 +3416,6 @@ static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
}
free_page(info);
- return;
}
static int qeth_hw_trap_cb(struct qeth_card *card,
@@ -3549,8 +3539,9 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
static void qeth_buffer_reclaim_work(struct work_struct *work)
{
- struct qeth_card *card = container_of(work, struct qeth_card,
- buffer_reclaim_work.work);
+ struct qeth_card *card = container_of(to_delayed_work(work),
+ struct qeth_card,
+ buffer_reclaim_work);
local_bh_disable();
napi_schedule(&card->napi);
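
The reclaim worker now recovers its card with to_delayed_work() plus container_of() rather than assuming the work member sits first in the delayed_work. Generic form of the pattern (types hypothetical):

```c
#include <linux/workqueue.h>

struct my_card {
	struct delayed_work buffer_reclaim_work;
};

static void my_reclaim_work(struct work_struct *work)
{
	struct my_card *card = container_of(to_delayed_work(work),
					    struct my_card,
					    buffer_reclaim_work);

	/* ... reclaim buffers for card ... */
}
```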
@@ -3740,37 +3731,31 @@ static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
{
- int index;
- int flush_cnt = 0;
- int q_was_packing = 0;
-
/*
* check if we have to switch to non-packing mode or if
* we have to get a pci flag out on the queue
*/
if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
!atomic_read(&queue->set_pci_flags_count)) {
- if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
- QETH_OUT_Q_UNLOCKED) {
- /*
- * If we get in here, there was no action in
- * do_send_packet. So, we check if there is a
- * packing buffer to be flushed here.
- */
- index = queue->next_buf_to_fill;
- q_was_packing = queue->do_pack;
- /* queue->do_pack may change */
- barrier();
- flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
- if (!flush_cnt &&
- !atomic_read(&queue->set_pci_flags_count))
- flush_cnt += qeth_prep_flush_pack_buffer(queue);
+ unsigned int index, flush_cnt;
+ bool q_was_packing;
+
+ spin_lock(&queue->lock);
+
+ index = queue->next_buf_to_fill;
+ q_was_packing = queue->do_pack;
+
+ flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
+ if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
+ flush_cnt = qeth_prep_flush_pack_buffer(queue);
+
+ if (flush_cnt) {
+ qeth_flush_buffers(queue, index, flush_cnt);
if (q_was_packing)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
- if (flush_cnt)
- qeth_flush_buffers(queue, index, flush_cnt);
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
}
+
+ spin_unlock(&queue->lock);
}
}
@@ -4282,29 +4267,22 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
unsigned int offset, unsigned int hd_len,
int elements_needed)
{
+ unsigned int start_index = queue->next_buf_to_fill;
struct qeth_qdio_out_buffer *buffer;
unsigned int next_element;
struct netdev_queue *txq;
bool stopped = false;
- int start_index;
int flush_count = 0;
int do_pack = 0;
- int tmp;
int rc = 0;
- /* spin until we get the queue ... */
- while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
- QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
- start_index = queue->next_buf_to_fill;
buffer = queue->bufs[queue->next_buf_to_fill];
/* Just a sanity check, the wake/stop logic should ensure that we always
* get a free buffer.
*/
- if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
- atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+ if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
return -EBUSY;
- }
txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
@@ -4327,8 +4305,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
QETH_QDIO_BUF_EMPTY) {
qeth_flush_buffers(queue, start_index,
flush_count);
- atomic_set(&queue->state,
- QETH_OUT_Q_UNLOCKED);
rc = -EBUSY;
goto out;
}
@@ -4360,31 +4336,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
if (flush_count)
qeth_flush_buffers(queue, start_index, flush_count);
- else if (!atomic_read(&queue->set_pci_flags_count))
- atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
- /*
- * queue->state will go from LOCKED -> UNLOCKED or from
- * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
- * (switch packing state or flush buffer to get another pci flag out).
- * In that case we will enter this loop
- */
- while (atomic_dec_return(&queue->state)) {
- start_index = queue->next_buf_to_fill;
- /* check if we can go back to non-packing state */
- tmp = qeth_switch_to_nonpacking_if_needed(queue);
- /*
- * check if we need to flush a packing buffer to get a pci
- * flag out on the queue
- */
- if (!tmp && !atomic_read(&queue->set_pci_flags_count))
- tmp = qeth_prep_flush_pack_buffer(queue);
- if (tmp) {
- qeth_flush_buffers(queue, start_index, tmp);
- flush_count += tmp;
- }
- }
+
out:
- /* at this point the queue is UNLOCKED again */
if (do_pack)
QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
@@ -4458,8 +4411,10 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
} else {
/* TODO: drop skb_orphan() once TX completion is fast enough */
skb_orphan(skb);
+ spin_lock(&queue->lock);
rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
hd_len, elements);
+ spin_unlock(&queue->lock);
}
if (rc && !push_len)
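
Across these hunks the queue's open-coded LOCKED/UNLOCKED atomic state machine becomes a plain spinlock held around buffer fill and flush, gaining lockdep coverage and fairness. Reduced to its shape:

```c
/* before: busy-wait lock, invisible to lockdep */
while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
		      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED)
	;

/* after: an ordinary spinlock serializes fill and flush */
spin_lock(&queue->lock);
/* ... fill buffer, qeth_flush_buffers() if needed ... */
spin_unlock(&queue->lock);
```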
@@ -4955,7 +4910,6 @@ int qeth_vm_request_mac(struct qeth_card *card)
{
struct diag26c_mac_resp *response;
struct diag26c_mac_req *request;
- struct ccw_dev_id id;
int rc;
QETH_CARD_TEXT(card, 2, "vmreqmac");
@@ -4967,11 +4921,10 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_DDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
- request->devno = id.devno;
+ request->devno = card->info.ddev_devno;
QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
@@ -5044,7 +4997,6 @@ static void qeth_determine_capabilities(struct qeth_card *card)
card->options.cq = QETH_CQ_NOTAVAILABLE;
}
-
out_offline:
if (ddev_offline == 1)
qeth_stop_channel(channel);
@@ -5052,25 +5004,51 @@ out:
return;
}
+static void qeth_read_ccw_conf_data(struct qeth_card *card)
+{
+ struct qeth_card_info *info = &card->info;
+ struct ccw_device *cdev = CARD_DDEV(card);
+ struct ccw_dev_id dev_id;
+
+ QETH_CARD_TEXT(card, 2, "ccwconfd");
+ ccw_device_get_id(cdev, &dev_id);
+
+ info->ddev_devno = dev_id.devno;
+ info->ids_valid = !ccw_device_get_cssid(cdev, &info->cssid) &&
+ !ccw_device_get_iid(cdev, &info->iid) &&
+ !ccw_device_get_chid(cdev, 0, &info->chid);
+ info->ssid = dev_id.ssid;
+
+ dev_info(&card->gdev->dev, "CHID: %x CHPID: %x\n",
+ info->chid, info->chpid);
+
+ QETH_CARD_TEXT_(card, 3, "devn%x", info->ddev_devno);
+ QETH_CARD_TEXT_(card, 3, "cssid:%x", info->cssid);
+ QETH_CARD_TEXT_(card, 3, "iid:%x", info->iid);
+ QETH_CARD_TEXT_(card, 3, "ssid:%x", info->ssid);
+ QETH_CARD_TEXT_(card, 3, "chpid:%x", info->chpid);
+ QETH_CARD_TEXT_(card, 3, "chid:%x", info->chid);
+ QETH_CARD_TEXT_(card, 3, "idval%x", info->ids_valid);
+}
+
static int qeth_qdio_establish(struct qeth_card *card)
{
struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
+ struct qeth_qib_parms *qib_parms = NULL;
struct qdio_initialize init_data;
- char *qib_param_field;
unsigned int i;
int rc = 0;
QETH_CARD_TEXT(card, 2, "qdioest");
- qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
- if (!qib_param_field) {
- rc = -ENOMEM;
- goto out_free_nothing;
- }
+ if (!IS_IQD(card) && !IS_VM_NIC(card)) {
+ qib_parms = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL);
+ if (!qib_parms)
+ return -ENOMEM;
- qeth_create_qib_param_field(card, qib_param_field);
- qeth_create_qib_param_field_blkt(card, qib_param_field);
+ qeth_fill_qib_parms(card, qib_parms);
+ }
in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
if (card->options.cq == QETH_CQ_ENABLED)
@@ -5083,7 +5061,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
- init_data.qib_param_field = qib_param_field;
+ init_data.qib_param_field = (void *)qib_parms;
init_data.no_input_qs = card->qdio.no_in_queues;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = qeth_qdio_input_handler;
@@ -5120,9 +5098,9 @@ static int qeth_qdio_establish(struct qeth_card *card)
default:
break;
}
+
out:
- kfree(qib_param_field);
-out_free_nothing:
+ kfree(qib_parms);
return rc;
}
@@ -5138,7 +5116,7 @@ static void qeth_core_free_card(struct qeth_card *card)
kfree(card);
}
-void qeth_trace_features(struct qeth_card *card)
+static void qeth_trace_features(struct qeth_card *card)
{
QETH_CARD_TEXT(card, 2, "features");
QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
@@ -5147,7 +5125,6 @@ void qeth_trace_features(struct qeth_card *card)
QETH_CARD_HEX(card, 2, &card->info.diagass_support,
sizeof(card->info.diagass_support));
}
-EXPORT_SYMBOL_GPL(qeth_trace_features);
static struct ccw_device_id qeth_ids[] = {
{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
@@ -5178,7 +5155,7 @@ static struct ccw_driver qeth_ccw_driver = {
.remove = ccwgroup_remove_ccwdev,
};
-int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
+static int qeth_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
@@ -5220,6 +5197,7 @@ retriable:
}
qeth_determine_capabilities(card);
+ qeth_read_ccw_conf_data(card);
qeth_idx_init(card);
rc = qeth_idx_activate_read_channel(card);
@@ -5291,6 +5269,8 @@ retriable:
QETH_CARD_TEXT_(card, 2, "8err%d", rc);
}
+ qeth_trace_features(card);
+
if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
(card->info.hwtrap && qeth_hw_trap(card, QETH_DIAGS_TRAP_ARM)))
card->info.hwtrap = 0;
@@ -5316,21 +5296,53 @@ out:
CARD_DEVID(card), rc);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
static int qeth_set_online(struct qeth_card *card)
{
+ bool carrier_ok;
int rc;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
- rc = card->discipline->set_online(card);
+ rc = qeth_hardsetup_card(card, &carrier_ok);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
+ rc = -ENODEV;
+ goto err_hardsetup;
+ }
+
+ qeth_print_status_message(card);
+
+ if (card->dev->reg_state != NETREG_REGISTERED)
+ /* no need for locking / error handling at this early stage: */
+ qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
+
+ rc = card->discipline->set_online(card, carrier_ok);
+ if (rc)
+ goto err_online;
+
+ /* let user_space know that device is online */
+ kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
mutex_unlock(&card->discipline_mutex);
+ return 0;
+
+err_online:
+err_hardsetup:
+ qeth_qdio_clear_card(card, 0);
+ qeth_clear_working_pool_list(card);
+ qeth_flush_local_addrs(card);
+ qeth_stop_channel(&card->data);
+ qeth_stop_channel(&card->write);
+ qeth_stop_channel(&card->read);
+ qdio_free(CARD_DDEV(card));
+
+ mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
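
qeth_set_online() now owns the shared hardware setup and, on failure, the full teardown, using the kernel's usual goto-unwind layout. Skeleton of that layout (names hypothetical):

```c
static int my_set_online(struct my_card *card)
{
	int rc;

	rc = my_hardsetup(card);
	if (rc)
		goto err_hardsetup;

	rc = my_discipline_online(card);
	if (rc)
		goto err_online;

	return 0;

err_online:
err_hardsetup:
	my_teardown(card);	/* one teardown path for every failure */
	return rc;
}
```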
@@ -5347,6 +5359,9 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
card->info.hwtrap = 1;
}
+ /* cancel any stalled cmd that might block the rtnl: */
+ qeth_clear_ipacmd_list(card);
+
rtnl_lock();
card->info.open_when_online = card->dev->flags & IFF_UP;
dev_close(card->dev);
@@ -5354,8 +5369,16 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
netif_carrier_off(card->dev);
rtnl_unlock();
+ cancel_work_sync(&card->rx_mode_work);
+
card->discipline->set_offline(card);
+ qeth_qdio_clear_card(card, 0);
+ qeth_drain_output_queues(card);
+ qeth_clear_working_pool_list(card);
+ qeth_flush_local_addrs(card);
+ card->info.promisc_mode = 0;
+
rc = qeth_stop_channel(&card->data);
rc2 = qeth_stop_channel(&card->write);
rc3 = qeth_stop_channel(&card->read);
@@ -6025,6 +6048,7 @@ EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
static void qeth_unregister_dbf_views(void)
{
int x;
+
for (x = 0; x < QETH_DBF_INFOS; x++) {
debug_unregister(qeth_dbf[x].id);
qeth_dbf[x].id = NULL;
@@ -6220,6 +6244,7 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
priv = netdev_priv(dev);
priv->rx_copybreak = QETH_RX_COPYBREAK;
+ priv->tx_wanted_queues = IS_IQD(card) ? QETH_IQD_MIN_TXQ : 1;
dev->ml_priv = card;
dev->watchdog_timeo = QETH_TX_TIMEOUT;
@@ -6230,8 +6255,16 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);
- dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
- &qeth_ethtool_ops;
+ if (IS_OSN(card)) {
+ dev->ethtool_ops = &qeth_osn_ethtool_ops;
+ } else {
+ dev->ethtool_ops = &qeth_ethtool_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->hw_features |= NETIF_F_SG;
+ dev->vlan_features |= NETIF_F_SG;
+ if (IS_IQD(card))
+ dev->features |= NETIF_F_SG;
+ }
return dev;
}
@@ -6247,28 +6280,6 @@ struct net_device *qeth_clone_netdev(struct net_device *orig)
return clone;
}
-int qeth_setup_netdev(struct qeth_card *card)
-{
- struct net_device *dev = card->dev;
- unsigned int num_tx_queues;
-
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->hw_features |= NETIF_F_SG;
- dev->vlan_features |= NETIF_F_SG;
-
- if (IS_IQD(card)) {
- dev->features |= NETIF_F_SG;
- num_tx_queues = QETH_IQD_MIN_TXQ;
- } else if (IS_VM_NIC(card)) {
- num_tx_queues = 1;
- } else {
- num_tx_queues = dev->real_num_tx_queues;
- }
-
- return qeth_set_real_num_tx_queues(card, num_tx_queues);
-}
-EXPORT_SYMBOL_GPL(qeth_setup_netdev);
-
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -6401,6 +6412,7 @@ static int qeth_core_set_offline(struct ccwgroup_device *gdev)
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
@@ -6939,6 +6951,7 @@ int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
return rc;
}
+EXPORT_SYMBOL_GPL(qeth_set_real_num_tx_queues);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev)
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index b459def0fb26..6541bab96822 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -719,15 +719,8 @@ struct qeth_sbp_port_entry {
struct net_if_token token;
} __packed;
-struct qeth_sbp_query_ports {
- __u8 primary_bp_supported;
- __u8 secondary_bp_supported;
- __u8 num_entries;
- __u8 entry_length;
- struct qeth_sbp_port_entry entry[];
-} __packed;
-
-struct qeth_sbp_state_change {
+/* For IPA_SBP_QUERY_BRIDGE_PORTS, IPA_SBP_BRIDGE_PORT_STATE_CHANGE */
+struct qeth_sbp_port_data {
__u8 primary_bp_supported;
__u8 secondary_bp_supported;
__u8 num_entries;
@@ -741,8 +734,7 @@ struct qeth_ipacmd_setbridgeport {
union {
struct qeth_sbp_query_cmds_supp query_cmds_supp;
struct qeth_sbp_set_primary set_primary;
- struct qeth_sbp_query_ports query_ports;
- struct qeth_sbp_state_change state_change;
+ struct qeth_sbp_port_data port_data;
} data;
} __packed;
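
The two SETBRIDGEPORT replies now share a single layout. As a minimal
sketch (not part of the patch), a reply callback can walk the flexible
entry array like this; handle_entry() is a hypothetical consumer and cmd
is the usual struct qeth_ipa_cmd pointer:

	struct qeth_sbp_port_data *pd = &cmd->data.sbp.data.port_data;
	unsigned int i;

	/* entry_length should be validated against
	 * sizeof(struct qeth_sbp_port_entry) first, as the callbacks do.
	 */
	for (i = 0; i < pd->num_entries; i++)
		handle_entry(&pd->entry[i]);
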
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 8def82336f53..4441b3393eaf 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -52,7 +52,7 @@ static ssize_t qeth_dev_if_name_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", QETH_CARD_IFNAME(card));
+ return sprintf(buf, "%s\n", netdev_name(card->dev));
}
static DEVICE_ATTR(if_name, 0444, qeth_dev_if_name_show, NULL);
@@ -103,21 +103,21 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct qeth_card *card = dev_get_drvdata(dev);
- char *tmp;
unsigned int portno, limit;
int rc = 0;
+ rc = kstrtouint(buf, 16, &portno);
+ if (rc)
+ return rc;
+ if (portno > QETH_MAX_PORTNO)
+ return -EINVAL;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
- portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO) {
- rc = -EINVAL;
- goto out;
- }
limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
if (portno > limit) {
rc = -EINVAL;
@@ -164,9 +164,11 @@ static ssize_t qeth_dev_prioqing_show(struct device *dev,
return sprintf(buf, "%s\n", "by skb-priority");
case QETH_PRIO_Q_ING_VLAN:
return sprintf(buf, "%s\n", "by VLAN headers");
- default:
+ case QETH_PRIO_Q_ING_FIXED:
return sprintf(buf, "always queue %i\n",
card->qdio.default_out_queue);
+ default:
+ return sprintf(buf, "disabled\n");
}
}
@@ -248,19 +250,19 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
unsigned int cnt;
- char *tmp;
int rc = 0;
+ rc = kstrtouint(buf, 10, &cnt);
+ if (rc)
+ return rc;
+
mutex_lock(&card->conf_mutex);
if (card->state != CARD_STATE_DOWN) {
rc = -EPERM;
goto out;
}
- cnt = simple_strtoul(buf, &tmp, 10);
- cnt = (cnt < QETH_IN_BUF_COUNT_MIN) ? QETH_IN_BUF_COUNT_MIN :
- ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt);
-
+ cnt = clamp(cnt, QETH_IN_BUF_COUNT_MIN, QETH_IN_BUF_COUNT_MAX);
rc = qeth_resize_buffer_pool(card, cnt);
out:
@@ -341,18 +343,15 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
struct net_device *ndev;
- char *tmp;
- int i, rc = 0;
enum qeth_discipline_id newdis;
+ unsigned int input;
+ int rc;
- mutex_lock(&card->discipline_mutex);
- if (card->state != CARD_STATE_DOWN) {
- rc = -EPERM;
- goto out;
- }
+ rc = kstrtouint(buf, 16, &input);
+ if (rc)
+ return rc;
- i = simple_strtoul(buf, &tmp, 16);
- switch (i) {
+ switch (input) {
case 0:
newdis = QETH_DISCIPLINE_LAYER3;
break;
@@ -360,7 +359,12 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
newdis = QETH_DISCIPLINE_LAYER2;
break;
default:
- rc = -EINVAL;
+ return -EINVAL;
+ }
+
+ mutex_lock(&card->discipline_mutex);
+ if (card->state != CARD_STATE_DOWN) {
+ rc = -EPERM;
goto out;
}
@@ -551,20 +555,21 @@ static DEVICE_ATTR(hw_trap, 0644, qeth_hw_trap_show,
static ssize_t qeth_dev_blkt_store(struct qeth_card *card,
const char *buf, size_t count, int *value, int max_value)
{
- char *tmp;
- int i, rc = 0;
+ unsigned int input;
+ int rc;
+
+ rc = kstrtouint(buf, 10, &input);
+ if (rc)
+ return rc;
+
+ if (input > max_value)
+ return -EINVAL;
mutex_lock(&card->conf_mutex);
- if (card->state != CARD_STATE_DOWN) {
+ if (card->state != CARD_STATE_DOWN)
rc = -EPERM;
- goto out;
- }
- i = simple_strtoul(buf, &tmp, 10);
- if (i <= max_value)
- *value = i;
else
- rc = -EINVAL;
-out:
+ *value = input;
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
}
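
A minimal sketch of the parse-before-lock pattern that these sysfs stores
now follow (the attribute and the example_tunable field are hypothetical,
not from the patch): malformed input is rejected by kstrtouint() before
any mutex is taken, so parsing can never fail while holding the config
lock.

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		struct qeth_card *card = dev_get_drvdata(dev);
		unsigned int val;
		int rc;

		/* parse and range-check before locking */
		rc = kstrtouint(buf, 10, &val);
		if (rc)
			return rc;

		mutex_lock(&card->conf_mutex);
		if (card->state != CARD_STATE_DOWN)
			rc = -EPERM;	/* only changeable while offline */
		else
			card->example_tunable = val;	/* hypothetical field */
		mutex_unlock(&card->conf_mutex);

		return rc ? rc : count;
	}
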
diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c
index f870c5322bfe..b5caa723326e 100644
--- a/drivers/s390/net/qeth_ethtool.c
+++ b/drivers/s390/net/qeth_ethtool.c
@@ -211,13 +211,19 @@ static void qeth_get_channels(struct net_device *dev,
static int qeth_set_channels(struct net_device *dev,
struct ethtool_channels *channels)
{
+ struct qeth_priv *priv = netdev_priv(dev);
struct qeth_card *card = dev->ml_priv;
+ int rc;
if (channels->rx_count == 0 || channels->tx_count == 0)
return -EINVAL;
if (channels->tx_count > card->qdio.no_out_queues)
return -EINVAL;
+ /* Prio-queueing needs all TX queues: */
+ if (qeth_uses_tx_prio_queueing(card))
+ return -EPERM;
+
if (IS_IQD(card)) {
if (channels->tx_count < QETH_IQD_MIN_TXQ)
return -EINVAL;
@@ -228,13 +234,13 @@ static int qeth_set_channels(struct net_device *dev,
if (netif_running(dev) &&
channels->tx_count < dev->real_num_tx_queues)
return -EPERM;
- } else {
- /* OSA still uses the legacy prio-queue mechanism: */
- if (!IS_VM_NIC(card))
- return -EOPNOTSUPP;
}
- return qeth_set_real_num_tx_queues(card, channels->tx_count);
+ rc = qeth_set_real_num_tx_queues(card, channels->tx_count);
+ if (!rc)
+ priv->tx_wanted_queues = channels->tx_count;
+
+ return rc;
}
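
Persisting the accepted count in priv->tx_wanted_queues lets a later
recovery restore the user's choice. From userspace this path is exercised
via ethtool's channel interface (the device name is a placeholder):

	ethtool -L encXXXX tx 4
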
static int qeth_get_ts_info(struct net_device *dev,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index adf25c9fd2b3..296d73d84326 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -23,7 +23,7 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state);
int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state);
int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout);
int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout);
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card);
+bool qeth_bridgeport_allowed(struct qeth_card *card);
struct qeth_mac {
u8 mac_addr[ETH_ALEN];
@@ -31,4 +31,11 @@ struct qeth_mac {
struct hlist_node hnode;
};
+static inline bool qeth_bridgeport_is_in_use(struct qeth_card *card)
+{
+ return card->options.sbp.role ||
+ card->options.sbp.reflect_promisc ||
+ card->options.sbp.hostnotification;
+}
+
#endif /* __QETH_L2_H__ */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6384f7adba66..28f6dda95736 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -17,24 +17,17 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/hashtable.h>
+#include <net/switchdev.h>
#include <asm/chsc.h>
+#include <asm/css_chars.h>
#include <asm/setup.h>
#include "qeth_core.h"
#include "qeth_l2.h"
-static void qeth_bridgeport_query_support(struct qeth_card *card);
-static void qeth_bridge_state_change(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
-static void qeth_addr_change_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd);
-static void qeth_l2_vnicc_set_defaults(struct qeth_card *card);
-static void qeth_l2_vnicc_init(struct qeth_card *card);
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
- u32 *timeout);
-
static int qeth_l2_setdelmac_makerc(struct qeth_card *card, u16 retcode)
{
int rc;
@@ -190,7 +183,7 @@ static void qeth_l2_fill_header(struct qeth_qdio_out_q *queue,
/* VSWITCH relies on the VLAN
* information to be present in
* the QDIO header */
- if (veth->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+ if (veth->h_vlan_proto == htons(ETH_P_8021Q)) {
hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_VLAN;
hdr->hdr.l2.vlan_id = ntohs(veth->h_vlan_TCI);
}
@@ -273,26 +266,31 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
}
-static void qeth_l2_stop_card(struct qeth_card *card)
+static void qeth_l2_set_pnso_mode(struct qeth_card *card,
+ enum qeth_pnso_mode mode)
{
- QETH_CARD_TEXT(card, 2, "stopcard");
+ spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+ WRITE_ONCE(card->info.pnso_mode, mode);
+ spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
- qeth_set_allowed_threads(card, 0, 1);
+ if (mode == QETH_PNSO_NONE)
+ drain_workqueue(card->event_wq);
+}
- cancel_work_sync(&card->rx_mode_work);
- qeth_l2_drain_rx_mode_cache(card);
+static void qeth_l2_dev2br_fdb_flush(struct qeth_card *card)
+{
+ struct switchdev_notifier_fdb_info info;
- if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_DOWN;
- }
+ QETH_CARD_TEXT(card, 2, "fdbflush");
- qeth_qdio_clear_card(card, 0);
- qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
- flush_workqueue(card->event_wq);
- qeth_flush_local_addrs(card);
- card->info.promisc_mode = 0;
+ info.addr = NULL;
+ /* flush all VLANs: */
+ info.vid = 0;
+ info.added_by_user = false;
+ info.offloaded = true;
+
+ call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
+ card->dev, &info.info, NULL);
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -573,52 +571,10 @@ static u16 qeth_l2_select_queue(struct net_device *dev, struct sk_buff *skb,
return qeth_iqd_select_queue(dev, skb,
qeth_get_ether_cast_type(skb),
sb_dev);
+ if (qeth_uses_tx_prio_queueing(card))
+ return qeth_get_priority_queue(card, skb);
- return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
- qeth_get_priority_queue(card, skb);
-}
-
-static const struct device_type qeth_l2_devtype = {
- .name = "qeth_layer2",
- .groups = qeth_l2_attr_groups,
-};
-
-static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
-{
- struct qeth_card *card = dev_get_drvdata(&gdev->dev);
- int rc;
-
- if (IS_OSN(card))
- dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
-
- qeth_l2_vnicc_set_defaults(card);
- mutex_init(&card->sbp_lock);
-
- if (gdev->dev.type == &qeth_generic_devtype) {
- rc = qeth_l2_create_device_attributes(&gdev->dev);
- if (rc)
- return rc;
- }
-
- INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
- return 0;
-}
-
-static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
-{
- struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
-
- if (cgdev->dev.type == &qeth_generic_devtype)
- qeth_l2_remove_device_attributes(&cgdev->dev);
- qeth_set_allowed_threads(card, 0, 1);
- wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
-
- if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
-
- cancel_work_sync(&card->close_dev_work);
- if (card->dev->reg_state == NETREG_REGISTERED)
- unregister_netdev(card->dev);
+ return netdev_pick_tx(dev, skb, sb_dev);
}
static void qeth_l2_set_rx_mode(struct net_device *dev)
@@ -631,6 +587,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
/**
* qeth_l2_pnso() - perform network subchannel operation
* @card: qeth_card structure pointer
+ * @oc: Operation Code
* @cnc: Boolean Change-Notification Control
 * @cb: Callback function, executed for each element
 *      of the address list
@@ -641,7 +598,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
* control" is set, further changes in the address list will be reported
* via the IPA command.
*/
-static int qeth_l2_pnso(struct qeth_card *card, int cnc,
+static int qeth_l2_pnso(struct qeth_card *card, u8 oc, int cnc,
void (*cb)(void *priv, struct chsc_pnso_naid_l2 *entry),
void *priv)
{
@@ -652,13 +609,14 @@ static int qeth_l2_pnso(struct qeth_card *card, int cnc,
int i, size, elems;
int rc;
- QETH_CARD_TEXT(card, 2, "PNSO");
rr = (struct chsc_pnso_area *)get_zeroed_page(GFP_KERNEL);
if (rr == NULL)
return -ENOMEM;
do {
+ QETH_CARD_TEXT(card, 2, "PNSO");
/* on the first iteration, naihdr.resume_token will be zero */
- rc = ccw_device_pnso(ddev, rr, rr->naihdr.resume_token, cnc);
+ rc = ccw_device_pnso(ddev, rr, oc, rr->naihdr.resume_token,
+ cnc);
if (rc)
continue;
if (cb == NULL)
@@ -694,6 +652,218 @@ static int qeth_l2_pnso(struct qeth_card *card, int cnc,
return rc;
}
+static bool qeth_is_my_net_if_token(struct qeth_card *card,
+ struct net_if_token *token)
+{
+ return ((card->info.ddev_devno == token->devnum) &&
+ (card->info.cssid == token->cssid) &&
+ (card->info.iid == token->iid) &&
+ (card->info.ssid == token->ssid) &&
+ (card->info.chpid == token->chpid) &&
+ (card->info.chid == token->chid));
+}
+
+/**
+ * qeth_l2_dev2br_fdb_notify() - update fdb of master bridge
+ * @card: qeth_card structure pointer
+ * @code: event bitmask: high order bit 0x80 set to
+ * 1 - removal of an object
+ * 0 - addition of an object
+ * Object type(s):
+ * 0x01 - VLAN, 0x02 - MAC, 0x03 - VLAN and MAC
+ * @token: "network token" structure identifying 'physical' location
+ * of the target
+ * @addr_lnid: structure with MAC address and VLAN ID of the target
+ */
+static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
+ struct net_if_token *token,
+ struct mac_addr_lnid *addr_lnid)
+{
+ struct switchdev_notifier_fdb_info info;
+ u8 ntfy_mac[ETH_ALEN];
+
+ ether_addr_copy(ntfy_mac, addr_lnid->mac);
+ /* Ignore VLAN only changes */
+ if (!(code & IPA_ADDR_CHANGE_CODE_MACADDR))
+ return;
+ /* Ignore mcast entries */
+ if (is_multicast_ether_addr(ntfy_mac))
+ return;
+ /* Ignore my own addresses */
+ if (qeth_is_my_net_if_token(card, token))
+ return;
+
+ info.addr = ntfy_mac;
+ /* don't report VLAN IDs */
+ info.vid = 0;
+ info.added_by_user = false;
+ info.offloaded = true;
+
+ if (code & IPA_ADDR_CHANGE_CODE_REMOVAL) {
+ call_switchdev_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
+ card->dev, &info.info, NULL);
+ QETH_CARD_TEXT(card, 4, "andelmac");
+ QETH_CARD_TEXT_(card, 4,
+ "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ } else {
+ call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
+ card->dev, &info.info, NULL);
+ QETH_CARD_TEXT(card, 4, "anaddmac");
+ QETH_CARD_TEXT_(card, 4,
+ "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ }
+}
+
+static void qeth_l2_dev2br_an_set_cb(void *priv,
+ struct chsc_pnso_naid_l2 *entry)
+{
+ u8 code = IPA_ADDR_CHANGE_CODE_MACADDR;
+ struct qeth_card *card = priv;
+
+ if (entry->addr_lnid.lnid < VLAN_N_VID)
+ code |= IPA_ADDR_CHANGE_CODE_VLANID;
+ qeth_l2_dev2br_fdb_notify(card, code,
+ (struct net_if_token *)&entry->nit,
+ (struct mac_addr_lnid *)&entry->addr_lnid);
+}
+
+/**
+ * qeth_l2_dev2br_an_set() - Enable or disable
+ *	'dev to bridge network address notification'
+ * @card: qeth_card structure pointer
+ * @enable: Enable or disable 'dev to bridge network address notification'
+ *
+ * Returns negative errno-compatible error indication or 0 on success.
+ *
+ * On enable, emits a series of address notifications for all
+ * currently registered hosts.
+ *
+ * Must be called under rtnl_lock
+ */
+static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
+{
+ int rc;
+
+ if (enable) {
+ QETH_CARD_TEXT(card, 2, "anseton");
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 1,
+ qeth_l2_dev2br_an_set_cb, card);
+ if (rc == -EAGAIN)
+ /* address notification enabled, but inconsistent
+ * addresses reported -> disable address notification
+ */
+ qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0,
+ NULL, NULL);
+ } else {
+ QETH_CARD_TEXT(card, 2, "ansetoff");
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
+ }
+
+ return rc;
+}
+
+static int qeth_l2_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+ struct qeth_card *card = dev->ml_priv;
+ u16 mode = BRIDGE_MODE_UNDEF;
+
+ /* Do not even show qeth devs that cannot do bridge_setlink */
+ if (!priv->brport_hw_features || !netif_device_present(dev) ||
+ qeth_bridgeport_is_in_use(card))
+ return -EOPNOTSUPP;
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ mode, priv->brport_features,
+ priv->brport_hw_features,
+ nlflags, filter_mask, NULL);
+}
+
+static const struct nla_policy qeth_brport_policy[IFLA_BRPORT_MAX + 1] = {
+ [IFLA_BRPORT_LEARNING_SYNC] = { .type = NLA_U8 },
+};
+
+/**
+ * qeth_l2_bridge_setlink() - set bridgeport attributes
+ * @dev: netdevice
+ * @nlh: netlink message header
+ * @flags: bridge flags (here: BRIDGE_FLAGS_SELF)
+ * @extack: extended ACK report struct
+ *
+ * Called under rtnl_lock
+ */
+static int qeth_l2_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct qeth_priv *priv = netdev_priv(dev);
+ struct nlattr *bp_tb[IFLA_BRPORT_MAX + 1];
+ struct qeth_card *card = dev->ml_priv;
+ struct nlattr *attr, *nested_attr;
+ bool enable, has_protinfo = false;
+ int rem1, rem2;
+ int rc;
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ if (!(priv->brport_hw_features))
+ return -EOPNOTSUPP;
+
+ nlmsg_for_each_attr(attr, nlh, sizeof(struct ifinfomsg), rem1) {
+ if (nla_type(attr) == IFLA_PROTINFO) {
+ rc = nla_parse_nested(bp_tb, IFLA_BRPORT_MAX, attr,
+ qeth_brport_policy, extack);
+ if (rc)
+ return rc;
+ has_protinfo = true;
+ } else if (nla_type(attr) == IFLA_AF_SPEC) {
+ nla_for_each_nested(nested_attr, attr, rem2) {
+ if (nla_type(nested_attr) == IFLA_BRIDGE_FLAGS)
+ continue;
+ NL_SET_ERR_MSG_ATTR(extack, nested_attr,
+ "Unsupported attribute");
+ return -EINVAL;
+ }
+ } else {
+ NL_SET_ERR_MSG_ATTR(extack, attr, "Unsupported attribute");
+ return -EINVAL;
+ }
+ }
+ if (!has_protinfo)
+ return 0;
+ if (!bp_tb[IFLA_BRPORT_LEARNING_SYNC])
+ return -EINVAL;
+ enable = !!nla_get_u8(bp_tb[IFLA_BRPORT_LEARNING_SYNC]);
+
+ if (enable == !!(priv->brport_features & BR_LEARNING_SYNC))
+ return 0;
+
+ mutex_lock(&card->sbp_lock);
+ /* do not change anything if BridgePort is enabled */
+ if (qeth_bridgeport_is_in_use(card)) {
+ NL_SET_ERR_MSG(extack, "n/a (BridgePort)");
+ rc = -EBUSY;
+ } else if (enable) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc)
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ else
+ priv->brport_features |= BR_LEARNING_SYNC;
+ } else {
+ rc = qeth_l2_dev2br_an_set(card, false);
+ if (!rc) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ qeth_l2_dev2br_fdb_flush(card);
+ }
+ }
+ mutex_unlock(&card->sbp_lock);
+
+ return rc;
+}
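
With both ndo hooks wired up, the learning_sync port attribute becomes
reachable through standard iproute2 tooling; an illustrative invocation
(device name is a placeholder, 'self' addresses the port device rather
than the bridge master):

	bridge link set dev encXXXX learning_sync on self
	bridge -d link show dev encXXXX
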
+
static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_open = qeth_open,
.ndo_stop = qeth_stop,
@@ -707,9 +877,11 @@ static const struct net_device_ops qeth_l2_netdev_ops = {
.ndo_set_mac_address = qeth_l2_set_mac_address,
.ndo_vlan_rx_add_vid = qeth_l2_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qeth_l2_vlan_rx_kill_vid,
- .ndo_tx_timeout = qeth_tx_timeout,
+ .ndo_tx_timeout = qeth_tx_timeout,
.ndo_fix_features = qeth_fix_features,
- .ndo_set_features = qeth_set_features
+ .ndo_set_features = qeth_set_features,
+ .ndo_bridge_getlink = qeth_l2_bridge_getlink,
+ .ndo_bridge_setlink = qeth_l2_bridge_setlink,
};
static const struct net_device_ops qeth_osn_netdev_ops = {
@@ -723,18 +895,12 @@ static const struct net_device_ops qeth_osn_netdev_ops = {
static int qeth_l2_setup_netdev(struct qeth_card *card)
{
- int rc;
-
if (IS_OSN(card)) {
card->dev->netdev_ops = &qeth_osn_netdev_ops;
card->dev->flags |= IFF_NOARP;
goto add_napi;
}
- rc = qeth_setup_netdev(card);
- if (rc)
- return rc;
-
card->dev->needed_headroom = sizeof(struct qeth_hdr);
card->dev->netdev_ops = &qeth_l2_netdev_ops;
card->dev->priv_flags |= IFF_UNICAST_FLT;
@@ -810,135 +976,81 @@ static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
if (card->options.sbp.hostnotification) {
if (qeth_bridgeport_an_set(card, 1))
card->options.sbp.hostnotification = 0;
- } else {
- qeth_bridgeport_an_set(card, 0);
}
}
-static int qeth_l2_set_online(struct qeth_card *card)
+/**
+ * qeth_l2_detect_dev2br_support() - Detect whether this card supports
+ *	'dev to bridge fdb network address change notification' and thus
+ *	can support the learning_sync bridgeport attribute
+ * @card: qeth_card structure pointer
+ *
+ * This is a destructive test and must be called before dev2br or
+ * bridgeport address notification is enabled!
+ */
+static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
{
- struct ccwgroup_device *gdev = card->gdev;
- struct net_device *dev = card->dev;
- int rc = 0;
- bool carrier_ok;
-
- rc = qeth_core_hardsetup_card(card, &carrier_ok);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
- rc = -ENODEV;
- goto out_remove;
- }
-
- mutex_lock(&card->sbp_lock);
- qeth_bridgeport_query_support(card);
- if (card->options.sbp.supported_funcs) {
- qeth_l2_setup_bridgeport_attrs(card);
- dev_info(&card->gdev->dev,
- "The device represents a Bridge Capable Port\n");
- }
- mutex_unlock(&card->sbp_lock);
-
- qeth_l2_register_dev_addr(card);
-
- /* for the rx_bcast characteristic, init VNICC after setmac */
- qeth_l2_vnicc_init(card);
-
- qeth_trace_features(card);
- qeth_l2_trace_features(card);
-
- qeth_print_status_message(card);
-
- /* softsetup */
- QETH_CARD_TEXT(card, 2, "softsetp");
-
- card->state = CARD_STATE_SOFTSETUP;
-
- qeth_set_allowed_threads(card, 0xffffffff, 0);
+ struct qeth_priv *priv = netdev_priv(card->dev);
+ bool dev2br_supported;
+ int rc;
- if (dev->reg_state != NETREG_REGISTERED) {
- rc = qeth_l2_setup_netdev(card);
- if (rc)
- goto out_remove;
+ QETH_CARD_TEXT(card, 2, "d2brsup");
+ if (!IS_IQD(card))
+ return;
- if (carrier_ok)
- netif_carrier_on(dev);
+ /* dev2br requires valid cssid,iid,chid */
+ if (!card->info.ids_valid) {
+ dev2br_supported = false;
+ } else if (css_general_characteristics.enarf) {
+ dev2br_supported = true;
} else {
- rtnl_lock();
- if (carrier_ok)
- netif_carrier_on(dev);
- else
- netif_carrier_off(dev);
-
- netif_device_attach(dev);
- qeth_enable_hw_features(dev);
-
- if (card->info.open_when_online) {
- card->info.open_when_online = 0;
- dev_open(dev, NULL);
- }
- rtnl_unlock();
+ /* Old machines don't have the feature bit:
+ * Probe by testing whether a disable succeeds
+ */
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
+ dev2br_supported = !rc;
}
- /* let user_space know that device is online */
- kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
- return 0;
+ QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
-out_remove:
- qeth_l2_stop_card(card);
- qeth_stop_channel(&card->data);
- qeth_stop_channel(&card->write);
- qeth_stop_channel(&card->read);
- qdio_free(CARD_DDEV(card));
- return rc;
-}
-
-static void qeth_l2_set_offline(struct qeth_card *card)
-{
- qeth_l2_stop_card(card);
-}
-
-static int __init qeth_l2_init(void)
-{
- pr_info("register layer 2 discipline\n");
- return 0;
+ if (dev2br_supported)
+ priv->brport_hw_features |= BR_LEARNING_SYNC;
+ else
+ priv->brport_hw_features &= ~BR_LEARNING_SYNC;
}
-static void __exit qeth_l2_exit(void)
+static void qeth_l2_enable_brport_features(struct qeth_card *card)
{
- pr_info("unregister layer 2 discipline\n");
-}
+ struct qeth_priv *priv = netdev_priv(card->dev);
+ int rc;
-/* Returns zero if the command is successfully "consumed" */
-static int qeth_l2_control_event(struct qeth_card *card,
- struct qeth_ipa_cmd *cmd)
-{
- switch (cmd->hdr.command) {
- case IPA_CMD_SETBRIDGEPORT_OSA:
- case IPA_CMD_SETBRIDGEPORT_IQD:
- if (cmd->data.sbp.hdr.command_code ==
- IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
- qeth_bridge_state_change(card, cmd);
- return 0;
- } else
- return 1;
- case IPA_CMD_ADDRESS_CHANGE_NOTIF:
- qeth_addr_change_event(card, cmd);
- return 0;
- default:
- return 1;
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ if (priv->brport_hw_features & BR_LEARNING_SYNC) {
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc == -EAGAIN) {
+ /* Recoverable error, retry once */
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ qeth_l2_dev2br_fdb_flush(card);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_ADDR_INFO);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ }
+ if (rc) {
+ netdev_err(card->dev,
+ "failed to enable bridge learning_sync: %d\n",
+ rc);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ qeth_l2_dev2br_fdb_flush(card);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ }
+ } else {
+ dev_warn(&card->gdev->dev,
+ "bridge learning_sync not supported\n");
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ }
}
}
-struct qeth_discipline qeth_l2_discipline = {
- .devtype = &qeth_l2_devtype,
- .setup = qeth_l2_probe_device,
- .remove = qeth_l2_remove_device,
- .set_online = qeth_l2_set_online,
- .set_offline = qeth_l2_set_offline,
- .do_ioctl = NULL,
- .control_event_handler = qeth_l2_control_event,
-};
-EXPORT_SYMBOL_GPL(qeth_l2_discipline);
-
#ifdef CONFIG_QETH_OSN
static void qeth_osn_assist_cb(struct qeth_card *card,
struct qeth_cmd_buffer *iob,
@@ -1013,7 +1125,6 @@ void qeth_osn_deregister(struct net_device *dev)
QETH_CARD_TEXT(card, 2, "osndereg");
card->osn_info.assist_cb = NULL;
card->osn_info.data_cb = NULL;
- return;
}
EXPORT_SYMBOL(qeth_osn_deregister);
#endif
@@ -1090,15 +1201,14 @@ static void qeth_bridge_emit_host_event(struct qeth_card *card,
struct qeth_bridge_state_data {
struct work_struct worker;
struct qeth_card *card;
- struct qeth_sbp_state_change qports;
+ u8 role;
+ u8 state;
};
static void qeth_bridge_state_change_worker(struct work_struct *work)
{
struct qeth_bridge_state_data *data =
container_of(work, struct qeth_bridge_state_data, worker);
- /* We are only interested in the first entry - local port */
- struct qeth_sbp_port_entry *entry = &data->qports.entry[0];
char env_locrem[32];
char env_role[32];
char env_state[32];
@@ -1109,22 +1219,16 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
NULL
};
- /* Role should not change by itself, but if it did, */
- /* information from the hardware is authoritative. */
- mutex_lock(&data->card->sbp_lock);
- data->card->options.sbp.role = entry->role;
- mutex_unlock(&data->card->sbp_lock);
-
snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
snprintf(env_role, sizeof(env_role), "ROLE=%s",
- (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
- (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
- (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+ (data->role == QETH_SBP_ROLE_NONE) ? "none" :
+ (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+ (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
"<INVALID>");
snprintf(env_state, sizeof(env_state), "STATE=%s",
- (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
- (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" :
- (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+ (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+ (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+ (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
"<INVALID>");
kobject_uevent_env(&data->card->gdev->dev.kobj,
KOBJ_CHANGE, env);
@@ -1134,10 +1238,8 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
static void qeth_bridge_state_change(struct qeth_card *card,
struct qeth_ipa_cmd *cmd)
{
- struct qeth_sbp_state_change *qports =
- &cmd->data.sbp.data.state_change;
+ struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
struct qeth_bridge_state_data *data;
- int extrasize;
QETH_CARD_TEXT(card, 2, "brstchng");
if (qports->num_entries == 0) {
@@ -1148,44 +1250,136 @@ static void qeth_bridge_state_change(struct qeth_card *card,
QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
return;
}
- extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
- data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize,
- GFP_ATOMIC);
+
+ data = kzalloc(sizeof(*data), GFP_ATOMIC);
if (!data) {
QETH_CARD_TEXT(card, 2, "BPSalloc");
return;
}
INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
data->card = card;
- memcpy(&data->qports, qports,
- sizeof(struct qeth_sbp_state_change) + extrasize);
+ /* Information for the local port: */
+ data->role = qports->entry[0].role;
+ data->state = qports->entry[0].state;
+
queue_work(card->event_wq, &data->worker);
}
struct qeth_addr_change_data {
- struct work_struct worker;
+ struct delayed_work dwork;
struct qeth_card *card;
struct qeth_ipacmd_addr_change ac_event;
};
+static void qeth_l2_dev2br_worker(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct qeth_addr_change_data *data;
+ struct qeth_card *card;
+ struct qeth_priv *priv;
+ unsigned int i;
+ int rc;
+
+ data = container_of(dwork, struct qeth_addr_change_data, dwork);
+ card = data->card;
+ priv = netdev_priv(card->dev);
+
+ QETH_CARD_TEXT(card, 4, "dev2brew");
+
+ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+ goto free;
+
+ /* Potential re-config in progress, try again later: */
+ if (!rtnl_trylock()) {
+ queue_delayed_work(card->event_wq, dwork,
+ msecs_to_jiffies(100));
+ return;
+ }
+ if (!netif_device_present(card->dev))
+ goto out_unlock;
+
+ if (data->ac_event.lost_event_mask) {
+ QETH_DBF_MESSAGE(3,
+ "Address change notification overflow on device %x\n",
+ CARD_DEVID(card));
+ /* Card fdb and bridge fdb are out of sync, card has stopped
+ * notifications (no need to drain_workqueue). Purge all
+ * 'extern_learn' entries from the parent bridge and restart
+ * the notifications.
+ */
+ qeth_l2_dev2br_fdb_flush(card);
+ rc = qeth_l2_dev2br_an_set(card, true);
+ if (rc) {
+ /* TODO: if we want to retry after -EAGAIN, be
+ * aware there could be stale entries in the
+ * workqueue now, that need to be drained.
+ * For now we give up:
+ */
+ netdev_err(card->dev,
+ "bridge learning_sync failed to recover: %d\n",
+ rc);
+ WRITE_ONCE(card->info.pnso_mode,
+ QETH_PNSO_NONE);
+ /* To remove fdb entries reported by an_set: */
+ qeth_l2_dev2br_fdb_flush(card);
+ priv->brport_features ^= BR_LEARNING_SYNC;
+ } else {
+ QETH_DBF_MESSAGE(3,
+ "Address Notification resynced on device %x\n",
+ CARD_DEVID(card));
+ }
+ } else {
+ for (i = 0; i < data->ac_event.num_entries; i++) {
+ struct qeth_ipacmd_addr_change_entry *entry =
+ &data->ac_event.entry[i];
+ qeth_l2_dev2br_fdb_notify(card,
+ entry->change_code,
+ &entry->token,
+ &entry->addr_lnid);
+ }
+ }
+
+out_unlock:
+ rtnl_unlock();
+
+free:
+ kfree(data);
+}
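
Rather than blocking on the RTNL (and risking a deadlock against a
concurrent re-configuration), the worker uses a trylock-and-requeue
pattern. A generic minimal sketch of that pattern, with illustrative
names not taken from the patch:

	static void example_worker(struct work_struct *work)
	{
		struct delayed_work *dwork = to_delayed_work(work);

		if (!rtnl_trylock()) {
			/* RTNL is busy: back off and retry later */
			queue_delayed_work(system_wq, dwork,
					   msecs_to_jiffies(100));
			return;
		}
		/* ... RTNL-protected work ... */
		rtnl_unlock();
	}
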
+
static void qeth_addr_change_event_worker(struct work_struct *work)
{
- struct qeth_addr_change_data *data =
- container_of(work, struct qeth_addr_change_data, worker);
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct qeth_addr_change_data *data;
+ struct qeth_card *card;
int i;
+ data = container_of(dwork, struct qeth_addr_change_data, dwork);
+ card = data->card;
+
QETH_CARD_TEXT(data->card, 4, "adrchgew");
+
+ if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+ goto free;
+
if (data->ac_event.lost_event_mask) {
+ /* Potential re-config in progress, try again later: */
+ if (!mutex_trylock(&card->sbp_lock)) {
+ queue_delayed_work(card->event_wq, dwork,
+ msecs_to_jiffies(100));
+ return;
+ }
+
dev_info(&data->card->gdev->dev,
"Address change notification stopped on %s (%s)\n",
- data->card->dev->name,
+ netdev_name(card->dev),
(data->ac_event.lost_event_mask == 0x01)
? "Overflow"
: (data->ac_event.lost_event_mask == 0x02)
? "Bridge port state change"
: "Unknown reason");
- mutex_lock(&data->card->sbp_lock);
+
data->card->options.sbp.hostnotification = 0;
+ card->info.pnso_mode = QETH_PNSO_NONE;
mutex_unlock(&data->card->sbp_lock);
qeth_bridge_emit_host_event(data->card, anev_abort,
0, NULL, NULL);
@@ -1199,6 +1393,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
&entry->token,
&entry->addr_lnid);
}
+
+free:
kfree(data);
}
@@ -1210,6 +1406,9 @@ static void qeth_addr_change_event(struct qeth_card *card,
struct qeth_addr_change_data *data;
int extrasize;
+ if (card->info.pnso_mode == QETH_PNSO_NONE)
+ return;
+
QETH_CARD_TEXT(card, 4, "adrchgev");
if (cmd->hdr.return_code != 0x0000) {
if (cmd->hdr.return_code == 0x0010) {
@@ -1229,11 +1428,14 @@ static void qeth_addr_change_event(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "ACNalloc");
return;
}
- INIT_WORK(&data->worker, qeth_addr_change_event_worker);
+ if (card->info.pnso_mode == QETH_PNSO_BRIDGEPORT)
+ INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
+ else
+ INIT_DELAYED_WORK(&data->dwork, qeth_l2_dev2br_worker);
data->card = card;
memcpy(&data->ac_event, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
- queue_work(card->event_wq, &data->worker);
+ queue_delayed_work(card->event_wq, &data->dwork, 0);
}
/* SETBRIDGEPORT support; sending commands */
@@ -1418,8 +1620,8 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
{
struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
- struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+ struct qeth_sbp_port_data *qports;
int rc;
QETH_CARD_TEXT(card, 2, "brqprtcb");
@@ -1427,6 +1629,7 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
if (rc)
return rc;
+ qports = &cmd->data.sbp.data.port_data;
if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
return -EINVAL;
@@ -1554,18 +1757,18 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
if (enable) {
qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
- rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
- } else
- rc = qeth_l2_pnso(card, 0, NULL, NULL);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 1,
+ qeth_bridgeport_an_set_cb, card);
+ if (rc)
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ } else {
+ rc = qeth_l2_pnso(card, PNSO_OC_NET_BRIDGE_INFO, 0, NULL, NULL);
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ }
return rc;
}
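
Taken together with qeth_l2_bridge_setlink() above, the pnso_mode
transitions introduced by this patch can be summarised as a small state
machine (sketch; the enum values all appear in this patch):

	QETH_PNSO_NONE --qeth_bridgeport_an_set(enable)--> QETH_PNSO_BRIDGEPORT
	QETH_PNSO_NONE --learning_sync on----------------> QETH_PNSO_ADDR_INFO
	either mode    --disable or failure--------------> QETH_PNSO_NONE
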
-static bool qeth_bridgeport_is_in_use(struct qeth_card *card)
-{
- return (card->options.sbp.role || card->options.sbp.reflect_promisc ||
- card->options.sbp.hostnotification);
-}
-
/* VNIC Characteristics support */
/* handle VNICC IPA command return codes; convert to error codes */
@@ -1711,6 +1914,19 @@ static int qeth_l2_vnicc_getset_timeout(struct qeth_card *card, u32 vnicc,
return qeth_send_ipa_cmd(card, iob, qeth_l2_vnicc_request_cb, timeout);
}
+/* recover user timeout setting */
+static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
+ u32 *timeout)
+{
+ if (card->options.vnicc.sup_chars & vnicc &&
+ card->options.vnicc.getset_timeout_sup & vnicc &&
+ !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
+ timeout))
+ return false;
+ *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
+ return true;
+}
+
/* set current VNICC flag state; called from sysfs store function */
int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
{
@@ -1851,7 +2067,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
}
/* check if VNICC is currently enabled */
-bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
+static bool _qeth_l2_vnicc_is_in_use(struct qeth_card *card)
{
if (!card->options.vnicc.sup_chars)
return false;
@@ -1866,17 +2082,19 @@ bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
return true;
}
-/* recover user timeout setting */
-static bool qeth_l2_vnicc_recover_timeout(struct qeth_card *card, u32 vnicc,
- u32 *timeout)
+/**
+ * qeth_bridgeport_allowed() - are any qeth_bridgeport functions allowed?
+ * @card: qeth_card structure pointer
+ *
+ * qeth_bridgeport functionality is mutually exclusive with usage of the
+ * VNIC Characteristics and dev2br address notifications
+ */
+bool qeth_bridgeport_allowed(struct qeth_card *card)
{
- if (card->options.vnicc.sup_chars & vnicc &&
- card->options.vnicc.getset_timeout_sup & vnicc &&
- !qeth_l2_vnicc_getset_timeout(card, vnicc, IPA_VNICC_SET_TIMEOUT,
- timeout))
- return false;
- *timeout = QETH_VNICC_DEFAULT_TIMEOUT;
- return true;
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ return (!_qeth_l2_vnicc_is_in_use(card) &&
+ !(priv->brport_features & BR_LEARNING_SYNC));
}
/* recover user characteristic setting */
@@ -1967,6 +2185,182 @@ static void qeth_l2_vnicc_set_defaults(struct qeth_card *card)
card->options.vnicc.wanted_chars = QETH_VNICC_DEFAULT;
}
+static const struct device_type qeth_l2_devtype = {
+ .name = "qeth_layer2",
+ .groups = qeth_l2_attr_groups,
+};
+
+static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
+
+ if (IS_OSN(card))
+ dev_notice(&gdev->dev, "OSN support will be dropped in 2021\n");
+
+ qeth_l2_vnicc_set_defaults(card);
+ mutex_init(&card->sbp_lock);
+
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l2_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
+
+ INIT_WORK(&card->rx_mode_work, qeth_l2_rx_mode_work);
+ return 0;
+}
+
+static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
+{
+ struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+
+ if (gdev->dev.type == &qeth_generic_devtype)
+ qeth_l2_remove_device_attributes(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
+ wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
+
+ if (gdev->state == CCWGROUP_ONLINE)
+ qeth_set_offline(card, false);
+
+ cancel_work_sync(&card->close_dev_work);
+ if (card->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(card->dev);
+}
+
+static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok)
+{
+ struct net_device *dev = card->dev;
+ int rc = 0;
+
+	/* destructive probe: run before any address notification is enabled */
+ qeth_l2_detect_dev2br_support(card);
+
+ mutex_lock(&card->sbp_lock);
+ qeth_bridgeport_query_support(card);
+ if (card->options.sbp.supported_funcs) {
+ qeth_l2_setup_bridgeport_attrs(card);
+ dev_info(&card->gdev->dev,
+ "The device represents a Bridge Capable Port\n");
+ }
+ mutex_unlock(&card->sbp_lock);
+
+ qeth_l2_register_dev_addr(card);
+
+ /* for the rx_bcast characteristic, init VNICC after setmac */
+ qeth_l2_vnicc_init(card);
+
+ qeth_l2_trace_features(card);
+
+ /* softsetup */
+ QETH_CARD_TEXT(card, 2, "softsetp");
+
+ card->state = CARD_STATE_SOFTSETUP;
+
+ qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+ if (dev->reg_state != NETREG_REGISTERED) {
+ rc = qeth_l2_setup_netdev(card);
+ if (rc)
+ goto err_setup;
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ } else {
+ rtnl_lock();
+ rc = qeth_set_real_num_tx_queues(card,
+ qeth_tx_actual_queues(card));
+ if (rc) {
+ rtnl_unlock();
+ goto err_set_queues;
+ }
+
+ if (carrier_ok)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+
+ netif_device_attach(dev);
+ qeth_enable_hw_features(dev);
+ qeth_l2_enable_brport_features(card);
+
+ if (card->info.open_when_online) {
+ card->info.open_when_online = 0;
+ dev_open(dev, NULL);
+ }
+ rtnl_unlock();
+ }
+ return 0;
+
+err_set_queues:
+err_setup:
+ qeth_set_allowed_threads(card, 0, 1);
+ card->state = CARD_STATE_DOWN;
+ return rc;
+}
+
+static void qeth_l2_set_offline(struct qeth_card *card)
+{
+ struct qeth_priv *priv = netdev_priv(card->dev);
+
+ qeth_set_allowed_threads(card, 0, 1);
+ qeth_l2_drain_rx_mode_cache(card);
+
+ if (card->state == CARD_STATE_SOFTSETUP)
+ card->state = CARD_STATE_DOWN;
+
+ qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+ if (priv->brport_features & BR_LEARNING_SYNC) {
+ rtnl_lock();
+ qeth_l2_dev2br_fdb_flush(card);
+ rtnl_unlock();
+ }
+}
+
+/* Returns zero if the command is successfully "consumed" */
+static int qeth_l2_control_event(struct qeth_card *card,
+ struct qeth_ipa_cmd *cmd)
+{
+ switch (cmd->hdr.command) {
+ case IPA_CMD_SETBRIDGEPORT_OSA:
+ case IPA_CMD_SETBRIDGEPORT_IQD:
+ if (cmd->data.sbp.hdr.command_code ==
+ IPA_SBP_BRIDGE_PORT_STATE_CHANGE) {
+ qeth_bridge_state_change(card, cmd);
+ return 0;
+ }
+
+ return 1;
+ case IPA_CMD_ADDRESS_CHANGE_NOTIF:
+ qeth_addr_change_event(card, cmd);
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+const struct qeth_discipline qeth_l2_discipline = {
+ .devtype = &qeth_l2_devtype,
+ .setup = qeth_l2_probe_device,
+ .remove = qeth_l2_remove_device,
+ .set_online = qeth_l2_set_online,
+ .set_offline = qeth_l2_set_offline,
+ .do_ioctl = NULL,
+ .control_event_handler = qeth_l2_control_event,
+};
+EXPORT_SYMBOL_GPL(qeth_l2_discipline);
+
+static int __init qeth_l2_init(void)
+{
+ pr_info("register layer 2 discipline\n");
+ return 0;
+}
+
+static void __exit qeth_l2_exit(void)
+{
+ pr_info("unregister layer 2 discipline\n");
+}
+
module_init(qeth_l2_init);
module_exit(qeth_l2_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 86bcae992f72..4ba3bc57263f 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -18,7 +18,7 @@ static ssize_t qeth_bridge_port_role_state_show(struct device *dev,
int rc = 0;
char *word;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
mutex_lock(&card->sbp_lock);
@@ -65,7 +65,7 @@ static ssize_t qeth_bridge_port_role_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 0);
@@ -90,7 +90,7 @@ static ssize_t qeth_bridge_port_role_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.reflect_promisc)
/* Forbid direct manipulation */
@@ -116,7 +116,7 @@ static ssize_t qeth_bridge_port_state_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
return qeth_bridge_port_role_state_show(dev, attr, buf, 1);
@@ -131,7 +131,7 @@ static ssize_t qeth_bridgeport_hostnotification_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int enabled;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
enabled = card->options.sbp.hostnotification;
@@ -153,10 +153,11 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (qeth_card_hw_is_reachable(card)) {
rc = qeth_bridgeport_an_set(card, enable);
+ /* sbp_lock ensures ordering vs notifications-stopped events */
if (!rc)
card->options.sbp.hostnotification = enable;
} else
@@ -178,7 +179,7 @@ static ssize_t qeth_bridgeport_reflect_show(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
char *state;
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
return sprintf(buf, "n/a (VNIC characteristics)\n");
if (card->options.sbp.reflect_promisc) {
@@ -214,7 +215,7 @@ static ssize_t qeth_bridgeport_reflect_store(struct device *dev,
mutex_lock(&card->conf_mutex);
mutex_lock(&card->sbp_lock);
- if (qeth_l2_vnicc_is_in_use(card))
+ if (!qeth_bridgeport_allowed(card))
rc = -EBUSY;
else if (card->options.sbp.role != QETH_SBP_ROLE_NONE)
rc = -EPERM;
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 6ccfe2121095..acd130cfbab3 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -96,7 +96,7 @@ struct qeth_ipato_entry {
struct list_head entry;
enum qeth_prot_versions proto;
char addr[16];
- int mask_bits;
+ unsigned int mask_bits;
};
extern const struct attribute_group *qeth_l3_attr_groups[];
@@ -110,7 +110,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *);
int qeth_l3_add_ipato_entry(struct qeth_card *, struct qeth_ipato_entry *);
int qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr,
- int mask_bits);
+ unsigned int mask_bits);
void qeth_l3_update_ipato(struct qeth_card *card);
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add);
int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 09ef518ca1ea..b1c1d2510d55 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -97,7 +97,7 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
return false;
qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits,
- (addr->proto == QETH_PROT_IPV4)? 4:16);
+ (addr->proto == QETH_PROT_IPV4) ? 4 : 16);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (addr->proto != ipatoe->proto)
continue;
@@ -105,11 +105,9 @@ static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card,
(ipatoe->proto == QETH_PROT_IPV4) ?
4 : 16);
if (addr->proto == QETH_PROT_IPV4)
- rc = !memcmp(addr_bits, ipatoe_bits,
- min(32, ipatoe->mask_bits));
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
else
- rc = !memcmp(addr_bits, ipatoe_bits,
- min(128, ipatoe->mask_bits));
+ rc = !memcmp(addr_bits, ipatoe_bits, ipatoe->mask_bits);
if (rc)
break;
}
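
The min() clamps are no longer needed because mask_bits is now an
unsigned int that is validated against the per-protocol maximum when the
entry is parsed (see qeth_l3_parse_ipatoe() further below). A sketch of
why the plain memcmp() suffices, assuming the conversion helper expands
each address bit into one byte of the bit array:

	u8 addr_bits[128], ipatoe_bits[128];

	/* after qeth_l3_convert_addr_to_bits(), addr_bits[i] holds bit i */
	bool covered = !memcmp(addr_bits, ipatoe_bits, mask_bits);
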
@@ -314,7 +312,8 @@ static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
}
static int qeth_l3_send_setdelmc(struct qeth_card *card,
- struct qeth_ipaddr *addr, int ipacmd)
+ struct qeth_ipaddr *addr,
+ enum qeth_ipa_cmds ipacmd)
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
@@ -535,14 +534,13 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
QETH_CARD_TEXT(card, 2, "addipato");
- mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
if (ipatoe->proto != new->proto)
continue;
if (!memcmp(ipatoe->addr, new->addr,
- (ipatoe->proto == QETH_PROT_IPV4)? 4:16) &&
+ (ipatoe->proto == QETH_PROT_IPV4) ? 4 : 16) &&
(ipatoe->mask_bits == new->mask_bits)) {
rc = -EEXIST;
break;
@@ -555,28 +553,26 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
- mutex_unlock(&card->conf_mutex);
return rc;
}
int qeth_l3_del_ipato_entry(struct qeth_card *card,
enum qeth_prot_versions proto, u8 *addr,
- int mask_bits)
+ unsigned int mask_bits)
{
struct qeth_ipato_entry *ipatoe, *tmp;
int rc = -ENOENT;
QETH_CARD_TEXT(card, 2, "delipato");
- mutex_lock(&card->conf_mutex);
mutex_lock(&card->ip_lock);
list_for_each_entry_safe(ipatoe, tmp, &card->ipato.entries, entry) {
if (ipatoe->proto != proto)
continue;
if (!memcmp(ipatoe->addr, addr,
- (proto == QETH_PROT_IPV4)? 4:16) &&
+ (proto == QETH_PROT_IPV4) ? 4 : 16) &&
(ipatoe->mask_bits == mask_bits)) {
list_del(&ipatoe->entry);
qeth_l3_update_ipato(card);
@@ -586,7 +582,6 @@ int qeth_l3_del_ipato_entry(struct qeth_card *card,
}
mutex_unlock(&card->ip_lock);
- mutex_unlock(&card->conf_mutex);
return rc;
}
@@ -596,7 +591,6 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr addr;
- int rc;
qeth_l3_init_ipaddr(&addr, type, proto);
if (proto == QETH_PROT_IPV4)
@@ -604,11 +598,7 @@ int qeth_l3_modify_rxip_vipa(struct qeth_card *card, bool add, const u8 *ip,
else
memcpy(&addr.u.a6.addr, ip, 16);
- mutex_lock(&card->conf_mutex);
- rc = qeth_l3_modify_ip(card, &addr, add);
- mutex_unlock(&card->conf_mutex);
-
- return rc;
+ return qeth_l3_modify_ip(card, &addr, add);
}
int qeth_l3_modify_hsuid(struct qeth_card *card, bool add)
@@ -716,16 +706,16 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
dev_info(&card->gdev->dev,
- "ARP processing not supported on %s!\n",
- QETH_CARD_IFNAME(card));
+ "ARP processing not supported on %s!\n",
+ netdev_name(card->dev));
return 0;
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Starting ARP processing support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting ARP processing support for %s failed\n",
+ netdev_name(card->dev));
}
return rc;
}
@@ -738,8 +728,8 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_SOURCE_MAC)) {
dev_info(&card->gdev->dev,
- "Inbound source MAC-address not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ "Inbound source MAC-address not supported on %s\n",
+ netdev_name(card->dev));
return -EOPNOTSUPP;
}
@@ -747,8 +737,8 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card)
IPA_CMD_ASS_START, NULL);
if (rc)
dev_warn(&card->gdev->dev,
- "Starting source MAC-address support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting source MAC-address support for %s failed\n",
+ netdev_name(card->dev));
return rc;
}
@@ -760,7 +750,7 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_FULL_VLAN)) {
dev_info(&card->gdev->dev,
- "VLAN not supported on %s\n", QETH_CARD_IFNAME(card));
+ "VLAN not supported on %s\n", netdev_name(card->dev));
return -EOPNOTSUPP;
}
@@ -768,8 +758,8 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card)
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Starting VLAN support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting VLAN support for %s failed\n",
+ netdev_name(card->dev));
} else {
dev_info(&card->gdev->dev, "VLAN enabled\n");
}
@@ -784,8 +774,8 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_MULTICASTING)) {
dev_info(&card->gdev->dev,
- "Multicast not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ "Multicast not supported on %s\n",
+ netdev_name(card->dev));
return -EOPNOTSUPP;
}
@@ -793,8 +783,8 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card)
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Starting multicast support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Starting multicast support for %s failed\n",
+ netdev_name(card->dev));
} else {
dev_info(&card->gdev->dev, "Multicast enabled\n");
card->dev->flags |= IFF_MULTICAST;
@@ -817,7 +807,7 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ netdev_name(card->dev));
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_IPV6, IPA_CMD_ASS_START,
@@ -825,15 +815,15 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card)
if (rc) {
dev_err(&card->gdev->dev,
"Activating IPv6 support for %s failed\n",
- QETH_CARD_IFNAME(card));
+ netdev_name(card->dev));
return rc;
}
rc = qeth_send_simple_setassparms_v6(card, IPA_PASSTHRU,
IPA_CMD_ASS_START, NULL);
if (rc) {
dev_warn(&card->gdev->dev,
- "Enabling the passthrough mode for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Enabling the passthrough mode for %s failed\n",
+ netdev_name(card->dev));
return rc;
}
out:
@@ -847,7 +837,7 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card)
if (!qeth_is_supported(card, IPA_IPV6)) {
dev_info(&card->gdev->dev,
- "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card));
+ "IPv6 not supported on %s\n", netdev_name(card->dev));
return 0;
}
return qeth_l3_softsetup_ipv6(card);
@@ -862,16 +852,17 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
card->info.broadcast_capable = 0;
if (!qeth_is_supported(card, IPA_FILTERING)) {
dev_info(&card->gdev->dev,
- "Broadcast not supported on %s\n",
- QETH_CARD_IFNAME(card));
+ "Broadcast not supported on %s\n",
+ netdev_name(card->dev));
rc = -EOPNOTSUPP;
goto out;
}
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_START, NULL);
if (rc) {
- dev_warn(&card->gdev->dev, "Enabling broadcast filtering for "
- "%s failed\n", QETH_CARD_IFNAME(card));
+ dev_warn(&card->gdev->dev,
+ "Enabling broadcast filtering for %s failed\n",
+ netdev_name(card->dev));
goto out;
}
@@ -879,8 +870,8 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
IPA_CMD_ASS_CONFIGURE, &filter_data);
if (rc) {
dev_warn(&card->gdev->dev,
- "Setting up broadcast filtering for %s failed\n",
- QETH_CARD_IFNAME(card));
+ "Setting up broadcast filtering for %s failed\n",
+ netdev_name(card->dev));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO;
@@ -888,8 +879,9 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card)
rc = qeth_send_simple_setassparms(card, IPA_FILTERING,
IPA_CMD_ASS_ENABLE, &filter_data);
if (rc) {
- dev_warn(&card->gdev->dev, "Setting up broadcast echo "
- "filtering for %s failed\n", QETH_CARD_IFNAME(card));
+ dev_warn(&card->gdev->dev,
+ "Setting up broadcast echo filtering for %s failed\n",
+ netdev_name(card->dev));
goto out;
}
card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO;
@@ -1152,33 +1144,6 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev,
return 0;
}
-static void qeth_l3_stop_card(struct qeth_card *card)
-{
- QETH_CARD_TEXT(card, 2, "stopcard");
-
- qeth_set_allowed_threads(card, 0, 1);
-
- cancel_work_sync(&card->rx_mode_work);
- qeth_l3_drain_rx_mode_cache(card);
-
- if (card->options.sniffer &&
- (card->info.promisc_mode == SET_PROMISC_MODE_ON))
- qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
-
- if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l3_clear_ip_htable(card, 1);
- qeth_clear_ipacmd_list(card);
- card->state = CARD_STATE_DOWN;
- }
-
- qeth_qdio_clear_card(card, 0);
- qeth_drain_output_queues(card);
- qeth_clear_working_pool_list(card);
- flush_workqueue(card->event_wq);
- qeth_flush_local_addrs(card);
- card->info.promisc_mode = 0;
-}
-
static void qeth_l3_set_promisc_mode(struct qeth_card *card)
{
bool enable = card->dev->flags & IFF_PROMISC;
@@ -1234,7 +1199,6 @@ static void qeth_l3_rx_mode_work(struct work_struct *work)
kfree(addr);
break;
}
- addr->ref_counter = 1;
fallthrough;
default:
/* for next call to set_rx_mode(): */
@@ -1869,8 +1833,10 @@ static u16 qeth_l3_osa_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct qeth_card *card = dev->ml_priv;
- return IS_VM_NIC(card) ? netdev_pick_tx(dev, skb, sb_dev) :
- qeth_get_priority_queue(card, skb);
+ if (qeth_uses_tx_prio_queueing(card))
+ return qeth_get_priority_queue(card, skb);
+
+ return netdev_pick_tx(dev, skb, sb_dev);
}
static const struct net_device_ops qeth_l3_netdev_ops = {
@@ -1913,10 +1879,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
unsigned int headroom;
int rc;
- rc = qeth_setup_netdev(card);
- if (rc)
- return rc;
-
if (IS_OSD(card) || IS_OSX(card)) {
card->dev->netdev_ops = &qeth_l3_osa_netdev_ops;
@@ -2024,21 +1986,10 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
qeth_l3_clear_ipato_list(card);
}
-static int qeth_l3_set_online(struct qeth_card *card)
+static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok)
{
- struct ccwgroup_device *gdev = card->gdev;
struct net_device *dev = card->dev;
int rc = 0;
- bool carrier_ok;
-
- rc = qeth_core_hardsetup_card(card, &carrier_ok);
- if (rc) {
- QETH_CARD_TEXT_(card, 2, "2err%04x", rc);
- rc = -ENODEV;
- goto out_remove;
- }
-
- qeth_print_status_message(card);
/* softsetup */
QETH_CARD_TEXT(card, 2, "softsetp");
@@ -2065,12 +2016,19 @@ static int qeth_l3_set_online(struct qeth_card *card)
if (dev->reg_state != NETREG_REGISTERED) {
rc = qeth_l3_setup_netdev(card);
if (rc)
- goto out_remove;
+ goto err_setup;
if (carrier_ok)
netif_carrier_on(dev);
} else {
rtnl_lock();
+ rc = qeth_set_real_num_tx_queues(card,
+ qeth_tx_actual_queues(card));
+ if (rc) {
+ rtnl_unlock();
+ goto err_set_queues;
+ }
+
if (carrier_ok)
netif_carrier_on(dev);
else
@@ -2085,22 +2043,29 @@ static int qeth_l3_set_online(struct qeth_card *card)
}
rtnl_unlock();
}
- qeth_trace_features(card);
- /* let user_space know that device is online */
- kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
return 0;
-out_remove:
- qeth_l3_stop_card(card);
- qeth_stop_channel(&card->data);
- qeth_stop_channel(&card->write);
- qeth_stop_channel(&card->read);
- qdio_free(CARD_DDEV(card));
+
+err_set_queues:
+err_setup:
+ qeth_set_allowed_threads(card, 0, 1);
+ card->state = CARD_STATE_DOWN;
+ qeth_l3_clear_ip_htable(card, 1);
return rc;
}
static void qeth_l3_set_offline(struct qeth_card *card)
{
- qeth_l3_stop_card(card);
+ qeth_set_allowed_threads(card, 0, 1);
+ qeth_l3_drain_rx_mode_cache(card);
+
+ if (card->options.sniffer &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+
+ if (card->state == CARD_STATE_SOFTSETUP) {
+ card->state = CARD_STATE_DOWN;
+ qeth_l3_clear_ip_htable(card, 1);
+ }
}
/* Returns zero if the command is successfully "consumed" */
@@ -2110,7 +2075,7 @@ static int qeth_l3_control_event(struct qeth_card *card,
return 1;
}
-struct qeth_discipline qeth_l3_discipline = {
+const struct qeth_discipline qeth_l3_discipline = {
.devtype = &qeth_l3_devtype,
.setup = qeth_l3_probe_device,
.remove = qeth_l3_remove_device,
@@ -2174,7 +2139,6 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev)
static int qeth_l3_ip_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
-
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct qeth_ipaddr addr;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index dd0b39082534..997fbb7006a7 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -285,7 +285,7 @@ static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
+ return sprintf(buf, "%u\n", card->ipato.enabled ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
@@ -301,19 +301,21 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
goto out;
}
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
enable = !card->ipato.enabled;
} else if (kstrtobool(buf, &enable)) {
rc = -EINVAL;
- goto out;
+ goto unlock_ip;
}
if (card->ipato.enabled != enable) {
card->ipato.enabled = enable;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
+unlock_ip:
+ mutex_unlock(&card->ip_lock);
out:
mutex_unlock(&card->conf_mutex);
return rc ? rc : count;
@@ -328,7 +330,7 @@ static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->ipato.invert4? 1:0);
+ return sprintf(buf, "%u\n", card->ipato.invert4 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
@@ -339,7 +341,7 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
bool invert;
int rc = 0;
- mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert4;
} else if (kstrtobool(buf, &invert)) {
@@ -349,12 +351,11 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
if (card->ipato.invert4 != invert) {
card->ipato.invert4 = invert;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
out:
- mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
@@ -406,29 +407,29 @@ static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
}
static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
- u8 *addr, int *mask_bits)
+ u8 *addr, unsigned int *mask_bits)
{
- const char *start, *end;
- char *tmp;
- char buffer[40] = {0, };
+ char *sep;
+ int rc;
- start = buf;
- /* get address string */
- end = strchr(start, '/');
- if (!end || (end - start >= 40)) {
+ /* Expected input pattern: %addr/%mask */
+ sep = strnchr(buf, 40, '/');
+ if (!sep)
return -EINVAL;
- }
- strncpy(buffer, start, end - start);
- if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
- return -EINVAL;
- }
- start = end + 1;
- *mask_bits = simple_strtoul(start, &tmp, 10);
- if (!strlen(start) ||
- (tmp == start) ||
- (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
+
+ /* Terminate the %addr sub-string, and parse it: */
+ *sep = '\0';
+ rc = qeth_l3_string_to_ipaddr(buf, proto, addr);
+ if (rc)
+ return rc;
+
+ rc = kstrtouint(sep + 1, 10, mask_bits);
+ if (rc)
+ return rc;
+
+ if (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))
return -EINVAL;
- }
+
return 0;
}
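
For reference, the rewritten parser accepts exactly one %addr/%mask token, with the '/' required within the first 40 bytes and the mask parsed by kstrtouint(). A few illustrative inputs (not part of the patch itself):

/* Illustrative behaviour of qeth_l3_parse_ipatoe():
 *   "10.0.0.0/24" with QETH_PROT_IPV4 -> addr = 10.0.0.0, mask_bits = 24
 *   "fe80::/64"   with QETH_PROT_IPV6 -> mask_bits = 64
 *   "10.0.0.0/33" with QETH_PROT_IPV4 -> -EINVAL (mask exceeds 32)
 *   "10.0.0.0"    (no '/' within 40 bytes) -> -EINVAL
 *   "10.0.0.0/24abc" -> -EINVAL (kstrtouint() rejects trailing junk)
 */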
@@ -436,8 +437,8 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
struct qeth_ipato_entry *ipatoe;
+ unsigned int mask_bits;
u8 addr[16];
- int mask_bits;
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
@@ -449,7 +450,7 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
return -ENOMEM;
ipatoe->proto = proto;
- memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
+ memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4) ? 4 : 16);
ipatoe->mask_bits = mask_bits;
rc = qeth_l3_add_ipato_entry(card, ipatoe);
@@ -474,8 +475,8 @@ static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
struct qeth_card *card, enum qeth_prot_versions proto)
{
+ unsigned int mask_bits;
u8 addr[16];
- int mask_bits;
int rc = 0;
rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
@@ -500,7 +501,7 @@ static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
- return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
+ return sprintf(buf, "%u\n", card->ipato.invert6 ? 1 : 0);
}
static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
@@ -510,7 +511,7 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
bool invert;
int rc = 0;
- mutex_lock(&card->conf_mutex);
+ mutex_lock(&card->ip_lock);
if (sysfs_streq(buf, "toggle")) {
invert = !card->ipato.invert6;
} else if (kstrtobool(buf, &invert)) {
@@ -520,12 +521,11 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
if (card->ipato.invert6 != invert) {
card->ipato.invert6 = invert;
- mutex_lock(&card->ip_lock);
qeth_l3_update_ipato(card);
- mutex_unlock(&card->ip_lock);
}
+
out:
- mutex_unlock(&card->conf_mutex);
+ mutex_unlock(&card->ip_lock);
return rc ? rc : count;
}
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 7c3ae52f2b15..dac54041ad8d 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -1164,17 +1164,12 @@ void ssb_pci_exit(struct ssb_bus *bus)
int ssb_pci_init(struct ssb_bus *bus)
{
struct pci_dev *pdev;
- int err;
if (bus->bustype != SSB_BUSTYPE_PCI)
return 0;
pdev = bus->host_pci;
mutex_init(&bus->sprom_mutex);
- err = device_create_file(&pdev->dev, &dev_attr_ssb_sprom);
- if (err)
- goto out;
-out:
- return err;
+ return device_create_file(&pdev->dev, &dev_attr_ssb_sprom);
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 4c960b66de8e..ea84d08747df 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -439,7 +439,7 @@ static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static const struct genl_ops tcmu_genl_ops[] = {
+static const struct genl_small_ops tcmu_genl_ops[] = {
{
.cmd = TCMU_CMD_SET_FEATURES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -477,8 +477,8 @@ static struct genl_family tcmu_genl_family __ro_after_init = {
.mcgrps = tcmu_mcgrps,
.n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
.netnsok = true,
- .ops = tcmu_genl_ops,
- .n_ops = ARRAY_SIZE(tcmu_genl_ops),
+ .small_ops = tcmu_genl_ops,
+ .n_small_ops = ARRAY_SIZE(tcmu_genl_ops),
};
#define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index))
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index af7b2383e8f6..da2891fcac89 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -545,7 +545,7 @@ static int thermal_genl_cmd_dumpit(struct sk_buff *skb,
{
struct param p = { .msg = skb };
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- int cmd = info->ops->cmd;
+ int cmd = info->op.cmd;
int ret;
void *hdr;
@@ -601,7 +601,7 @@ out_free_msg:
return ret;
}
-static const struct genl_ops thermal_genl_ops[] = {
+static const struct genl_small_ops thermal_genl_ops[] = {
{
.cmd = THERMAL_GENL_CMD_TZ_GET_ID,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -635,8 +635,8 @@ static struct genl_family thermal_gnl_family __ro_after_init = {
.version = THERMAL_GENL_VERSION,
.maxattr = THERMAL_GENL_ATTR_MAX,
.policy = thermal_genl_policy,
- .ops = thermal_genl_ops,
- .n_ops = ARRAY_SIZE(thermal_genl_ops),
+ .small_ops = thermal_genl_ops,
+ .n_small_ops = ARRAY_SIZE(thermal_genl_ops),
.mcgrps = thermal_genl_mcgrps,
.n_mcgrps = ARRAY_SIZE(thermal_genl_mcgrps),
};
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index e338c407cb75..67f68d48d60c 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -62,7 +62,7 @@ static int user_cmd(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static const struct genl_ops dlm_nl_ops[] = {
+static const struct genl_small_ops dlm_nl_ops[] = {
{
.cmd = DLM_CMD_HELLO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -73,8 +73,8 @@ static const struct genl_ops dlm_nl_ops[] = {
static struct genl_family family __ro_after_init = {
.name = DLM_GENL_NAME,
.version = DLM_GENL_VERSION,
- .ops = dlm_nl_ops,
- .n_ops = ARRAY_SIZE(dlm_nl_ops),
+ .small_ops = dlm_nl_ops,
+ .n_small_ops = ARRAY_SIZE(dlm_nl_ops),
.module = THIS_MODULE,
};
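
The tcmu, thermal and dlm hunks above all perform the same mechanical conversion from genl_ops to genl_small_ops. A minimal sketch of the resulting layout, with hypothetical demo_* names standing in for a real family (small_ops drop the per-op policy pointer, so each entry is smaller than a full struct genl_ops):

#include <net/genetlink.h>

static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* command consumed */
}

static const struct genl_small_ops demo_small_ops[] = {
	{
		.cmd		= 1,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= demo_doit,
	},
};

static struct genl_family demo_family __ro_after_init = {
	.name		= "demo",
	.version	= 1,
	.module		= THIS_MODULE,
	.small_ops	= demo_small_ops,
	.n_small_ops	= ARRAY_SIZE(demo_small_ops),
};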
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fc6de6b4784e..2e1dc354cd08 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -5080,6 +5080,12 @@ static bool io_arm_poll_handler(struct io_kiocb *req)
mask |= POLLIN | POLLRDNORM;
if (def->pollout)
mask |= POLLOUT | POLLWRNORM;
+
+ /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
+ if ((req->opcode == IORING_OP_RECVMSG) &&
+ (req->sr_msg.msg_flags & MSG_ERRQUEUE))
+ mask &= ~POLLIN;
+
mask |= POLLERR | POLLPRI;
ipt.pt._qproc = io_async_queue_proc;
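
The io_uring hunk above keeps POLLIN out of the armed mask when a recvmsg targets the socket error queue; such reads look roughly like this in user space (a generic sketch, not from this patch):

#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

static ssize_t drain_errqueue(int fd)
{
	char cbuf[512];
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	/* Error-queue data never signals POLLIN on the receive queue;
	 * readiness shows up as EPOLLERR instead.
	 */
	return recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT);
}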
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 64f367044e25..2f98d2fce62e 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -279,6 +279,31 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
+ * fullsock and its parent fullsock cannot be traced by
+ * sk_to_full_sk().
+ *
+ * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
+ * Its listener-sk is not attached to the rsk_listener.
+ * In this case, the caller holds the listener-sk (unlocked),
+ * sets its sock_ops->sk to req_sk, and calls this SOCK_OPS"_SK"
+ * macro with the listener-sk so that the listener-sk's
+ * cgroup-bpf-progs will be run.
+ *
+ * Regardless of syncookie mode, calling bpf_setsockopt on a
+ * listener-sk would not make sense anyway, so passing
+ * 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
+ */
+#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
+({ \
+ int __ret = 0; \
+ if (cgroup_bpf_enabled) \
+ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
+ sock_ops, \
+ BPF_CGROUP_SOCK_OPS); \
+ __ret; \
+})
+
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
int __ret = 0; \
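
The intended calling convention for the new _SK variant, loosely modeled on the TCP header-option hooks elsewhere in this series (the wrapper and its name are hypothetical; only the macro itself is from this patch):

static void demo_run_hdr_opt_progs(struct sock *listener_sk,
				   struct request_sock *req)
{
	struct bpf_sock_ops_kern sock_ops = {};

	sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
	/* The prog sees the non-fullsock request_sock as its sk ... */
	sock_ops.sk = req_to_sk(req);
	/* ... while the unlocked listener supplies the cgroup progs. */
	BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
}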
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 55f694b63164..2b16bf48aab6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -34,6 +34,8 @@ struct btf_type;
struct exception_table_entry;
struct seq_operations;
struct bpf_iter_aux_info;
+struct bpf_local_storage;
+struct bpf_local_storage_map;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -80,7 +82,7 @@ struct bpf_map_ops {
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
void (*map_fd_put_ptr)(void *ptr);
- u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
+ int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
@@ -104,6 +106,25 @@ struct bpf_map_ops {
__poll_t (*map_poll)(struct bpf_map *map, struct file *filp,
struct poll_table_struct *pts);
+ /* Functions called by bpf_local_storage maps */
+ int (*map_local_storage_charge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap,
+ void *owner, u32 size);
+ struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
+
+ /* map_meta_equal must be implemented for maps that can be
+ * used as an inner map. It is a runtime check to ensure
+ * an inner map can be inserted to an outer map.
+ *
+ * Some properties of the inner map has been used during the
+ * verification time. When inserting an inner map at the runtime,
+ * map_meta_equal has to ensure the inserting map has the same
+ * properties that the verifier has used earlier.
+ */
+ bool (*map_meta_equal)(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
/* BTF name and id of struct allocated by map_alloc */
const char * const map_btf_name;
int *map_btf_id;
@@ -227,6 +248,9 @@ int map_check_no_btf(const struct bpf_map *map,
const struct btf_type *key_type,
const struct btf_type *value_type);
+bool bpf_map_meta_equal(const struct bpf_map *meta0,
+ const struct bpf_map *meta1);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* function argument constraints */
@@ -268,6 +292,9 @@ enum bpf_arg_type {
ARG_PTR_TO_ALLOC_MEM, /* pointer to dynamically allocated memory */
ARG_PTR_TO_ALLOC_MEM_OR_NULL, /* pointer to dynamically allocated memory or NULL */
ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */
+ ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */
+ ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */
+ __BPF_ARG_TYPE_MAX,
};
/* type of values returned from helper functions */
@@ -281,6 +308,8 @@ enum bpf_return_type {
RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */
RET_PTR_TO_ALLOC_MEM_OR_NULL, /* returns a pointer to dynamically allocated memory or NULL */
RET_PTR_TO_BTF_ID_OR_NULL, /* returns a pointer to a btf_id or NULL */
+ RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL, /* returns a pointer to a valid memory or a btf_id or NULL */
+ RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */
};
/* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs
@@ -302,13 +331,18 @@ struct bpf_func_proto {
};
enum bpf_arg_type arg_type[5];
};
- int *btf_id; /* BTF ids of arguments */
- bool (*check_btf_id)(u32 btf_id, u32 arg); /* if the argument btf_id is
- * valid. Often used if more
- * than one btf id is permitted
- * for this argument.
- */
+ union {
+ struct {
+ u32 *arg1_btf_id;
+ u32 *arg2_btf_id;
+ u32 *arg3_btf_id;
+ u32 *arg4_btf_id;
+ u32 *arg5_btf_id;
+ };
+ u32 *arg_btf_id[5];
+ };
int *ret_btf_id; /* return value btf_id */
+ bool (*allowed)(const struct bpf_prog *prog);
};
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -352,14 +386,29 @@ enum bpf_reg_type {
PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
- PTR_TO_BTF_ID, /* reg points to kernel struct */
- PTR_TO_BTF_ID_OR_NULL, /* reg points to kernel struct or NULL */
+ /* PTR_TO_BTF_ID points to a kernel struct that does not need
+ * to be null checked by the BPF program. This does not imply the
+ * pointer is _not_ null and in practice this can easily be a null
+ * pointer when reading pointer chains. The assumption is program
+ * context will handle null pointer dereference typically via fault
+ * handling. The verifier must keep this in mind and can make no
+ * assumptions about null or non-null when doing branch analysis.
+	 * Further, when passed into helpers, the helpers cannot, without
+	 * additional context, assume the value is non-null.
+ */
+ PTR_TO_BTF_ID,
+ /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not
+ * been checked for null. Used primarily to inform the verifier
+ * an explicit null check is required for this struct.
+ */
+ PTR_TO_BTF_ID_OR_NULL,
PTR_TO_MEM, /* reg points to valid memory region */
PTR_TO_MEM_OR_NULL, /* reg points to valid memory region or NULL */
PTR_TO_RDONLY_BUF, /* reg points to a readonly buffer */
PTR_TO_RDONLY_BUF_OR_NULL, /* reg points to a readonly buffer or NULL */
PTR_TO_RDWR_BUF, /* reg points to a read/write buffer */
PTR_TO_RDWR_BUF_OR_NULL, /* reg points to a read/write buffer or NULL */
+ PTR_TO_PERCPU_BTF_ID, /* reg points to a percpu kernel variable */
};
/* The information passed from prog-specific *_is_valid_access
@@ -514,6 +563,8 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
/* these two functions are called from generated trampoline */
u64 notrace __bpf_prog_enter(void);
void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+void notrace __bpf_prog_enter_sleepable(void);
+void notrace __bpf_prog_exit_sleepable(void);
struct bpf_ksym {
unsigned long start;
@@ -559,6 +610,13 @@ struct bpf_trampoline {
struct bpf_ksym ksym;
};
+struct bpf_attach_target_info {
+ struct btf_func_model fmodel;
+ long tgt_addr;
+ const char *tgt_name;
+ const struct btf_type *tgt_type;
+};
+
#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
struct bpf_dispatcher_prog {
@@ -586,9 +644,10 @@ static __always_inline unsigned int bpf_dispatcher_nop_func(
return bpf_func(ctx, insnsi);
}
#ifdef CONFIG_BPF_JIT
-struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
-int bpf_trampoline_link_prog(struct bpf_prog *prog);
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
+int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
+int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
+struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
#define BPF_DISPATCHER_INIT(_name) { \
.mutex = __MUTEX_INITIALIZER(_name.mutex), \
@@ -633,17 +692,20 @@ void bpf_image_ksym_del(struct bpf_ksym *ksym);
void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym);
#else
-static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
+ struct bpf_trampoline *tr)
{
- return NULL;
+ return -ENOTSUPP;
}
-static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
+static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
+ struct bpf_trampoline *tr)
{
return -ENOTSUPP;
}
-static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
+static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info)
{
- return -ENOTSUPP;
+ return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
@@ -670,16 +732,19 @@ enum bpf_jit_poke_reason {
/* Descriptor of pokes pointing /into/ the JITed image. */
struct bpf_jit_poke_descriptor {
- void *ip;
+ void *tailcall_target;
+ void *tailcall_bypass;
+ void *bypass_addr;
union {
struct {
struct bpf_map *map;
u32 key;
} tail_call;
};
- bool ip_stable;
+ bool tailcall_target_stable;
u8 adj_off;
u16 reason;
+ u32 insn_idx;
};
/* reg_type info for ctx arguments */
@@ -704,13 +769,18 @@ struct bpf_prog_aux {
u32 max_rdonly_access;
u32 max_rdwr_access;
const struct bpf_ctx_arg_aux *ctx_arg_info;
- struct bpf_prog *linked_prog;
+ struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */
+ struct bpf_prog *dst_prog;
+ struct bpf_trampoline *dst_trampoline;
+ enum bpf_prog_type saved_dst_prog_type;
+ enum bpf_attach_type saved_dst_attach_type;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
bool offload_requested;
bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
bool func_proto_unreliable;
+ bool sleepable;
+ bool tail_call_reachable;
enum bpf_tramp_prog_type trampoline_prog_type;
- struct bpf_trampoline *trampoline;
struct hlist_node tramp_hlist;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
@@ -723,6 +793,7 @@ struct bpf_prog_aux {
struct bpf_ksym ksym;
const struct bpf_prog_ops *ops;
struct bpf_map **used_maps;
+ struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
@@ -1218,12 +1289,18 @@ typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
+typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
const char *target;
bpf_iter_attach_target_t attach_target;
bpf_iter_detach_target_t detach_target;
+ bpf_iter_show_fdinfo_t show_fdinfo;
+ bpf_iter_fill_link_info_t fill_link_info;
u32 ctx_arg_info_size;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
const struct bpf_iter_seq_info *seq_info;
@@ -1250,6 +1327,10 @@ int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx);
+void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq);
+int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info);
int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value);
@@ -1292,6 +1373,8 @@ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
union bpf_attr __user *uattr);
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
+struct btf *bpf_get_btf_vmlinux(void);
+
/* Map specifics */
struct xdp_buff;
struct sk_buff;
@@ -1333,6 +1416,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
@@ -1340,8 +1426,8 @@ int btf_struct_access(struct bpf_verifier_log *log,
const struct btf_type *t, int off, int size,
enum bpf_access_type atype,
u32 *next_btf_id);
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
- const struct bpf_func_proto *fn, int);
+bool btf_struct_ids_match(struct bpf_verifier_log *log,
+ int off, u32 id, u32 need_type_id);
int btf_distill_func_proto(struct bpf_verifier_log *log,
struct btf *btf,
@@ -1354,10 +1440,11 @@ int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *reg);
-int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
struct bpf_prog *bpf_prog_by_id(u32 id);
+struct bpf_link *bpf_link_by_id(u32 id);
const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
#else /* !CONFIG_BPF_SYSCALL */
@@ -1637,6 +1724,7 @@ int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog,
struct bpf_prog *old, u32 which);
int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog);
int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype);
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags);
void sock_map_unhash(struct sock *sk);
void sock_map_close(struct sock *sk, long timeout);
#else
@@ -1658,6 +1746,12 @@ static inline int sock_map_prog_detach(const union bpf_attr *attr,
{
return -EOPNOTSUPP;
}
+
+static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
+ u64 flags)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_BPF_STREAM_PARSER */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
@@ -1736,6 +1830,10 @@ extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto;
extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto;
+extern const struct bpf_func_proto bpf_copy_from_user_proto;
+extern const struct bpf_func_proto bpf_snprintf_btf_proto;
+extern const struct bpf_func_proto bpf_per_cpu_ptr_proto;
+extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);
@@ -1850,4 +1948,7 @@ enum bpf_text_poke_type {
int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
void *addr1, void *addr2);
+struct btf_id_set;
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
+
#endif /* _LINUX_BPF_H */
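
The new map_meta_equal callback is the runtime gatekeeper for inner maps. A sketch of the two usual wirings, loosely following what this series does for array maps (demo_* names are illustrative):

/* Generic check (map_type, key/value size, flags) is enough: */
const struct bpf_map_ops demo_htab_map_ops = {
	.map_meta_equal	= bpf_map_meta_equal,
	/* .map_alloc, .map_lookup_elem, ... elided */
};

/* Array maps additionally pin max_entries, since the verifier may
 * have relied on it for bounds checks:
 */
static bool demo_array_map_meta_equal(const struct bpf_map *meta0,
				      const struct bpf_map *meta1)
{
	return meta0->max_entries == meta1->max_entries &&
	       bpf_map_meta_equal(meta0, meta1);
}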
diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h
new file mode 100644
index 000000000000..b2c9463f36a1
--- /dev/null
+++ b/include/linux/bpf_local_storage.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 Facebook
+ * Copyright 2020 Google LLC.
+ */
+
+#ifndef _BPF_LOCAL_STORAGE_H
+#define _BPF_LOCAL_STORAGE_H
+
+#include <linux/bpf.h>
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <uapi/linux/btf.h>
+
+#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
+
+struct bpf_local_storage_map_bucket {
+ struct hlist_head list;
+ raw_spinlock_t lock;
+};
+
+/* The map is not the primary owner of a bpf_local_storage_elem.
+ * Instead, the container object (e.g. sk->sk_bpf_storage) is.
+ *
+ * The map (bpf_local_storage_map) is for two purposes
+ * 1. Define the size of the "local storage". It is
+ * the map's value_size.
+ *
+ * 2. Maintain a list to keep track of all elems such
+ * that they can be cleaned up during the map destruction.
+ *
+ * When a bpf local storage is being looked up for a
+ * particular object, the "bpf_map" pointer is actually used
+ * as the "key" to search in the list of elem in
+ * the respective bpf_local_storage owned by the object.
+ *
+ * e.g. sk->sk_bpf_storage is the mini-map with the "bpf_map" pointer
+ * as the searching key.
+ */
+struct bpf_local_storage_map {
+ struct bpf_map map;
+ /* Lookup elem does not require accessing the map.
+ *
+ * Updating/Deleting requires a bucket lock to
+	 * link/unlink the elem from the map. Multiple
+	 * buckets are used to reduce contention.
+ */
+ struct bpf_local_storage_map_bucket *buckets;
+ u32 bucket_log;
+ u16 elem_size;
+ u16 cache_idx;
+};
+
+struct bpf_local_storage_data {
+ /* smap is used as the searching key when looking up
+ * from the object's bpf_local_storage.
+ *
+ * Put it in the same cacheline as the data to minimize
+	 * the number of cacheline accesses in the cache-hit case.
+ */
+ struct bpf_local_storage_map __rcu *smap;
+ u8 data[] __aligned(8);
+};
+
+/* Linked to bpf_local_storage and bpf_local_storage_map */
+struct bpf_local_storage_elem {
+ struct hlist_node map_node; /* Linked to bpf_local_storage_map */
+ struct hlist_node snode; /* Linked to bpf_local_storage */
+ struct bpf_local_storage __rcu *local_storage;
+ struct rcu_head rcu;
+ /* 8 bytes hole */
+	/* The data is stored in another cacheline to minimize
+	 * the number of cacheline accesses during a cache hit.
+ */
+ struct bpf_local_storage_data sdata ____cacheline_aligned;
+};
+
+struct bpf_local_storage {
+ struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
+ struct hlist_head list; /* List of bpf_local_storage_elem */
+ void *owner; /* The object that owns the above "list" of
+ * bpf_local_storage_elem.
+ */
+ struct rcu_head rcu;
+ raw_spinlock_t lock; /* Protect adding/removing from the "list" */
+};
+
+/* U16_MAX is much more than enough for sk local storage
+ * considering a tcp_sock is ~2k.
+ */
+#define BPF_LOCAL_STORAGE_MAX_VALUE_SIZE \
+ min_t(u32, \
+ (KMALLOC_MAX_SIZE - MAX_BPF_STACK - \
+ sizeof(struct bpf_local_storage_elem)), \
+ (U16_MAX - sizeof(struct bpf_local_storage_elem)))
+
+#define SELEM(_SDATA) \
+ container_of((_SDATA), struct bpf_local_storage_elem, sdata)
+#define SDATA(_SELEM) (&(_SELEM)->sdata)
+
+struct bpf_local_storage_cache {
+ spinlock_t idx_lock;
+ u64 idx_usage_counts[BPF_LOCAL_STORAGE_CACHE_SIZE];
+};
+
+#define DEFINE_BPF_STORAGE_CACHE(name) \
+static struct bpf_local_storage_cache name = { \
+ .idx_lock = __SPIN_LOCK_UNLOCKED(name.idx_lock), \
+}
+
+u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache);
+void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+ u16 idx);
+
+/* Helper functions for bpf_local_storage */
+int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
+
+struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr);
+
+struct bpf_local_storage_data *
+bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ bool cacheit_lockit);
+
+void bpf_local_storage_map_free(struct bpf_local_storage_map *smap);
+
+int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
+void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem);
+
+bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem,
+ bool uncharge_omem);
+
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem);
+
+void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem);
+
+void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem);
+
+struct bpf_local_storage_elem *
+bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
+ bool charge_mem);
+
+int
+bpf_local_storage_alloc(void *owner,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *first_selem);
+
+struct bpf_local_storage_data *
+bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ void *value, u64 map_flags);
+
+#endif /* _BPF_LOCAL_STORAGE_H */
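
A sketch of how an owner type wires into this shared infrastructure, modeled on the sk/inode storage users of the series (demo_* names are illustrative):

DEFINE_BPF_STORAGE_CACHE(demo_cache);

static struct bpf_map *demo_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&demo_cache);
	return &smap->map;
}

static void demo_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap =
		(struct bpf_local_storage_map *)map;

	bpf_local_storage_cache_idx_free(&demo_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap);
}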
diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h
index af74712af585..aaacb6aafc87 100644
--- a/include/linux/bpf_lsm.h
+++ b/include/linux/bpf_lsm.h
@@ -17,9 +17,28 @@
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
+struct bpf_storage_blob {
+ struct bpf_local_storage __rcu *storage;
+};
+
+extern struct lsm_blob_sizes bpf_lsm_blob_sizes;
+
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog);
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ if (unlikely(!inode->i_security))
+ return NULL;
+
+ return inode->i_security + bpf_lsm_blob_sizes.lbs_inode;
+}
+
+extern const struct bpf_func_proto bpf_inode_storage_get_proto;
+extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
+void bpf_inode_storage_free(struct inode *inode);
+
#else /* !CONFIG_BPF_LSM */
static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
@@ -28,6 +47,16 @@ static inline int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
return -EOPNOTSUPP;
}
+static inline struct bpf_storage_blob *bpf_inode(
+ const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline void bpf_inode_storage_free(struct inode *inode)
+{
+}
+
#endif /* CONFIG_BPF_LSM */
#endif /* _LINUX_BPF_LSM_H */
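
A sketch (not from this patch) of resolving the per-inode blob before touching its local storage; the caller is assumed to be in an RCU read-side section:

static struct bpf_local_storage *demo_inode_storage(struct inode *inode)
{
	struct bpf_storage_blob *bsb = bpf_inode(inode);

	if (!bsb)	/* no security blob: inode storage unavailable */
		return NULL;
	return rcu_dereference(bsb->storage);
}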
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index a52a5688418e..2e6f568377f1 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -107,6 +107,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_SK_STORAGE, sk_storage_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKHASH, sock_hash_ops)
#endif
+#ifdef CONFIG_BPF_LSM
+BPF_MAP_TYPE(BPF_MAP_TYPE_INODE_STORAGE, inode_storage_map_ops)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 53c7bd568c5d..e83ef6f6bf43 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -308,6 +308,13 @@ struct bpf_insn_aux_data {
u32 map_index; /* index into used_maps[] */
u32 map_off; /* offset from value base address */
};
+ struct {
+ enum bpf_reg_type reg_type; /* type of pseudo_btf_id */
+ union {
+ u32 btf_id; /* btf_id for struct typed var */
+ u32 mem_size; /* mem_size for non-struct typed var */
+ };
+ } btf_var;
};
u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
@@ -347,8 +354,9 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
- return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
- log->level == BPF_LOG_KERNEL;
+ return log &&
+ ((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
+ log->level == BPF_LOG_KERNEL);
}
#define BPF_MAX_SUBPROGS 256
@@ -358,6 +366,9 @@ struct bpf_subprog_info {
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
+ bool has_tail_call;
+ bool tail_call_reachable;
+ bool has_ld_abs;
};
/* single container for all structs
@@ -446,4 +457,17 @@ bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
+/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
+static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
+ u32 btf_id)
+{
+ return tgt_prog ? (((u64)tgt_prog->aux->id) << 32 | btf_id) : btf_id;
+}
+
+int bpf_check_attach_target(struct bpf_verifier_log *log,
+ const struct bpf_prog *prog,
+ const struct bpf_prog *tgt_prog,
+ u32 btf_id,
+ struct bpf_attach_target_info *tgt_info);
+
#endif /* _LINUX_BPF_VERIFIER_H */
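
bpf_trampoline_compute_key() simply packs the two identifiers into one u64; a quick worked example with illustrative values:

/* tgt_prog->aux->id == 7, btf_id == 42:
 *   key = (7ULL << 32) | 42 == 0x70000002aULL
 * tgt_prog == NULL (attaching to a vmlinux BTF id):
 *   key = 42
 */
u64 key = bpf_trampoline_compute_key(tgt_prog, btf_id);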
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 6ad4c000661a..d0bd226d6bd9 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -30,6 +30,7 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM89610 0x03625cd0
+#define PHY_ID_BCM72113 0x35905310
#define PHY_ID_BCM7250 0xae025280
#define PHY_ID_BCM7255 0xae025120
#define PHY_ID_BCM7260 0xae025190
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 8b81fbb4497c..2bf641829664 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <uapi/linux/btf.h>
+#include <uapi/linux/bpf.h>
#define BTF_TYPE_EMIT(type) ((void)(type *)0)
@@ -13,6 +14,7 @@ struct btf;
struct btf_member;
struct btf_type;
union bpf_attr;
+struct btf_show;
extern const struct file_operations btf_fops;
@@ -46,8 +48,45 @@ int btf_get_info_by_fd(const struct btf *btf,
const struct btf_type *btf_type_id_size(const struct btf *btf,
u32 *type_id,
u32 *ret_size);
+
+/*
+ * Options to control show behaviour.
+ * - BTF_SHOW_COMPACT: no formatting around type information
+ * - BTF_SHOW_NONAME: no struct/union member names/types
+ * - BTF_SHOW_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_SHOW_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ * - BTF_SHOW_UNSAFE: skip use of bpf_probe_read() to safely read
+ * data before displaying it.
+ */
+#define BTF_SHOW_COMPACT BTF_F_COMPACT
+#define BTF_SHOW_NONAME BTF_F_NONAME
+#define BTF_SHOW_PTR_RAW BTF_F_PTR_RAW
+#define BTF_SHOW_ZERO BTF_F_ZERO
+#define BTF_SHOW_UNSAFE (1ULL << 4)
+
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m);
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id, void *obj,
+ struct seq_file *m, u64 flags);
+
+/*
+ * Copy len bytes of string representation of obj of BTF type_id into buf.
+ *
+ * @btf: struct btf object
+ * @type_id: type id of type obj points to
+ * @obj: pointer to typed data
+ * @buf: buffer to write to
+ * @len: maximum length to write to buf
+ * @flags: show options (see above)
+ *
+ * Return: length that would have been/was copied as per snprintf, or
+ * negative error.
+ */
+int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
+ char *buf, int len, u64 flags);
+
int btf_get_fd_by_id(u32 id);
u32 btf_id(const struct btf *btf);
bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
@@ -64,14 +103,18 @@ const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *
btf_resolve_size(const struct btf *btf, const struct btf_type *type,
- u32 *type_size, const struct btf_type **elem_type,
- u32 *total_nelems);
+ u32 *type_size);
#define for_each_member(i, struct_type, member) \
for (i = 0, member = btf_type_member(struct_type); \
i < btf_type_vlen(struct_type); \
i++, member++)
+#define for_each_vsi(i, datasec_type, member) \
+ for (i = 0, member = btf_type_var_secinfo(datasec_type); \
+ i < btf_type_vlen(datasec_type); \
+ i++, member++)
+
static inline bool btf_type_is_ptr(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
@@ -107,6 +150,21 @@ static inline bool btf_type_is_func_proto(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
}
+static inline bool btf_type_is_var(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
+}
+
+/* union is only a special case of struct:
+ * all its offsetof(member) == 0
+ */
+static inline bool btf_type_is_struct(const struct btf_type *t)
+{
+ u8 kind = BTF_INFO_KIND(t->info);
+
+ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
+}
+
static inline u16 btf_type_vlen(const struct btf_type *t)
{
return BTF_INFO_VLEN(t->info);
@@ -141,6 +199,12 @@ static inline const struct btf_member *btf_type_member(const struct btf_type *t)
return (const struct btf_member *)(t + 1);
}
+static inline const struct btf_var_secinfo *btf_type_var_secinfo(
+ const struct btf_type *t)
+{
+ return (const struct btf_var_secinfo *)(t + 1);
+}
+
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
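
A sketch of the new snprintf-style show interface: dump a typed object into a fixed buffer, where the return value follows snprintf semantics (demo_* name is illustrative):

static int demo_dump_object(const struct btf *btf, u32 type_id, void *obj,
			    char *buf, int len)
{
	int ret;

	ret = btf_type_snprintf_show(btf, type_id, obj, buf, len,
				     BTF_SHOW_COMPACT | BTF_SHOW_ZERO);
	if (ret < 0)
		return ret;		/* error from the show machinery */
	return ret < len ? 0 : -ENOSPC;	/* ret >= len means truncation */
}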
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index 4867d549e3c1..57890b357f85 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -3,6 +3,11 @@
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H
+struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+};
+
#ifdef CONFIG_DEBUG_INFO_BTF
#include <linux/compiler.h> /* for __PASTE */
@@ -62,7 +67,7 @@ asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " " #name "; \n" \
#name ":; \n" \
-".popsection; \n"); \
+".popsection; \n");
#define BTF_ID_LIST(name) \
__BTF_ID_LIST(name, local) \
@@ -71,6 +76,13 @@ extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \
__BTF_ID_LIST(name, globl)
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
* It's used when we want to define 'unused' entry
@@ -88,12 +100,57 @@ asm( \
".zero 4 \n" \
".popsection; \n");
+/*
+ * The BTF_SET_START/END macro pair defines a sorted list of
+ * BTF IDs plus its member count, with the following layout:
+ *
+ * BTF_SET_START(list)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ * BTF_SET_END(list)
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * __BTF_ID__type2__name2__4:
+ * .zero 4
+ *
+ */
+#define __BTF_SET_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set__" #name "; \n" \
+"__BTF_ID__set__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET_START(name, local)
+
+#define BTF_SET_START_GLOBAL(name) \
+__BTF_ID_LIST(name, globl) \
+__BTF_SET_START(name, globl)
+
+#define BTF_SET_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set name;
+
#else
#define BTF_ID_LIST(name) static u32 name[5];
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
+#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
+#define BTF_SET_END(name)
#endif /* CONFIG_DEBUG_INFO_BTF */
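
A typical use of the new set macros pairs them with btf_id_set_contains() (declared in linux/bpf.h above); the IDs themselves are filled in at link time by resolve_btfids. Sketch with an illustrative set name:

BTF_SET_START(demo_structs)
BTF_ID(struct, tcp_sock)
BTF_ID(struct, inet_sock)
BTF_SET_END(demo_structs)

static bool demo_btf_id_allowed(u32 btf_id)
{
	return btf_id_set_contains(&demo_structs, btf_id);
}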
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index e20a0cd09ba5..5fb8d0e3f9c1 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -2,7 +2,7 @@
/*
* linux/can/core.h
*
- * Protoypes and definitions for CAN protocol modules using the PF_CAN core
+ * Prototypes and definitions for CAN protocol modules using the PF_CAN core
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
@@ -18,13 +18,6 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20170425"
-
-/* increment this number each time you change some user-space interface */
-#define CAN_ABI_VERSION "9"
-
-#define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION
-
#define DNAME(dev) ((dev) ? (dev)->name : "any")
/**
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5e3d45525bd3..41ff31795320 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -82,15 +82,30 @@ struct can_priv {
#endif
};
+#define CAN_SYNC_SEG 1
+
+/*
+ * can_bit_time() - Duration of one bit
+ *
+ * Please refer to ISO 11898-1:2015, section 11.3.1.1 "Bit time" for
+ * additional information.
+ *
+ * Return: the number of time quanta in one bit.
+ */
+static inline unsigned int can_bit_time(const struct can_bittiming *bt)
+{
+ return CAN_SYNC_SEG + bt->prop_seg + bt->phase_seg1 + bt->phase_seg2;
+}
+
/*
* get_can_dlc(value) - helper macro to cast a given data length code (dlc)
- * to __u8 and ensure the dlc value to be max. 8 bytes.
+ * to u8 and ensure the dlc value to be max. 8 bytes.
*
* To be used in the CAN netdriver receive path to ensure conformance with
* ISO 11898-1 Chapter 8.4.2.3 (DLC field)
*/
-#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
-#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+#define get_can_dlc(i) (min_t(u8, (i), CAN_MAX_DLC))
+#define get_canfd_dlc(i) (min_t(u8, (i), CANFD_MAX_DLC))
/* Check for outgoing skbs that have not been created by the CAN subsystem */
static inline bool can_skb_headroom_valid(struct net_device *dev,
@@ -108,7 +123,7 @@ static inline bool can_skb_headroom_valid(struct net_device *dev,
skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* preform proper loopback on capable devices */
+ /* perform proper loopback on capable devices */
if (dev->flags & IFF_ECHO)
skb->pkt_type = PACKET_LOOPBACK;
else
@@ -201,8 +216,8 @@ void can_bus_off(struct net_device *dev);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
-void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
- unsigned int idx);
+int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
+ unsigned int idx);
struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx,
u8 *len_ptr);
unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 1b78a0cfb615..f1b38088b765 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -35,6 +35,9 @@ int can_rx_offload_add_timestamp(struct net_device *dev,
int can_rx_offload_add_fifo(struct net_device *dev,
struct can_rx_offload *offload,
unsigned int weight);
+int can_rx_offload_add_manual(struct net_device *dev,
+ struct can_rx_offload *offload,
+ unsigned int weight);
int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload,
u64 reg);
int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload);
diff --git a/include/linux/cookie.h b/include/linux/cookie.h
new file mode 100644
index 000000000000..0c159f585109
--- /dev/null
+++ b/include/linux/cookie.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COOKIE_H
+#define __LINUX_COOKIE_H
+
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <asm/local.h>
+
+struct pcpu_gen_cookie {
+ local_t nesting;
+ u64 last;
+} __aligned(16);
+
+struct gen_cookie {
+ struct pcpu_gen_cookie __percpu *local;
+ atomic64_t forward_last ____cacheline_aligned_in_smp;
+ atomic64_t reverse_last;
+};
+
+#define COOKIE_LOCAL_BATCH 4096
+
+#define DEFINE_COOKIE(name) \
+ static DEFINE_PER_CPU(struct pcpu_gen_cookie, __##name); \
+ static struct gen_cookie name = { \
+ .local = &__##name, \
+ .forward_last = ATOMIC64_INIT(0), \
+ .reverse_last = ATOMIC64_INIT(0), \
+ }
+
+static __always_inline u64 gen_cookie_next(struct gen_cookie *gc)
+{
+ struct pcpu_gen_cookie *local = this_cpu_ptr(gc->local);
+ u64 val;
+
+ if (likely(local_inc_return(&local->nesting) == 1)) {
+ val = local->last;
+ if (__is_defined(CONFIG_SMP) &&
+ unlikely((val & (COOKIE_LOCAL_BATCH - 1)) == 0)) {
+ s64 next = atomic64_add_return(COOKIE_LOCAL_BATCH,
+ &gc->forward_last);
+ val = next - COOKIE_LOCAL_BATCH;
+ }
+ local->last = ++val;
+ } else {
+ val = atomic64_dec_return(&gc->reverse_last);
+ }
+ local_dec(&local->nesting);
+ return val;
+}
+
+#endif /* __LINUX_COOKIE_H */
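
A sketch of the intended usage pattern (names illustrative): one global generator, with cheap per-CPU batched allocation on the fast path; callers must not migrate CPUs around gen_cookie_next():

DEFINE_COOKIE(demo_cookie);

static u64 demo_new_cookie(void)
{
	u64 id;

	preempt_disable();	/* this_cpu_ptr() needs a stable CPU */
	id = gen_cookie_next(&demo_cookie);
	preempt_enable();
	return id;
}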
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 311aa04e7520..88cd72dfa4e0 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -5,37 +5,49 @@
#ifndef _NET_DSA_8021Q_H
#define _NET_DSA_8021Q_H
+#include <linux/refcount.h>
#include <linux/types.h>
struct dsa_switch;
struct sk_buff;
struct net_device;
struct packet_type;
+struct dsa_8021q_context;
struct dsa_8021q_crosschip_link {
struct list_head list;
int port;
- struct dsa_switch *other_ds;
+ struct dsa_8021q_context *other_ctx;
int other_port;
refcount_t refcount;
};
+struct dsa_8021q_ops {
+ int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags);
+ int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid);
+};
+
+struct dsa_8021q_context {
+ const struct dsa_8021q_ops *ops;
+ struct dsa_switch *ds;
+ struct list_head crosschip_links;
+ /* EtherType of RX VID, used for filtering on master interface */
+ __be16 proto;
+};
+
#define DSA_8021Q_N_SUBVLAN 8
#if IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q)
-int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
- bool enabled);
+int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled);
-int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links);
+int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port);
-int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links);
+int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port);
struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
u16 tpid, u16 tci);
@@ -56,24 +68,21 @@ bool vid_is_dsa_8021q(u16 vid);
#else
-int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
- bool enabled)
+int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
{
return 0;
}
-int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links)
+int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
{
return 0;
}
-int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links)
+int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
{
return 0;
}
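
How a switch driver might instantiate the new dsa_8021q_context, loosely modeled on the sja1105 conversion in this series (demo_* callbacks are hypothetical):

static int demo_vlan_add(struct dsa_switch *ds, int port, u16 vid, u16 flags);
static int demo_vlan_del(struct dsa_switch *ds, int port, u16 vid);

static const struct dsa_8021q_ops demo_dsa_8021q_ops = {
	.vlan_add	= demo_vlan_add,
	.vlan_del	= demo_vlan_del,
};

static int demo_setup_8021q(struct dsa_switch *ds,
			    struct dsa_8021q_context *ctx)
{
	ctx->ops = &demo_dsa_8021q_ops;
	/* Filter on the standard 802.1Q EtherType on the master. */
	ctx->proto = htons(ETH_P_8021Q);
	ctx->ds = ds;
	INIT_LIST_HEAD(&ctx->crosschip_links);

	return dsa_8021q_setup(ctx, true);
}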
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 969a80211df6..6408b446051f 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -241,6 +241,27 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \
ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL)
+#define ETHTOOL_STAT_NOT_SET (~0ULL)
+
+/**
+ * struct ethtool_pause_stats - statistics for IEEE 802.3x pause frames
+ * @tx_pause_frames: transmitted pause frame count. Reported to user space
+ * as %ETHTOOL_A_PAUSE_STAT_TX_FRAMES.
+ *
+ * Equivalent to `30.3.4.2 aPAUSEMACCtrlFramesTransmitted`
+ * from the standard.
+ *
+ * @rx_pause_frames: received pause frame count. Reported to user space
+ *	as %ETHTOOL_A_PAUSE_STAT_RX_FRAMES.
+ *
+ * Equivalent to `30.3.4.3 aPAUSEMACCtrlFramesReceived`
+ * from the standard.
+ */
+struct ethtool_pause_stats {
+ u64 tx_pause_frames;
+ u64 rx_pause_frames;
+};
+
/**
* struct ethtool_ops - optional netdev operations
* @supported_coalesce_params: supported types of interrupt coalescing.
@@ -282,6 +303,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32,
* Returns a negative error code or zero.
* @get_ringparam: Report ring sizes
* @set_ringparam: Set ring sizes. Returns a negative error code or zero.
+ * @get_pause_stats: Report pause frame statistics. Drivers must not zero
+ * statistics which they don't report. The stats structure is initialized
+ *	to ETHTOOL_STAT_NOT_SET, indicating that the driver does not report
+ *	that statistic.
* @get_pauseparam: Report pause parameters
* @set_pauseparam: Set pause parameters. Returns a negative error code
* or zero.
@@ -418,6 +442,8 @@ struct ethtool_ops {
struct ethtool_ringparam *);
int (*set_ringparam)(struct net_device *,
struct ethtool_ringparam *);
+ void (*get_pause_stats)(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats);
void (*get_pauseparam)(struct net_device *,
struct ethtool_pauseparam*);
int (*set_pauseparam)(struct net_device *,
@@ -479,6 +505,10 @@ struct ethtool_ops {
struct ethtool_fecparam *);
void (*get_ethtool_phy_stats)(struct net_device *,
struct ethtool_stats *, u64 *);
+ int (*get_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, void *);
+ int (*set_phy_tunable)(struct net_device *,
+ const struct ethtool_tunable *, const void *);
};
int ethtool_check_ops(const struct ethtool_ops *ops);
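
A sketch of a driver callback under the new contract: report only what the MAC counts and leave the rest at ETHTOOL_STAT_NOT_SET (demo_priv/demo_read_counter are hypothetical):

static void demo_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *stats)
{
	struct demo_priv *priv = netdev_priv(dev);

	stats->tx_pause_frames = demo_read_counter(priv, DEMO_TX_PAUSE);
	/* This MAC has no RX pause counter: rx_pause_frames stays unset. */
}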
diff --git a/include/linux/filter.h b/include/linux/filter.h
index ebfb7cfb65f1..20fc24c9779a 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -1236,13 +1236,17 @@ struct bpf_sock_addr_kern {
struct bpf_sock_ops_kern {
struct sock *sk;
- u32 op;
union {
u32 args[4];
u32 reply;
u32 replylong[4];
};
- u32 is_fullsock;
+ struct sk_buff *syn_skb;
+ struct sk_buff *skb;
+ void *skb_data_end;
+ u8 op;
+ u8 is_fullsock;
+ u8 remaining_opt_len;
u64 temp; /* temp and everything after is not
* initialized to 0 before calling
* the BPF program. New fields that
@@ -1283,6 +1287,8 @@ int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);
struct bpf_sk_lookup_kern {
u16 family;
u16 protocol;
+ __be16 sport;
+ u16 dport;
struct {
__be32 saddr;
__be32 daddr;
@@ -1291,8 +1297,6 @@ struct bpf_sk_lookup_kern {
const struct in6_addr *saddr;
const struct in6_addr *daddr;
} v6;
- __be16 sport;
- u16 dport;
struct sock *selected_sk;
bool no_reuseport;
};
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 884b8f8ca06d..01acebe37fab 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -136,6 +136,7 @@ struct ptp_qoriq_registers {
#define DEFAULT_TMR_PRSC 2
#define DEFAULT_FIPER1_PERIOD 1000000000
#define DEFAULT_FIPER2_PERIOD 1000000000
+#define DEFAULT_FIPER3_PERIOD 1000000000
struct ptp_qoriq {
void __iomem *base;
@@ -147,6 +148,7 @@ struct ptp_qoriq {
struct dentry *debugfs_root;
struct device *dev;
bool extts_fifo_support;
+ bool fiper3_support;
int irq;
int phc_index;
u32 tclk_period; /* nanoseconds */
@@ -155,6 +157,7 @@ struct ptp_qoriq {
u32 cksel;
u32 tmr_fiper1;
u32 tmr_fiper2;
+ u32 tmr_fiper3;
u32 (*read)(unsigned __iomem *addr);
void (*write)(unsigned __iomem *addr, u32 val);
};
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index c47f43e65a2f..770408b2fdaf 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -151,6 +151,9 @@
#define IEEE80211_ANO_NETTYPE_WILD 15
+/* bits unique to S1G beacon */
+#define IEEE80211_S1G_BCN_NEXT_TBTT 0x100
+
/* control extension - for IEEE80211_FTYPE_CTL | IEEE80211_STYPE_CTL_EXT */
#define IEEE80211_CTL_EXT_POLL 0x2000
#define IEEE80211_CTL_EXT_SPR 0x3000
@@ -554,6 +557,28 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc)
}
/**
+ * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT &&
+ * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_next_tbtt_present(__le16 fc)
+{
+ return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) ==
+ cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) &&
+ fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT);
+}
+
+/**
+ * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only
+ * true for S1G beacons when they're short.
+ * @fc: frame control bytes in little-endian byteorder
+ */
+static inline bool ieee80211_is_s1g_short_beacon(__le16 fc)
+{
+ return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc);
+}
+
+/**
* ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM
* @fc: frame control bytes in little-endian byteorder
*/
@@ -962,6 +987,25 @@ enum ieee80211_vht_opmode_bits {
IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF = 0x80,
};
+/**
+ * enum ieee80211_s1g_chanwidth - S1G channel widths
+ * These are defined in IEEE 802.11ah-2016, Table 10-20,
+ * as BSS Channel Width.
+ *
+ * @IEEE80211_S1G_CHANWIDTH_1MHZ: 1MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_2MHZ: 2MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_4MHZ: 4MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_8MHZ: 8MHz operating channel
+ * @IEEE80211_S1G_CHANWIDTH_16MHZ: 16MHz operating channel
+ */
+enum ieee80211_s1g_chanwidth {
+ IEEE80211_S1G_CHANWIDTH_1MHZ = 0,
+ IEEE80211_S1G_CHANWIDTH_2MHZ = 1,
+ IEEE80211_S1G_CHANWIDTH_4MHZ = 3,
+ IEEE80211_S1G_CHANWIDTH_8MHZ = 7,
+ IEEE80211_S1G_CHANWIDTH_16MHZ = 15,
+};
+
#define WLAN_SA_QUERY_TR_ID_LEN 2
#define WLAN_MEMBERSHIP_LEN 8
#define WLAN_USER_POSITION_LEN 16
@@ -1034,6 +1078,13 @@ struct ieee80211_ext {
u8 change_seq;
u8 variable[0];
} __packed s1g_beacon;
+ struct {
+ u8 sa[ETH_ALEN];
+ __le32 timestamp;
+ u8 change_seq;
+ u8 next_tbtt[3];
+ u8 variable[0];
+ } __packed s1g_short_beacon;
} u;
} __packed __aligned(2);
@@ -1070,6 +1121,11 @@ struct ieee80211_mgmt {
} __packed assoc_resp, reassoc_resp;
struct {
__le16 capab_info;
+ __le16 status_code;
+ u8 variable[0];
+ } __packed s1g_assoc_resp, s1g_reassoc_resp;
+ struct {
+ __le16 capab_info;
__le16 listen_interval;
u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
@@ -2294,8 +2350,11 @@ ieee80211_he_6ghz_oper(const struct ieee80211_he_operation *he_oper)
}
/* HE Spatial Reuse defines */
-#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT 0x4
-#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT 0x8
+#define IEEE80211_HE_SPR_PSR_DISALLOWED BIT(0)
+#define IEEE80211_HE_SPR_NON_SRG_OBSS_PD_SR_DISALLOWED BIT(1)
+#define IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT BIT(2)
+#define IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT BIT(3)
+#define IEEE80211_HE_SPR_HESIGA_SR_VAL15_ALLOWED BIT(4)
/*
* ieee80211_he_spr_size - calculate 802.11ax HE Spatial Reuse IE size
@@ -2330,84 +2389,93 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
}
/* S1G Capabilities Information field */
-#define S1G_CAPAB_B0_S1G_LONG BIT(0)
-#define S1G_CAPAB_B0_SGI_1MHZ BIT(1)
-#define S1G_CAPAB_B0_SGI_2MHZ BIT(2)
-#define S1G_CAPAB_B0_SGI_4MHZ BIT(3)
-#define S1G_CAPAB_B0_SGI_8MHZ BIT(4)
-#define S1G_CAPAB_B0_SGI_16MHZ BIT(5)
-#define S1G_CAPAB_B0_SUPP_CH_WIDTH_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B0_SUPP_CH_WIDTH_SHIFT 6
-
-#define S1G_CAPAB_B1_RX_LDPC BIT(0)
-#define S1G_CAPAB_B1_TX_STBC BIT(1)
-#define S1G_CAPAB_B1_RX_STBC BIT(2)
-#define S1G_CAPAB_B1_SU_BFER BIT(3)
-#define S1G_CAPAB_B1_SU_BFEE BIT(4)
-#define S1G_CAPAB_B1_BFEE_STS_MASK (BIT(5) | BIT(6) | BIT(7))
-#define S1G_CAPAB_B1_BFEE_STS_SHIFT 5
-
-#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_MASK (BIT(0) | BIT(1) | BIT(2))
-#define S1G_CAPAB_B2_SOUNDING_DIMENSIONS_SHIFT 0
-#define S1G_CAPAB_B2_MU_BFER BIT(3)
-#define S1G_CAPAB_B2_MU_BFEE BIT(4)
-#define S1G_CAPAB_B2_PLUS_HTC_VHT BIT(5)
-#define S1G_CAPAB_B2_TRAVELING_PILOT_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B2_TRAVELING_PILOT_SHIFT 6
-
-#define S1G_CAPAB_B3_RD_RESPONDER BIT(0)
-#define S1G_CAPAB_B3_HT_DELAYED_BA BIT(1)
-#define S1G_CAPAB_B3_MAX_MPDU_LEN BIT(2)
-#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_MASK (BIT(3) | BIT(4))
-#define S1G_CAPAB_B3_MAX_AMPDU_LEN_EXP_SHIFT 3
-#define S1G_CAPAB_B3_MIN_MPDU_START_MASK (BIT(5) | BIT(6) | BIT(7))
-#define S1G_CAPAB_B3_MIN_MPDU_START_SHIFT 5
-
-#define S1G_CAPAB_B4_UPLINK_SYNC BIT(0)
-#define S1G_CAPAB_B4_DYNAMIC_AID BIT(1)
-#define S1G_CAPAB_B4_BAT BIT(2)
-#define S1G_CAPAB_B4_TIME_ADE BIT(3)
-#define S1G_CAPAB_B4_NON_TIM BIT(4)
-#define S1G_CAPAB_B4_GROUP_AID BIT(5)
-#define S1G_CAPAB_B4_STA_TYPE_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B4_STA_TYPE_SHIFT 6
-
-#define S1G_CAPAB_B5_CENT_AUTH_CONTROL BIT(0)
-#define S1G_CAPAB_B5_DIST_AUTH_CONTROL BIT(1)
-#define S1G_CAPAB_B5_AMSDU BIT(2)
-#define S1G_CAPAB_B5_AMPDU BIT(3)
-#define S1G_CAPAB_B5_ASYMMETRIC_BA BIT(4)
-#define S1G_CAPAB_B5_FLOW_CONTROL BIT(5)
-#define S1G_CAPAB_B5_SECTORIZED_BEAM_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B5_SECTORIZED_BEAM_SHIFT 6
-
-#define S1G_CAPAB_B6_OBSS_MITIGATION BIT(0)
-#define S1G_CAPAB_B6_FRAGMENT_BA BIT(1)
-#define S1G_CAPAB_B6_NDP_PS_POLL BIT(2)
-#define S1G_CAPAB_B6_RAW_OPERATION BIT(3)
-#define S1G_CAPAB_B6_PAGE_SLICING BIT(4)
-#define S1G_CAPAB_B6_TXOP_SHARING_IMP_ACK BIT(5)
-#define S1G_CAPAB_B6_VHT_LINK_ADAPT_MASK (BIT(6) | BIT(7))
-#define S1G_CAPAB_B6_VHT_LINK_ADAPT_SHIFT 6
-
-#define S1G_CAPAB_B7_TACK_AS_PS_POLL BIT(0)
-#define S1G_CAPAB_B7_DUP_1MHZ BIT(1)
-#define S1G_CAPAB_B7_MCS_NEGOTIATION BIT(2)
-#define S1G_CAPAB_B7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
-#define S1G_CAPAB_B7_NDP_BFING_REPORT_POLL BIT(4)
-#define S1G_CAPAB_B7_UNSOLICITED_DYN_AID BIT(5)
-#define S1G_CAPAB_B7_SECTOR_TRAINING_OPERATION BIT(6)
-#define S1G_CAPAB_B7_TEMP_PS_MODE_SWITCH BIT(7)
-
-#define S1G_CAPAB_B8_TWT_GROUPING BIT(0)
-#define S1G_CAPAB_B8_BDT BIT(1)
-#define S1G_CAPAB_B8_COLOR_MASK (BIT(2) | BIT(3) | BIT(4))
-#define S1G_CAPAB_B8_COLOR_SHIFT 2
-#define S1G_CAPAB_B8_TWT_REQUEST BIT(5)
-#define S1G_CAPAB_B8_TWT_RESPOND BIT(6)
-#define S1G_CAPAB_B8_PV1_FRAME BIT(7)
-
-#define S1G_CAPAB_B9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+#define IEEE80211_S1G_CAPABILITY_LEN 15
+
+#define S1G_CAP0_S1G_LONG BIT(0)
+#define S1G_CAP0_SGI_1MHZ BIT(1)
+#define S1G_CAP0_SGI_2MHZ BIT(2)
+#define S1G_CAP0_SGI_4MHZ BIT(3)
+#define S1G_CAP0_SGI_8MHZ BIT(4)
+#define S1G_CAP0_SGI_16MHZ BIT(5)
+#define S1G_CAP0_SUPP_CH_WIDTH GENMASK(7, 6)
+
+#define S1G_SUPP_CH_WIDTH_2 0
+#define S1G_SUPP_CH_WIDTH_4 1
+#define S1G_SUPP_CH_WIDTH_8 2
+#define S1G_SUPP_CH_WIDTH_16 3
+#define S1G_SUPP_CH_WIDTH_MAX(cap) ((1 << FIELD_GET(S1G_CAP0_SUPP_CH_WIDTH, \
+ cap[0])) << 1)
+
+#define S1G_CAP1_RX_LDPC BIT(0)
+#define S1G_CAP1_TX_STBC BIT(1)
+#define S1G_CAP1_RX_STBC BIT(2)
+#define S1G_CAP1_SU_BFER BIT(3)
+#define S1G_CAP1_SU_BFEE BIT(4)
+#define S1G_CAP1_BFEE_STS GENMASK(7, 5)
+
+#define S1G_CAP2_SOUNDING_DIMENSIONS GENMASK(2, 0)
+#define S1G_CAP2_MU_BFER BIT(3)
+#define S1G_CAP2_MU_BFEE BIT(4)
+#define S1G_CAP2_PLUS_HTC_VHT BIT(5)
+#define S1G_CAP2_TRAVELING_PILOT GENMASK(7, 6)
+
+#define S1G_CAP3_RD_RESPONDER BIT(0)
+#define S1G_CAP3_HT_DELAYED_BA BIT(1)
+#define S1G_CAP3_MAX_MPDU_LEN BIT(2)
+#define S1G_CAP3_MAX_AMPDU_LEN_EXP GENMASK(4, 3)
+#define S1G_CAP3_MIN_MPDU_START GENMASK(7, 5)
+
+#define S1G_CAP4_UPLINK_SYNC BIT(0)
+#define S1G_CAP4_DYNAMIC_AID BIT(1)
+#define S1G_CAP4_BAT BIT(2)
+#define S1G_CAP4_TIME_ADE BIT(3)
+#define S1G_CAP4_NON_TIM BIT(4)
+#define S1G_CAP4_GROUP_AID BIT(5)
+#define S1G_CAP4_STA_TYPE GENMASK(7, 6)
+
+#define S1G_CAP5_CENT_AUTH_CONTROL BIT(0)
+#define S1G_CAP5_DIST_AUTH_CONTROL BIT(1)
+#define S1G_CAP5_AMSDU BIT(2)
+#define S1G_CAP5_AMPDU BIT(3)
+#define S1G_CAP5_ASYMMETRIC_BA BIT(4)
+#define S1G_CAP5_FLOW_CONTROL BIT(5)
+#define S1G_CAP5_SECTORIZED_BEAM GENMASK(7, 6)
+
+#define S1G_CAP6_OBSS_MITIGATION BIT(0)
+#define S1G_CAP6_FRAGMENT_BA BIT(1)
+#define S1G_CAP6_NDP_PS_POLL BIT(2)
+#define S1G_CAP6_RAW_OPERATION BIT(3)
+#define S1G_CAP6_PAGE_SLICING BIT(4)
+#define S1G_CAP6_TXOP_SHARING_IMP_ACK BIT(5)
+#define S1G_CAP6_VHT_LINK_ADAPT GENMASK(7, 6)
+
+#define S1G_CAP7_TACK_AS_PS_POLL BIT(0)
+#define S1G_CAP7_DUP_1MHZ BIT(1)
+#define S1G_CAP7_MCS_NEGOTIATION BIT(2)
+#define S1G_CAP7_1MHZ_CTL_RESPONSE_PREAMBLE BIT(3)
+#define S1G_CAP7_NDP_BFING_REPORT_POLL BIT(4)
+#define S1G_CAP7_UNSOLICITED_DYN_AID BIT(5)
+#define S1G_CAP7_SECTOR_TRAINING_OPERATION BIT(6)
+#define S1G_CAP7_TEMP_PS_MODE_SWITCH BIT(7)
+
+#define S1G_CAP8_TWT_GROUPING BIT(0)
+#define S1G_CAP8_BDT BIT(1)
+#define S1G_CAP8_COLOR GENMASK(4, 2)
+#define S1G_CAP8_TWT_REQUEST BIT(5)
+#define S1G_CAP8_TWT_RESPOND BIT(6)
+#define S1G_CAP8_PV1_FRAME BIT(7)
+
+#define S1G_CAP9_LINK_ADAPT_PER_CONTROL_RESPONSE BIT(0)
+
+#define S1G_OPER_CH_WIDTH_PRIMARY_1MHZ BIT(0)
+#define S1G_OPER_CH_WIDTH_OPER GENMASK(4, 1)
+
+
+#define LISTEN_INT_USF GENMASK(15, 14)
+#define LISTEN_INT_UI GENMASK(13, 0)
+
+#define IEEE80211_MAX_USF FIELD_MAX(LISTEN_INT_USF)
+#define IEEE80211_MAX_UI FIELD_MAX(LISTEN_INT_UI)
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
@@ -2808,6 +2876,8 @@ enum ieee80211_eid {
WLAN_EID_REDUCED_NEIGHBOR_REPORT = 201,
+ WLAN_EID_AID_REQUEST = 210,
+ WLAN_EID_AID_RESPONSE = 211,
WLAN_EID_S1G_BCN_COMPAT = 213,
WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214,
WLAN_EID_S1G_CAPABILITIES = 217,
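
A minimal sketch of how the new S1G helpers above compose on receive; the
function and its arguments are hypothetical, not part of this patch:

    static void s1g_rx_example(__le16 frame_control, const u8 *s1g_capab)
    {
            /* Short beacons are S1G beacons with the next TBTT bit set */
            if (ieee80211_is_s1g_short_beacon(frame_control))
                    pr_debug("S1G short beacon\n");

            /* S1G_SUPP_CH_WIDTH_MAX() decodes capability byte 0:
             * field values 0..3 map to 2/4/8/16 MHz
             */
            pr_debug("peer channel width: up to %d MHz\n",
                     S1G_SUPP_CH_WIDTH_MAX(s1g_capab));
    }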
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 6479a38e52fa..556caed00258 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -19,7 +19,13 @@ struct br_ip {
#if IS_ENABLED(CONFIG_IPV6)
struct in6_addr ip6;
#endif
- } u;
+ } src;
+ union {
+ __be32 ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ip6;
+#endif
+ } dst;
__be16 proto;
__u16 vid;
};
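
The old anonymous `u` union in struct br_ip becomes explicit `src` and `dst`
halves, so a multicast group entry can also carry a source address. For
illustration only (addresses and VLAN made up), an IPv4 (S, G) pair now maps
onto the struct like this:

    struct br_ip ip = {
            .proto   = htons(ETH_P_IP),
            .src.ip4 = htonl(0xc0a80001),   /* S = 192.168.0.1 */
            .dst.ip4 = htonl(0xe0000101),   /* G = 224.0.1.1 */
            .vid     = 10,
    };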
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h
index 5bda8cf457b6..2a7660843444 100644
--- a/include/linux/if_tun.h
+++ b/include/linux/if_tun.h
@@ -27,9 +27,18 @@ struct tun_xdp_hdr {
#if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE)
struct socket *tun_get_socket(struct file *);
struct ptr_ring *tun_get_tx_ring(struct file *file);
-bool tun_is_xdp_frame(void *ptr);
-void *tun_xdp_to_ptr(void *ptr);
-void *tun_ptr_to_xdp(void *ptr);
+static inline bool tun_is_xdp_frame(void *ptr)
+{
+ return (unsigned long)ptr & TUN_XDP_FLAG;
+}
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
+{
+ return (void *)((unsigned long)xdp | TUN_XDP_FLAG);
+}
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
+{
+ return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
+}
void tun_ptr_free(void *ptr);
#else
#include <linux/err.h>
@@ -48,11 +57,11 @@ static inline bool tun_is_xdp_frame(void *ptr)
{
return false;
}
-static inline void *tun_xdp_to_ptr(void *ptr)
+static inline void *tun_xdp_to_ptr(struct xdp_frame *xdp)
{
return NULL;
}
-static inline void *tun_ptr_to_xdp(void *ptr)
+static inline struct xdp_frame *tun_ptr_to_xdp(void *ptr)
{
return NULL;
}
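
Inlining tun_is_xdp_frame()/tun_xdp_to_ptr()/tun_ptr_to_xdp() exposes the
tagged-pointer scheme they implement: TUN_XDP_FLAG is OR-ed into the low bits
of an xdp_frame pointer so one ptr_ring can carry both sk_buffs and XDP
frames. A hedged consumer sketch, assuming `ring` came from
tun_get_tx_ring():

    void *ptr = ptr_ring_consume(ring);

    if (!ptr)
            return;
    if (tun_is_xdp_frame(ptr)) {
            struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr); /* strip the tag */
            /* ... transmit the XDP frame ... */
    } else {
            struct sk_buff *skb = ptr;      /* untagged pointers are skbs */
            /* ... transmit the skb ... */
    }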
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 0ef2d800fda7..84abb30a3fbb 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -75,6 +75,8 @@ static inline size_t inet_diag_msg_attrs_size(void)
#ifdef CONFIG_SOCK_CGROUP_DATA
+ nla_total_size_64bit(sizeof(u64)) /* INET_DIAG_CGROUP_ID */
#endif
+ + nla_total_size(sizeof(struct inet_diag_sockopt))
+ /* INET_DIAG_SOCKOPT */
;
}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index a44789d027cc..dda61d150a13 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -177,17 +177,6 @@ static inline int inet6_sdif(const struct sk_buff *skb)
return 0;
}
-/* can not be used in TCP layer after tcp_v6_fill_cb */
-static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb)
-{
-#if defined(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
- return true;
-#endif
- return false;
-}
-
struct tcp6_request_sock {
struct tcp_request_sock tcp6rsk_tcp;
};
@@ -345,17 +334,6 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
return (struct raw6_sock *)sk;
}
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- int ancestor_size = sizeof(struct inet_sock);
-
- if (sk_from->sk_family == PF_INET6)
- ancestor_size += sizeof(struct ipv6_pinfo);
-
- __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
-}
-
#define __ipv6_only_sock(sk) (sk->sk_ipv6only)
#define ipv6_only_sock(sk) (__ipv6_only_sock(sk))
#define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index 898cbf00332a..dbd69b3d170b 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -306,7 +306,7 @@ static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising)
/**
* mii_10gbt_stat_mod_linkmode_lpa_t
* @advertising: target the linkmode advertisement settings
- * @adv: value of the C45 10GBASE-T AN STATUS register
+ * @lpa: value of the C45 10GBASE-T AN STATUS register
*
* A small helper function that translates C45 10GBASE-T AN STATUS register bits
* to linkmode advertisement settings. Other bits in advertising aren't changed.
@@ -358,6 +358,12 @@ static inline int mdiobus_c45_read(struct mii_bus *bus, int prtad, int devad,
return mdiobus_read(bus, prtad, mdiobus_c45_addr(devad, regnum));
}
+static inline int mdiobus_c45_write(struct mii_bus *bus, int prtad, int devad,
+ u16 regnum, u16 val)
+{
+ return mdiobus_write(bus, prtad, mdiobus_c45_addr(devad, regnum), val);
+}
+
int mdiobus_register_device(struct mdio_device *mdiodev);
int mdiobus_unregister_device(struct mdio_device *mdiodev);
bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
@@ -365,6 +371,7 @@ struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
* mdio_module_driver() - Helper macro for registering mdio drivers
+ * @_mdio_driver: driver to register
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
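
The new mdiobus_c45_write() mirrors mdiobus_c45_read() above, packing the MMD
and register number into a single Clause 45 address with mdiobus_c45_addr().
A hedged read-modify-write sketch (the register and bit choice are
illustrative):

    static int pma_enter_low_power(struct mii_bus *bus, int addr)
    {
            int val = mdiobus_c45_read(bus, addr, MDIO_MMD_PMAPMD, MDIO_CTRL1);

            if (val < 0)
                    return val;
            return mdiobus_c45_write(bus, addr, MDIO_MMD_PMAPMD, MDIO_CTRL1,
                                     val | MDIO_CTRL1_LPOWER);
    }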
diff --git a/drivers/net/phy/mdio-i2c.h b/include/linux/mdio/mdio-i2c.h
index b1d27f7cd23f..b1d27f7cd23f 100644
--- a/drivers/net/phy/mdio-i2c.h
+++ b/include/linux/mdio/mdio-i2c.h
diff --git a/drivers/net/phy/mdio-xgene.h b/include/linux/mdio/mdio-xgene.h
index 8af93ada8b64..8af93ada8b64 100644
--- a/drivers/net/phy/mdio-xgene.h
+++ b/include/linux/mdio/mdio-xgene.h
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 75f880c25bb8..416ee6dd2574 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -27,6 +27,7 @@
#define PHY_ID_KSZ8061 0x00221570
#define PHY_ID_KSZ9031 0x00221620
#define PHY_ID_KSZ9131 0x00221640
+#define PHY_ID_LAN8814 0x00221660
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4d3376e20f5e..cf824366a7d1 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -366,6 +366,7 @@ enum {
enum {
MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
+ MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,
};
@@ -816,7 +817,7 @@ struct mlx5_mini_cqe8 {
__be32 rx_hash_result;
struct {
__be16 checksum;
- __be16 rsvd;
+ __be16 stridx;
};
struct {
__be16 wqe_counter;
@@ -836,6 +837,7 @@ enum {
enum {
MLX5_CQE_FORMAT_CSUM = 0x1,
+ MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,
};
#define MLX5_MINI_CQE_ARRAY_SIZE 8
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 372100c755e7..add85094f9a5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -501,6 +501,7 @@ struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
+struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
@@ -578,6 +579,7 @@ struct mlx5_priv {
struct mlx5_core_sriov sriov;
struct mlx5_lag *lag;
struct mlx5_devcom *devcom;
+ struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
struct mlx5_rl_table rl_table;
@@ -643,7 +645,6 @@ struct mlx5_pps {
};
struct mlx5_clock {
- struct mlx5_core_dev *mdev;
struct mlx5_nb pps_nb;
seqlock_t lock;
struct cyclecounter cycles;
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index c16827eeba9c..b0ae8020f13e 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -74,15 +74,16 @@ bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
/* Reg C0 usage:
- * Reg C0 = < ESW_VHCA_ID_BITS(8) | ESW_VPORT BITS(8) | ESW_CHAIN_TAG(16) >
+ * Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT BITS(12) | ESW_CHAIN_TAG(16) >
*
- * Highest 8 bits of the reg c0 is the vhca_id, next 8 bits is vport_num,
- * the rest (lowest 16 bits) is left for tc chain tag restoration.
- * VHCA_ID + VPORT comprise the SOURCE_PORT matching.
+ * Highest 4 bits of the reg c0 is the PF_NUM (range 0-15), 12 bits of
+ * unique non-zero vport id (range 1-4095). The rest (lowest 16 bits) is left
+ * for tc chain tag restoration.
+ * PFNUM + VPORT comprise the SOURCE_PORT matching.
*/
-#define ESW_VHCA_ID_BITS 8
-#define ESW_VPORT_BITS 8
-#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS)
+#define ESW_VPORT_BITS 12
+#define ESW_PFNUM_BITS 4
+#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 92d991d93757..846d94ad04bc 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -76,6 +76,7 @@ enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_SNIFFER_RX,
MLX5_FLOW_NAMESPACE_SNIFFER_TX,
MLX5_FLOW_NAMESPACE_EGRESS,
+ MLX5_FLOW_NAMESPACE_EGRESS_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_RX,
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
MLX5_FLOW_NAMESPACE_RDMA_TX,
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 36492a1342cf..d75ef8aa8fac 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -245,6 +245,10 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_UDP = 1 << 5,
};
+enum {
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+};
+
struct mlx5_wqe_eth_seg {
u8 swp_outer_l4_offset;
u8 swp_outer_l3_offset;
@@ -253,7 +257,7 @@ struct mlx5_wqe_eth_seg {
u8 cs_flags;
u8 swp_flags;
__be16 mss;
- __be32 rsvd2;
+ __be32 flow_table_metadata;
union {
struct {
__be16 sz;
diff --git a/include/linux/net.h b/include/linux/net.h
index ae713c851342..0dcd51feef02 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -42,6 +42,8 @@ struct net;
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
+#define PROTO_CMSG_DATA_ONLY 0x0001
+
#ifndef ARCH_HAS_SOCKET_TYPES
/**
* enum sock_type - Socket types
@@ -136,6 +138,7 @@ typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
struct proto_ops {
int family;
+ unsigned int flags;
struct module *owner;
int (*release) (struct socket *sock);
int (*bind) (struct socket *sock,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 18dec08439f9..964b494b0e8d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -70,6 +70,7 @@ struct udp_tunnel_nic;
struct bpf_prog;
struct xdp_buff;
+void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -211,9 +212,8 @@ struct netdev_hw_addr {
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
#define NETDEV_HW_ADDR_T_SAN 2
-#define NETDEV_HW_ADDR_T_SLAVE 3
-#define NETDEV_HW_ADDR_T_UNICAST 4
-#define NETDEV_HW_ADDR_T_MULTICAST 5
+#define NETDEV_HW_ADDR_T_UNICAST 3
+#define NETDEV_HW_ADDR_T_MULTICAST 4
bool global_use;
int sync_cnt;
int refcount;
@@ -354,7 +354,7 @@ enum {
NAPI_STATE_MISSED, /* reschedule a napi */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
- NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */
+ NAPI_STATE_LISTED, /* NAPI added to system lists */
NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */
NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */
};
@@ -364,7 +364,7 @@ enum {
NAPIF_STATE_MISSED = BIT(NAPI_STATE_MISSED),
NAPIF_STATE_DISABLE = BIT(NAPI_STATE_DISABLE),
NAPIF_STATE_NPSVC = BIT(NAPI_STATE_NPSVC),
- NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED),
+ NAPIF_STATE_LISTED = BIT(NAPI_STATE_LISTED),
NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL),
NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL),
};
@@ -489,20 +489,6 @@ static inline bool napi_complete(struct napi_struct *n)
}
/**
- * napi_hash_del - remove a NAPI from global table
- * @napi: NAPI context
- *
- * Warning: caller must observe RCU grace period
- * before freeing memory containing @napi, if
- * this function returns true.
- * Note: core networking stack automatically calls it
- * from netif_napi_del().
- * Drivers might want to call this helper to combine all
- * the needed RCU grace periods into a single one.
- */
-bool napi_hash_del(struct napi_struct *napi);
-
-/**
* napi_disable - prevent NAPI from scheduling
* @n: NAPI context
*
@@ -618,7 +604,7 @@ struct netdev_queue {
/* Subordinate device that the queue has been assigned to */
struct net_device *sb_dev;
#ifdef CONFIG_XDP_SOCKETS
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
#endif
/*
* write-mostly part
@@ -640,11 +626,16 @@ struct netdev_queue {
extern int sysctl_fb_tunnels_only_for_init_net;
extern int sysctl_devconf_inherit_init_net;
+/*
+ * sysctl_fb_tunnels_only_for_init_net == 0 : For all netns
+ * == 1 : For initns only
+ * == 2 : For none.
+ */
static inline bool net_has_fallback_tunnels(const struct net *net)
{
- return net == &init_net ||
- !IS_ENABLED(CONFIG_SYSCTL) ||
- !sysctl_fb_tunnels_only_for_init_net;
+ return !IS_ENABLED(CONFIG_SYSCTL) ||
+ !sysctl_fb_tunnels_only_for_init_net ||
+ (net == &init_net && sysctl_fb_tunnels_only_for_init_net == 1);
}
static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
@@ -751,7 +742,7 @@ struct netdev_rx_queue {
struct net_device *dev;
struct xdp_rxq_info xdp_rxq;
#ifdef CONFIG_XDP_SOCKETS
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
#endif
} ____cacheline_aligned_in_smp;
@@ -879,7 +870,7 @@ enum bpf_netdev_command {
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_MAP_ALLOC,
BPF_OFFLOAD_MAP_FREE,
- XDP_SETUP_XSK_UMEM,
+ XDP_SETUP_XSK_POOL,
};
struct bpf_prog_offload_ops;
@@ -913,9 +904,9 @@ struct netdev_bpf {
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_SETUP_XSK_UMEM */
+ /* XDP_SETUP_XSK_POOL */
struct {
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
u16 queue_id;
} xsk;
};
@@ -1285,6 +1276,9 @@ struct netdev_net_notifier {
* int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p,
* int cmd);
* Add, change, delete or get information on an IPv4 tunnel.
+ * struct net_device *(*ndo_get_peer_dev)(struct net_device *dev);
+ * If a device is paired with a peer device, return the peer instance.
+ * The caller must hold the RCU read lock.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -1492,6 +1486,7 @@ struct net_device_ops {
struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev);
int (*ndo_tunnel_ctl)(struct net_device *dev,
struct ip_tunnel_parm *p, int cmd);
+ struct net_device * (*ndo_get_peer_dev)(struct net_device *dev);
};
/**
@@ -2208,6 +2203,22 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+static inline void net_prefetch(void *p)
+{
+ prefetch(p);
+#if L1_CACHE_BYTES < 128
+ prefetch((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
+static inline void net_prefetchw(void *p)
+{
+ prefetchw(p);
+#if L1_CACHE_BYTES < 128
+ prefetchw((u8 *)p + L1_CACHE_BYTES);
+#endif
+}
+
void netdev_unbind_sb_channel(struct net_device *dev,
struct net_device *sb_dev);
int netdev_bind_sb_channel_queue(struct net_device *dev,
@@ -2363,12 +2374,26 @@ static inline void netif_tx_napi_add(struct net_device *dev,
}
/**
+ * __netif_napi_del - remove a NAPI context
+ * @napi: NAPI context
+ *
+ * Warning: caller must observe RCU grace period before freeing memory
+ * containing @napi. Drivers might want to call this helper to combine
+ * all the needed RCU grace periods into a single one.
+ */
+void __netif_napi_del(struct napi_struct *napi);
+
+/**
* netif_napi_del - remove a NAPI context
* @napi: NAPI context
*
* netif_napi_del() removes a NAPI context from the network device NAPI list
*/
-void netif_napi_del(struct napi_struct *napi);
+static inline void netif_napi_del(struct napi_struct *napi)
+{
+ __netif_napi_del(napi);
+ synchronize_net();
+}
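
With the split above, netif_napi_del() keeps its old one-call semantics,
while __netif_napi_del() lets drivers batch teardown under a single grace
period (which is also why synchronize_net() gained a forward declaration near
the top of this header). A sketch, assuming a driver-private ring array:

    for (i = 0; i < priv->num_rings; i++)
            __netif_napi_del(&priv->ring[i].napi);

    synchronize_net();      /* one grace period covers all deletions */

    kvfree(priv->ring);     /* now safe: no RCU readers see the NAPIs */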
struct napi_gro_cb {
/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
@@ -2522,6 +2547,16 @@ struct pcpu_lstats {
void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+static inline void dev_sw_netstats_rx_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_bytes += len;
+ tstats->rx_packets++;
+ u64_stats_update_end(&tstats->syncp);
+}
+
static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
{
struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
@@ -2792,7 +2827,6 @@ static inline void unregister_netdevice(struct net_device *dev)
int netdev_refcnt_read(const struct net_device *dev);
void free_netdev(struct net_device *dev);
void netdev_freemem(struct net_device *dev);
-void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
struct net_device *netdev_get_xmit_slave(struct net_device *dev,
@@ -3777,6 +3811,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog);
int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb);
int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
+int netif_rx_any_context(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
void netif_receive_skb_list(struct list_head *head);
@@ -4464,6 +4499,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *storage);
void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
const struct net_device_stats *netdev_stats);
+void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
+ const struct pcpu_sw_netstats __percpu *netstats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
@@ -4704,16 +4741,6 @@ int netdev_class_create_file_ns(const struct class_attribute *class_attr,
void netdev_class_remove_file_ns(const struct class_attribute *class_attr,
const void *ns);
-static inline int netdev_class_create_file(const struct class_attribute *class_attr)
-{
- return netdev_class_create_file_ns(class_attr, NULL);
-}
-
-static inline void netdev_class_remove_file(const struct class_attribute *class_attr)
-{
- netdev_class_remove_file_ns(class_attr, NULL);
-}
-
extern const struct kobj_ns_type_operations net_ns_type_operations;
const char *netdev_drivername(const struct net_device *dev);
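
dev_sw_netstats_rx_add() and dev_fetch_sw_netstats() factor out the per-CPU
tstats bookkeeping that many tunnel drivers open-coded, and
net_prefetch()/net_prefetchw() widen a prefetch to a second cache line on
CPUs whose L1 lines are smaller than 128 bytes. A hedged receive/stats sketch
for a hypothetical driver:

    static void foo_rx(struct net_device *dev, struct sk_buff *skb)
    {
            net_prefetch(skb->data);                /* headers may span 64B */
            dev_sw_netstats_rx_add(dev, skb->len);
            netif_rx_any_context(skb);              /* new: any IRQ context */
    }

    static void foo_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *s)
    {
            dev_fetch_sw_netstats(s, dev->tstats);  /* fold per-CPU counters */
    }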
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 1db83c931d9c..0c7d8d1e945d 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -8,9 +8,9 @@
struct ip_conntrack_stat {
unsigned int found;
unsigned int invalid;
- unsigned int ignore;
unsigned int insert;
unsigned int insert_failed;
+ unsigned int clash_resolve;
unsigned int drop;
unsigned int early_drop;
unsigned int error;
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index e3e49f0e5c13..666cd0390699 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -68,12 +68,14 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
* @_msg: message string to report - don't access directly, use
* %NL_SET_ERR_MSG
* @bad_attr: attribute with error
+ * @policy: policy for a bad attribute
* @cookie: cookie data to return to userspace (for success)
* @cookie_len: actual cookie data length
*/
struct netlink_ext_ack {
const char *_msg;
const struct nlattr *bad_attr;
+ const struct nla_policy *policy;
u8 cookie[NETLINK_MAX_COOKIE_LEN];
u8 cookie_len;
};
@@ -95,21 +97,29 @@ struct netlink_ext_ack {
#define NL_SET_ERR_MSG_MOD(extack, msg) \
NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
-#define NL_SET_BAD_ATTR(extack, attr) do { \
- if ((extack)) \
+#define NL_SET_BAD_ATTR_POLICY(extack, attr, pol) do { \
+ if ((extack)) { \
(extack)->bad_attr = (attr); \
+ (extack)->policy = (pol); \
+ } \
} while (0)
-#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) do { \
- static const char __msg[] = msg; \
- struct netlink_ext_ack *__extack = (extack); \
- \
- if (__extack) { \
- __extack->_msg = __msg; \
- __extack->bad_attr = (attr); \
- } \
+#define NL_SET_BAD_ATTR(extack, attr) NL_SET_BAD_ATTR_POLICY(extack, attr, NULL)
+
+#define NL_SET_ERR_MSG_ATTR_POL(extack, attr, pol, msg) do { \
+ static const char __msg[] = msg; \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (__extack) { \
+ __extack->_msg = __msg; \
+ __extack->bad_attr = (attr); \
+ __extack->policy = (pol); \
+ } \
} while (0)
+#define NL_SET_ERR_MSG_ATTR(extack, attr, msg) \
+ NL_SET_ERR_MSG_ATTR_POL(extack, attr, NULL, msg)
+
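
NL_SET_ERR_MSG_ATTR_POL() records the nla_policy alongside the bad attribute
so userspace can learn the acceptable range, while the old macros become thin
wrappers passing a NULL policy. A hedged error-path sketch (the FOO_* names
are made up; `tb` and `extack` come from the usual nla_parse() flow):

    static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
            [FOO_ATTR_MTU] = NLA_POLICY_RANGE(NLA_U32, 68, 65535),
    };

    if (mtu < 68 || mtu > 65535) {
            NL_SET_ERR_MSG_ATTR_POL(extack, tb[FOO_ATTR_MTU],
                                    &foo_policy[FOO_ATTR_MTU],
                                    "MTU out of range");
            return -EINVAL;
    }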
static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
u64 cookie)
{
diff --git a/include/linux/of.h b/include/linux/of.h
index 5cf7ae0465d1..481ec0467285 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -929,6 +929,11 @@ static inline int of_machine_is_compatible(const char *compat)
return 0;
}
+static inline int of_remove_property(struct device_node *np, struct property *prop)
+{
+ return 0;
+}
+
static inline bool of_console_check(const struct device_node *dn, const char *name, int index)
{
return false;
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 1efb88d9f892..cfe8c607a628 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -17,6 +17,7 @@ bool of_mdiobus_child_is_phy(struct device_node *child);
int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
struct device_node *np);
+struct mdio_device *of_mdio_find_device(struct device_node *np);
struct phy_device *of_phy_find_device(struct device_node *phy_np);
struct phy_device *
of_phy_connect(struct net_device *dev, struct device_node *phy_np,
@@ -74,6 +75,11 @@ static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *
return mdiobus_register(mdio);
}
+static inline struct mdio_device *of_mdio_find_device(struct device_node *np)
+{
+ return NULL;
+}
+
static inline struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
return NULL;
diff --git a/include/linux/pcs-lynx.h b/include/linux/pcs-lynx.h
new file mode 100644
index 000000000000..a6440d6ebe95
--- /dev/null
+++ b/include/linux/pcs-lynx.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2020 NXP
+ * Lynx PCS helpers
+ */
+
+#ifndef __LINUX_PCS_LYNX_H
+#define __LINUX_PCS_LYNX_H
+
+#include <linux/mdio.h>
+#include <linux/phylink.h>
+
+struct lynx_pcs {
+ struct phylink_pcs pcs;
+ struct mdio_device *mdio;
+};
+
+struct lynx_pcs *lynx_pcs_create(struct mdio_device *mdio);
+
+void lynx_pcs_destroy(struct lynx_pcs *pcs);
+
+#endif /* __LINUX_PCS_LYNX_H */
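
A Lynx PCS is created from the mdio_device that represents it, which pairs
naturally with the of_mdio_find_device() helper added elsewhere in this
series. A hedged probe-path sketch (`pcs_np` resolved from a DT phandle,
`priv` hypothetical):

    struct mdio_device *mdio = of_mdio_find_device(pcs_np);
    struct lynx_pcs *pcs;

    if (!mdio)
            return -EPROBE_DEFER;

    pcs = lynx_pcs_create(mdio);
    if (!pcs)
            return -ENOMEM;

    phylink_set_pcs(priv->phylink, &pcs->pcs);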
diff --git a/include/linux/mdio-xpcs.h b/include/linux/pcs/pcs-xpcs.h
index 9a841aa5982d..351c1c9aedc5 100644
--- a/include/linux/mdio-xpcs.h
+++ b/include/linux/pcs/pcs-xpcs.h
@@ -4,8 +4,8 @@
* Synopsys DesignWare XPCS helpers
*/
-#ifndef __LINUX_MDIO_XPCS_H
-#define __LINUX_MDIO_XPCS_H
+#ifndef __LINUX_PCS_XPCS_H
+#define __LINUX_PCS_XPCS_H
#include <linux/phy.h>
#include <linux/phylink.h>
@@ -29,7 +29,7 @@ struct mdio_xpcs_ops {
int (*probe)(struct mdio_xpcs_args *xpcs, phy_interface_t interface);
};
-#if IS_ENABLED(CONFIG_MDIO_XPCS)
+#if IS_ENABLED(CONFIG_PCS_XPCS)
struct mdio_xpcs_ops *mdio_xpcs_get_ops(void);
#else
static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
@@ -38,4 +38,4 @@ static inline struct mdio_xpcs_ops *mdio_xpcs_get_ops(void)
}
#endif
-#endif /* __LINUX_MDIO_XPCS_H */
+#endif /* __LINUX_PCS_XPCS_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 3a09d2bf69ea..eb3cb1a98b45 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -82,7 +82,39 @@ extern const int phy_10gbit_features_array[1];
#define PHY_POLL_CABLE_TEST 0x00000004
#define MDIO_DEVICE_IS_PHY 0x80000000
-/* Interface Mode definitions */
+/**
+ * enum phy_interface_t - Interface Mode definitions
+ *
+ * @PHY_INTERFACE_MODE_NA: Not Applicable - don't touch
+ * @PHY_INTERFACE_MODE_INTERNAL: No interface, MAC and PHY combined
+ * @PHY_INTERFACE_MODE_MII: Media-independent interface
+ * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
+ * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
+ * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
+ * @PHY_INTERFACE_MODE_RGMII: Reduced gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_RGMII_ID: RGMII with Internal RX+TX delay
+ * @PHY_INTERFACE_MODE_RGMII_RXID: RGMII with Internal RX delay
+ * @PHY_INTERFACE_MODE_RGMII_TXID: RGMII with Internal TX delay
+ * @PHY_INTERFACE_MODE_RTBI: Reduced TBI
+ * @PHY_INTERFACE_MODE_SMII: ??? MII
+ * @PHY_INTERFACE_MODE_XGMII: 10 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_XLGMII: 40 gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_MOCA: Multimedia over Coax
+ * @PHY_INTERFACE_MODE_QSGMII: Quad SGMII
+ * @PHY_INTERFACE_MODE_TRGMII: Turbo RGMII
+ * @PHY_INTERFACE_MODE_1000BASEX: 1000 BaseX
+ * @PHY_INTERFACE_MODE_2500BASEX: 2500 BaseX
+ * @PHY_INTERFACE_MODE_RXAUI: Reduced XAUI
+ * @PHY_INTERFACE_MODE_XAUI: 10 Gigabit Attachment Unit Interface
+ * @PHY_INTERFACE_MODE_10GBASER: 10G BaseR
+ * @PHY_INTERFACE_MODE_USXGMII: Universal Serial 10GE MII
+ * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN
+ * @PHY_INTERFACE_MODE_MAX: Bookkeeping
+ *
+ * Describes the interface between the MAC and PHY.
+ */
typedef enum {
PHY_INTERFACE_MODE_NA,
PHY_INTERFACE_MODE_INTERNAL,
@@ -116,8 +148,8 @@ typedef enum {
} phy_interface_t;
/**
- * phy_supported_speeds - return all speeds currently supported by a phy device
- * @phy: The phy device to return supported speeds of.
+ * phy_supported_speeds - return all speeds currently supported by a PHY device
+ * @phy: The PHY device to return supported speeds of.
* @speeds: buffer to store supported speeds in.
* @size: size of speeds buffer.
*
@@ -134,9 +166,9 @@ unsigned int phy_supported_speeds(struct phy_device *phy,
* phy_modes - map phy_interface_t enum to device tree binding of phy-mode
* @interface: enum phy_interface_t value
*
- * Description: maps 'enum phy_interface_t' defined in this file
+ * Description: maps enum &phy_interface_t defined in this file
* into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
+ * device driver can get PHY interface from device tree.
*/
static inline const char *phy_modes(phy_interface_t interface)
{
@@ -215,6 +247,14 @@ struct sfp_bus;
struct sfp_upstream_ops;
struct sk_buff;
+/**
+ * struct mdio_bus_stats - Statistics counters for MDIO busses
+ * @transfers: Total number of transfers, i.e. @writes + @reads
+ * @errors: Number of MDIO transfers that returned an error
+ * @writes: Number of write transfers
+ * @reads: Number of read transfers
+ * @syncp: Synchronisation for incrementing statistics
+ */
struct mdio_bus_stats {
u64_stats_t transfers;
u64_stats_t errors;
@@ -224,7 +264,15 @@ struct mdio_bus_stats {
struct u64_stats_sync syncp;
};
-/* Represents a shared structure between different phydev's in the same
+/**
+ * struct phy_package_shared - Shared information in PHY packages
+ * @addr: Common PHY address used to combine PHYs in one package
+ * @refcnt: Number of PHYs connected to this shared data
+ * @flags: Initialization of PHY package
+ * @priv_size: Size of the shared private data @priv
+ * @priv: Driver private data shared across a PHY package
+ *
+ * Represents a shared structure between different phydev's in the same
* package, for example a quad PHY. See phy_package_join() and
* phy_package_leave().
*/
@@ -247,7 +295,14 @@ struct phy_package_shared {
#define PHY_SHARED_F_INIT_DONE 0
#define PHY_SHARED_F_PROBE_DONE 1
-/*
+/**
+ * struct mii_bus - Represents an MDIO bus
+ *
+ * @owner: Who owns this device
+ * @name: User friendly name for this MDIO device, or driver name
+ * @id: Unique identifier for this bus, typically from the bus hierarchy
+ * @priv: Driver private data
+ *
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
*/
@@ -256,49 +311,58 @@ struct mii_bus {
const char *name;
char id[MII_BUS_ID_SIZE];
void *priv;
+ /** @read: Perform a read transfer on the bus */
int (*read)(struct mii_bus *bus, int addr, int regnum);
+ /** @write: Perform a write transfer on the bus */
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
+ /** @reset: Perform a reset of the bus */
int (*reset)(struct mii_bus *bus);
+
+ /** @stats: Statistic counters per device on the bus */
struct mdio_bus_stats stats[PHY_MAX_ADDR];
- /*
- * A lock to ensure that only one thing can read/write
+ /**
+ * @mdio_lock: A lock to ensure that only one thing can read/write
* the MDIO bus at a time
*/
struct mutex mdio_lock;
+ /** @parent: Parent device of this bus */
struct device *parent;
+ /** @state: State of bus structure */
enum {
MDIOBUS_ALLOCATED = 1,
MDIOBUS_REGISTERED,
MDIOBUS_UNREGISTERED,
MDIOBUS_RELEASED,
} state;
+
+ /** @dev: Kernel device representation */
struct device dev;
- /* list of all PHYs on bus */
+ /** @mdio_map: list of all MDIO devices on bus */
struct mdio_device *mdio_map[PHY_MAX_ADDR];
- /* PHY addresses to be ignored when probing */
+ /** @phy_mask: PHY addresses to be ignored when probing */
u32 phy_mask;
- /* PHY addresses to ignore the TA/read failure */
+ /** @phy_ignore_ta_mask: PHY addresses to ignore the TA/read failure */
u32 phy_ignore_ta_mask;
- /*
- * An array of interrupts, each PHY's interrupt at the index
+ /**
+ * @irq: An array of interrupts, each PHY's interrupt at the index
* matching its address
*/
int irq[PHY_MAX_ADDR];
- /* GPIO reset pulse width in microseconds */
+ /** @reset_delay_us: GPIO reset pulse width in microseconds */
int reset_delay_us;
- /* GPIO reset deassert delay in microseconds */
+ /** @reset_post_delay_us: GPIO reset deassert delay in microseconds */
int reset_post_delay_us;
- /* RESET GPIO descriptor pointer */
+ /** @reset_gpiod: Reset GPIO descriptor pointer */
struct gpio_desc *reset_gpiod;
- /* bus capabilities, used for probing */
+ /** @probe_capabilities: bus capabilities, used for probing */
enum {
MDIOBUS_NO_CAP = 0,
MDIOBUS_C22,
@@ -306,15 +370,22 @@ struct mii_bus {
MDIOBUS_C22_C45,
} probe_capabilities;
- /* protect access to the shared element */
+ /** @shared_lock: protect access to the shared element */
struct mutex shared_lock;
- /* shared state across different PHYs */
+ /** @shared: shared state across different PHYs */
struct phy_package_shared *shared[PHY_MAX_ADDR];
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
-struct mii_bus *mdiobus_alloc_size(size_t);
+struct mii_bus *mdiobus_alloc_size(size_t size);
+
+/**
+ * mdiobus_alloc - Allocate an MDIO bus structure
+ *
+ * The internal state of the MDIO bus will be set to MDIOBUS_ALLOCATED, ready
+ * for the driver to register the bus.
+ */
static inline struct mii_bus *mdiobus_alloc(void)
{
return mdiobus_alloc_size(0);
@@ -341,40 +412,41 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
#define PHY_INTERRUPT_DISABLED false
#define PHY_INTERRUPT_ENABLED true
-/* PHY state machine states:
+/**
+ * enum phy_state - PHY state machine states:
*
- * DOWN: PHY device and driver are not ready for anything. probe
+ * @PHY_DOWN: PHY device and driver are not ready for anything. probe
* should be called if and only if the PHY is in this state,
* given that the PHY device exists.
- * - PHY driver probe function will set the state to READY
+ * - PHY driver probe function will set the state to @PHY_READY
*
- * READY: PHY is ready to send and receive packets, but the
+ * @PHY_READY: PHY is ready to send and receive packets, but the
* controller is not. By default, PHYs which do not implement
* probe will be set to this state by phy_probe().
* - start will set the state to UP
*
- * UP: The PHY and attached device are ready to do work.
+ * @PHY_UP: The PHY and attached device are ready to do work.
* Interrupts should be started here.
- * - timer moves to NOLINK or RUNNING
+ * - timer moves to @PHY_NOLINK or @PHY_RUNNING
*
- * NOLINK: PHY is up, but not currently plugged in.
- * - irq or timer will set RUNNING if link comes back
- * - phy_stop moves to HALTED
+ * @PHY_NOLINK: PHY is up, but not currently plugged in.
+ * - irq or timer will set @PHY_RUNNING if link comes back
+ * - phy_stop moves to @PHY_HALTED
*
- * RUNNING: PHY is currently up, running, and possibly sending
+ * @PHY_RUNNING: PHY is currently up, running, and possibly sending
* and/or receiving packets
- * - irq or timer will set NOLINK if link goes down
- * - phy_stop moves to HALTED
+ * - irq or timer will set @PHY_NOLINK if link goes down
+ * - phy_stop moves to @PHY_HALTED
*
- * CABLETEST: PHY is performing a cable test. Packet reception/sending
+ * @PHY_CABLETEST: PHY is performing a cable test. Packet reception/sending
 * is not expected to work, carrier will be indicated as down. The PHY will
 * be polled once per second, or on interrupt, for its current state.
 * Once complete, move to @PHY_UP to restart the PHY.
- * - phy_stop aborts the running test and moves to HALTED
+ * - phy_stop aborts the running test and moves to @PHY_HALTED
*
- * HALTED: PHY is up, but no polling or interrupts are done. Or
+ * @PHY_HALTED: PHY is up, but no polling or interrupts are done. Or
* PHY is in an error state.
- * - phy_start moves to UP
+ * - phy_start moves to @PHY_UP
*/
enum phy_state {
PHY_DOWN = 0,
@@ -403,34 +475,67 @@ struct phy_c45_device_ids {
struct macsec_context;
struct macsec_ops;
-/* phy_device: An instance of a PHY
+/**
+ * struct phy_device - An instance of a PHY
*
- * drv: Pointer to the driver for this PHY instance
- * phy_id: UID for this device found during discovery
- * c45_ids: 802.3-c45 Device Identifers if is_c45.
- * is_c45: Set to true if this phy uses clause 45 addressing.
- * is_internal: Set to true if this phy is internal to a MAC.
- * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
- * is_gigabit_capable: Set to true if PHY supports 1000Mbps
- * has_fixups: Set to true if this phy has fixups/quirks.
- * suspended: Set to true if this phy has been suspended successfully.
- * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
- * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
- * loopback_enabled: Set true if this phy has been loopbacked successfully.
- * downshifted_rate: Set true if link speed has been downshifted.
- * state: state of the PHY for management purposes
- * dev_flags: Device-specific flags used by the PHY driver.
- * irq: IRQ number of the PHY's interrupt (-1 if none)
- * phy_timer: The timer for handling the state machine
- * sfp_bus_attached: flag indicating whether the SFP bus has been attached
- * sfp_bus: SFP bus attached to this PHY's fiber port
- * attached_dev: The attached enet driver's device instance ptr
- * adjust_link: Callback for the enet controller to respond to
- * changes in the link state.
- * macsec_ops: MACsec offloading ops.
+ * @mdio: MDIO bus this PHY is on
+ * @drv: Pointer to the driver for this PHY instance
+ * @phy_id: UID for this device found during discovery
+ * @c45_ids: 802.3-c45 Device Identifiers if is_c45.
+ * @is_c45: Set to true if this PHY uses clause 45 addressing.
+ * @is_internal: Set to true if this PHY is internal to a MAC.
+ * @is_pseudo_fixed_link: Set to true if this PHY is an Ethernet switch, etc.
+ * @is_gigabit_capable: Set to true if PHY supports 1000Mbps
+ * @has_fixups: Set to true if this PHY has fixups/quirks.
+ * @suspended: Set to true if this PHY has been suspended successfully.
+ * @suspended_by_mdio_bus: Set to true if this PHY was suspended by MDIO bus.
+ * @sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
+ * @loopback_enabled: Set true if this PHY has been loopbacked successfully.
+ * @downshifted_rate: Set true if link speed has been downshifted.
+ * @state: State of the PHY for management purposes
+ * @dev_flags: Device-specific flags used by the PHY driver.
+ * @irq: IRQ number of the PHY's interrupt (-1 if none)
+ * @phy_timer: The timer for handling the state machine
+ * @phylink: Pointer to phylink instance for this PHY
+ * @sfp_bus_attached: Flag indicating whether the SFP bus has been attached
+ * @sfp_bus: SFP bus attached to this PHY's fiber port
+ * @attached_dev: The attached enet driver's device instance ptr
+ * @adjust_link: Callback for the enet controller to respond to changes in the
+ * link state.
+ * @phy_link_change: Callback for phylink for notification of link change
+ * @macsec_ops: MACsec offloading ops.
*
- * speed, duplex, pause, supported, advertising, lp_advertising,
- * and autoneg are used like in mii_if_info
+ * @speed: Current link speed
+ * @duplex: Current duplex
+ * @pause: Current pause
+ * @asym_pause: Current asymmetric pause
+ * @supported: Combined MAC/PHY supported linkmodes
+ * @advertising: Currently advertised linkmodes
+ * @adv_old: Saved advertised while power saving for WoL
+ * @lp_advertising: Current link partner advertised linkmodes
+ * @eee_broken_modes: Energy efficient ethernet modes which should be prohibited
+ * @autoneg: Flag autoneg being used
+ * @link: Current link state
+ * @autoneg_complete: Flag auto negotiation of the link has completed
+ * @mdix: Current crossover
+ * @mdix_ctrl: User setting of crossover
+ * @interrupts: Flag interrupts have been enabled
+ * @interface: enum phy_interface_t value
+ * @skb: Netlink message for cable diagnostics
+ * @nest: Netlink nest used for cable diagnostics
+ * @ehdr: Netlink header for cable diagnostics
+ * @phy_led_triggers: Array of LED triggers
+ * @phy_num_led_triggers: Number of triggers in @phy_led_triggers
+ * @led_link_trigger: LED trigger for link up/down
+ * @last_triggered: last LED trigger for link speed
+ * @master_slave_set: User requested master/slave configuration
+ * @master_slave_get: Current master/slave advertisement
+ * @master_slave_state: Current master/slave configuration
+ * @mii_ts: Pointer to time stamper callbacks
+ * @lock: Mutex for serialization access to PHY
+ * @state_queue: Work queue for state machine
+ * @shared: Pointer to private data shared by PHYs in one package
+ * @priv: Pointer to driver private data
*
* interrupts currently only supports enabled or disabled,
* but could be changed in the future to support enabling
@@ -550,9 +655,18 @@ struct phy_device {
#define to_phy_device(d) container_of(to_mdio_device(d), \
struct phy_device, mdio)
-/* A structure containing possible configuration parameters
+/**
+ * struct phy_tdr_config - Configuration of a TDR raw test
+ *
+ * @first: Distance for first data collection point
+ * @last: Distance for last data collection point
+ * @step: Step between data collection points
+ * @pair: Bitmap of cable pairs to collect data for
+ *
+ * A structure containing possible configuration parameters
* for a TDR cable test. The driver does not need to implement
* all the parameters, but should report what is actually used.
+ * All distances are in centimeters.
*/
struct phy_tdr_config {
u32 first;
@@ -562,18 +676,20 @@ struct phy_tdr_config {
};
#define PHY_PAIR_ALL -1
-/* struct phy_driver: Driver structure for a particular PHY type
+/**
+ * struct phy_driver - Driver structure for a particular PHY type
*
- * driver_data: static driver data
- * phy_id: The result of reading the UID registers of this PHY
+ * @mdiodrv: Data common to all MDIO devices
+ * @phy_id: The result of reading the UID registers of this PHY
* type, and ANDing them with the phy_id_mask. This driver
* only works for PHYs with IDs which match this field
- * name: The friendly name of this PHY type
- * phy_id_mask: Defines the important bits of the phy_id
- * features: A mandatory list of features (speed, duplex, etc)
+ * @name: The friendly name of this PHY type
+ * @phy_id_mask: Defines the important bits of the phy_id
+ * @features: A mandatory list of features (speed, duplex, etc)
* supported by this PHY
- * flags: A bitfield defining certain other features this PHY
+ * @flags: A bitfield defining certain other features this PHY
* supports (like interrupts)
+ * @driver_data: Static driver data
*
* All functions are optional. If config_aneg or read_status
* are not implemented, the phy core uses the genphy versions.
@@ -592,151 +708,178 @@ struct phy_driver {
u32 flags;
const void *driver_data;
- /*
- * Called to issue a PHY software reset
+ /**
+ * @soft_reset: Called to issue a PHY software reset
*/
int (*soft_reset)(struct phy_device *phydev);
- /*
- * Called to initialize the PHY,
+ /**
+ * @config_init: Called to initialize the PHY,
* including after a reset
*/
int (*config_init)(struct phy_device *phydev);
- /*
- * Called during discovery. Used to set
+ /**
+ * @probe: Called during discovery. Used to set
* up device-specific structures, if any
*/
int (*probe)(struct phy_device *phydev);
- /*
- * Probe the hardware to determine what abilities it has.
- * Should only set phydev->supported.
+ /**
+ * @get_features: Probe the hardware to determine what
+ * abilities it has. Should only set phydev->supported.
*/
int (*get_features)(struct phy_device *phydev);
/* PHY Power Management */
+ /** @suspend: Suspend the hardware, saving state if needed */
int (*suspend)(struct phy_device *phydev);
+ /** @resume: Resume the hardware, restoring state if needed */
int (*resume)(struct phy_device *phydev);
- /*
- * Configures the advertisement and resets
+ /**
+ * @config_aneg: Configures the advertisement and resets
* autonegotiation if phydev->autoneg is on,
* forces the speed to the current settings in phydev
* if phydev->autoneg is off
*/
int (*config_aneg)(struct phy_device *phydev);
- /* Determines the auto negotiation result */
+ /** @aneg_done: Determines the auto negotiation result */
int (*aneg_done)(struct phy_device *phydev);
- /* Determines the negotiated speed and duplex */
+ /** @read_status: Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
- /* Clears any pending interrupts */
+ /** @ack_interrupt: Clears any pending interrupts */
int (*ack_interrupt)(struct phy_device *phydev);
- /* Enables or disables interrupts */
+ /** @config_intr: Enables or disables interrupts */
int (*config_intr)(struct phy_device *phydev);
- /*
- * Checks if the PHY generated an interrupt.
+ /**
+ * @did_interrupt: Checks if the PHY generated an interrupt.
* For multi-PHY devices with shared PHY interrupt pin
* Set interrupt bits have to be cleared.
*/
int (*did_interrupt)(struct phy_device *phydev);
- /* Override default interrupt handling */
+ /** @handle_interrupt: Override default interrupt handling */
irqreturn_t (*handle_interrupt)(struct phy_device *phydev);
- /* Clears up any memory if needed */
+ /** @remove: Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
- /* Returns true if this is a suitable driver for the given
- * phydev. If NULL, matching is based on phy_id and
- * phy_id_mask.
+ /**
+ * @match_phy_device: Returns true if this is a suitable
+ * driver for the given phydev. If NULL, matching is based on
+ * phy_id and phy_id_mask.
*/
int (*match_phy_device)(struct phy_device *phydev);
- /* Some devices (e.g. qnap TS-119P II) require PHY register changes to
- * enable Wake on LAN, so set_wol is provided to be called in the
- * ethernet driver's set_wol function. */
+ /**
+ * @set_wol: Some devices (e.g. qnap TS-119P II) require PHY
+ * register changes to enable Wake on LAN, so set_wol is
+ * provided to be called in the ethernet driver's set_wol
+ * function.
+ */
int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /* See set_wol, but for checking whether Wake on LAN is enabled. */
+ /**
+ * @get_wol: See set_wol, but for checking whether Wake on LAN
+ * is enabled.
+ */
void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
- /*
- * Called to inform a PHY device driver when the core is about to
- * change the link state. This callback is supposed to be used as
- * fixup hook for drivers that need to take action when the link
- * state changes. Drivers are by no means allowed to mess with the
+ /**
+ * @link_change_notify: Called to inform a PHY device driver
+ * when the core is about to change the link state. This
+ * callback is supposed to be used as fixup hook for drivers
+ * that need to take action when the link state
+ * changes. Drivers are by no means allowed to mess with the
* PHY device structure in their implementations.
*/
void (*link_change_notify)(struct phy_device *dev);
- /*
- * Phy specific driver override for reading a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD read function will be used
- * by phy_read_mmd(), which will use either a direct read for
- * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
+ /**
+ * @read_mmd: PHY specific driver override for reading a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD read function
+ * will be used by phy_read_mmd(), which will use either a
+ * direct read for Clause 45 PHYs or an indirect read for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device.
*/
int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
- /*
- * Phy specific driver override for writing a MMD register.
- * This function is optional for PHY specific drivers. When
- * not provided, the default MMD write function will be used
- * by phy_write_mmd(), which will use either a direct write for
- * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
- * devnum is the MMD device number within the PHY device,
- * regnum is the register within the selected MMD device.
- * val is the value to be written.
+ /**
+ * @write_mmd: PHY specific driver override for writing a MMD
+ * register. This function is optional for PHY specific
+ * drivers. When not provided, the default MMD write function
+ * will be used by phy_write_mmd(), which will use either a
+ * direct write for Clause 45 PHYs, or an indirect write for
+ * Clause 22 PHYs. devnum is the MMD device number within the
+ * PHY device, regnum is the register within the selected MMD
+ * device. val is the value to be written.
*/
int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
u16 val);
+ /** @read_page: Return the current PHY register page number */
int (*read_page)(struct phy_device *dev);
+ /** @write_page: Set the current PHY register page number */
int (*write_page)(struct phy_device *dev, int page);
- /* Get the size and type of the eeprom contained within a plug-in
- * module */
+ /**
+ * @module_info: Get the size and type of the eeprom contained
+ * within a plug-in module
+ */
int (*module_info)(struct phy_device *dev,
struct ethtool_modinfo *modinfo);
- /* Get the eeprom information from the plug-in module */
+ /**
+ * @module_eeprom: Get the eeprom information from the plug-in
+ * module
+ */
int (*module_eeprom)(struct phy_device *dev,
struct ethtool_eeprom *ee, u8 *data);
- /* Start a cable test */
+ /** @cable_test_start: Start a cable test */
int (*cable_test_start)(struct phy_device *dev);
- /* Start a raw TDR cable test */
+ /** @cable_test_tdr_start: Start a raw TDR cable test */
int (*cable_test_tdr_start)(struct phy_device *dev,
const struct phy_tdr_config *config);
- /* Once per second, or on interrupt, request the status of the
- * test.
+ /**
+ * @cable_test_get_status: Once per second, or on interrupt,
+ * request the status of the test.
*/
int (*cable_test_get_status)(struct phy_device *dev, bool *finished);
- /* Get statistics from the phy using ethtool */
+ /* Get statistics from the PHY using ethtool */
+ /** @get_sset_count: Number of statistic counters */
int (*get_sset_count)(struct phy_device *dev);
+ /** @get_strings: Names of the statistic counters */
void (*get_strings)(struct phy_device *dev, u8 *data);
+ /** @get_stats: Return the statistic counter values */
void (*get_stats)(struct phy_device *dev,
struct ethtool_stats *stats, u64 *data);
/* Get and Set PHY tunables */
+ /** @get_tunable: Return the value of a tunable */
int (*get_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna, void *data);
+ /** @set_tunable: Set the value of a tunable */
int (*set_tunable)(struct phy_device *dev,
struct ethtool_tunable *tuna,
const void *data);
+ /** @set_loopback: Set the loopback mode of the PHY */
int (*set_loopback)(struct phy_device *dev, bool enable);
+ /** @get_sqi: Get the signal quality indication */
int (*get_sqi)(struct phy_device *dev);
+ /** @get_sqi_max: Get the maximum signal quality indication */
int (*get_sqi_max)(struct phy_device *dev);
};
#define to_phy_driver(d) container_of(to_mdio_common_driver(d), \
@@ -890,6 +1033,24 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum,
*/
int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
+/**
+ * phy_read_mmd_poll_timeout - Periodically poll a PHY register until a
+ * condition is met or a timeout occurs
+ *
+ * @phydev: The phy_device struct
+ * @devaddr: The MMD to read from
+ * @regnum: The register on the MMD to read
+ * @val: Variable to read the register into
+ * @cond: Break condition (usually involving @val)
+ * @sleep_us: Maximum time to sleep between reads in us (0
+ * tight-loops). Should be less than ~20ms since usleep_range
+ * is used (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ * @sleep_before_read: if true, sleep @sleep_us before the first read.
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either
+ * case, the last read value is stored in @val. Must not
+ * be called from atomic context if sleep_us or timeout_us are used.
+ */
#define phy_read_mmd_poll_timeout(phydev, devaddr, regnum, val, cond, \
sleep_us, timeout_us, sleep_before_read) \
({ \
@@ -1161,7 +1322,7 @@ static inline bool phy_is_internal(struct phy_device *phydev)
/**
* phy_interface_mode_is_rgmii - Convenience function for testing if a
* PHY interface mode is RGMII (all variants)
- * @mode: the phy_interface_t enum
+ * @mode: the &phy_interface_t enum
*/
static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
{
@@ -1170,11 +1331,11 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode)
};
/**
- * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z
+ * phy_interface_mode_is_8023z() - does the PHY interface mode use 802.3z
* negotiation
* @mode: one of &enum phy_interface_t
*
- * Returns true if the phy interface mode uses the 16-bit negotiation
+ * Returns true if the PHY interface mode uses the 16-bit negotiation
* word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding)
*/
static inline bool phy_interface_mode_is_8023z(phy_interface_t mode)
@@ -1193,7 +1354,7 @@ static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
return phy_interface_mode_is_rgmii(phydev->interface);
};
-/*
+/**
* phy_is_pseudo_fixed_link - Convenience function for testing if this
* PHY is the CPU port facing side of an Ethernet switch, or similar.
* @phydev: the phy_device struct
@@ -1566,8 +1727,9 @@ static inline int mdiobus_register_board_info(const struct mdio_board_info *i,
/**
- * module_phy_driver() - Helper macro for registering PHY drivers
+ * phy_module_driver() - Helper macro for registering PHY drivers
* @__phy_drivers: array of PHY drivers to register
+ * @__count: Number of members in the array
*
* Helper macro for PHY drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
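
A sketch of the typical use (PHY ID and driver name hypothetical):

    #include <linux/module.h>
    #include <linux/phy.h>

    static struct phy_driver my_phy_drivers[] = {
        {
            .phy_id      = 0x00112233,	/* hypothetical ID */
            .phy_id_mask = 0xffffffff,
            .name        = "Example PHY",
            /* .features, .config_aneg, ... */
        },
    };

    /* Expands to module_init()/module_exit() pairs that register and
     * unregister the array; the count is taken via ARRAY_SIZE().
     */
    module_phy_driver(my_phy_drivers);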
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index c36fb41a7d90..d81a714cfbbd 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -490,4 +490,7 @@ void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs);
void phylink_mii_c45_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state);
+
+void phylink_decode_usxgmii_word(struct phylink_link_state *state,
+ uint16_t lpa);
#endif
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
deleted file mode 100644
index aa5b5562d6f7..000000000000
--- a/include/linux/platform_data/macb.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2004-2006 Atmel Corporation
- */
-#ifndef __MACB_PDATA_H__
-#define __MACB_PDATA_H__
-
-#include <linux/clk.h>
-
-/**
- * struct macb_platform_data - platform data for MACB Ethernet
- * @pclk: platform clock
- * @hclk: AHB clock
- */
-struct macb_platform_data {
- struct clk *pclk;
- struct clk *hclk;
-};
-
-#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/prefetch.h b/include/linux/prefetch.h
index 13eafebf3549..b83a3f944f28 100644
--- a/include/linux/prefetch.h
+++ b/include/linux/prefetch.h
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/cache.h>
+struct page;
/*
prefetch(x) attempts to pre-emptively get the memory pointed to
by address "x" into the CPU L1 cache.
@@ -62,4 +63,11 @@ static inline void prefetch_range(void *addr, size_t len)
#endif
}
+static inline void prefetch_page_address(struct page *page)
+{
+#if defined(WANT_PAGE_VIRTUAL) || defined(HASHED_PAGE_VIRTUAL)
+ prefetch(page);
+#endif
+}
+
#endif
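
The new helper is intended for driver fast paths that are about to call
page_address() on a page; a sketch (the RX ring layout is hypothetical):

    #include <linux/mm_types.h>
    #include <linux/prefetch.h>

    /* Warm up the struct page of the next RX buffer before it is
     * needed. On configurations without WANT_PAGE_VIRTUAL or
     * HASHED_PAGE_VIRTUAL this compiles away to nothing.
     */
    static void my_rx_prefetch_next(struct page **ring, unsigned int next)
    {
        prefetch_page_address(ring[next]);
    }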
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h
index dd00fa41f7e7..c6487b7ab026 100644
--- a/include/linux/ptp_classify.h
+++ b/include/linux/ptp_classify.h
@@ -36,7 +36,6 @@
#define OFF_PTP_SOURCE_UUID 22 /* PTPv1 only */
#define OFF_PTP_SEQUENCE_ID 30
-#define OFF_PTP_CONTROL 32 /* PTPv1 only */
/* Below defines should actually be removed at some point in time. */
#define IP6_HLEN 40
@@ -44,6 +43,30 @@
#define OFF_IHL 14
#define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
+struct clock_identity {
+ u8 id[8];
+} __packed;
+
+struct port_identity {
+ struct clock_identity clock_identity;
+ __be16 port_number;
+} __packed;
+
+struct ptp_header {
+ u8 tsmt; /* transportSpecific | messageType */
+ u8 ver; /* reserved | versionPTP */
+ __be16 message_length;
+ u8 domain_number;
+ u8 reserved1;
+ u8 flag_field[2];
+ __be64 correction;
+ __be32 reserved2;
+ struct port_identity source_port_identity;
+ __be16 sequence_id;
+ u8 control;
+ u8 log_message_interval;
+} __packed;
+
#if defined(CONFIG_NET_PTP_CLASSIFY)
/**
* ptp_classify_raw - classify a PTP packet
@@ -57,6 +80,46 @@
*/
unsigned int ptp_classify_raw(const struct sk_buff *skb);
+/**
+ * ptp_parse_header - Get pointer to the PTP v2 header
+ * @skb: packet buffer
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function takes care of the VLAN, UDP, IPv4 and IPv6 headers. The length
+ * is checked.
+ *
+ * Note, internally skb_mac_header() is used. Make sure that the @skb is
+ * initialized accordingly.
+ *
+ * Return: Pointer to the PTP v2 header or NULL if not found
+ */
+struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type);
+
+/**
+ * ptp_get_msgtype - Extract the PTP message type from a given header
+ * @hdr: PTP header
+ * @type: type of the packet (see ptp_classify_raw())
+ *
+ * This function returns the message type for a given PTP header. It takes care
+ * of the different PTP header versions (v1 or v2).
+ *
+ * Return: The message type
+ */
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+ u8 msgtype;
+
+ if (unlikely(type & PTP_CLASS_V1)) {
+		/* msg type is located in the control field for PTP v1 */
+ msgtype = hdr->control;
+ } else {
+ msgtype = hdr->tsmt & 0x0f;
+ }
+
+ return msgtype;
+}
+
void __init ptp_classifier_init(void);
#else
static inline void ptp_classifier_init(void)
@@ -66,5 +129,18 @@ static inline unsigned int ptp_classify_raw(struct sk_buff *skb)
{
return PTP_CLASS_NONE;
}
+static inline struct ptp_header *ptp_parse_header(struct sk_buff *skb,
+ unsigned int type)
+{
+ return NULL;
+}
+static inline u8 ptp_get_msgtype(const struct ptp_header *hdr,
+ unsigned int type)
+{
+	/* The return value is meaningless. This stub is never executed,
+	 * since ptp_parse_header() never returns a header here.
+	 */
+ return 0;
+}
#endif
#endif /* _PTP_CLASSIFY_H_ */
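
Taken together, a driver RX timestamping path might combine the classifier
with the two new helpers as in this sketch (the SYNC-only filter is
illustrative):

    #include <linux/ptp_classify.h>
    #include <linux/skbuff.h>

    /* Return true if @skb carries a PTP SYNC message. */
    static bool my_skb_is_ptp_sync(struct sk_buff *skb)
    {
        struct ptp_header *hdr;
        unsigned int type;

        type = ptp_classify_raw(skb);
        if (type == PTP_CLASS_NONE)
            return false;

        hdr = ptp_parse_header(skb, type);
        if (!hdr)
            return false;

        /* 0x0 is the SYNC message type in both PTPv1 and PTPv2 */
        return ptp_get_msgtype(hdr, type) == 0x0;
    }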
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index cdd73afc4c46..57fb295ea41a 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -21,6 +21,7 @@
#include <linux/qed/common_hsi.h>
#include <linux/qed/qed_chain.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <net/devlink.h>
enum dcbx_protocol_type {
DCBX_PROTOCOL_ISCSI,
@@ -780,6 +781,11 @@ enum qed_nvm_flash_cmd {
QED_NVM_FLASH_CMD_NVM_MAX,
};
+struct qed_devlink {
+ struct qed_dev *cdev;
+ struct devlink_health_reporter *fw_reporter;
+};
+
struct qed_common_cb_ops {
void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev, struct qed_link_output *link);
@@ -845,10 +851,9 @@ struct qed_common_ops {
struct qed_dev* (*probe)(struct pci_dev *dev,
struct qed_probe_params *params);
- void (*remove)(struct qed_dev *cdev);
+ void (*remove)(struct qed_dev *cdev);
- int (*set_power_state)(struct qed_dev *cdev,
- pci_power_t state);
+ int (*set_power_state)(struct qed_dev *cdev, pci_power_t state);
void (*set_name) (struct qed_dev *cdev, char name[]);
@@ -856,50 +861,51 @@ struct qed_common_ops {
* PF params required for the call before slowpath_start are
* documented within the qed_pf_params structure definition.
*/
- void (*update_pf_params)(struct qed_dev *cdev,
- struct qed_pf_params *params);
- int (*slowpath_start)(struct qed_dev *cdev,
- struct qed_slowpath_params *params);
+ void (*update_pf_params)(struct qed_dev *cdev,
+ struct qed_pf_params *params);
- int (*slowpath_stop)(struct qed_dev *cdev);
+ int (*slowpath_start)(struct qed_dev *cdev,
+ struct qed_slowpath_params *params);
+
+ int (*slowpath_stop)(struct qed_dev *cdev);
/* Requests to use `cnt' interrupts for fastpath.
* upon success, returns number of interrupts allocated for fastpath.
*/
- int (*set_fp_int)(struct qed_dev *cdev,
- u16 cnt);
+ int (*set_fp_int)(struct qed_dev *cdev, u16 cnt);
/* Fills `info' with pointers required for utilizing interrupts */
- int (*get_fp_int)(struct qed_dev *cdev,
- struct qed_int_info *info);
-
- u32 (*sb_init)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- void *sb_virt_addr,
- dma_addr_t sb_phy_addr,
- u16 sb_id,
- enum qed_sb_type type);
-
- u32 (*sb_release)(struct qed_dev *cdev,
- struct qed_sb_info *sb_info,
- u16 sb_id,
- enum qed_sb_type type);
-
- void (*simd_handler_config)(struct qed_dev *cdev,
- void *token,
- int index,
- void (*handler)(void *));
-
- void (*simd_handler_clean)(struct qed_dev *cdev,
- int index);
- int (*dbg_grc)(struct qed_dev *cdev,
- void *buffer, u32 *num_dumped_bytes);
+ int (*get_fp_int)(struct qed_dev *cdev, struct qed_int_info *info);
+
+ u32 (*sb_init)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ void *sb_virt_addr,
+ dma_addr_t sb_phy_addr,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ u32 (*sb_release)(struct qed_dev *cdev,
+ struct qed_sb_info *sb_info,
+ u16 sb_id,
+ enum qed_sb_type type);
+
+ void (*simd_handler_config)(struct qed_dev *cdev,
+ void *token,
+ int index,
+ void (*handler)(void *));
+
+ void (*simd_handler_clean)(struct qed_dev *cdev, int index);
+
+ int (*dbg_grc)(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int (*dbg_grc_size)(struct qed_dev *cdev);
- int (*dbg_all_data) (struct qed_dev *cdev, void *buffer);
+ int (*dbg_all_data)(struct qed_dev *cdev, void *buffer);
- int (*dbg_all_data_size) (struct qed_dev *cdev);
+ int (*dbg_all_data_size)(struct qed_dev *cdev);
+
+ int (*report_fatal_error)(struct devlink *devlink,
+ enum qed_hw_err_type err_type);
/**
* @brief can_link_change - can the instance change the link or not
@@ -1138,6 +1144,10 @@ struct qed_common_ops {
*
*/
int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val);
+
+ struct devlink* (*devlink_register)(struct qed_dev *cdev);
+
+ void (*devlink_unregister)(struct devlink *devlink);
};
#define MASK_FIELD(_name, _value) \
diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h
index d9015aac78c6..3e7919fc5f34 100644
--- a/include/linux/rcupdate_trace.h
+++ b/include/linux/rcupdate_trace.h
@@ -50,6 +50,7 @@ static inline void rcu_read_lock_trace(void)
struct task_struct *t = current;
WRITE_ONCE(t->trc_reader_nesting, READ_ONCE(t->trc_reader_nesting) + 1);
+ barrier();
if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) &&
t->trc_reader_special.b.need_mb)
smp_mb(); // Pairs with update-side barriers
@@ -72,6 +73,9 @@ static inline void rcu_read_unlock_trace(void)
rcu_lock_release(&rcu_trace_lock_map);
nesting = READ_ONCE(t->trc_reader_nesting) - 1;
+ barrier(); // Critical section before disabling.
+ // Disable IPI-based setting of .need_qs.
+ WRITE_ONCE(t->trc_reader_nesting, INT_MIN);
if (likely(!READ_ONCE(t->trc_reader_special.s)) || nesting) {
WRITE_ONCE(t->trc_reader_nesting, nesting);
return; // We assume shallow reader nesting.
@@ -82,7 +86,14 @@ static inline void rcu_read_unlock_trace(void)
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
void synchronize_rcu_tasks_trace(void);
void rcu_barrier_tasks_trace(void);
-
+#else
+/*
+ * The BPF JIT forms these addresses even when it doesn't call these
+ * functions, so provide definitions that result in runtime errors.
+ */
+static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
+static inline void rcu_read_lock_trace(void) { BUG(); }
+static inline void rcu_read_unlock_trace(void) { BUG(); }
#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
#endif /* __LINUX_RCUPDATE_TRACE_H */
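
For reference, a tasks-trace reader section is written the same way
regardless of the config; with CONFIG_TASKS_TRACE_RCU disabled the stubs
above BUG() if anything actually reaches them. A minimal sketch:

    #include <linux/rcupdate_trace.h>

    static int my_read_shared(int *shared)
    {
        int val;

        rcu_read_lock_trace();          /* begin tasks-trace reader */
        val = READ_ONCE(*shared);
        rcu_read_unlock_trace();        /* end tasks-trace reader */

        return val;
    }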
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 416bf95cd5f2..a828cf99c521 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2548,6 +2548,11 @@ static inline int skb_mac_header_was_set(const struct sk_buff *skb)
return skb->mac_header != (typeof(skb->mac_header))~0U;
}
+static inline void skb_unset_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = (typeof(skb->mac_header))~0U;
+}
+
static inline void skb_reset_mac_header(struct sk_buff *skb)
{
skb->mac_header = skb->data - skb->head;
@@ -3568,6 +3573,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
+int skb_eth_pop(struct sk_buff *skb);
+int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
+ const unsigned char *src);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
int mac_len, bool ethernet);
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index 1e9ed840b9fc..fec0c5ac1c4f 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -308,6 +308,8 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node);
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock);
void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock);
+void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock);
+void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock);
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
struct sk_msg *msg);
@@ -340,23 +342,6 @@ static inline void sk_psock_update_proto(struct sock *sk,
struct sk_psock *psock,
struct proto *ops)
{
- /* Initialize saved callbacks and original proto only once, since this
- * function may be called multiple times for a psock, e.g. when
- * psock->progs.msg_parser is updated.
- *
- * Since we've not installed the new proto, psock is not yet in use and
- * we can initialize it without synchronization.
- */
- if (!psock->sk_proto) {
- struct proto *orig = READ_ONCE(sk->sk_prot);
-
- psock->saved_unhash = orig->unhash;
- psock->saved_close = orig->close;
- psock->saved_write_space = sk->sk_write_space;
-
- psock->sk_proto = orig;
- }
-
/* Pairs with lockless read in sk_clone_lock() */
WRITE_ONCE(sk->sk_prot, ops);
}
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 15fe980a27ea..0b9ecd8cf979 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -25,7 +25,19 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
-u64 sock_gen_cookie(struct sock *sk);
+u64 __sock_gen_cookie(struct sock *sk);
+
+static inline u64 sock_gen_cookie(struct sock *sk)
+{
+ u64 cookie;
+
+ preempt_disable();
+ cookie = __sock_gen_cookie(sk);
+ preempt_enable();
+
+ return cookie;
+}
+
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
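
The split allows callers that already run with preemption disabled to use
__sock_gen_cookie() directly, while everyone else keeps the old behavior
through the wrapper. A sketch of a process-context caller:

    #include <linux/sock_diag.h>
    #include <net/sock.h>

    static u64 my_get_cookie(struct sock *sk)
    {
        /* The wrapper toggles preemption around the per-CPU cookie
         * generator; call __sock_gen_cookie() instead if preemption
         * is already disabled.
         */
        return sock_gen_cookie(sk);
    }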
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index bd964c31d333..628e28903b8b 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -198,5 +198,8 @@ struct plat_stmmacenet_data {
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
int has_xgmac;
+ bool vlan_fail_q_en;
+ u8 vlan_fail_q;
+ unsigned int eee_usecs_rate;
};
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 14b62d7df942..2f87377e9af7 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -92,6 +92,8 @@ struct tcp_options_received {
smc_ok : 1, /* SMC seen on SYN packet */
snd_wscale : 4, /* Window scaling received from sender */
rcv_wscale : 4; /* Window scaling to send to receiver */
+ u8 saw_unknown:1, /* Received unknown option */
+ unused:7;
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
@@ -132,6 +134,7 @@ struct tcp_request_sock {
* FastOpen it's the seq#
* after data-in-SYN.
*/
+ u8 syn_tos;
};
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -237,14 +240,13 @@ struct tcp_sock {
repair : 1,
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
- u8 syn_data:1, /* SYN includes data */
+ u8 save_syn:2, /* Save headers of SYN packet */
+ syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_fastopen_ch:1, /* Active TFO re-enabling probe */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
- save_syn:1, /* Save headers of SYN packet */
- is_cwnd_limited:1,/* forward progress limited by snd_cwnd? */
- syn_smc:1; /* SYN includes SMC */
+ is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP */
u32 tcp_tx_delay; /* delay (in usec) added to TX packets */
@@ -391,6 +393,9 @@ struct tcp_sock {
#if IS_ENABLED(CONFIG_MPTCP)
bool is_mptcp;
#endif
+#if IS_ENABLED(CONFIG_SMC)
+ bool syn_smc; /* SYN includes SMC */
+#endif
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
@@ -406,7 +411,7 @@ struct tcp_sock {
* socket. Used to retransmit SYNACKs etc.
*/
struct request_sock __rcu *fastopen_rsk;
- u32 *saved_syn;
+ struct saved_syn *saved_syn;
};
enum tsq_enum {
@@ -484,6 +489,12 @@ static inline void tcp_saved_syn_free(struct tcp_sock *tp)
tp->saved_syn = NULL;
}
+static inline u32 tcp_saved_syn_len(const struct saved_syn *saved_syn)
+{
+ return saved_syn->mac_hdrlen + saved_syn->network_hdrlen +
+ saved_syn->tcp_hdrlen;
+}
+
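
A sketch of how the helper would be used when copying the saved headers
out (assuming struct saved_syn keeps the header bytes in a trailing
data[] array, as its length fields suggest):

    #include <linux/string.h>
    #include <linux/tcp.h>

    /* Copy the saved SYN headers into @buf; returns bytes copied or 0. */
    static u32 my_copy_saved_syn(const struct tcp_sock *tp,
                                 u8 *buf, u32 buflen)
    {
        u32 len;

        if (!tp->saved_syn)
            return 0;

        len = tcp_saved_syn_len(tp->saved_syn);
        if (len > buflen)
            return 0;

        memcpy(buf, tp->saved_syn->data, len);	/* data[] assumed */
        return len;
    }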
struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
const struct sk_buff *orig_skb);
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 8caac20556b4..9873e1c8cd16 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -484,6 +484,9 @@ struct hci_dev {
enum suspended_state suspend_state;
bool scanning_paused;
bool suspended;
+ u8 wake_reason;
+ bdaddr_t wake_addr;
+ u8 wake_addr_type;
wait_queue_head_t suspend_wait_q;
DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS);
@@ -1750,6 +1753,9 @@ void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
u8 addr_type, s8 rssi, u8 *name, u8 name_len);
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
+void mgmt_suspending(struct hci_dev *hdev, u8 state);
+void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
+ u8 addr_type);
bool mgmt_powering_down(struct hci_dev *hdev);
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 8f1e6a7a2df8..1d1232917de7 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -665,6 +665,8 @@ struct l2cap_ops {
struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
unsigned long hdr_len,
unsigned long len, int nb);
+	int			(*filter) (struct l2cap_chan *chan,
+ struct sk_buff *skb);
};
struct l2cap_conn {
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index beae5c3980f0..6b55155e05e9 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -572,6 +572,8 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_SEC_1M BIT(7)
#define MGMT_ADV_FLAG_SEC_2M BIT(8)
#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+#define MGMT_ADV_FLAG_CAN_SET_TX_POWER BIT(10)
+#define MGMT_ADV_FLAG_HW_OFFLOAD BIT(11)
#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
MGMT_ADV_FLAG_SEC_CODED)
@@ -840,6 +842,7 @@ struct mgmt_ev_device_connected {
#define MGMT_DEV_DISCONN_LOCAL_HOST 0x02
#define MGMT_DEV_DISCONN_REMOTE 0x03
#define MGMT_DEV_DISCONN_AUTH_FAILURE 0x04
+#define MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND 0x05
#define MGMT_EV_DEVICE_DISCONNECTED 0x000C
struct mgmt_ev_device_disconnected {
@@ -1028,3 +1031,18 @@ struct mgmt_ev_adv_monitor_added {
struct mgmt_ev_adv_monitor_removed {
__le16 monitor_handle;
} __packed;
+
+#define MGMT_EV_CONTROLLER_SUSPEND 0x002d
+struct mgmt_ev_controller_suspend {
+ __u8 suspend_state;
+} __packed;
+
+#define MGMT_EV_CONTROLLER_RESUME 0x002e
+struct mgmt_ev_controller_resume {
+ __u8 wake_reason;
+ struct mgmt_addr_info addr;
+} __packed;
+
+#define MGMT_WAKE_REASON_NON_BT_WAKE 0x0
+#define MGMT_WAKE_REASON_UNEXPECTED 0x1
+#define MGMT_WAKE_REASON_REMOTE_WAKE 0x2
diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 5036c94c0503..3c516dd07caf 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -3,6 +3,17 @@
#ifndef _BPF_SK_STORAGE_H
#define _BPF_SK_STORAGE_H
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
+#include <uapi/linux/btf.h>
+#include <linux/bpf_local_storage.h>
+
struct sock;
void bpf_sk_storage_free(struct sock *sk);
@@ -10,6 +21,7 @@ void bpf_sk_storage_free(struct sock *sk);
extern const struct bpf_func_proto bpf_sk_storage_get_proto;
extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+struct bpf_local_storage_elem;
struct bpf_sk_storage_diag;
struct sk_buff;
struct nlattr;
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
deleted file mode 100644
index a0bf4cbce71b..000000000000
--- a/include/net/caif/caif_spi.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson / Daniel.Martensson@stericsson.com
- */
-
-#ifndef CAIF_SPI_H_
-#define CAIF_SPI_H_
-
-#include <net/caif/caif_device.h>
-
-#define SPI_CMD_WR 0x00
-#define SPI_CMD_RD 0x01
-#define SPI_CMD_EOT 0x02
-#define SPI_CMD_IND 0x04
-
-#define SPI_DMA_BUF_LEN 8192
-
-#define WL_SZ 2 /* 16 bits. */
-#define SPI_CMD_SZ 4 /* 32 bits. */
-#define SPI_IND_SZ 4 /* 32 bits. */
-
-#define SPI_XFER 0
-#define SPI_SS_ON 1
-#define SPI_SS_OFF 2
-#define SPI_TERMINATE 3
-
-/* Minimum time between different levels is 50 microseconds. */
-#define MIN_TRANSITION_TIME_USEC 50
-
-/* Defines for calculating duration of SPI transfers for a particular
- * number of bytes.
- */
-#define SPI_MASTER_CLK_MHZ 13
-#define SPI_XFER_TIME_USEC(bytes, clk) (((bytes) * 8) / clk)
-
-/* Normally this should be aligned on the modem in order to benefit from full
- * duplex transfers. However a size of 8188 provokes errors when running with
- * the modem. These errors occur when packet sizes approaches 4 kB of data.
- */
-#define CAIF_MAX_SPI_FRAME 4092
-
-/* Maximum number of uplink CAIF frames that can reside in the same SPI frame.
- * This number should correspond with the modem setting. The application side
- * CAIF accepts any number of embedded downlink CAIF frames.
- */
-#define CAIF_MAX_SPI_PKTS 9
-
-/* Decides if SPI buffers should be prefilled with 0xFF pattern for easier
- * debugging. Both TX and RX buffers will be filled before the transfer.
- */
-#define CFSPI_DBG_PREFILL 0
-
-/* Structure describing a SPI transfer. */
-struct cfspi_xfer {
- u16 tx_dma_len;
- u16 rx_dma_len;
- void *va_tx[2];
- dma_addr_t pa_tx[2];
- void *va_rx;
- dma_addr_t pa_rx;
-};
-
-/* Structure implemented by the SPI interface. */
-struct cfspi_ifc {
- void (*ss_cb) (bool assert, struct cfspi_ifc *ifc);
- void (*xfer_done_cb) (struct cfspi_ifc *ifc);
- void *priv;
-};
-
-/* Structure implemented by SPI clients. */
-struct cfspi_dev {
- int (*init_xfer) (struct cfspi_xfer *xfer, struct cfspi_dev *dev);
- void (*sig_xfer) (bool xfer, struct cfspi_dev *dev);
- struct cfspi_ifc *ifc;
- char *name;
- u32 clk_mhz;
- void *priv;
-};
-
-/* Enumeration describing the CAIF SPI state. */
-enum cfspi_state {
- CFSPI_STATE_WAITING = 0,
- CFSPI_STATE_AWAKE,
- CFSPI_STATE_FETCH_PKT,
- CFSPI_STATE_GET_NEXT,
- CFSPI_STATE_INIT_XFER,
- CFSPI_STATE_WAIT_ACTIVE,
- CFSPI_STATE_SIG_ACTIVE,
- CFSPI_STATE_WAIT_XFER_DONE,
- CFSPI_STATE_XFER_DONE,
- CFSPI_STATE_WAIT_INACTIVE,
- CFSPI_STATE_SIG_INACTIVE,
- CFSPI_STATE_DELIVER_PKT,
- CFSPI_STATE_MAX,
-};
-
-/* Structure implemented by SPI physical interfaces. */
-struct cfspi {
- struct caif_dev_common cfdev;
- struct net_device *ndev;
- struct platform_device *pdev;
- struct sk_buff_head qhead;
- struct sk_buff_head chead;
- u16 cmd;
- u16 tx_cpck_len;
- u16 tx_npck_len;
- u16 rx_cpck_len;
- u16 rx_npck_len;
- struct cfspi_ifc ifc;
- struct cfspi_xfer xfer;
- struct cfspi_dev *dev;
- unsigned long state;
- struct work_struct work;
- struct workqueue_struct *wq;
- struct list_head list;
- int flow_off_sent;
- u32 qd_low_mark;
- u32 qd_high_mark;
- struct completion comp;
- wait_queue_head_t wait;
- spinlock_t lock;
- bool flow_stop;
- bool slave;
- bool slave_talked;
-#ifdef CONFIG_DEBUG_FS
- enum cfspi_state dbg_state;
- u16 pcmd;
- u16 tx_ppck_len;
- u16 rx_ppck_len;
- struct dentry *dbgfs_dir;
- struct dentry *dbgfs_state;
- struct dentry *dbgfs_frame;
-#endif /* CONFIG_DEBUG_FS */
-};
-
-extern int spi_frm_align;
-extern int spi_up_head_align;
-extern int spi_up_tail_align;
-extern int spi_down_head_align;
-extern int spi_down_tail_align;
-extern struct platform_driver cfspi_spi_driver;
-
-void cfspi_dbg_state(struct cfspi *cfspi, int state);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_spi_remove(struct platform_device *pdev);
-int cfspi_spi_probe(struct platform_device *pdev);
-int cfspi_xmitfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-int cfspi_xmitlen(struct cfspi *cfspi);
-int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len);
-void cfspi_xfer(struct work_struct *work);
-
-#endif /* CAIF_SPI_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index d9e6b9fbd95b..aee47f2b5709 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -96,6 +96,16 @@ struct wiphy;
* @IEEE80211_CHAN_NO_10MHZ: 10 MHz bandwidth is not permitted
* on this channel.
* @IEEE80211_CHAN_NO_HE: HE operation is not permitted on this channel.
+ * @IEEE80211_CHAN_1MHZ: 1 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_2MHZ: 2 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_4MHZ: 4 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_8MHZ: 8 MHz bandwidth is permitted
+ * on this channel.
+ * @IEEE80211_CHAN_16MHZ: 16 MHz bandwidth is permitted
+ * on this channel.
*
*/
enum ieee80211_channel_flags {
@@ -113,6 +123,11 @@ enum ieee80211_channel_flags {
IEEE80211_CHAN_NO_20MHZ = 1<<11,
IEEE80211_CHAN_NO_10MHZ = 1<<12,
IEEE80211_CHAN_NO_HE = 1<<13,
+ IEEE80211_CHAN_1MHZ = 1<<14,
+ IEEE80211_CHAN_2MHZ = 1<<15,
+ IEEE80211_CHAN_4MHZ = 1<<16,
+ IEEE80211_CHAN_8MHZ = 1<<17,
+ IEEE80211_CHAN_16MHZ = 1<<18,
};
#define IEEE80211_CHAN_NO_HT40 \
@@ -254,13 +269,23 @@ struct ieee80211_rate {
* struct ieee80211_he_obss_pd - AP settings for spatial reuse
*
* @enable: is the feature enabled.
+ * @sr_ctrl: The SR Control field of the SRP element.
+ * @non_srg_max_offset: non-SRG maximum tx power offset
* @min_offset: minimal tx power offset an associated station shall use
* @max_offset: maximum tx power offset an associated station shall use
+ * @bss_color_bitmap: bitmap that indicates the BSS color values used by
+ * members of the SRG
+ * @partial_bssid_bitmap: bitmap that indicates the partial BSSID values
+ * used by members of the SRG
*/
struct ieee80211_he_obss_pd {
bool enable;
+ u8 sr_ctrl;
+ u8 non_srg_max_offset;
u8 min_offset;
u8 max_offset;
+ u8 bss_color_bitmap[8];
+ u8 partial_bssid_bitmap[8];
};
/**
@@ -450,6 +475,7 @@ struct ieee80211_sta_s1g_cap {
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
* @edmg_cap: EDMG capabilities in this band
+ * @s1g_cap: S1G capabilities in this band (S1G band only, of course)
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -678,7 +704,10 @@ struct cfg80211_bitrate_mask {
u32 legacy;
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ u16 he_mcs[NL80211_HE_NSS_MAX];
enum nl80211_txrate_gi gi;
+ enum nl80211_he_gi he_gi;
+ enum nl80211_he_ltf he_ltf;
} control[NUM_NL80211_BANDS];
};
@@ -1065,6 +1094,39 @@ struct cfg80211_acl_data {
};
/**
+ * struct cfg80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ * @tmpl_len: Template length
+ * @tmpl: Template data for FILS discovery frame including the action
+ * frame headers.
+ */
+struct cfg80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
+ * struct cfg80211_unsol_bcast_probe_resp - Unsolicited broadcast probe
+ * response parameters in 6 GHz.
+ *
+ * @interval: Packet interval in TUs. Maximum allowed is 20 TU, as mentioned
+ * in IEEE P802.11ax/D6.0 26.17.2.3.2 - AP behavior for fast passive
+ * scanning
+ * @tmpl_len: Template length
+ * @tmpl: Template data for probe response
+ */
+struct cfg80211_unsol_bcast_probe_resp {
+ u32 interval;
+ size_t tmpl_len;
+ const u8 *tmpl;
+};
+
+/**
* enum cfg80211_ap_settings_flags - AP settings flags
*
* Used by cfg80211_ap_settings
@@ -1111,6 +1173,8 @@ enum cfg80211_ap_settings_flags {
* @he_obss_pd: OBSS Packet Detection settings
* @he_bss_color: BSS Color settings
* @he_oper: HE operation IE (or %NULL if HE isn't enabled)
+ * @fils_discovery: FILS discovery transmission parameters
+ * @unsol_bcast_probe_resp: Unsolicited broadcast probe response parameters
*/
struct cfg80211_ap_settings {
struct cfg80211_chan_def chandef;
@@ -1141,6 +1205,8 @@ struct cfg80211_ap_settings {
u32 flags;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_he_bss_color he_bss_color;
+ struct cfg80211_fils_discovery fils_discovery;
+ struct cfg80211_unsol_bcast_probe_resp unsol_bcast_probe_resp;
};
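
A sketch of how an AP start request might fill the two new members (the
interval values are illustrative only):

    #include <net/cfg80211.h>

    static void my_fill_6ghz_ap_params(struct cfg80211_ap_settings *params,
                                       const u8 *tmpl, size_t tmpl_len)
    {
        /* FILS discovery frames at most every 20 TUs */
        params->fils_discovery.min_interval = 0;
        params->fils_discovery.max_interval = 20;
        params->fils_discovery.tmpl = tmpl;
        params->fils_discovery.tmpl_len = tmpl_len;

        /* Unsolicited broadcast probe responses every 20 TUs */
        params->unsol_bcast_probe_resp.interval = 20;
    }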
/**
@@ -1784,6 +1850,7 @@ struct mpath_info {
* (or NULL for no change)
* @basic_rates_len: number of basic rates
* @ap_isolate: do not forward packets between connected stations
+ * (0 = no, 1 = yes, -1 = do not change)
* @ht_opmode: HT Operation mode
* (u16 = opmode, -1 = do not change)
* @p2p_ctwindow: P2P CT Window (-1 = no change)
@@ -2039,6 +2106,27 @@ struct cfg80211_scan_info {
};
/**
+ * struct cfg80211_scan_6ghz_params - relevant for 6 GHz only
+ *
+ * @short_ssid: short SSID to scan for
+ * @bssid: BSSID to scan for
+ * @channel_idx: idx of the channel in the channel array in the scan request
+ * to which the above info is relevant
+ * @unsolicited_probe: the AP transmits unsolicited probe responses every 20 TUs
+ * @short_ssid_valid: short_ssid is valid and can be used
+ * @psc_no_listen: when set, and the channel is a PSC channel, no need to wait
+ * 20 TUs before starting to send probe requests.
+ */
+struct cfg80211_scan_6ghz_params {
+ u32 short_ssid;
+ u32 channel_idx;
+ u8 bssid[ETH_ALEN];
+ bool unsolicited_probe;
+ bool short_ssid_valid;
+ bool psc_no_listen;
+};
+
+/**
* struct cfg80211_scan_request - scan request description
*
* @ssids: SSIDs to scan for (active scan only)
@@ -2065,6 +2153,10 @@ struct cfg80211_scan_info {
* @mac_addr_mask: MAC address mask used with randomisation, bits that
* are 0 in the mask should be randomised, bits that are 1 should
* be taken from the @mac_addr
+ * @scan_6ghz: relevant for split scan request only,
+ * true if this is the second scan request
+ * @n_6ghz_params: number of 6 GHz params
+ * @scan_6ghz_params: 6 GHz params
* @bssid: BSSID to scan for (most commonly, the wildcard BSSID)
*/
struct cfg80211_scan_request {
@@ -2092,6 +2184,9 @@ struct cfg80211_scan_request {
struct cfg80211_scan_info info;
bool notified;
bool no_cck;
+ bool scan_6ghz;
+ u32 n_6ghz_params;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params;
/* keep last */
struct ieee80211_channel *channels[];
@@ -2471,6 +2566,8 @@ enum cfg80211_assoc_req_flags {
* @fils_nonces: FILS nonces (part of AAD) for protecting (Re)Association
* Request/Response frame or %NULL if FILS is not used. This field starts
* with 16 octets of STA Nonce followed by 16 octets of AP Nonce.
+ * @s1g_capa: S1G capability override
+ * @s1g_capa_mask: S1G capability override mask
*/
struct cfg80211_assoc_request {
struct cfg80211_bss *bss;
@@ -2485,6 +2582,7 @@ struct cfg80211_assoc_request {
const u8 *fils_kek;
size_t fils_kek_len;
const u8 *fils_nonces;
+ struct ieee80211_s1g_cap s1g_capa, s1g_capa_mask;
};
/**
@@ -4160,6 +4258,8 @@ struct cfg80211_ops {
/**
* enum wiphy_flags - wiphy capability flags
*
+ * @WIPHY_FLAG_SPLIT_SCAN_6GHZ: if set to true, the scan request will be split
+ * into two, first for legacy bands and second for UHB.
* @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
* wiphy at all
* @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
@@ -4203,7 +4303,7 @@ struct cfg80211_ops {
enum wiphy_flags {
WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK = BIT(0),
/* use hole at 1 */
- /* use hole at 2 */
+ WIPHY_FLAG_SPLIT_SCAN_6GHZ = BIT(2),
WIPHY_FLAG_NETNS_OK = BIT(3),
WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4),
WIPHY_FLAG_4ADDR_AP = BIT(5),
@@ -5276,6 +5376,16 @@ ieee80211_channel_to_khz(const struct ieee80211_channel *chan)
}
/**
+ * ieee80211_s1g_channel_width - get allowed channel width from @chan
+ *
+ * Only allowed for band NL80211_BAND_S1GHZ
+ * @chan: channel
+ * Return: The allowed channel width for this center_freq
+ */
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan);
+
+/**
* ieee80211_channel_to_freq_khz - convert channel number to frequency
* @chan: channel number
* @band: band, necessary due to channel number overlap
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 8f3c8a443238..b01bb9bca5a2 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -20,6 +20,14 @@
#include <uapi/linux/devlink.h>
#include <linux/xarray.h>
+#define DEVLINK_RELOAD_STATS_ARRAY_SIZE \
+ (__DEVLINK_RELOAD_LIMIT_MAX * __DEVLINK_RELOAD_ACTION_MAX)
+
+struct devlink_dev_stats {
+ u32 reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+ u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
+};
+
struct devlink_ops;
struct devlink {
@@ -38,6 +46,7 @@ struct devlink {
struct list_head trap_policer_list;
const struct devlink_ops *ops;
struct xarray snapshot_ids;
+ struct devlink_dev_stats stats;
struct device *dev;
possible_net_t _net;
struct mutex lock; /* Serializes access to devlink instance specific objects such as
@@ -57,13 +66,30 @@ struct devlink_port_phys_attrs {
u32 split_subport_number; /* If the port is split, this is the number of subport. */
};
+/**
+ * struct devlink_port_pci_pf_attrs - devlink port's PCI PF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @external: when set, indicates the port is for an external controller
+ */
struct devlink_port_pci_pf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u8 external:1;
};
+/**
+ * struct devlink_port_pci_vf_attrs - devlink port's PCI VF attributes
+ * @controller: Associated controller number
+ * @pf: Associated PCI PF number for this port.
+ * @vf: Associated PCI VF of the PCI PF for this port.
+ * @external: when set, indicates the port is for an external controller
+ */
struct devlink_port_pci_vf_attrs {
- u16 pf; /* Associated PCI PF for this port. */
- u16 vf; /* Associated PCI VF for of the PCI PF for this port. */
+ u32 controller;
+ u16 pf;
+ u16 vf;
+ u8 external:1;
};
/**
@@ -73,6 +99,9 @@ struct devlink_port_pci_vf_attrs {
* @splittable: indicates if the port can be split.
* @lanes: maximum number of lanes the port supports. 0 value is not passed to netlink.
* @switch_id: if the port is part of switch, this is buffer with ID, otherwise this is NULL
+ * @phys: physical port attributes
+ * @pci_pf: PCI PF port attributes
+ * @pci_vf: PCI VF port attributes
*/
struct devlink_port_attrs {
u8 split:1,
@@ -90,6 +119,7 @@ struct devlink_port_attrs {
struct devlink_port {
struct list_head list;
struct list_head param_list;
+ struct list_head region_list;
struct devlink *devlink;
unsigned int index;
bool registered;
@@ -372,6 +402,25 @@ struct devlink_param_gset_ctx {
};
/**
+ * struct devlink_flash_notify - devlink dev flash notify data
+ * @status_msg: current status string
+ * @component: firmware component being updated
+ * @done: amount of work completed so far, out of @total
+ * @total: amount of work expected to be done
+ * @timeout: expected max timeout in seconds
+ *
+ * These are values to be given to userland to be displayed in order
+ * to show current activity in a firmware update process.
+ */
+struct devlink_flash_notify {
+ const char *status_msg;
+ const char *component;
+ unsigned long done;
+ unsigned long total;
+ unsigned long timeout;
+};
+
+/**
* struct devlink_param - devlink configuration parameter data
* @name: name of the parameter
* @generic: indicates if the parameter is generic or driver specific
@@ -420,6 +469,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -457,6 +507,9 @@ enum devlink_param_generic_id {
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME "enable_remote_dev_reset"
+#define DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -511,6 +564,24 @@ enum devlink_param_generic_id {
/* Firmware bundle identifier */
#define DEVLINK_INFO_VERSION_GENERIC_FW_BUNDLE_ID "fw.bundle_id"
+/**
+ * struct devlink_flash_update_params - Flash Update parameters
+ * @file_name: the name of the flash firmware file to update from
+ * @component: the flash component to update
+ *
+ * With the exception of file_name, drivers must opt-in to parameters by
+ * setting the appropriate bit in the supported_flash_update_params field in
+ * their devlink_ops structure.
+ */
+struct devlink_flash_update_params {
+ const char *file_name;
+ const char *component;
+ u32 overwrite_mask;
+};
+
+#define DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT BIT(0)
+#define DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK BIT(1)
+
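
A sketch of a driver opting in to the component parameter (the update
logic itself is elided):

    #include <net/devlink.h>

    static int my_flash_update(struct devlink *devlink,
                               struct devlink_flash_update_params *params,
                               struct netlink_ext_ack *extack)
    {
        /* params->file_name is always valid; params->component is
         * only non-NULL because support is advertised below.
         */
        return 0;	/* a real driver would program the flash here */
    }

    static const struct devlink_ops my_devlink_ops = {
        .supported_flash_update_params =
            DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT,
        .flash_update = my_flash_update,
    };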
struct devlink_region;
struct devlink_info_req;
@@ -522,12 +593,36 @@ struct devlink_info_req;
* the data variable must be updated to point to the snapshot data.
* The function will be called while the devlink instance lock is
* held.
+ * @priv: Pointer to driver private data for the region operation
*/
struct devlink_region_ops {
const char *name;
void (*destructor)(const void *data);
- int (*snapshot)(struct devlink *devlink, struct netlink_ext_ack *extack,
+ int (*snapshot)(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
u8 **data);
+ void *priv;
+};
+
+/**
+ * struct devlink_port_region_ops - Region operations for a port
+ * @name: region name
+ * @destructor: callback used to free snapshot memory when deleting
+ * @snapshot: callback to request an immediate snapshot. On success,
+ * the data variable must be updated to point to the snapshot data.
+ * The function will be called while the devlink instance lock is
+ * held.
+ * @priv: Pointer to driver private data for the region operation
+ */
+struct devlink_port_region_ops {
+ const char *name;
+ void (*destructor)(const void *data);
+ int (*snapshot)(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data);
+ void *priv;
};
struct devlink_fmsg;
@@ -546,6 +641,7 @@ enum devlink_health_reporter_state {
* @dump: callback to dump an object
* if priv_ctx is NULL, run a full dump
* @diagnose: callback to diagnose the current status
+ * @test: callback to trigger a test event
*/
struct devlink_health_reporter_ops {
@@ -558,6 +654,24 @@ struct devlink_health_reporter_ops {
int (*diagnose)(struct devlink_health_reporter *reporter,
struct devlink_fmsg *fmsg,
struct netlink_ext_ack *extack);
+ int (*test)(struct devlink_health_reporter *reporter,
+ struct netlink_ext_ack *extack);
+};
+
+/**
+ * struct devlink_trap_metadata - Packet trap metadata.
+ * @trap_name: Trap name.
+ * @trap_group_name: Trap group name.
+ * @input_dev: Input netdevice.
+ * @fa_cookie: Flow action user cookie.
+ * @trap_type: Trap type.
+ */
+struct devlink_trap_metadata {
+ const char *trap_name;
+ const char *trap_group_name;
+ struct net_device *input_dev;
+ const struct flow_action_cookie *fa_cookie;
+ enum devlink_trap_type trap_type;
};
/**
@@ -704,6 +818,22 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_SAMPLE,
DEVLINK_TRAP_GENERIC_ID_FLOW_ACTION_TRAP,
DEVLINK_TRAP_GENERIC_ID_EARLY_DROP,
+ DEVLINK_TRAP_GENERIC_ID_VXLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_LLC_SNAP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_VLAN_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_PPPOE_PPP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_MPLS_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ARP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_1_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IP_N_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GRE_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_UDP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_TCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_IPSEC_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_SCTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_DCCP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_GTP_PARSING,
+ DEVLINK_TRAP_GENERIC_ID_ESP_PARSING,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -739,6 +869,7 @@ enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_PTP_GENERAL,
DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_SAMPLE,
DEVLINK_TRAP_GROUP_GENERIC_ID_ACL_TRAP,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_PARSER_ERROR_DROPS,
/* Add new generic trap group IDs above */
__DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
@@ -894,6 +1025,39 @@ enum devlink_trap_group_generic_id {
"flow_action_trap"
#define DEVLINK_TRAP_GENERIC_NAME_EARLY_DROP \
"early_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_VXLAN_PARSING \
+ "vxlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_LLC_SNAP_PARSING \
+ "llc_snap_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_VLAN_PARSING \
+ "vlan_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_PPPOE_PPP_PARSING \
+ "pppoe_ppp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_MPLS_PARSING \
+ "mpls_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ARP_PARSING \
+ "arp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_1_PARSING \
+ "ip_1_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IP_N_PARSING \
+ "ip_n_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GRE_PARSING \
+ "gre_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_UDP_PARSING \
+ "udp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_TCP_PARSING \
+ "tcp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_IPSEC_PARSING \
+ "ipsec_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_SCTP_PARSING \
+ "sctp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_DCCP_PARSING \
+ "dccp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_GTP_PARSING \
+ "gtp_parsing"
+#define DEVLINK_TRAP_GENERIC_NAME_ESP_PARSING \
+ "esp_parsing"
+
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -945,6 +1109,8 @@ enum devlink_trap_group_generic_id {
"acl_sample"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_ACL_TRAP \
"acl_trap"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_PARSER_ERROR_DROPS \
+ "parser_error_drops"
#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group_id, \
_metadata_cap) \
@@ -991,9 +1157,20 @@ enum devlink_trap_group_generic_id {
}
struct devlink_ops {
+ /**
+ * @supported_flash_update_params:
+ * mask of parameters supported by the driver's .flash_update
+ * implemementation.
+ */
+ u32 supported_flash_update_params;
+ unsigned long reload_actions;
+ unsigned long reload_limits;
int (*reload_down)(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
struct netlink_ext_ack *extack);
- int (*reload_up)(struct devlink *devlink,
+ int (*reload_up)(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
@@ -1051,8 +1228,15 @@ struct devlink_ops {
struct netlink_ext_ack *extack);
int (*info_get)(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack);
- int (*flash_update)(struct devlink *devlink, const char *file_name,
- const char *component,
+ /**
+ * @flash_update: Device flash update function
+ *
+ * Used to perform a flash update for the device. The set of
+ * parameters supported by the driver should be set in
+ * supported_flash_update_params.
+ */
+ int (*flash_update)(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
struct netlink_ext_ack *extack);
/**
* @trap_init: Trap initialization function.
@@ -1098,6 +1282,16 @@ struct devlink_ops {
const struct devlink_trap_policer *policer,
struct netlink_ext_ack *extack);
/**
+ * @trap_group_action_set: Trap group action set function.
+ *
+ * If this callback is populated, it will take precedence over looping
+ * over all traps in a group and calling .trap_action_set().
+ */
+ int (*trap_group_action_set)(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack);
+ /**
* @trap_policer_init: Trap policer initialization function.
*
* Should be used by device drivers to initialize the trap policer in
@@ -1203,9 +1397,10 @@ void devlink_port_type_ib_set(struct devlink_port *devlink_port,
void devlink_port_type_clear(struct devlink_port *devlink_port);
void devlink_port_attrs_set(struct devlink_port *devlink_port,
struct devlink_port_attrs *devlink_port_attrs);
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf);
-void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
- u16 pf, u16 vf);
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, bool external);
+void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u16 vf, bool external);
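
Callers now pass the controller number and an external flag alongside the
PF/VF numbers; a sketch of the updated call for a local PF port
(controller 0, not external):

    #include <net/devlink.h>

    static void my_set_pf_attrs(struct devlink_port *dl_port, u16 pfnum)
    {
        devlink_port_attrs_pci_pf_set(dl_port, 0, pfnum, false);
    }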
int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u32 size, u16 ingress_pools_count,
u16 egress_pools_count, u16 ingress_tc_count,
@@ -1289,7 +1484,13 @@ struct devlink_region *
devlink_region_create(struct devlink *devlink,
const struct devlink_region_ops *ops,
u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+devlink_port_region_create(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
void devlink_region_destroy(struct devlink_region *region);
+void devlink_port_region_destroy(struct devlink_region *region);
+
int devlink_region_snapshot_id_get(struct devlink *devlink, u32 *id);
void devlink_region_snapshot_id_put(struct devlink *devlink, u32 id);
int devlink_region_snapshot_create(struct devlink_region *region,
@@ -1371,6 +1572,9 @@ void
devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
bool devlink_is_reload_failed(const struct devlink *devlink);
+void devlink_remote_reload_actions_performed(struct devlink *devlink,
+ enum devlink_reload_limit limit,
+ u32 actions_performed);
void devlink_flash_update_begin_notify(struct devlink *devlink);
void devlink_flash_update_end_notify(struct devlink *devlink);
@@ -1379,6 +1583,10 @@ void devlink_flash_update_status_notify(struct devlink *devlink,
const char *component,
unsigned long done,
unsigned long total);
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout);
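
A sketch of the notification sequence a flash_update implementation would
emit, including the new timeout notification while waiting on hardware
(component name and timings illustrative):

    #include <net/devlink.h>

    static void my_flash_progress(struct devlink *devlink)
    {
        devlink_flash_update_begin_notify(devlink);
        devlink_flash_update_status_notify(devlink, "Flashing",
                                           "fw.mgmt", 0, 100);
        /* ...program pages, updating done/total along the way... */

        /* The device needs up to 30s to reset; tell userspace */
        devlink_flash_update_timeout_notify(devlink, "Resetting device",
                                            "fw.mgmt", 30);
        devlink_flash_update_end_notify(devlink);
    }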
int devlink_traps_register(struct devlink *devlink,
const struct devlink_trap *traps,
diff --git a/include/net/drop_monitor.h b/include/net/drop_monitor.h
deleted file mode 100644
index 3f5b6ddb3179..000000000000
--- a/include/net/drop_monitor.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-
-#ifndef _NET_DROP_MONITOR_H_
-#define _NET_DROP_MONITOR_H_
-
-#include <linux/ktime.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <net/flow_offload.h>
-
-/**
- * struct net_dm_hw_metadata - Hardware-supplied packet metadata.
- * @trap_group_name: Hardware trap group name.
- * @trap_name: Hardware trap name.
- * @input_dev: Input netdevice.
- * @fa_cookie: Flow action user cookie.
- */
-struct net_dm_hw_metadata {
- const char *trap_group_name;
- const char *trap_name;
- struct net_device *input_dev;
- const struct flow_action_cookie *fa_cookie;
-};
-
-#if IS_REACHABLE(CONFIG_NET_DROP_MONITOR)
-void net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata);
-#else
-static inline void
-net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata)
-{
-}
-#endif
-
-#endif /* _NET_DROP_MONITOR_H_ */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 75c8fac82017..35429a140dfa 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -74,8 +74,8 @@ struct dsa_device_ops {
struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt);
- int (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
- int *offset);
+ void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
+ int *offset);
/* Used to determine which traffic should match the DSA filter in
* eth_type_trans, and which, if any, should bypass it and be processed
* as regular on the master net device.
@@ -84,6 +84,13 @@ struct dsa_device_ops {
unsigned int overhead;
const char *name;
enum dsa_tag_protocol proto;
+ /* Some tagging protocols either mangle or shift the destination MAC
+ * address, in which case the DSA master would drop packets on ingress
+ * if what it understands out of the destination MAC address is not in
+ * its RX filter.
+ */
+ bool promisc_on_master;
+ bool tail_tag;
};
/* This structure defines the control interfaces that are overlayed by the
@@ -208,6 +215,7 @@ struct dsa_port {
u8 stp_state;
struct net_device *bridge_dev;
struct devlink_port devlink_port;
+ bool devlink_port_setup;
struct phylink *pl;
struct phylink_config pl_config;
@@ -301,6 +309,14 @@ struct dsa_switch {
*/
bool configure_vlan_while_not_filtering;
+ /* If the switch driver always programs the CPU port as egress tagged
+ * despite the VLAN configuration indicating otherwise, then setting
+	 * @untag_bridge_pvid will force the DSA receive path to pop the bridge's
+	 * default_pvid from VLAN-tagged frames, to offer a consistent behavior
+ * between a vlan_filtering=0 and vlan_filtering=1 bridge device.
+ */
+ bool untag_bridge_pvid;
+
/* In case vlan_filtering_is_global is set, the VLAN awareness state
* should be retrieved from here and not from the per-port settings.
*/
@@ -536,7 +552,8 @@ struct dsa_switch_ops {
* VLAN support
*/
int (*port_vlan_filtering)(struct dsa_switch *ds, int port,
- bool vlan_filtering);
+ bool vlan_filtering,
+ struct switchdev_trans *trans);
int (*port_vlan_prepare)(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan);
void (*port_vlan_add)(struct dsa_switch *ds, int port,
@@ -612,11 +629,14 @@ struct dsa_switch_ops {
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
- /* Devlink parameters */
+ /* Devlink parameters, etc */
int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
struct devlink_param_gset_ctx *ctx);
+ int (*devlink_info_get)(struct dsa_switch *ds,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack);
/*
* MTU change functionality. Switches can also adjust their MRU through
@@ -658,12 +678,44 @@ void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
void *occ_get_priv);
void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
u64 resource_id);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+struct devlink_region *
+dsa_devlink_port_region_create(struct dsa_switch *ds,
+ int port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size);
+void dsa_devlink_region_destroy(struct devlink_region *region);
+
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
struct dsa_devlink_priv {
struct dsa_switch *ds;
};
+static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
+{
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline
+struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
+{
+ struct devlink *dl = port->devlink;
+ struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
+
+ return dl_priv->ds;
+}
+
+static inline int dsa_devlink_port_to_port(struct devlink_port *port)
+{
+ return port->index;
+}
+
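
These helpers let devlink callbacks recover the switch they belong to; a
sketch of a region snapshot op using them (region size and register dump
are hypothetical):

    #include <linux/slab.h>
    #include <net/dsa.h>

    #define MY_REGION_SIZE 256	/* hypothetical */

    static int my_region_snapshot(struct devlink *dl,
                                  const struct devlink_region_ops *ops,
                                  struct netlink_ext_ack *extack,
                                  u8 **data)
    {
        struct dsa_switch *ds = dsa_devlink_to_ds(dl);
        u8 *buf;

        buf = kzalloc(MY_REGION_SIZE, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        /* fill buf with switch state via ds->priv here */
        *data = buf;
        return 0;
    }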
struct dsa_switch_driver {
struct list_head list;
const struct dsa_switch_ops *ops;
@@ -689,6 +741,32 @@ static inline bool dsa_can_decode(const struct sk_buff *skb,
return false;
}
+/* All DSA tags that push the EtherType to the right (basically all except tail
+ * tags, which don't break dissection) can be treated the same from the
+ * perspective of the flow dissector.
+ *
+ * We need to return:
+ * - offset: the (B - A) difference between:
+ * A. the position of the real EtherType and
+ * B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
+ * after the normal EtherType was supposed to be)
+ * The offset in bytes is exactly equal to the tagger overhead (and half of
+ * that, in __be16 shorts).
+ *
+ * - proto: the value of the real EtherType.
+ */
+static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
+ __be16 *proto, int *offset)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
+ int tag_len = ops->overhead;
+
+ *offset = tag_len;
+ *proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
+#endif
+}
+
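
A tagger that prepends its tag ahead of the EtherType can then plug the
generic helper straight into its ops; a sketch (tagger name, protocol and
tag length hypothetical):

    #include <net/dsa.h>

    static const struct dsa_device_ops my_tag_ops = {
        .name         = "mytag",
        .proto        = DSA_TAG_PROTO_NONE,	/* placeholder */
        .overhead     = 4,			/* 4-byte tag */
        .flow_dissect = dsa_tag_generic_flow_dissect,
        /* .xmit and .rcv elided */
    };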
#if IS_ENABLED(CONFIG_NET_DSA)
static inline int __dsa_netdevice_ops_check(struct net_device *dev)
{
diff --git a/include/net/dst.h b/include/net/dst.h
index 6ae2e625050d..8ea8812b0b41 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -214,7 +214,7 @@ dst_allfrag(const struct dst_entry *dst)
static inline int
dst_metric_locked(const struct dst_entry *dst, int metric)
{
- return dst_metric(dst, RTAX_LOCK) & (1<<metric);
+ return dst_metric(dst, RTAX_LOCK) & (1 << metric);
}
static inline void dst_hold(struct dst_entry *dst)
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 8899d7429ccb..e55ec1597ce7 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -41,6 +41,8 @@ struct genl_info;
* (private)
* @ops: the operations supported by this family
* @n_ops: number of operations supported by this family
+ * @small_ops: the small-struct operations supported by this family
+ * @n_small_ops: number of small-struct operations supported by this family
*/
struct genl_family {
int id; /* private */
@@ -48,8 +50,12 @@ struct genl_family {
char name[GENL_NAMSIZ];
unsigned int version;
unsigned int maxattr;
- bool netnsok;
- bool parallel_ops;
+ unsigned int mcgrp_offset; /* private */
+ u8 netnsok:1;
+ u8 parallel_ops:1;
+ u8 n_ops;
+ u8 n_small_ops;
+ u8 n_mcgrps;
const struct nla_policy *policy;
int (*pre_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
@@ -58,10 +64,8 @@ struct genl_family {
struct sk_buff *skb,
struct genl_info *info);
const struct genl_ops * ops;
+ const struct genl_small_ops *small_ops;
const struct genl_multicast_group *mcgrps;
- unsigned int n_ops;
- unsigned int n_mcgrps;
- unsigned int mcgrp_offset; /* private */
struct module *module;
};
@@ -101,14 +105,6 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
-static inline int genl_err_attr(struct genl_info *info, int err,
- const struct nlattr *attr)
-{
- info->extack->bad_attr = attr;
-
- return err;
-}
-
enum genl_validate_flags {
GENL_DONT_VALIDATE_STRICT = BIT(0),
GENL_DONT_VALIDATE_DUMP = BIT(1),
@@ -116,28 +112,33 @@ enum genl_validate_flags {
};
/**
- * struct genl_info - info that is available during dumpit op call
- * @family: generic netlink family - for internal genl code usage
- * @ops: generic netlink ops - for internal genl code usage
- * @attrs: netlink attributes
+ * struct genl_small_ops - generic netlink operations (small version)
+ * @cmd: command identifier
+ * @internal_flags: flags used by the family
+ * @flags: flags
+ * @validate: validation flags from enum genl_validate_flags
+ * @doit: standard command callback
+ * @dumpit: callback for dumpers
+ *
+ * This is a cut-down version of struct genl_ops for users who don't need
+ * most of the ancillary infrastructure and want to save space.
*/
-struct genl_dumpit_info {
- const struct genl_family *family;
- const struct genl_ops *ops;
- struct nlattr **attrs;
+struct genl_small_ops {
+ int (*doit)(struct sk_buff *skb, struct genl_info *info);
+ int (*dumpit)(struct sk_buff *skb, struct netlink_callback *cb);
+ u8 cmd;
+ u8 internal_flags;
+ u8 flags;
+ u8 validate;
};
-static inline const struct genl_dumpit_info *
-genl_dumpit_info(struct netlink_callback *cb)
-{
- return cb->data;
-}
-
/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
* @flags: flags
+ * @maxattr: maximum number of attributes supported
+ * @policy: netlink policy (takes precedence over family policy)
* @validate: validation flags from enum genl_validate_flags
* @doit: standard command callback
* @start: start callback for dumps
@@ -151,12 +152,32 @@ struct genl_ops {
int (*dumpit)(struct sk_buff *skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
+ const struct nla_policy *policy;
+ unsigned int maxattr;
u8 cmd;
u8 internal_flags;
u8 flags;
u8 validate;
};
+/**
+ * struct genl_dumpit_info - info that is available during dumpit op call
+ * @family: generic netlink family - for internal genl code usage
+ * @op: generic netlink op - for internal genl code usage
+ * @attrs: netlink attributes
+ */
+struct genl_dumpit_info {
+ const struct genl_family *family;
+ struct genl_ops op;
+ struct nlattr **attrs;
+};
+
+static inline const struct genl_dumpit_info *
+genl_dumpit_info(struct netlink_callback *cb)
+{
+ return cb->data;
+}
+
int genl_register_family(struct genl_family *family);
int genl_unregister_family(const struct genl_family *family);
void genl_notify(const struct genl_family *family, struct sk_buff *skb,
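As a rough sketch of the new split, a family that only needs plain doit callbacks can register through the small_ops array; every demo_* name below is hypothetical.

static int demo_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* fill in a reply here */
}

static const struct genl_small_ops demo_small_ops[] = {
	{
		.cmd	  = 1,	/* hypothetical DEMO_CMD_GET */
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	  = demo_get_doit,
	},
};

static struct genl_family demo_family = {
	.name	     = "demo",
	.version     = 1,
	.maxattr     = 0,
	.small_ops   = demo_small_ops,
	.n_small_ops = ARRAY_SIZE(demo_small_ops),
	.module	     = THIS_MODULE,
};

Registration itself is unchanged: genl_register_family(&demo_family).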
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index aa8893c68c50..7338b3865a2a 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -86,6 +86,8 @@ struct inet_connection_sock {
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
+ __u32 icsk_rto_min;
+ __u32 icsk_delack_max;
__u32 icsk_pmtu_cookie;
const struct tcp_congestion_ops *icsk_ca_ops;
const struct inet_connection_sock_af_ops *icsk_af_ops;
@@ -94,7 +96,8 @@ struct inet_connection_sock {
void (*icsk_clean_acked)(struct sock *sk, u32 acked_seq);
struct hlist_node icsk_listen_portaddr_node;
unsigned int (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
- __u8 icsk_ca_state:6,
+ __u8 icsk_ca_state:5,
+ icsk_ca_initialized:1,
icsk_ca_setsockopt:1,
icsk_ca_dst_locked:1;
__u8 icsk_retransmits;
@@ -107,7 +110,7 @@ struct inet_connection_sock {
__u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */
- __u8 blocked; /* Delayed ACK was blocked by socket lock */
+ __u8 retry; /* Number of attempts */
__u32 ato; /* Predicted tick of soft clock */
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */
@@ -195,7 +198,8 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif
} else if (what == ICSK_TIME_DACK) {
- icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.pending = 0;
+ icsk->icsk_ack.retry = 0;
#ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a3702d1d4875..89163ef8cf4b 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -296,13 +296,6 @@ static inline void __inet_sk_copy_descendant(struct sock *sk_to,
memcpy(inet_sk(sk_to) + 1, inet_sk(sk_from) + 1,
sk_from->sk_prot->obj_size - ancestor_size);
}
-#if !(IS_ENABLED(CONFIG_IPV6))
-static inline void inet_sk_copy_descendant(struct sock *sk_to,
- const struct sock *sk_from)
-{
- __inet_sk_copy_descendant(sk_to, sk_from, sizeof(struct inet_sock));
-}
-#endif
int inet_sk_rebuild_header(struct sock *sk);
diff --git a/include/net/ip.h b/include/net/ip.h
index 2a52787db64a..2d6b985d11cc 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -151,7 +151,7 @@ int igmp_mc_init(void);
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
__be32 saddr, __be32 daddr,
- struct ip_options_rcu *opt);
+ struct ip_options_rcu *opt, u8 tos);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9a59a33787cb..d609e957a3ec 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -25,9 +25,6 @@
#include <linux/ip.h>
#include <linux/ipv6.h> /* for struct ipv6hdr */
#include <net/ipv6.h>
-#if IS_ENABLED(CONFIG_IP_VS_IPV6)
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#endif
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
#endif
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index d7a7f7c81e7b..8fce558b5fea 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -63,6 +63,9 @@ struct ipv6_stub {
int encap_type);
#endif
struct neigh_table *nd_tbl;
+
+ int (*ipv6_fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *));
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 66e2bfd165e8..e8e295dae744 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -317,6 +317,9 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
* @BSS_CHANGED_HE_BSS_COLOR: BSS Color has changed
+ * @BSS_CHANGED_FILS_DISCOVERY: FILS discovery status changed.
+ * @BSS_CHANGED_UNSOL_BCAST_PROBE_RESP: Unsolicited broadcast probe response
+ * status changed.
*
*/
enum ieee80211_bss_change {
@@ -350,6 +353,8 @@ enum ieee80211_bss_change {
BSS_CHANGED_TWT = 1<<27,
BSS_CHANGED_HE_OBSS_PD = 1<<28,
BSS_CHANGED_HE_BSS_COLOR = 1<<29,
+ BSS_CHANGED_FILS_DISCOVERY = 1<<30,
+ BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = 1<<31,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -491,6 +496,18 @@ struct ieee80211_ftm_responder_params {
};
/**
+ * struct ieee80211_fils_discovery - FILS discovery parameters from
+ * IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @min_interval: Minimum packet interval in TUs (0 - 10000)
+ * @max_interval: Maximum packet interval in TUs (0 - 10000)
+ */
+struct ieee80211_fils_discovery {
+ u32 min_interval;
+ u32 max_interval;
+};
+
+/**
* struct ieee80211_bss_conf - holds the BSS's changing parameters
*
* This structure keeps information about a BSS (and an association
@@ -607,6 +624,12 @@ struct ieee80211_ftm_responder_params {
* @he_oper: HE operation information of the AP we are connected to
* @he_obss_pd: OBSS Packet Detection parameters.
* @he_bss_color: BSS coloring settings, if BSS supports HE
+ * @fils_discovery: FILS discovery configuration
+ * @unsol_bcast_probe_resp_interval: Unsolicited broadcast probe response
+ * interval.
+ * @s1g: BSS is S1G BSS (affects Association Request format).
+ * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed
+ * to the driver when rate control is offloaded to firmware.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -674,6 +697,10 @@ struct ieee80211_bss_conf {
} he_oper;
struct ieee80211_he_obss_pd he_obss_pd;
struct cfg80211_he_bss_color he_bss_color;
+ struct ieee80211_fils_discovery fils_discovery;
+ u32 unsol_bcast_probe_resp_interval;
+ bool s1g;
+ struct cfg80211_bitrate_mask beacon_tx_rate;
};
/**
@@ -720,9 +747,8 @@ struct ieee80211_bss_conf {
* @IEEE80211_TX_INTFL_OFFCHAN_TX_OK: Internal to mac80211. Used to indicate
* that a frame can be transmitted while the queues are stopped for
* off-channel operation.
- * @IEEE80211_TX_INTFL_NEED_TXPROCESSING: completely internal to mac80211,
- * used to indicate that a pending frame requires TX processing before
- * it can be sent out.
+ * @IEEE80211_TX_CTL_HW_80211_ENCAP: This frame uses hardware encapsulation
+ * (header conversion)
* @IEEE80211_TX_INTFL_RETRIED: completely internal to mac80211,
* used to indicate that a frame was already retried due to PS
* @IEEE80211_TX_INTFL_DONT_ENCRYPT: completely internal to mac80211,
@@ -791,7 +817,7 @@ enum mac80211_tx_info_flags {
IEEE80211_TX_STAT_AMPDU_NO_BACK = BIT(11),
IEEE80211_TX_CTL_RATE_CTRL_PROBE = BIT(12),
IEEE80211_TX_INTFL_OFFCHAN_TX_OK = BIT(13),
- IEEE80211_TX_INTFL_NEED_TXPROCESSING = BIT(14),
+ IEEE80211_TX_CTL_HW_80211_ENCAP = BIT(14),
IEEE80211_TX_INTFL_RETRIED = BIT(15),
IEEE80211_TX_INTFL_DONT_ENCRYPT = BIT(16),
IEEE80211_TX_CTL_NO_PS_BUFFER = BIT(17),
@@ -812,6 +838,8 @@ enum mac80211_tx_info_flags {
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+#define IEEE80211_TX_RC_S1G_MCS IEEE80211_TX_RC_VHT_MCS
+
/**
* enum mac80211_tx_control_flags - flags to describe transmit control
*
@@ -823,8 +851,9 @@ enum mac80211_tx_info_flags {
* @IEEE80211_TX_CTRL_AMSDU: This frame is an A-MSDU frame
* @IEEE80211_TX_CTRL_FAST_XMIT: This frame is going through the fast_xmit path
* @IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP: This frame skips mesh path lookup
- * @IEEE80211_TX_CTRL_HW_80211_ENCAP: This frame uses hardware encapsulation
- * (header conversion)
+ * @IEEE80211_TX_INTCFL_NEED_TXPROCESSING: completely internal to mac80211,
+ * used to indicate that a pending frame requires TX processing before
+ * it can be sent out.
* @IEEE80211_TX_CTRL_NO_SEQNO: Do not overwrite the sequence number that
* has already been assigned to this frame.
*
@@ -837,7 +866,7 @@ enum mac80211_tx_control_flags {
IEEE80211_TX_CTRL_AMSDU = BIT(3),
IEEE80211_TX_CTRL_FAST_XMIT = BIT(4),
IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP = BIT(5),
- IEEE80211_TX_CTRL_HW_80211_ENCAP = BIT(6),
+ IEEE80211_TX_INTCFL_NEED_TXPROCESSING = BIT(6),
IEEE80211_TX_CTRL_NO_SEQNO = BIT(7),
};
@@ -1002,7 +1031,8 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @status.ampdu_ack_len: AMPDU ack length
* @status.ampdu_len: AMPDU length
* @status.antenna: (legacy, kept only for iwlegacy)
- * @status.tx_time: airtime consumed for transmission
+ * @status.tx_time: airtime consumed for transmission; note this is only
+ * used for WMM AC, not for airtime fairness
* @status.is_valid_ack_signal: ACK signal is valid
* @status.status_driver_data: driver use area
* @ack: union part for pure ACK data
@@ -1095,12 +1125,14 @@ ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
* @info: Basic tx status information
* @skb: Packet skb (can be NULL if not provided by the driver)
* @rate: The TX rate that was used when sending the packet
+ * @free_list: list where processed skbs are stored to be freed by the driver
*/
struct ieee80211_tx_status {
struct ieee80211_sta *sta;
struct ieee80211_tx_info *info;
struct sk_buff *skb;
struct rate_info *rate;
+ struct list_head *free_list;
};
/**
@@ -1606,6 +1638,21 @@ enum ieee80211_vif_flags {
IEEE80211_VIF_GET_NOA_UPDATE = BIT(3),
};
+
+/**
+ * enum ieee80211_offload_flags - virtual interface offload flags
+ *
+ * @IEEE80211_OFFLOAD_ENCAP_ENABLED: tx encapsulation offload is enabled.
+ * The driver supports sending frames passed as 802.3 frames by mac80211.
+ * It must also support sending 802.11 packets for the same interface.
+ * @IEEE80211_OFFLOAD_ENCAP_4ADDR: support 4-address mode encapsulation offload
+ */
+enum ieee80211_offload_flags {
+ IEEE80211_OFFLOAD_ENCAP_ENABLED = BIT(0),
+ IEEE80211_OFFLOAD_ENCAP_4ADDR = BIT(1),
+};
+
/**
* struct ieee80211_vif - per-interface data
*
@@ -1626,6 +1673,11 @@ enum ieee80211_vif_flags {
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
* at runtime, mac80211 will never touch this field
+ * @offload_flags: hardware offload capabilities/flags for this interface.
+ * These are initialized by mac80211 before calling .add_interface,
+ * .change_interface or .update_vif_offload and updated by the driver
+ * within these ops, based on supported features or runtime change
+ * restrictions.
* @hw_queue: hardware queue for each AC
* @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
* @chanctx_conf: The channel context this interface is assigned to, or %NULL
@@ -1645,6 +1697,8 @@ enum ieee80211_vif_flags {
* @txq: the multicast data TX queue (if driver uses the TXQ abstraction)
* @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
* protected by fq->lock.
+ * @offload_flags: 802.3 -> 802.11 encapsulation offload flags, see
+ * &enum ieee80211_offload_flags.
*/
struct ieee80211_vif {
enum nl80211_iftype type;
@@ -1662,6 +1716,7 @@ struct ieee80211_vif {
struct ieee80211_chanctx_conf __rcu *chanctx_conf;
u32 driver_flags;
+ u32 offload_flags;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *debugfs_dir;
@@ -2328,6 +2383,9 @@ struct ieee80211_txq {
* aggregating MPDUs with the same keyid, allowing mac80211 to keep Tx
* A-MPDU sessions active while rekeying with Extended Key ID.
*
+ * @IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD: Hardware supports tx encapsulation
+ * offload
+ *
* @NUM_IEEE80211_HW_FLAGS: number of hardware flags, used for sizing arrays
*/
enum ieee80211_hw_flags {
@@ -2380,6 +2438,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_MULTI_BSSID,
IEEE80211_HW_SUPPORTS_ONLY_HE_MULTI_BSSID,
IEEE80211_HW_AMPDU_KEYBORDER_SUPPORT,
+ IEEE80211_HW_SUPPORTS_TX_ENCAP_OFFLOAD,
/* keep last, obviously */
NUM_IEEE80211_HW_FLAGS
@@ -3736,7 +3795,7 @@ enum ieee80211_reconfig_type {
* decremented, and when they reach 1 the driver must call
* ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
* get the csa counter decremented by mac80211, but must check if it is
- * 1 using ieee80211_csa_is_complete() after the beacon has been
+ * 1 using ieee80211_beacon_cntdwn_is_complete() after the beacon has been
* transmitted and then call ieee80211_csa_finish().
* If the CSA count starts as zero or 1, this function will not be called,
* since there won't be any time to beacon before the switch anyway.
@@ -3814,6 +3873,10 @@ enum ieee80211_reconfig_type {
* @set_tid_config: Apply TID specific configurations. This callback may sleep.
* @reset_tid_config: Reset TID specific configuration for the peer.
* This callback may sleep.
+ * @update_vif_offload: Update virtual interface offload flags
+ * This callback may sleep.
+ * @sta_set_4addr: Called to notify the driver when a station starts/stops using
+ * 4-address mode
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -4125,6 +4188,10 @@ struct ieee80211_ops {
int (*reset_tid_config)(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u8 tids);
+ void (*update_vif_offload)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+ void (*sta_set_4addr)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled);
};
/**
@@ -4763,21 +4830,21 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
*/
void ieee80211_report_low_ack(struct ieee80211_sta *sta, u32 num_packets);
-#define IEEE80211_MAX_CSA_COUNTERS_NUM 2
+#define IEEE80211_MAX_CNTDWN_COUNTERS_NUM 2
/**
* struct ieee80211_mutable_offsets - mutable beacon offsets
* @tim_offset: position of TIM element
* @tim_length: size of TIM element
- * @csa_counter_offs: array of IEEE80211_MAX_CSA_COUNTERS_NUM offsets
- * to CSA counters. This array can contain zero values which
+ * @cntdwn_counter_offs: array of IEEE80211_MAX_CNTDWN_COUNTERS_NUM offsets
+ * to countdown counters. This array can contain zero values which
* should be ignored.
*/
struct ieee80211_mutable_offsets {
u16 tim_offset;
u16 tim_length;
- u16 csa_counter_offs[IEEE80211_MAX_CSA_COUNTERS_NUM];
+ u16 cntdwn_counter_offs[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
};
/**
@@ -4846,31 +4913,31 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
}
/**
- * ieee80211_csa_update_counter - request mac80211 to decrement the csa counter
+ * ieee80211_beacon_update_cntdwn - request mac80211 to decrement the beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * The csa counter should be updated after each beacon transmission.
+ * The beacon counter should be updated after each beacon transmission.
* This function is called implicitly when
* ieee80211_beacon_get/ieee80211_beacon_get_tim are called, however if the
* beacon frames are generated by the device, the driver should call this
- * function after each beacon transmission to sync mac80211's csa counters.
+ * function after each beacon transmission to sync mac80211's beacon countdown.
*
- * Return: new csa counter value
+ * Return: new countdown value
*/
-u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif);
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_set_counter - request mac80211 to set csa counter
+ * ieee80211_beacon_set_cntdwn - request mac80211 to set beacon countdown
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
* @counter: the new value for the counter
*
- * The csa counter can be changed by the device, this API should be
+ * The beacon countdown can be changed by the device, this API should be
* used by the device driver to update csa counter in mac80211.
*
- * It should never be used together with ieee80211_csa_update_counter(),
+ * It should never be used together with ieee80211_beacon_update_cntdwn(),
* as it will cause a race condition around the counter value.
*/
-void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
+void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter);
/**
* ieee80211_csa_finish - notify mac80211 about channel switch
@@ -4883,13 +4950,12 @@ void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter);
void ieee80211_csa_finish(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_is_complete - find out if counters reached 1
+ * ieee80211_beacon_cntdwn_is_complete - find out if countdown reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
- * This function returns whether the channel switch counters reached zero.
+ * This function returns whether the countdown reached 1.
*/
-bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
-
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif);
/**
* ieee80211_proberesp_get - retrieve a Probe Response template
@@ -5344,11 +5410,15 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw);
* @IEEE80211_IFACE_ITER_RESUME_ALL: During resume, iterate over all
* interfaces, even if they haven't been re-added to the driver yet.
* @IEEE80211_IFACE_ITER_ACTIVE: Iterate only active interfaces (netdev is up).
+ * @IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER: Skip any interfaces where SDATA
+ * is not in the driver. This can, for instance, avoid crashes during
+ * firmware recovery.
*/
enum ieee80211_interface_iteration_flags {
IEEE80211_IFACE_ITER_NORMAL = 0,
IEEE80211_IFACE_ITER_RESUME_ALL = BIT(0),
IEEE80211_IFACE_ITER_ACTIVE = BIT(1),
+ IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER = BIT(2),
};
/**
@@ -5648,7 +5718,7 @@ void ieee80211_send_eosp_nullfunc(struct ieee80211_sta *pubsta, int tid);
/**
* ieee80211_sta_register_airtime - register airtime usage for a sta/tid
*
- * Register airtime usage for a given sta on a given tid. The driver can call
+ * Register airtime usage for a given sta on a given tid. The driver must call
* this function to notify mac80211 that a station used a certain amount of
* airtime. This information will be used by the TXQ scheduler to schedule
* stations in a way that ensures airtime fairness.
@@ -6594,4 +6664,29 @@ u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
*/
bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable);
+/**
+ * ieee80211_get_fils_discovery_tmpl - Get FILS discovery template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: FILS discovery template. %NULL on error.
+ */
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_get_unsol_bcast_probe_resp_tmpl - Get unsolicited broadcast
+ * probe response template.
+ * @hw: pointer obtained from ieee80211_alloc_hw().
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * The driver is responsible for freeing the returned skb.
+ *
+ * Return: Unsolicited broadcast probe response template. %NULL on error.
+ */
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
#endif /* MAC80211_H */
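A driver that offloads beaconing might consume the two new template getters from its bss_info_changed callback; the demo_* function below is a sketch under that assumption, not an in-tree driver.

static void demo_bss_info_changed(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  struct ieee80211_bss_conf *info,
				  u32 changed)
{
	struct sk_buff *tmpl;

	if (changed & BSS_CHANGED_FILS_DISCOVERY) {
		tmpl = ieee80211_get_fils_discovery_tmpl(hw, vif);
		if (tmpl) {
			/* push the template to firmware/hardware here */
			dev_kfree_skb(tmpl);	/* driver must free it */
		}
	}
	if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP) {
		tmpl = ieee80211_get_unsol_bcast_probe_resp_tmpl(hw, vif);
		if (tmpl) {
			/* likewise */
			dev_kfree_skb(tmpl);
		}
	}
}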
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 3525d2822abe..753ba7e755d6 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -85,8 +85,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
unsigned int *size, unsigned int remaining,
struct mptcp_out_options *opts);
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
- struct tcp_options_received *opt_rx);
+void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb);
void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);
@@ -185,8 +184,7 @@ static inline bool mptcp_established_options(struct sock *sk,
}
static inline void mptcp_incoming_options(struct sock *sk,
- struct sk_buff *skb,
- struct tcp_options_received *opt_rx)
+ struct sk_buff *skb)
{
}
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2ee5901bec7a..22bc07f4b043 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -230,7 +230,7 @@ extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int fd);
-u64 net_gen_cookie(struct net *net);
+u64 __net_gen_cookie(struct net *net);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 0d3920896d50..716db4a0fed8 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -108,6 +108,7 @@ int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
unsigned int logflags);
void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
struct sock *sk);
+void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 224d194ad29d..3f7e56b1171e 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -14,6 +14,8 @@
#include <net/netlink.h>
#include <net/flow_offload.h>
+#define NFT_MAX_HOOKS (NF_INET_INGRESS + 1)
+
struct module;
#define NFT_JUMP_STACK_SIZE 16
@@ -148,13 +150,6 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
memcpy(dst, src, len);
}
-static inline void nft_data_debug(const struct nft_data *data)
-{
- pr_debug("data[0]=%x data[1]=%x data[2]=%x data[3]=%x\n",
- data->data[0], data->data[1],
- data->data[2], data->data[3]);
-}
-
/**
* struct nft_ctx - nf_tables rule/set context
*
@@ -952,6 +947,8 @@ struct nft_chain {
bound:1,
genmask:2;
char *name;
+ u16 udlen;
+ u8 *udata;
/* Only used during control plane commit phase: */
struct nft_rule **rules_next;
@@ -984,7 +981,7 @@ struct nft_chain_type {
int family;
struct module *owner;
unsigned int hook_mask;
- nf_hookfn *hooks[NF_MAX_HOOKS];
+ nf_hookfn *hooks[NFT_MAX_HOOKS];
int (*ops_register)(struct net *net, const struct nf_hook_ops *ops);
void (*ops_unregister)(struct net *net, const struct nf_hook_ops *ops);
};
@@ -1082,8 +1079,16 @@ struct nft_table {
flags:8,
genmask:2;
char *name;
+ u16 udlen;
+ u8 *udata;
};
+static inline bool nft_base_chain_netdev(int family, u32 hooknum)
+{
+ return family == NFPROTO_NETDEV ||
+ (family == NFPROTO_INET && hooknum == NF_INET_INGRESS);
+}
+
void nft_register_chain_type(const struct nft_chain_type *);
void nft_unregister_chain_type(const struct nft_chain_type *);
@@ -1123,6 +1128,8 @@ struct nft_object {
u32 genmask:2,
use:30;
u64 handle;
+ u16 udlen;
+ u8 *udata;
/* runtime data below here */
const struct nft_object_ops *ops ____cacheline_aligned;
unsigned char data[]
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 78516de14d31..8657e6815b07 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -23,10 +23,19 @@ extern struct nft_object_type nft_secmark_obj_type;
int nf_tables_core_module_init(void);
void nf_tables_core_module_exit(void);
+struct nft_bitwise_fast_expr {
+ u32 mask;
+ u32 xor;
+ enum nft_registers sreg:8;
+ enum nft_registers dreg:8;
+};
+
struct nft_cmp_fast_expr {
u32 data;
+ u32 mask;
enum nft_registers sreg:8;
u8 len;
+ bool inv;
};
struct nft_immediate_expr {
@@ -66,6 +75,8 @@ struct nft_payload_set {
extern const struct nft_expr_ops nft_payload_fast_ops;
+extern const struct nft_expr_ops nft_bitwise_fast_ops;
+
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
diff --git a/include/net/netfilter/nf_tables_ipv4.h b/include/net/netfilter/nf_tables_ipv4.h
index ed7b511f0a59..1f7bea39ad1b 100644
--- a/include/net/netfilter/nf_tables_ipv4.h
+++ b/include/net/netfilter/nf_tables_ipv4.h
@@ -53,4 +53,37 @@ static inline void nft_set_pktinfo_ipv4_validate(struct nft_pktinfo *pkt,
nft_set_pktinfo_unspec(pkt, skb);
}
+static inline int nft_set_pktinfo_ipv4_ingress(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+ struct iphdr *iph;
+ u32 len, thoff;
+
+ if (!pskb_may_pull(skb, sizeof(*iph)))
+ return -1;
+
+ iph = ip_hdr(skb);
+ if (iph->ihl < 5 || iph->version != 4)
+ goto inhdr_error;
+
+ len = ntohs(iph->tot_len);
+ thoff = iph->ihl * 4;
+ if (skb->len < len) {
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ } else if (len < thoff) {
+ goto inhdr_error;
+ }
+
+ pkt->tprot_set = true;
+ pkt->tprot = iph->protocol;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = ntohs(iph->frag_off) & IP_OFFSET;
+
+ return 0;
+
+inhdr_error:
+ __IP_INC_STATS(nft_net(pkt), IPSTATS_MIB_INHDRERRORS);
+ return -1;
+}
#endif
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h
index d0f1c537b017..867de29f3f7a 100644
--- a/include/net/netfilter/nf_tables_ipv6.h
+++ b/include/net/netfilter/nf_tables_ipv6.h
@@ -70,4 +70,50 @@ static inline void nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt,
nft_set_pktinfo_unspec(pkt, skb);
}
+static inline int nft_set_pktinfo_ipv6_ingress(struct nft_pktinfo *pkt,
+ struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ unsigned int flags = IP6_FH_F_AUTH;
+ unsigned short frag_off;
+ unsigned int thoff = 0;
+ struct inet6_dev *idev;
+ struct ipv6hdr *ip6h;
+ int protohdr;
+ u32 pkt_len;
+
+ if (!pskb_may_pull(skb, sizeof(*ip6h)))
+ return -1;
+
+ ip6h = ipv6_hdr(skb);
+ if (ip6h->version != 6)
+ goto inhdr_error;
+
+ pkt_len = ntohs(ip6h->payload_len);
+ if (pkt_len + sizeof(*ip6h) > skb->len) {
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INTRUNCATEDPKTS);
+ return -1;
+ }
+
+ protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags);
+ if (protohdr < 0)
+ goto inhdr_error;
+
+ pkt->tprot_set = true;
+ pkt->tprot = protohdr;
+ pkt->xt.thoff = thoff;
+ pkt->xt.fragoff = frag_off;
+
+ return 0;
+
+inhdr_error:
+ idev = __in6_dev_get(nft_in(pkt));
+ __IP6_INC_STATS(nft_net(pkt), idev, IPSTATS_MIB_INHDRERRORS);
+ return -1;
+#else
+ return -1;
+#endif
+}
+
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index 271620f6bc7f..7356f41d23ba 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -181,8 +181,6 @@ enum {
NLA_S64,
NLA_BITFIELD32,
NLA_REJECT,
- NLA_EXACT_LEN,
- NLA_MIN_LEN,
__NLA_TYPE_MAX,
};
@@ -199,11 +197,12 @@ struct netlink_range_validation_signed {
enum nla_policy_validation {
NLA_VALIDATE_NONE,
NLA_VALIDATE_RANGE,
+ NLA_VALIDATE_RANGE_WARN_TOO_LONG,
NLA_VALIDATE_MIN,
NLA_VALIDATE_MAX,
+ NLA_VALIDATE_MASK,
NLA_VALIDATE_RANGE_PTR,
NLA_VALIDATE_FUNCTION,
- NLA_VALIDATE_WARN_TOO_LONG,
};
/**
@@ -222,7 +221,7 @@ enum nla_policy_validation {
* NLA_NUL_STRING Maximum length of string (excluding NUL)
* NLA_FLAG Unused
* NLA_BINARY Maximum length of attribute payload
- * NLA_MIN_LEN Minimum length of attribute payload
+ * (but see also below with the validation type)
* NLA_NESTED,
* NLA_NESTED_ARRAY Length verification is done by checking len of
* nested header (or empty); len field is used if
@@ -237,11 +236,6 @@ enum nla_policy_validation {
* just like "All other"
* NLA_BITFIELD32 Unused
* NLA_REJECT Unused
- * NLA_EXACT_LEN Attribute should have exactly this length, otherwise
- * it is rejected or warned about, the latter happening
- * if and only if the `validation_type' is set to
- * NLA_VALIDATE_WARN_TOO_LONG.
- * NLA_MIN_LEN Minimum length of attribute payload
* All other Minimum length of attribute payload
*
* Meaning of validation union:
@@ -296,6 +290,11 @@ enum nla_policy_validation {
* pointer to a struct netlink_range_validation_signed
* that indicates the min/max values.
* Use NLA_POLICY_FULL_RANGE_SIGNED().
+ *
+ * NLA_BINARY If the validation type is like the ones for integers
+ * above, then the min/max length (not value like for
+ * integers) of the attribute is enforced.
+ *
* All other Unused - but note that it's a union
*
* Meaning of `validate' field, use via NLA_POLICY_VALIDATE_FN:
@@ -309,7 +308,7 @@ enum nla_policy_validation {
* static const struct nla_policy my_policy[ATTR_MAX+1] = {
* [ATTR_FOO] = { .type = NLA_U16 },
* [ATTR_BAR] = { .type = NLA_STRING, .len = BARSIZ },
- * [ATTR_BAZ] = { .type = NLA_EXACT_LEN, .len = sizeof(struct mystruct) },
+ * [ATTR_BAZ] = NLA_POLICY_EXACT_LEN(sizeof(struct mystruct)),
* [ATTR_GOO] = NLA_POLICY_BITFIELD32(myvalidflags),
* };
*/
@@ -319,6 +318,7 @@ struct nla_policy {
u16 len;
union {
const u32 bitfield32_valid;
+ const u32 mask;
const char *reject_message;
const struct nla_policy *nested_policy;
struct netlink_range_validation *range;
@@ -335,9 +335,10 @@ struct nla_policy {
* nesting validation starts here.
*
* Additionally, it means that NLA_UNSPEC is actually NLA_REJECT
- * for any types >= this, so need to use NLA_MIN_LEN to get the
- * previous pure { .len = xyz } behaviour. The advantage of this
- * is that types not specified in the policy will be rejected.
+ * for any types >= this, so need to use NLA_POLICY_MIN_LEN() to
+ * get the previous pure { .len = xyz } behaviour. The advantage
+ * of this is that types not specified in the policy will be
+ * rejected.
*
* For completely new families it should be set to 1 so that the
* validation is enforced for all attributes. For existing ones
@@ -349,12 +350,6 @@ struct nla_policy {
};
};
-#define NLA_POLICY_EXACT_LEN(_len) { .type = NLA_EXACT_LEN, .len = _len }
-#define NLA_POLICY_EXACT_LEN_WARN(_len) \
- { .type = NLA_EXACT_LEN, .len = _len, \
- .validation_type = NLA_VALIDATE_WARN_TOO_LONG, }
-#define NLA_POLICY_MIN_LEN(_len) { .type = NLA_MIN_LEN, .len = _len }
-
#define NLA_POLICY_ETH_ADDR NLA_POLICY_EXACT_LEN(ETH_ALEN)
#define NLA_POLICY_ETH_ADDR_COMPAT NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN)
@@ -369,20 +364,25 @@ struct nla_policy {
#define NLA_POLICY_BITFIELD32(valid) \
{ .type = NLA_BITFIELD32, .bitfield32_valid = valid }
+#define __NLA_IS_UINT_TYPE(tp) \
+ (tp == NLA_U8 || tp == NLA_U16 || tp == NLA_U32 || tp == NLA_U64)
+#define __NLA_IS_SINT_TYPE(tp) \
+ (tp == NLA_S8 || tp == NLA_S16 || tp == NLA_S32 || tp == NLA_S64)
+
#define __NLA_ENSURE(condition) BUILD_BUG_ON_ZERO(!(condition))
#define NLA_ENSURE_UINT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_U8 || tp == NLA_U16 || \
- tp == NLA_U32 || tp == NLA_U64 || \
- tp == NLA_MSECS) + tp)
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_UINT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
#define NLA_ENSURE_SINT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_S16 || \
- tp == NLA_S32 || tp == NLA_S64) + tp)
-#define NLA_ENSURE_INT_TYPE(tp) \
- (__NLA_ENSURE(tp == NLA_S8 || tp == NLA_U8 || \
- tp == NLA_S16 || tp == NLA_U16 || \
- tp == NLA_S32 || tp == NLA_U32 || \
- tp == NLA_S64 || tp == NLA_U64 || \
- tp == NLA_MSECS) + tp)
+ (__NLA_ENSURE(__NLA_IS_SINT_TYPE(tp)) + tp)
+#define NLA_ENSURE_INT_OR_BINARY_TYPE(tp) \
+ (__NLA_ENSURE(__NLA_IS_UINT_TYPE(tp) || \
+ __NLA_IS_SINT_TYPE(tp) || \
+ tp == NLA_MSECS || \
+ tp == NLA_BINARY) + tp)
#define NLA_ENSURE_NO_VALIDATION_PTR(tp) \
(__NLA_ENSURE(tp != NLA_BITFIELD32 && \
tp != NLA_REJECT && \
@@ -390,14 +390,14 @@ struct nla_policy {
tp != NLA_NESTED_ARRAY) + tp)
#define NLA_POLICY_RANGE(tp, _min, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE, \
.min = _min, \
.max = _max \
}
#define NLA_POLICY_FULL_RANGE(tp, _range) { \
- .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .type = NLA_ENSURE_UINT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_RANGE_PTR, \
.range = _range, \
}
@@ -409,17 +409,23 @@ struct nla_policy {
}
#define NLA_POLICY_MIN(tp, _min) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MIN, \
.min = _min, \
}
#define NLA_POLICY_MAX(tp, _max) { \
- .type = NLA_ENSURE_INT_TYPE(tp), \
+ .type = NLA_ENSURE_INT_OR_BINARY_TYPE(tp), \
.validation_type = NLA_VALIDATE_MAX, \
.max = _max, \
}
+#define NLA_POLICY_MASK(tp, _mask) { \
+ .type = NLA_ENSURE_UINT_TYPE(tp), \
+ .validation_type = NLA_VALIDATE_MASK, \
+ .mask = _mask, \
+}
+
#define NLA_POLICY_VALIDATE_FN(tp, fn, ...) { \
.type = NLA_ENSURE_NO_VALIDATION_PTR(tp), \
.validation_type = NLA_VALIDATE_FUNCTION, \
@@ -427,6 +433,15 @@ struct nla_policy {
.len = __VA_ARGS__ + 0, \
}
+#define NLA_POLICY_EXACT_LEN(_len) NLA_POLICY_RANGE(NLA_BINARY, _len, _len)
+#define NLA_POLICY_EXACT_LEN_WARN(_len) { \
+ .type = NLA_BINARY, \
+ .validation_type = NLA_VALIDATE_RANGE_WARN_TOO_LONG, \
+ .min = _len, \
+ .max = _len \
+}
+#define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len)
+
/**
* struct nl_info - netlink source information
* @nlh: Netlink message header of original request
@@ -1931,11 +1946,21 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
void nla_get_range_signed(const struct nla_policy *pt,
struct netlink_range_validation_signed *range);
-int netlink_policy_dump_start(const struct nla_policy *policy,
- unsigned int maxtype,
- unsigned long *state);
-bool netlink_policy_dump_loop(unsigned long state);
-int netlink_policy_dump_write(struct sk_buff *skb, unsigned long state);
-void netlink_policy_dump_free(unsigned long state);
+struct netlink_policy_dump_state;
+
+int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
+ const struct nla_policy *policy,
+ unsigned int maxtype);
+bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state);
+int netlink_policy_dump_write(struct sk_buff *skb,
+ struct netlink_policy_dump_state *state);
+int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt);
+int netlink_policy_dump_write_attr(struct sk_buff *skb,
+ const struct nla_policy *pt,
+ int nestattr);
+void netlink_policy_dump_free(struct netlink_policy_dump_state *state);
#endif
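A short, hypothetical policy showing the reworked validation: exact and minimum lengths are now expressed as NLA_BINARY ranges, and NLA_POLICY_MASK() rejects values with bits set outside the mask. The DEMO_ATTR_* names are illustrative only.

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_MAC, DEMO_ATTR_BLOB, DEMO_ATTR_FLAGS };

static const struct nla_policy demo_policy[DEMO_ATTR_FLAGS + 1] = {
	[DEMO_ATTR_MAC]	  = NLA_POLICY_EXACT_LEN(ETH_ALEN), /* BINARY range len..len */
	[DEMO_ATTR_BLOB]  = NLA_POLICY_MIN_LEN(16),	    /* BINARY with a minimum */
	[DEMO_ATTR_FLAGS] = NLA_POLICY_MASK(NLA_U32, 0x0000000f),
};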
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
index b6ab7d1530d7..52fbd8291a96 100644
--- a/include/net/netns/can.h
+++ b/include/net/netns/can.h
@@ -15,7 +15,6 @@ struct can_rcv_lists_stats;
struct netns_can {
#if IS_ENABLED(CONFIG_PROC_FS)
struct proc_dir_entry *proc_dir;
- struct proc_dir_entry *pde_version;
struct proc_dir_entry *pde_stats;
struct proc_dir_entry *pde_reset_stats;
struct proc_dir_entry *pde_rcvlist_all;
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 9e36738c1fe1..8e4fcac4df72 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -183,6 +183,7 @@ struct netns_ipv4 {
unsigned int sysctl_tcp_fastopen_blackhole_timeout;
atomic_t tfo_active_disable_times;
unsigned long tfo_active_disable_stamp;
+ int sysctl_tcp_reflect_tos;
int sysctl_udp_wmem_min;
int sysctl_udp_rmem_min;
diff --git a/include/net/netns/nexthop.h b/include/net/netns/nexthop.h
index 1937476c94a0..1849e77eb68a 100644
--- a/include/net/netns/nexthop.h
+++ b/include/net/netns/nexthop.h
@@ -14,6 +14,6 @@ struct netns_nexthop {
unsigned int seq; /* protected by rtnl_mutex */
u32 last_id_allocated;
- struct atomic_notifier_head notifier_chain;
+ struct blocking_notifier_head notifier_chain;
};
#endif
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3a4f9e3b91a5..2fd76a9b6dc8 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -105,13 +105,9 @@ struct nexthop {
};
enum nexthop_event_type {
- NEXTHOP_EVENT_ADD,
NEXTHOP_EVENT_DEL
};
-int call_nexthop_notifier(struct notifier_block *nb, struct net *net,
- enum nexthop_event_type event_type,
- struct nexthop *nh);
int register_nexthop_notifier(struct net *net, struct notifier_block *nb);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb);
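With the notifier chain now blocking (per the netns/nexthop.h hunk above), a registered callback is allowed to sleep. A hedged sketch with hypothetical demo_* names:

static int demo_nexthop_event(struct notifier_block *nb, unsigned long event,
			      void *ptr)
{
	if (event == NEXTHOP_EVENT_DEL) {
		/* blocking context: may sleep, e.g. to program firmware */
	}
	return NOTIFY_DONE;
}

static struct notifier_block demo_nexthop_nb = {
	.notifier_call = demo_nexthop_event,
};
/* register_nexthop_notifier(net, &demo_nexthop_nb); */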
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index ac8c890a2657..4ed32e6b0201 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -19,12 +19,9 @@ struct qdisc_walker {
int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
};
-#define QDISC_ALIGNTO 64
-#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
-
static inline void *qdisc_priv(struct Qdisc *q)
{
- return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc));
+ return &q->privdata;
}
/*
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index b2eb8b4ba697..29e41ff3ec93 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -41,6 +41,13 @@ struct request_sock_ops {
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
+struct saved_syn {
+ u32 mac_hdrlen;
+ u32 network_hdrlen;
+ u32 tcp_hdrlen;
+ u8 data[];
+};
+
/* struct request_sock - mini sock to represent a connection request
*/
struct request_sock {
@@ -60,7 +67,7 @@ struct request_sock {
struct timer_list rsk_timer;
const struct request_sock_ops *rsk_ops;
struct sock *sk;
- u32 *saved_syn;
+ struct saved_syn *saved_syn;
u32 secid;
u32 peer_secid;
};
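The new layout keeps the mac, network and TCP headers of the SYN back to back in data[], so each one can be located from the stored lengths; a minimal sketch (the demo_* helper is not part of the patch):

static const u8 *demo_saved_tcp_hdr(const struct saved_syn *syn)
{
	/* data[] = [mac hdr][network hdr][tcp hdr] */
	return syn->data + syn->mac_hdrlen + syn->network_hdrlen;
}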
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d60e7c39d60c..d8fd8676fc72 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -91,7 +91,7 @@ struct Qdisc {
struct net_rate_estimator __rcu *rate_est;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- int padded;
+ int pad;
refcount_t refcnt;
/*
@@ -112,6 +112,9 @@ struct Qdisc {
/* for NOLOCK qdisc, true if there are no enqueued skbs */
bool empty;
struct rcu_head rcu;
+
+ /* private data */
+ long privdata[] ____cacheline_aligned;
};
static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
@@ -1047,12 +1050,6 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
return 0;
}
-static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
- struct sk_buff **to_free)
-{
- return __qdisc_queue_drop_head(sch, &sch->q, to_free);
-}
-
static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
const struct qdisc_skb_head *qh = &sch->q;
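The qdisc private area now lives in the trailing privdata[] member instead of being located via alignment arithmetic, so qdisc_priv() is a plain field access. A hedged illustration of a consumer, with a hypothetical private struct:

struct demo_sched_data {		/* hypothetical qdisc private state */
	u32 limit;
};

static void demo_qdisc_setup(struct Qdisc *sch)
{
	struct demo_sched_data *q = qdisc_priv(sch);	/* &sch->privdata */

	q->limit = 1000;
}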
diff --git a/include/net/smc.h b/include/net/smc.h
index 646feb4bc75f..e441aa97ad61 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -37,6 +37,8 @@ struct smcd_dmb {
#define ISM_EVENT_GID 1
#define ISM_EVENT_SWR 2
+#define ISM_RESERVED_VLANID 0x1FFF
+
#define ISM_ERROR 0xFFFF
struct smcd_event {
@@ -63,6 +65,8 @@ struct smcd_ops {
int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
bool sf, unsigned int offset, void *data,
unsigned int size);
+ void (*get_system_eid)(struct smcd_dev *dev, u8 **eid);
+ u16 (*get_chid)(struct smcd_dev *dev);
};
struct smcd_dev {
diff --git a/include/net/sock.h b/include/net/sock.h
index 064637d1ddf6..a5c6ae78df77 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -246,7 +246,7 @@ struct sock_common {
/* public: */
};
-struct bpf_sk_storage;
+struct bpf_local_storage;
/**
* struct sock - network layer representation of sockets
@@ -517,7 +517,7 @@ struct sock {
void (*sk_destruct)(struct sock *sk);
struct sock_reuseport __rcu *sk_reuseport_cb;
#ifdef CONFIG_BPF_SYSCALL
- struct bpf_sk_storage __rcu *sk_bpf_storage;
+ struct bpf_local_storage __rcu *sk_bpf_storage;
#endif
struct rcu_head sk_rcu;
};
@@ -845,7 +845,6 @@ enum sock_flags {
SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
- SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
SOCK_MEMALLOC, /* VM depends on this socket for swapping */
SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
SOCK_FASYNC, /* fasync() active */
@@ -1478,7 +1477,7 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
{
if (!sk_has_account(sk))
return true;
- return size<= sk->sk_forward_alloc ||
+ return size <= sk->sk_forward_alloc ||
__sk_mem_schedule(sk, size, SK_MEM_RECV) ||
skb_pfmemalloc(skb);
}
@@ -1526,7 +1525,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk_wmem_queued_add(sk, -skb->truesize);
sk_mem_uncharge(sk, skb->truesize);
if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
@@ -2197,6 +2195,8 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
void sk_stop_timer(struct sock *sk, struct timer_list *timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer);
+
int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
struct sk_buff *skb, unsigned int flags,
void (*destructor)(struct sock *sk,
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index ff2246914301..53e8b4994296 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -203,6 +203,7 @@ enum switchdev_notifier_type {
SWITCHDEV_FDB_ADD_TO_DEVICE,
SWITCHDEV_FDB_DEL_TO_DEVICE,
SWITCHDEV_FDB_OFFLOADED,
+ SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
SWITCHDEV_PORT_OBJ_ADD, /* Blocking. */
SWITCHDEV_PORT_OBJ_DEL, /* Blocking. */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index e1057b255f69..879fe8cff581 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -56,7 +56,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
struct tcf_tunnel_key *t = to_tunnel_key(a);
- struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
+ struct tcf_tunnel_key_params *params;
+
+ params = rcu_dereference_protected(t->params,
+ lockdep_is_held(&a->tcfa_lock));
return &params->tcft_enc_metadata->u.tun_info;
#else
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 4e2502408c31..f051046ba034 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -11,6 +11,8 @@
struct tcf_vlan_params {
int tcfv_action;
+ unsigned char tcfv_push_dst[ETH_ALEN];
+ unsigned char tcfv_push_src[ETH_ALEN];
u16 tcfv_push_vid;
__be16 tcfv_push_proto;
u8 tcfv_push_prio;
diff --git a/include/net/tcp.h b/include/net/tcp.h
index eab6c7510b5b..d4ef5bf94168 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -394,7 +394,7 @@ void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
-void tcp_init_transfer(struct sock *sk, int bpf_op);
+void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb);
__poll_t tcp_poll(struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -455,7 +455,8 @@ enum tcp_synack_type {
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
int tcp_disconnect(struct sock *sk, int flags);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
@@ -699,7 +700,7 @@ static inline void tcp_fast_path_check(struct sock *sk)
static inline u32 tcp_rto_min(struct sock *sk)
{
const struct dst_entry *dst = __sk_dst_get(sk);
- u32 rto_min = TCP_RTO_MIN;
+ u32 rto_min = inet_csk(sk)->icsk_rto_min;
if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
@@ -941,16 +942,6 @@ INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb));
#endif
-static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
-{
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
- if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
- skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
- return true;
-#endif
- return false;
-}
-
/* TCP_SKB_CB reference means this can not be used from early demux */
static inline int tcp_v4_sdif(struct sk_buff *skb)
{
@@ -1113,7 +1104,7 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
- bool reinit, bool cap_net_admin);
+ bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
@@ -1423,6 +1414,8 @@ static inline int tcp_full_space(const struct sock *sk)
return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
}
+void tcp_cleanup_rbuf(struct sock *sk, int copied);
+
/* We provision sk_rcvbuf around 200% of sk_rcvlowat.
* If 87.5 % (7/8) of the space has been consumed, we want to override
* SO_RCVLOWAT constraint, since we are receiving skbs with too small
@@ -2035,7 +2028,8 @@ struct tcp_request_sock_ops {
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type);
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb);
};
extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
@@ -2233,6 +2227,22 @@ int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
struct msghdr *msg, int len, int flags);
#endif /* CONFIG_NET_SOCK_MSG */
+#ifdef CONFIG_CGROUP_BPF
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+ skops->skb = skb;
+ skops->skb_data_end = skb->data + end_offset;
+}
+#else
+static inline void bpf_skops_init_skb(struct bpf_sock_ops_kern *skops,
+ struct sk_buff *skb,
+ unsigned int end_offset)
+{
+}
+#endif
+
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF
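A sketch of how a caller might attach an skb before invoking a BPF_SOCK_OPS program, assuming skb->data already points at the TCP header; the demo_* wrapper is illustrative, not the patch's call site.

static void demo_run_sock_ops_with_skb(struct sock *sk, struct sk_buff *skb)
{
	struct bpf_sock_ops_kern sock_ops = {};

	/* expose [skb->data, skb->data + tcp_hdrlen(skb)) to the program */
	bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
	/* ... run the cgroup BPF program against &sock_ops here ... */
}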
diff --git a/include/net/tls.h b/include/net/tls.h
index e5dac7e74e79..baf1e99d8193 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -679,10 +679,6 @@ int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
-struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
- struct net_device *dev,
- struct sk_buff *skb);
-
int tls_sw_fallback_init(struct sock *sk,
struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index 94bb7a882250..2ea453dac876 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -200,11 +200,27 @@ enum udp_tunnel_nic_info_flags {
UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN = BIT(3),
};
+struct udp_tunnel_nic;
+
+#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES (U16_MAX / 2)
+
+struct udp_tunnel_nic_shared {
+ struct udp_tunnel_nic *udp_tunnel_nic_info;
+
+ struct list_head devices;
+};
+
+struct udp_tunnel_nic_shared_node {
+ struct net_device *dev;
+ struct list_head list;
+};
+
/**
* struct udp_tunnel_nic_info - driver UDP tunnel offload information
* @set_port: callback for adding a new port
* @unset_port: callback for removing a port
* @sync_table: callback for syncing the entire port table at once
+ * @shared: reference to device global state (optional)
* @flags: device flags from enum udp_tunnel_nic_info_flags
* @tables: UDP port tables this device has
* @tables.n_entries: number of entries in this table
@@ -213,6 +229,12 @@ enum udp_tunnel_nic_info_flags {
* Drivers are expected to provide either @set_port and @unset_port callbacks
* or the @sync_table callback. Callbacks are invoked with rtnl lock held.
*
+ * Devices which (misguidedly) share the UDP tunnel port table across multiple
+ * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
+ * point @shared at it.
+ * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
+ * sharing a table.
+ *
* Known limitations:
* - UDP tunnel port notifications are fundamentally best-effort -
* it is likely the driver will both see skbs which use a UDP tunnel port,
@@ -234,6 +256,8 @@ struct udp_tunnel_nic_info {
/* all at once */
int (*sync_table)(struct net_device *dev, unsigned int table);
+ struct udp_tunnel_nic_shared *shared;
+
unsigned int flags;
struct udp_tunnel_nic_table_info {
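For devices that do share one port table across netdevs, the driver allocates a single udp_tunnel_nic_shared and points each port's info at it; all demo_* names below are assumptions.

static int demo_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
				    unsigned int entry,
				    struct udp_tunnel_info *ti)
{
	return 0;	/* program the (shared) hardware table */
}

static int demo_udp_tunnel_unset_port(struct net_device *dev,
				      unsigned int table, unsigned int entry,
				      struct udp_tunnel_info *ti)
{
	return 0;
}

static struct udp_tunnel_nic_shared demo_shared = {
	.devices = LIST_HEAD_INIT(demo_shared.devices),
};

static const struct udp_tunnel_nic_info demo_udp_tunnels = {
	.set_port   = demo_udp_tunnel_set_port,
	.unset_port = demo_udp_tunnel_unset_port,
	.shared	    = &demo_shared,
	.tables	    = {
		{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};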
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index c9d87cc40c11..1a9559c0cbdd 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -18,25 +18,19 @@ struct xsk_queue;
struct xdp_buff;
struct xdp_umem {
- struct xsk_queue *fq;
- struct xsk_queue *cq;
- struct xsk_buff_pool *pool;
+ void *addrs;
u64 size;
u32 headroom;
u32 chunk_size;
+ u32 chunks;
+ u32 npgs;
struct user_struct *user;
refcount_t users;
- struct work_struct work;
- struct page **pgs;
- u32 npgs;
- u16 queue_id;
- u8 need_wakeup;
u8 flags;
- int id;
- struct net_device *dev;
bool zc;
- spinlock_t xsk_tx_list_lock;
- struct list_head xsk_tx_list;
+ struct page **pgs;
+ int id;
+ struct list_head xsk_dma_list;
};
struct xsk_map {
@@ -48,10 +42,11 @@ struct xsk_map {
struct xdp_sock {
/* struct sock must be the first member of struct xdp_sock */
struct sock sk;
- struct xsk_queue *rx;
+ struct xsk_queue *rx ____cacheline_aligned_in_smp;
struct net_device *dev;
struct xdp_umem *umem;
struct list_head flush_node;
+ struct xsk_buff_pool *pool;
u16 queue_id;
bool zc;
enum {
@@ -59,10 +54,9 @@ struct xdp_sock {
XSK_BOUND,
XSK_UNBOUND,
} state;
- /* Protects multiple processes in the control path */
- struct mutex mutex;
+
struct xsk_queue *tx ____cacheline_aligned_in_smp;
- struct list_head list;
+ struct list_head tx_list;
/* Mutual exclusion of NAPI TX thread and sendmsg error paths
* in the SKB destructor callback.
*/
@@ -77,6 +71,10 @@ struct xdp_sock {
struct list_head map_list;
/* Protects map_list */
spinlock_t map_list_lock;
+ /* Protects multiple processes in the control path */
+ struct mutex mutex;
+ struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
+ struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};
#ifdef CONFIG_XDP_SOCKETS
diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
index ccf848f7efa4..5b1ee8a9976d 100644
--- a/include/net/xdp_sock_drv.h
+++ b/include/net/xdp_sock_drv.h
@@ -11,47 +11,50 @@
#ifdef CONFIG_XDP_SOCKETS
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
+void xsk_tx_release(struct xsk_buff_pool *pool);
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+ u16 queue_id);
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);
-static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
- return XDP_PACKET_HEADROOM + umem->headroom;
+ return XDP_PACKET_HEADROOM + pool->headroom;
}
-static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
- return umem->chunk_size;
+ return pool->chunk_size;
}
-static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
- return xsk_umem_get_chunk_size(umem) - xsk_umem_get_headroom(umem);
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}
-static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
- xp_set_rxq_info(umem->pool, rxq);
+ xp_set_rxq_info(pool, rxq);
}
-static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
- xp_dma_unmap(umem->pool, attrs);
+ xp_dma_unmap(pool, attrs);
}
-static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
- unsigned long attrs)
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
{
- return xp_dma_map(umem->pool, dev, attrs, umem->pgs, umem->npgs);
+ struct xdp_umem *umem = pool->umem;
+
+ return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
@@ -68,14 +71,14 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return xp_get_frame_dma(xskb);
}
-static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
- return xp_alloc(umem->pool);
+ return xp_alloc(pool);
}
-static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
- return xp_can_alloc(umem->pool, count);
+ return xp_can_alloc(pool, count);
}
static inline void xsk_buff_free(struct xdp_buff *xdp)
@@ -85,100 +88,104 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
xp_free(xskb);
}
-static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
{
- return xp_raw_get_dma(umem->pool, addr);
+ return xp_raw_get_dma(pool, addr);
}
-static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
- return xp_raw_get_data(umem->pool, addr);
+ return xp_raw_get_data(pool, addr);
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+ if (!pool->dma_need_sync)
+ return;
+
xp_dma_sync_for_cpu(xskb);
}
-static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
{
- xp_dma_sync_for_device(umem->pool, dma, size);
+ xp_dma_sync_for_device(pool, dma, size);
}
#else
-static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
- struct xdp_desc *desc)
+static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
+ struct xdp_desc *desc)
{
return false;
}
-static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
- u16 queue_id)
+static inline struct xsk_buff_pool *
+xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
return NULL;
}
-static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
+static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}
-static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
+static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
return false;
}
-static inline u32 xsk_umem_get_headroom(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline u32 xsk_umem_get_chunk_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline u32 xsk_umem_get_rx_frame_size(struct xdp_umem *umem)
+static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
return 0;
}
-static inline void xsk_buff_set_rxq_info(struct xdp_umem *umem,
+static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
struct xdp_rxq_info *rxq)
{
}
-static inline void xsk_buff_dma_unmap(struct xdp_umem *umem,
+static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
unsigned long attrs)
{
}
-static inline int xsk_buff_dma_map(struct xdp_umem *umem, struct device *dev,
- unsigned long attrs)
+static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
+ struct device *dev, unsigned long attrs)
{
return 0;
}
@@ -193,12 +200,12 @@ static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
return 0;
}
-static inline struct xdp_buff *xsk_buff_alloc(struct xdp_umem *umem)
+static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
return NULL;
}
-static inline bool xsk_buff_can_alloc(struct xdp_umem *umem, u32 count)
+static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
return false;
}
@@ -207,21 +214,22 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}
-static inline dma_addr_t xsk_buff_raw_get_dma(struct xdp_umem *umem, u64 addr)
+static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
+ u64 addr)
{
return 0;
}
-static inline void *xsk_buff_raw_get_data(struct xdp_umem *umem, u64 addr)
+static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
return NULL;
}
-static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
+static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp, struct xsk_buff_pool *pool)
{
}
-static inline void xsk_buff_raw_dma_sync_for_device(struct xdp_umem *umem,
+static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
dma_addr_t dma,
size_t size)
{
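
The umem-to-pool conversion above is mechanical for drivers: every helper that previously took a struct xdp_umem now takes the struct xsk_buff_pool directly. A minimal RX-refill sketch against the renamed API follows; mydrv_rx_ring and its members are hypothetical, and only the xsk_* calls come from this header.

/* Sketch only: illustrates the renamed pool-based allocation calls. */
static u16 mydrv_refill_rx(struct mydrv_rx_ring *ring, u16 count)
{
	struct xsk_buff_pool *pool = ring->xsk_pool;
	u16 i;

	/* Cheap capacity check before touching the ring. */
	if (!xsk_buff_can_alloc(pool, count))
		return 0;

	for (i = 0; i < count; i++) {
		struct xdp_buff *xdp = xsk_buff_alloc(pool);

		if (!xdp)
			break;
		/* The DMA address now comes via the buffer, not the umem. */
		ring->desc[i].addr = xsk_buff_xdp_get_dma(xdp);
		ring->xdp_buf[i] = xdp;
	}

	return i;
}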
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 9e806c781025..b2a06f10b62c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1996,6 +1996,39 @@ static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
return 0;
}
+extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
+extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
+
+struct xfrm_translator {
+ /* Allocate frag_list and put compat translation there */
+ int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
+
+ /* Allocate nlmsg with 64-bit translation of received 32-bit message */
+ struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
+ int maxtype, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
+
+ /* Translate 32-bit user_policy from sockptr */
+ int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
+
+ struct module *owner;
+};
+
+#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
+extern int xfrm_register_translator(struct xfrm_translator *xtr);
+extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
+extern struct xfrm_translator *xfrm_get_translator(void);
+extern void xfrm_put_translator(struct xfrm_translator *xtr);
+#else
+static inline struct xfrm_translator *xfrm_get_translator(void)
+{
+ return NULL;
+}
+static inline void xfrm_put_translator(struct xfrm_translator *xtr)
+{
+}
+#endif
+
#if IS_ENABLED(CONFIG_IPV6)
static inline bool xfrm6_local_dontfrag(const struct sock *sk)
{
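
The registration side of these hooks is not visible in this diff; a plausible shape for the compat module is sketched below. The xfrm_compat_* callback names are assumptions, only the registration calls and the struct layout come from the header above.

/* Hedged sketch of a compat translator module. */
static struct xfrm_translator xfrm_translator = {
	.alloc_compat		= xfrm_compat_alloc,
	.rcv_msg_compat		= xfrm_compat_rcv_msg,
	.xlate_user_policy_sockptr = xfrm_compat_xlate_policy,
	.owner			= THIS_MODULE,
};

static int __init xfrm_compat_init(void)
{
	return xfrm_register_translator(&xfrm_translator);
}

static void __exit xfrm_compat_exit(void)
{
	xfrm_unregister_translator(&xfrm_translator);
}

module_init(xfrm_compat_init);
module_exit(xfrm_compat_exit);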
diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
index 6842990e2712..0140d086dc84 100644
--- a/include/net/xsk_buff_pool.h
+++ b/include/net/xsk_buff_pool.h
@@ -13,6 +13,8 @@ struct xsk_buff_pool;
struct xdp_rxq_info;
struct xsk_queue;
struct xdp_desc;
+struct xdp_umem;
+struct xdp_sock;
struct device;
struct page;
@@ -26,34 +28,68 @@ struct xdp_buff_xsk {
struct list_head free_list_node;
};
+struct xsk_dma_map {
+ dma_addr_t *dma_pages;
+ struct device *dev;
+ struct net_device *netdev;
+ refcount_t users;
+ struct list_head list; /* Protected by the RTNL_LOCK */
+ u32 dma_pages_cnt;
+ bool dma_need_sync;
+};
+
struct xsk_buff_pool {
- struct xsk_queue *fq;
+ /* Members only used in the control path first. */
+ struct device *dev;
+ struct net_device *netdev;
+ struct list_head xsk_tx_list;
+ /* Protects modifications to the xsk_tx_list */
+ spinlock_t xsk_tx_list_lock;
+ refcount_t users;
+ struct xdp_umem *umem;
+ struct work_struct work;
struct list_head free_list;
+ u32 heads_cnt;
+ u16 queue_id;
+
+ /* Data path members as close to free_heads at the end as possible. */
+ struct xsk_queue *fq ____cacheline_aligned_in_smp;
+ struct xsk_queue *cq;
+ /* For performance reasons, each buff pool has its own array of dma_pages
+ * even when they are identical.
+ */
dma_addr_t *dma_pages;
struct xdp_buff_xsk *heads;
u64 chunk_mask;
u64 addrs_cnt;
u32 free_list_cnt;
u32 dma_pages_cnt;
- u32 heads_cnt;
u32 free_heads_cnt;
u32 headroom;
u32 chunk_size;
u32 frame_len;
+ u8 cached_need_wakeup;
+ bool uses_need_wakeup;
bool dma_need_sync;
bool unaligned;
void *addrs;
- struct device *dev;
struct xdp_buff_xsk *free_heads[];
};
/* AF_XDP core. */
-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
- u32 chunk_size, u32 headroom, u64 size,
- bool unaligned);
-void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq);
+struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ struct xdp_umem *umem);
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+ u16 queue_id, u16 flags);
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+ struct net_device *dev, u16 queue_id);
void xp_destroy(struct xsk_buff_pool *pool);
void xp_release(struct xdp_buff_xsk *xskb);
+void xp_get_pool(struct xsk_buff_pool *pool);
+void xp_put_pool(struct xsk_buff_pool *pool);
+void xp_clear_dev(struct xsk_buff_pool *pool);
+void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
+void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
/* AF_XDP, and XDP core. */
void xp_free(struct xdp_buff_xsk *xskb);
@@ -80,9 +116,6 @@ static inline dma_addr_t xp_get_frame_dma(struct xdp_buff_xsk *xskb)
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb);
static inline void xp_dma_sync_for_cpu(struct xdp_buff_xsk *xskb)
{
- if (!xskb->pool->dma_need_sync)
- return;
-
xp_dma_sync_for_cpu_slow(xskb);
}
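
The new creation API pairs a pool with a socket's umem and then binds it to a device queue. A rough sketch of the expected call order (error handling trimmed; the helper name and the source of xs/dev/queue_id are illustrative, not taken from the patch body):

/* Hypothetical bind-path helper showing the expected call order. */
static int xsk_bind_pool_sketch(struct xdp_sock *xs, struct xdp_umem *umem,
				struct net_device *dev, u16 queue_id,
				u16 flags)
{
	struct xsk_buff_pool *pool;
	int err;

	pool = xp_create_and_assign_umem(xs, umem);
	if (!pool)
		return -ENOMEM;

	err = xp_assign_dev(pool, dev, queue_id, flags);
	if (err) {
		xp_destroy(pool);
		return err;
	}

	/* Track the socket on the pool's xsk_tx_list from here on. */
	xp_add_xsk(pool, xs);
	return 0;
}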
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 0ac4e7fba086..1e9db9577441 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -101,6 +101,7 @@
#define OCELOT_TAG_LEN 16
#define OCELOT_SHORT_PREFIX_LEN 4
#define OCELOT_LONG_PREFIX_LEN 16
+#define OCELOT_TOTAL_TAG_LEN (OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)
#define OCELOT_SPEED_2500 0
#define OCELOT_SPEED_1000 1
@@ -122,6 +123,8 @@ enum ocelot_target {
QSYS,
REW,
SYS,
+ S0,
+ S1,
S2,
HSIO,
PTP,
@@ -392,13 +395,6 @@ enum ocelot_reg {
SYS_CM_DATA_RD,
SYS_CM_OP,
SYS_CM_DATA,
- S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
- S2_CORE_MV_CFG,
- S2_CACHE_ENTRY_DAT,
- S2_CACHE_MASK_DAT,
- S2_CACHE_ACTION_DAT,
- S2_CACHE_CNT_DAT,
- S2_CACHE_TG_DAT,
PTP_PIN_CFG = PTP << TARGET_OFFSET,
PTP_PIN_TOD_SEC_MSB,
PTP_PIN_TOD_SEC_LSB,
@@ -517,6 +513,29 @@ enum ocelot_regfield {
REGFIELD_MAX
};
+enum {
+ /* VCAP_CORE_CFG */
+ VCAP_CORE_UPDATE_CTRL,
+ VCAP_CORE_MV_CFG,
+ /* VCAP_CORE_CACHE */
+ VCAP_CACHE_ENTRY_DAT,
+ VCAP_CACHE_MASK_DAT,
+ VCAP_CACHE_ACTION_DAT,
+ VCAP_CACHE_CNT_DAT,
+ VCAP_CACHE_TG_DAT,
+ /* VCAP_CONST */
+ VCAP_CONST_VCAP_VER,
+ VCAP_CONST_ENTRY_WIDTH,
+ VCAP_CONST_ENTRY_CNT,
+ VCAP_CONST_ENTRY_SWCNT,
+ VCAP_CONST_ENTRY_TG_WIDTH,
+ VCAP_CONST_ACTION_DEF_CNT,
+ VCAP_CONST_ACTION_WIDTH,
+ VCAP_CONST_CNT_WIDTH,
+ VCAP_CONST_CORE_CNT,
+ VCAP_CONST_IF_CNT,
+};
+
enum ocelot_ptp_pins {
PTP_PIN_0,
PTP_PIN_1,
@@ -540,6 +559,8 @@ enum ocelot_tag_prefix {
struct ocelot;
struct ocelot_ops {
+ struct net_device *(*port_to_netdev)(struct ocelot *ocelot, int port);
+ int (*netdev_to_port)(struct net_device *dev);
int (*reset)(struct ocelot *ocelot);
u16 (*wm_enc)(u16 value);
};
@@ -612,11 +633,9 @@ struct ocelot {
struct list_head multicast;
- struct ocelot_vcap_block block;
-
- const struct vcap_field *vcap_is2_keys;
- const struct vcap_field *vcap_is2_actions;
- const struct vcap_props *vcap;
+ struct list_head dummy_rules;
+ struct ocelot_vcap_block block[3];
+ struct vcap_props *vcap;
/* Workqueue to check statistics for overflow with its lock */
struct mutex stats_lock;
@@ -660,6 +679,24 @@ struct ocelot_policer {
#define ocelot_fields_write(ocelot, id, reg, val) regmap_fields_write((ocelot)->regfields[(reg)], (id), (val))
#define ocelot_fields_read(ocelot, id, reg, val) regmap_fields_read((ocelot)->regfields[(reg)], (id), (val))
+#define ocelot_target_read_ix(ocelot, target, reg, gi, ri) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_target_read_gix(ocelot, target, reg, gi) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_GSZ * (gi))
+#define ocelot_target_read_rix(ocelot, target, reg, ri) \
+ __ocelot_target_read_ix(ocelot, target, reg, reg##_RSZ * (ri))
+#define ocelot_target_read(ocelot, target, reg) \
+ __ocelot_target_read_ix(ocelot, target, reg, 0)
+
+#define ocelot_target_write_ix(ocelot, target, val, reg, gi, ri) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_target_write_gix(ocelot, target, val, reg, gi) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_GSZ * (gi))
+#define ocelot_target_write_rix(ocelot, target, val, reg, ri) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, reg##_RSZ * (ri))
+#define ocelot_target_write(ocelot, target, val, reg) \
+ __ocelot_target_write_ix(ocelot, target, val, reg, 0)
+
/* I/O */
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
@@ -667,14 +704,15 @@ u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
u32 offset);
+u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 reg, u32 offset);
+void __ocelot_target_write_ix(struct ocelot *ocelot, enum ocelot_target target,
+ u32 val, u32 reg, u32 offset);
/* Hardware initialization */
int ocelot_regfields_init(struct ocelot *ocelot,
const struct reg_field *const regfields);
struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
-void ocelot_configure_cpu(struct ocelot *ocelot, int npi,
- enum ocelot_tag_prefix injection,
- enum ocelot_tag_prefix extraction);
int ocelot_init(struct ocelot *ocelot);
void ocelot_deinit(struct ocelot *ocelot);
void ocelot_init_port(struct ocelot *ocelot, int port);
@@ -692,8 +730,8 @@ int ocelot_get_ts_info(struct ocelot *ocelot, int port,
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev);
-void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
- bool vlan_aware);
+int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
+ struct switchdev_trans *trans);
void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
struct net_device *bridge);
@@ -710,8 +748,8 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr);
int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr);
-int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
- struct sk_buff *skb);
+void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port,
+ struct sk_buff *clone);
void ocelot_get_txtstamp(struct ocelot *ocelot);
void ocelot_port_set_maxlen(struct ocelot *ocelot, int port, size_t sdu);
int ocelot_get_max_mtu(struct ocelot *ocelot, int port);
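
The reg##_GSZ/reg##_RSZ expansion means the new per-target accessors only work with registers that define the matching stride constants (the VCAP_CACHE_*_RSZ values added in ocelot_vcap.h below). A hedged usage sketch; the helper name is illustrative:

/* Writes one VCAP IS2 cache word; 'ri' indexes the 32-bit cache row. */
static void vcap_cache_write_word(struct ocelot *ocelot, int ri,
				  u32 entry, u32 mask)
{
	ocelot_target_write_rix(ocelot, S2, entry, VCAP_CACHE_ENTRY_DAT, ri);
	ocelot_target_write_rix(ocelot, S2, ~mask, VCAP_CACHE_MASK_DAT, ri);
}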
diff --git a/include/soc/mscc/ocelot_ptp.h b/include/soc/mscc/ocelot_ptp.h
index 4a6b2f71b6b2..6a7388fa7cc5 100644
--- a/include/soc/mscc/ocelot_ptp.h
+++ b/include/soc/mscc/ocelot_ptp.h
@@ -53,6 +53,7 @@ int ocelot_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan);
int ocelot_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
-int ocelot_init_timestamp(struct ocelot *ocelot, struct ptp_clock_info *info);
+int ocelot_init_timestamp(struct ocelot *ocelot,
+ const struct ptp_clock_info *info);
int ocelot_deinit_timestamp(struct ocelot *ocelot);
#endif
diff --git a/include/soc/mscc/ocelot_vcap.h b/include/soc/mscc/ocelot_vcap.h
index 5748373ab4d3..96300adf3648 100644
--- a/include/soc/mscc/ocelot_vcap.h
+++ b/include/soc/mscc/ocelot_vcap.h
@@ -6,17 +6,22 @@
#ifndef _OCELOT_VCAP_H_
#define _OCELOT_VCAP_H_
+#include <soc/mscc/ocelot.h>
+
/* =================================================================
* VCAP Common
* =================================================================
*/
enum {
- /* VCAP_IS1, */
+ VCAP_ES0,
+ VCAP_IS1,
VCAP_IS2,
- /* VCAP_ES0, */
+ __VCAP_COUNT,
};
+#define OCELOT_NUM_VCAP_BLOCKS __VCAP_COUNT
+
struct vcap_props {
u16 tg_width; /* Type-group width (in bits) */
u16 sw_count; /* Sub word count */
@@ -33,6 +38,11 @@ struct vcap_props {
} action_table[2];
u16 counter_words; /* Number of counter words */
u16 counter_width; /* Counter width (in bits) */
+
+ enum ocelot_target target;
+
+ const struct vcap_field *keys;
+ const struct vcap_field *actions;
};
/* VCAP Type-Group values */
@@ -41,6 +51,61 @@ struct vcap_props {
#define VCAP_TG_HALF 2 /* Half entry */
#define VCAP_TG_QUARTER 3 /* Quarter entry */
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD(x) (((x) << 22) & GENMASK(24, 22))
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD_M GENMASK(24, 22)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CMD_X(x) (((x) & GENMASK(24, 22)) >> 22)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ENTRY_DIS BIT(21)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ACTION_DIS BIT(20)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_CNT_DIS BIT(19)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR(x) (((x) << 3) & GENMASK(18, 3))
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR_M GENMASK(18, 3)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_ADDR_X(x) (((x) & GENMASK(18, 3)) >> 3)
+#define VCAP_CORE_UPDATE_CTRL_UPDATE_SHOT BIT(2)
+#define VCAP_CORE_UPDATE_CTRL_CLEAR_CACHE BIT(1)
+#define VCAP_CORE_UPDATE_CTRL_MV_TRAFFIC_IGN BIT(0)
+
+#define VCAP_CORE_MV_CFG_MV_NUM_POS(x) (((x) << 16) & GENMASK(31, 16))
+#define VCAP_CORE_MV_CFG_MV_NUM_POS_M GENMASK(31, 16)
+#define VCAP_CORE_MV_CFG_MV_NUM_POS_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define VCAP_CORE_MV_CFG_MV_SIZE(x) ((x) & GENMASK(15, 0))
+#define VCAP_CORE_MV_CFG_MV_SIZE_M GENMASK(15, 0)
+
+#define VCAP_CACHE_ENTRY_DAT_RSZ 0x4
+
+#define VCAP_CACHE_MASK_DAT_RSZ 0x4
+
+#define VCAP_CACHE_ACTION_DAT_RSZ 0x4
+
+#define VCAP_CACHE_CNT_DAT_RSZ 0x4
+
+#define VCAP_STICKY_VCAP_ROW_DELETED_STICKY BIT(0)
+
+#define TCAM_BIST_CTRL_TCAM_BIST BIT(1)
+#define TCAM_BIST_CTRL_TCAM_INIT BIT(0)
+
+#define TCAM_BIST_CFG_TCAM_BIST_SOE_ENA BIT(8)
+#define TCAM_BIST_CFG_TCAM_HCG_DIS BIT(7)
+#define TCAM_BIST_CFG_TCAM_CG_DIS BIT(6)
+#define TCAM_BIST_CFG_TCAM_BIAS(x) ((x) & GENMASK(5, 0))
+#define TCAM_BIST_CFG_TCAM_BIAS_M GENMASK(5, 0)
+
+#define TCAM_BIST_STAT_BIST_RT_ERR BIT(15)
+#define TCAM_BIST_STAT_BIST_PENC_ERR BIT(14)
+#define TCAM_BIST_STAT_BIST_COMP_ERR BIT(13)
+#define TCAM_BIST_STAT_BIST_ADDR_ERR BIT(12)
+#define TCAM_BIST_STAT_BIST_BL1E_ERR BIT(11)
+#define TCAM_BIST_STAT_BIST_BL1_ERR BIT(10)
+#define TCAM_BIST_STAT_BIST_BL0E_ERR BIT(9)
+#define TCAM_BIST_STAT_BIST_BL0_ERR BIT(8)
+#define TCAM_BIST_STAT_BIST_PH1_ERR BIT(7)
+#define TCAM_BIST_STAT_BIST_PH0_ERR BIT(6)
+#define TCAM_BIST_STAT_BIST_PV1_ERR BIT(5)
+#define TCAM_BIST_STAT_BIST_PV0_ERR BIT(4)
+#define TCAM_BIST_STAT_BIST_RUN BIT(3)
+#define TCAM_BIST_STAT_BIST_ERR BIT(2)
+#define TCAM_BIST_STAT_BIST_BUSY BIT(1)
+#define TCAM_BIST_STAT_TCAM_RDY BIT(0)
+
/* =================================================================
* VCAP IS2
* =================================================================
@@ -202,4 +267,137 @@ enum vcap_is2_action_field {
VCAP_IS2_ACT_HIT_CNT,
};
+/* =================================================================
+ * VCAP IS1
+ * =================================================================
+ */
+
+/* IS1 half key types */
+#define IS1_TYPE_S1_NORMAL 0
+#define IS1_TYPE_S1_5TUPLE_IP4 1
+
+/* IS1 full key types */
+#define IS1_TYPE_S1_NORMAL_IP6 0
+#define IS1_TYPE_S1_7TUPLE 1
+#define IS1_TYPE_S1_5TUPLE_IP6 2
+
+enum {
+ IS1_ACTION_TYPE_NORMAL,
+ IS1_ACTION_TYPE_MAX,
+};
+
+enum vcap_is1_half_key_field {
+ VCAP_IS1_HK_TYPE,
+ VCAP_IS1_HK_LOOKUP,
+ VCAP_IS1_HK_IGR_PORT_MASK,
+ VCAP_IS1_HK_RSV,
+ VCAP_IS1_HK_OAM_Y1731,
+ VCAP_IS1_HK_L2_MC,
+ VCAP_IS1_HK_L2_BC,
+ VCAP_IS1_HK_IP_MC,
+ VCAP_IS1_HK_VLAN_TAGGED,
+ VCAP_IS1_HK_VLAN_DBL_TAGGED,
+ VCAP_IS1_HK_TPID,
+ VCAP_IS1_HK_VID,
+ VCAP_IS1_HK_DEI,
+ VCAP_IS1_HK_PCP,
+ /* Specific Fields for IS1 Half Key S1_NORMAL */
+ VCAP_IS1_HK_L2_SMAC,
+ VCAP_IS1_HK_ETYPE_LEN,
+ VCAP_IS1_HK_ETYPE,
+ VCAP_IS1_HK_IP_SNAP,
+ VCAP_IS1_HK_IP4,
+ VCAP_IS1_HK_L3_FRAGMENT,
+ VCAP_IS1_HK_L3_FRAG_OFS_GT0,
+ VCAP_IS1_HK_L3_OPTIONS,
+ VCAP_IS1_HK_L3_DSCP,
+ VCAP_IS1_HK_L3_IP4_SIP,
+ VCAP_IS1_HK_TCP_UDP,
+ VCAP_IS1_HK_TCP,
+ VCAP_IS1_HK_L4_SPORT,
+ VCAP_IS1_HK_L4_RNG,
+ /* Specific Fields for IS1 Half Key S1_5TUPLE_IP4 */
+ VCAP_IS1_HK_IP4_INNER_TPID,
+ VCAP_IS1_HK_IP4_INNER_VID,
+ VCAP_IS1_HK_IP4_INNER_DEI,
+ VCAP_IS1_HK_IP4_INNER_PCP,
+ VCAP_IS1_HK_IP4_IP4,
+ VCAP_IS1_HK_IP4_L3_FRAGMENT,
+ VCAP_IS1_HK_IP4_L3_FRAG_OFS_GT0,
+ VCAP_IS1_HK_IP4_L3_OPTIONS,
+ VCAP_IS1_HK_IP4_L3_DSCP,
+ VCAP_IS1_HK_IP4_L3_IP4_DIP,
+ VCAP_IS1_HK_IP4_L3_IP4_SIP,
+ VCAP_IS1_HK_IP4_L3_PROTO,
+ VCAP_IS1_HK_IP4_TCP_UDP,
+ VCAP_IS1_HK_IP4_TCP,
+ VCAP_IS1_HK_IP4_L4_RNG,
+ VCAP_IS1_HK_IP4_IP_PAYLOAD_S1_5TUPLE,
+};
+
+enum vcap_is1_action_field {
+ VCAP_IS1_ACT_DSCP_ENA,
+ VCAP_IS1_ACT_DSCP_VAL,
+ VCAP_IS1_ACT_QOS_ENA,
+ VCAP_IS1_ACT_QOS_VAL,
+ VCAP_IS1_ACT_DP_ENA,
+ VCAP_IS1_ACT_DP_VAL,
+ VCAP_IS1_ACT_PAG_OVERRIDE_MASK,
+ VCAP_IS1_ACT_PAG_VAL,
+ VCAP_IS1_ACT_RSV,
+ VCAP_IS1_ACT_VID_REPLACE_ENA,
+ VCAP_IS1_ACT_VID_ADD_VAL,
+ VCAP_IS1_ACT_FID_SEL,
+ VCAP_IS1_ACT_FID_VAL,
+ VCAP_IS1_ACT_PCP_DEI_ENA,
+ VCAP_IS1_ACT_PCP_VAL,
+ VCAP_IS1_ACT_DEI_VAL,
+ VCAP_IS1_ACT_VLAN_POP_CNT_ENA,
+ VCAP_IS1_ACT_VLAN_POP_CNT,
+ VCAP_IS1_ACT_CUSTOM_ACE_TYPE_ENA,
+ VCAP_IS1_ACT_HIT_STICKY,
+};
+
+/* =================================================================
+ * VCAP ES0
+ * =================================================================
+ */
+
+enum {
+ ES0_ACTION_TYPE_NORMAL,
+ ES0_ACTION_TYPE_MAX,
+};
+
+enum vcap_es0_key_field {
+ VCAP_ES0_EGR_PORT,
+ VCAP_ES0_IGR_PORT,
+ VCAP_ES0_RSV,
+ VCAP_ES0_L2_MC,
+ VCAP_ES0_L2_BC,
+ VCAP_ES0_VID,
+ VCAP_ES0_DP,
+ VCAP_ES0_PCP,
+};
+
+enum vcap_es0_action_field {
+ VCAP_ES0_ACT_PUSH_OUTER_TAG,
+ VCAP_ES0_ACT_PUSH_INNER_TAG,
+ VCAP_ES0_ACT_TAG_A_TPID_SEL,
+ VCAP_ES0_ACT_TAG_A_VID_SEL,
+ VCAP_ES0_ACT_TAG_A_PCP_SEL,
+ VCAP_ES0_ACT_TAG_A_DEI_SEL,
+ VCAP_ES0_ACT_TAG_B_TPID_SEL,
+ VCAP_ES0_ACT_TAG_B_VID_SEL,
+ VCAP_ES0_ACT_TAG_B_PCP_SEL,
+ VCAP_ES0_ACT_TAG_B_DEI_SEL,
+ VCAP_ES0_ACT_VID_A_VAL,
+ VCAP_ES0_ACT_PCP_A_VAL,
+ VCAP_ES0_ACT_DEI_A_VAL,
+ VCAP_ES0_ACT_VID_B_VAL,
+ VCAP_ES0_ACT_PCP_B_VAL,
+ VCAP_ES0_ACT_DEI_B_VAL,
+ VCAP_ES0_ACT_RSV,
+ VCAP_ES0_ACT_HIT_STICKY,
+};
+
#endif /* _OCELOT_VCAP_H_ */
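
With target, keys and actions folded into struct vcap_props, a driver's property table can describe each block self-contained. A hypothetical IS2 entry; the numeric widths are placeholders and the mydrv_* field tables are assumed, not part of this patch:

/* Sketch: per-block properties under the extended vcap_props. */
static struct vcap_props mydrv_vcap[OCELOT_NUM_VCAP_BLOCKS] = {
	[VCAP_IS2] = {
		.tg_width	= 2,
		.sw_count	= 4,
		.counter_words	= 4,
		.counter_width	= 32,
		/* New in this patch: per-block register target and
		 * key/action field tables.
		 */
		.target		= S2,
		.keys		= mydrv_vcap_is2_keys,
		.actions	= mydrv_vcap_is2_actions,
	},
};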
diff --git a/include/trace/events/devlink.h b/include/trace/events/devlink.h
index 6f60a78d9a7e..44d8e2981065 100644
--- a/include/trace/events/devlink.h
+++ b/include/trace/events/devlink.h
@@ -171,6 +171,43 @@ TRACE_EVENT(devlink_health_reporter_state_update,
__entry->new_state)
);
+/*
+ * Tracepoint for devlink packet trap:
+ */
+TRACE_EVENT(devlink_trap_report,
+ TP_PROTO(const struct devlink *devlink, struct sk_buff *skb,
+ const struct devlink_trap_metadata *metadata),
+
+ TP_ARGS(devlink, skb, metadata),
+
+ TP_STRUCT__entry(
+ __string(bus_name, devlink->dev->bus->name)
+ __string(dev_name, dev_name(devlink->dev))
+ __string(driver_name, devlink->dev->driver->name)
+ __string(trap_name, metadata->trap_name)
+ __string(trap_group_name, metadata->trap_group_name)
+ __dynamic_array(char, input_dev_name, IFNAMSIZ)
+ ),
+
+ TP_fast_assign(
+ struct net_device *input_dev = metadata->input_dev;
+
+ __assign_str(bus_name, devlink->dev->bus->name);
+ __assign_str(dev_name, dev_name(devlink->dev));
+ __assign_str(driver_name, devlink->dev->driver->name);
+ __assign_str(trap_name, metadata->trap_name);
+ __assign_str(trap_group_name, metadata->trap_group_name);
+ __assign_str(input_dev_name,
+ (input_dev ? input_dev->name : "NULL"));
+ ),
+
+ TP_printk("bus_name=%s dev_name=%s driver_name=%s trap_name=%s "
+ "trap_group_name=%s input_dev_name=%s", __get_str(bus_name),
+ __get_str(dev_name), __get_str(driver_name),
+ __get_str(trap_name), __get_str(trap_group_name),
+ __get_str(input_dev_name))
+);
+
#endif /* _TRACE_DEVLINK_H */
/* This part must be outside protection */
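
The struct devlink_trap_metadata fields consumed by TP_fast_assign() above suggest a call site of the following shape in the devlink core; the metadata layout is inferred from the tracepoint, and the helper name and name sources are assumptions:

/* Sketch: firing the new trap-report tracepoint. */
static void mydrv_report_trap(struct devlink *devlink, struct sk_buff *skb,
			      const char *trap_name, const char *group_name)
{
	struct devlink_trap_metadata metadata = {
		.trap_name	 = trap_name,
		.trap_group_name = group_name,
		.input_dev	 = skb->dev,	/* may be NULL */
	};

	trace_devlink_trap_report(devlink, skb, &metadata);
}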
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index c33079b986e8..e70c90116eda 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -68,21 +68,14 @@ enum rxrpc_client_trace {
rxrpc_client_chan_activate,
rxrpc_client_chan_disconnect,
rxrpc_client_chan_pass,
- rxrpc_client_chan_unstarted,
rxrpc_client_chan_wait_failed,
rxrpc_client_cleanup,
- rxrpc_client_count,
rxrpc_client_discard,
rxrpc_client_duplicate,
rxrpc_client_exposed,
rxrpc_client_replace,
rxrpc_client_to_active,
- rxrpc_client_to_culled,
rxrpc_client_to_idle,
- rxrpc_client_to_inactive,
- rxrpc_client_to_upgrade,
- rxrpc_client_to_waiting,
- rxrpc_client_uncount,
};
enum rxrpc_call_trace {
@@ -271,29 +264,14 @@ enum rxrpc_tx_point {
EM(rxrpc_client_chan_activate, "ChActv") \
EM(rxrpc_client_chan_disconnect, "ChDisc") \
EM(rxrpc_client_chan_pass, "ChPass") \
- EM(rxrpc_client_chan_unstarted, "ChUnst") \
EM(rxrpc_client_chan_wait_failed, "ChWtFl") \
EM(rxrpc_client_cleanup, "Clean ") \
- EM(rxrpc_client_count, "Count ") \
EM(rxrpc_client_discard, "Discar") \
EM(rxrpc_client_duplicate, "Duplic") \
EM(rxrpc_client_exposed, "Expose") \
EM(rxrpc_client_replace, "Replac") \
EM(rxrpc_client_to_active, "->Actv") \
- EM(rxrpc_client_to_culled, "->Cull") \
- EM(rxrpc_client_to_idle, "->Idle") \
- EM(rxrpc_client_to_inactive, "->Inac") \
- EM(rxrpc_client_to_upgrade, "->Upgd") \
- EM(rxrpc_client_to_waiting, "->Wait") \
- E_(rxrpc_client_uncount, "Uncoun")
-
-#define rxrpc_conn_cache_states \
- EM(RXRPC_CONN_CLIENT_INACTIVE, "Inac") \
- EM(RXRPC_CONN_CLIENT_WAITING, "Wait") \
- EM(RXRPC_CONN_CLIENT_ACTIVE, "Actv") \
- EM(RXRPC_CONN_CLIENT_UPGRADE, "Upgd") \
- EM(RXRPC_CONN_CLIENT_CULLED, "Cull") \
- E_(RXRPC_CONN_CLIENT_IDLE, "Idle") \
+ E_(rxrpc_client_to_idle, "->Idle")
#define rxrpc_call_traces \
EM(rxrpc_call_connected, "CON") \
@@ -594,23 +572,20 @@ TRACE_EVENT(rxrpc_client,
__field(int, channel )
__field(int, usage )
__field(enum rxrpc_client_trace, op )
- __field(enum rxrpc_conn_cache_state, cs )
),
TP_fast_assign(
- __entry->conn = conn->debug_id;
+ __entry->conn = conn ? conn->debug_id : 0;
__entry->channel = channel;
- __entry->usage = atomic_read(&conn->usage);
+ __entry->usage = conn ? atomic_read(&conn->usage) : -2;
__entry->op = op;
- __entry->cid = conn->proto.cid;
- __entry->cs = conn->cache_state;
+ __entry->cid = conn ? conn->proto.cid : 0;
),
- TP_printk("C=%08x h=%2d %s %s i=%08x u=%d",
+ TP_printk("C=%08x h=%2d %s i=%08x u=%d",
__entry->conn,
__entry->channel,
__print_symbolic(__entry->op, rxrpc_client_traces),
- __print_symbolic(__entry->cs, rxrpc_conn_cache_states),
__entry->cid,
__entry->usage)
);
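
With conn now allowed to be NULL in TP_fast_assign() (usage is reported as -2 in that case), the wait-failed event can be emitted before a channel is bound to a connection. An illustrative call site, assuming the tracepoint prototype is otherwise unchanged:

	trace_rxrpc_client(NULL, -1, rxrpc_client_chan_wait_failed);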
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index b6238b2209b7..bf5a99d803e4 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -124,6 +124,7 @@ enum bpf_cmd {
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
+ BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
@@ -155,6 +156,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_DEVMAP_HASH,
BPF_MAP_TYPE_STRUCT_OPS,
BPF_MAP_TYPE_RINGBUF,
+ BPF_MAP_TYPE_INODE_STORAGE,
};
/* Note that tracing related programs such as
@@ -345,19 +347,45 @@ enum bpf_link_type {
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_STATE_FREQ (1U << 3)
+/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
+ * restrict map and helper usage for such programs. Sleepable BPF programs can
+ * only be attached to hooks where kernel execution context allows sleeping.
+ * Such programs are allowed to use helpers that may sleep like
+ * bpf_copy_from_user().
+ */
+#define BPF_F_SLEEPABLE (1U << 4)
+
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
- * two extensions:
- *
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd map fd
- * insn[1].imm: 0 offset into value
- * insn[0].off: 0 0
- * insn[1].off: 0 0
- * ldimm64 rewrite: address of map address of map[0]+offset
- * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+ * the following extensions:
+ *
+ * insn[0].src_reg: BPF_PSEUDO_MAP_FD
+ * insn[0].imm: map fd
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map
+ * verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
+/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
+ * insn[0].imm: map fd
+ * insn[1].imm: offset into value
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map[0]+offset
+ * verifier type: PTR_TO_MAP_VALUE
+ */
#define BPF_PSEUDO_MAP_VALUE 2
+/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
+ * insn[0].imm: kernel btd id of VAR
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the kernel variable
+ * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
+ * is struct/union.
+ */
+#define BPF_PSEUDO_BTF_ID 3
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
@@ -404,6 +432,12 @@ enum {
/* Enable memory-mapping BPF map */
BPF_F_MMAPABLE = (1U << 10),
+
+/* Share perf_event among processes */
+ BPF_F_PRESERVE_ELEMS = (1U << 11),
+
+/* Create a map that is suitable to be an inner map with dynamic max entries */
+ BPF_F_INNER_MAP = (1U << 12),
};
/* Flags for BPF_PROG_QUERY. */
@@ -414,6 +448,11 @@ enum {
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
/* enabled run_time_ns and run_cnt */
@@ -556,6 +595,8 @@ union bpf_attr {
*/
__aligned_u64 ctx_in;
__aligned_u64 ctx_out;
+ __u32 flags;
+ __u32 cpu;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -622,8 +663,13 @@ union bpf_attr {
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
- __aligned_u64 iter_info; /* extra bpf_iter_link_info */
- __u32 iter_info_len; /* iter_info length */
+ union {
+ __u32 target_btf_id; /* btf_id of target to attach to */
+ struct {
+ __aligned_u64 iter_info; /* extra bpf_iter_link_info */
+ __u32 iter_info_len; /* iter_info length */
+ };
+ };
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */
@@ -649,6 +695,12 @@ union bpf_attr {
__u32 flags;
} iter_create;
+ struct { /* struct used by BPF_PROG_BIND_MAP command */
+ __u32 prog_fd;
+ __u32 map_fd;
+ __u32 flags; /* extra flags */
+ } prog_bind_map;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -1438,8 +1490,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 0, if current task belongs to the cgroup2.
+ * * 1, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
@@ -1649,7 +1701,7 @@ union bpf_attr {
* **TCP_CONGESTION**, **TCP_BPF_IW**,
* **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
* **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
- * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**.
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
* Return
@@ -2204,7 +2256,7 @@ union bpf_attr {
* Description
* This helper is used in programs implementing policies at the
* skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
- * if the verdeict eBPF program returns **SK_PASS**), redirect it
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
* to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
* egress interfaces can be used for redirection. The
@@ -2496,7 +2548,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * long bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(void *sock)
* Description
* Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from
@@ -2676,7 +2728,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*.
@@ -2807,7 +2859,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
* Description
* Get a bpf-local-storage from a *sk*.
*
@@ -2823,6 +2875,9 @@ union bpf_attr {
* "type". The bpf-local-storage "type" (i.e. the *map*) is
* searched against all bpf-local-storages residing at *sk*.
*
+ * *sk* is a kernel **struct sock** pointer for LSM programs.
+ * *sk* is a **struct bpf_sock** pointer for other program types.
+ *
* An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf-local-storage will be
* created if one does not exist. *value* can be used
@@ -2835,13 +2890,14 @@ union bpf_attr {
* **NULL** if not found or there was an error in adding
* a new bpf-local-storage.
*
- * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
* Description
* Delete a bpf-local-storage from a *sk*.
* Return
* 0 on success.
*
* **-ENOENT** if the bpf-local-storage cannot be found.
+ * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
*
* long bpf_send_signal(u32 sig)
* Description
@@ -2858,7 +2914,7 @@ union bpf_attr {
*
* **-EAGAIN** if bpf program can try again.
*
- * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Try to issue a SYN cookie for the packet with corresponding
* IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
@@ -3087,7 +3143,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
* description applies to **BPF_PROG_TYPE_SCHED_CLS** and
@@ -3215,11 +3271,11 @@ union bpf_attr {
*
* **-EOVERFLOW** if an overflow happened: The same object will be tried again.
*
- * u64 bpf_sk_cgroup_id(struct bpf_sock *sk)
+ * u64 bpf_sk_cgroup_id(void *sk)
* Description
* Return the cgroup v2 id of the socket *sk*.
*
- * *sk* must be a non-**NULL** pointer to a full socket, e.g. one
+ * *sk* must be a non-**NULL** pointer to a socket, e.g. one
* returned from **bpf_sk_lookup_xxx**\ (),
* **bpf_sk_fullsock**\ (), etc. The format of returned id is
* same as in **bpf_skb_cgroup_id**\ ().
@@ -3229,7 +3285,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level)
+ * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
* Description
* Return id of cgroup v2 that is ancestor of cgroup associated
* with the *sk* at the *ancestor_level*. The root cgroup is at
@@ -3337,38 +3393,38 @@ union bpf_attr {
* Description
* Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *task*, which is a valid
- * pointer to struct task_struct. To store the stacktrace, the
- * bpf program provides *buf* with a nonnegative *size*.
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
*
* The last argument, *flags*, holds the number of stack frames to
* skip (from 0 to 255), masked with
@@ -3395,6 +3451,293 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
+ * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
+ * Description
+ * Load header option. Support reading a particular TCP header
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
+ *
+ * If *flags* is 0, it will search the option from the
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
+ * has details on what skb_data contains under different
+ * *skops*\ **->op**.
+ *
+ * The first byte of the *searchby_res* specifies the
+ * kind that it wants to search.
+ *
+ * If the searching kind is an experimental kind
+ * (i.e. 253 or 254 according to RFC6994), it also
+ * needs to specify the "magic", which is either
+ * 2 or 4 bytes. The size of the magic is conveyed
+ * in the 2nd byte, the "kind-length" of a TCP
+ * header option; as with a normal TCP header
+ * option, the "kind-length" also counts the first
+ * 2 bytes, "kind" and "kind-length" itself.
+ *
+ * For example, to search experimental kind 254 with
+ * 2 byte magic 0xeB9F, the searchby_res should be
+ * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
+ *
+ * To search for the standard window scale option (3),
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
+ * Note that kind-length must be 0 for a regular option.
+ *
+ * Searching for No-Op (0) and End-of-Option-List (1) are
+ * not supported.
+ *
+ * *len* must be at least 2 bytes which is the minimal size
+ * of a header option.
+ *
+ * Supported flags:
+ *
+ * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
+ * saved_syn packet or the just-received syn packet.
+ *
+ * Return
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOMSG** if the option is not found.
+ *
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
+ *
+ * **-ENOSPC** if there is not enough space. Only the first *len*
+ * bytes are copied.
+ *
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
+ * Description
+ * Store header option. The data will be copied
+ * from buffer *from* with length *len* to the TCP header.
+ *
+ * The buffer *from* should have the whole option that
+ * includes the kind, kind-length, and the actual
+ * option data. The *len* must be at least kind-length
+ * long. The kind-length does not have to be 4 byte
+ * aligned. The kernel will take care of the padding
+ * and setting the 4 bytes aligned value to th->doff.
+ *
+ * This helper will check for a duplicated option
+ * by searching for the same option in the outgoing skb.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
+ *
+ * **-EEXIST** if the option already exists.
+ *
+ * **-EFAULT** on failure to parse the existing header options.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
+ * Description
+ * Reserve *len* bytes for the bpf header option. The
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
+ * the total number of bytes will be reserved.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from an *inode*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *inode* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
+ * helper enforces that the key must be an inode and that the map
+ * must be a **BPF_MAP_TYPE_INODE_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *inode* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *inode*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
+ * Description
+ * Delete a bpf_local_storage from an *inode*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * long bpf_d_path(struct path *path, char *buf, u32 sz)
+ * Description
+ * Return full path for given **struct path** object, which
+ * needs to be the kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
+ * is zero terminated.
+ *
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* and store
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
+ * Description
+ * Use BTF to store a string representation of *ptr*->ptr in *str*,
+ * using *ptr*->type_id. This value should specify the type
+ * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
+ * can be used to look up vmlinux BTF type ids. Traversing the
+ * data structure using BTF, the type information and values are
+ * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
+ * the pointer data is carried out to avoid kernel crashes during
+ * operation. Smaller types can use string space on the stack;
+ * larger programs can use map data to store the string
+ * representation.
+ *
+ * The string can be subsequently shared with userspace via
+ * bpf_perf_event_output() or ring buffer interfaces.
+ * bpf_trace_printk() is to be avoided as it places too small
+ * a limit on string size to be useful.
+ *
+ * *flags* is a combination of
+ *
+ * **BTF_F_COMPACT**
+ * no formatting around type information
+ * **BTF_F_NONAME**
+ * no struct/union member names/types
+ * **BTF_F_PTR_RAW**
+ * show raw (unobfuscated) pointer values;
+ * equivalent to printk specifier %px.
+ * **BTF_F_ZERO**
+ * show zero-valued struct/union members; they
+ * are not displayed by default
+ *
+ * Return
+ * The number of bytes that were written (or would have been
+ * written if output had to be truncated due to string size),
+ * or a negative error in cases of failure.
+ *
+ * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
+ * Description
+ * Use BTF to write to seq_write a string representation of
+ * *ptr*->ptr, using *ptr*->type_id as per bpf_snprintf_btf().
+ * *flags* are identical to those used for bpf_snprintf_btf.
+ * Return
+ * 0 on success or a negative error in case of failure.
+ *
+ * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
+ * Description
+ * See **bpf_get_cgroup_classid**\ () for the main description.
+ * This helper differs from **bpf_get_cgroup_classid**\ () in that
+ * the cgroup v1 net_cls class is retrieved only from the *skb*'s
+ * associated socket instead of the current process.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_redirect_neigh(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*
+ * and fill in L2 addresses from neighboring subsystem. This helper
+ * is somewhat similar to **bpf_redirect**\ (), except that it
+ * populates L2 addresses as well, meaning, internally, the helper
+ * performs a FIB lookup based on the skb's networking header to
+ * get the address of the next hop and then relies on the neighbor
+ * lookup for the L2 address of the nexthop.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types, and enabled
+ * for IPv4 and IPv6 protocols.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on *cpu*. A ksym is an
+ * extern variable decorated with '__ksym'. For ksym, there is a
+ * global var (either static or global) defined of the same name
+ * in the kernel. The ksym is percpu if the global var is percpu.
+ * The returned pointer points to the global percpu var on *cpu*.
+ *
+ * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
+ * kernel, except that bpf_per_cpu_ptr() may return NULL. This
+ * happens if *cpu* is larger than nr_cpu_ids. The caller of
+ * bpf_per_cpu_ptr() must check the returned value.
+ * Return
+ * A pointer pointing to the kernel percpu variable on *cpu*, or
+ * NULL, if *cpu* is invalid.
+ *
+ * void *bpf_this_cpu_ptr(const void *percpu_ptr)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on this cpu. See the
+ * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
+ *
+ * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
+ * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
+ * never return NULL.
+ * Return
+ * A pointer pointing to the kernel percpu variable on this cpu.
+ *
+ * long bpf_redirect_peer(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_redirect**\ (), except
+ * that the redirection happens to the *ifindex*' peer device and
+ * the netns switch takes place from ingress to ingress without
+ * going through the CPU's backlog queue.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types at the ingress
+ * hook and for veth device types. The peer device must reside in a
+ * different network namespace.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3539,6 +3882,20 @@ union bpf_attr {
FN(skc_to_tcp_request_sock), \
FN(skc_to_udp6_sock), \
FN(get_task_stack), \
+ FN(load_hdr_opt), \
+ FN(store_hdr_opt), \
+ FN(reserve_hdr_opt), \
+ FN(inode_storage_get), \
+ FN(inode_storage_delete), \
+ FN(d_path), \
+ FN(copy_from_user), \
+ FN(snprintf_btf), \
+ FN(seq_printf_btf), \
+ FN(skb_cgroup_classid), \
+ FN(redirect_neigh), \
+ FN(bpf_per_cpu_ptr), \
+ FN(bpf_this_cpu_ptr), \
+ FN(redirect_peer), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -3648,9 +4005,13 @@ enum {
BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
};
-/* BPF_FUNC_sk_storage_get flags */
+/* BPF_FUNC_<kernel_obj>_storage_get flags */
enum {
- BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0),
+ BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
+ /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
+ * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
+ */
+ BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
};
/* BPF_FUNC_read_branch_records flags. */
@@ -4071,6 +4432,15 @@ struct bpf_link_info {
__u64 cgroup_id;
__u32 attach_type;
} cgroup;
+ struct {
+ __aligned_u64 target_name; /* in/out: target_name buffer ptr */
+ __u32 target_name_len; /* in/out: target_name buffer len */
+ union {
+ struct {
+ __u32 map_id;
+ } map;
+ };
+ } iter;
struct {
__u32 netns_ino;
__u32 attach_type;
@@ -4158,6 +4528,36 @@ struct bpf_sock_ops {
__u64 bytes_received;
__u64 bytes_acked;
__bpf_md_ptr(struct bpf_sock *, sk);
+ /* [skb_data, skb_data_end) covers the whole TCP header.
+ *
+ * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
+ * header has not been written.
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
+ * been written so far.
+ * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
+ * the 3WHS.
+ * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
+ * the 3WHS.
+ *
+ * bpf_load_hdr_opt() can also be used to read a particular option.
+ */
+ __bpf_md_ptr(void *, skb_data);
+ __bpf_md_ptr(void *, skb_data_end);
+ __u32 skb_len; /* The total length of a packet.
+ * It includes the header, options,
+ * and payload.
+ */
+ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
+ * an easy way to check for tcp_flags
+ * without parsing skb_data.
+ *
+ * In particular, the skb_tcp_flags
+ * will still be available in
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB even though
+ * the outgoing header has not
+ * been written yet.
+ */
};
/* Definitions for bpf_sock_ops_cb_flags */
@@ -4166,8 +4566,51 @@ enum {
BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+ /* Call bpf for all received TCP headers. The bpf prog will be
+ * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ *
+ * It could be used at the client/active side (i.e. connect() side)
+ * when the server told it that the server was in syncookie
+ * mode and required the active side to resend the bpf-written
+ * options. The active side can keep writing the bpf-options until
+ * it received a valid packet from the server side to confirm
+ * the earlier packet (and options) has been received. The later
+ * example patch is using it like this at the active side when the
+ * server is in syncookie mode.
+ *
+ * The bpf prog will usually turn this off in the common cases.
+ */
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
+ /* Call bpf when kernel has received a header option that
+ * the kernel cannot handle. The bpf prog will be called under
+ * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ */
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
+ /* Call bpf when the kernel is writing header options for the
+ * outgoing packet. The bpf prog will first be called
+ * to reserve space in a skb under
+ * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
+ * the bpf prog will be called to write the header option(s)
+ * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
+ * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
+ * related helpers that will be useful to the bpf programs.
+ *
+ * The kernel gets its chance to reserve space and write
+ * options first before the BPF program does.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
/* Mask of all currently supported cb flags */
- BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF,
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
};
/* List of known BPF sock_ops operators.
@@ -4223,6 +4666,63 @@ enum {
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
*/
+ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
+ * It will be called to handle
+ * the packets received at
+ * an already established
+ * connection.
+ *
+ * sock_ops->skb_data:
+ * Referring to the received skb.
+ * It covers the TCP header only.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option.
+ */
+ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
+ * header option later in
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Not available because no header has
+ * been written yet.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the
+ * outgoing skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_reserve_hdr_opt() should
+ * be used to reserve space.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Referring to the outgoing skb.
+ * It covers the TCP header
+ * that has already been written
+ * by the kernel and the
+ * earlier bpf-progs.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the outgoing
+ * skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_store_hdr_opt() should
+ * be used to write the
+ * option.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option that
+ * has already been written
+ * by the kernel or the
+ * earlier bpf-progs.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -4250,6 +4750,63 @@ enum {
enum {
TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
+ TCP_BPF_RTO_MIN = 1004, /* Min RTO in usecs */
+ /* Copy the SYN pkt to optval
+ *
+ * BPF_PROG_TYPE_SOCK_OPS only. It is similar to
+ * bpf_getsockopt(TCP_SAVED_SYN) but is not limited
+ * to reading from the saved_syn. It can get the
+ * syn packet from either:
+ *
+ * 1. the just-received SYN packet (only available when writing the
+ * SYNACK). It will be useful when it is not necessary to
+ * save the SYN packet for later use. It is also the only way
+ * to get the SYN during syncookie mode because the syn
+ * packet cannot be saved during syncookie.
+ *
+ * OR
+ *
+ * 2. the earlier saved syn which was done by
+ * bpf_setsockopt(TCP_SAVE_SYN).
+ *
+ * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
+ * SYN packet is obtained.
+ *
+ * If the bpf-prog does not need the IP[46] header, the
+ * bpf-prog can avoid parsing the IP header by using
+ * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
+ * IP[46] and TCP header by using TCP_BPF_SYN_IP.
+ *
+ * >0: Total number of bytes copied
+ * -ENOSPC: Not enough space in optval. Only the first optlen
+ * bytes are copied.
+ * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
+ * is not saved by setsockopt(TCP_SAVE_SYN).
+ */
+ TCP_BPF_SYN = 1005, /* Copy the TCP header */
+ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
+ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
+};
+
+enum {
+ BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
+};
+
+/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ */
+enum {
+ BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
+ * total option space
+ * required for an established
+ * sk in order to calculate the
+ * MSS. No skb is actually
+ * sent.
+ */
+ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
+ * when sending a SYNACK.
+ */
};
struct bpf_perf_event_value {
@@ -4447,4 +5004,34 @@ struct bpf_sk_lookup {
__u32 local_port; /* Host byte order */
};
+/*
+ * struct btf_ptr is used for typed pointer representation; the
+ * type id is used to render the pointer data as the appropriate type
+ * via the bpf_snprintf_btf() helper described above.  A flags field,
+ * reserved to specify additional details about the BTF pointer
+ * (rather than its mode of display), is included for future use.
+ * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
+ */
+struct btf_ptr {
+ void *ptr;
+ __u32 type_id;
+ __u32 flags; /* BTF ptr flags; unused at present. */
+};
+
+/*
+ * Flags to control bpf_snprintf_btf() behaviour.
+ * - BTF_F_COMPACT: no formatting around type information
+ * - BTF_F_NONAME: no struct/union member names/types
+ * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_F_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ */
+enum {
+ BTF_F_COMPACT = (1ULL << 0),
+ BTF_F_NONAME = (1ULL << 1),
+ BTF_F_PTR_RAW = (1ULL << 2),
+ BTF_F_ZERO = (1ULL << 3),
+};
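
Putting struct btf_ptr and the BTF_F_* flags together, a hedged sketch (the
hook point, the buffer size, and the use of libbpf's
bpf_core_type_id_kernel() are assumptions, not part of this patch):

    #include "vmlinux.h"            /* kernel types, incl. struct btf_ptr */
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    char out[256];

    SEC("tp_btf/sched_switch")
    int dump_prev_task(__u64 *ctx)
    {
            struct task_struct *prev = (struct task_struct *)ctx[1];
            struct btf_ptr ptr = {
                    .ptr = prev,
                    .type_id = bpf_core_type_id_kernel(struct task_struct),
            };

            /* Render *prev as text: compact form, zero-valued fields
             * included.
             */
            bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr),
                             BTF_F_COMPACT | BTF_F_ZERO);
            return 0;
    }

    char _license[] SEC("license") = "GPL";
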
+
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h
new file mode 100644
index 000000000000..7793b26aa154
--- /dev/null
+++ b/include/uapi/linux/can/isotp.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
+/*
+ * linux/can/isotp.h
+ *
+ * Definitions for isotp CAN sockets (ISO 15765-2:2016)
+ *
+ * Copyright (c) 2020 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _UAPI_CAN_ISOTP_H
+#define _UAPI_CAN_ISOTP_H
+
+#include <linux/types.h>
+#include <linux/can.h>
+
+#define SOL_CAN_ISOTP (SOL_CAN_BASE + CAN_ISOTP)
+
+/* for socket options affecting the socket (not the global system) */
+
+#define CAN_ISOTP_OPTS 1 /* pass struct can_isotp_options */
+
+#define CAN_ISOTP_RECV_FC 2 /* pass struct can_isotp_fc_options */
+
+/* sockopts to force stmin timer values for protocol regression tests */
+
+#define CAN_ISOTP_TX_STMIN 3 /* pass __u32 value in nano secs */
+ /* use this time instead of value */
+ /* provided in FC from the receiver */
+
+#define CAN_ISOTP_RX_STMIN 4 /* pass __u32 value in nano secs */
+					/* ignore received CF frames whose */
+					/* timestamps differ by less than val */
+
+#define CAN_ISOTP_LL_OPTS 5 /* pass struct can_isotp_ll_options */
+
+struct can_isotp_options {
+
+ __u32 flags; /* set flags for isotp behaviour. */
+ /* __u32 value : flags see below */
+
+ __u32 frame_txtime; /* frame transmission time (N_As/N_Ar) */
+ /* __u32 value : time in nano secs */
+
+ __u8 ext_address; /* set address for extended addressing */
+ /* __u8 value : extended address */
+
+ __u8 txpad_content; /* set content of padding byte (tx) */
+ /* __u8 value : content on tx path */
+
+ __u8 rxpad_content; /* set content of padding byte (rx) */
+ /* __u8 value : content on rx path */
+
+ __u8 rx_ext_address; /* set address for extended addressing */
+ /* __u8 value : extended address (rx) */
+};
+
+struct can_isotp_fc_options {
+
+ __u8 bs; /* blocksize provided in FC frame */
+ /* __u8 value : blocksize. 0 = off */
+
+ __u8 stmin; /* separation time provided in FC frame */
+ /* __u8 value : */
+ /* 0x00 - 0x7F : 0 - 127 ms */
+ /* 0x80 - 0xF0 : reserved */
+ /* 0xF1 - 0xF9 : 100 us - 900 us */
+ /* 0xFA - 0xFF : reserved */
+
+	__u8  wftmax;			/* max. number of wait frame transmissions */
+ /* __u8 value : 0 = omit FC N_PDU WT */
+};
+
+struct can_isotp_ll_options {
+
+ __u8 mtu; /* generated & accepted CAN frame type */
+ /* __u8 value : */
+ /* CAN_MTU (16) -> standard CAN 2.0 */
+ /* CANFD_MTU (72) -> CAN FD frame */
+
+ __u8 tx_dl; /* tx link layer data length in bytes */
+ /* (configured maximum payload length) */
+ /* __u8 value : 8,12,16,20,24,32,48,64 */
+ /* => rx path supports all LL_DL values */
+
+ __u8 tx_flags; /* set into struct canfd_frame.flags */
+ /* at frame creation: e.g. CANFD_BRS */
+ /* Obsolete when the BRS flag is fixed */
+ /* by the CAN netdriver configuration */
+};
+
+/* flags for isotp behaviour */
+
+#define CAN_ISOTP_LISTEN_MODE 0x001 /* listen only (do not send FC) */
+#define CAN_ISOTP_EXTEND_ADDR 0x002 /* enable extended addressing */
+#define CAN_ISOTP_TX_PADDING 0x004 /* enable CAN frame padding tx path */
+#define CAN_ISOTP_RX_PADDING 0x008 /* enable CAN frame padding rx path */
+#define CAN_ISOTP_CHK_PAD_LEN 0x010 /* check received CAN frame padding */
+#define CAN_ISOTP_CHK_PAD_DATA 0x020 /* check received CAN frame padding */
+#define CAN_ISOTP_HALF_DUPLEX 0x040 /* half duplex error state handling */
+#define CAN_ISOTP_FORCE_TXSTMIN 0x080 /* ignore stmin from received FC */
+#define CAN_ISOTP_FORCE_RXSTMIN 0x100 /* ignore CFs depending on rx stmin */
+#define CAN_ISOTP_RX_EXT_ADDR 0x200 /* different rx extended addressing */
+#define CAN_ISOTP_WAIT_TX_DONE 0x400 /* wait for tx completion */
+
+/* default values */
+
+#define CAN_ISOTP_DEFAULT_FLAGS 0
+#define CAN_ISOTP_DEFAULT_EXT_ADDRESS 0x00
+#define CAN_ISOTP_DEFAULT_PAD_CONTENT 0xCC /* prevent bit-stuffing */
+#define CAN_ISOTP_DEFAULT_FRAME_TXTIME 0
+#define CAN_ISOTP_DEFAULT_RECV_BS 0
+#define CAN_ISOTP_DEFAULT_RECV_STMIN 0x00
+#define CAN_ISOTP_DEFAULT_RECV_WFTMAX 0
+
+#define CAN_ISOTP_DEFAULT_LL_MTU CAN_MTU
+#define CAN_ISOTP_DEFAULT_LL_TX_DL CAN_MAX_DLEN
+#define CAN_ISOTP_DEFAULT_LL_TX_FLAGS 0
+
+/*
+ * Remark on CAN_ISOTP_DEFAULT_RECV_* values:
+ *
+ * We can safely assume that the Linux kernel implementation of
+ * CAN_ISOTP is capable of running with BS=0, STmin=0 and WFTmax=0.
+ * But since we want to behave like a commonly available ECU,
+ * these default settings can be changed via sockopts.
+ * For that reason the STmin value is intentionally _not_ checked for
+ * consistency and copied directly into the flow control (FC) frame.
+ */
+
+#endif /* !_UAPI_CAN_ISOTP_H */
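
To make the new socket API concrete, a hedged userspace sketch (the
interface name and the CAN IDs are assumptions) that sends one ISO-TP PDU
with TX padding enabled; segmentation and flow control are handled by the
kernel:

    #include <linux/can.h>
    #include <linux/can/isotp.h>
    #include <net/if.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int send_pdu(const void *data, size_t len)
    {
            struct sockaddr_can addr = { .can_family = AF_CAN };
            struct can_isotp_options opts = {
                    .flags = CAN_ISOTP_TX_PADDING,
                    .txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT,
            };
            int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

            if (s < 0)
                    return -1;
            setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts,
                       sizeof(opts));

            addr.can_ifindex = if_nametoindex("can0");  /* assumed iface */
            addr.can_addr.tp.tx_id = 0x712;             /* assumed IDs */
            addr.can_addr.tp.rx_id = 0x71a;
            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    close(s);
                    return -1;
            }
            /* One write() hands the kernel a complete ISO-TP PDU. */
            if (write(s, data, len) != (ssize_t)len) {
                    close(s);
                    return -1;
            }
            return close(s);
    }
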
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index 6a11d308eb5c..3386aa81fdf2 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -49,6 +49,9 @@
#include <linux/can.h>
#define SOL_CAN_RAW (SOL_CAN_BASE + CAN_RAW)
+enum {
+ SCM_CAN_RAW_ERRQUEUE = 1,
+};
/* for socket options affecting the socket (not the global system) */
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index cfef4245ea5a..0113bc4db9f5 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -13,6 +13,8 @@
#ifndef _UAPI_LINUX_DEVLINK_H_
#define _UAPI_LINUX_DEVLINK_H_
+#include <linux/const.h>
+
#define DEVLINK_GENL_NAME "devlink"
#define DEVLINK_GENL_VERSION 0x1
#define DEVLINK_GENL_MCGRP_CONFIG_NAME "config"
@@ -122,6 +124,8 @@ enum devlink_command {
DEVLINK_CMD_TRAP_POLICER_NEW,
DEVLINK_CMD_TRAP_POLICER_DEL,
+ DEVLINK_CMD_HEALTH_REPORTER_TEST,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -193,6 +197,9 @@ enum devlink_port_flavour {
* port that faces the PCI VF.
*/
DEVLINK_PORT_FLAVOUR_VIRTUAL, /* Any virtual port facing the user. */
+ DEVLINK_PORT_FLAVOUR_UNUSED, /* Port which exists in the switch, but
+ * is not used in any way.
+ */
};
enum devlink_param_cmode {
@@ -228,6 +235,28 @@ enum {
DEVLINK_ATTR_STATS_MAX = __DEVLINK_ATTR_STATS_MAX - 1
};
+/* Specify what sections of a flash component can be overwritten when
+ * performing an update. Overwriting of firmware binary sections is always
+ * implicitly assumed to be allowed.
+ *
+ * Each section must be documented in
+ * Documentation/networking/devlink/devlink-flash.rst
+ */
+enum {
+ DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT,
+ DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT,
+
+ __DEVLINK_FLASH_OVERWRITE_MAX_BIT,
+ DEVLINK_FLASH_OVERWRITE_MAX_BIT = __DEVLINK_FLASH_OVERWRITE_MAX_BIT - 1
+};
+
+#define DEVLINK_FLASH_OVERWRITE_SETTINGS _BITUL(DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT)
+#define DEVLINK_FLASH_OVERWRITE_IDENTIFIERS _BITUL(DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT)
+
+#define DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS \
+ (_BITUL(__DEVLINK_FLASH_OVERWRITE_MAX_BIT) - 1)
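
The overwrite sections are carried as a bitfield32 netlink attribute
(DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK below). A hedged sketch of how
userspace would fill it (netlink message assembly omitted):

    #include <linux/devlink.h>
    #include <linux/netlink.h>

    /* Allow settings to be overwritten but leave identifiers alone; the
     * selector names the bits this request expresses an opinion on.
     */
    static const struct nla_bitfield32 overwrite_mask = {
            .value    = DEVLINK_FLASH_OVERWRITE_SETTINGS,
            .selector = DEVLINK_FLASH_OVERWRITE_SETTINGS |
                        DEVLINK_FLASH_OVERWRITE_IDENTIFIERS,
    };
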
+
/**
* enum devlink_trap_action - Packet trap action.
* @DEVLINK_TRAP_ACTION_DROP: Packet is dropped by the device and a copy is not
@@ -272,6 +301,29 @@ enum {
DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE,
};
+enum devlink_reload_action {
+ DEVLINK_RELOAD_ACTION_UNSPEC,
+ DEVLINK_RELOAD_ACTION_DRIVER_REINIT, /* Driver entities re-instantiation */
+ DEVLINK_RELOAD_ACTION_FW_ACTIVATE, /* FW activate */
+
+ /* Add new reload actions above */
+ __DEVLINK_RELOAD_ACTION_MAX,
+ DEVLINK_RELOAD_ACTION_MAX = __DEVLINK_RELOAD_ACTION_MAX - 1
+};
+
+enum devlink_reload_limit {
+ DEVLINK_RELOAD_LIMIT_UNSPEC, /* unspecified, no constraints */
+	DEVLINK_RELOAD_LIMIT_NO_RESET,	/* No reset allowed, no downtime allowed,
+					 * no link flaps, and no configuration is lost.
+ */
+
+ /* Add new reload limit above */
+ __DEVLINK_RELOAD_LIMIT_MAX,
+ DEVLINK_RELOAD_LIMIT_MAX = __DEVLINK_RELOAD_LIMIT_MAX - 1
+};
+
+#define DEVLINK_RELOAD_LIMITS_VALID_MASK (BIT(__DEVLINK_RELOAD_LIMIT_MAX) - 1)
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -458,6 +510,23 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_LANES, /* u32 */
DEVLINK_ATTR_PORT_SPLITTABLE, /* u8 */
+ DEVLINK_ATTR_PORT_EXTERNAL, /* u8 */
+ DEVLINK_ATTR_PORT_CONTROLLER_NUMBER, /* u32 */
+
+ DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT, /* u64 */
+ DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK, /* bitfield32 */
+
+ DEVLINK_ATTR_RELOAD_ACTION, /* u8 */
+ DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, /* bitfield32 */
+ DEVLINK_ATTR_RELOAD_LIMITS, /* bitfield32 */
+
+ DEVLINK_ATTR_DEV_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS_ENTRY, /* nested */
+ DEVLINK_ATTR_RELOAD_STATS_LIMIT, /* u8 */
+ DEVLINK_ATTR_RELOAD_STATS_VALUE, /* u32 */
+ DEVLINK_ATTR_REMOTE_RELOAD_STATS, /* nested */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index b4f2d134e713..9ca87bc73c44 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1617,6 +1617,8 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT = 87,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT = 88,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT = 89,
+ ETHTOOL_LINK_MODE_100baseFX_Half_BIT = 90,
+ ETHTOOL_LINK_MODE_100baseFX_Full_BIT = 91,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
};
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index 72ba36be9655..e2bf36e6964b 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -92,9 +92,12 @@ enum {
#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0)
/* provide optional reply for SET or ACT requests */
#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1)
+/* request statistics, if supported by the driver */
+#define ETHTOOL_FLAG_STATS (1 << 2)
#define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \
- ETHTOOL_FLAG_OMIT_REPLY)
+ ETHTOOL_FLAG_OMIT_REPLY | \
+ ETHTOOL_FLAG_STATS)
enum {
ETHTOOL_A_HEADER_UNSPEC,
@@ -377,12 +380,25 @@ enum {
ETHTOOL_A_PAUSE_AUTONEG, /* u8 */
ETHTOOL_A_PAUSE_RX, /* u8 */
ETHTOOL_A_PAUSE_TX, /* u8 */
+ ETHTOOL_A_PAUSE_STATS, /* nest - _PAUSE_STAT_* */
/* add new constants above here */
__ETHTOOL_A_PAUSE_CNT,
ETHTOOL_A_PAUSE_MAX = (__ETHTOOL_A_PAUSE_CNT - 1)
};
+enum {
+ ETHTOOL_A_PAUSE_STAT_UNSPEC,
+ ETHTOOL_A_PAUSE_STAT_PAD,
+
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES,
+ ETHTOOL_A_PAUSE_STAT_RX_FRAMES,
+
+ /* add new constants above here */
+ __ETHTOOL_A_PAUSE_STAT_CNT,
+ ETHTOOL_A_PAUSE_STAT_MAX = (__ETHTOOL_A_PAUSE_STAT_CNT - 1)
+};
+
/* EEE */
enum {
diff --git a/include/uapi/linux/genetlink.h b/include/uapi/linux/genetlink.h
index 9c0636ec2286..d83f214b4134 100644
--- a/include/uapi/linux/genetlink.h
+++ b/include/uapi/linux/genetlink.h
@@ -64,6 +64,8 @@ enum {
CTRL_ATTR_OPS,
CTRL_ATTR_MCAST_GROUPS,
CTRL_ATTR_POLICY,
+ CTRL_ATTR_OP_POLICY,
+ CTRL_ATTR_OP,
__CTRL_ATTR_MAX,
};
@@ -85,6 +87,15 @@ enum {
__CTRL_ATTR_MCAST_GRP_MAX,
};
+enum {
+ CTRL_ATTR_POLICY_UNSPEC,
+ CTRL_ATTR_POLICY_DO,
+ CTRL_ATTR_POLICY_DUMP,
+
+ __CTRL_ATTR_POLICY_DUMP_MAX,
+ CTRL_ATTR_POLICY_DUMP_MAX = __CTRL_ATTR_POLICY_DUMP_MAX - 1
+};
+
#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index c7d66755d212..79f9191bbb24 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -2,6 +2,8 @@
#ifndef _UAPI_LINUX_GTP_H_
#define _UAPI_LINUX_GTP_H_
+#define GTP_GENL_MCGRP_NAME "gtp"
+
enum gtp_genl_cmds {
GTP_CMD_NEWPDP,
GTP_CMD_DELPDP,
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index c1227aecd38f..4c687686aa8f 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -455,10 +455,33 @@ enum {
enum {
MDBA_MDB_EATTR_UNSPEC,
MDBA_MDB_EATTR_TIMER,
+ MDBA_MDB_EATTR_SRC_LIST,
+ MDBA_MDB_EATTR_GROUP_MODE,
+ MDBA_MDB_EATTR_SOURCE,
+ MDBA_MDB_EATTR_RTPROT,
__MDBA_MDB_EATTR_MAX
};
#define MDBA_MDB_EATTR_MAX (__MDBA_MDB_EATTR_MAX - 1)
+/* per mdb entry source */
+enum {
+ MDBA_MDB_SRCLIST_UNSPEC,
+ MDBA_MDB_SRCLIST_ENTRY,
+ __MDBA_MDB_SRCLIST_MAX
+};
+#define MDBA_MDB_SRCLIST_MAX (__MDBA_MDB_SRCLIST_MAX - 1)
+
+/* per mdb entry per source attributes
+ * these are embedded in MDBA_MDB_SRCLIST_ENTRY
+ */
+enum {
+ MDBA_MDB_SRCATTR_UNSPEC,
+ MDBA_MDB_SRCATTR_ADDRESS,
+ MDBA_MDB_SRCATTR_TIMER,
+ __MDBA_MDB_SRCATTR_MAX
+};
+#define MDBA_MDB_SRCATTR_MAX (__MDBA_MDB_SRCATTR_MAX - 1)
+
/* multicast router types */
enum {
MDB_RTR_TYPE_DISABLED,
@@ -495,6 +518,8 @@ struct br_mdb_entry {
__u8 state;
#define MDB_FLAGS_OFFLOAD (1 << 0)
#define MDB_FLAGS_FAST_LEAVE (1 << 1)
+#define MDB_FLAGS_STAR_EXCL (1 << 2)
+#define MDB_FLAGS_BLOCKED (1 << 3)
__u8 flags;
__u16 vid;
struct {
@@ -509,10 +534,23 @@ struct br_mdb_entry {
enum {
MDBA_SET_ENTRY_UNSPEC,
MDBA_SET_ENTRY,
+ MDBA_SET_ENTRY_ATTRS,
__MDBA_SET_ENTRY_MAX,
};
#define MDBA_SET_ENTRY_MAX (__MDBA_SET_ENTRY_MAX - 1)
+/* [MDBA_SET_ENTRY_ATTRS] = {
+ * [MDBE_ATTR_xxx]
+ * ...
+ * }
+ */
+enum {
+ MDBE_ATTR_UNSPEC,
+ MDBE_ATTR_SOURCE,
+ __MDBE_ATTR_MAX,
+};
+#define MDBE_ATTR_MAX (__MDBE_ATTR_MAX - 1)
+
/* Embedded inside LINK_XSTATS_TYPE_BRIDGE */
enum {
BRIDGE_XSTATS_UNSPEC,
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 7fba4de511de..c4b23f06f69e 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -7,24 +7,23 @@
/* This struct should be in sync with struct rtnl_link_stats64 */
struct rtnl_link_stats {
- __u32 rx_packets; /* total packets received */
- __u32 tx_packets; /* total packets transmitted */
- __u32 rx_bytes; /* total bytes received */
- __u32 tx_bytes; /* total bytes transmitted */
- __u32 rx_errors; /* bad packets received */
- __u32 tx_errors; /* packet transmit problems */
- __u32 rx_dropped; /* no space in linux buffers */
- __u32 tx_dropped; /* no space available in linux */
- __u32 multicast; /* multicast packets received */
+ __u32 rx_packets;
+ __u32 tx_packets;
+ __u32 rx_bytes;
+ __u32 tx_bytes;
+ __u32 rx_errors;
+ __u32 tx_errors;
+ __u32 rx_dropped;
+ __u32 tx_dropped;
+ __u32 multicast;
__u32 collisions;
-
/* detailed rx_errors: */
__u32 rx_length_errors;
- __u32 rx_over_errors; /* receiver ring buff overflow */
- __u32 rx_crc_errors; /* recved pkt with crc error */
- __u32 rx_frame_errors; /* recv'd frame alignment error */
- __u32 rx_fifo_errors; /* recv'r fifo overrun */
- __u32 rx_missed_errors; /* receiver missed packet */
+ __u32 rx_over_errors;
+ __u32 rx_crc_errors;
+ __u32 rx_frame_errors;
+ __u32 rx_fifo_errors;
+ __u32 rx_missed_errors;
/* detailed tx_errors */
__u32 tx_aborted_errors;
@@ -37,29 +36,200 @@ struct rtnl_link_stats {
__u32 rx_compressed;
__u32 tx_compressed;
- __u32 rx_nohandler; /* dropped, no handler found */
+ __u32 rx_nohandler;
};
-/* The main device statistics structure */
+/**
+ * struct rtnl_link_stats64 - The main device statistics structure.
+ *
+ * @rx_packets: Number of good packets received by the interface.
+ * For hardware interfaces counts all good packets received from the device
+ * by the host, including packets which host had to drop at various stages
+ * of processing (even in the driver).
+ *
+ * @tx_packets: Number of packets successfully transmitted.
+ * For hardware interfaces counts packets which host was able to successfully
+ * hand over to the device, which does not necessarily mean that packets
+ * had been successfully transmitted out of the device, only that device
+ * acknowledged it copied them out of host memory.
+ *
+ * @rx_bytes: Number of good received bytes, corresponding to @rx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @tx_bytes: Number of good transmitted bytes, corresponding to @tx_packets.
+ *
+ * For IEEE 802.3 devices should count the length of Ethernet Frames
+ * excluding the FCS.
+ *
+ * @rx_errors: Total number of bad packets received on this network device.
+ * This counter must include events counted by @rx_length_errors,
+ * @rx_crc_errors, @rx_frame_errors and other errors not otherwise
+ * counted.
+ *
+ * @tx_errors: Total number of transmit problems.
+ *	This counter must include events counted by @tx_aborted_errors,
+ * @tx_carrier_errors, @tx_fifo_errors, @tx_heartbeat_errors,
+ * @tx_window_errors and other errors not otherwise counted.
+ *
+ * @rx_dropped: Number of packets received but not processed,
+ * e.g. due to lack of resources or unsupported protocol.
+ * For hardware interfaces this counter should not include packets
+ * dropped by the device which are counted separately in
+ * @rx_missed_errors (since procfs folds those two counters together).
+ *
+ * @tx_dropped: Number of packets dropped on their way to transmission,
+ * e.g. due to lack of resources.
+ *
+ * @multicast: Multicast packets received.
+ * For hardware interfaces this statistic is commonly calculated
+ * at the device level (unlike @rx_packets) and therefore may include
+ * packets which did not reach the host.
+ *
+ * For IEEE 802.3 devices this counter may be equivalent to:
+ *
+ * - 30.3.1.1.21 aMulticastFramesReceivedOK
+ *
+ * @collisions: Number of collisions during packet transmissions.
+ *
+ * @rx_length_errors: Number of packets dropped due to invalid length.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to a sum
+ * of the following attributes:
+ *
+ * - 30.3.1.1.23 aInRangeLengthErrors
+ * - 30.3.1.1.24 aOutOfRangeLengthField
+ * - 30.3.1.1.25 aFrameTooLongErrors
+ *
+ * @rx_over_errors: Receiver FIFO overflow event counter.
+ *
+ * Historically the count of overflow events. Such events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ *	The recommended interpretation for high speed interfaces is the
+ *	number of packets dropped because they did not fit into buffers
+ *	provided by the host, e.g. packets larger than the MTU, or because
+ *	the next buffer in the ring was not available for a scatter transfer.
+ *
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ *	This statistic was historically used interchangeably with
+ * @rx_fifo_errors.
+ *
+ * This statistic corresponds to hardware events and is not commonly used
+ * on software devices.
+ *
+ * @rx_crc_errors: Number of packets received with a CRC error.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.6 aFrameCheckSequenceErrors
+ *
+ * @rx_frame_errors: Receiver frame alignment errors.
+ * Part of aggregate "frame" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter should be equivalent to:
+ *
+ * - 30.3.1.1.7 aAlignmentErrors
+ *
+ * @rx_fifo_errors: Receiver FIFO error counter.
+ *
+ * Historically the count of overflow events. Those events may be
+ * reported in the receive descriptors or via interrupts, and may
+ * not correspond one-to-one with dropped packets.
+ *
+ *	This statistic was used interchangeably with @rx_over_errors.
+ * Not recommended for use in drivers for high speed interfaces.
+ *
+ * This statistic is used on software devices, e.g. to count software
+ *	packet queue overflow (CAN) or sequencing errors (GRE).
+ *
+ * @rx_missed_errors: Count of packets missed by the host.
+ * Folded into the "drop" counter in `/proc/net/dev`.
+ *
+ *	Counts the number of packets dropped by the device due to lack
+ *	of buffer space.  This usually indicates that the host interface
+ *	is slower than the network interface, or that the host is not
+ *	keeping up with the receive packet rate.
+ *
+ * This statistic corresponds to hardware events and is not used
+ * on software devices.
+ *
+ * @tx_aborted_errors:
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ * For IEEE 802.3 devices capable of half-duplex operation this counter
+ * must be equivalent to:
+ *
+ * - 30.3.1.1.11 aFramesAbortedDueToXSColls
+ *
+ * High speed interfaces may use this counter as a general device
+ * discard counter.
+ *
+ * @tx_carrier_errors: Number of frame transmission errors due to loss
+ * of carrier during transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.13 aCarrierSenseErrors
+ *
+ * @tx_fifo_errors: Number of frame transmission errors due to device
+ * FIFO underrun / underflow. This condition occurs when the device
+ * begins transmission of a frame but is unable to deliver the
+ * entire frame to the transmitter in time for transmission.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * @tx_heartbeat_errors: Number of Heartbeat / SQE Test errors for
+ * old half-duplex Ethernet.
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices possibly equivalent to:
+ *
+ * - 30.3.2.1.4 aSQETestErrors
+ *
+ * @tx_window_errors: Number of frame transmission errors due
+ * to late collisions (for Ethernet - after the first 64B of transmission).
+ * Part of aggregate "carrier" errors in `/proc/net/dev`.
+ *
+ * For IEEE 802.3 devices this counter must be equivalent to:
+ *
+ * - 30.3.1.1.10 aLateCollisions
+ *
+ * @rx_compressed: Number of correctly received compressed packets.
+ *	This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @tx_compressed: Number of transmitted compressed packets.
+ *	This counter is only meaningful for interfaces which support
+ * packet compression (e.g. CSLIP, PPP).
+ *
+ * @rx_nohandler: Number of packets received on the interface
+ * but dropped by the networking stack because the device is
+ * not designated to receive packets (e.g. backup link in a bond).
+ */
struct rtnl_link_stats64 {
- __u64 rx_packets; /* total packets received */
- __u64 tx_packets; /* total packets transmitted */
- __u64 rx_bytes; /* total bytes received */
- __u64 tx_bytes; /* total bytes transmitted */
- __u64 rx_errors; /* bad packets received */
- __u64 tx_errors; /* packet transmit problems */
- __u64 rx_dropped; /* no space in linux buffers */
- __u64 tx_dropped; /* no space available in linux */
- __u64 multicast; /* multicast packets received */
+ __u64 rx_packets;
+ __u64 tx_packets;
+ __u64 rx_bytes;
+ __u64 tx_bytes;
+ __u64 rx_errors;
+ __u64 tx_errors;
+ __u64 rx_dropped;
+ __u64 tx_dropped;
+ __u64 multicast;
__u64 collisions;
/* detailed rx_errors: */
__u64 rx_length_errors;
- __u64 rx_over_errors; /* receiver ring buff overflow */
- __u64 rx_crc_errors; /* recved pkt with crc error */
- __u64 rx_frame_errors; /* recv'd frame alignment error */
- __u64 rx_fifo_errors; /* recv'r fifo overrun */
- __u64 rx_missed_errors; /* receiver missed packet */
+ __u64 rx_over_errors;
+ __u64 rx_crc_errors;
+ __u64 rx_frame_errors;
+ __u64 rx_fifo_errors;
+ __u64 rx_missed_errors;
/* detailed tx_errors */
__u64 tx_aborted_errors;
@@ -71,8 +241,7 @@ struct rtnl_link_stats64 {
/* for cslip etc */
__u64 rx_compressed;
__u64 tx_compressed;
-
- __u64 rx_nohandler; /* dropped, no handler found */
+ __u64 rx_nohandler;
};
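
For reference, the counters documented above are also exported one file per
struct member under /sys/class/net/<dev>/statistics/, which makes
spot-checking a single counter easy; a hedged sketch (device and counter
names are assumptions):

    #include <stdio.h>

    /* Read one rtnl_link_stats64 counter, e.g.
     * read_stat("eth0", "rx_missed_errors").
     */
    static long long read_stat(const char *dev, const char *counter)
    {
            char path[256];
            long long val = -1;
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/class/net/%s/statistics/%s", dev, counter);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "%lld", &val) != 1)
                    val = -1;
            fclose(f);
            return val;
    }
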
/* The struct should be in sync with struct ifmap */
diff --git a/include/uapi/linux/if_pppol2tp.h b/include/uapi/linux/if_pppol2tp.h
index 060b4d1f3129..a91044328bc9 100644
--- a/include/uapi/linux/if_pppol2tp.h
+++ b/include/uapi/linux/if_pppol2tp.h
@@ -75,7 +75,7 @@ struct pppol2tpv3in6_addr {
};
/* Socket options:
- * DEBUG - bitmask of debug message categories
+ * DEBUG - bitmask of debug message categories (not used)
* SENDSEQ - 0 => don't send packets with sequence numbers
* 1 => send packets with sequence numbers
* RECVSEQ - 0 => receive packet sequence numbers are optional
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 5ba122c1949a..20ee93f0f876 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -160,6 +160,7 @@ enum {
INET_DIAG_ULP_INFO,
INET_DIAG_SK_BPF_STORAGES,
INET_DIAG_CGROUP_ID,
+ INET_DIAG_SOCKOPT,
__INET_DIAG_MAX,
};
@@ -183,6 +184,23 @@ struct inet_diag_meminfo {
__u32 idiag_tmem;
};
+/* INET_DIAG_SOCKOPT */
+
+struct inet_diag_sockopt {
+ __u8 recverr:1,
+ is_icsk:1,
+ freebind:1,
+ hdrincl:1,
+ mc_loop:1,
+ transparent:1,
+ mc_all:1,
+ nodefrag:1;
+ __u8 bind_address_no_port:1,
+ recverr_rfc4884:1,
+ defer_connect:1,
+ unused:5;
+};
+
/* INET_DIAG_VEGASINFO */
struct tcpvegas_info {
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 61158f5a1a5b..30c80d5ba4bf 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -108,7 +108,7 @@ enum {
L2TP_ATTR_VLAN_ID, /* u16 (not used) */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
- L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
+ L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags (not used) */
L2TP_ATTR_RECV_SEQ, /* u8 */
L2TP_ATTR_SEND_SEQ, /* u8 */
L2TP_ATTR_LNS_MODE, /* u8 */
@@ -144,6 +144,7 @@ enum {
L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
L2TP_ATTR_RX_ERRORS, /* u64 */
L2TP_ATTR_STATS_PAD,
+ L2TP_ATTR_RX_COOKIE_DISCARDS, /* u64 */
__L2TP_ATTR_STATS_MAX,
};
@@ -177,7 +178,9 @@ enum l2tp_seqmode {
};
/**
- * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions
+ * enum l2tp_debug_flags - debug message categories for L2TP tunnels/sessions.
+ *
+ * Unused.
*
* @L2TP_MSG_DEBUG: verbose debug (if compiled in)
* @L2TP_MSG_CONTROL: userspace - kernel interface
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 11c8c1fc1124..1a42f5f9b31b 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -113,8 +113,8 @@ struct igmpmsg {
__u32 unused1,unused2;
unsigned char im_msgtype; /* What is this */
unsigned char im_mbz; /* Must be zero */
- unsigned char im_vif; /* Interface (this ought to be a vifi_t!) */
- unsigned char unused3;
+ unsigned char im_vif; /* Low 8 bits of Interface */
+ unsigned char im_vif_hi; /* High 8 bits of Interface */
struct in_addr im_src,im_dst;
};
@@ -169,6 +169,7 @@ enum {
IPMRA_CREPORT_SRC_ADDR,
IPMRA_CREPORT_DST_ADDR,
IPMRA_CREPORT_PKT,
+ IPMRA_CREPORT_TABLE,
__IPMRA_CREPORT_MAX
};
#define IPMRA_CREPORT_MAX (__IPMRA_CREPORT_MAX - 1)
diff --git a/include/uapi/linux/netfilter.h b/include/uapi/linux/netfilter.h
index ca9e63d6e0e4..ef9a44286e23 100644
--- a/include/uapi/linux/netfilter.h
+++ b/include/uapi/linux/netfilter.h
@@ -45,7 +45,8 @@ enum nf_inet_hooks {
NF_INET_FORWARD,
NF_INET_LOCAL_OUT,
NF_INET_POST_ROUTING,
- NF_INET_NUMHOOKS
+ NF_INET_NUMHOOKS,
+ NF_INET_INGRESS = NF_INET_NUMHOOKS,
};
enum nf_dev_hooks {
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 2b8e12f7a4a6..98272cb5f617 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -172,6 +172,7 @@ enum nft_table_flags {
* @NFTA_TABLE_NAME: name of the table (NLA_STRING)
* @NFTA_TABLE_FLAGS: bitmask of enum nft_table_flags (NLA_U32)
* @NFTA_TABLE_USE: number of chains in this table (NLA_U32)
+ * @NFTA_TABLE_USERDATA: user data (NLA_BINARY)
*/
enum nft_table_attributes {
NFTA_TABLE_UNSPEC,
@@ -180,6 +181,7 @@ enum nft_table_attributes {
NFTA_TABLE_USE,
NFTA_TABLE_HANDLE,
NFTA_TABLE_PAD,
+ NFTA_TABLE_USERDATA,
__NFTA_TABLE_MAX
};
#define NFTA_TABLE_MAX (__NFTA_TABLE_MAX - 1)
@@ -206,6 +208,7 @@ enum nft_chain_flags {
* @NFTA_CHAIN_COUNTERS: counter specification of the chain (NLA_NESTED: nft_counter_attributes)
* @NFTA_CHAIN_FLAGS: chain flags
* @NFTA_CHAIN_ID: uniquely identifies a chain in a transaction (NLA_U32)
+ * @NFTA_CHAIN_USERDATA: user data (NLA_BINARY)
*/
enum nft_chain_attributes {
NFTA_CHAIN_UNSPEC,
@@ -220,6 +223,7 @@ enum nft_chain_attributes {
NFTA_CHAIN_PAD,
NFTA_CHAIN_FLAGS,
NFTA_CHAIN_ID,
+ NFTA_CHAIN_USERDATA,
__NFTA_CHAIN_MAX
};
#define NFTA_CHAIN_MAX (__NFTA_CHAIN_MAX - 1)
@@ -745,10 +749,12 @@ enum nft_payload_bases {
*
* @NFT_PAYLOAD_CSUM_NONE: no checksumming
* @NFT_PAYLOAD_CSUM_INET: internet checksum (RFC 791)
+ * @NFT_PAYLOAD_CSUM_SCTP: CRC-32c, for use in SCTP header (RFC 3309)
*/
enum nft_payload_csum_types {
NFT_PAYLOAD_CSUM_NONE,
NFT_PAYLOAD_CSUM_INET,
+ NFT_PAYLOAD_CSUM_SCTP,
};
enum nft_payload_csum_flags {
@@ -1008,10 +1014,12 @@ enum nft_socket_attributes {
*
* @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
* @NFT_SOCKET_MARK: Value of the socket mark
+ * @NFT_SOCKET_WILDCARD: Whether the socket is zero-bound (e.g. 0.0.0.0 or ::0)
*/
enum nft_socket_keys {
NFT_SOCKET_TRANSPARENT,
NFT_SOCKET_MARK,
+ NFT_SOCKET_WILDCARD,
__NFT_SOCKET_MAX
};
#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
@@ -1555,6 +1563,7 @@ enum nft_ct_expectation_attributes {
* @NFTA_OBJ_DATA: stateful object data (NLA_NESTED)
* @NFTA_OBJ_USE: number of references to this expression (NLA_U32)
* @NFTA_OBJ_HANDLE: object handle (NLA_U64)
+ * @NFTA_OBJ_USERDATA: user data (NLA_BINARY)
*/
enum nft_object_attributes {
NFTA_OBJ_UNSPEC,
@@ -1565,6 +1574,7 @@ enum nft_object_attributes {
NFTA_OBJ_USE,
NFTA_OBJ_HANDLE,
NFTA_OBJ_PAD,
+ NFTA_OBJ_USERDATA,
__NFTA_OBJ_MAX
};
#define NFTA_OBJ_MAX (__NFTA_OBJ_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 262881792671..d8484be72fdc 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -247,7 +247,7 @@ enum ctattr_stats_cpu {
CTA_STATS_FOUND,
CTA_STATS_NEW, /* no longer used */
CTA_STATS_INVALID,
- CTA_STATS_IGNORE,
+ CTA_STATS_IGNORE, /* no longer used */
CTA_STATS_DELETE, /* no longer used */
CTA_STATS_DELETE_LIST, /* no longer used */
CTA_STATS_INSERT,
@@ -256,6 +256,7 @@ enum ctattr_stats_cpu {
CTA_STATS_EARLY_DROP,
CTA_STATS_ERROR,
CTA_STATS_SEARCH_RESTART,
+ CTA_STATS_CLASH_RESOLVE,
__CTA_STATS_MAX,
};
#define CTA_STATS_MAX (__CTA_STATS_MAX - 1)
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index eac8a6a648ea..c3816ff7bfc3 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -129,6 +129,7 @@ struct nlmsgerr {
* @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to
* be used - in the success case - to identify a created
* object or operation or similar (binary)
+ * @NLMSGERR_ATTR_POLICY: policy for a rejected attribute
* @__NLMSGERR_ATTR_MAX: number of attributes
* @NLMSGERR_ATTR_MAX: highest attribute number
*/
@@ -137,6 +138,7 @@ enum nlmsgerr_attrs {
NLMSGERR_ATTR_MSG,
NLMSGERR_ATTR_OFFS,
NLMSGERR_ATTR_COOKIE,
+ NLMSGERR_ATTR_POLICY,
__NLMSGERR_ATTR_MAX,
NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
@@ -331,6 +333,7 @@ enum netlink_attribute_type {
* the index, if limited inside the nesting (U32)
* @NL_POLICY_TYPE_ATTR_BITFIELD32_MASK: valid mask for the
* bitfield32 type (U32)
+ * @NL_POLICY_TYPE_ATTR_MASK: mask of valid bits for unsigned integers (U64)
* @NL_POLICY_TYPE_ATTR_PAD: pad attribute for 64-bit alignment
*/
enum netlink_policy_type_attr {
@@ -346,6 +349,7 @@ enum netlink_policy_type_attr {
NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE,
NL_POLICY_TYPE_ATTR_BITFIELD32_MASK,
NL_POLICY_TYPE_ATTR_PAD,
+ NL_POLICY_TYPE_ATTR_MASK,
/* keep last */
__NL_POLICY_TYPE_ATTR_MAX,
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 631f3a997b3c..47700a2b9af9 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -252,9 +252,13 @@
* DOC: SAE authentication offload
*
* By setting @NL80211_EXT_FEATURE_SAE_OFFLOAD flag drivers can indicate they
- * support offloading SAE authentication for WPA3-Personal networks. In
- * %NL80211_CMD_CONNECT the password for SAE should be specified using
- * %NL80211_ATTR_SAE_PASSWORD.
+ * support offloading SAE authentication for WPA3-Personal networks in station
+ * mode.  Similarly, the @NL80211_EXT_FEATURE_SAE_OFFLOAD_AP flag can be set
+ * by drivers to indicate offload support in AP mode.
+ *
+ * The password for SAE should be specified using %NL80211_ATTR_SAE_PASSWORD in
+ * %NL80211_CMD_CONNECT and %NL80211_CMD_START_AP for station and AP mode
+ * respectively.
*/
/**
@@ -647,13 +651,9 @@
* authentication/association or not receiving a response from the AP.
* Non-zero %NL80211_ATTR_STATUS_CODE value is indicated in that case as
* well to remain backwards compatible.
- * When establishing a security association, drivers that support 4 way
- * handshake offload should send %NL80211_CMD_PORT_AUTHORIZED event when
- * the 4 way handshake is completed successfully.
* @NL80211_CMD_ROAM: Notification indicating the card/driver roamed by itself.
- * When a security association was established with the new AP (e.g. if
- * the FT protocol was used for roaming or the driver completed the 4 way
- * handshake), this event should be followed by an
+ * When a security association was established on an 802.1X network using
+ * fast transition, this event should be followed by an
* %NL80211_CMD_PORT_AUTHORIZED event.
* @NL80211_CMD_DISCONNECT: drop a given connection; also used to notify
* userspace that a connection was dropped by the AP or due to other
@@ -1067,13 +1067,11 @@
* @NL80211_CMD_DEL_PMK: For offloaded 4-Way handshake, delete the previously
* configured PMK for the authenticator address identified by
* %NL80211_ATTR_MAC.
- * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates that the 4 way
- * handshake was completed successfully by the driver. The BSSID is
- * specified with %NL80211_ATTR_MAC. Drivers that support 4 way handshake
- * offload should send this event after indicating 802.11 association with
- * %NL80211_CMD_CONNECT or %NL80211_CMD_ROAM. If the 4 way handshake failed
- * %NL80211_CMD_DISCONNECT should be indicated instead.
- *
+ * @NL80211_CMD_PORT_AUTHORIZED: An event that indicates an 802.1X FT roam was
+ * completed successfully. Drivers that support 4 way handshake offload
+ *	should send this event after indicating 802.1X FT association with
+ * %NL80211_CMD_ROAM. If the 4 way handshake failed %NL80211_CMD_DISCONNECT
+ * should be indicated instead.
* @NL80211_CMD_CONTROL_PORT_FRAME: Control Port (e.g. PAE) frame TX request
* and RX notification. This command is used both as a request to transmit
* a control port frame and as a notification that a control port frame
@@ -2082,10 +2080,10 @@ enum nl80211_commands {
* operation).
* @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
* for the time while performing a channel switch.
- * @NL80211_ATTR_CSA_C_OFF_BEACON: An array of offsets (u16) to the channel
- * switch counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
- * @NL80211_ATTR_CSA_C_OFF_PRESP: An array of offsets (u16) to the channel
- * switch counters in the probe response (%NL80211_ATTR_PROBE_RESP).
+ * @NL80211_ATTR_CNTDWN_OFFS_BEACON: An array of offsets (u16) to the channel
+ * switch or color change counters in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CNTDWN_OFFS_PRESP: An array of offsets (u16) to the channel
+ * switch or color change counters in the probe response (%NL80211_ATTR_PROBE_RESP).
*
* @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
* As specified in the &enum nl80211_rxmgmt_flags.
@@ -2515,6 +2513,20 @@ enum nl80211_commands {
* @NL80211_ATTR_HE_6GHZ_CAPABILITY: HE 6 GHz Band Capability element (from
* association request when used with NL80211_CMD_NEW_STATION).
*
+ * @NL80211_ATTR_FILS_DISCOVERY: Optional parameter to configure FILS
+ * discovery. It is a nested attribute, see
+ * &enum nl80211_fils_discovery_attributes.
+ *
+ * @NL80211_ATTR_UNSOL_BCAST_PROBE_RESP: Optional parameter to configure
+ * unsolicited broadcast probe response. It is a nested attribute, see
+ * &enum nl80211_unsol_bcast_probe_resp_attributes.
+ *
+ * @NL80211_ATTR_S1G_CAPABILITY: S1G Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION)
+ * @NL80211_ATTR_S1G_CAPABILITY_MASK: S1G Capability Information element
+ * override mask. Used with NL80211_ATTR_S1G_CAPABILITY in
+ * NL80211_CMD_ASSOCIATE or NL80211_CMD_CONNECT.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2821,8 +2833,8 @@ enum nl80211_attrs {
NL80211_ATTR_CH_SWITCH_COUNT,
NL80211_ATTR_CH_SWITCH_BLOCK_TX,
NL80211_ATTR_CSA_IES,
- NL80211_ATTR_CSA_C_OFF_BEACON,
- NL80211_ATTR_CSA_C_OFF_PRESP,
+ NL80211_ATTR_CNTDWN_OFFS_BEACON,
+ NL80211_ATTR_CNTDWN_OFFS_PRESP,
NL80211_ATTR_RXMGMT_FLAGS,
@@ -2997,6 +3009,13 @@ enum nl80211_attrs {
NL80211_ATTR_HE_6GHZ_CAPABILITY,
+ NL80211_ATTR_FILS_DISCOVERY,
+
+ NL80211_ATTR_UNSOL_BCAST_PROBE_RESP,
+
+ NL80211_ATTR_S1G_CAPABILITY,
+ NL80211_ATTR_S1G_CAPABILITY_MASK,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3009,6 +3028,8 @@ enum nl80211_attrs {
#define NL80211_ATTR_MESH_PARAMS NL80211_ATTR_MESH_CONFIG
#define NL80211_ATTR_IFACE_SOCKET_OWNER NL80211_ATTR_SOCKET_OWNER
#define NL80211_ATTR_SAE_DATA NL80211_ATTR_AUTH_DATA
+#define NL80211_ATTR_CSA_C_OFF_BEACON NL80211_ATTR_CNTDWN_OFFS_BEACON
+#define NL80211_ATTR_CSA_C_OFF_PRESP NL80211_ATTR_CNTDWN_OFFS_PRESP
/*
* Allow user space programs to use #ifdef on new attributes by defining them
@@ -3187,6 +3208,18 @@ enum nl80211_he_gi {
};
/**
+ * enum nl80211_he_ltf - HE long training field
+ * @NL80211_RATE_INFO_HE_1xLTF: 3.2 usec
+ * @NL80211_RATE_INFO_HE_2xLTF: 6.4 usec
+ * @NL80211_RATE_INFO_HE_4xLTF: 12.8 usec
+ */
+enum nl80211_he_ltf {
+ NL80211_RATE_INFO_HE_1XLTF,
+ NL80211_RATE_INFO_HE_2XLTF,
+ NL80211_RATE_INFO_HE_4XLTF,
+};
+
+/**
* enum nl80211_he_ru_alloc - HE RU allocation values
* @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
* @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
@@ -3725,6 +3758,16 @@ enum nl80211_wmm_rule {
* @NL80211_FREQUENCY_ATTR_NO_HE: HE operation is not allowed on this channel
* in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_OFFSET: frequency offset in KHz
+ * @NL80211_FREQUENCY_ATTR_1MHZ: 1 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_2MHZ: 2 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_4MHZ: 4 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_8MHZ: 8 MHz operation is allowed
+ * on this channel in current regulatory domain.
+ * @NL80211_FREQUENCY_ATTR_16MHZ: 16 MHz operation is allowed
+ * on this channel in current regulatory domain.
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number
* currently defined
* @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use
@@ -3756,6 +3799,11 @@ enum nl80211_frequency_attr {
NL80211_FREQUENCY_ATTR_WMM,
NL80211_FREQUENCY_ATTR_NO_HE,
NL80211_FREQUENCY_ATTR_OFFSET,
+ NL80211_FREQUENCY_ATTR_1MHZ,
+ NL80211_FREQUENCY_ATTR_2MHZ,
+ NL80211_FREQUENCY_ATTR_4MHZ,
+ NL80211_FREQUENCY_ATTR_8MHZ,
+ NL80211_FREQUENCY_ATTR_16MHZ,
/* keep last */
__NL80211_FREQUENCY_ATTR_AFTER_LAST,
@@ -4049,6 +4097,7 @@ enum nl80211_user_reg_hint_type {
* receiving frames destined to the local BSS
* @NL80211_SURVEY_INFO_MAX: highest survey info attribute number
* currently defined
+ * @NL80211_SURVEY_INFO_FREQUENCY_OFFSET: center frequency offset in KHz
* @__NL80211_SURVEY_INFO_AFTER_LAST: internal use
*/
enum nl80211_survey_info {
@@ -4064,6 +4113,7 @@ enum nl80211_survey_info {
NL80211_SURVEY_INFO_TIME_SCAN,
NL80211_SURVEY_INFO_PAD,
NL80211_SURVEY_INFO_TIME_BSS_RX,
+ NL80211_SURVEY_INFO_FREQUENCY_OFFSET,
/* keep last */
__NL80211_SURVEY_INFO_AFTER_LAST,
@@ -4741,6 +4791,10 @@ enum nl80211_key_attributes {
* @NL80211_TXRATE_VHT: VHT rates allowed for TX rate selection,
* see &struct nl80211_txrate_vht
* @NL80211_TXRATE_GI: configure GI, see &enum nl80211_txrate_gi
+ * @NL80211_TXRATE_HE: HE rates allowed for TX rate selection,
+ * see &struct nl80211_txrate_he
+ * @NL80211_TXRATE_HE_GI: configure HE GI, 0.8us, 1.6us and 3.2us.
+ * @NL80211_TXRATE_HE_LTF: configure HE LTF, 1XLTF, 2XLTF and 4XLTF.
* @__NL80211_TXRATE_AFTER_LAST: internal
* @NL80211_TXRATE_MAX: highest TX rate attribute
*/
@@ -4750,6 +4804,9 @@ enum nl80211_tx_rate_attributes {
NL80211_TXRATE_HT,
NL80211_TXRATE_VHT,
NL80211_TXRATE_GI,
+ NL80211_TXRATE_HE,
+ NL80211_TXRATE_HE_GI,
+ NL80211_TXRATE_HE_LTF,
/* keep last */
__NL80211_TXRATE_AFTER_LAST,
@@ -4767,6 +4824,15 @@ struct nl80211_txrate_vht {
__u16 mcs[NL80211_VHT_NSS_MAX];
};
+#define NL80211_HE_NSS_MAX 8
+/**
+ * struct nl80211_txrate_he - HE MCS/NSS txrate bitmap
+ * @mcs: MCS bitmap table for each NSS (array index 0 for 1 stream, etc.)
+ */
+struct nl80211_txrate_he {
+ __u16 mcs[NL80211_HE_NSS_MAX];
+};
+
enum nl80211_txrate_gi {
NL80211_TXRATE_DEFAULT_GI,
NL80211_TXRATE_FORCE_SGI,
@@ -5821,6 +5887,15 @@ enum nl80211_feature_flags {
* handshake with PSK in AP mode (PSK is passed as part of the start AP
* command).
*
+ * @NL80211_EXT_FEATURE_SAE_OFFLOAD_AP: Device wants to do SAE authentication
+ * in AP mode (SAE password is passed as part of the start AP command).
+ *
+ * @NL80211_EXT_FEATURE_FILS_DISCOVERY: Driver/device supports FILS discovery
+ * frames transmission
+ *
+ * @NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP: Driver/device supports
+ * unsolicited broadcast probe response transmission
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5878,6 +5953,9 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211_TX_STATUS,
NL80211_EXT_FEATURE_OPERATING_CHANNEL_VALIDATION,
NL80211_EXT_FEATURE_4WAY_HANDSHAKE_AP_PSK,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5992,6 +6070,8 @@ enum nl80211_timeout_reason {
* @NL80211_SCAN_FLAG_FREQ_KHZ: report scan results with
* %NL80211_ATTR_SCAN_FREQ_KHZ. This also means
* %NL80211_ATTR_SCAN_FREQUENCIES will not be included.
+ * @NL80211_SCAN_FLAG_COLOCATED_6GHZ: scan for colocated APs reported by
+ * 2.4/5 GHz APs
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -6008,6 +6088,7 @@ enum nl80211_scan_flags {
NL80211_SCAN_FLAG_RANDOM_SN = 1<<11,
NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12,
NL80211_SCAN_FLAG_FREQ_KHZ = 1<<13,
+ NL80211_SCAN_FLAG_COLOCATED_6GHZ = 1<<14,
};
/**
@@ -6910,6 +6991,13 @@ enum nl80211_peer_measurement_ftm_resp {
*
* @NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET: the OBSS PD minimum tx power offset.
* @NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET: the OBSS PD maximum tx power offset.
+ * @NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET: the non-SRG OBSS PD maximum
+ * tx power offset.
+ * @NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP: bitmap that indicates the BSS color
+ * values used by members of the SRG.
+ * @NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP: bitmap that indicates the partial
+ * BSSID values used by members of the SRG.
+ * @NL80211_HE_OBSS_PD_ATTR_SR_CTRL: The SR Control field of the SRP element.
*
* @__NL80211_HE_OBSS_PD_ATTR_LAST: Internal
* @NL80211_HE_OBSS_PD_ATTR_MAX: highest OBSS PD attribute.
@@ -6919,6 +7007,10 @@ enum nl80211_obss_pd_attributes {
NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET,
NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET,
+ NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET,
+ NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP,
+ NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP,
+ NL80211_HE_OBSS_PD_ATTR_SR_CTRL,
/* keep last */
__NL80211_HE_OBSS_PD_ATTR_LAST,
@@ -6972,4 +7064,64 @@ enum nl80211_iftype_akm_attributes {
NL80211_IFTYPE_AKM_ATTR_MAX = __NL80211_IFTYPE_AKM_ATTR_LAST - 1,
};
+/**
+ * enum nl80211_fils_discovery_attributes - FILS discovery configuration
+ * from IEEE Std 802.11ai-2016, Annex C.3 MIB detail.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_INVALID: Invalid
+ *
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MIN: Minimum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit)
+ * @NL80211_FILS_DISCOVERY_ATTR_INT_MAX: Maximum packet interval (u32, TU).
+ * Allowed range: 0..10000 (TU = Time Unit)
+ * @NL80211_FILS_DISCOVERY_ATTR_TMPL: Template data for FILS discovery action
+ * frame including the headers.
+ *
+ * @__NL80211_FILS_DISCOVERY_ATTR_LAST: Internal
+ * @NL80211_FILS_DISCOVERY_ATTR_MAX: highest attribute
+ */
+enum nl80211_fils_discovery_attributes {
+ __NL80211_FILS_DISCOVERY_ATTR_INVALID,
+
+ NL80211_FILS_DISCOVERY_ATTR_INT_MIN,
+ NL80211_FILS_DISCOVERY_ATTR_INT_MAX,
+ NL80211_FILS_DISCOVERY_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_FILS_DISCOVERY_ATTR_LAST,
+ NL80211_FILS_DISCOVERY_ATTR_MAX = __NL80211_FILS_DISCOVERY_ATTR_LAST - 1
+};
+
+/*
+ * FILS discovery template minimum length with action frame headers and
+ * mandatory fields.
+ */
+#define NL80211_FILS_DISCOVERY_TMPL_MIN_LEN 42
+
+/**
+ * enum nl80211_unsol_bcast_probe_resp_attributes - Unsolicited broadcast probe
+ * response configuration.  Applicable only in the 6 GHz band.
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID: Invalid
+ *
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT: Maximum packet interval (u32, TU).
+ * Allowed range: 0..20 (TU = Time Unit). IEEE P802.11ax/D6.0
+ * 26.17.2.3.2 (AP behavior for fast passive scanning).
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL: Unsolicited broadcast probe response
+ * frame template (binary).
+ *
+ * @__NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST: Internal
+ * @NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX: highest attribute
+ */
+enum nl80211_unsol_bcast_probe_resp_attributes {
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INVALID,
+
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL,
+
+ /* keep last */
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST,
+ NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX =
+ __NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_LAST - 1
+};
#endif /* __LINUX_NL80211_H */
diff --git a/include/uapi/linux/tc_act/tc_mpls.h b/include/uapi/linux/tc_act/tc_mpls.h
index 9360e95273c7..9e4e8f52a779 100644
--- a/include/uapi/linux/tc_act/tc_mpls.h
+++ b/include/uapi/linux/tc_act/tc_mpls.h
@@ -10,6 +10,7 @@
#define TCA_MPLS_ACT_PUSH 2
#define TCA_MPLS_ACT_MODIFY 3
#define TCA_MPLS_ACT_DEC_TTL 4
+#define TCA_MPLS_ACT_MAC_PUSH 5
struct tc_mpls {
tc_gen; /* generic TC action fields. */
diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h
index 168995b54a70..5b306fe815cc 100644
--- a/include/uapi/linux/tc_act/tc_vlan.h
+++ b/include/uapi/linux/tc_act/tc_vlan.h
@@ -16,6 +16,8 @@
#define TCA_VLAN_ACT_POP 1
#define TCA_VLAN_ACT_PUSH 2
#define TCA_VLAN_ACT_MODIFY 3
+#define TCA_VLAN_ACT_POP_ETH 4
+#define TCA_VLAN_ACT_PUSH_ETH 5
struct tc_vlan {
tc_gen;
@@ -30,6 +32,8 @@ enum {
TCA_VLAN_PUSH_VLAN_PROTOCOL,
TCA_VLAN_PAD,
TCA_VLAN_PUSH_VLAN_PRIORITY,
+ TCA_VLAN_PUSH_ETH_DST,
+ TCA_VLAN_PUSH_ETH_SRC,
__TCA_VLAN_MAX,
};
#define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1)
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index add01db1daef..80ea15e12113 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -254,6 +254,8 @@ static inline int tipc_aead_key_size(struct tipc_aead_key *key)
return sizeof(*key) + key->keylen;
}
+#define TIPC_REKEYING_NOW (~0U)
+
/* The macros and functions below are deprecated:
*/
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index dc0d23a50e69..d847dd671d79 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -165,6 +165,8 @@ enum {
TIPC_NLA_NODE_UP, /* flag */
TIPC_NLA_NODE_ID, /* data */
TIPC_NLA_NODE_KEY, /* data */
+ TIPC_NLA_NODE_KEY_MASTER, /* flag */
+ TIPC_NLA_NODE_REKEYING, /* u32 */
__TIPC_NLA_NODE_MAX,
TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
diff --git a/init/Kconfig b/init/Kconfig
index 2a583ef2ff66..c9446911cf41 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1692,6 +1692,7 @@ config BPF_SYSCALL
bool "Enable bpf() system call"
select BPF
select IRQ_WORK
+ select TASKS_TRACE_RCU
default n
help
Enable the bpf() system call that allows to manipulate eBPF
@@ -1711,6 +1712,8 @@ config BPF_JIT_DEFAULT_ON
def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
depends on HAVE_EBPF_JIT && BPF_JIT
+source "kernel/bpf/preload/Kconfig"
+
config USERFAULTFD
bool "Enable userfaultfd() system call"
depends on MMU
diff --git a/kernel/Makefile b/kernel/Makefile
index b74820d8b264..e5bc66a94b70 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -12,7 +12,7 @@ obj-y = fork.o exec_domain.o panic.o \
notifier.o ksysfs.o cred.o reboot.o \
async.o range.o smpboot.o ucount.o regset.o
-obj-$(CONFIG_BPFILTER) += usermode_driver.o
+obj-$(CONFIG_USERMODE_DRIVER) += usermode_driver.o
obj-$(CONFIG_MODULES) += kmod.o
obj-$(CONFIG_MULTIUSER) += groups.o
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index e6eb9c0402da..bdc8cd1b6767 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -5,6 +5,7 @@ CFLAGS_core.o += $(call cc-disable-warning, override-init)
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
+obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_BPF_JIT) += dispatcher.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
+obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o
obj-$(CONFIG_BPF_SYSCALL) += offload.o
obj-$(CONFIG_BPF_SYSCALL) += net_namespace.o
endif
@@ -29,3 +31,4 @@ ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
+obj-$(CONFIG_BPF_PRELOAD) += preload/
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 8ff419b632a6..c6c81eceb68f 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -10,11 +10,13 @@
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
+#include <linux/rcupdate_trace.h>
#include "map_in_map.h"
#define ARRAY_CREATE_FLAG_MASK \
- (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)
+ (BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
+ BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)
static void bpf_array_free_percpu(struct bpf_array *array)
{
@@ -60,7 +62,11 @@ int array_map_alloc_check(union bpf_attr *attr)
return -EINVAL;
if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
- attr->map_flags & BPF_F_MMAPABLE)
+ attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
+ return -EINVAL;
+
+ if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
+ attr->map_flags & BPF_F_PRESERVE_ELEMS)
return -EINVAL;
if (attr->value_size > KMALLOC_MAX_SIZE)
@@ -208,7 +214,7 @@ static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
}
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
-static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
struct bpf_insn *insn = insn_buf;
@@ -217,6 +223,9 @@ static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
const int map_ptr = BPF_REG_1;
const int index = BPF_REG_2;
+ if (map->map_flags & BPF_F_INNER_MAP)
+ return -EOPNOTSUPP;
+
*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
if (!map->bypass_spec_v1) {
@@ -487,6 +496,15 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
vma->vm_pgoff + pgoff);
}
+static bool array_map_meta_equal(const struct bpf_map *meta0,
+ const struct bpf_map *meta1)
+{
+ if (!bpf_map_meta_equal(meta0, meta1))
+ return false;
+ return meta0->map_flags & BPF_F_INNER_MAP ? true :
+ meta0->max_entries == meta1->max_entries;
+}
+
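
A hedged userspace sketch of what the relaxed check above enables (modern
libbpf names are assumed): an array created with BPF_F_INNER_MAP can be put
into a map-in-map slot whose inner definition has a different max_entries,
at the cost of the inlined lookup (-EOPNOTSUPP above):

    #include <bpf/bpf.h>

    static int create_inner_array(__u32 max_entries)
    {
            LIBBPF_OPTS(bpf_map_create_opts, opts,
                        .map_flags = BPF_F_INNER_MAP);

            /* Usable as an inner map regardless of max_entries. */
            return bpf_map_create(BPF_MAP_TYPE_ARRAY, "inner",
                                  sizeof(__u32), sizeof(__u64),
                                  max_entries, &opts);
    }
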
struct bpf_iter_seq_array_map_info {
struct bpf_map *map;
void *percpu_value_buf;
@@ -625,6 +643,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
+ .map_meta_equal = array_map_meta_equal,
.map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
@@ -647,6 +666,7 @@ const struct bpf_map_ops array_map_ops = {
static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = array_map_free,
@@ -888,6 +908,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
struct bpf_prog *old,
struct bpf_prog *new)
{
+ u8 *old_addr, *new_addr, *old_bypass_addr;
struct prog_poke_elem *elem;
struct bpf_array_aux *aux;
@@ -908,12 +929,13 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
* there could be danger of use after free otherwise.
* 2) Initially when we start tracking aux, the program
* is not JITed yet and also does not have a kallsyms
- * entry. We skip these as poke->ip_stable is not
- * active yet. The JIT will do the final fixup before
- * setting it stable. The various poke->ip_stable are
- * successively activated, so tail call updates can
- * arrive from here while JIT is still finishing its
- * final fixup for non-activated poke entries.
+ * entry. We skip these as poke->tailcall_target_stable
+ * is not active yet. The JIT will do the final fixup
+ * before setting it stable. The various
+ * poke->tailcall_target_stable are successively
+ * activated, so tail call updates can arrive from here
+ * while JIT is still finishing its final fixup for
+ * non-activated poke entries.
* 3) On program teardown, the program's kallsym entry gets
* removed out of RCU callback, but we can only untrack
* from sleepable context, therefore bpf_arch_text_poke()
@@ -930,7 +952,7 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
* 5) Any other error happening below from bpf_arch_text_poke()
* is an unexpected bug.
*/
- if (!READ_ONCE(poke->ip_stable))
+ if (!READ_ONCE(poke->tailcall_target_stable))
continue;
if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
continue;
@@ -938,12 +960,39 @@ static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
poke->tail_call.key != key)
continue;
- ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
- old ? (u8 *)old->bpf_func +
- poke->adj_off : NULL,
- new ? (u8 *)new->bpf_func +
- poke->adj_off : NULL);
- BUG_ON(ret < 0 && ret != -EINVAL);
+ old_bypass_addr = old ? NULL : poke->bypass_addr;
+ old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
+ new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;
+
+ if (new) {
+ ret = bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP,
+ old_addr, new_addr);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ if (!old) {
+ ret = bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ poke->bypass_addr,
+ NULL);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ }
+ } else {
+ ret = bpf_arch_text_poke(poke->tailcall_bypass,
+ BPF_MOD_JUMP,
+ old_bypass_addr,
+ poke->bypass_addr);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ /* let other CPUs finish executing the program
+ * so that they will not be exposed to an
+ * invalid nop, stack unwind or nop state
+ */
+ if (!ret)
+ synchronize_rcu();
+ ret = bpf_arch_text_poke(poke->tailcall_target,
+ BPF_MOD_JUMP,
+ old_addr, NULL);
+ BUG_ON(ret < 0 && ret != -EINVAL);
+ }
}
}
}
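/* Illustration (not part of the patch): the transitions handled by
 * prog_array_map_poke_run() above, summarized. Each poke site has two
 * patchable locations emitted by the JIT: tailcall_target (the jump
 * to the tail-called program) and tailcall_bypass (a jump over the
 * target, taken while no program is installed in the slot).
 *
 *   NULL -> new:  patch tailcall_target (nop -> jmp new), then remove
 *                 the bypass (jmp -> nop) so the target is reached.
 *   old  -> new:  patch tailcall_target (jmp old -> jmp new); the
 *                 bypass stays disabled throughout.
 *   old  -> NULL: re-enable the bypass first (nop -> jmp), wait for
 *                 in-flight execution via synchronize_rcu() so no CPU
 *                 can still sit between bypass and target, then clear
 *                 the target (jmp old -> nop).
 */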
@@ -1003,6 +1052,11 @@ static void prog_array_map_free(struct bpf_map *map)
fd_array_map_free(map);
}
+/* prog_array->aux->{type,jited} is a runtime binding.
+ * Doing a static check alone in the verifier is not enough.
+ * Thus, prog_array_map cannot be used as an inner_map
+ * and map_meta_equal is not implemented.
+ */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
.map_alloc_check = fd_array_map_alloc_check,
@@ -1090,6 +1144,9 @@ static void perf_event_fd_array_release(struct bpf_map *map,
struct bpf_event_entry *ee;
int i;
+ if (map->map_flags & BPF_F_PRESERVE_ELEMS)
+ return;
+
rcu_read_lock();
for (i = 0; i < array->map.max_entries; i++) {
ee = READ_ONCE(array->ptrs[i]);
@@ -1099,11 +1156,19 @@ static void perf_event_fd_array_release(struct bpf_map *map,
rcu_read_unlock();
}
+static void perf_event_fd_array_map_free(struct bpf_map *map)
+{
+ if (map->map_flags & BPF_F_PRESERVE_ELEMS)
+ bpf_fd_array_map_clear(map);
+ fd_array_map_free(map);
+}
+
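+/* Illustration (not part of the patch): creating a perf event array
+ * whose elements outlive the installing process. Assumes a libbpf
+ * that provides bpf_map_create(); error handling omitted.
+ *
+ * #include <bpf/bpf.h>
+ *
+ * static int create_preserving_event_array(int nr_cpus)
+ * {
+ *	LIBBPF_OPTS(bpf_map_create_opts, opts,
+ *		    .map_flags = BPF_F_PRESERVE_ELEMS);
+ *
+ *	// Without the flag, perf_event_fd_array_release() clears every
+ *	// entry when the installing process's map fd goes away; with
+ *	// it, entries persist until the map itself is freed.
+ *	return bpf_map_create(BPF_MAP_TYPE_PERF_EVENT_ARRAY, "events",
+ *			      sizeof(int), sizeof(int), nr_cpus, &opts);
+ * }
+ */
+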
static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_map_alloc,
- .map_free = fd_array_map_free,
+ .map_free = perf_event_fd_array_map_free,
.map_get_next_key = array_map_get_next_key,
.map_lookup_elem = fd_array_map_lookup_elem,
.map_delete_elem = fd_array_map_delete_elem,
@@ -1137,6 +1202,7 @@ static void cgroup_fd_array_free(struct bpf_map *map)
static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = fd_array_map_alloc_check,
.map_alloc = array_map_alloc,
.map_free = cgroup_fd_array_free,
@@ -1190,7 +1256,7 @@ static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
return READ_ONCE(*inner_map);
}
-static u32 array_of_map_gen_lookup(struct bpf_map *map,
+static int array_of_map_gen_lookup(struct bpf_map *map,
struct bpf_insn *insn_buf)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
diff --git a/kernel/bpf/bpf_inode_storage.c b/kernel/bpf/bpf_inode_storage.c
new file mode 100644
index 000000000000..6edff97ad594
--- /dev/null
+++ b/kernel/bpf/bpf_inode_storage.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Facebook
+ * Copyright 2020 Google LLC.
+ */
+
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <linux/bpf_local_storage.h>
+#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
+#include <uapi/linux/btf.h>
+#include <linux/bpf_lsm.h>
+#include <linux/btf_ids.h>
+#include <linux/fdtable.h>
+
+DEFINE_BPF_STORAGE_CACHE(inode_cache);
+
+static struct bpf_local_storage __rcu **
+inode_storage_ptr(void *owner)
+{
+ struct inode *inode = owner;
+ struct bpf_storage_blob *bsb;
+
+ bsb = bpf_inode(inode);
+ if (!bsb)
+ return NULL;
+ return &bsb->storage;
+}
+
+static struct bpf_local_storage_data *inode_storage_lookup(struct inode *inode,
+ struct bpf_map *map,
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage *inode_storage;
+ struct bpf_local_storage_map *smap;
+ struct bpf_storage_blob *bsb;
+
+ bsb = bpf_inode(inode);
+ if (!bsb)
+ return NULL;
+
+ inode_storage = rcu_dereference(bsb->storage);
+ if (!inode_storage)
+ return NULL;
+
+ smap = (struct bpf_local_storage_map *)map;
+ return bpf_local_storage_lookup(inode_storage, smap, cacheit_lockit);
+}
+
+void bpf_inode_storage_free(struct inode *inode)
+{
+ struct bpf_local_storage_elem *selem;
+ struct bpf_local_storage *local_storage;
+ bool free_inode_storage = false;
+ struct bpf_storage_blob *bsb;
+ struct hlist_node *n;
+
+ bsb = bpf_inode(inode);
+ if (!bsb)
+ return;
+
+ rcu_read_lock();
+
+ local_storage = rcu_dereference(bsb->storage);
+ if (!local_storage) {
+ rcu_read_unlock();
+ return;
+ }
+
+ /* Neither the bpf_prog nor the bpf map's syscall
+ * can be modifying the local_storage->list now.
+ * Thus, no elem can be added to or deleted from the
+ * local_storage->list by the bpf_prog or by the bpf map's syscall.
+ *
+ * It is racing with bpf_local_storage_map_free() alone
+ * when unlinking elem from the local_storage->list and
+ * the map's bucket->list.
+ */
+ raw_spin_lock_bh(&local_storage->lock);
+ hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
+ /* Always unlink from map before unlinking from
+ * local_storage.
+ */
+ bpf_selem_unlink_map(selem);
+ free_inode_storage = bpf_selem_unlink_storage_nolock(
+ local_storage, selem, false);
+ }
+ raw_spin_unlock_bh(&local_storage->lock);
+ rcu_read_unlock();
+
+ /* free_inode_storage should always be true as long as
+ * local_storage->list was non-empty.
+ */
+ if (free_inode_storage)
+ kfree_rcu(local_storage, rcu);
+}
+
+static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
+{
+ struct bpf_local_storage_data *sdata;
+ struct file *f;
+ int fd;
+
+ fd = *(int *)key;
+ f = fget_raw(fd);
+ if (!f)
+ return NULL;
+
+ sdata = inode_storage_lookup(f->f_inode, map, true);
+ fput(f);
+ return sdata ? sdata->data : NULL;
+}
+
+static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags)
+{
+ struct bpf_local_storage_data *sdata;
+ struct file *f;
+ int fd;
+
+ fd = *(int *)key;
+ f = fget_raw(fd);
+ if (!f || !inode_storage_ptr(f->f_inode))
+ return -EBADF;
+
+ sdata = bpf_local_storage_update(f->f_inode,
+ (struct bpf_local_storage_map *)map,
+ value, map_flags);
+ fput(f);
+ return PTR_ERR_OR_ZERO(sdata);
+}
+
+static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
+{
+ struct bpf_local_storage_data *sdata;
+
+ sdata = inode_storage_lookup(inode, map, false);
+ if (!sdata)
+ return -ENOENT;
+
+ bpf_selem_unlink(SELEM(sdata));
+
+ return 0;
+}
+
+static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
+{
+ struct file *f;
+ int fd, err;
+
+ fd = *(int *)key;
+ f = fget_raw(fd);
+ if (!f)
+ return -EBADF;
+
+ err = inode_storage_delete(f->f_inode, map);
+ fput(f);
+ return err;
+}
+
+BPF_CALL_4(bpf_inode_storage_get, struct bpf_map *, map, struct inode *, inode,
+ void *, value, u64, flags)
+{
+ struct bpf_local_storage_data *sdata;
+
+ if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
+ return (unsigned long)NULL;
+
+ /* explicitly check that the inode_storage_ptr is not
+ * NULL as inode_storage_lookup returns NULL in this case and
+ * bpf_local_storage_update expects the owner to have a
+ * valid storage pointer.
+ */
+ if (!inode_storage_ptr(inode))
+ return (unsigned long)NULL;
+
+ sdata = inode_storage_lookup(inode, map, true);
+ if (sdata)
+ return (unsigned long)sdata->data;
+
+ /* This helper must only be called where the inode is guaranteed
+ * to have a refcount and cannot be freed.
+ */
+ if (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) {
+ sdata = bpf_local_storage_update(
+ inode, (struct bpf_local_storage_map *)map, value,
+ BPF_NOEXIST);
+ return IS_ERR(sdata) ? (unsigned long)NULL :
+ (unsigned long)sdata->data;
+ }
+
+ return (unsigned long)NULL;
+}
+
+BPF_CALL_2(bpf_inode_storage_delete,
+ struct bpf_map *, map, struct inode *, inode)
+{
+ /* This helper must only be called where the inode is guaranteed
+ * to have a refcount and cannot be freed.
+ */
+ return inode_storage_delete(inode, map);
+}
+
+static int notsupp_get_next_key(struct bpf_map *map, void *key,
+ void *next_key)
+{
+ return -ENOTSUPP;
+}
+
+static struct bpf_map *inode_storage_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_local_storage_map *smap;
+
+ smap = bpf_local_storage_map_alloc(attr);
+ if (IS_ERR(smap))
+ return ERR_CAST(smap);
+
+ smap->cache_idx = bpf_local_storage_cache_idx_get(&inode_cache);
+ return &smap->map;
+}
+
+static void inode_storage_map_free(struct bpf_map *map)
+{
+ struct bpf_local_storage_map *smap;
+
+ smap = (struct bpf_local_storage_map *)map;
+ bpf_local_storage_cache_idx_free(&inode_cache, smap->cache_idx);
+ bpf_local_storage_map_free(smap);
+}
+
+static int inode_storage_map_btf_id;
+const struct bpf_map_ops inode_storage_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
+ .map_alloc_check = bpf_local_storage_map_alloc_check,
+ .map_alloc = inode_storage_map_alloc,
+ .map_free = inode_storage_map_free,
+ .map_get_next_key = notsupp_get_next_key,
+ .map_lookup_elem = bpf_fd_inode_storage_lookup_elem,
+ .map_update_elem = bpf_fd_inode_storage_update_elem,
+ .map_delete_elem = bpf_fd_inode_storage_delete_elem,
+ .map_check_btf = bpf_local_storage_map_check_btf,
+ .map_btf_name = "bpf_local_storage_map",
+ .map_btf_id = &inode_storage_map_btf_id,
+ .map_owner_storage_ptr = inode_storage_ptr,
+};
+
+BTF_ID_LIST_SINGLE(bpf_inode_storage_btf_ids, struct, inode)
+
+const struct bpf_func_proto bpf_inode_storage_get_proto = {
+ .func = bpf_inode_storage_get,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &bpf_inode_storage_btf_ids[0],
+ .arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+ .arg4_type = ARG_ANYTHING,
+};
+
+const struct bpf_func_proto bpf_inode_storage_delete_proto = {
+ .func = bpf_inode_storage_delete,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_PTR_TO_BTF_ID,
+ .arg2_btf_id = &bpf_inode_storage_btf_ids[0],
+};
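+
+/* Illustration (not part of the patch): consuming the two helpers
+ * above from a BPF LSM program. The hook and value layout are
+ * illustrative; requires CONFIG_BPF_LSM and the lsm_verifier_ops
+ * wiring added later in this series.
+ *
+ * #include "vmlinux.h"
+ * #include <bpf/bpf_helpers.h>
+ * #include <bpf/bpf_tracing.h>
+ *
+ * struct {
+ *	__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
+ *	__uint(map_flags, BPF_F_NO_PREALLOC);
+ *	__type(key, int);
+ *	__type(value, __u32);
+ * } inode_opens SEC(".maps");
+ *
+ * SEC("lsm/file_open")
+ * int BPF_PROG(count_opens, struct file *file)
+ * {
+ *	__u32 *cnt;
+ *
+ *	// The hook holds a reference on the inode, satisfying the
+ *	// "guaranteed to have a refcount" requirement above.
+ *	cnt = bpf_inode_storage_get(&inode_opens, file->f_inode, 0,
+ *				    BPF_LOCAL_STORAGE_GET_F_CREATE);
+ *	if (cnt)
+ *		__sync_fetch_and_add(cnt, 1);
+ *	return 0;
+ * }
+ *
+ * char _license[] SEC("license") = "GPL";
+ */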
diff --git a/kernel/bpf/bpf_iter.c b/kernel/bpf/bpf_iter.c
index 8faa2ce89396..8f10e30ea0b0 100644
--- a/kernel/bpf/bpf_iter.c
+++ b/kernel/bpf/bpf_iter.c
@@ -88,8 +88,8 @@ static ssize_t bpf_seq_read(struct file *file, char __user *buf, size_t size,
mutex_lock(&seq->lock);
if (!seq->buf) {
- seq->size = PAGE_SIZE;
- seq->buf = kmalloc(seq->size, GFP_KERNEL);
+ seq->size = PAGE_SIZE << 3;
+ seq->buf = kvmalloc(seq->size, GFP_KERNEL);
if (!seq->buf) {
err = -ENOMEM;
goto done;
@@ -390,10 +390,68 @@ out_unlock:
return ret;
}
+static void bpf_iter_link_show_fdinfo(const struct bpf_link *link,
+ struct seq_file *seq)
+{
+ struct bpf_iter_link *iter_link =
+ container_of(link, struct bpf_iter_link, link);
+ bpf_iter_show_fdinfo_t show_fdinfo;
+
+ seq_printf(seq,
+ "target_name:\t%s\n",
+ iter_link->tinfo->reg_info->target);
+
+ show_fdinfo = iter_link->tinfo->reg_info->show_fdinfo;
+ if (show_fdinfo)
+ show_fdinfo(&iter_link->aux, seq);
+}
+
+static int bpf_iter_link_fill_link_info(const struct bpf_link *link,
+ struct bpf_link_info *info)
+{
+ struct bpf_iter_link *iter_link =
+ container_of(link, struct bpf_iter_link, link);
+ char __user *ubuf = u64_to_user_ptr(info->iter.target_name);
+ bpf_iter_fill_link_info_t fill_link_info;
+ u32 ulen = info->iter.target_name_len;
+ const char *target_name;
+ u32 target_len;
+
+ if (!ulen ^ !ubuf)
+ return -EINVAL;
+
+ target_name = iter_link->tinfo->reg_info->target;
+ target_len = strlen(target_name);
+ info->iter.target_name_len = target_len + 1;
+
+ if (ubuf) {
+ if (ulen >= target_len + 1) {
+ if (copy_to_user(ubuf, target_name, target_len + 1))
+ return -EFAULT;
+ } else {
+ char zero = '\0';
+
+ if (copy_to_user(ubuf, target_name, ulen - 1))
+ return -EFAULT;
+ if (put_user(zero, ubuf + ulen - 1))
+ return -EFAULT;
+ return -ENOSPC;
+ }
+ }
+
+ fill_link_info = iter_link->tinfo->reg_info->fill_link_info;
+ if (fill_link_info)
+ return fill_link_info(&iter_link->aux, info);
+
+ return 0;
+}
+
static const struct bpf_link_ops bpf_iter_link_lops = {
.release = bpf_iter_link_release,
.dealloc = bpf_iter_link_dealloc,
.update_prog = bpf_iter_link_replace,
+ .show_fdinfo = bpf_iter_link_show_fdinfo,
+ .fill_link_info = bpf_iter_link_fill_link_info,
};
bool bpf_link_is_iter(struct bpf_link *link)
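/* Illustration (not part of the patch): querying the target name
 * exposed by bpf_iter_link_fill_link_info() above. On -ENOSPC the
 * kernel has written a truncated, NUL-terminated name and set
 * target_name_len to the required length.
 *
 * #include <string.h>
 * #include <bpf/bpf.h>
 * #include <linux/bpf.h>
 *
 * static int get_iter_target(int link_fd, char *buf, __u32 buf_len)
 * {
 *	struct bpf_link_info info;
 *	__u32 info_len = sizeof(info);
 *
 *	memset(&info, 0, sizeof(info));
 *	info.iter.target_name = (__u64)(unsigned long)buf;
 *	info.iter.target_name_len = buf_len;
 *
 *	return bpf_obj_get_info_by_fd(link_fd, &info, &info_len);
 * }
 */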
diff --git a/kernel/bpf/bpf_local_storage.c b/kernel/bpf/bpf_local_storage.c
new file mode 100644
index 000000000000..5d3a7af9ba9b
--- /dev/null
+++ b/kernel/bpf/bpf_local_storage.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+#include <linux/rculist.h>
+#include <linux/list.h>
+#include <linux/hash.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/bpf.h>
+#include <linux/btf_ids.h>
+#include <linux/bpf_local_storage.h>
+#include <net/sock.h>
+#include <uapi/linux/sock_diag.h>
+#include <uapi/linux/btf.h>
+
+#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)
+
+static struct bpf_local_storage_map_bucket *
+select_bucket(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem)
+{
+ return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
+}
+
+static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
+{
+ struct bpf_map *map = &smap->map;
+
+ if (!map->ops->map_local_storage_charge)
+ return 0;
+
+ return map->ops->map_local_storage_charge(smap, owner, size);
+}
+
+static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
+ u32 size)
+{
+ struct bpf_map *map = &smap->map;
+
+ if (map->ops->map_local_storage_uncharge)
+ map->ops->map_local_storage_uncharge(smap, owner, size);
+}
+
+static struct bpf_local_storage __rcu **
+owner_storage(struct bpf_local_storage_map *smap, void *owner)
+{
+ struct bpf_map *map = &smap->map;
+
+ return map->ops->map_owner_storage_ptr(owner);
+}
+
+static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
+{
+ return !hlist_unhashed(&selem->snode);
+}
+
+static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
+{
+ return !hlist_unhashed(&selem->map_node);
+}
+
+struct bpf_local_storage_elem *
+bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
+ void *value, bool charge_mem)
+{
+ struct bpf_local_storage_elem *selem;
+
+ if (charge_mem && mem_charge(smap, owner, smap->elem_size))
+ return NULL;
+
+ selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+ if (selem) {
+ if (value)
+ memcpy(SDATA(selem)->data, value, smap->map.value_size);
+ return selem;
+ }
+
+ if (charge_mem)
+ mem_uncharge(smap, owner, smap->elem_size);
+
+ return NULL;
+}
+
+/* local_storage->lock must be held and selem->local_storage == local_storage.
+ * The caller must ensure selem->smap is still valid to be
+ * dereferenced for its smap->elem_size and smap->cache_idx.
+ */
+bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem,
+ bool uncharge_mem)
+{
+ struct bpf_local_storage_map *smap;
+ bool free_local_storage;
+ void *owner;
+
+ smap = rcu_dereference(SDATA(selem)->smap);
+ owner = local_storage->owner;
+
+ /* All uncharging on the owner must be done first.
+ * The owner may be freed once the last selem is unlinked
+ * from local_storage.
+ */
+ if (uncharge_mem)
+ mem_uncharge(smap, owner, smap->elem_size);
+
+ free_local_storage = hlist_is_singular_node(&selem->snode,
+ &local_storage->list);
+ if (free_local_storage) {
+ mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
+ local_storage->owner = NULL;
+
+ /* After this RCU_INIT, owner may be freed and cannot be used */
+ RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);
+
+ /* local_storage is not freed now. local_storage->lock is
+ * still held and raw_spin_unlock_bh(&local_storage->lock)
+ * will be done by the caller.
+ *
+ * Although the unlock will be done under
+ * rcu_read_lock(), it is more intuitive to
+ * read if kfree_rcu(local_storage, rcu) is done
+ * after the raw_spin_unlock_bh(&local_storage->lock).
+ *
+ * Hence, a "bool free_local_storage" is returned
+ * to the caller which then calls the kfree_rcu()
+ * after unlock.
+ */
+ }
+ hlist_del_init_rcu(&selem->snode);
+ if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
+ SDATA(selem))
+ RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
+
+ kfree_rcu(selem, rcu);
+
+ return free_local_storage;
+}
+
+static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
+{
+ struct bpf_local_storage *local_storage;
+ bool free_local_storage = false;
+
+ if (unlikely(!selem_linked_to_storage(selem)))
+ /* selem has already been unlinked from its local_storage */
+ return;
+
+ local_storage = rcu_dereference(selem->local_storage);
+ raw_spin_lock_bh(&local_storage->lock);
+ if (likely(selem_linked_to_storage(selem)))
+ free_local_storage = bpf_selem_unlink_storage_nolock(
+ local_storage, selem, true);
+ raw_spin_unlock_bh(&local_storage->lock);
+
+ if (free_local_storage)
+ kfree_rcu(local_storage, rcu);
+}
+
+void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_elem *selem)
+{
+ RCU_INIT_POINTER(selem->local_storage, local_storage);
+ hlist_add_head_rcu(&selem->snode, &local_storage->list);
+}
+
+void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
+{
+ struct bpf_local_storage_map *smap;
+ struct bpf_local_storage_map_bucket *b;
+
+ if (unlikely(!selem_linked_to_map(selem)))
+ /* selem has already been unlinked from smap */
+ return;
+
+ smap = rcu_dereference(SDATA(selem)->smap);
+ b = select_bucket(smap, selem);
+ raw_spin_lock_bh(&b->lock);
+ if (likely(selem_linked_to_map(selem)))
+ hlist_del_init_rcu(&selem->map_node);
+ raw_spin_unlock_bh(&b->lock);
+}
+
+void bpf_selem_link_map(struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem)
+{
+ struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
+
+ raw_spin_lock_bh(&b->lock);
+ RCU_INIT_POINTER(SDATA(selem)->smap, smap);
+ hlist_add_head_rcu(&selem->map_node, &b->list);
+ raw_spin_unlock_bh(&b->lock);
+}
+
+void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
+{
+ /* Always unlink from map before unlinking from local_storage
+ * because selem will be freed after successfully unlinked from
+ * the local_storage.
+ */
+ bpf_selem_unlink_map(selem);
+ __bpf_selem_unlink_storage(selem);
+}
+
+struct bpf_local_storage_data *
+bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
+ struct bpf_local_storage_map *smap,
+ bool cacheit_lockit)
+{
+ struct bpf_local_storage_data *sdata;
+ struct bpf_local_storage_elem *selem;
+
+ /* Fast path (cache hit) */
+ sdata = rcu_dereference(local_storage->cache[smap->cache_idx]);
+ if (sdata && rcu_access_pointer(sdata->smap) == smap)
+ return sdata;
+
+ /* Slow path (cache miss) */
+ hlist_for_each_entry_rcu(selem, &local_storage->list, snode)
+ if (rcu_access_pointer(SDATA(selem)->smap) == smap)
+ break;
+
+ if (!selem)
+ return NULL;
+
+ sdata = SDATA(selem);
+ if (cacheit_lockit) {
+ /* spinlock is needed to avoid racing with the
+ * parallel delete. Otherwise, publishing an already
+ * deleted sdata to the cache will become a use-after-free
+ * problem in the next bpf_local_storage_lookup().
+ */
+ raw_spin_lock_bh(&local_storage->lock);
+ if (selem_linked_to_storage(selem))
+ rcu_assign_pointer(local_storage->cache[smap->cache_idx],
+ sdata);
+ raw_spin_unlock_bh(&local_storage->lock);
+ }
+
+ return sdata;
+}
+
+static int check_flags(const struct bpf_local_storage_data *old_sdata,
+ u64 map_flags)
+{
+ if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
+ /* elem already exists */
+ return -EEXIST;
+
+ if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
+ /* elem doesn't exist, cannot update it */
+ return -ENOENT;
+
+ return 0;
+}
+
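+/* Illustration (not part of the patch): the userspace-visible flag
+ * semantics that check_flags() above enforces; map_fd is assumed to
+ * be a local storage map and key an fd identifying the owner.
+ *
+ * #include <bpf/bpf.h>
+ *
+ * static void update_flag_examples(int map_fd, int key, long val)
+ * {
+ *	// -EEXIST when storage already exists for this owner
+ *	bpf_map_update_elem(map_fd, &key, &val, BPF_NOEXIST);
+ *	// -ENOENT when no storage exists yet
+ *	bpf_map_update_elem(map_fd, &key, &val, BPF_EXIST);
+ *	// BPF_F_LOCK additionally requires a bpf_spin_lock in value
+ *	bpf_map_update_elem(map_fd, &key, &val, BPF_F_LOCK | BPF_EXIST);
+ * }
+ */
+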
+int bpf_local_storage_alloc(void *owner,
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *first_selem)
+{
+ struct bpf_local_storage *prev_storage, *storage;
+ struct bpf_local_storage **owner_storage_ptr;
+ int err;
+
+ err = mem_charge(smap, owner, sizeof(*storage));
+ if (err)
+ return err;
+
+ storage = kzalloc(sizeof(*storage), GFP_ATOMIC | __GFP_NOWARN);
+ if (!storage) {
+ err = -ENOMEM;
+ goto uncharge;
+ }
+
+ INIT_HLIST_HEAD(&storage->list);
+ raw_spin_lock_init(&storage->lock);
+ storage->owner = owner;
+
+ bpf_selem_link_storage_nolock(storage, first_selem);
+ bpf_selem_link_map(smap, first_selem);
+
+ owner_storage_ptr =
+ (struct bpf_local_storage **)owner_storage(smap, owner);
+ /* Publish storage to the owner.
+ * Instead of using any lock of the kernel object (i.e. owner),
+ * cmpxchg will work with any kernel object regardless what
+ * the running context is, bh, irq...etc.
+ *
+ * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
+ * is protected by the storage->lock. Hence, when freeing
+ * the owner->storage, the storage->lock must be held before
+ * setting owner->storage ptr to NULL.
+ */
+ prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
+ if (unlikely(prev_storage)) {
+ bpf_selem_unlink_map(first_selem);
+ err = -EAGAIN;
+ goto uncharge;
+
+ /* Note that even though first_selem was linked to smap's
+ * bucket->list, first_selem can be freed immediately
+ * (instead of kfree_rcu) because
+ * bpf_local_storage_map_free() does a
+ * synchronize_rcu() before walking the bucket->list.
+ * Hence, no one is accessing selem from the
+ * bucket->list under rcu_read_lock().
+ */
+ }
+
+ return 0;
+
+uncharge:
+ kfree(storage);
+ mem_uncharge(smap, owner, sizeof(*storage));
+ return err;
+}
+
+/* The owner cannot be going away because it is linking a new elem
+ * to its owner->storage (e.g. sk->sk_refcnt cannot be 0).
+ * Otherwise, it would become a leak (and cause other memory issues
+ * during map destruction).
+ */
+struct bpf_local_storage_data *
+bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
+ void *value, u64 map_flags)
+{
+ struct bpf_local_storage_data *old_sdata = NULL;
+ struct bpf_local_storage_elem *selem;
+ struct bpf_local_storage *local_storage;
+ int err;
+
+ /* BPF_EXIST and BPF_NOEXIST cannot be both set */
+ if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
+ /* BPF_F_LOCK can only be used in a value with spin_lock */
+ unlikely((map_flags & BPF_F_LOCK) &&
+ !map_value_has_spin_lock(&smap->map)))
+ return ERR_PTR(-EINVAL);
+
+ local_storage = rcu_dereference(*owner_storage(smap, owner));
+ if (!local_storage || hlist_empty(&local_storage->list)) {
+ /* Very first elem for the owner */
+ err = check_flags(NULL, map_flags);
+ if (err)
+ return ERR_PTR(err);
+
+ selem = bpf_selem_alloc(smap, owner, value, true);
+ if (!selem)
+ return ERR_PTR(-ENOMEM);
+
+ err = bpf_local_storage_alloc(owner, smap, selem);
+ if (err) {
+ kfree(selem);
+ mem_uncharge(smap, owner, smap->elem_size);
+ return ERR_PTR(err);
+ }
+
+ return SDATA(selem);
+ }
+
+ if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
+ /* Hoping to find an old_sdata to do inline update
+ * such that it can avoid taking the local_storage->lock
+ * and changing the lists.
+ */
+ old_sdata =
+ bpf_local_storage_lookup(local_storage, smap, false);
+ err = check_flags(old_sdata, map_flags);
+ if (err)
+ return ERR_PTR(err);
+ if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
+ copy_map_value_locked(&smap->map, old_sdata->data,
+ value, false);
+ return old_sdata;
+ }
+ }
+
+ raw_spin_lock_bh(&local_storage->lock);
+
+ /* Recheck local_storage->list under local_storage->lock */
+ if (unlikely(hlist_empty(&local_storage->list))) {
+ /* A parallel del is happening and local_storage is going
+ * away. It was just checked above, so this is very
+ * unlikely. Return an error instead of retrying to keep
+ * things simple.
+ */
+ err = -EAGAIN;
+ goto unlock_err;
+ }
+
+ old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
+ err = check_flags(old_sdata, map_flags);
+ if (err)
+ goto unlock_err;
+
+ if (old_sdata && (map_flags & BPF_F_LOCK)) {
+ copy_map_value_locked(&smap->map, old_sdata->data, value,
+ false);
+ selem = SELEM(old_sdata);
+ goto unlock;
+ }
+
+ /* local_storage->lock is held. Hence, we are sure
+ * we can unlink and uncharge the old_sdata successfully
+ * later. Hence, instead of charging the new selem now
+ * and then uncharge the old selem later (which may cause
+ * a potential but unnecessary charge failure), avoid taking
+ * a charge at all here (the "!old_sdata" check) and the
+ * old_sdata will not be uncharged later during
+ * bpf_selem_unlink_storage_nolock().
+ */
+ selem = bpf_selem_alloc(smap, owner, value, !old_sdata);
+ if (!selem) {
+ err = -ENOMEM;
+ goto unlock_err;
+ }
+
+ /* First, link the new selem to the map */
+ bpf_selem_link_map(smap, selem);
+
+ /* Second, link (and publish) the new selem to local_storage */
+ bpf_selem_link_storage_nolock(local_storage, selem);
+
+ /* Third, remove old selem, SELEM(old_sdata) */
+ if (old_sdata) {
+ bpf_selem_unlink_map(SELEM(old_sdata));
+ bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
+ false);
+ }
+
+unlock:
+ raw_spin_unlock_bh(&local_storage->lock);
+ return SDATA(selem);
+
+unlock_err:
+ raw_spin_unlock_bh(&local_storage->lock);
+ return ERR_PTR(err);
+}
+
+u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
+{
+ u64 min_usage = U64_MAX;
+ u16 i, res = 0;
+
+ spin_lock(&cache->idx_lock);
+
+ for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
+ if (cache->idx_usage_counts[i] < min_usage) {
+ min_usage = cache->idx_usage_counts[i];
+ res = i;
+
+ /* Found a free cache_idx */
+ if (!min_usage)
+ break;
+ }
+ }
+ cache->idx_usage_counts[res]++;
+
+ spin_unlock(&cache->idx_lock);
+
+ return res;
+}
+
+void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
+ u16 idx)
+{
+ spin_lock(&cache->idx_lock);
+ cache->idx_usage_counts[idx]--;
+ spin_unlock(&cache->idx_lock);
+}
+
+void bpf_local_storage_map_free(struct bpf_local_storage_map *smap)
+{
+ struct bpf_local_storage_elem *selem;
+ struct bpf_local_storage_map_bucket *b;
+ unsigned int i;
+
+ /* Note that this map might be concurrently cloned from
+ * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
+ * RCU read section to finish before proceeding. New RCU
+ * read sections should be prevented via bpf_map_inc_not_zero.
+ */
+ synchronize_rcu();
+
+ /* bpf prog and the userspace can no longer access this map
+ * now. No new selem (of this map) can be added
+ * to the owner->storage or to the map bucket's list.
+ *
+ * The elem of this map can be cleaned up here
+ * or when the storage is freed e.g.
+ * by bpf_sk_storage_free() during __sk_destruct().
+ */
+ for (i = 0; i < (1U << smap->bucket_log); i++) {
+ b = &smap->buckets[i];
+
+ rcu_read_lock();
+ /* No one is adding to b->list now */
+ while ((selem = hlist_entry_safe(
+ rcu_dereference_raw(hlist_first_rcu(&b->list)),
+ struct bpf_local_storage_elem, map_node))) {
+ bpf_selem_unlink(selem);
+ cond_resched_rcu();
+ }
+ rcu_read_unlock();
+ }
+
+ /* While freeing the storage we may still need to access the map.
+ *
+ * e.g. when bpf_sk_storage_free() has unlinked selem from the map
+ * which then made the above while((selem = ...)) loop
+ * exit immediately.
+ *
+ * However, while freeing the storage one still needs to access the
+ * smap->elem_size to do the uncharging in
+ * bpf_selem_unlink_storage_nolock().
+ *
+ * Hence, wait another rcu grace period for the storage to be freed.
+ */
+ synchronize_rcu();
+
+ kvfree(smap->buckets);
+ kfree(smap);
+}
+
+int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
+{
+ if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
+ !(attr->map_flags & BPF_F_NO_PREALLOC) ||
+ attr->max_entries ||
+ attr->key_size != sizeof(int) || !attr->value_size ||
+ /* Enforce BTF for userspace dumping */
+ !attr->btf_key_type_id || !attr->btf_value_type_id)
+ return -EINVAL;
+
+ if (!bpf_capable())
+ return -EPERM;
+
+ if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
+ return -E2BIG;
+
+ return 0;
+}
+
+struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
+{
+ struct bpf_local_storage_map *smap;
+ unsigned int i;
+ u32 nbuckets;
+ u64 cost;
+ int ret;
+
+ smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
+ if (!smap)
+ return ERR_PTR(-ENOMEM);
+ bpf_map_init_from_attr(&smap->map, attr);
+
+ nbuckets = roundup_pow_of_two(num_possible_cpus());
+ /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
+ nbuckets = max_t(u32, 2, nbuckets);
+ smap->bucket_log = ilog2(nbuckets);
+ cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
+
+ ret = bpf_map_charge_init(&smap->map.memory, cost);
+ if (ret < 0) {
+ kfree(smap);
+ return ERR_PTR(ret);
+ }
+
+ smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
+ GFP_USER | __GFP_NOWARN);
+ if (!smap->buckets) {
+ bpf_map_charge_finish(&smap->map.memory);
+ kfree(smap);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < nbuckets; i++) {
+ INIT_HLIST_HEAD(&smap->buckets[i].list);
+ raw_spin_lock_init(&smap->buckets[i].lock);
+ }
+
+ smap->elem_size =
+ sizeof(struct bpf_local_storage_elem) + attr->value_size;
+
+ return smap;
+}
+
+int bpf_local_storage_map_check_btf(const struct bpf_map *map,
+ const struct btf *btf,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type)
+{
+ u32 int_data;
+
+ if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
+ return -EINVAL;
+
+ int_data = *(u32 *)(key_type + 1);
+ if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
+ return -EINVAL;
+
+ return 0;
+}
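+
+/* Illustration (not part of the patch): map attributes that satisfy
+ * bpf_local_storage_map_alloc_check() and _check_btf() above:
+ * BPF_F_NO_PREALLOC set, max_entries zero, a 32-bit int key, and BTF
+ * IDs for key and value. The BTF fd and type IDs are illustrative;
+ * libbpf skeletons normally set these up automatically.
+ *
+ * #include <bpf/bpf.h>
+ *
+ * static int create_local_storage_map(int btf_fd, int key_id, int val_id)
+ * {
+ *	LIBBPF_OPTS(bpf_map_create_opts, opts,
+ *		    .map_flags = BPF_F_NO_PREALLOC,
+ *		    .btf_fd = btf_fd,
+ *		    .btf_key_type_id = key_id,
+ *		    .btf_value_type_id = val_id);
+ *
+ *	return bpf_map_create(BPF_MAP_TYPE_SK_STORAGE, "sk_stg",
+ *			      sizeof(int), sizeof(long), 0, &opts);
+ * }
+ */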
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index fb278144e9fd..78ea8a7bd27f 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -11,6 +11,8 @@
#include <linux/bpf_lsm.h>
#include <linux/kallsyms.h>
#include <linux/bpf_verifier.h>
+#include <net/bpf_sk_storage.h>
+#include <linux/bpf_local_storage.h>
/* For every LSM hook that allows attachment of BPF programs, declare a nop
* function where a BPF program can be attached.
@@ -45,10 +47,27 @@ int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
return 0;
}
+static const struct bpf_func_proto *
+bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
+{
+ switch (func_id) {
+ case BPF_FUNC_inode_storage_get:
+ return &bpf_inode_storage_get_proto;
+ case BPF_FUNC_inode_storage_delete:
+ return &bpf_inode_storage_delete_proto;
+ case BPF_FUNC_sk_storage_get:
+ return &bpf_sk_storage_get_proto;
+ case BPF_FUNC_sk_storage_delete:
+ return &bpf_sk_storage_delete_proto;
+ default:
+ return tracing_prog_func_proto(func_id, prog);
+ }
+}
+
const struct bpf_prog_ops lsm_prog_ops = {
};
const struct bpf_verifier_ops lsm_verifier_ops = {
- .get_func_proto = tracing_prog_func_proto,
+ .get_func_proto = bpf_lsm_func_proto,
.is_valid_access = btf_ctx_access,
};
diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c
index 969c5d47f81f..4c3b543bb33b 100644
--- a/kernel/bpf/bpf_struct_ops.c
+++ b/kernel/bpf/bpf_struct_ops.c
@@ -298,8 +298,7 @@ static int check_zero_holes(const struct btf_type *t, void *data)
return -EINVAL;
mtype = btf_type_by_id(btf_vmlinux, member->type);
- mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
- NULL, NULL);
+ mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
if (IS_ERR(mtype))
return PTR_ERR(mtype);
prev_mend = moff + msize;
@@ -396,8 +395,7 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
u32 msize;
mtype = btf_type_by_id(btf_vmlinux, member->type);
- mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
- NULL, NULL);
+ mtype = btf_resolve_size(btf_vmlinux, mtype, &msize);
if (IS_ERR(mtype)) {
err = PTR_ERR(mtype);
goto reset_unlock;
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 91afdd4c82e3..ed7d02e8bc93 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -21,6 +21,8 @@
#include <linux/btf_ids.h>
#include <linux/skmsg.h>
#include <linux/perf_event.h>
+#include <linux/bsearch.h>
#include <net/sock.h>
/* BTF (BPF Type Format) is the meta data format which describes
@@ -186,11 +188,6 @@
i < btf_type_vlen(struct_type); \
i++, member++)
-#define for_each_vsi(i, struct_type, member) \
- for (i = 0, member = btf_type_var_secinfo(struct_type); \
- i < btf_type_vlen(struct_type); \
- i++, member++)
-
#define for_each_vsi_from(i, from, struct_type, member) \
for (i = from, member = btf_type_var_secinfo(struct_type) + from; \
i < btf_type_vlen(struct_type); \
@@ -282,6 +279,91 @@ static const char *btf_type_str(const struct btf_type *t)
return btf_kind_str[BTF_INFO_KIND(t->info)];
}
+/* Chunk size we use in safe copy of data to be shown. */
+#define BTF_SHOW_OBJ_SAFE_SIZE 32
+
+/*
+ * This is the maximum size of a base type value (equivalent to a
+ * 128-bit int); if we are at the end of our safe buffer and have
+ * less than 16 bytes space we can't be assured of being able
+ * to copy the next type safely, so in such cases we will initiate
+ * a new copy.
+ */
+#define BTF_SHOW_OBJ_BASE_TYPE_SIZE 16
+
+/* Type name size */
+#define BTF_SHOW_NAME_SIZE 80
+
+/*
+ * Common data to all BTF show operations. Private show functions can add
+ * their own data to a structure containing a struct btf_show and consult it
+ * in the show callback. See btf_type_show() below.
+ *
+ * One challenge with showing nested data is we want to skip 0-valued
+ * data, but in order to figure out whether a nested object is all zeros
+ * we need to walk through it. As a result, we need to make two passes
+ * when handling structs, unions and arrays; the first pass simply looks
+ * for nonzero data, while the second actually does the display. The first
+ * pass is signalled by show->state.depth_check being set, and if we
+ * encounter a non-zero value we set show->state.depth_to_show to
+ * the depth at which we encountered it. When we have completed the
+ * first pass, we will know if anything needs to be displayed if
+ * depth_to_show > depth. See btf_[struct,array]_show() for the
+ * implementation of this.
+ *
+ * Another problem is we want to ensure the data for display is safe to
+ * access. To support this, the anonymous "struct {} obj" tracks the data
+ * object and our safe copy of it. We copy portions of the data needed
+ * to the object "copy" buffer, but because its size is limited to
+ * BTF_SHOW_OBJ_SAFE_SIZE bytes, multiple copies may be required as we
+ * traverse larger objects for display.
+ *
+ * The various data type show functions all start with a call to
+ * btf_show_start_type() which returns a pointer to the safe copy
+ * of the data needed (or if BTF_SHOW_UNSAFE is specified, to the
+ * raw data itself). btf_show_obj_safe() is responsible for
+ * using copy_from_kernel_nofault() to update the safe data if necessary
+ * as we traverse the object's data. skbuff-like semantics are
+ * used:
+ *
+ * - obj.head points to the start of the toplevel object for display
+ * - obj.size is the size of the toplevel object
+ * - obj.data points to the current point in the original data at
+ * which our safe data starts. obj.data will advance as we copy
+ * portions of the data.
+ *
+ * In most cases a single copy will suffice, but larger data structures
+ * such as "struct task_struct" will require many copies. The logic in
+ * btf_show_obj_safe() handles the logic that determines if a new
+ * copy_from_kernel_nofault() is needed.
+ */
+struct btf_show {
+ u64 flags;
+ void *target; /* target of show operation (seq file, buffer) */
+ void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
+ const struct btf *btf;
+ /* below are used during iteration */
+ struct {
+ u8 depth;
+ u8 depth_to_show;
+ u8 depth_check;
+ u8 array_member:1,
+ array_terminated:1;
+ u16 array_encoding;
+ u32 type_id;
+ int status; /* non-zero for error */
+ const struct btf_type *type;
+ const struct btf_member *member;
+ char name[BTF_SHOW_NAME_SIZE]; /* space for member name/type */
+ } state;
+ struct {
+ u32 size;
+ void *head;
+ void *data;
+ u8 safe[BTF_SHOW_OBJ_SAFE_SIZE];
+ } obj;
+};
+
struct btf_kind_operations {
s32 (*check_meta)(struct btf_verifier_env *env,
const struct btf_type *t,
@@ -298,9 +380,9 @@ struct btf_kind_operations {
const struct btf_type *member_type);
void (*log_details)(struct btf_verifier_env *env,
const struct btf_type *t);
- void (*seq_show)(const struct btf *btf, const struct btf_type *t,
+ void (*show)(const struct btf *btf, const struct btf_type *t,
u32 type_id, void *data, u8 bits_offsets,
- struct seq_file *m);
+ struct btf_show *show);
};
static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS];
@@ -353,16 +435,6 @@ static bool btf_type_nosize_or_null(const struct btf_type *t)
return !t || btf_type_nosize(t);
}
-/* union is only a special case of struct:
- * all its offsetof(member) == 0
- */
-static bool btf_type_is_struct(const struct btf_type *t)
-{
- u8 kind = BTF_INFO_KIND(t->info);
-
- return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION;
-}
-
static bool __btf_type_is_struct(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT;
@@ -373,11 +445,6 @@ static bool btf_type_is_array(const struct btf_type *t)
return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY;
}
-static bool btf_type_is_var(const struct btf_type *t)
-{
- return BTF_INFO_KIND(t->info) == BTF_KIND_VAR;
-}
-
static bool btf_type_is_datasec(const struct btf_type *t)
{
return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC;
@@ -526,11 +593,6 @@ static const struct btf_var *btf_type_var(const struct btf_type *t)
return (const struct btf_var *)(t + 1);
}
-static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t)
-{
- return (const struct btf_var_secinfo *)(t + 1);
-}
-
static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t)
{
return kind_ops[BTF_INFO_KIND(t->info)];
@@ -677,6 +739,488 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
return true;
}
+/* Similar to btf_type_skip_modifiers() but does not skip typedefs. */
+static const struct btf_type *btf_type_skip_qualifiers(const struct btf *btf,
+ u32 id)
+{
+ const struct btf_type *t = btf_type_by_id(btf, id);
+
+ while (btf_type_is_modifier(t) &&
+ BTF_INFO_KIND(t->info) != BTF_KIND_TYPEDEF) {
+ id = t->type;
+ t = btf_type_by_id(btf, t->type);
+ }
+
+ return t;
+}
+
+#define BTF_SHOW_MAX_ITER 10
+
+#define BTF_KIND_BIT(kind) (1ULL << kind)
+
+/*
+ * Populate show->state.name with type name information.
+ * Format of type name is
+ *
+ * [.member_name = ] (type_name)
+ */
+static const char *btf_show_name(struct btf_show *show)
+{
+ /* BTF_MAX_ITER array suffixes "[]" */
+ const char *array_suffixes = "[][][][][][][][][][]";
+ const char *array_suffix = &array_suffixes[strlen(array_suffixes)];
+ /* BTF_MAX_ITER pointer suffixes "*" */
+ const char *ptr_suffixes = "**********";
+ const char *ptr_suffix = &ptr_suffixes[strlen(ptr_suffixes)];
+ const char *name = NULL, *prefix = "", *parens = "";
+ const struct btf_member *m = show->state.member;
+ const struct btf_type *t = show->state.type;
+ const struct btf_array *array;
+ u32 id = show->state.type_id;
+ const char *member = NULL;
+ bool show_member = false;
+ u64 kinds = 0;
+ int i;
+
+ show->state.name[0] = '\0';
+
+ /*
+ * Don't show type name if we're showing an array member;
+ * in that case we show the array type, so we don't need to
+ * repeat it for each member.
+ */
+ if (show->state.array_member)
+ return "";
+
+ /* Retrieve member name, if any. */
+ if (m) {
+ member = btf_name_by_offset(show->btf, m->name_off);
+ show_member = strlen(member) > 0;
+ id = m->type;
+ }
+
+ /*
+ * Start with type_id, as we have resolved the struct btf_type *
+ * via btf_modifier_show() past the parent typedef to the child
+ * struct, int etc it is defined as. In such cases, the type_id
+ * still represents the starting type while the struct btf_type *
+ * in our show->state points at the resolved type of the typedef.
+ */
+ t = btf_type_by_id(show->btf, id);
+ if (!t)
+ return "";
+
+ /*
+ * The goal here is to build up the right number of pointer and
+ * array suffixes while ensuring the type name for a typedef
+ * is represented. Along the way we accumulate a list of
+ * BTF kinds we have encountered, since these will inform later
+ * display; for example, pointer types will not require an
+ * opening "{" for struct, we will just display the pointer value.
+ *
+ * We also want to accumulate the right number of pointer or array
+ * indices in the format string while iterating until we get to
+ * the typedef/pointee/array member target type.
+ *
+ * We start by pointing at the end of pointer and array suffix
+ * strings; as we accumulate pointers and arrays we move the pointer
+ * or array string backwards so it will show the expected number of
+ * '*' or '[]' for the type. BTF_SHOW_MAX_ITER of nesting of pointers
+ * and/or arrays and typedefs are supported as a precaution.
+ *
+ * We also want to get the typedef name while proceeding to resolve
+ * the type it points to, so that we can add parentheses if it is a
+ * "typedef struct" etc.
+ */
+ for (i = 0; i < BTF_SHOW_MAX_ITER; i++) {
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_TYPEDEF:
+ if (!name)
+ name = btf_name_by_offset(show->btf,
+ t->name_off);
+ kinds |= BTF_KIND_BIT(BTF_KIND_TYPEDEF);
+ id = t->type;
+ break;
+ case BTF_KIND_ARRAY:
+ kinds |= BTF_KIND_BIT(BTF_KIND_ARRAY);
+ parens = "[";
+ if (!t)
+ return "";
+ array = btf_type_array(t);
+ if (array_suffix > array_suffixes)
+ array_suffix -= 2;
+ id = array->type;
+ break;
+ case BTF_KIND_PTR:
+ kinds |= BTF_KIND_BIT(BTF_KIND_PTR);
+ if (ptr_suffix > ptr_suffixes)
+ ptr_suffix -= 1;
+ id = t->type;
+ break;
+ default:
+ id = 0;
+ break;
+ }
+ if (!id)
+ break;
+ t = btf_type_skip_qualifiers(show->btf, id);
+ }
+ /* We may not be able to represent this type; bail to be safe */
+ if (i == BTF_SHOW_MAX_ITER)
+ return "";
+
+ if (!name)
+ name = btf_name_by_offset(show->btf, t->name_off);
+
+ switch (BTF_INFO_KIND(t->info)) {
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ prefix = BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT ?
+ "struct" : "union";
+ /* if it's an array of struct/union, parens is already set */
+ if (!(kinds & (BTF_KIND_BIT(BTF_KIND_ARRAY))))
+ parens = "{";
+ break;
+ case BTF_KIND_ENUM:
+ prefix = "enum";
+ break;
+ default:
+ break;
+ }
+
+ /* pointer does not require parens */
+ if (kinds & BTF_KIND_BIT(BTF_KIND_PTR))
+ parens = "";
+ /* typedef does not require struct/union/enum prefix */
+ if (kinds & BTF_KIND_BIT(BTF_KIND_TYPEDEF))
+ prefix = "";
+
+ if (!name)
+ name = "";
+
+ /* Even if we don't want type name info, we want parentheses etc */
+ if (show->flags & BTF_SHOW_NONAME)
+ snprintf(show->state.name, sizeof(show->state.name), "%s",
+ parens);
+ else
+ snprintf(show->state.name, sizeof(show->state.name),
+ "%s%s%s(%s%s%s%s%s%s)%s",
+ /* first 3 strings comprise ".member = " */
+ show_member ? "." : "",
+ show_member ? member : "",
+ show_member ? " = " : "",
+ /* ...next is our prefix (struct, enum, etc) */
+ prefix,
+ strlen(prefix) > 0 && strlen(name) > 0 ? " " : "",
+ /* ...this is the type name itself */
+ name,
+ /* ...suffixed by the appropriate '*', '[]' suffixes */
+ strlen(ptr_suffix) > 0 ? " " : "", ptr_suffix,
+ array_suffix, parens);
+
+ return show->state.name;
+}
+
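+/* Illustration (not part of the patch): names btf_show_name() above
+ * would produce for a few common cases (outputs are illustrative):
+ *
+ *   (struct task_struct){             toplevel struct, brace opens
+ *   .parent = (struct task_struct *)  pointer member, no brace
+ *   .comm = (char[16])[               array member, bracket opens
+ *   .pid = (pid_t)                    typedef, no struct/enum prefix
+ */
+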
+static const char *__btf_show_indent(struct btf_show *show)
+{
+ const char *indents = " ";
+ const char *indent = &indents[strlen(indents)];
+
+ if ((indent - show->state.depth) >= indents)
+ return indent - show->state.depth;
+ return indents;
+}
+
+static const char *btf_show_indent(struct btf_show *show)
+{
+ return show->flags & BTF_SHOW_COMPACT ? "" : __btf_show_indent(show);
+}
+
+static const char *btf_show_newline(struct btf_show *show)
+{
+ return show->flags & BTF_SHOW_COMPACT ? "" : "\n";
+}
+
+static const char *btf_show_delim(struct btf_show *show)
+{
+ if (show->state.depth == 0)
+ return "";
+
+ if ((show->flags & BTF_SHOW_COMPACT) && show->state.type &&
+ BTF_INFO_KIND(show->state.type->info) == BTF_KIND_UNION)
+ return "|";
+
+ return ",";
+}
+
+__printf(2, 3) static void btf_show(struct btf_show *show, const char *fmt, ...)
+{
+ va_list args;
+
+ if (!show->state.depth_check) {
+ va_start(args, fmt);
+ show->showfn(show, fmt, args);
+ va_end(args);
+ }
+}
+
+/* Macros are used here as btf_show_type_value[s]() prepends and appends
+ * format specifiers to the format specifier passed in; these do the work of
+ * adding indentation, delimiters etc while the caller simply has to specify
+ * the type value(s) in the format specifier + value(s).
+ */
+#define btf_show_type_value(show, fmt, value) \
+ do { \
+ if ((value) != 0 || (show->flags & BTF_SHOW_ZERO) || \
+ show->state.depth == 0) { \
+ btf_show(show, "%s%s" fmt "%s%s", \
+ btf_show_indent(show), \
+ btf_show_name(show), \
+ value, btf_show_delim(show), \
+ btf_show_newline(show)); \
+ if (show->state.depth > show->state.depth_to_show) \
+ show->state.depth_to_show = show->state.depth; \
+ } \
+ } while (0)
+
+#define btf_show_type_values(show, fmt, ...) \
+ do { \
+ btf_show(show, "%s%s" fmt "%s%s", btf_show_indent(show), \
+ btf_show_name(show), \
+ __VA_ARGS__, btf_show_delim(show), \
+ btf_show_newline(show)); \
+ if (show->state.depth > show->state.depth_to_show) \
+ show->state.depth_to_show = show->state.depth; \
+ } while (0)
+
+/* How much is left to copy to safe buffer after @data? */
+static int btf_show_obj_size_left(struct btf_show *show, void *data)
+{
+ return show->obj.head + show->obj.size - data;
+}
+
+/* Is object pointed to by @data of @size already copied to our safe buffer? */
+static bool btf_show_obj_is_safe(struct btf_show *show, void *data, int size)
+{
+ return data >= show->obj.data &&
+ (data + size) < (show->obj.data + BTF_SHOW_OBJ_SAFE_SIZE);
+}
+
+/*
+ * If object pointed to by @data of @size falls within our safe buffer, return
+ * the equivalent pointer to the same safe data. Assumes
+ * copy_from_kernel_nofault() has already happened and our safe buffer is
+ * populated.
+ */
+static void *__btf_show_obj_safe(struct btf_show *show, void *data, int size)
+{
+ if (btf_show_obj_is_safe(show, data, size))
+ return show->obj.safe + (data - show->obj.data);
+ return NULL;
+}
+
+/*
+ * Return a safe-to-access version of data pointed to by @data.
+ * We do this by copying the relevant amount of information
+ * to the struct btf_show obj.safe buffer using copy_from_kernel_nofault().
+ *
+ * If BTF_SHOW_UNSAFE is specified, just return data as-is; no
+ * safe copy is needed.
+ *
+ * Otherwise we need to determine if we have the required amount
+ * of data (determined by the @data pointer and the size of the
+ * largest base type we can encounter (represented by
+ * BTF_SHOW_OBJ_BASE_TYPE_SIZE)). Having that much data ensures
+ * that we will be able to print some of the current object,
+ * and if more is needed a copy will be triggered.
+ * Some objects such as structs will not fit into the buffer;
+ * in such cases additional copies when we iterate over their
+ * members may be needed.
+ *
+ * btf_show_obj_safe() is used to return a safe buffer for
+ * btf_show_start_type(); this ensures that as we recurse into
+ * nested types we always have safe data for the given type.
+ * This approach is somewhat wasteful; it's possible for example
+ * that when iterating over a large union we'll end up copying the
+ * same data repeatedly, but the goal is safety not performance.
+ * We use stack data as opposed to per-CPU buffers because the
+ * iteration over a type can take some time, and preemption handling
+ * would greatly complicate use of the safe buffer.
+ */
+static void *btf_show_obj_safe(struct btf_show *show,
+ const struct btf_type *t,
+ void *data)
+{
+ const struct btf_type *rt;
+ int size_left, size;
+ void *safe = NULL;
+
+ if (show->flags & BTF_SHOW_UNSAFE)
+ return data;
+
+ rt = btf_resolve_size(show->btf, t, &size);
+ if (IS_ERR(rt)) {
+ show->state.status = PTR_ERR(rt);
+ return NULL;
+ }
+
+ /*
+ * Is this the toplevel object? If so, set the total object size and
+ * initialize pointers. Otherwise, check whether we still fall within
+ * our safe object data.
+ */
+ if (show->state.depth == 0) {
+ show->obj.size = size;
+ show->obj.head = data;
+ } else {
+ /*
+ * If the size of the current object is > our remaining
+ * safe buffer we _may_ need to do a new copy. However
+ * consider the case of a nested struct; its size pushes
+ * us over the safe buffer limit, but showing any individual
+ * struct members does not. In such cases, we don't need
+ * to initiate a fresh copy yet; however we definitely need
+ * at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes left
+ * in our buffer, regardless of the current object size.
+ * The logic here is that as we resolve types we will
+ * hit a base type at some point, and we need to be sure
+ * the next chunk of data is safe to access when we display
+ * that type info. We cannot rely on the size of
+ * the current object here because it may be much larger
+ * than our current buffer (e.g. task_struct is 8k).
+ * All we want to do here is ensure that we can print the
+ * next basic type, which we can if either
+ * - the current type size is within the safe buffer; or
+ * - at least BTF_SHOW_OBJ_BASE_TYPE_SIZE bytes are left in
+ * the safe buffer.
+ */
+ safe = __btf_show_obj_safe(show, data,
+ min(size,
+ BTF_SHOW_OBJ_BASE_TYPE_SIZE));
+ }
+
+ /*
+ * We need a new copy to our safe object, either because we haven't
+ * yet copied and are initializing safe data, or because the data
+ * we want falls outside the boundaries of the safe object.
+ */
+ if (!safe) {
+ size_left = btf_show_obj_size_left(show, data);
+ if (size_left > BTF_SHOW_OBJ_SAFE_SIZE)
+ size_left = BTF_SHOW_OBJ_SAFE_SIZE;
+ show->state.status = copy_from_kernel_nofault(show->obj.safe,
+ data, size_left);
+ if (!show->state.status) {
+ show->obj.data = data;
+ safe = show->obj.safe;
+ }
+ }
+
+ return safe;
+}
+
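+/* Illustration (not part of the patch): a worked pass through
+ * btf_show_obj_safe() above for a 128-byte struct with the 32-byte
+ * safe buffer (offsets illustrative):
+ *
+ *   depth 0: obj.head = data, obj.size = 128; the first copy caches
+ *            bytes 0..31 of the object.
+ *   member at offset 24, size 4: inside the window, so the safe
+ *            buffer is reused without a new copy.
+ *   member at offset 40, size 8: outside the window, so a fresh
+ *            32-byte copy is taken and the window becomes 40..71.
+ *   member at offset 124, size 4: btf_show_obj_size_left() caps the
+ *            copy at the 4 bytes remaining in the object.
+ */
+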
+/*
+ * Set the type we are starting to show and return a safe data pointer
+ * to be used for showing the associated data.
+ */
+static void *btf_show_start_type(struct btf_show *show,
+ const struct btf_type *t,
+ u32 type_id, void *data)
+{
+ show->state.type = t;
+ show->state.type_id = type_id;
+ show->state.name[0] = '\0';
+
+ return btf_show_obj_safe(show, t, data);
+}
+
+static void btf_show_end_type(struct btf_show *show)
+{
+ show->state.type = NULL;
+ show->state.type_id = 0;
+ show->state.name[0] = '\0';
+}
+
+static void *btf_show_start_aggr_type(struct btf_show *show,
+ const struct btf_type *t,
+ u32 type_id, void *data)
+{
+ void *safe_data = btf_show_start_type(show, t, type_id, data);
+
+ if (!safe_data)
+ return safe_data;
+
+ btf_show(show, "%s%s%s", btf_show_indent(show),
+ btf_show_name(show),
+ btf_show_newline(show));
+ show->state.depth++;
+ return safe_data;
+}
+
+static void btf_show_end_aggr_type(struct btf_show *show,
+ const char *suffix)
+{
+ show->state.depth--;
+ btf_show(show, "%s%s%s%s", btf_show_indent(show), suffix,
+ btf_show_delim(show), btf_show_newline(show));
+ btf_show_end_type(show);
+}
+
+static void btf_show_start_member(struct btf_show *show,
+ const struct btf_member *m)
+{
+ show->state.member = m;
+}
+
+static void btf_show_start_array_member(struct btf_show *show)
+{
+ show->state.array_member = 1;
+ btf_show_start_member(show, NULL);
+}
+
+static void btf_show_end_member(struct btf_show *show)
+{
+ show->state.member = NULL;
+}
+
+static void btf_show_end_array_member(struct btf_show *show)
+{
+ show->state.array_member = 0;
+ btf_show_end_member(show);
+}
+
+static void *btf_show_start_array_type(struct btf_show *show,
+ const struct btf_type *t,
+ u32 type_id,
+ u16 array_encoding,
+ void *data)
+{
+ show->state.array_encoding = array_encoding;
+ show->state.array_terminated = 0;
+ return btf_show_start_aggr_type(show, t, type_id, data);
+}
+
+static void btf_show_end_array_type(struct btf_show *show)
+{
+ show->state.array_encoding = 0;
+ show->state.array_terminated = 0;
+ btf_show_end_aggr_type(show, "]");
+}
+
+static void *btf_show_start_struct_type(struct btf_show *show,
+ const struct btf_type *t,
+ u32 type_id,
+ void *data)
+{
+ return btf_show_start_aggr_type(show, t, type_id, data);
+}
+
+static void btf_show_end_struct_type(struct btf_show *show)
+{
+ btf_show_end_aggr_type(show, "}");
+}
+
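+/* Illustration (not part of the patch): how the start/end helpers
+ * above nest when showing "struct X { int a; char b[3]; }"; the int
+ * show function appears further down in this patch:
+ *
+ *   btf_show_start_struct_type()        emits "(struct X){"
+ *     btf_show_start_member(a)
+ *       btf_int_show()                  emits ".a = (int)1"
+ *     btf_show_end_member()
+ *     btf_show_start_member(b)
+ *       btf_show_start_array_type()     emits ".b = (char[3])["
+ *         btf_show_start_array_member() per element
+ *       btf_show_end_array_type()       emits "]"
+ *     btf_show_end_member()
+ *   btf_show_end_struct_type()          emits "}"
+ */
+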
__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log,
const char *fmt, ...)
{
@@ -1079,23 +1623,27 @@ static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env)
* *type_size: (x * y * sizeof(u32)). Hence, *type_size always
* corresponds to the return type.
* *elem_type: u32
+ * *elem_id: id of u32
* *total_nelems: (x * y). Hence, individual elem size is
* (*type_size / *total_nelems)
+ * *type_id: id of type if it's changed within the function, 0 if not
*
* type: is not an array (e.g. const struct X)
* return type: type "struct X"
* *type_size: sizeof(struct X)
* *elem_type: same as return type ("struct X")
+ * *elem_id: 0
* *total_nelems: 1
+ * *type_id: id of type if it's changed within the function, 0 if not
*/
-const struct btf_type *
-btf_resolve_size(const struct btf *btf, const struct btf_type *type,
- u32 *type_size, const struct btf_type **elem_type,
- u32 *total_nelems)
+static const struct btf_type *
+__btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size, const struct btf_type **elem_type,
+ u32 *elem_id, u32 *total_nelems, u32 *type_id)
{
const struct btf_type *array_type = NULL;
- const struct btf_array *array;
- u32 i, size, nelems = 1;
+ const struct btf_array *array = NULL;
+ u32 i, size, nelems = 1, id = 0;
for (i = 0; i < MAX_RESOLVE_DEPTH; i++) {
switch (BTF_INFO_KIND(type->info)) {
@@ -1116,6 +1664,7 @@ btf_resolve_size(const struct btf *btf, const struct btf_type *type,
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
+ id = type->type;
type = btf_type_by_id(btf, type->type);
break;
@@ -1146,10 +1695,21 @@ resolved:
*total_nelems = nelems;
if (elem_type)
*elem_type = type;
+ if (elem_id)
+ *elem_id = array ? array->type : 0;
+ if (type_id && id)
+ *type_id = id;
return array_type ? : type;
}
+const struct btf_type *
+btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size)
+{
+ return __btf_resolve_size(btf, type, type_size, NULL, NULL, NULL, NULL);
+}
+
/* The input param "type_id" must point to a needs_resolve type */
static const struct btf_type *btf_type_id_resolve(const struct btf *btf,
u32 *type_id)
@@ -1250,11 +1810,11 @@ static int btf_df_resolve(struct btf_verifier_env *env,
return -EINVAL;
}
-static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t,
- u32 type_id, void *data, u8 bits_offsets,
- struct seq_file *m)
+static void btf_df_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offsets,
+ struct btf_show *show)
{
- seq_printf(m, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
+ btf_show(show, "<unsupported kind:%u>", BTF_INFO_KIND(t->info));
}
static int btf_int_check_member(struct btf_verifier_env *env,
@@ -1427,7 +1987,7 @@ static void btf_int_log(struct btf_verifier_env *env,
btf_int_encoding_str(BTF_INT_ENCODING(int_data)));
}
-static void btf_int128_print(struct seq_file *m, void *data)
+static void btf_int128_print(struct btf_show *show, void *data)
{
/* data points to a __int128 number.
* Suppose
@@ -1446,9 +2006,10 @@ static void btf_int128_print(struct seq_file *m, void *data)
lower_num = *(u64 *)data;
#endif
if (upper_num == 0)
- seq_printf(m, "0x%llx", lower_num);
+ btf_show_type_value(show, "0x%llx", lower_num);
else
- seq_printf(m, "0x%llx%016llx", upper_num, lower_num);
+ btf_show_type_values(show, "0x%llx%016llx", upper_num,
+ lower_num);
}
static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
@@ -1492,8 +2053,8 @@ static void btf_int128_shift(u64 *print_num, u16 left_shift_bits,
#endif
}
-static void btf_bitfield_seq_show(void *data, u8 bits_offset,
- u8 nr_bits, struct seq_file *m)
+static void btf_bitfield_show(void *data, u8 bits_offset,
+ u8 nr_bits, struct btf_show *show)
{
u16 left_shift_bits, right_shift_bits;
u8 nr_copy_bytes;
@@ -1513,14 +2074,14 @@ static void btf_bitfield_seq_show(void *data, u8 bits_offset,
right_shift_bits = BITS_PER_U128 - nr_bits;
btf_int128_shift(print_num, left_shift_bits, right_shift_bits);
- btf_int128_print(m, print_num);
+ btf_int128_print(show, print_num);
}
-static void btf_int_bits_seq_show(const struct btf *btf,
- const struct btf_type *t,
- void *data, u8 bits_offset,
- struct seq_file *m)
+static void btf_int_bits_show(const struct btf *btf,
+ const struct btf_type *t,
+ void *data, u8 bits_offset,
+ struct btf_show *show)
{
u32 int_data = btf_type_int(t);
u8 nr_bits = BTF_INT_BITS(int_data);
@@ -1533,55 +2094,77 @@ static void btf_int_bits_seq_show(const struct btf *btf,
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
- btf_bitfield_seq_show(data, bits_offset, nr_bits, m);
+ btf_bitfield_show(data, bits_offset, nr_bits, show);
}
-static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
- u32 type_id, void *data, u8 bits_offset,
- struct seq_file *m)
+static void btf_int_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
{
u32 int_data = btf_type_int(t);
u8 encoding = BTF_INT_ENCODING(int_data);
bool sign = encoding & BTF_INT_SIGNED;
u8 nr_bits = BTF_INT_BITS(int_data);
+ void *safe_data;
+
+ safe_data = btf_show_start_type(show, t, type_id, data);
+ if (!safe_data)
+ return;
if (bits_offset || BTF_INT_OFFSET(int_data) ||
BITS_PER_BYTE_MASKED(nr_bits)) {
- btf_int_bits_seq_show(btf, t, data, bits_offset, m);
- return;
+ btf_int_bits_show(btf, t, safe_data, bits_offset, show);
+ goto out;
}
switch (nr_bits) {
case 128:
- btf_int128_print(m, data);
+ btf_int128_print(show, safe_data);
break;
case 64:
if (sign)
- seq_printf(m, "%lld", *(s64 *)data);
+ btf_show_type_value(show, "%lld", *(s64 *)safe_data);
else
- seq_printf(m, "%llu", *(u64 *)data);
+ btf_show_type_value(show, "%llu", *(u64 *)safe_data);
break;
case 32:
if (sign)
- seq_printf(m, "%d", *(s32 *)data);
+ btf_show_type_value(show, "%d", *(s32 *)safe_data);
else
- seq_printf(m, "%u", *(u32 *)data);
+ btf_show_type_value(show, "%u", *(u32 *)safe_data);
break;
case 16:
if (sign)
- seq_printf(m, "%d", *(s16 *)data);
+ btf_show_type_value(show, "%d", *(s16 *)safe_data);
else
- seq_printf(m, "%u", *(u16 *)data);
+ btf_show_type_value(show, "%u", *(u16 *)safe_data);
break;
case 8:
+ if (show->state.array_encoding == BTF_INT_CHAR) {
+ /* check for null terminator */
+ if (show->state.array_terminated)
+ break;
+ if (*(char *)safe_data == '\0') {
+ show->state.array_terminated = 1;
+ break;
+ }
+ if (isprint(*(char *)safe_data)) {
+ btf_show_type_value(show, "'%c'",
+ *(char *)safe_data);
+ break;
+ }
+ }
if (sign)
- seq_printf(m, "%d", *(s8 *)data);
+ btf_show_type_value(show, "%d", *(s8 *)safe_data);
else
- seq_printf(m, "%u", *(u8 *)data);
+ btf_show_type_value(show, "%u", *(u8 *)safe_data);
break;
default:
- btf_int_bits_seq_show(btf, t, data, bits_offset, m);
+ btf_int_bits_show(btf, t, safe_data, bits_offset, show);
+ break;
}
+out:
+ btf_show_end_type(show);
}
static const struct btf_kind_operations int_ops = {
@@ -1590,7 +2173,7 @@ static const struct btf_kind_operations int_ops = {
.check_member = btf_int_check_member,
.check_kflag_member = btf_int_check_kflag_member,
.log_details = btf_int_log,
- .seq_show = btf_int_seq_show,
+ .show = btf_int_show,
};
static int btf_modifier_check_member(struct btf_verifier_env *env,
@@ -1854,34 +2437,44 @@ static int btf_ptr_resolve(struct btf_verifier_env *env,
return 0;
}
-static void btf_modifier_seq_show(const struct btf *btf,
- const struct btf_type *t,
- u32 type_id, void *data,
- u8 bits_offset, struct seq_file *m)
+static void btf_modifier_show(const struct btf *btf,
+ const struct btf_type *t,
+ u32 type_id, void *data,
+ u8 bits_offset, struct btf_show *show)
{
if (btf->resolved_ids)
t = btf_type_id_resolve(btf, &type_id);
else
t = btf_type_skip_modifiers(btf, type_id, NULL);
- btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
+ btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
}
-static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t,
- u32 type_id, void *data, u8 bits_offset,
- struct seq_file *m)
+static void btf_var_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
{
t = btf_type_id_resolve(btf, &type_id);
- btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m);
+ btf_type_ops(t)->show(btf, t, type_id, data, bits_offset, show);
}
-static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t,
- u32 type_id, void *data, u8 bits_offset,
- struct seq_file *m)
+static void btf_ptr_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
{
- /* It is a hashed value */
- seq_printf(m, "%p", *(void **)data);
+ void *safe_data;
+
+ safe_data = btf_show_start_type(show, t, type_id, data);
+ if (!safe_data)
+ return;
+
+ /* It is a hashed value unless BTF_SHOW_PTR_RAW is specified */
+ if (show->flags & BTF_SHOW_PTR_RAW)
+ btf_show_type_value(show, "0x%px", *(void **)safe_data);
+ else
+ btf_show_type_value(show, "0x%p", *(void **)safe_data);
+ btf_show_end_type(show);
}
static void btf_ref_type_log(struct btf_verifier_env *env,
@@ -1896,7 +2489,7 @@ static struct btf_kind_operations modifier_ops = {
.check_member = btf_modifier_check_member,
.check_kflag_member = btf_modifier_check_kflag_member,
.log_details = btf_ref_type_log,
- .seq_show = btf_modifier_seq_show,
+ .show = btf_modifier_show,
};
static struct btf_kind_operations ptr_ops = {
@@ -1905,7 +2498,7 @@ static struct btf_kind_operations ptr_ops = {
.check_member = btf_ptr_check_member,
.check_kflag_member = btf_generic_check_kflag_member,
.log_details = btf_ref_type_log,
- .seq_show = btf_ptr_seq_show,
+ .show = btf_ptr_show,
};
static s32 btf_fwd_check_meta(struct btf_verifier_env *env,
@@ -1946,7 +2539,7 @@ static struct btf_kind_operations fwd_ops = {
.check_member = btf_df_check_member,
.check_kflag_member = btf_df_check_kflag_member,
.log_details = btf_fwd_type_log,
- .seq_show = btf_df_seq_show,
+ .show = btf_df_show,
};
static int btf_array_check_member(struct btf_verifier_env *env,
@@ -2105,28 +2698,90 @@ static void btf_array_log(struct btf_verifier_env *env,
array->type, array->index_type, array->nelems);
}
-static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t,
- u32 type_id, void *data, u8 bits_offset,
- struct seq_file *m)
+static void __btf_array_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
{
const struct btf_array *array = btf_type_array(t);
const struct btf_kind_operations *elem_ops;
const struct btf_type *elem_type;
- u32 i, elem_size, elem_type_id;
+ u32 i, elem_size = 0, elem_type_id;
+ u16 encoding = 0;
elem_type_id = array->type;
- elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size);
+ elem_type = btf_type_skip_modifiers(btf, elem_type_id, NULL);
+ if (elem_type && btf_type_has_size(elem_type))
+ elem_size = elem_type->size;
+
+ if (elem_type && btf_type_is_int(elem_type)) {
+ u32 int_type = btf_type_int(elem_type);
+
+ encoding = BTF_INT_ENCODING(int_type);
+
+ /*
+ * BTF_INT_CHAR encoding never seems to be set for
+ * char arrays, so if the element size is 1, force
+ * char encoding and print printable elements as
+ * characters.
+ */
+ if (elem_size == 1)
+ encoding = BTF_INT_CHAR;
+ }
+
+ if (!btf_show_start_array_type(show, t, type_id, encoding, data))
+ return;
+
+ if (!elem_type)
+ goto out;
elem_ops = btf_type_ops(elem_type);
- seq_puts(m, "[");
+
for (i = 0; i < array->nelems; i++) {
- if (i)
- seq_puts(m, ",");
- elem_ops->seq_show(btf, elem_type, elem_type_id, data,
- bits_offset, m);
+ btf_show_start_array_member(show);
+
+ elem_ops->show(btf, elem_type, elem_type_id, data,
+ bits_offset, show);
data += elem_size;
+
+ btf_show_end_array_member(show);
+
+ if (show->state.array_terminated)
+ break;
+ }
+out:
+ btf_show_end_array_type(show);
+}
+
+static void btf_array_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
+{
+ const struct btf_member *m = show->state.member;
+
+ /*
+ * First check if any members would be shown (are non-zero).
+ * See comments above "struct btf_show" definition for more
+ * details on how this works at a high-level.
+ */
+ if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
+ if (!show->state.depth_check) {
+ show->state.depth_check = show->state.depth + 1;
+ show->state.depth_to_show = 0;
+ }
+ __btf_array_show(btf, t, type_id, data, bits_offset, show);
+ show->state.member = m;
+
+ if (show->state.depth_check != show->state.depth + 1)
+ return;
+ show->state.depth_check = 0;
+
+ if (show->state.depth_to_show <= show->state.depth)
+ return;
+ /*
+ * Reaching here indicates we have recursed and found
+ * non-zero array member(s).
+ */
}
- seq_puts(m, "]");
+ __btf_array_show(btf, t, type_id, data, bits_offset, show);
}
static struct btf_kind_operations array_ops = {
@@ -2135,7 +2790,7 @@ static struct btf_kind_operations array_ops = {
.check_member = btf_array_check_member,
.check_kflag_member = btf_generic_check_kflag_member,
.log_details = btf_array_log,
- .seq_show = btf_array_seq_show,
+ .show = btf_array_show,
};
static int btf_struct_check_member(struct btf_verifier_env *env,
@@ -2358,15 +3013,18 @@ int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t)
return off;
}
-static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
- u32 type_id, void *data, u8 bits_offset,
- struct seq_file *m)
+static void __btf_struct_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
{
- const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? "|" : ",";
const struct btf_member *member;
+ void *safe_data;
u32 i;
- seq_puts(m, "{");
+ safe_data = btf_show_start_struct_type(show, t, type_id, data);
+ if (!safe_data)
+ return;
+
for_each_member(i, t, member) {
const struct btf_type *member_type = btf_type_by_id(btf,
member->type);
@@ -2375,23 +3033,65 @@ static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t,
u32 bytes_offset;
u8 bits8_offset;
- if (i)
- seq_puts(m, seq);
+ btf_show_start_member(show, member);
member_offset = btf_member_bit_offset(t, member);
bitfield_size = btf_member_bitfield_size(t, member);
bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset);
bits8_offset = BITS_PER_BYTE_MASKED(member_offset);
if (bitfield_size) {
- btf_bitfield_seq_show(data + bytes_offset, bits8_offset,
- bitfield_size, m);
+ safe_data = btf_show_start_type(show, member_type,
+ member->type,
+ data + bytes_offset);
+ if (safe_data)
+ btf_bitfield_show(safe_data,
+ bits8_offset,
+ bitfield_size, show);
+ btf_show_end_type(show);
} else {
ops = btf_type_ops(member_type);
- ops->seq_show(btf, member_type, member->type,
- data + bytes_offset, bits8_offset, m);
+ ops->show(btf, member_type, member->type,
+ data + bytes_offset, bits8_offset, show);
}
+
+ btf_show_end_member(show);
}
- seq_puts(m, "}");
+
+ btf_show_end_struct_type(show);
+}
+
+static void btf_struct_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
+{
+ const struct btf_member *m = show->state.member;
+
+ /*
+ * First check if any members would be shown (are non-zero).
+ * See comments above "struct btf_show" definition for more
+ * details on how this works at a high-level.
+ */
+ if (show->state.depth > 0 && !(show->flags & BTF_SHOW_ZERO)) {
+ if (!show->state.depth_check) {
+ show->state.depth_check = show->state.depth + 1;
+ show->state.depth_to_show = 0;
+ }
+ __btf_struct_show(btf, t, type_id, data, bits_offset, show);
+ /* Restore saved member data here */
+ show->state.member = m;
+ if (show->state.depth_check != show->state.depth + 1)
+ return;
+ show->state.depth_check = 0;
+
+ if (show->state.depth_to_show <= show->state.depth)
+ return;
+ /*
+ * Reaching here indicates we have recursed and found
+ * non-zero child values.
+ */
+ }
+
+ __btf_struct_show(btf, t, type_id, data, bits_offset, show);
}
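
To make the two passes above concrete, a hedged illustration (struct and values invented for the example) of what the depth_check machinery buys for the default zero-suppressing output:

/* Illustration only. Given:
 *
 *   struct ex { int a; int b; };
 *   struct ex v = { .b = 1 };
 *
 * the first __btf_struct_show() call runs with state.depth_check set
 * and prints nothing; it merely records in state.depth_to_show that a
 * member below this depth is non-zero. Since something was found, the
 * second call prints for real, emitting only ".b = (int)1"-style
 * output and suppressing the all-zero member "a". With BTF_SHOW_ZERO
 * set, both members are printed in a single pass.
 */
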
static struct btf_kind_operations struct_ops = {
@@ -2400,7 +3100,7 @@ static struct btf_kind_operations struct_ops = {
.check_member = btf_struct_check_member,
.check_kflag_member = btf_generic_check_kflag_member,
.log_details = btf_struct_log,
- .seq_show = btf_struct_seq_show,
+ .show = btf_struct_show,
};
static int btf_enum_check_member(struct btf_verifier_env *env,
@@ -2531,24 +3231,35 @@ static void btf_enum_log(struct btf_verifier_env *env,
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}
-static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t,
- u32 type_id, void *data, u8 bits_offset,
- struct seq_file *m)
+static void btf_enum_show(const struct btf *btf, const struct btf_type *t,
+ u32 type_id, void *data, u8 bits_offset,
+ struct btf_show *show)
{
const struct btf_enum *enums = btf_type_enum(t);
u32 i, nr_enums = btf_type_vlen(t);
- int v = *(int *)data;
+ void *safe_data;
+ int v;
+
+ safe_data = btf_show_start_type(show, t, type_id, data);
+ if (!safe_data)
+ return;
+
+ v = *(int *)safe_data;
for (i = 0; i < nr_enums; i++) {
- if (v == enums[i].val) {
- seq_printf(m, "%s",
- __btf_name_by_offset(btf,
- enums[i].name_off));
- return;
- }
+ if (v != enums[i].val)
+ continue;
+
+ btf_show_type_value(show, "%s",
+ __btf_name_by_offset(btf,
+ enums[i].name_off));
+
+ btf_show_end_type(show);
+ return;
}
- seq_printf(m, "%d", v);
+ btf_show_type_value(show, "%d", v);
+ btf_show_end_type(show);
}
static struct btf_kind_operations enum_ops = {
@@ -2557,7 +3268,7 @@ static struct btf_kind_operations enum_ops = {
.check_member = btf_enum_check_member,
.check_kflag_member = btf_enum_check_kflag_member,
.log_details = btf_enum_log,
- .seq_show = btf_enum_seq_show,
+ .show = btf_enum_show,
};
static s32 btf_func_proto_check_meta(struct btf_verifier_env *env,
@@ -2644,7 +3355,7 @@ static struct btf_kind_operations func_proto_ops = {
.check_member = btf_df_check_member,
.check_kflag_member = btf_df_check_kflag_member,
.log_details = btf_func_proto_log,
- .seq_show = btf_df_seq_show,
+ .show = btf_df_show,
};
static s32 btf_func_check_meta(struct btf_verifier_env *env,
@@ -2678,7 +3389,7 @@ static struct btf_kind_operations func_ops = {
.check_member = btf_df_check_member,
.check_kflag_member = btf_df_check_kflag_member,
.log_details = btf_ref_type_log,
- .seq_show = btf_df_seq_show,
+ .show = btf_df_show,
};
static s32 btf_var_check_meta(struct btf_verifier_env *env,
@@ -2742,7 +3453,7 @@ static const struct btf_kind_operations var_ops = {
.check_member = btf_df_check_member,
.check_kflag_member = btf_df_check_kflag_member,
.log_details = btf_var_log,
- .seq_show = btf_var_seq_show,
+ .show = btf_var_show,
};
static s32 btf_datasec_check_meta(struct btf_verifier_env *env,
@@ -2868,24 +3579,28 @@ static void btf_datasec_log(struct btf_verifier_env *env,
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
}
-static void btf_datasec_seq_show(const struct btf *btf,
- const struct btf_type *t, u32 type_id,
- void *data, u8 bits_offset,
- struct seq_file *m)
+static void btf_datasec_show(const struct btf *btf,
+ const struct btf_type *t, u32 type_id,
+ void *data, u8 bits_offset,
+ struct btf_show *show)
{
const struct btf_var_secinfo *vsi;
const struct btf_type *var;
u32 i;
- seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off));
+ if (!btf_show_start_type(show, t, type_id, data))
+ return;
+
+ btf_show_type_value(show, "section (\"%s\") = {",
+ __btf_name_by_offset(btf, t->name_off));
for_each_vsi(i, t, vsi) {
var = btf_type_by_id(btf, vsi->type);
if (i)
- seq_puts(m, ",");
- btf_type_ops(var)->seq_show(btf, var, vsi->type,
- data + vsi->offset, bits_offset, m);
+ btf_show(show, ",");
+ btf_type_ops(var)->show(btf, var, vsi->type,
+ data + vsi->offset, bits_offset, show);
}
- seq_puts(m, "}");
+ btf_show_end_type(show);
}
static const struct btf_kind_operations datasec_ops = {
@@ -2894,7 +3609,7 @@ static const struct btf_kind_operations datasec_ops = {
.check_member = btf_df_check_member,
.check_kflag_member = btf_df_check_kflag_member,
.log_details = btf_datasec_log,
- .seq_show = btf_datasec_seq_show,
+ .show = btf_datasec_show,
};
static int btf_func_proto_check(struct btf_verifier_env *env,
@@ -3688,7 +4403,7 @@ errout:
struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
{
- struct bpf_prog *tgt_prog = prog->aux->linked_prog;
+ struct bpf_prog *tgt_prog = prog->aux->dst_prog;
if (tgt_prog) {
return tgt_prog->aux->btf;
@@ -3715,7 +4430,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info)
{
const struct btf_type *t = prog->aux->attach_func_proto;
- struct bpf_prog *tgt_prog = prog->aux->linked_prog;
+ struct bpf_prog *tgt_prog = prog->aux->dst_prog;
struct btf *btf = bpf_prog_get_target_btf(prog);
const char *tname = prog->aux->attach_func_name;
struct bpf_verifier_log *log = info->log;
@@ -3842,7 +4557,14 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
info->reg_type = PTR_TO_BTF_ID;
if (tgt_prog) {
- ret = btf_translate_to_vmlinux(log, btf, t, tgt_prog->type, arg);
+ enum bpf_prog_type tgt_type;
+
+ if (tgt_prog->type == BPF_PROG_TYPE_EXT)
+ tgt_type = tgt_prog->aux->saved_dst_prog_type;
+ else
+ tgt_type = tgt_prog->type;
+
+ ret = btf_translate_to_vmlinux(log, btf, t, tgt_type, arg);
if (ret > 0) {
info->btf_id = ret;
return true;
@@ -3870,16 +4592,22 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
return true;
}
-int btf_struct_access(struct bpf_verifier_log *log,
- const struct btf_type *t, int off, int size,
- enum bpf_access_type atype,
- u32 *next_btf_id)
+enum bpf_struct_walk_result {
+ /* < 0 error */
+ WALK_SCALAR = 0,
+ WALK_PTR,
+ WALK_STRUCT,
+};
+
+static int btf_struct_walk(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ u32 *next_btf_id)
{
u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
const struct btf_type *mtype, *elem_type = NULL;
const struct btf_member *member;
const char *tname, *mname;
- u32 vlen;
+ u32 vlen, elem_id, mid;
again:
tname = __btf_name_by_offset(btf_vmlinux, t->name_off);
@@ -3915,14 +4643,13 @@ again:
/* Only allow structure for now, can be relaxed for
* other types later.
*/
- elem_type = btf_type_skip_modifiers(btf_vmlinux,
- array_elem->type, NULL);
- if (!btf_type_is_struct(elem_type))
+ t = btf_type_skip_modifiers(btf_vmlinux, array_elem->type,
+ NULL);
+ if (!btf_type_is_struct(t))
goto error;
- off = (off - moff) % elem_type->size;
- return btf_struct_access(log, elem_type, off, size, atype,
- next_btf_id);
+ off = (off - moff) % t->size;
+ goto again;
error:
bpf_log(log, "access beyond struct %s at off %u size %u\n",
@@ -3951,7 +4678,7 @@ error:
*/
if (off <= moff &&
BITS_ROUNDUP_BYTES(end_bit) <= off + size)
- return SCALAR_VALUE;
+ return WALK_SCALAR;
/* off may be accessing a following member
*
@@ -3973,11 +4700,13 @@ error:
break;
/* type of the field */
+ mid = member->type;
mtype = btf_type_by_id(btf_vmlinux, member->type);
mname = __btf_name_by_offset(btf_vmlinux, member->name_off);
- mtype = btf_resolve_size(btf_vmlinux, mtype, &msize,
- &elem_type, &total_nelems);
+ mtype = __btf_resolve_size(btf_vmlinux, mtype, &msize,
+ &elem_type, &elem_id, &total_nelems,
+ &mid);
if (IS_ERR(mtype)) {
bpf_log(log, "field %s doesn't have size\n", mname);
return -EFAULT;
@@ -3991,7 +4720,7 @@ error:
if (btf_type_is_array(mtype)) {
u32 elem_idx;
- /* btf_resolve_size() above helps to
+ /* __btf_resolve_size() above helps to
* linearize a multi-dimensional array.
*
* The logic here is treating an array
@@ -4039,6 +4768,7 @@ error:
elem_idx = (off - moff) / msize;
moff += elem_idx * msize;
mtype = elem_type;
+ mid = elem_id;
}
/* the 'off' we're looking for is either equal to start
@@ -4048,6 +4778,12 @@ error:
/* our field must be inside that union or struct */
t = mtype;
+ /* return if the offset matches the member offset */
+ if (off == moff) {
+ *next_btf_id = mid;
+ return WALK_STRUCT;
+ }
+
/* adjust offset we're looking for */
off -= moff;
goto again;
@@ -4063,11 +4799,10 @@ error:
mname, moff, tname, off, size);
return -EACCES;
}
-
stype = btf_type_skip_modifiers(btf_vmlinux, mtype->type, &id);
if (btf_type_is_struct(stype)) {
*next_btf_id = id;
- return PTR_TO_BTF_ID;
+ return WALK_PTR;
}
}
@@ -4084,23 +4819,82 @@ error:
return -EACCES;
}
- return SCALAR_VALUE;
+ return WALK_SCALAR;
}
bpf_log(log, "struct %s doesn't have field at offset %d\n", tname, off);
return -EINVAL;
}
-int btf_resolve_helper_id(struct bpf_verifier_log *log,
- const struct bpf_func_proto *fn, int arg)
+int btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype __maybe_unused,
+ u32 *next_btf_id)
+{
+ int err;
+ u32 id;
+
+ do {
+ err = btf_struct_walk(log, t, off, size, &id);
+
+ switch (err) {
+ case WALK_PTR:
+ /* If we found the pointer or scalar on t+off,
+ * we're done.
+ */
+ *next_btf_id = id;
+ return PTR_TO_BTF_ID;
+ case WALK_SCALAR:
+ return SCALAR_VALUE;
+ case WALK_STRUCT:
+ /* We found a nested struct, so continue the search
+ * by diving into it. At this point the offset is
+ * aligned with the new type, so set it to 0.
+ */
+ t = btf_type_by_id(btf_vmlinux, id);
+ off = 0;
+ break;
+ default:
+ /* It's either an error or an unknown return value;
+ * scream and leave.
+ */
+ if (WARN_ONCE(err > 0, "unknown btf_struct_walk return value"))
+ return -EINVAL;
+ return err;
+ }
+ } while (t);
+
+ return -EINVAL;
+}
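
As a worked illustration of the walk loop above (types and offsets invented for the example):

/* Illustration only. Given:
 *
 *   struct inner { int x; struct file *f; };
 *   struct outer { long pad; struct inner in; };
 *
 * an access of size 4 at off == offsetof(struct outer, in) resolves as
 *
 *   btf_struct_walk(outer, off) -> off lands exactly on member "in",
 *                                  so WALK_STRUCT with the id of
 *                                  struct inner; the loop restarts
 *                                  with off = 0.
 *   btf_struct_walk(inner, 0)   -> member "x" is an int, so
 *                                  WALK_SCALAR.
 *
 * and btf_struct_access() returns SCALAR_VALUE. Had the access hit
 * "f" instead, the walk would return WALK_PTR with the id of
 * struct file, and btf_struct_access() would return PTR_TO_BTF_ID.
 */
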
+
+bool btf_struct_ids_match(struct bpf_verifier_log *log,
+ int off, u32 id, u32 need_type_id)
{
- int id;
+ const struct btf_type *type;
+ int err;
- if (fn->arg_type[arg] != ARG_PTR_TO_BTF_ID || !btf_vmlinux)
- return -EINVAL;
- id = fn->btf_id[arg];
- if (!id || id > btf_vmlinux->nr_types)
- return -EINVAL;
- return id;
+ /* Are we already done? */
+ if (need_type_id == id && off == 0)
+ return true;
+
+again:
+ type = btf_type_by_id(btf_vmlinux, id);
+ if (!type)
+ return false;
+ err = btf_struct_walk(log, type, off, 1, &id);
+ if (err != WALK_STRUCT)
+ return false;
+
+ /* We found a nested struct object. If it matches
+ * the requested ID, we're done. Otherwise continue
+ * the search with offset 0 in the new type.
+ */
+ if (need_type_id != id) {
+ off = 0;
+ goto again;
+ }
+
+ return true;
}
static int __get_type_size(struct btf *btf, u32 btf_id,
@@ -4298,7 +5092,7 @@ static int btf_check_func_type_match(struct bpf_verifier_log *log,
}
/* Compare BTFs of given program with BTF of target program */
-int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf2, const struct btf_type *t2)
{
struct btf *btf1 = prog->aux->btf;
@@ -4306,7 +5100,7 @@ int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
u32 btf_id = 0;
if (!prog->aux->func_info) {
- bpf_log(&env->log, "Program extension requires BTF\n");
+ bpf_log(log, "Program extension requires BTF\n");
return -EINVAL;
}
@@ -4318,7 +5112,7 @@ int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
if (!t1 || !btf_type_is_func(t1))
return -EFAULT;
- return btf_check_func_type_match(&env->log, btf1, t1, btf2, t2);
+ return btf_check_func_type_match(log, btf1, t1, btf2, t2);
}
/* Compare BTF of a function with given bpf_reg_state.
@@ -4469,7 +5263,7 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
return -EFAULT;
}
if (prog_type == BPF_PROG_TYPE_EXT)
- prog_type = prog->aux->linked_prog->type;
+ prog_type = prog->aux->dst_prog->type;
t = btf_type_by_id(btf, t->type);
if (!t || !btf_type_is_func_proto(t)) {
@@ -4516,12 +5310,93 @@ int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
return 0;
}
+static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
+ struct btf_show *show)
+{
+ const struct btf_type *t = btf_type_by_id(btf, type_id);
+
+ show->btf = btf;
+ memset(&show->state, 0, sizeof(show->state));
+ memset(&show->obj, 0, sizeof(show->obj));
+
+ btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
+}
+
+static void btf_seq_show(struct btf_show *show, const char *fmt,
+ va_list args)
+{
+ seq_vprintf((struct seq_file *)show->target, fmt, args);
+}
+
+int btf_type_seq_show_flags(const struct btf *btf, u32 type_id,
+ void *obj, struct seq_file *m, u64 flags)
+{
+ struct btf_show sseq;
+
+ sseq.target = m;
+ sseq.showfn = btf_seq_show;
+ sseq.flags = flags;
+
+ btf_type_show(btf, type_id, obj, &sseq);
+
+ return sseq.state.status;
+}
+
void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj,
struct seq_file *m)
{
- const struct btf_type *t = btf_type_by_id(btf, type_id);
+ (void) btf_type_seq_show_flags(btf, type_id, obj, m,
+ BTF_SHOW_NONAME | BTF_SHOW_COMPACT |
+ BTF_SHOW_ZERO | BTF_SHOW_UNSAFE);
+}
+
+struct btf_show_snprintf {
+ struct btf_show show;
+ int len_left; /* space left in string */
+ int len; /* length we would have written */
+};
+
+static void btf_snprintf_show(struct btf_show *show, const char *fmt,
+ va_list args)
+{
+ struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
+ int len;
- btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m);
+ len = vsnprintf(show->target, ssnprintf->len_left, fmt, args);
+
+ if (len < 0) {
+ ssnprintf->len_left = 0;
+ ssnprintf->len = len;
+ } else if (len > ssnprintf->len_left) {
+ /* no space, drive on to get length we would have written */
+ ssnprintf->len_left = 0;
+ ssnprintf->len += len;
+ } else {
+ ssnprintf->len_left -= len;
+ ssnprintf->len += len;
+ show->target += len;
+ }
+}
+
+int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
+ char *buf, int len, u64 flags)
+{
+ struct btf_show_snprintf ssnprintf;
+
+ ssnprintf.show.target = buf;
+ ssnprintf.show.flags = flags;
+ ssnprintf.show.showfn = btf_snprintf_show;
+ ssnprintf.len_left = len;
+ ssnprintf.len = 0;
+
+ btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
+
+ /* If we encountered an error, return it. */
+ if (ssnprintf.show.state.status)
+ return ssnprintf.show.state.status;
+
+ /* Otherwise return the length we would have written */
+ return ssnprintf.len;
}
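
A hedged caller-side sketch of the new snprintf-style interface (the caller, its type id, and the object are assumptions for illustration):

/* Sketch only: render an object described by BTF into a fixed buffer.
 * Truncation handling mirrors snprintf(): the return value is the
 * length that would have been written.
 */
static int show_obj(const struct btf *btf, u32 type_id, void *obj)
{
	char buf[256];
	int len;

	len = btf_type_snprintf_show(btf, type_id, obj, buf, sizeof(buf),
				     BTF_SHOW_COMPACT);
	if (len < 0)
		return len;		/* error from the show machinery */
	if (len >= (int)sizeof(buf))
		return -E2BIG;		/* output was truncated */
	pr_info("%s\n", buf);
	return 0;
}
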
#ifdef CONFIG_PROC_FS
@@ -4661,3 +5536,15 @@ u32 btf_id(const struct btf *btf)
{
return btf->id;
}
+
+static int btf_id_cmp_func(const void *a, const void *b)
+{
+ const int *pa = a, *pb = b;
+
+ return *pa - *pb;
+}
+
+bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
+{
+ return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
+}
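
bsearch() requires the ids to be sorted, which the build step guarantees. As a hedged sketch of how such a set is typically declared (the BTF_SET_* macros from <linux/btf_ids.h> and the resolve_btfids build step that fills in and sorts the ids are assumptions not shown in this hunk):

BTF_SET_START(example_struct_ids)	/* name is illustrative */
BTF_ID(struct, sk_buff)
BTF_ID(struct, task_struct)
BTF_SET_END(example_struct_ids)

static bool example_id_allowed(u32 btf_id)
{
	return btf_id_set_contains(&example_struct_ids, btf_id);
}
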
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 03e284873644..9268d77898b7 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -98,6 +98,8 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
fp->jit_requested = ebpf_jit_enabled();
INIT_LIST_HEAD_RCU(&fp->aux->ksym.lnode);
+ mutex_init(&fp->aux->used_maps_mutex);
+ mutex_init(&fp->aux->dst_mutex);
return fp;
}
@@ -253,6 +255,8 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
void __bpf_prog_free(struct bpf_prog *fp)
{
if (fp->aux) {
+ mutex_destroy(&fp->aux->used_maps_mutex);
+ mutex_destroy(&fp->aux->dst_mutex);
free_percpu(fp->aux->stats);
kfree(fp->aux->poke_tab);
kfree(fp->aux);
@@ -773,7 +777,8 @@ int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
if (size > poke_tab_max)
return -ENOSPC;
- if (poke->ip || poke->ip_stable || poke->adj_off)
+ if (poke->tailcall_target || poke->tailcall_target_stable ||
+ poke->tailcall_bypass || poke->adj_off || poke->bypass_addr)
return -EINVAL;
switch (poke->reason) {
@@ -1747,8 +1752,9 @@ bool bpf_prog_array_compatible(struct bpf_array *array,
static int bpf_check_tail_call(const struct bpf_prog *fp)
{
struct bpf_prog_aux *aux = fp->aux;
- int i;
+ int i, ret = 0;
+ mutex_lock(&aux->used_maps_mutex);
for (i = 0; i < aux->used_map_cnt; i++) {
struct bpf_map *map = aux->used_maps[i];
struct bpf_array *array;
@@ -1757,11 +1763,15 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
continue;
array = container_of(map, struct bpf_array, map);
- if (!bpf_prog_array_compatible(array, fp))
- return -EINVAL;
+ if (!bpf_prog_array_compatible(array, fp)) {
+ ret = -EINVAL;
+ goto out;
+ }
}
- return 0;
+out:
+ mutex_unlock(&aux->used_maps_mutex);
+ return ret;
}
static void bpf_prog_select_func(struct bpf_prog *fp)
@@ -2130,7 +2140,8 @@ static void bpf_prog_free_deferred(struct work_struct *work)
if (aux->prog->has_callchain_buf)
put_callchain_buffers();
#endif
- bpf_trampoline_put(aux->trampoline);
+ if (aux->dst_trampoline)
+ bpf_trampoline_put(aux->dst_trampoline);
for (i = 0; i < aux->func_cnt; i++)
bpf_jit_free(aux->func[i]);
if (aux->func_cnt) {
@@ -2146,8 +2157,8 @@ void bpf_prog_free(struct bpf_prog *fp)
{
struct bpf_prog_aux *aux = fp->aux;
- if (aux->linked_prog)
- bpf_prog_put(aux->linked_prog);
+ if (aux->dst_prog)
+ bpf_prog_put(aux->dst_prog);
INIT_WORK(&aux->work, bpf_prog_free_deferred);
schedule_work(&aux->work);
}
@@ -2208,6 +2219,8 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto __weak;
const struct bpf_func_proto bpf_get_local_storage_proto __weak;
const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto __weak;
+const struct bpf_func_proto bpf_snprintf_btf_proto __weak;
+const struct bpf_func_proto bpf_seq_printf_btf_proto __weak;
const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void)
{
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 6386b7bb98f2..c61a23b564aa 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -79,8 +79,6 @@ struct bpf_cpu_map {
static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq);
-
static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
u32 value_size = attr->value_size;
@@ -157,8 +155,7 @@ static void cpu_map_kthread_stop(struct work_struct *work)
kthread_stop(rcpu->kthread);
}
-static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
- struct xdp_frame *xdpf,
+static struct sk_buff *cpu_map_build_skb(struct xdp_frame *xdpf,
struct sk_buff *skb)
{
unsigned int hard_start_headroom;
@@ -367,7 +364,7 @@ static int cpu_map_kthread_run(void *data)
struct sk_buff *skb = skbs[i];
int ret;
- skb = cpu_map_build_skb(rcpu, xdpf, skb);
+ skb = cpu_map_build_skb(xdpf, skb);
if (!skb) {
xdp_return_frame(xdpf);
continue;
@@ -658,6 +655,7 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
static int cpu_map_btf_id;
const struct bpf_map_ops cpu_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = cpu_map_alloc,
.map_free = cpu_map_free,
.map_delete_elem = cpu_map_delete_elem,
@@ -669,7 +667,7 @@ const struct bpf_map_ops cpu_map_ops = {
.map_btf_id = &cpu_map_btf_id,
};
-static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
+static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
struct bpf_cpu_map_entry *rcpu = bq->obj;
unsigned int processed = 0, drops = 0;
@@ -678,7 +676,7 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
int i;
if (unlikely(!bq->count))
- return 0;
+ return;
q = rcpu->queue;
spin_lock(&q->producer_lock);
@@ -701,13 +699,12 @@ static int bq_flush_to_queue(struct xdp_bulk_queue *bq)
/* Feedback loop via tracepoints */
trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
- return 0;
}
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
* Thus, safe percpu variable access.
*/
-static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
+static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
@@ -728,8 +725,6 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
if (!bq->flush_node.prev)
list_add(&bq->flush_node, flush_list);
-
- return 0;
}
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 10abb06065bb..2b5ca93c17de 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -341,14 +341,14 @@ bool dev_map_can_have_prog(struct bpf_map *map)
return false;
}
-static int bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
+static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
{
struct net_device *dev = bq->dev;
int sent = 0, drops = 0, err = 0;
int i;
if (unlikely(!bq->count))
- return 0;
+ return;
for (i = 0; i < bq->count; i++) {
struct xdp_frame *xdpf = bq->q[i];
@@ -369,7 +369,7 @@ out:
trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
bq->dev_rx = NULL;
__list_del_clearprev(&bq->flush_node);
- return 0;
+ return;
error:
/* If ndo_xdp_xmit fails with an errno, no frames have been
* xmit'ed and it's our responsibility to free them all.
@@ -421,8 +421,8 @@ struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
/* Runs under RCU-read-side, plus in softirq under NAPI protection.
* Thus, safe percpu variable access.
*/
-static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
- struct net_device *dev_rx)
+static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
+ struct net_device *dev_rx)
{
struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
@@ -441,8 +441,6 @@ static int bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
if (!bq->flush_node.prev)
list_add(&bq->flush_node, flush_list);
-
- return 0;
}
static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
@@ -462,7 +460,8 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
if (unlikely(!xdpf))
return -EOVERFLOW;
- return bq_enqueue(dev, xdpf, dev_rx);
+ bq_enqueue(dev, xdpf, dev_rx);
+ return 0;
}
static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
@@ -751,6 +750,7 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
static int dev_map_btf_id;
const struct bpf_map_ops dev_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
.map_get_next_key = dev_map_get_next_key,
@@ -764,6 +764,7 @@ const struct bpf_map_ops dev_map_ops = {
static int dev_map_hash_map_btf_id;
const struct bpf_map_ops dev_map_hash_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = dev_map_alloc,
.map_free = dev_map_free,
.map_get_next_key = dev_map_hash_get_next_key,
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 7df28a45c66b..1815e97d4c9c 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -9,6 +9,7 @@
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
+#include <linux/rcupdate_trace.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"
@@ -577,8 +578,7 @@ static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
struct htab_elem *l;
u32 hash, key_size;
- /* Must be called with rcu_read_lock. */
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
key_size = map->key_size;
@@ -612,7 +612,7 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
* bpf_prog
* __htab_map_lookup_elem
*/
-static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
struct bpf_insn *insn = insn_buf;
const int ret = BPF_REG_0;
@@ -651,7 +651,7 @@ static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
return __htab_lru_map_lookup_elem(map, key, false);
}
-static u32 htab_lru_map_gen_lookup(struct bpf_map *map,
+static int htab_lru_map_gen_lookup(struct bpf_map *map,
struct bpf_insn *insn_buf)
{
struct bpf_insn *insn = insn_buf;
@@ -941,7 +941,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
key_size = map->key_size;
@@ -1032,7 +1032,7 @@ static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
/* unknown flags */
return -EINVAL;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
key_size = map->key_size;
@@ -1220,7 +1220,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
u32 hash, key_size;
int ret = -ENOENT;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
key_size = map->key_size;
@@ -1252,7 +1252,7 @@ static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
u32 hash, key_size;
int ret = -ENOENT;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held());
key_size = map->key_size;
@@ -1803,6 +1803,7 @@ static const struct bpf_iter_seq_info iter_seq_info = {
static int htab_map_btf_id;
const struct bpf_map_ops htab_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
@@ -1820,6 +1821,7 @@ const struct bpf_map_ops htab_map_ops = {
static int htab_lru_map_btf_id;
const struct bpf_map_ops htab_lru_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
@@ -1940,6 +1942,7 @@ static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
static int htab_percpu_map_btf_id;
const struct bpf_map_ops htab_percpu_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
@@ -1956,6 +1959,7 @@ const struct bpf_map_ops htab_percpu_map_ops = {
static int htab_lru_percpu_map_btf_id;
const struct bpf_map_ops htab_lru_percpu_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = htab_map_alloc_check,
.map_alloc = htab_map_alloc,
.map_free = htab_map_free,
@@ -2066,7 +2070,7 @@ static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
return READ_ONCE(*inner_map);
}
-static u32 htab_of_map_gen_lookup(struct bpf_map *map,
+static int htab_of_map_gen_lookup(struct bpf_map *map,
struct bpf_insn *insn_buf)
{
struct bpf_insn *insn = insn_buf;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index be43ab3e619f..25520f5eeaf6 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -601,6 +601,56 @@ const struct bpf_func_proto bpf_event_output_data_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
+BPF_CALL_3(bpf_copy_from_user, void *, dst, u32, size,
+ const void __user *, user_ptr)
+{
+ int ret = copy_from_user(dst, user_ptr, size);
+
+ if (unlikely(ret)) {
+ memset(dst, 0, size);
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+const struct bpf_func_proto bpf_copy_from_user_proto = {
+ .func = bpf_copy_from_user,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+};
+
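
A hedged sketch of the BPF-program side of bpf_copy_from_user() (attach point and libbpf macros are illustrative; the helper is restricted to sleepable programs):

SEC("fentry.s/do_sys_open")	/* ".s" marks a sleepable program */
int BPF_PROG(probe_open, int dfd, const char *filename)
{
	char buf[64];

	/* on failure the helper zeroes buf and returns -EFAULT */
	if (bpf_copy_from_user(buf, sizeof(buf), filename) < 0)
		return 0;
	bpf_printk("open: %s", buf);
	return 0;
}
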
+BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
+{
+ if (cpu >= nr_cpu_ids)
+ return (unsigned long)NULL;
+
+ return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
+}
+
+const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
+ .func = bpf_per_cpu_ptr,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL,
+ .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
+ .arg2_type = ARG_ANYTHING,
+};
+
+BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
+{
+ return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
+}
+
+const struct bpf_func_proto bpf_this_cpu_ptr_proto = {
+ .func = bpf_this_cpu_ptr,
+ .gpl_only = false,
+ .ret_type = RET_PTR_TO_MEM_OR_BTF_ID,
+ .arg1_type = ARG_PTR_TO_PERCPU_BTF_ID,
+};
+
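
A hedged sketch of the BPF side of the two new percpu helpers; the __ksym declaration pattern and the names are assumptions for illustration:

extern const struct rq runqueues __ksym;	/* percpu kernel symbol */

SEC("raw_tp/sched_switch")
int dump_rq(void *ctx)
{
	const struct rq *rq;

	/* returns NULL when the cpu number is out of range */
	rq = bpf_per_cpu_ptr(&runqueues, bpf_get_smp_processor_id());
	if (!rq)
		return 0;

	/* bpf_this_cpu_ptr() can never return NULL, so no check */
	rq = bpf_this_cpu_ptr(&runqueues);
	return 0;
}
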
const struct bpf_func_proto bpf_get_current_task_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_proto __weak;
const struct bpf_func_proto bpf_probe_read_user_str_proto __weak;
@@ -661,8 +711,16 @@ bpf_base_func_proto(enum bpf_func_id func_id)
if (!perfmon_capable())
return NULL;
return bpf_get_trace_printk_proto();
+ case BPF_FUNC_snprintf_btf:
+ if (!perfmon_capable())
+ return NULL;
+ return &bpf_snprintf_btf_proto;
case BPF_FUNC_jiffies64:
return &bpf_jiffies64_proto;
+ case BPF_FUNC_bpf_per_cpu_ptr:
+ return &bpf_per_cpu_ptr_proto;
+ case BPF_FUNC_bpf_this_cpu_ptr:
+ return &bpf_this_cpu_ptr_proto;
default:
break;
}
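
A hedged sketch of a BPF program using the new bpf_snprintf_btf() helper; struct btf_ptr, BTF_F_COMPACT and bpf_core_type_id_kernel() come from the accompanying UAPI/libbpf changes and are assumptions not shown in this hunk:

SEC("iter/task")
int dump_task(struct bpf_iter__task *ctx)
{
	struct task_struct *task = ctx->task;
	static char out[2048];
	struct btf_ptr ptr = {};

	if (!task)
		return 0;

	ptr.ptr = task;
	ptr.type_id = bpf_core_type_id_kernel(struct task_struct);

	/* returns the length that would be written, or a negative error */
	if (bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr),
			     BTF_F_COMPACT) < 0)
		return 0;
	return 0;
}
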
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 18f4969552ac..dd4b7fd60ee7 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -20,6 +20,7 @@
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
+#include "preload/bpf_preload.h"
enum bpf_type {
BPF_TYPE_UNSPEC = 0,
@@ -371,9 +372,10 @@ static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
- * extensions.
+ * extensions. That allows populate_bpffs() to create special files.
*/
- if (strchr(dentry->d_name.name, '.'))
+ if ((dir->i_mode & S_IALLUGO) &&
+ strchr(dentry->d_name.name, '.'))
return ERR_PTR(-EPERM);
return simple_lookup(dir, dentry, flags);
@@ -411,6 +413,27 @@ static const struct inode_operations bpf_dir_iops = {
.unlink = simple_unlink,
};
+/* pin iterator link into bpffs */
+static int bpf_iter_link_pin_kernel(struct dentry *parent,
+ const char *name, struct bpf_link *link)
+{
+ umode_t mode = S_IFREG | S_IRUSR;
+ struct dentry *dentry;
+ int ret;
+
+ inode_lock(parent->d_inode);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry)) {
+ inode_unlock(parent->d_inode);
+ return PTR_ERR(dentry);
+ }
+ ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
+ &bpf_iter_fops);
+ dput(dentry);
+ inode_unlock(parent->d_inode);
+ return ret;
+}
+
static int bpf_obj_do_pin(const char __user *pathname, void *raw,
enum bpf_type type)
{
@@ -640,6 +663,91 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
return 0;
}
+struct bpf_preload_ops *bpf_preload_ops;
+EXPORT_SYMBOL_GPL(bpf_preload_ops);
+
+static bool bpf_preload_mod_get(void)
+{
+ /* If bpf_preload.ko wasn't loaded earlier, load it now.
+ * When bpf_preload is built into vmlinux, the module's __init
+ * function will have populated bpf_preload_ops already.
+ */
+ if (!bpf_preload_ops) {
+ request_module("bpf_preload");
+ if (!bpf_preload_ops)
+ return false;
+ }
+ /* Grab a reference so the module doesn't disappear while the
+ * kernel is interacting with it and its user mode driver.
+ */
+ if (!try_module_get(bpf_preload_ops->owner)) {
+ pr_err("bpf_preload module get failed.\n");
+ return false;
+ }
+ return true;
+}
+
+static void bpf_preload_mod_put(void)
+{
+ if (bpf_preload_ops)
+ /* now the user can "rmmod bpf_preload" if necessary */
+ module_put(bpf_preload_ops->owner);
+}
+
+static DEFINE_MUTEX(bpf_preload_lock);
+
+static int populate_bpffs(struct dentry *parent)
+{
+ struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};
+ struct bpf_link *links[BPF_PRELOAD_LINKS] = {};
+ int err = 0, i;
+
+ /* grab the mutex to make sure the kernel's interactions with the
+ * bpf_preload UMD are serialized
+ */
+ mutex_lock(&bpf_preload_lock);
+
+ /* load bpf_preload.ko if it isn't loaded yet and take a reference */
+ if (!bpf_preload_mod_get())
+ goto out;
+
+ if (!bpf_preload_ops->info.tgid) {
+ /* preload() will start UMD that will load BPF iterator programs */
+ err = bpf_preload_ops->preload(objs);
+ if (err)
+ goto out_put;
+ for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
+ links[i] = bpf_link_by_id(objs[i].link_id);
+ if (IS_ERR(links[i])) {
+ err = PTR_ERR(links[i]);
+ goto out_put;
+ }
+ }
+ for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
+ err = bpf_iter_link_pin_kernel(parent,
+ objs[i].link_name, links[i]);
+ if (err)
+ goto out_put;
+ /* do not unlink successfully pinned links even
+ * if later link fails to pin
+ */
+ links[i] = NULL;
+ }
+ /* finish() will tell UMD process to exit */
+ err = bpf_preload_ops->finish();
+ if (err)
+ goto out_put;
+ }
+out_put:
+ bpf_preload_mod_put();
+out:
+ mutex_unlock(&bpf_preload_lock);
+ for (i = 0; i < BPF_PRELOAD_LINKS && err; i++)
+ if (!IS_ERR_OR_NULL(links[i]))
+ bpf_link_put(links[i]);
+ return err;
+}
+
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
{
static const struct tree_descr bpf_rfiles[] = { { "" } };
@@ -656,8 +764,8 @@ static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
inode = sb->s_root->d_inode;
inode->i_op = &bpf_dir_iops;
inode->i_mode &= ~S_IALLUGO;
+ populate_bpffs(sb->s_root);
inode->i_mode |= S_ISVTX | opts->mode;
-
return 0;
}
@@ -707,6 +815,8 @@ static int __init bpf_init(void)
{
int ret;
+ mutex_init(&bpf_preload_lock);
+
ret = sysfs_create_mount_point(fs_kobj, "bpf");
if (ret)
return ret;
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 44474bf3ab7a..00e32f2ec3e6 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -732,6 +732,7 @@ static int trie_check_btf(const struct bpf_map *map,
static int trie_map_btf_id;
const struct bpf_map_ops trie_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = trie_alloc,
.map_free = trie_free,
.map_get_next_key = trie_get_next_key,
diff --git a/kernel/bpf/map_in_map.c b/kernel/bpf/map_in_map.c
index 17738c93bec8..39ab0b68cade 100644
--- a/kernel/bpf/map_in_map.c
+++ b/kernel/bpf/map_in_map.c
@@ -17,23 +17,17 @@ struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd)
if (IS_ERR(inner_map))
return inner_map;
- /* prog_array->aux->{type,jited} is a runtime binding.
- * Doing static check alone in the verifier is not enough.
- */
- if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
- inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE ||
- inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE ||
- inner_map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
- fdput(f);
- return ERR_PTR(-ENOTSUPP);
- }
-
/* Does not support >1 level map-in-map */
if (inner_map->inner_map_meta) {
fdput(f);
return ERR_PTR(-EINVAL);
}
+ if (!inner_map->ops->map_meta_equal) {
+ fdput(f);
+ return ERR_PTR(-ENOTSUPP);
+ }
+
if (map_value_has_spin_lock(inner_map)) {
fdput(f);
return ERR_PTR(-ENOTSUPP);
@@ -81,15 +75,14 @@ bool bpf_map_meta_equal(const struct bpf_map *meta0,
return meta0->map_type == meta1->map_type &&
meta0->key_size == meta1->key_size &&
meta0->value_size == meta1->value_size &&
- meta0->map_flags == meta1->map_flags &&
- meta0->max_entries == meta1->max_entries;
+ meta0->map_flags == meta1->map_flags;
}
void *bpf_map_fd_get_ptr(struct bpf_map *map,
struct file *map_file /* not used */,
int ufd)
{
- struct bpf_map *inner_map;
+ struct bpf_map *inner_map, *inner_map_meta;
struct fd f;
f = fdget(ufd);
@@ -97,7 +90,8 @@ void *bpf_map_fd_get_ptr(struct bpf_map *map,
if (IS_ERR(inner_map))
return inner_map;
- if (bpf_map_meta_equal(map->inner_map_meta, inner_map))
+ inner_map_meta = map->inner_map_meta;
+ if (inner_map_meta->ops->map_meta_equal(inner_map_meta, inner_map))
bpf_map_inc(inner_map);
else
inner_map = ERR_PTR(-EINVAL);
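
Dropping max_entries from the generic bpf_map_meta_equal() is safe because map types that inline the max_entries bound check into their lookup can install a stricter callback. A hedged sketch of such an override, mirroring what an array map would do (name assumed):

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	/* arrays inline the max_entries check into the lookup, so
	 * inner maps must still agree on it
	 */
	return meta0->max_entries == meta1->max_entries &&
	       bpf_map_meta_equal(meta0, meta1);
}
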
diff --git a/kernel/bpf/map_in_map.h b/kernel/bpf/map_in_map.h
index a507bf6ef8b9..bcb7534afb3c 100644
--- a/kernel/bpf/map_in_map.h
+++ b/kernel/bpf/map_in_map.h
@@ -11,8 +11,6 @@ struct bpf_map;
struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd);
void bpf_map_meta_free(struct bpf_map *map_meta);
-bool bpf_map_meta_equal(const struct bpf_map *meta0,
- const struct bpf_map *meta1);
void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file,
int ufd);
void bpf_map_fd_put_ptr(void *ptr);
diff --git a/kernel/bpf/map_iter.c b/kernel/bpf/map_iter.c
index af86048e5afd..6a9542af4212 100644
--- a/kernel/bpf/map_iter.c
+++ b/kernel/bpf/map_iter.c
@@ -149,6 +149,19 @@ static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
bpf_map_put_with_uref(aux->map);
}
+void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux,
+ struct seq_file *seq)
+{
+ seq_printf(seq, "map_id:\t%u\n", aux->map->id);
+}
+
+int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux,
+ struct bpf_link_info *info)
+{
+ info->iter.map.map_id = aux->map->id;
+ return 0;
+}
+
DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
struct bpf_map *map, void *key, void *value)
@@ -156,6 +169,8 @@ static const struct bpf_iter_reg bpf_map_elem_reg_info = {
.target = "bpf_map_elem",
.attach_target = bpf_iter_attach_map,
.detach_target = bpf_iter_detach_map,
+ .show_fdinfo = bpf_iter_map_show_fdinfo,
+ .fill_link_info = bpf_iter_map_fill_link_info,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__bpf_map_elem, key),
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index b367430e611c..3d897de89061 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -17,6 +17,8 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
raw_spin_lock_init(&head->lock);
head->first = NULL;
}
+ raw_spin_lock_init(&s->extralist.lock);
+ s->extralist.first = NULL;
return 0;
}
@@ -40,12 +42,50 @@ static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
raw_spin_unlock(&head->lock);
}
+static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
+ struct pcpu_freelist_node *node)
+{
+ if (!raw_spin_trylock(&s->extralist.lock))
+ return false;
+
+ pcpu_freelist_push_node(&s->extralist, node);
+ raw_spin_unlock(&s->extralist.lock);
+ return true;
+}
+
+static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
+ struct pcpu_freelist_node *node)
+{
+ int cpu, orig_cpu;
+
+ orig_cpu = cpu = raw_smp_processor_id();
+ while (1) {
+ struct pcpu_freelist_head *head;
+
+ head = per_cpu_ptr(s->freelist, cpu);
+ if (raw_spin_trylock(&head->lock)) {
+ pcpu_freelist_push_node(head, node);
+ raw_spin_unlock(&head->lock);
+ return;
+ }
+ cpu = cpumask_next(cpu, cpu_possible_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = 0;
+
+ /* cannot take any per-cpu lock, try extralist */
+ if (cpu == orig_cpu &&
+ pcpu_freelist_try_push_extra(s, node))
+ return;
+ }
+}
+
void __pcpu_freelist_push(struct pcpu_freelist *s,
struct pcpu_freelist_node *node)
{
- struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
-
- ___pcpu_freelist_push(head, node);
+ if (in_nmi())
+ ___pcpu_freelist_push_nmi(s, node);
+ else
+ ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
}
void pcpu_freelist_push(struct pcpu_freelist *s,
@@ -81,7 +121,7 @@ again:
}
}
-struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
+static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
struct pcpu_freelist_head *head;
struct pcpu_freelist_node *node;
@@ -102,8 +142,59 @@ struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
if (cpu >= nr_cpu_ids)
cpu = 0;
if (cpu == orig_cpu)
- return NULL;
+ break;
+ }
+
+ /* per cpu lists are all empty, try extralist */
+ raw_spin_lock(&s->extralist.lock);
+ node = s->extralist.first;
+ if (node)
+ s->extralist.first = node->next;
+ raw_spin_unlock(&s->extralist.lock);
+ return node;
+}
+
+static struct pcpu_freelist_node *
+___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
+{
+ struct pcpu_freelist_head *head;
+ struct pcpu_freelist_node *node;
+ int orig_cpu, cpu;
+
+ orig_cpu = cpu = raw_smp_processor_id();
+ while (1) {
+ head = per_cpu_ptr(s->freelist, cpu);
+ if (raw_spin_trylock(&head->lock)) {
+ node = head->first;
+ if (node) {
+ head->first = node->next;
+ raw_spin_unlock(&head->lock);
+ return node;
+ }
+ raw_spin_unlock(&head->lock);
+ }
+ cpu = cpumask_next(cpu, cpu_possible_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = 0;
+ if (cpu == orig_cpu)
+ break;
}
+
+ /* cannot pop from per cpu lists, try extralist */
+ if (!raw_spin_trylock(&s->extralist.lock))
+ return NULL;
+ node = s->extralist.first;
+ if (node)
+ s->extralist.first = node->next;
+ raw_spin_unlock(&s->extralist.lock);
+ return node;
+}
+
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+ if (in_nmi())
+ return ___pcpu_freelist_pop_nmi(s);
+ return ___pcpu_freelist_pop(s);
}
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
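
For orientation, a minimal usage sketch of the freelist API (error handling elided; get_prealloced_node() is hypothetical, since nodes must point into element storage preallocated by the caller, as the hash table does):

struct pcpu_freelist fl;
struct pcpu_freelist_node *node = get_prealloced_node();	/* hypothetical */

if (pcpu_freelist_init(&fl))
	return -ENOMEM;

/* the non-__ wrappers disable IRQs around the raw variants;
 * in_nmi() callers now fall back to trylock plus the extralist
 */
pcpu_freelist_push(&fl, node);
node = pcpu_freelist_pop(&fl);

pcpu_freelist_destroy(&fl);
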
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index fbf8a8a28979..3c76553cfe57 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -13,6 +13,7 @@ struct pcpu_freelist_head {
struct pcpu_freelist {
struct pcpu_freelist_head __percpu *freelist;
+ struct pcpu_freelist_head extralist;
};
struct pcpu_freelist_node {
diff --git a/kernel/bpf/preload/.gitignore b/kernel/bpf/preload/.gitignore
new file mode 100644
index 000000000000..856a4c5ad0dd
--- /dev/null
+++ b/kernel/bpf/preload/.gitignore
@@ -0,0 +1,4 @@
+/FEATURE-DUMP.libbpf
+/bpf_helper_defs.h
+/feature
+/bpf_preload_umd
diff --git a/kernel/bpf/preload/Kconfig b/kernel/bpf/preload/Kconfig
new file mode 100644
index 000000000000..ace49111d3a3
--- /dev/null
+++ b/kernel/bpf/preload/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config USERMODE_DRIVER
+ bool
+ default n
+
+menuconfig BPF_PRELOAD
+ bool "Preload BPF file system with kernel specific program and map iterators"
+ depends on BPF
+ # The dependency on !COMPILE_TEST prevents it from being enabled
+ # in allmodconfig or allyesconfig configurations
+ depends on !COMPILE_TEST
+ select USERMODE_DRIVER
+ help
+ This builds a kernel module with several embedded BPF programs that
+ are pinned into the BPF FS mount point as human-readable files useful
+ for debugging and introspection of BPF programs and maps.
+
+if BPF_PRELOAD
+config BPF_PRELOAD_UMD
+ tristate "bpf_preload kernel module with user mode driver"
+ depends on CC_CAN_LINK
+ depends on m || CC_CAN_LINK_STATIC
+ default m
+ help
+ This builds the bpf_preload kernel module with an embedded user mode driver.
+endif
diff --git a/kernel/bpf/preload/Makefile b/kernel/bpf/preload/Makefile
new file mode 100644
index 000000000000..23ee310b6eb4
--- /dev/null
+++ b/kernel/bpf/preload/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0
+
+LIBBPF_SRCS = $(srctree)/tools/lib/bpf/
+LIBBPF_A = $(obj)/libbpf.a
+LIBBPF_OUT = $(abspath $(obj))
+
+$(LIBBPF_A):
+ $(Q)$(MAKE) -C $(LIBBPF_SRCS) OUTPUT=$(LIBBPF_OUT)/ $(LIBBPF_OUT)/libbpf.a
+
+userccflags += -I $(srctree)/tools/include/ -I $(srctree)/tools/include/uapi \
+ -I $(srctree)/tools/lib/ -Wno-unused-result
+
+userprogs := bpf_preload_umd
+
+clean-files := $(userprogs) bpf_helper_defs.h FEATURE-DUMP.libbpf staticobjs/ feature/
+
+bpf_preload_umd-objs := iterators/iterators.o
+bpf_preload_umd-userldlibs := $(LIBBPF_A) -lelf -lz
+
+$(obj)/bpf_preload_umd: $(LIBBPF_A)
+
+$(obj)/bpf_preload_umd_blob.o: $(obj)/bpf_preload_umd
+
+obj-$(CONFIG_BPF_PRELOAD_UMD) += bpf_preload.o
+bpf_preload-objs += bpf_preload_kern.o bpf_preload_umd_blob.o
diff --git a/kernel/bpf/preload/bpf_preload.h b/kernel/bpf/preload/bpf_preload.h
new file mode 100644
index 000000000000..2f9932276f2e
--- /dev/null
+++ b/kernel/bpf/preload/bpf_preload.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_PRELOAD_H
+#define _BPF_PRELOAD_H
+
+#include <linux/usermode_driver.h>
+#include "iterators/bpf_preload_common.h"
+
+struct bpf_preload_ops {
+ struct umd_info info;
+ int (*preload)(struct bpf_preload_info *);
+ int (*finish)(void);
+ struct module *owner;
+};
+extern struct bpf_preload_ops *bpf_preload_ops;
+#define BPF_PRELOAD_LINKS 2
+#endif
diff --git a/kernel/bpf/preload/bpf_preload_kern.c b/kernel/bpf/preload/bpf_preload_kern.c
new file mode 100644
index 000000000000..79c5772465f1
--- /dev/null
+++ b/kernel/bpf/preload/bpf_preload_kern.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pid.h>
+#include <linux/fs.h>
+#include <linux/sched/signal.h>
+#include "bpf_preload.h"
+
+extern char bpf_preload_umd_start;
+extern char bpf_preload_umd_end;
+
+static int preload(struct bpf_preload_info *obj);
+static int finish(void);
+
+static struct bpf_preload_ops umd_ops = {
+ .info.driver_name = "bpf_preload",
+ .preload = preload,
+ .finish = finish,
+ .owner = THIS_MODULE,
+};
+
+static int preload(struct bpf_preload_info *obj)
+{
+ int magic = BPF_PRELOAD_START;
+ loff_t pos = 0;
+ int i, err;
+ ssize_t n;
+
+ err = fork_usermode_driver(&umd_ops.info);
+ if (err)
+ return err;
+
+ /* send the start magic to let UMD proceed with loading BPF progs */
+ n = kernel_write(umd_ops.info.pipe_to_umh,
+ &magic, sizeof(magic), &pos);
+ if (n != sizeof(magic))
+ return -EPIPE;
+
+ /* receive bpf_link IDs and names from UMD */
+ pos = 0;
+ for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
+ n = kernel_read(umd_ops.info.pipe_from_umh,
+ &obj[i], sizeof(*obj), &pos);
+ if (n != sizeof(*obj))
+ return -EPIPE;
+ }
+ return 0;
+}
+
+static int finish(void)
+{
+ int magic = BPF_PRELOAD_END;
+ struct pid *tgid;
+ loff_t pos = 0;
+ ssize_t n;
+
+ /* send the last magic to UMD. It will do a normal exit. */
+ n = kernel_write(umd_ops.info.pipe_to_umh,
+ &magic, sizeof(magic), &pos);
+ if (n != sizeof(magic))
+ return -EPIPE;
+ tgid = umd_ops.info.tgid;
+ wait_event(tgid->wait_pidfd, thread_group_exited(tgid));
+ umd_ops.info.tgid = NULL;
+ return 0;
+}
+
+static int __init load_umd(void)
+{
+ int err;
+
+ err = umd_load_blob(&umd_ops.info, &bpf_preload_umd_start,
+ &bpf_preload_umd_end - &bpf_preload_umd_start);
+ if (err)
+ return err;
+ bpf_preload_ops = &umd_ops;
+ return err;
+}
+
+static void __exit fini_umd(void)
+{
+ bpf_preload_ops = NULL;
+ /* kill UMD in case it's still there due to earlier error */
+ kill_pid(umd_ops.info.tgid, SIGKILL, 1);
+ umd_ops.info.tgid = NULL;
+ umd_unload_blob(&umd_ops.info);
+}
+late_initcall(load_umd);
+module_exit(fini_umd);
+MODULE_LICENSE("GPL");
diff --git a/kernel/bpf/preload/bpf_preload_umd_blob.S b/kernel/bpf/preload/bpf_preload_umd_blob.S
new file mode 100644
index 000000000000..f1f40223b5c3
--- /dev/null
+++ b/kernel/bpf/preload/bpf_preload_umd_blob.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+ .section .init.rodata, "a"
+ .global bpf_preload_umd_start
+bpf_preload_umd_start:
+ .incbin "kernel/bpf/preload/bpf_preload_umd"
+ .global bpf_preload_umd_end
+bpf_preload_umd_end:
diff --git a/kernel/bpf/preload/iterators/.gitignore b/kernel/bpf/preload/iterators/.gitignore
new file mode 100644
index 000000000000..ffdb70230c8b
--- /dev/null
+++ b/kernel/bpf/preload/iterators/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/.output
diff --git a/kernel/bpf/preload/iterators/Makefile b/kernel/bpf/preload/iterators/Makefile
new file mode 100644
index 000000000000..28fa8c1440f4
--- /dev/null
+++ b/kernel/bpf/preload/iterators/Makefile
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: GPL-2.0
+OUTPUT := .output
+CLANG ?= clang
+LLC ?= llc
+LLVM_STRIP ?= llvm-strip
+DEFAULT_BPFTOOL := $(OUTPUT)/sbin/bpftool
+BPFTOOL ?= $(DEFAULT_BPFTOOL)
+LIBBPF_SRC := $(abspath ../../../../tools/lib/bpf)
+BPFOBJ := $(OUTPUT)/libbpf.a
+BPF_INCLUDE := $(OUTPUT)
+INCLUDES := -I$(OUTPUT) -I$(BPF_INCLUDE) -I$(abspath ../../../../tools/lib) \
+ -I$(abspath ../../../../tools/include/uapi)
+CFLAGS := -g -Wall
+
+abs_out := $(abspath $(OUTPUT))
+ifeq ($(V),1)
+Q =
+msg =
+else
+Q = @
+msg = @printf ' %-8s %s%s\n' "$(1)" "$(notdir $(2))" "$(if $(3), $(3))";
+MAKEFLAGS += --no-print-directory
+submake_extras := feature_display=0
+endif
+
+.DELETE_ON_ERROR:
+
+.PHONY: all clean
+
+all: iterators.skel.h
+
+clean:
+ $(call msg,CLEAN)
+ $(Q)rm -rf $(OUTPUT) iterators
+
+iterators.skel.h: $(OUTPUT)/iterators.bpf.o | $(BPFTOOL)
+ $(call msg,GEN-SKEL,$@)
+ $(Q)$(BPFTOOL) gen skeleton $< > $@
+
+
+$(OUTPUT)/iterators.bpf.o: iterators.bpf.c $(BPFOBJ) | $(OUTPUT)
+ $(call msg,BPF,$@)
+ $(Q)$(CLANG) -g -O2 -target bpf $(INCLUDES) \
+ -c $(filter %.c,$^) -o $@ && \
+ $(LLVM_STRIP) -g $@
+
+$(OUTPUT):
+ $(call msg,MKDIR,$@)
+ $(Q)mkdir -p $(OUTPUT)
+
+$(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)
+ $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) \
+ OUTPUT=$(abspath $(dir $@))/ $(abspath $@)
+
+$(DEFAULT_BPFTOOL):
+ $(Q)$(MAKE) $(submake_extras) -C ../../../../tools/bpf/bpftool \
+ prefix= OUTPUT=$(abs_out)/ DESTDIR=$(abs_out) install
diff --git a/kernel/bpf/preload/iterators/README b/kernel/bpf/preload/iterators/README
new file mode 100644
index 000000000000..7fd6d39a9ad2
--- /dev/null
+++ b/kernel/bpf/preload/iterators/README
@@ -0,0 +1,4 @@
+WARNING:
+If you change "iterators.bpf.c", run "make -j" in this directory to rebuild "iterators.skel.h".
+Make sure to have clang 10 installed.
+See Documentation/bpf/bpf_devel_QA.rst
diff --git a/kernel/bpf/preload/iterators/bpf_preload_common.h b/kernel/bpf/preload/iterators/bpf_preload_common.h
new file mode 100644
index 000000000000..8464d1a48c05
--- /dev/null
+++ b/kernel/bpf/preload/iterators/bpf_preload_common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _BPF_PRELOAD_COMMON_H
+#define _BPF_PRELOAD_COMMON_H
+
+#define BPF_PRELOAD_START 0x5555
+#define BPF_PRELOAD_END 0xAAAA
+
+struct bpf_preload_info {
+ char link_name[16];
+ int link_id;
+};
+
+#endif
diff --git a/kernel/bpf/preload/iterators/iterators.bpf.c b/kernel/bpf/preload/iterators/iterators.bpf.c
new file mode 100644
index 000000000000..52aa7b38e8b8
--- /dev/null
+++ b/kernel/bpf/preload/iterators/iterators.bpf.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)
+struct seq_file;
+struct bpf_iter_meta {
+ struct seq_file *seq;
+ __u64 session_id;
+ __u64 seq_num;
+};
+
+struct bpf_map {
+ __u32 id;
+ char name[16];
+ __u32 max_entries;
+};
+
+struct bpf_iter__bpf_map {
+ struct bpf_iter_meta *meta;
+ struct bpf_map *map;
+};
+
+struct btf_type {
+ __u32 name_off;
+};
+
+struct btf_header {
+ __u32 str_len;
+};
+
+struct btf {
+ const char *strings;
+ struct btf_type **types;
+ struct btf_header hdr;
+};
+
+struct bpf_prog_aux {
+ __u32 id;
+ char name[16];
+ const char *attach_func_name;
+ struct bpf_prog *dst_prog;
+ struct bpf_func_info *func_info;
+ struct btf *btf;
+};
+
+struct bpf_prog {
+ struct bpf_prog_aux *aux;
+};
+
+struct bpf_iter__bpf_prog {
+ struct bpf_iter_meta *meta;
+ struct bpf_prog *prog;
+};
+#pragma clang attribute pop
+
+static const char *get_name(struct btf *btf, long btf_id, const char *fallback)
+{
+ struct btf_type **types, *t;
+ unsigned int name_off;
+ const char *str;
+
+ if (!btf)
+ return fallback;
+ str = btf->strings;
+ types = btf->types;
+ bpf_probe_read_kernel(&t, sizeof(t), types + btf_id);
+ name_off = BPF_CORE_READ(t, name_off);
+ if (name_off >= btf->hdr.str_len)
+ return fallback;
+ return str + name_off;
+}
+
+SEC("iter/bpf_map")
+int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ __u64 seq_num = ctx->meta->seq_num;
+ struct bpf_map *map = ctx->map;
+
+ if (!map)
+ return 0;
+
+ if (seq_num == 0)
+ BPF_SEQ_PRINTF(seq, " id name max_entries\n");
+
+ BPF_SEQ_PRINTF(seq, "%4u %-16s%6d\n", map->id, map->name, map->max_entries);
+ return 0;
+}
+
+SEC("iter/bpf_prog")
+int dump_bpf_prog(struct bpf_iter__bpf_prog *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ __u64 seq_num = ctx->meta->seq_num;
+ struct bpf_prog *prog = ctx->prog;
+ struct bpf_prog_aux *aux;
+
+ if (!prog)
+ return 0;
+
+ aux = prog->aux;
+ if (seq_num == 0)
+ BPF_SEQ_PRINTF(seq, " id name attached\n");
+
+ BPF_SEQ_PRINTF(seq, "%4u %-16s %s %s\n", aux->id,
+ get_name(aux->btf, aux->func_info[0].type_id, aux->name),
+ aux->attach_func_name, aux->dst_prog->aux->name);
+ return 0;
+}
+char LICENSE[] SEC("license") = "GPL";
diff --git a/kernel/bpf/preload/iterators/iterators.c b/kernel/bpf/preload/iterators/iterators.c
new file mode 100644
index 000000000000..b7ff87939172
--- /dev/null
+++ b/kernel/bpf/preload/iterators/iterators.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <argp.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/resource.h>
+#include <bpf/libbpf.h>
+#include <bpf/bpf.h>
+#include <sys/mount.h>
+#include "iterators.skel.h"
+#include "bpf_preload_common.h"
+
+int to_kernel = -1;
+int from_kernel = 0;
+
+static int send_link_to_kernel(struct bpf_link *link, const char *link_name)
+{
+ struct bpf_preload_info obj = {};
+ struct bpf_link_info info = {};
+ __u32 info_len = sizeof(info);
+ int err;
+
+ err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
+ if (err)
+ return err;
+ obj.link_id = info.id;
+ if (strlen(link_name) >= sizeof(obj.link_name))
+ return -E2BIG;
+ strcpy(obj.link_name, link_name);
+ if (write(to_kernel, &obj, sizeof(obj)) != sizeof(obj))
+ return -EPIPE;
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ struct rlimit rlim = { RLIM_INFINITY, RLIM_INFINITY };
+ struct iterators_bpf *skel;
+ int err, magic;
+ int debug_fd;
+
+ debug_fd = open("/dev/console", O_WRONLY | O_NOCTTY | O_CLOEXEC);
+ if (debug_fd < 0)
+ return 1;
+ to_kernel = dup(1);
+ close(1);
+ dup(debug_fd);
+ /* now stdout points to /dev/console */
+
+ read(from_kernel, &magic, sizeof(magic));
+ if (magic != BPF_PRELOAD_START) {
+ printf("bad start magic %d\n", magic);
+ return 1;
+ }
+ setrlimit(RLIMIT_MEMLOCK, &rlim);
+ /* libbpf opens BPF object and loads it into the kernel */
+ skel = iterators_bpf__open_and_load();
+ if (!skel) {
+ /* iterators.skel.h is little endian.
+ * libbpf doesn't support automatic little->big conversion
+ * of BPF bytecode yet.
+ * The program load will fail in that case.
+ */
+ printf("Failed to load BPF skeleton: could be wrong endianness\n");
+ return 1;
+ }
+ err = iterators_bpf__attach(skel);
+ if (err)
+ goto cleanup;
+
+ /* send two bpf_link IDs with names to the kernel */
+ err = send_link_to_kernel(skel->links.dump_bpf_map, "maps.debug");
+ if (err)
+ goto cleanup;
+ err = send_link_to_kernel(skel->links.dump_bpf_prog, "progs.debug");
+ if (err)
+ goto cleanup;
+
+ /* The kernel will proceed with pinning the links in bpffs.
+ * The UMD waits on a read from the pipe.
+ */
+ read(from_kernel, &magic, sizeof(magic));
+ if (magic != BPF_PRELOAD_END) {
+ printf("bad final magic %d\n", magic);
+ err = -EINVAL;
+ }
+cleanup:
+ iterators_bpf__destroy(skel);
+
+ return err != 0;
+}
diff --git a/kernel/bpf/preload/iterators/iterators.skel.h b/kernel/bpf/preload/iterators/iterators.skel.h
new file mode 100644
index 000000000000..cf9a6a94b3a4
--- /dev/null
+++ b/kernel/bpf/preload/iterators/iterators.skel.h
@@ -0,0 +1,412 @@
+/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
+
+/* THIS FILE IS AUTOGENERATED! */
+#ifndef __ITERATORS_BPF_SKEL_H__
+#define __ITERATORS_BPF_SKEL_H__
+
+#include <stdlib.h>
+#include <bpf/libbpf.h>
+
+struct iterators_bpf {
+ struct bpf_object_skeleton *skeleton;
+ struct bpf_object *obj;
+ struct {
+ struct bpf_map *rodata;
+ } maps;
+ struct {
+ struct bpf_program *dump_bpf_map;
+ struct bpf_program *dump_bpf_prog;
+ } progs;
+ struct {
+ struct bpf_link *dump_bpf_map;
+ struct bpf_link *dump_bpf_prog;
+ } links;
+ struct iterators_bpf__rodata {
+ char dump_bpf_map____fmt[35];
+ char dump_bpf_map____fmt_1[14];
+ char dump_bpf_prog____fmt[32];
+ char dump_bpf_prog____fmt_2[17];
+ } *rodata;
+};
+
+static void
+iterators_bpf__destroy(struct iterators_bpf *obj)
+{
+ if (!obj)
+ return;
+ if (obj->skeleton)
+ bpf_object__destroy_skeleton(obj->skeleton);
+ free(obj);
+}
+
+static inline int
+iterators_bpf__create_skeleton(struct iterators_bpf *obj);
+
+static inline struct iterators_bpf *
+iterators_bpf__open_opts(const struct bpf_object_open_opts *opts)
+{
+ struct iterators_bpf *obj;
+
+ obj = (struct iterators_bpf *)calloc(1, sizeof(*obj));
+ if (!obj)
+ return NULL;
+ if (iterators_bpf__create_skeleton(obj))
+ goto err;
+ if (bpf_object__open_skeleton(obj->skeleton, opts))
+ goto err;
+
+ return obj;
+err:
+ iterators_bpf__destroy(obj);
+ return NULL;
+}
+
+static inline struct iterators_bpf *
+iterators_bpf__open(void)
+{
+ return iterators_bpf__open_opts(NULL);
+}
+
+static inline int
+iterators_bpf__load(struct iterators_bpf *obj)
+{
+ return bpf_object__load_skeleton(obj->skeleton);
+}
+
+static inline struct iterators_bpf *
+iterators_bpf__open_and_load(void)
+{
+ struct iterators_bpf *obj;
+
+ obj = iterators_bpf__open();
+ if (!obj)
+ return NULL;
+ if (iterators_bpf__load(obj)) {
+ iterators_bpf__destroy(obj);
+ return NULL;
+ }
+ return obj;
+}
+
+static inline int
+iterators_bpf__attach(struct iterators_bpf *obj)
+{
+ return bpf_object__attach_skeleton(obj->skeleton);
+}
+
+static inline void
+iterators_bpf__detach(struct iterators_bpf *obj)
+{
+ return bpf_object__detach_skeleton(obj->skeleton);
+}
+
+static inline int
+iterators_bpf__create_skeleton(struct iterators_bpf *obj)
+{
+ struct bpf_object_skeleton *s;
+
+ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));
+ if (!s)
+ return -1;
+ obj->skeleton = s;
+
+ s->sz = sizeof(*s);
+ s->name = "iterators_bpf";
+ s->obj = &obj->obj;
+
+ /* maps */
+ s->map_cnt = 1;
+ s->map_skel_sz = sizeof(*s->maps);
+ s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);
+ if (!s->maps)
+ goto err;
+
+ s->maps[0].name = "iterator.rodata";
+ s->maps[0].map = &obj->maps.rodata;
+ s->maps[0].mmaped = (void **)&obj->rodata;
+
+ /* programs */
+ s->prog_cnt = 2;
+ s->prog_skel_sz = sizeof(*s->progs);
+ s->progs = (struct bpf_prog_skeleton *)calloc(s->prog_cnt, s->prog_skel_sz);
+ if (!s->progs)
+ goto err;
+
+ s->progs[0].name = "dump_bpf_map";
+ s->progs[0].prog = &obj->progs.dump_bpf_map;
+ s->progs[0].link = &obj->links.dump_bpf_map;
+
+ s->progs[1].name = "dump_bpf_prog";
+ s->progs[1].prog = &obj->progs.dump_bpf_prog;
+ s->progs[1].link = &obj->links.dump_bpf_prog;
+
+ s->data_sz = 7176;
+ s->data = (void *)"\
+\x7f\x45\x4c\x46\x02\x01\x01\0\0\0\0\0\0\0\0\0\x01\0\xf7\0\x01\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\x48\x18\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\x40\0\x0f\0\
+\x0e\0\x79\x12\0\0\0\0\0\0\x79\x26\0\0\0\0\0\0\x79\x17\x08\0\0\0\0\0\x15\x07\
+\x1a\0\0\0\0\0\x79\x21\x10\0\0\0\0\0\x55\x01\x08\0\0\0\0\0\xbf\xa4\0\0\0\0\0\0\
+\x07\x04\0\0\xe8\xff\xff\xff\xbf\x61\0\0\0\0\0\0\x18\x02\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\xb7\x03\0\0\x23\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x7e\0\0\0\x61\x71\0\
+\0\0\0\0\0\x7b\x1a\xe8\xff\0\0\0\0\xb7\x01\0\0\x04\0\0\0\xbf\x72\0\0\0\0\0\0\
+\x0f\x12\0\0\0\0\0\0\x7b\x2a\xf0\xff\0\0\0\0\x61\x71\x14\0\0\0\0\0\x7b\x1a\xf8\
+\xff\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\xe8\xff\xff\xff\xbf\x61\0\0\0\0\0\
+\0\x18\x02\0\0\x23\0\0\0\0\0\0\0\0\0\0\0\xb7\x03\0\0\x0e\0\0\0\xb7\x05\0\0\x18\
+\0\0\0\x85\0\0\0\x7e\0\0\0\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\x79\x12\0\0\0\0\
+\0\0\x79\x26\0\0\0\0\0\0\x79\x11\x08\0\0\0\0\0\x15\x01\x3b\0\0\0\0\0\x79\x17\0\
+\0\0\0\0\0\x79\x21\x10\0\0\0\0\0\x55\x01\x08\0\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\
+\x04\0\0\xd0\xff\xff\xff\xbf\x61\0\0\0\0\0\0\x18\x02\0\0\x31\0\0\0\0\0\0\0\0\0\
+\0\0\xb7\x03\0\0\x20\0\0\0\xb7\x05\0\0\0\0\0\0\x85\0\0\0\x7e\0\0\0\x7b\x6a\xc8\
+\xff\0\0\0\0\x61\x71\0\0\0\0\0\0\x7b\x1a\xd0\xff\0\0\0\0\xb7\x03\0\0\x04\0\0\0\
+\xbf\x79\0\0\0\0\0\0\x0f\x39\0\0\0\0\0\0\x79\x71\x28\0\0\0\0\0\x79\x78\x30\0\0\
+\0\0\0\x15\x08\x18\0\0\0\0\0\xb7\x02\0\0\0\0\0\0\x0f\x21\0\0\0\0\0\0\x61\x11\
+\x04\0\0\0\0\0\x79\x83\x08\0\0\0\0\0\x67\x01\0\0\x03\0\0\0\x0f\x13\0\0\0\0\0\0\
+\x79\x86\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\xf8\xff\xff\xff\xb7\x02\0\
+\0\x08\0\0\0\x85\0\0\0\x71\0\0\0\xb7\x01\0\0\0\0\0\0\x79\xa3\xf8\xff\0\0\0\0\
+\x0f\x13\0\0\0\0\0\0\xbf\xa1\0\0\0\0\0\0\x07\x01\0\0\xf4\xff\xff\xff\xb7\x02\0\
+\0\x04\0\0\0\x85\0\0\0\x71\0\0\0\xb7\x03\0\0\x04\0\0\0\x61\xa1\xf4\xff\0\0\0\0\
+\x61\x82\x10\0\0\0\0\0\x3d\x21\x02\0\0\0\0\0\x0f\x16\0\0\0\0\0\0\xbf\x69\0\0\0\
+\0\0\0\x7b\x9a\xd8\xff\0\0\0\0\x79\x71\x18\0\0\0\0\0\x7b\x1a\xe0\xff\0\0\0\0\
+\x79\x71\x20\0\0\0\0\0\x79\x11\0\0\0\0\0\0\x0f\x31\0\0\0\0\0\0\x7b\x1a\xe8\xff\
+\0\0\0\0\xbf\xa4\0\0\0\0\0\0\x07\x04\0\0\xd0\xff\xff\xff\x79\xa1\xc8\xff\0\0\0\
+\0\x18\x02\0\0\x51\0\0\0\0\0\0\0\0\0\0\0\xb7\x03\0\0\x11\0\0\0\xb7\x05\0\0\x20\
+\0\0\0\x85\0\0\0\x7e\0\0\0\xb7\0\0\0\0\0\0\0\x95\0\0\0\0\0\0\0\x20\x20\x69\x64\
+\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x6d\
+\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x0a\0\x25\x34\x75\x20\x25\x2d\x31\x36\
+\x73\x25\x36\x64\x0a\0\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\x74\x61\x63\x68\x65\x64\x0a\0\x25\x34\
+\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\x25\x73\x0a\0\x47\x50\x4c\0\x9f\
+\xeb\x01\0\x18\0\0\0\0\0\0\0\x1c\x04\0\0\x1c\x04\0\0\x09\x05\0\0\0\0\0\0\0\0\0\
+\x02\x02\0\0\0\x01\0\0\0\x02\0\0\x04\x10\0\0\0\x13\0\0\0\x03\0\0\0\0\0\0\0\x18\
+\0\0\0\x04\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\x02\x08\0\0\0\0\0\0\0\0\0\0\x02\x0d\0\
+\0\0\0\0\0\0\x01\0\0\x0d\x06\0\0\0\x1c\0\0\0\x01\0\0\0\x20\0\0\0\0\0\0\x01\x04\
+\0\0\0\x20\0\0\x01\x24\0\0\0\x01\0\0\x0c\x05\0\0\0\xaf\0\0\0\x03\0\0\x04\x18\0\
+\0\0\xbd\0\0\0\x09\0\0\0\0\0\0\0\xc1\0\0\0\x0b\0\0\0\x40\0\0\0\xcc\0\0\0\x0b\0\
+\0\0\x80\0\0\0\0\0\0\0\0\0\0\x02\x0a\0\0\0\xd4\0\0\0\0\0\0\x07\0\0\0\0\xdd\0\0\
+\0\0\0\0\x08\x0c\0\0\0\xe3\0\0\0\0\0\0\x01\x08\0\0\0\x40\0\0\0\xa4\x01\0\0\x03\
+\0\0\x04\x18\0\0\0\xac\x01\0\0\x0e\0\0\0\0\0\0\0\xaf\x01\0\0\x11\0\0\0\x20\0\0\
+\0\xb4\x01\0\0\x0e\0\0\0\xa0\0\0\0\xc0\x01\0\0\0\0\0\x08\x0f\0\0\0\xc6\x01\0\0\
+\0\0\0\x01\x04\0\0\0\x20\0\0\0\xd3\x01\0\0\0\0\0\x01\x01\0\0\0\x08\0\0\x01\0\0\
+\0\0\0\0\0\x03\0\0\0\0\x10\0\0\0\x12\0\0\0\x10\0\0\0\xd8\x01\0\0\0\0\0\x01\x04\
+\0\0\0\x20\0\0\0\0\0\0\0\0\0\0\x02\x14\0\0\0\x3c\x02\0\0\x02\0\0\x04\x10\0\0\0\
+\x13\0\0\0\x03\0\0\0\0\0\0\0\x4f\x02\0\0\x15\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\x02\
+\x18\0\0\0\0\0\0\0\x01\0\0\x0d\x06\0\0\0\x1c\0\0\0\x13\0\0\0\x54\x02\0\0\x01\0\
+\0\x0c\x16\0\0\0\xa0\x02\0\0\x01\0\0\x04\x08\0\0\0\xa9\x02\0\0\x19\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x02\x1a\0\0\0\xfa\x02\0\0\x06\0\0\x04\x38\0\0\0\xac\x01\0\0\
+\x0e\0\0\0\0\0\0\0\xaf\x01\0\0\x11\0\0\0\x20\0\0\0\x07\x03\0\0\x1b\0\0\0\xc0\0\
+\0\0\x18\x03\0\0\x15\0\0\0\0\x01\0\0\x21\x03\0\0\x1d\0\0\0\x40\x01\0\0\x2b\x03\
+\0\0\x1e\0\0\0\x80\x01\0\0\0\0\0\0\0\0\0\x02\x1c\0\0\0\0\0\0\0\0\0\0\x0a\x10\0\
+\0\0\0\0\0\0\0\0\0\x02\x1f\0\0\0\0\0\0\0\0\0\0\x02\x20\0\0\0\x75\x03\0\0\x02\0\
+\0\x04\x08\0\0\0\x83\x03\0\0\x0e\0\0\0\0\0\0\0\x8c\x03\0\0\x0e\0\0\0\x20\0\0\0\
+\x2b\x03\0\0\x03\0\0\x04\x18\0\0\0\x96\x03\0\0\x1b\0\0\0\0\0\0\0\x9e\x03\0\0\
+\x21\0\0\0\x40\0\0\0\xa4\x03\0\0\x23\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\x02\x22\0\0\
+\0\0\0\0\0\0\0\0\x02\x24\0\0\0\xa8\x03\0\0\x01\0\0\x04\x04\0\0\0\xb3\x03\0\0\
+\x0e\0\0\0\0\0\0\0\x1c\x04\0\0\x01\0\0\x04\x04\0\0\0\x25\x04\0\0\x0e\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x1c\0\0\0\x12\0\0\0\x23\0\0\0\x9b\x04\0\0\0\0\0\
+\x0e\x25\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x1c\0\0\0\x12\0\0\0\x0e\0\0\0\
+\xaf\x04\0\0\0\0\0\x0e\x27\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\0\0\0\x1c\0\0\0\
+\x12\0\0\0\x20\0\0\0\xc5\x04\0\0\0\0\0\x0e\x29\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\
+\0\0\0\0\x1c\0\0\0\x12\0\0\0\x11\0\0\0\xda\x04\0\0\0\0\0\x0e\x2b\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x03\0\0\0\0\x10\0\0\0\x12\0\0\0\x04\0\0\0\xf1\x04\0\0\0\0\0\x0e\
+\x2d\0\0\0\x01\0\0\0\xf9\x04\0\0\x04\0\0\x0f\0\0\0\0\x26\0\0\0\0\0\0\0\x23\0\0\
+\0\x28\0\0\0\x23\0\0\0\x0e\0\0\0\x2a\0\0\0\x31\0\0\0\x20\0\0\0\x2c\0\0\0\x51\0\
+\0\0\x11\0\0\0\x01\x05\0\0\x01\0\0\x0f\0\0\0\0\x2e\0\0\0\0\0\0\0\x04\0\0\0\0\
+\x62\x70\x66\x5f\x69\x74\x65\x72\x5f\x5f\x62\x70\x66\x5f\x6d\x61\x70\0\x6d\x65\
+\x74\x61\0\x6d\x61\x70\0\x63\x74\x78\0\x69\x6e\x74\0\x64\x75\x6d\x70\x5f\x62\
+\x70\x66\x5f\x6d\x61\x70\0\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x6d\x61\x70\0\
+\x30\x3a\x30\0\x2f\x68\x6f\x6d\x65\x2f\x61\x6c\x72\x75\x61\x2f\x62\x75\x69\x6c\
+\x64\x2f\x6c\x69\x6e\x75\x78\x2f\x6b\x65\x72\x6e\x65\x6c\x2f\x62\x70\x66\x2f\
+\x70\x72\x65\x6c\x6f\x61\x64\x2f\x69\x74\x65\x72\x61\x74\x6f\x72\x73\x2f\x69\
+\x74\x65\x72\x61\x74\x6f\x72\x73\x2e\x62\x70\x66\x2e\x63\0\x09\x73\x74\x72\x75\
+\x63\x74\x20\x73\x65\x71\x5f\x66\x69\x6c\x65\x20\x2a\x73\x65\x71\x20\x3d\x20\
+\x63\x74\x78\x2d\x3e\x6d\x65\x74\x61\x2d\x3e\x73\x65\x71\x3b\0\x62\x70\x66\x5f\
+\x69\x74\x65\x72\x5f\x6d\x65\x74\x61\0\x73\x65\x71\0\x73\x65\x73\x73\x69\x6f\
+\x6e\x5f\x69\x64\0\x73\x65\x71\x5f\x6e\x75\x6d\0\x73\x65\x71\x5f\x66\x69\x6c\
+\x65\0\x5f\x5f\x75\x36\x34\0\x6c\x6f\x6e\x67\x20\x6c\x6f\x6e\x67\x20\x75\x6e\
+\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x30\x3a\x31\0\x09\x73\x74\x72\x75\
+\x63\x74\x20\x62\x70\x66\x5f\x6d\x61\x70\x20\x2a\x6d\x61\x70\x20\x3d\x20\x63\
+\x74\x78\x2d\x3e\x6d\x61\x70\x3b\0\x09\x69\x66\x20\x28\x21\x6d\x61\x70\x29\0\
+\x30\x3a\x32\0\x09\x5f\x5f\x75\x36\x34\x20\x73\x65\x71\x5f\x6e\x75\x6d\x20\x3d\
+\x20\x63\x74\x78\x2d\x3e\x6d\x65\x74\x61\x2d\x3e\x73\x65\x71\x5f\x6e\x75\x6d\
+\x3b\0\x09\x69\x66\x20\x28\x73\x65\x71\x5f\x6e\x75\x6d\x20\x3d\x3d\x20\x30\x29\
+\0\x09\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\
+\x71\x2c\x20\x22\x20\x20\x69\x64\x20\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\
+\x20\x20\x20\x20\x20\x20\x20\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x5c\
+\x6e\x22\x29\x3b\0\x62\x70\x66\x5f\x6d\x61\x70\0\x69\x64\0\x6e\x61\x6d\x65\0\
+\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\0\x5f\x5f\x75\x33\x32\0\x75\x6e\
+\x73\x69\x67\x6e\x65\x64\x20\x69\x6e\x74\0\x63\x68\x61\x72\0\x5f\x5f\x41\x52\
+\x52\x41\x59\x5f\x53\x49\x5a\x45\x5f\x54\x59\x50\x45\x5f\x5f\0\x09\x42\x50\x46\
+\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x25\
+\x34\x75\x20\x25\x2d\x31\x36\x73\x25\x36\x64\x5c\x6e\x22\x2c\x20\x6d\x61\x70\
+\x2d\x3e\x69\x64\x2c\x20\x6d\x61\x70\x2d\x3e\x6e\x61\x6d\x65\x2c\x20\x6d\x61\
+\x70\x2d\x3e\x6d\x61\x78\x5f\x65\x6e\x74\x72\x69\x65\x73\x29\x3b\0\x7d\0\x62\
+\x70\x66\x5f\x69\x74\x65\x72\x5f\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x70\x72\
+\x6f\x67\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x69\x74\x65\
+\x72\x2f\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x09\x73\x74\x72\x75\x63\x74\x20\x62\
+\x70\x66\x5f\x70\x72\x6f\x67\x20\x2a\x70\x72\x6f\x67\x20\x3d\x20\x63\x74\x78\
+\x2d\x3e\x70\x72\x6f\x67\x3b\0\x09\x69\x66\x20\x28\x21\x70\x72\x6f\x67\x29\0\
+\x62\x70\x66\x5f\x70\x72\x6f\x67\0\x61\x75\x78\0\x09\x61\x75\x78\x20\x3d\x20\
+\x70\x72\x6f\x67\x2d\x3e\x61\x75\x78\x3b\0\x09\x09\x42\x50\x46\x5f\x53\x45\x51\
+\x5f\x50\x52\x49\x4e\x54\x46\x28\x73\x65\x71\x2c\x20\x22\x20\x20\x69\x64\x20\
+\x6e\x61\x6d\x65\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x61\x74\
+\x74\x61\x63\x68\x65\x64\x5c\x6e\x22\x29\x3b\0\x62\x70\x66\x5f\x70\x72\x6f\x67\
+\x5f\x61\x75\x78\0\x61\x74\x74\x61\x63\x68\x5f\x66\x75\x6e\x63\x5f\x6e\x61\x6d\
+\x65\0\x64\x73\x74\x5f\x70\x72\x6f\x67\0\x66\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\
+\x62\x74\x66\0\x09\x42\x50\x46\x5f\x53\x45\x51\x5f\x50\x52\x49\x4e\x54\x46\x28\
+\x73\x65\x71\x2c\x20\x22\x25\x34\x75\x20\x25\x2d\x31\x36\x73\x20\x25\x73\x20\
+\x25\x73\x5c\x6e\x22\x2c\x20\x61\x75\x78\x2d\x3e\x69\x64\x2c\0\x30\x3a\x34\0\
+\x30\x3a\x35\0\x09\x69\x66\x20\x28\x21\x62\x74\x66\x29\0\x62\x70\x66\x5f\x66\
+\x75\x6e\x63\x5f\x69\x6e\x66\x6f\0\x69\x6e\x73\x6e\x5f\x6f\x66\x66\0\x74\x79\
+\x70\x65\x5f\x69\x64\0\x30\0\x73\x74\x72\x69\x6e\x67\x73\0\x74\x79\x70\x65\x73\
+\0\x68\x64\x72\0\x62\x74\x66\x5f\x68\x65\x61\x64\x65\x72\0\x73\x74\x72\x5f\x6c\
+\x65\x6e\0\x09\x74\x79\x70\x65\x73\x20\x3d\x20\x62\x74\x66\x2d\x3e\x74\x79\x70\
+\x65\x73\x3b\0\x09\x62\x70\x66\x5f\x70\x72\x6f\x62\x65\x5f\x72\x65\x61\x64\x5f\
+\x6b\x65\x72\x6e\x65\x6c\x28\x26\x74\x2c\x20\x73\x69\x7a\x65\x6f\x66\x28\x74\
+\x29\x2c\x20\x74\x79\x70\x65\x73\x20\x2b\x20\x62\x74\x66\x5f\x69\x64\x29\x3b\0\
+\x09\x73\x74\x72\x20\x3d\x20\x62\x74\x66\x2d\x3e\x73\x74\x72\x69\x6e\x67\x73\
+\x3b\0\x62\x74\x66\x5f\x74\x79\x70\x65\0\x6e\x61\x6d\x65\x5f\x6f\x66\x66\0\x09\
+\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x20\x3d\x20\x42\x50\x46\x5f\x43\x4f\x52\x45\
+\x5f\x52\x45\x41\x44\x28\x74\x2c\x20\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x29\x3b\0\
+\x30\x3a\x32\x3a\x30\0\x09\x69\x66\x20\x28\x6e\x61\x6d\x65\x5f\x6f\x66\x66\x20\
+\x3e\x3d\x20\x62\x74\x66\x2d\x3e\x68\x64\x72\x2e\x73\x74\x72\x5f\x6c\x65\x6e\
+\x29\0\x09\x72\x65\x74\x75\x72\x6e\x20\x73\x74\x72\x20\x2b\x20\x6e\x61\x6d\x65\
+\x5f\x6f\x66\x66\x3b\0\x30\x3a\x33\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\
+\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\x74\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\
+\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\x74\x2e\x31\0\x64\x75\x6d\x70\x5f\x62\x70\x66\
+\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\x5f\x66\x6d\x74\0\x64\x75\x6d\x70\x5f\x62\x70\
+\x66\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\x5f\x66\x6d\x74\x2e\x32\0\x4c\x49\x43\x45\
+\x4e\x53\x45\0\x2e\x72\x6f\x64\x61\x74\x61\0\x6c\x69\x63\x65\x6e\x73\x65\0\x9f\
+\xeb\x01\0\x20\0\0\0\0\0\0\0\x24\0\0\0\x24\0\0\0\x44\x02\0\0\x68\x02\0\0\xa4\
+\x01\0\0\x08\0\0\0\x31\0\0\0\x01\0\0\0\0\0\0\0\x07\0\0\0\x62\x02\0\0\x01\0\0\0\
+\0\0\0\0\x17\0\0\0\x10\0\0\0\x31\0\0\0\x09\0\0\0\0\0\0\0\x42\0\0\0\x87\0\0\0\
+\x1e\x40\x01\0\x08\0\0\0\x42\0\0\0\x87\0\0\0\x24\x40\x01\0\x10\0\0\0\x42\0\0\0\
+\xfe\0\0\0\x1d\x48\x01\0\x18\0\0\0\x42\0\0\0\x1f\x01\0\0\x06\x50\x01\0\x20\0\0\
+\0\x42\0\0\0\x2e\x01\0\0\x1d\x44\x01\0\x28\0\0\0\x42\0\0\0\x53\x01\0\0\x06\x5c\
+\x01\0\x38\0\0\0\x42\0\0\0\x66\x01\0\0\x03\x60\x01\0\x70\0\0\0\x42\0\0\0\xec\
+\x01\0\0\x02\x68\x01\0\xf0\0\0\0\x42\0\0\0\x3a\x02\0\0\x01\x70\x01\0\x62\x02\0\
+\0\x1a\0\0\0\0\0\0\0\x42\0\0\0\x87\0\0\0\x1e\x84\x01\0\x08\0\0\0\x42\0\0\0\x87\
+\0\0\0\x24\x84\x01\0\x10\0\0\0\x42\0\0\0\x70\x02\0\0\x1f\x8c\x01\0\x18\0\0\0\
+\x42\0\0\0\x94\x02\0\0\x06\x98\x01\0\x20\0\0\0\x42\0\0\0\xad\x02\0\0\x0e\xa4\
+\x01\0\x28\0\0\0\x42\0\0\0\x2e\x01\0\0\x1d\x88\x01\0\x30\0\0\0\x42\0\0\0\x53\
+\x01\0\0\x06\xa8\x01\0\x40\0\0\0\x42\0\0\0\xbf\x02\0\0\x03\xac\x01\0\x80\0\0\0\
+\x42\0\0\0\x2f\x03\0\0\x02\xb4\x01\0\xb8\0\0\0\x42\0\0\0\x6a\x03\0\0\x06\x08\
+\x01\0\xd0\0\0\0\x42\0\0\0\0\0\0\0\0\0\0\0\xd8\0\0\0\x42\0\0\0\xbb\x03\0\0\x0f\
+\x14\x01\0\xe0\0\0\0\x42\0\0\0\xd0\x03\0\0\x2d\x18\x01\0\xf0\0\0\0\x42\0\0\0\
+\x07\x04\0\0\x0d\x10\x01\0\0\x01\0\0\x42\0\0\0\0\0\0\0\0\0\0\0\x08\x01\0\0\x42\
+\0\0\0\xd0\x03\0\0\x02\x18\x01\0\x20\x01\0\0\x42\0\0\0\x2e\x04\0\0\x0d\x1c\x01\
+\0\x38\x01\0\0\x42\0\0\0\0\0\0\0\0\0\0\0\x40\x01\0\0\x42\0\0\0\x2e\x04\0\0\x0d\
+\x1c\x01\0\x58\x01\0\0\x42\0\0\0\x2e\x04\0\0\x0d\x1c\x01\0\x60\x01\0\0\x42\0\0\
+\0\x5c\x04\0\0\x1b\x20\x01\0\x68\x01\0\0\x42\0\0\0\x5c\x04\0\0\x06\x20\x01\0\
+\x70\x01\0\0\x42\0\0\0\x7f\x04\0\0\x0d\x28\x01\0\x78\x01\0\0\x42\0\0\0\0\0\0\0\
+\0\0\0\0\x80\x01\0\0\x42\0\0\0\x2f\x03\0\0\x02\xb4\x01\0\xf8\x01\0\0\x42\0\0\0\
+\x3a\x02\0\0\x01\xc4\x01\0\x10\0\0\0\x31\0\0\0\x07\0\0\0\0\0\0\0\x02\0\0\0\x3e\
+\0\0\0\0\0\0\0\x08\0\0\0\x08\0\0\0\x3e\0\0\0\0\0\0\0\x10\0\0\0\x02\0\0\0\xfa\0\
+\0\0\0\0\0\0\x20\0\0\0\x08\0\0\0\x2a\x01\0\0\0\0\0\0\x70\0\0\0\x0d\0\0\0\x3e\0\
+\0\0\0\0\0\0\x80\0\0\0\x0d\0\0\0\xfa\0\0\0\0\0\0\0\xa0\0\0\0\x0d\0\0\0\x2a\x01\
+\0\0\0\0\0\0\x62\x02\0\0\x12\0\0\0\0\0\0\0\x14\0\0\0\x3e\0\0\0\0\0\0\0\x08\0\0\
+\0\x08\0\0\0\x3e\0\0\0\0\0\0\0\x10\0\0\0\x14\0\0\0\xfa\0\0\0\0\0\0\0\x20\0\0\0\
+\x18\0\0\0\x3e\0\0\0\0\0\0\0\x28\0\0\0\x08\0\0\0\x2a\x01\0\0\0\0\0\0\x80\0\0\0\
+\x1a\0\0\0\x3e\0\0\0\0\0\0\0\x90\0\0\0\x1a\0\0\0\xfa\0\0\0\0\0\0\0\xa8\0\0\0\
+\x1a\0\0\0\x62\x03\0\0\0\0\0\0\xb0\0\0\0\x1a\0\0\0\x66\x03\0\0\0\0\0\0\xc0\0\0\
+\0\x1f\0\0\0\x94\x03\0\0\0\0\0\0\xd8\0\0\0\x20\0\0\0\xfa\0\0\0\0\0\0\0\xf0\0\0\
+\0\x20\0\0\0\x3e\0\0\0\0\0\0\0\x18\x01\0\0\x24\0\0\0\x3e\0\0\0\0\0\0\0\x50\x01\
+\0\0\x1a\0\0\0\xfa\0\0\0\0\0\0\0\x60\x01\0\0\x20\0\0\0\x56\x04\0\0\0\0\0\0\x88\
+\x01\0\0\x1a\0\0\0\x2a\x01\0\0\0\0\0\0\x98\x01\0\0\x1a\0\0\0\x97\x04\0\0\0\0\0\
+\0\xa0\x01\0\0\x18\0\0\0\x3e\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\x91\0\0\0\x04\0\xf1\xff\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xe6\0\0\
+\0\0\0\x02\0\x70\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd8\0\0\0\0\0\x02\0\xf0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\xdf\0\0\0\0\0\x03\0\x78\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\xd1\0\0\0\0\0\x03\0\x80\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xca\0\0\0\0\0\x03\0\
+\xf8\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x14\0\0\0\x01\0\x04\0\0\0\0\0\0\0\0\0\x23\
+\0\0\0\0\0\0\0\x04\x01\0\0\x01\0\x04\0\x23\0\0\0\0\0\0\0\x0e\0\0\0\0\0\0\0\x28\
+\0\0\0\x01\0\x04\0\x31\0\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\xed\0\0\0\x01\0\x04\0\
+\x51\0\0\0\0\0\0\0\x11\0\0\0\0\0\0\0\0\0\0\0\x03\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\x03\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x03\0\
+\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xc2\0\0\0\x11\0\x05\0\0\0\0\0\0\0\0\0\
+\x04\0\0\0\0\0\0\0\x3d\0\0\0\x12\0\x02\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\x5b\
+\0\0\0\x12\0\x03\0\0\0\0\0\0\0\0\0\x08\x02\0\0\0\0\0\0\x48\0\0\0\0\0\0\0\x01\0\
+\0\0\x0d\0\0\0\xc8\0\0\0\0\0\0\0\x01\0\0\0\x0d\0\0\0\x50\0\0\0\0\0\0\0\x01\0\0\
+\0\x0d\0\0\0\xd0\x01\0\0\0\0\0\0\x01\0\0\0\x0d\0\0\0\xf0\x03\0\0\0\0\0\0\x0a\0\
+\0\0\x0d\0\0\0\xfc\x03\0\0\0\0\0\0\x0a\0\0\0\x0d\0\0\0\x08\x04\0\0\0\0\0\0\x0a\
+\0\0\0\x0d\0\0\0\x14\x04\0\0\0\0\0\0\x0a\0\0\0\x0d\0\0\0\x2c\x04\0\0\0\0\0\0\0\
+\0\0\0\x0e\0\0\0\x2c\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\x3c\0\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x50\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\x60\0\0\0\0\0\0\0\0\0\0\0\x0b\0\
+\0\0\x70\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\
+\x90\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xa0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xb0\0\
+\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xc0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xd0\0\0\0\0\
+\0\0\0\0\0\0\0\x0b\0\0\0\xe8\0\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\xf8\0\0\0\0\0\0\0\
+\0\0\0\0\x0c\0\0\0\x08\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x18\x01\0\0\0\0\0\0\0\
+\0\0\0\x0c\0\0\0\x28\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x38\x01\0\0\0\0\0\0\0\0\
+\0\0\x0c\0\0\0\x48\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x58\x01\0\0\0\0\0\0\0\0\0\
+\0\x0c\0\0\0\x68\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x78\x01\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x88\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x98\x01\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\xa8\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\xb8\x01\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\xc8\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\xd8\x01\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\xe8\x01\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\xf8\x01\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x08\x02\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x18\x02\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x28\x02\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x38\x02\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x48\x02\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x58\x02\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x68\x02\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x78\x02\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x94\x02\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xa4\x02\0\0\0\0\0\0\0\0\0\0\
+\x0b\0\0\0\xb4\x02\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xc4\x02\0\0\0\0\0\0\0\0\0\0\
+\x0b\0\0\0\xd4\x02\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\xe4\x02\0\0\0\0\0\0\0\0\0\0\
+\x0b\0\0\0\xf4\x02\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\x0c\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x1c\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x2c\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x3c\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x4c\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x5c\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x6c\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x7c\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x8c\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x9c\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\xac\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\xbc\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\xcc\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\xdc\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\xec\x03\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\xfc\x03\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x0c\x04\0\0\0\0\0\0\0\0\0\0\
+\x0c\0\0\0\x1c\x04\0\0\0\0\0\0\0\0\0\0\x0c\0\0\0\x4d\x4e\x40\x41\x42\x43\x4c\0\
+\x2e\x74\x65\x78\x74\0\x2e\x72\x65\x6c\x2e\x42\x54\x46\x2e\x65\x78\x74\0\x64\
+\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\x74\0\x64\
+\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\x5f\x66\x6d\x74\0\
+\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\0\x2e\x72\x65\x6c\x69\x74\x65\
+\x72\x2f\x62\x70\x66\x5f\x6d\x61\x70\0\x64\x75\x6d\x70\x5f\x62\x70\x66\x5f\x70\
+\x72\x6f\x67\0\x2e\x72\x65\x6c\x69\x74\x65\x72\x2f\x62\x70\x66\x5f\x70\x72\x6f\
+\x67\0\x2e\x6c\x6c\x76\x6d\x5f\x61\x64\x64\x72\x73\x69\x67\0\x6c\x69\x63\x65\
+\x6e\x73\x65\0\x69\x74\x65\x72\x61\x74\x6f\x72\x73\x2e\x62\x70\x66\x2e\x63\0\
+\x2e\x73\x74\x72\x74\x61\x62\0\x2e\x73\x79\x6d\x74\x61\x62\0\x2e\x72\x6f\x64\
+\x61\x74\x61\0\x2e\x72\x65\x6c\x2e\x42\x54\x46\0\x4c\x49\x43\x45\x4e\x53\x45\0\
+\x4c\x42\x42\x31\x5f\x37\0\x4c\x42\x42\x31\x5f\x36\0\x4c\x42\x42\x30\x5f\x34\0\
+\x4c\x42\x42\x31\x5f\x33\0\x4c\x42\x42\x30\x5f\x33\0\x64\x75\x6d\x70\x5f\x62\
+\x70\x66\x5f\x70\x72\x6f\x67\x2e\x5f\x5f\x5f\x66\x6d\x74\x2e\x32\0\x64\x75\x6d\
+\x70\x5f\x62\x70\x66\x5f\x6d\x61\x70\x2e\x5f\x5f\x5f\x66\x6d\x74\x2e\x31\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\x01\0\0\
+\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x4e\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\x40\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\x6d\0\0\0\x01\0\0\0\x06\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\x40\x01\0\0\0\0\0\0\x08\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x08\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\xb1\0\0\0\x01\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x48\x03\0\
+\0\0\0\0\0\x62\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\x89\0\0\0\x01\0\0\0\x03\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xaa\x03\0\0\0\0\0\0\x04\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xbd\0\0\0\x01\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xae\x03\0\0\0\0\0\0\x3d\x09\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x0b\0\0\0\x01\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\xeb\x0c\0\0\0\0\0\0\x2c\x04\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa9\0\0\0\x02\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\x18\x11\0\0\0\0\0\0\x98\x01\0\0\0\0\0\0\x0e\0\0\0\x0e\0\0\0\x08\0\0\
+\0\0\0\0\0\x18\0\0\0\0\0\0\0\x4a\0\0\0\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\xb0\x12\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\x08\0\0\0\x02\0\0\0\x08\0\0\0\0\0\0\0\
+\x10\0\0\0\0\0\0\0\x69\0\0\0\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xd0\x12\
+\0\0\0\0\0\0\x20\0\0\0\0\0\0\0\x08\0\0\0\x03\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\
+\0\0\0\0\xb9\0\0\0\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xf0\x12\0\0\0\0\0\
+\0\x50\0\0\0\0\0\0\0\x08\0\0\0\x06\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\
+\x07\0\0\0\x09\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x40\x13\0\0\0\0\0\0\xe0\
+\x03\0\0\0\0\0\0\x08\0\0\0\x07\0\0\0\x08\0\0\0\0\0\0\0\x10\0\0\0\0\0\0\0\x7b\0\
+\0\0\x03\x4c\xff\x6f\0\0\0\x80\0\0\0\0\0\0\0\0\0\0\0\0\x20\x17\0\0\0\0\0\0\x07\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\xa1\0\0\0\x03\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\x27\x17\0\0\0\0\0\0\x1a\x01\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\x01\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
+
+ return 0;
+err:
+ bpf_object__destroy_skeleton(s);
+ return -1;
+}
+
+#endif /* __ITERATORS_BPF_SKEL_H__ */
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 44184f82916a..0ee2347ba510 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -257,6 +257,7 @@ static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
static int queue_map_btf_id;
const struct bpf_map_ops queue_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = queue_stack_map_alloc_check,
.map_alloc = queue_stack_map_alloc,
.map_free = queue_stack_map_free,
@@ -273,6 +274,7 @@ const struct bpf_map_ops queue_map_ops = {
static int stack_map_btf_id;
const struct bpf_map_ops stack_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = queue_stack_map_alloc_check,
.map_alloc = queue_stack_map_alloc,
.map_free = queue_stack_map_free,
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 90b29c5b1da7..a55cd542f2ce 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -191,7 +191,7 @@ int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
rcu_read_lock();
sk = reuseport_array_lookup_elem(map, key);
if (sk) {
- *(u64 *)value = sock_gen_cookie(sk);
+ *(u64 *)value = __sock_gen_cookie(sk);
err = 0;
} else {
err = -ENOENT;
@@ -351,6 +351,7 @@ static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
static int reuseport_array_map_btf_id;
const struct bpf_map_ops reuseport_array_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc_check = reuseport_array_alloc_check,
.map_alloc = reuseport_array_alloc,
.map_free = reuseport_array_free,
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 002f8a5c9e51..31cb04a4dd2d 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -287,6 +287,7 @@ static __poll_t ringbuf_map_poll(struct bpf_map *map, struct file *filp,
static int ringbuf_map_btf_id;
const struct bpf_map_ops ringbuf_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = ringbuf_map_alloc,
.map_free = ringbuf_map_free,
.map_mmap = ringbuf_map_mmap,
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index cfed0ac44d38..06065fa27124 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -665,18 +665,17 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
return __bpf_get_stack(regs, task, NULL, buf, size, flags);
}
-BTF_ID_LIST(bpf_get_task_stack_btf_ids)
-BTF_ID(struct, task_struct)
+BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
const struct bpf_func_proto bpf_get_task_stack_proto = {
.func = bpf_get_task_stack,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_get_task_stack_btf_ids[0],
.arg2_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
- .btf_id = bpf_get_task_stack_btf_ids,
};
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
@@ -839,6 +838,7 @@ static void stack_map_free(struct bpf_map *map)
static int stack_trace_map_btf_id;
const struct bpf_map_ops stack_trace_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = stack_map_alloc,
.map_free = stack_map_free,
.map_get_next_key = stack_map_get_next_key,
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index b999e7ff2583..1110ecd7d1f3 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -4,6 +4,7 @@
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/bpf_lirc.h>
+#include <linux/bpf_verifier.h>
#include <linux/btf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
@@ -29,6 +30,7 @@
#include <linux/bpf_lsm.h>
#include <linux/poll.h>
#include <linux/bpf-netns.h>
+#include <linux/rcupdate_trace.h>
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
@@ -90,6 +92,7 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
}
const struct bpf_map_ops bpf_map_offload_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = bpf_map_offload_map_alloc,
.map_free = bpf_map_offload_map_free,
.map_check_btf = map_check_no_btf,
@@ -157,10 +160,11 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
if (bpf_map_is_dev_bound(map)) {
return bpf_map_offload_update_elem(map, key, value, flags);
} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
- map->map_type == BPF_MAP_TYPE_SOCKHASH ||
- map->map_type == BPF_MAP_TYPE_SOCKMAP ||
map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
return map->ops->map_update_elem(map, key, value, flags);
+ } else if (map->map_type == BPF_MAP_TYPE_SOCKHASH ||
+ map->map_type == BPF_MAP_TYPE_SOCKMAP) {
+ return sock_map_update_elem_sys(map, key, value, flags);
} else if (IS_FD_PROG_ARRAY(map)) {
return bpf_fd_array_map_update_elem(map, f.file, key, value,
flags);
@@ -768,7 +772,8 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
if (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_ARRAY &&
map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE &&
- map->map_type != BPF_MAP_TYPE_SK_STORAGE)
+ map->map_type != BPF_MAP_TYPE_SK_STORAGE &&
+ map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
return -ENOTSUPP;
if (map->spin_lock_off + sizeof(struct bpf_spin_lock) >
map->value_size) {
@@ -1728,10 +1733,14 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
btf_put(prog->aux->btf);
bpf_prog_free_linfo(prog);
- if (deferred)
- call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
- else
+ if (deferred) {
+ if (prog->aux->sleepable)
+ call_rcu_tasks_trace(&prog->aux->rcu, __bpf_prog_put_rcu);
+ else
+ call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+ } else {
__bpf_prog_put_rcu(&prog->aux->rcu);
+ }
}
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
@@ -2101,6 +2110,7 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
BPF_F_ANY_ALIGNMENT |
BPF_F_TEST_STATE_FREQ |
+ BPF_F_SLEEPABLE |
BPF_F_TEST_RND_HI32))
return -EINVAL;
@@ -2145,17 +2155,18 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
prog->expected_attach_type = attr->expected_attach_type;
prog->aux->attach_btf_id = attr->attach_btf_id;
if (attr->attach_prog_fd) {
- struct bpf_prog *tgt_prog;
+ struct bpf_prog *dst_prog;
- tgt_prog = bpf_prog_get(attr->attach_prog_fd);
- if (IS_ERR(tgt_prog)) {
- err = PTR_ERR(tgt_prog);
+ dst_prog = bpf_prog_get(attr->attach_prog_fd);
+ if (IS_ERR(dst_prog)) {
+ err = PTR_ERR(dst_prog);
goto free_prog_nouncharge;
}
- prog->aux->linked_prog = tgt_prog;
+ prog->aux->dst_prog = dst_prog;
}
prog->aux->offload_requested = !!attr->prog_ifindex;
+ prog->aux->sleepable = attr->prog_flags & BPF_F_SLEEPABLE;
err = security_bpf_prog_alloc(prog->aux);
if (err)
@@ -2488,11 +2499,23 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)
struct bpf_tracing_link {
struct bpf_link link;
enum bpf_attach_type attach_type;
+ struct bpf_trampoline *trampoline;
+ struct bpf_prog *tgt_prog;
};
static void bpf_tracing_link_release(struct bpf_link *link)
{
- WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog));
+ struct bpf_tracing_link *tr_link =
+ container_of(link, struct bpf_tracing_link, link);
+
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
+ tr_link->trampoline));
+
+ bpf_trampoline_put(tr_link->trampoline);
+
+ /* tgt_prog is NULL if target is a kernel function */
+ if (tr_link->tgt_prog)
+ bpf_prog_put(tr_link->tgt_prog);
}
static void bpf_tracing_link_dealloc(struct bpf_link *link)
@@ -2532,10 +2555,15 @@ static const struct bpf_link_ops bpf_tracing_link_lops = {
.fill_link_info = bpf_tracing_link_fill_link_info,
};
-static int bpf_tracing_prog_attach(struct bpf_prog *prog)
+static int bpf_tracing_prog_attach(struct bpf_prog *prog,
+ int tgt_prog_fd,
+ u32 btf_id)
{
struct bpf_link_primer link_primer;
+ struct bpf_prog *tgt_prog = NULL;
+ struct bpf_trampoline *tr = NULL;
struct bpf_tracing_link *link;
+ u64 key = 0;
int err;
switch (prog->type) {
@@ -2564,6 +2592,28 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog)
goto out_put_prog;
}
+ if (!!tgt_prog_fd != !!btf_id) {
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+
+ if (tgt_prog_fd) {
+ /* For now we only allow new targets for BPF_PROG_TYPE_EXT */
+ if (prog->type != BPF_PROG_TYPE_EXT) {
+ err = -EINVAL;
+ goto out_put_prog;
+ }
+
+ tgt_prog = bpf_prog_get(tgt_prog_fd);
+ if (IS_ERR(tgt_prog)) {
+ err = PTR_ERR(tgt_prog);
+ tgt_prog = NULL;
+ goto out_put_prog;
+ }
+
+ key = bpf_trampoline_compute_key(tgt_prog, btf_id);
+ }
+
link = kzalloc(sizeof(*link), GFP_USER);
if (!link) {
err = -ENOMEM;
@@ -2573,20 +2623,100 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog)
&bpf_tracing_link_lops, prog);
link->attach_type = prog->expected_attach_type;
- err = bpf_link_prime(&link->link, &link_primer);
- if (err) {
- kfree(link);
- goto out_put_prog;
+ mutex_lock(&prog->aux->dst_mutex);
+
+ /* There are a few possible cases here:
+ *
+ * - if prog->aux->dst_trampoline is set, the program was just loaded
+ * and not yet attached to anything, so we can use the values stored
+ * in prog->aux
+ *
+ * - if prog->aux->dst_trampoline is NULL, the program has already been
+ * attached to a target and its initial target was cleared (below)
+ *
+ * - if tgt_prog != NULL, the caller specified tgt_prog_fd +
+ * target_btf_id using the link_create API.
+ *
+ * - if tgt_prog == NULL, this function was called using the old
+ * raw_tracepoint_open API, and we need a target from prog->aux
+ *
+ * The combination of no saved target in prog->aux and no target
+ * specified on load is illegal, and we reject that here.
+ */
+ if (!prog->aux->dst_trampoline && !tgt_prog) {
+ err = -ENOENT;
+ goto out_unlock;
+ }
+
+ if (!prog->aux->dst_trampoline ||
+ (key && key != prog->aux->dst_trampoline->key)) {
+ /* If there is no saved target, or the specified target is
+ * different from the destination specified at load time, we
+ * need a new trampoline and a check for compatibility
+ */
+ struct bpf_attach_target_info tgt_info = {};
+
+ err = bpf_check_attach_target(NULL, prog, tgt_prog, btf_id,
+ &tgt_info);
+ if (err)
+ goto out_unlock;
+
+ tr = bpf_trampoline_get(key, &tgt_info);
+ if (!tr) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+ } else {
+ /* The caller didn't specify a target, or the target was the
+ * same as the destination supplied during program load. This
+ * means we can reuse the trampoline and reference from program
+ * load time, and there is no need to allocate a new one. This
+ * can only happen once for any program, as the saved values in
+ * prog->aux are cleared below.
+ */
+ tr = prog->aux->dst_trampoline;
+ tgt_prog = prog->aux->dst_prog;
}
- err = bpf_trampoline_link_prog(prog);
+ err = bpf_link_prime(&link->link, &link_primer);
+ if (err)
+ goto out_unlock;
+
+ err = bpf_trampoline_link_prog(prog, tr);
if (err) {
bpf_link_cleanup(&link_primer);
- goto out_put_prog;
+ link = NULL;
+ goto out_unlock;
}
+ link->tgt_prog = tgt_prog;
+ link->trampoline = tr;
+
+ /* Always clear the trampoline and target prog from prog->aux to make
+ * sure the original attach destination is not kept alive after a
+ * program is (re-)attached to another target.
+ */
+ if (prog->aux->dst_prog &&
+ (tgt_prog_fd || tr != prog->aux->dst_trampoline))
+ /* got extra prog ref from syscall, or attaching to different prog */
+ bpf_prog_put(prog->aux->dst_prog);
+ if (prog->aux->dst_trampoline && tr != prog->aux->dst_trampoline)
+ /* we allocated a new trampoline, so free the old one */
+ bpf_trampoline_put(prog->aux->dst_trampoline);
+
+ prog->aux->dst_prog = NULL;
+ prog->aux->dst_trampoline = NULL;
+ mutex_unlock(&prog->aux->dst_mutex);
+
return bpf_link_settle(&link_primer);
+out_unlock:
+ if (tr && tr != prog->aux->dst_trampoline)
+ bpf_trampoline_put(tr);
+ mutex_unlock(&prog->aux->dst_mutex);
+ kfree(link);
out_put_prog:
+ if (tgt_prog_fd && tgt_prog)
+ bpf_prog_put(tgt_prog);
bpf_prog_put(prog);
return err;
}
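
With the rework above, user space can point a BPF_PROG_TYPE_EXT program at a new target when the link is created. A hedged raw-syscall sketch using the target_fd/target_btf_id fields this patch wires into link_create; it assumes uapi headers from this series and a program loaded with expected_attach_type 0, as freplace programs typically are:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Attach an already-loaded freplace program to (another) target program.
 * Passing target_fd = 0 and target_btf_id = 0 falls back to the target
 * remembered from load time, per the dst_trampoline logic above.
 */
static int link_create_freplace(int ext_prog_fd, int tgt_prog_fd, __u32 btf_id)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = ext_prog_fd;
	attr.link_create.target_fd = tgt_prog_fd;
	attr.link_create.target_btf_id = btf_id;	/* func in the target's BTF */
	attr.link_create.attach_type = 0;	/* must equal expected_attach_type */

	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}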
@@ -2700,7 +2830,7 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
tp_name = prog->aux->attach_func_name;
break;
}
- return bpf_tracing_prog_attach(prog);
+ return bpf_tracing_prog_attach(prog, 0, 0);
case BPF_PROG_TYPE_RAW_TRACEPOINT:
case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
if (strncpy_from_user(buf,
@@ -2969,7 +3099,7 @@ static int bpf_prog_query(const union bpf_attr *attr,
}
}
-#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out
+#define BPF_PROG_TEST_RUN_LAST_FIELD test.cpu
static int bpf_prog_test_run(const union bpf_attr *attr,
union bpf_attr __user *uattr)
@@ -3152,21 +3282,25 @@ static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
const struct bpf_map *map;
int i;
+ mutex_lock(&prog->aux->used_maps_mutex);
for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) {
map = prog->aux->used_maps[i];
if (map == (void *)addr) {
*type = BPF_PSEUDO_MAP_FD;
- return map;
+ goto out;
}
if (!map->ops->map_direct_value_meta)
continue;
if (!map->ops->map_direct_value_meta(map, addr, off)) {
*type = BPF_PSEUDO_MAP_VALUE;
- return map;
+ goto out;
}
}
+ map = NULL;
- return NULL;
+out:
+ mutex_unlock(&prog->aux->used_maps_mutex);
+ return map;
}
static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog,
@@ -3284,6 +3418,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
memcpy(info.tag, prog->tag, sizeof(prog->tag));
memcpy(info.name, prog->aux->name, sizeof(prog->aux->name));
+ mutex_lock(&prog->aux->used_maps_mutex);
ulen = info.nr_map_ids;
info.nr_map_ids = prog->aux->used_map_cnt;
ulen = min_t(u32, info.nr_map_ids, ulen);
@@ -3293,9 +3428,12 @@ static int bpf_prog_get_info_by_fd(struct file *file,
for (i = 0; i < ulen; i++)
if (put_user(prog->aux->used_maps[i]->id,
- &user_map_ids[i]))
+ &user_map_ids[i])) {
+ mutex_unlock(&prog->aux->used_maps_mutex);
return -EFAULT;
+ }
}
+ mutex_unlock(&prog->aux->used_maps_mutex);
err = set_info_rec_size(&info);
if (err)
@@ -3876,10 +4014,15 @@ err_put:
static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
- if (attr->link_create.attach_type == BPF_TRACE_ITER &&
- prog->expected_attach_type == BPF_TRACE_ITER)
- return bpf_iter_link_attach(attr, prog);
+ if (attr->link_create.attach_type != prog->expected_attach_type)
+ return -EINVAL;
+ if (prog->expected_attach_type == BPF_TRACE_ITER)
+ return bpf_iter_link_attach(attr, prog);
+ else if (prog->type == BPF_PROG_TYPE_EXT)
+ return bpf_tracing_prog_attach(prog,
+ attr->link_create.target_fd,
+ attr->link_create.target_btf_id);
return -EINVAL;
}
@@ -3893,18 +4036,25 @@ static int link_create(union bpf_attr *attr)
if (CHECK_ATTR(BPF_LINK_CREATE))
return -EINVAL;
- ptype = attach_type_to_prog_type(attr->link_create.attach_type);
- if (ptype == BPF_PROG_TYPE_UNSPEC)
- return -EINVAL;
-
- prog = bpf_prog_get_type(attr->link_create.prog_fd, ptype);
+ prog = bpf_prog_get(attr->link_create.prog_fd);
if (IS_ERR(prog))
return PTR_ERR(prog);
ret = bpf_prog_attach_check_attach_type(prog,
attr->link_create.attach_type);
if (ret)
- goto err_out;
+ goto out;
+
+ if (prog->type == BPF_PROG_TYPE_EXT) {
+ ret = tracing_bpf_link_attach(attr, prog);
+ goto out;
+ }
+
+ ptype = attach_type_to_prog_type(attr->link_create.attach_type);
+ if (ptype == BPF_PROG_TYPE_UNSPEC || ptype != prog->type) {
+ ret = -EINVAL;
+ goto out;
+ }
switch (ptype) {
case BPF_PROG_TYPE_CGROUP_SKB:
@@ -3932,7 +4082,7 @@ static int link_create(union bpf_attr *attr)
ret = -EINVAL;
}
-err_out:
+out:
if (ret < 0)
bpf_prog_put(prog);
return ret;
@@ -4014,40 +4164,50 @@ static int link_detach(union bpf_attr *attr)
return ret;
}
-static int bpf_link_inc_not_zero(struct bpf_link *link)
+static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
{
- return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT;
+ return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
}
-#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
-
-static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
+struct bpf_link *bpf_link_by_id(u32 id)
{
struct bpf_link *link;
- u32 id = attr->link_id;
- int fd, err;
- if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (!id)
+ return ERR_PTR(-ENOENT);
spin_lock_bh(&link_idr_lock);
- link = idr_find(&link_idr, id);
/* before link is "settled", ID is 0, pretend it doesn't exist yet */
+ link = idr_find(&link_idr, id);
if (link) {
if (link->id)
- err = bpf_link_inc_not_zero(link);
+ link = bpf_link_inc_not_zero(link);
else
- err = -EAGAIN;
+ link = ERR_PTR(-EAGAIN);
} else {
- err = -ENOENT;
+ link = ERR_PTR(-ENOENT);
}
spin_unlock_bh(&link_idr_lock);
+ return link;
+}
- if (err)
- return err;
+#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id
+
+static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
+{
+ struct bpf_link *link;
+ u32 id = attr->link_id;
+ int fd;
+
+ if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
+ return -EINVAL;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ link = bpf_link_by_id(id);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
fd = bpf_link_new_fd(link);
if (fd < 0)
@@ -4133,6 +4293,68 @@ static int bpf_iter_create(union bpf_attr *attr)
return err;
}
+#define BPF_PROG_BIND_MAP_LAST_FIELD prog_bind_map.flags
+
+static int bpf_prog_bind_map(union bpf_attr *attr)
+{
+ struct bpf_prog *prog;
+ struct bpf_map *map;
+ struct bpf_map **used_maps_old, **used_maps_new;
+ int i, ret = 0;
+
+ if (CHECK_ATTR(BPF_PROG_BIND_MAP))
+ return -EINVAL;
+
+ if (attr->prog_bind_map.flags)
+ return -EINVAL;
+
+ prog = bpf_prog_get(attr->prog_bind_map.prog_fd);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+
+ map = bpf_map_get(attr->prog_bind_map.map_fd);
+ if (IS_ERR(map)) {
+ ret = PTR_ERR(map);
+ goto out_prog_put;
+ }
+
+ mutex_lock(&prog->aux->used_maps_mutex);
+
+ used_maps_old = prog->aux->used_maps;
+
+ for (i = 0; i < prog->aux->used_map_cnt; i++)
+ if (used_maps_old[i] == map) {
+ bpf_map_put(map);
+ goto out_unlock;
+ }
+
+ used_maps_new = kmalloc_array(prog->aux->used_map_cnt + 1,
+ sizeof(used_maps_new[0]),
+ GFP_KERNEL);
+ if (!used_maps_new) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(used_maps_new, used_maps_old,
+ sizeof(used_maps_old[0]) * prog->aux->used_map_cnt);
+ used_maps_new[prog->aux->used_map_cnt] = map;
+
+ prog->aux->used_map_cnt++;
+ prog->aux->used_maps = used_maps_new;
+
+ kfree(used_maps_old);
+
+out_unlock:
+ mutex_unlock(&prog->aux->used_maps_mutex);
+
+ if (ret)
+ bpf_map_put(map);
+out_prog_put:
+ bpf_prog_put(prog);
+ return ret;
+}
+
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr;
@@ -4266,6 +4488,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_LINK_DETACH:
err = link_detach(&attr);
break;
+ case BPF_PROG_BIND_MAP:
+ err = bpf_prog_bind_map(&attr);
+ break;
default:
err = -EINVAL;
break;
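
BPF_PROG_BIND_MAP lets a loader tie a map's lifetime to a program even when the program's instructions never reference it (e.g. metadata maps). A raw-syscall sketch mirroring the attr fields checked above; it assumes uapi headers that already define the new command:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int prog_bind_map(int prog_fd, int map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_bind_map.prog_fd = prog_fd;
	attr.prog_bind_map.map_fd = map_fd;
	attr.prog_bind_map.flags = 0;	/* must be 0 for now */

	return syscall(__NR_bpf, BPF_PROG_BIND_MAP, &attr, sizeof(attr));
}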
diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
index 99af4cea1102..5b6af30bfbcd 100644
--- a/kernel/bpf/task_iter.c
+++ b/kernel/bpf/task_iter.c
@@ -22,7 +22,8 @@ struct bpf_iter_seq_task_info {
};
static struct task_struct *task_seq_get_next(struct pid_namespace *ns,
- u32 *tid)
+ u32 *tid,
+ bool skip_if_dup_files)
{
struct task_struct *task = NULL;
struct pid *pid;
@@ -36,6 +37,12 @@ retry:
if (!task) {
++*tid;
goto retry;
+ } else if (skip_if_dup_files && task->tgid != task->pid &&
+ task->files == task->group_leader->files) {
+ put_task_struct(task);
+ task = NULL;
+ ++*tid;
+ goto retry;
}
}
rcu_read_unlock();
@@ -48,7 +55,7 @@ static void *task_seq_start(struct seq_file *seq, loff_t *pos)
struct bpf_iter_seq_task_info *info = seq->private;
struct task_struct *task;
- task = task_seq_get_next(info->common.ns, &info->tid);
+ task = task_seq_get_next(info->common.ns, &info->tid, false);
if (!task)
return NULL;
@@ -65,7 +72,7 @@ static void *task_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
++info->tid;
put_task_struct((struct task_struct *)v);
- task = task_seq_get_next(info->common.ns, &info->tid);
+ task = task_seq_get_next(info->common.ns, &info->tid, false);
if (!task)
return NULL;
@@ -148,7 +155,7 @@ again:
curr_files = *fstruct;
curr_fd = info->fd;
} else {
- curr_task = task_seq_get_next(ns, &curr_tid);
+ curr_task = task_seq_get_next(ns, &curr_tid, true);
if (!curr_task)
return NULL;
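
The new skip_if_dup_files flag matters for the iter/task_file iterator, which would otherwise visit the same fd table once per thread. A BPF-side sketch in the style of iterators.bpf.c, mirroring the selftests rather than code in this patch (field names follow the bpf_iter__task_file context):

SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	struct file *file = ctx->file;

	if (!task || !file)
		return 0;

	if (ctx->meta->seq_num == 0)
		BPF_SEQ_PRINTF(seq, "    tgid      pid       fd\n");

	/* threads sharing the leader's files_struct are now skipped, so
	 * each fd table is printed once */
	BPF_SEQ_PRINTF(seq, "%8d %8d %8d\n", task->tgid, task->pid, ctx->fd);
	return 0;
}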
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 9be85aa4ec5f..35c5887d82ff 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -7,6 +7,8 @@
#include <linux/rbtree_latch.h>
#include <linux/perf_event.h>
#include <linux/btf.h>
+#include <linux/rcupdate_trace.h>
+#include <linux/rcupdate_wait.h>
/* dummy _ops. The verifier will operate on target program's ops. */
const struct bpf_verifier_ops bpf_extension_verifier_ops = {
@@ -63,7 +65,7 @@ static void bpf_trampoline_ksym_add(struct bpf_trampoline *tr)
bpf_image_ksym_add(tr->image, ksym);
}
-struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+static struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
{
struct bpf_trampoline *tr;
struct hlist_head *head;
@@ -210,9 +212,12 @@ static int bpf_trampoline_update(struct bpf_trampoline *tr)
* updates to trampoline would change the code from underneath the
* preempted task. Hence wait for tasks to voluntarily schedule or go
* to userspace.
+ * The same trampoline can hold both sleepable and non-sleepable progs.
+ * synchronize_rcu_tasks_trace() is needed to make sure all sleepable
+ * programs finish executing.
+ * Wait for these two grace periods together.
*/
-
- synchronize_rcu_tasks();
+ synchronize_rcu_mult(call_rcu_tasks, call_rcu_tasks_trace);
err = arch_prepare_bpf_trampoline(new_image, new_image + PAGE_SIZE / 2,
&tr->func.model, flags, tprogs,
@@ -256,14 +261,12 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
}
}
-int bpf_trampoline_link_prog(struct bpf_prog *prog)
+int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
enum bpf_tramp_prog_type kind;
- struct bpf_trampoline *tr;
int err = 0;
int cnt;
- tr = prog->aux->trampoline;
kind = bpf_attach_type_to_tramp(prog);
mutex_lock(&tr->mutex);
if (tr->extension_prog) {
@@ -296,7 +299,7 @@ int bpf_trampoline_link_prog(struct bpf_prog *prog)
}
hlist_add_head(&prog->aux->tramp_hlist, &tr->progs_hlist[kind]);
tr->progs_cnt[kind]++;
- err = bpf_trampoline_update(prog->aux->trampoline);
+ err = bpf_trampoline_update(tr);
if (err) {
hlist_del(&prog->aux->tramp_hlist);
tr->progs_cnt[kind]--;
@@ -307,13 +310,11 @@ out:
}
/* bpf_trampoline_unlink_prog() should never fail. */
-int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
+int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr)
{
enum bpf_tramp_prog_type kind;
- struct bpf_trampoline *tr;
int err;
- tr = prog->aux->trampoline;
kind = bpf_attach_type_to_tramp(prog);
mutex_lock(&tr->mutex);
if (kind == BPF_TRAMP_REPLACE) {
@@ -325,12 +326,32 @@ int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
}
hlist_del(&prog->aux->tramp_hlist);
tr->progs_cnt[kind]--;
- err = bpf_trampoline_update(prog->aux->trampoline);
+ err = bpf_trampoline_update(tr);
out:
mutex_unlock(&tr->mutex);
return err;
}
+struct bpf_trampoline *bpf_trampoline_get(u64 key,
+ struct bpf_attach_target_info *tgt_info)
+{
+ struct bpf_trampoline *tr;
+
+ tr = bpf_trampoline_lookup(key);
+ if (!tr)
+ return NULL;
+
+ mutex_lock(&tr->mutex);
+ if (tr->func.addr)
+ goto out;
+
+ memcpy(&tr->func.model, &tgt_info->fmodel, sizeof(tgt_info->fmodel));
+ tr->func.addr = (void *)tgt_info->tgt_addr;
+out:
+ mutex_unlock(&tr->mutex);
+ return tr;
+}
+
void bpf_trampoline_put(struct bpf_trampoline *tr)
{
if (!tr)
@@ -344,7 +365,14 @@ void bpf_trampoline_put(struct bpf_trampoline *tr)
if (WARN_ON_ONCE(!hlist_empty(&tr->progs_hlist[BPF_TRAMP_FEXIT])))
goto out;
bpf_image_ksym_del(&tr->ksym);
- /* wait for tasks to get out of trampoline before freeing it */
+ /* This code is executed only after all bpf progs (both sleepable and
+ * non-sleepable) have gone through
+ * bpf_prog_put()->call_rcu[_tasks_trace]()->bpf_prog_free_deferred().
+ * Hence there is no need for another synchronize_rcu_tasks_trace() here,
+ * but synchronize_rcu_tasks() is still needed, since the trampoline
+ * may not have had any sleepable programs and we need to wait
+ * for tasks to get out of trampoline code before freeing it.
+ */
synchronize_rcu_tasks();
bpf_jit_free_exec(tr->image);
hlist_del(&tr->hlist);
@@ -394,6 +422,17 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start)
rcu_read_unlock();
}
+void notrace __bpf_prog_enter_sleepable(void)
+{
+ rcu_read_lock_trace();
+ might_fault();
+}
+
+void notrace __bpf_prog_exit_sleepable(void)
+{
+ rcu_read_unlock_trace();
+}
+
int __weak
arch_prepare_bpf_trampoline(void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
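
The pair above brackets sleepable program invocations with rcu_read_lock_trace() and permits page faults in between. A hedged sketch of the BPF side, assuming libbpf's "fentry.s/" sleepable section convention and the bpf_copy_from_user() helper introduced alongside this series:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry.s/do_sys_openat2")	/* the ".s" suffix marks the prog sleepable */
int BPF_PROG(trace_open, int dfd, const char *filename)
{
	char buf[64] = {};

	/* May fault and sleep; only legal because the program is sleepable. */
	bpf_copy_from_user(buf, sizeof(buf) - 1, filename);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";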
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fba52d9ec8fc..39d7f44e7c92 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -21,6 +21,7 @@
#include <linux/ctype.h>
#include <linux/error-injection.h>
#include <linux/bpf_lsm.h>
+#include <linux/btf_ids.h>
#include "disasm.h"
@@ -238,6 +239,7 @@ struct bpf_call_arg_meta {
int ref_obj_id;
int func_id;
u32 btf_id;
+ u32 ret_btf_id;
};
struct btf *btf_vmlinux;
@@ -435,6 +437,15 @@ static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
return type == ARG_PTR_TO_SOCK_COMMON;
}
+static bool arg_type_may_be_null(enum bpf_arg_type type)
+{
+ return type == ARG_PTR_TO_MAP_VALUE_OR_NULL ||
+ type == ARG_PTR_TO_MEM_OR_NULL ||
+ type == ARG_PTR_TO_CTX_OR_NULL ||
+ type == ARG_PTR_TO_SOCKET_OR_NULL ||
+ type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
+}
+
/* Determine whether the function releases some resources allocated by another
* function call. The first reference type argument will be assumed to be
* released by release_reference().
@@ -477,7 +488,12 @@ static bool is_acquire_function(enum bpf_func_id func_id,
static bool is_ptr_cast_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_tcp_sock ||
- func_id == BPF_FUNC_sk_fullsock;
+ func_id == BPF_FUNC_sk_fullsock ||
+ func_id == BPF_FUNC_skc_to_tcp_sock ||
+ func_id == BPF_FUNC_skc_to_tcp6_sock ||
+ func_id == BPF_FUNC_skc_to_udp6_sock ||
+ func_id == BPF_FUNC_skc_to_tcp_timewait_sock ||
+ func_id == BPF_FUNC_skc_to_tcp_request_sock;
}
/* string representation of 'enum bpf_reg_type' */
@@ -503,6 +519,7 @@ static const char * const reg_type_str[] = {
[PTR_TO_XDP_SOCK] = "xdp_sock",
[PTR_TO_BTF_ID] = "ptr_",
[PTR_TO_BTF_ID_OR_NULL] = "ptr_or_null_",
+ [PTR_TO_PERCPU_BTF_ID] = "percpu_ptr_",
[PTR_TO_MEM] = "mem",
[PTR_TO_MEM_OR_NULL] = "mem_or_null",
[PTR_TO_RDONLY_BUF] = "rdonly_buf",
@@ -569,7 +586,9 @@ static void print_verifier_state(struct bpf_verifier_env *env,
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%lld", reg->var_off.value + reg->off);
} else {
- if (t == PTR_TO_BTF_ID || t == PTR_TO_BTF_ID_OR_NULL)
+ if (t == PTR_TO_BTF_ID ||
+ t == PTR_TO_BTF_ID_OR_NULL ||
+ t == PTR_TO_PERCPU_BTF_ID)
verbose(env, "%s", kernel_type_name(reg->btf_id));
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
@@ -991,14 +1010,9 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);
-/* Mark the unknown part of a register (variable offset or scalar value) as
- * known to have the value @imm.
- */
-static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
+/* This helper doesn't clear reg->id */
+static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
- /* Clear id, off, and union(map_ptr, range) */
- memset(((u8 *)reg) + sizeof(reg->type), 0,
- offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
reg->var_off = tnum_const(imm);
reg->smin_value = (s64)imm;
reg->smax_value = (s64)imm;
@@ -1011,6 +1025,17 @@ static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
reg->u32_max_value = (u32)imm;
}
+/* Mark the unknown part of a register (variable offset or scalar value) as
+ * known to have the value @imm.
+ */
+static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm)
+{
+ /* Clear id, off, and union(map_ptr, range) */
+ memset(((u8 *)reg) + sizeof(reg->type), 0,
+ offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type));
+ ___mark_reg_known(reg, imm);
+}
+
static void __mark_reg32_known(struct bpf_reg_state *reg, u64 imm)
{
reg->var_off = tnum_const_subreg(reg->var_off, imm);
@@ -1489,6 +1514,13 @@ static int check_subprogs(struct bpf_verifier_env *env)
for (i = 0; i < insn_cnt; i++) {
u8 code = insn[i].code;
+ if (code == (BPF_JMP | BPF_CALL) &&
+ insn[i].imm == BPF_FUNC_tail_call &&
+ insn[i].src_reg != BPF_PSEUDO_CALL)
+ subprog[cur_subprog].has_tail_call = true;
+ if (BPF_CLASS(code) == BPF_LD &&
+ (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND))
+ subprog[cur_subprog].has_ld_abs = true;
if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32)
goto next;
if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL)
@@ -2183,6 +2215,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
case PTR_TO_RDONLY_BUF_OR_NULL:
case PTR_TO_RDWR_BUF:
case PTR_TO_RDWR_BUF_OR_NULL:
+ case PTR_TO_PERCPU_BTF_ID:
return true;
default:
return false;
@@ -2200,6 +2233,20 @@ static bool register_is_const(struct bpf_reg_state *reg)
return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
}
+static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
+{
+ return tnum_is_unknown(reg->var_off) &&
+ reg->smin_value == S64_MIN && reg->smax_value == S64_MAX &&
+ reg->umin_value == 0 && reg->umax_value == U64_MAX &&
+ reg->s32_min_value == S32_MIN && reg->s32_max_value == S32_MAX &&
+ reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
+}
+
+static bool register_is_bounded(struct bpf_reg_state *reg)
+{
+ return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
+}
+
static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
@@ -2251,7 +2298,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
if (value_regno >= 0)
reg = &cur->regs[value_regno];
- if (reg && size == BPF_REG_SIZE && register_is_const(reg) &&
+ if (reg && size == BPF_REG_SIZE && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
if (dst_reg != BPF_REG_FP) {
/* The backtracking logic can only recognize explicit
@@ -2625,11 +2672,18 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
#define MAX_PACKET_OFF 0xffff
+static enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
+{
+ return prog->aux->dst_prog ? prog->aux->dst_prog->type : prog->type;
+}
+
static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
const struct bpf_call_arg_meta *meta,
enum bpf_access_type t)
{
- switch (env->prog->type) {
+ enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
+
+ switch (prog_type) {
/* Program types only with direct read access go here! */
case BPF_PROG_TYPE_LWT_IN:
case BPF_PROG_TYPE_LWT_OUT:
@@ -2639,7 +2693,7 @@ static bool may_access_direct_pkt_data(struct bpf_verifier_env *env,
case BPF_PROG_TYPE_CGROUP_SKB:
if (t == BPF_WRITE)
return false;
- /* fallthrough */
+ fallthrough;
/* Program types with direct read + write access go here! */
case BPF_PROG_TYPE_SCHED_CLS:
@@ -2970,10 +3024,37 @@ static int check_max_stack_depth(struct bpf_verifier_env *env)
int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
struct bpf_subprog_info *subprog = env->subprog_info;
struct bpf_insn *insn = env->prog->insnsi;
+ bool tail_call_reachable = false;
int ret_insn[MAX_CALL_FRAMES];
int ret_prog[MAX_CALL_FRAMES];
+ int j;
process_func:
+ /* protect against the potential stack overflow that might happen when
+ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack
+ * depth for such a case down to 256 so that the worst case scenario
+ * results in an 8k stack size (32, the tailcall limit, * 256 = 8k).
+ *
+ * To get an idea of what might happen, see this example:
+ * func1 -> sub rsp, 128
+ * subfunc1 -> sub rsp, 256
+ * tailcall1 -> add rsp, 256
+ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320)
+ * subfunc2 -> sub rsp, 64
+ * subfunc22 -> sub rsp, 128
+ * tailcall2 -> add rsp, 128
+ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416)
+ *
+ * a tailcall will unwind the current stack frame, but it will not get
+ * rid of the caller's stack, as shown in the example above.
+ */
+ if (idx && subprog[idx].has_tail_call && depth >= 256) {
+ verbose(env,
+ "tail_calls are not allowed when the call stack of previous frames is %d bytes. Too large\n",
+ depth);
+ return -EACCES;
+ }
/* round up to 32-bytes, since this is granularity
* of interpreter stack size
*/
@@ -3002,6 +3083,10 @@ continue_func:
i);
return -EFAULT;
}
+
+ if (subprog[idx].has_tail_call)
+ tail_call_reachable = true;
+
frame++;
if (frame >= MAX_CALL_FRAMES) {
verbose(env, "the call stack of %d frames is too deep !\n",
@@ -3010,6 +3095,15 @@ continue_func:
}
goto process_func;
}
+ /* if a tail call was detected across bpf2bpf calls, mark each of the
+ * currently present subprog frames as tail-call reachable; the JIT
+ * will use this info to preserve the tail call counter throughout
+ * bpf2bpf calls combined with tailcalls
+ */
+ if (tail_call_reachable)
+ for (j = 0; j < frame; j++)
+ subprog[ret_prog[j]].tail_call_reachable = true;
+
/* end of for() loop means the last insn of the 'subprog'
* was reached. Doesn't matter whether it was JA or EXIT
*/
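
To illustrate the now-permitted combination (subject to the 256-byte limit above and a JIT that preserves the tail call counter, which at this point means x86-64), a hedged BPF C sketch; map and section names are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int subfunc(struct __sk_buff *skb)
{
	/* tail call issued from a bpf2bpf callee: the verifier marks all
	 * frames on this path tail_call_reachable so the JIT keeps the
	 * tail call counter intact across the call chain
	 */
	bpf_tail_call(skb, &jmp_table, 0);
	return 0;
}

SEC("classifier")
int entry(struct __sk_buff *skb)
{
	return subfunc(skb);
}

char LICENSE[] SEC("license") = "GPL";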
@@ -3585,18 +3679,6 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
struct bpf_func_state *state = func(env, reg);
int err, min_off, max_off, i, j, slot, spi;
- if (reg->type != PTR_TO_STACK) {
- /* Allow zero-byte read from NULL, regardless of pointer type */
- if (zero_size_allowed && access_size == 0 &&
- register_is_null(reg))
- return 0;
-
- verbose(env, "R%d type=%s expected=%s\n", regno,
- reg_type_str[reg->type],
- reg_type_str[PTR_TO_STACK]);
- return -EACCES;
- }
-
if (tnum_is_const(reg->var_off)) {
min_off = max_off = reg->var_off.value + reg->off;
err = __check_stack_boundary(env, regno, min_off, access_size,
@@ -3741,9 +3823,19 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
access_size, zero_size_allowed,
"rdwr",
&env->prog->aux->max_rdwr_access);
- default: /* scalar_value|ptr_to_stack or invalid ptr */
+ case PTR_TO_STACK:
return check_stack_boundary(env, regno, access_size,
zero_size_allowed, meta);
+ default: /* scalar_value or invalid ptr */
+ /* Allow zero-byte read from NULL, regardless of pointer type */
+ if (zero_size_allowed && access_size == 0 &&
+ register_is_null(reg))
+ return 0;
+
+ verbose(env, "R%d type=%s expected=%s\n", regno,
+ reg_type_str[reg->type],
+ reg_type_str[PTR_TO_STACK]);
+ return -EACCES;
}
}
@@ -3775,10 +3867,6 @@ static int process_spin_lock(struct bpf_verifier_env *env, int regno,
struct bpf_map *map = reg->map_ptr;
u64 val = reg->var_off.value;
- if (reg->type != PTR_TO_MAP_VALUE) {
- verbose(env, "R%d is not a pointer to map_value\n", regno);
- return -EINVAL;
- }
if (!is_const) {
verbose(env,
"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
@@ -3845,12 +3933,6 @@ static bool arg_type_is_mem_size(enum bpf_arg_type type)
type == ARG_CONST_SIZE_OR_ZERO;
}
-static bool arg_type_is_alloc_mem_ptr(enum bpf_arg_type type)
-{
- return type == ARG_PTR_TO_ALLOC_MEM ||
- type == ARG_PTR_TO_ALLOC_MEM_OR_NULL;
-}
-
static bool arg_type_is_alloc_size(enum bpf_arg_type type)
{
return type == ARG_CONST_ALLOC_SIZE_OR_ZERO;
@@ -3872,14 +3954,194 @@ static int int_ptr_type_to_size(enum bpf_arg_type type)
return -EINVAL;
}
+static int resolve_map_arg_type(struct bpf_verifier_env *env,
+ const struct bpf_call_arg_meta *meta,
+ enum bpf_arg_type *arg_type)
+{
+ if (!meta->map_ptr) {
+ /* kernel subsystem misconfigured verifier */
+ verbose(env, "invalid map_ptr to access map->type\n");
+ return -EACCES;
+ }
+
+ switch (meta->map_ptr->map_type) {
+ case BPF_MAP_TYPE_SOCKMAP:
+ case BPF_MAP_TYPE_SOCKHASH:
+ if (*arg_type == ARG_PTR_TO_MAP_VALUE) {
+ *arg_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON;
+ } else {
+ verbose(env, "invalid arg_type for sockmap/sockhash\n");
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+struct bpf_reg_types {
+ const enum bpf_reg_type types[10];
+ u32 *btf_id;
+};
+
+static const struct bpf_reg_types map_key_value_types = {
+ .types = {
+ PTR_TO_STACK,
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_META,
+ PTR_TO_MAP_VALUE,
+ },
+};
+
+static const struct bpf_reg_types sock_types = {
+ .types = {
+ PTR_TO_SOCK_COMMON,
+ PTR_TO_SOCKET,
+ PTR_TO_TCP_SOCK,
+ PTR_TO_XDP_SOCK,
+ },
+};
+
+#ifdef CONFIG_NET
+static const struct bpf_reg_types btf_id_sock_common_types = {
+ .types = {
+ PTR_TO_SOCK_COMMON,
+ PTR_TO_SOCKET,
+ PTR_TO_TCP_SOCK,
+ PTR_TO_XDP_SOCK,
+ PTR_TO_BTF_ID,
+ },
+ .btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+};
+#endif
+
+static const struct bpf_reg_types mem_types = {
+ .types = {
+ PTR_TO_STACK,
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_META,
+ PTR_TO_MAP_VALUE,
+ PTR_TO_MEM,
+ PTR_TO_RDONLY_BUF,
+ PTR_TO_RDWR_BUF,
+ },
+};
+
+static const struct bpf_reg_types int_ptr_types = {
+ .types = {
+ PTR_TO_STACK,
+ PTR_TO_PACKET,
+ PTR_TO_PACKET_META,
+ PTR_TO_MAP_VALUE,
+ },
+};
+
+static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
+static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
+static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
+static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
+static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
+static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
+static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
+static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_PERCPU_BTF_ID } };
+
+static const struct bpf_reg_types *compatible_reg_types[__BPF_ARG_TYPE_MAX] = {
+ [ARG_PTR_TO_MAP_KEY] = &map_key_value_types,
+ [ARG_PTR_TO_MAP_VALUE] = &map_key_value_types,
+ [ARG_PTR_TO_UNINIT_MAP_VALUE] = &map_key_value_types,
+ [ARG_PTR_TO_MAP_VALUE_OR_NULL] = &map_key_value_types,
+ [ARG_CONST_SIZE] = &scalar_types,
+ [ARG_CONST_SIZE_OR_ZERO] = &scalar_types,
+ [ARG_CONST_ALLOC_SIZE_OR_ZERO] = &scalar_types,
+ [ARG_CONST_MAP_PTR] = &const_map_ptr_types,
+ [ARG_PTR_TO_CTX] = &context_types,
+ [ARG_PTR_TO_CTX_OR_NULL] = &context_types,
+ [ARG_PTR_TO_SOCK_COMMON] = &sock_types,
+#ifdef CONFIG_NET
+ [ARG_PTR_TO_BTF_ID_SOCK_COMMON] = &btf_id_sock_common_types,
+#endif
+ [ARG_PTR_TO_SOCKET] = &fullsock_types,
+ [ARG_PTR_TO_SOCKET_OR_NULL] = &fullsock_types,
+ [ARG_PTR_TO_BTF_ID] = &btf_ptr_types,
+ [ARG_PTR_TO_SPIN_LOCK] = &spin_lock_types,
+ [ARG_PTR_TO_MEM] = &mem_types,
+ [ARG_PTR_TO_MEM_OR_NULL] = &mem_types,
+ [ARG_PTR_TO_UNINIT_MEM] = &mem_types,
+ [ARG_PTR_TO_ALLOC_MEM] = &alloc_mem_types,
+ [ARG_PTR_TO_ALLOC_MEM_OR_NULL] = &alloc_mem_types,
+ [ARG_PTR_TO_INT] = &int_ptr_types,
+ [ARG_PTR_TO_LONG] = &int_ptr_types,
+ [ARG_PTR_TO_PERCPU_BTF_ID] = &percpu_btf_ptr_types,
+};
+
+static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
+ enum bpf_arg_type arg_type,
+ const u32 *arg_btf_id)
+{
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+ enum bpf_reg_type expected, type = reg->type;
+ const struct bpf_reg_types *compatible;
+ int i, j;
+
+ compatible = compatible_reg_types[arg_type];
+ if (!compatible) {
+ verbose(env, "verifier internal error: unsupported arg type %d\n", arg_type);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(compatible->types); i++) {
+ expected = compatible->types[i];
+ if (expected == NOT_INIT)
+ break;
+
+ if (type == expected)
+ goto found;
+ }
+
+ verbose(env, "R%d type=%s expected=", regno, reg_type_str[type]);
+ for (j = 0; j + 1 < i; j++)
+ verbose(env, "%s, ", reg_type_str[compatible->types[j]]);
+ verbose(env, "%s\n", reg_type_str[compatible->types[j]]);
+ return -EACCES;
+
+found:
+ if (type == PTR_TO_BTF_ID) {
+ if (!arg_btf_id) {
+ if (!compatible->btf_id) {
+ verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
+ return -EFAULT;
+ }
+ arg_btf_id = compatible->btf_id;
+ }
+
+ if (!btf_struct_ids_match(&env->log, reg->off, reg->btf_id,
+ *arg_btf_id)) {
+ verbose(env, "R%d is of type %s but %s is expected\n",
+ regno, kernel_type_name(reg->btf_id),
+ kernel_type_name(*arg_btf_id));
+ return -EACCES;
+ }
+
+ if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
+ verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
+ regno);
+ return -EACCES;
+ }
+ }
+
+ return 0;
+}
+
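
With this table-driven check, helper protos no longer need a check_btf_id() callback; they pin the expected BTF type per argument instead. A hedged kernel-side sketch (the helper itself is hypothetical):

#include <linux/btf_ids.h>

BPF_CALL_1(bpf_example, struct task_struct *, task)
{
	return task ? task->pid : 0;
}

BTF_ID_LIST(bpf_example_btf_ids)
BTF_ID(struct, task_struct)

static const struct bpf_func_proto bpf_example_proto = {
	.func		= bpf_example,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	/* check_reg_type() matches R1's btf_id against this entry */
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_example_btf_ids[0],
};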
static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
struct bpf_call_arg_meta *meta,
const struct bpf_func_proto *fn)
{
u32 regno = BPF_REG_1 + arg;
struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
- enum bpf_reg_type expected_type, type = reg->type;
enum bpf_arg_type arg_type = fn->arg_type[arg];
+ enum bpf_reg_type type = reg->type;
int err = 0;
if (arg_type == ARG_DONTCARE)
@@ -3904,120 +4166,32 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EACCES;
}
- if (arg_type == ARG_PTR_TO_MAP_KEY ||
- arg_type == ARG_PTR_TO_MAP_VALUE ||
+ if (arg_type == ARG_PTR_TO_MAP_VALUE ||
arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE ||
arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) {
- expected_type = PTR_TO_STACK;
- if (register_is_null(reg) &&
- arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL)
- /* final test in check_stack_boundary() */;
- else if (!type_is_pkt_pointer(type) &&
- type != PTR_TO_MAP_VALUE &&
- type != expected_type)
- goto err_type;
- } else if (arg_type == ARG_CONST_SIZE ||
- arg_type == ARG_CONST_SIZE_OR_ZERO ||
- arg_type == ARG_CONST_ALLOC_SIZE_OR_ZERO) {
- expected_type = SCALAR_VALUE;
- if (type != expected_type)
- goto err_type;
- } else if (arg_type == ARG_CONST_MAP_PTR) {
- expected_type = CONST_PTR_TO_MAP;
- if (type != expected_type)
- goto err_type;
- } else if (arg_type == ARG_PTR_TO_CTX ||
- arg_type == ARG_PTR_TO_CTX_OR_NULL) {
- expected_type = PTR_TO_CTX;
- if (!(register_is_null(reg) &&
- arg_type == ARG_PTR_TO_CTX_OR_NULL)) {
- if (type != expected_type)
- goto err_type;
- err = check_ctx_reg(env, reg, regno);
- if (err < 0)
- return err;
- }
- } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) {
- expected_type = PTR_TO_SOCK_COMMON;
- /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
- if (!type_is_sk_pointer(type))
- goto err_type;
- if (reg->ref_obj_id) {
- if (meta->ref_obj_id) {
- verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
- regno, reg->ref_obj_id,
- meta->ref_obj_id);
- return -EFAULT;
- }
- meta->ref_obj_id = reg->ref_obj_id;
- }
- } else if (arg_type == ARG_PTR_TO_SOCKET ||
- arg_type == ARG_PTR_TO_SOCKET_OR_NULL) {
- expected_type = PTR_TO_SOCKET;
- if (!(register_is_null(reg) &&
- arg_type == ARG_PTR_TO_SOCKET_OR_NULL)) {
- if (type != expected_type)
- goto err_type;
- }
- } else if (arg_type == ARG_PTR_TO_BTF_ID) {
- expected_type = PTR_TO_BTF_ID;
- if (type != expected_type)
- goto err_type;
- if (!fn->check_btf_id) {
- if (reg->btf_id != meta->btf_id) {
- verbose(env, "Helper has type %s got %s in R%d\n",
- kernel_type_name(meta->btf_id),
- kernel_type_name(reg->btf_id), regno);
-
- return -EACCES;
- }
- } else if (!fn->check_btf_id(reg->btf_id, arg)) {
- verbose(env, "Helper does not support %s in R%d\n",
- kernel_type_name(reg->btf_id), regno);
+ err = resolve_map_arg_type(env, meta, &arg_type);
+ if (err)
+ return err;
+ }
- return -EACCES;
- }
- if (!tnum_is_const(reg->var_off) || reg->var_off.value || reg->off) {
- verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
- regno);
- return -EACCES;
- }
- } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
- if (meta->func_id == BPF_FUNC_spin_lock) {
- if (process_spin_lock(env, regno, true))
- return -EACCES;
- } else if (meta->func_id == BPF_FUNC_spin_unlock) {
- if (process_spin_lock(env, regno, false))
- return -EACCES;
- } else {
- verbose(env, "verifier internal error\n");
- return -EFAULT;
- }
- } else if (arg_type_is_mem_ptr(arg_type)) {
- expected_type = PTR_TO_STACK;
- /* One exception here. In case function allows for NULL to be
- * passed in as argument, it's a SCALAR_VALUE type. Final test
- * happens during stack boundary checking.
+ if (register_is_null(reg) && arg_type_may_be_null(arg_type))
+ /* A NULL register has a SCALAR_VALUE type, so skip
+ * type checking.
*/
- if (register_is_null(reg) &&
- (arg_type == ARG_PTR_TO_MEM_OR_NULL ||
- arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL))
- /* final test in check_stack_boundary() */;
- else if (!type_is_pkt_pointer(type) &&
- type != PTR_TO_MAP_VALUE &&
- type != PTR_TO_MEM &&
- type != PTR_TO_RDONLY_BUF &&
- type != PTR_TO_RDWR_BUF &&
- type != expected_type)
- goto err_type;
- meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM;
- } else if (arg_type_is_alloc_mem_ptr(arg_type)) {
- expected_type = PTR_TO_MEM;
- if (register_is_null(reg) &&
- arg_type == ARG_PTR_TO_ALLOC_MEM_OR_NULL)
- /* final test in check_stack_boundary() */;
- else if (type != expected_type)
- goto err_type;
+ goto skip_type_check;
+
+ err = check_reg_type(env, regno, arg_type, fn->arg_btf_id[arg]);
+ if (err)
+ return err;
+
+ if (type == PTR_TO_CTX) {
+ err = check_ctx_reg(env, reg, regno);
+ if (err < 0)
+ return err;
+ }
+
+skip_type_check:
+ if (reg->ref_obj_id) {
if (meta->ref_obj_id) {
verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
regno, reg->ref_obj_id,
@@ -4025,15 +4199,6 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
return -EFAULT;
}
meta->ref_obj_id = reg->ref_obj_id;
- } else if (arg_type_is_int_ptr(arg_type)) {
- expected_type = PTR_TO_STACK;
- if (!type_is_pkt_pointer(type) &&
- type != PTR_TO_MAP_VALUE &&
- type != expected_type)
- goto err_type;
- } else {
- verbose(env, "unsupported arg_type %d\n", arg_type);
- return -EFAULT;
}
if (arg_type == ARG_CONST_MAP_PTR) {
@@ -4072,6 +4237,28 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
err = check_helper_mem_access(env, regno,
meta->map_ptr->value_size, false,
meta);
+ } else if (arg_type == ARG_PTR_TO_PERCPU_BTF_ID) {
+ if (!reg->btf_id) {
+ verbose(env, "Helper has invalid btf_id in R%d\n", regno);
+ return -EACCES;
+ }
+ meta->ret_btf_id = reg->btf_id;
+ } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
+ if (meta->func_id == BPF_FUNC_spin_lock) {
+ if (process_spin_lock(env, regno, true))
+ return -EACCES;
+ } else if (meta->func_id == BPF_FUNC_spin_unlock) {
+ if (process_spin_lock(env, regno, false))
+ return -EACCES;
+ } else {
+ verbose(env, "verifier internal error\n");
+ return -EFAULT;
+ }
+ } else if (arg_type_is_mem_ptr(arg_type)) {
+ /* The access to this pointer is only checked when we hit the
+ * next is_mem_size argument below.
+ */
+ meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MEM);
} else if (arg_type_is_mem_size(arg_type)) {
bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
@@ -4137,10 +4324,43 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
}
return err;
-err_type:
- verbose(env, "R%d type=%s expected=%s\n", regno,
- reg_type_str[type], reg_type_str[expected_type]);
- return -EACCES;
+}
+
+static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+{
+ enum bpf_attach_type eatype = env->prog->expected_attach_type;
+ enum bpf_prog_type type = resolve_prog_type(env->prog);
+
+ if (func_id != BPF_FUNC_map_update_elem)
+ return false;
+
+ /* It's not possible to get access to a locked struct sock in these
+ * contexts, so updating is safe.
+ */
+ switch (type) {
+ case BPF_PROG_TYPE_TRACING:
+ if (eatype == BPF_TRACE_ITER)
+ return true;
+ break;
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+ case BPF_PROG_TYPE_XDP:
+ case BPF_PROG_TYPE_SK_REUSEPORT:
+ case BPF_PROG_TYPE_FLOW_DISSECTOR:
+ case BPF_PROG_TYPE_SK_LOOKUP:
+ return true;
+ default:
+ break;
+ }
+
+ verbose(env, "cannot update sockmap in this context\n");
+ return false;
+}
+
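
A hedged sketch of one newly-allowed update path, a TCP socket iterator feeding a sockmap; the ctx layout and map definition follow the selftests of this era, and all names are illustrative:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 64);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u64));
} sock_map SEC(".maps");

SEC("iter/tcp")
int add_sockets(struct bpf_iter__tcp *ctx)
{
	struct sock_common *skc = ctx->sk_common;
	__u32 key = 0;

	if (!skc)
		return 0;
	/* the value passed here is a PTR_TO_BTF_ID socket, not a plain map
	 * value; resolve_map_arg_type() above rewrites the expected arg
	 * type to ARG_PTR_TO_BTF_ID_SOCK_COMMON for sockmap/sockhash
	 */
	bpf_map_update_elem(&sock_map, &key, skc, BPF_NOEXIST);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";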
+static bool allow_tail_call_in_subprogs(struct bpf_verifier_env *env)
+{
+ return env->prog->jit_requested && IS_ENABLED(CONFIG_X86_64);
}
static int check_map_func_compatibility(struct bpf_verifier_env *env,
@@ -4214,7 +4434,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
func_id != BPF_FUNC_sk_select_reuseport &&
- func_id != BPF_FUNC_map_lookup_elem)
+ func_id != BPF_FUNC_map_lookup_elem &&
+ !may_update_sockmap(env, func_id))
goto error;
break;
case BPF_MAP_TYPE_SOCKHASH:
@@ -4223,7 +4444,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
func_id != BPF_FUNC_sk_select_reuseport &&
- func_id != BPF_FUNC_map_lookup_elem)
+ func_id != BPF_FUNC_map_lookup_elem &&
+ !may_update_sockmap(env, func_id))
goto error;
break;
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:
@@ -4242,6 +4464,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_sk_storage_delete)
goto error;
break;
+ case BPF_MAP_TYPE_INODE_STORAGE:
+ if (func_id != BPF_FUNC_inode_storage_get &&
+ func_id != BPF_FUNC_inode_storage_delete)
+ goto error;
+ break;
default:
break;
}
@@ -4251,8 +4478,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
case BPF_FUNC_tail_call:
if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
goto error;
- if (env->subprog_cnt > 1) {
- verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
+ if (env->subprog_cnt > 1 && !allow_tail_call_in_subprogs(env)) {
+ verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
return -EINVAL;
}
break;
@@ -4315,6 +4542,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
goto error;
break;
+ case BPF_FUNC_inode_storage_get:
+ case BPF_FUNC_inode_storage_delete:
+ if (map->map_type != BPF_MAP_TYPE_INODE_STORAGE)
+ goto error;
+ break;
default:
break;
}
@@ -4402,10 +4634,26 @@ static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
return count <= 1;
}
+static bool check_btf_id_ok(const struct bpf_func_proto *fn)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
+ if (fn->arg_type[i] == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
+ return false;
+
+ if (fn->arg_type[i] != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i])
+ return false;
+ }
+
+ return true;
+}
+
static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
{
return check_raw_mode_ok(fn) &&
check_arg_pair_ok(fn) &&
+ check_btf_id_ok(fn) &&
check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
}
@@ -4775,6 +5023,11 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
return -EINVAL;
}
+ if (fn->allowed && !fn->allowed(env->prog)) {
+ verbose(env, "helper call is not allowed in probe\n");
+ return -EINVAL;
+ }
+
/* With LD_ABS/IND some JITs save/restore skb from r1. */
changes_data = bpf_helper_changes_pkt_data(fn->func);
if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
@@ -4796,11 +5049,6 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
meta.func_id = func_id;
/* check args */
for (i = 0; i < 5; i++) {
- if (!fn->check_btf_id) {
- err = btf_resolve_helper_id(&env->log, fn, i);
- if (err > 0)
- meta.btf_id = err;
- }
err = check_func_arg(env, i, &meta, fn);
if (err)
return err;
@@ -4904,6 +5152,35 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
regs[BPF_REG_0].type = PTR_TO_MEM_OR_NULL;
regs[BPF_REG_0].id = ++env->id_gen;
regs[BPF_REG_0].mem_size = meta.mem_size;
+ } else if (fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID_OR_NULL ||
+ fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID) {
+ const struct btf_type *t;
+
+ mark_reg_known_zero(env, regs, BPF_REG_0);
+ t = btf_type_skip_modifiers(btf_vmlinux, meta.ret_btf_id, NULL);
+ if (!btf_type_is_struct(t)) {
+ u32 tsize;
+ const struct btf_type *ret;
+ const char *tname;
+
+ /* resolve the type size of ksym. */
+ ret = btf_resolve_size(btf_vmlinux, t, &tsize);
+ if (IS_ERR(ret)) {
+ tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ verbose(env, "unable to resolve the size of type '%s': %ld\n",
+ tname, PTR_ERR(ret));
+ return -EINVAL;
+ }
+ regs[BPF_REG_0].type =
+ fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
+ PTR_TO_MEM : PTR_TO_MEM_OR_NULL;
+ regs[BPF_REG_0].mem_size = tsize;
+ } else {
+ regs[BPF_REG_0].type =
+ fn->ret_type == RET_PTR_TO_MEM_OR_BTF_ID ?
+ PTR_TO_BTF_ID : PTR_TO_BTF_ID_OR_NULL;
+ regs[BPF_REG_0].btf_id = meta.ret_btf_id;
+ }
} else if (fn->ret_type == RET_PTR_TO_BTF_ID_OR_NULL) {
int ret_btf_id;
@@ -5219,6 +5496,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
dst, reg_type_str[ptr_reg->type]);
return -EACCES;
case CONST_PTR_TO_MAP:
+ /* smin_val represents the known value */
+ if (known && smin_val == 0 && opcode == BPF_ADD)
+ break;
+ fallthrough;
case PTR_TO_PACKET_END:
case PTR_TO_SOCKET:
case PTR_TO_SOCKET_OR_NULL:
@@ -5634,8 +5915,7 @@ static void scalar_min_max_and(struct bpf_reg_state *dst_reg,
u64 umax_val = src_reg->umax_value;
if (src_known && dst_known) {
- __mark_reg_known(dst_reg, dst_reg->var_off.value &
- src_reg->var_off.value);
+ __mark_reg_known(dst_reg, dst_reg->var_off.value);
return;
}
@@ -5705,8 +5985,7 @@ static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
u64 umin_val = src_reg->umin_value;
if (src_known && dst_known) {
- __mark_reg_known(dst_reg, dst_reg->var_off.value |
- src_reg->var_off.value);
+ __mark_reg_known(dst_reg, dst_reg->var_off.value);
return;
}
@@ -5732,6 +6011,67 @@ static void scalar_min_max_or(struct bpf_reg_state *dst_reg,
__update_reg_bounds(dst_reg);
}
+static void scalar32_min_max_xor(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ bool src_known = tnum_subreg_is_const(src_reg->var_off);
+ bool dst_known = tnum_subreg_is_const(dst_reg->var_off);
+ struct tnum var32_off = tnum_subreg(dst_reg->var_off);
+ s32 smin_val = src_reg->s32_min_value;
+
+ /* Assuming scalar_min_max_xor will be called so it is safe
+ * to skip updating the register for the known case.
+ */
+ if (src_known && dst_known)
+ return;
+
+ /* We get both minimum and maximum from the var32_off. */
+ dst_reg->u32_min_value = var32_off.value;
+ dst_reg->u32_max_value = var32_off.value | var32_off.mask;
+
+ if (dst_reg->s32_min_value >= 0 && smin_val >= 0) {
+ /* XORing two non-negative numbers gives a non-negative result,
+ * so it is safe to cast the u32 result into s32.
+ */
+ dst_reg->s32_min_value = dst_reg->u32_min_value;
+ dst_reg->s32_max_value = dst_reg->u32_max_value;
+ } else {
+ dst_reg->s32_min_value = S32_MIN;
+ dst_reg->s32_max_value = S32_MAX;
+ }
+}
+
+static void scalar_min_max_xor(struct bpf_reg_state *dst_reg,
+ struct bpf_reg_state *src_reg)
+{
+ bool src_known = tnum_is_const(src_reg->var_off);
+ bool dst_known = tnum_is_const(dst_reg->var_off);
+ s64 smin_val = src_reg->smin_value;
+
+ if (src_known && dst_known) {
+ /* dst_reg->var_off.value has been updated earlier */
+ __mark_reg_known(dst_reg, dst_reg->var_off.value);
+ return;
+ }
+
+ /* We get both minimum and maximum from the var_off. */
+ dst_reg->umin_value = dst_reg->var_off.value;
+ dst_reg->umax_value = dst_reg->var_off.value | dst_reg->var_off.mask;
+
+ if (dst_reg->smin_value >= 0 && smin_val >= 0) {
+ /* XORing two non-negative numbers gives a non-negative result,
+ * so it is safe to cast the u64 result into s64.
+ */
+ dst_reg->smin_value = dst_reg->umin_value;
+ dst_reg->smax_value = dst_reg->umax_value;
+ } else {
+ dst_reg->smin_value = S64_MIN;
+ dst_reg->smax_value = S64_MAX;
+ }
+
+ __update_reg_bounds(dst_reg);
+}
+
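
Both functions lean on the tnum result computed in adjust_scalar_min_max_vals(). A hedged restatement of that algebra, mirroring kernel/bpf/tnum.c:

typedef unsigned long long u64;

struct tnum { u64 value; u64 mask; };	/* a set mask bit = that bit is unknown */

static struct tnum tnum_xor(struct tnum a, struct tnum b)
{
	u64 v = a.value ^ b.value;	/* XOR of the known-bit patterns */
	u64 mu = a.mask | b.mask;	/* unknown if unknown in either operand */

	return (struct tnum){ .value = v & ~mu, .mask = mu };
}

The unsigned bounds used above then fall out directly: umin is var_off.value (all unknown bits taken as 0) and umax is var_off.value | var_off.mask (all unknown bits taken as 1).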
static void __scalar32_min_max_lsh(struct bpf_reg_state *dst_reg,
u64 umin_val, u64 umax_val)
{
@@ -6040,6 +6380,11 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
scalar32_min_max_or(dst_reg, &src_reg);
scalar_min_max_or(dst_reg, &src_reg);
break;
+ case BPF_XOR:
+ dst_reg->var_off = tnum_xor(dst_reg->var_off, src_reg.var_off);
+ scalar32_min_max_xor(dst_reg, &src_reg);
+ scalar_min_max_xor(dst_reg, &src_reg);
+ break;
case BPF_LSH:
if (umax_val >= insn_bitness) {
/* Shifts greater than 31 or 63 are undefined.
@@ -6111,6 +6456,11 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
src_reg = NULL;
if (dst_reg->type != SCALAR_VALUE)
ptr_reg = dst_reg;
+ else
+ /* Make sure the ID is cleared, otherwise dst_reg min/max could be
+ * incorrectly propagated into other registers by find_equal_scalars()
+ */
+ dst_reg->id = 0;
if (BPF_SRC(insn->code) == BPF_X) {
src_reg = &regs[insn->src_reg];
if (src_reg->type != SCALAR_VALUE) {
@@ -6244,6 +6594,12 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
/* case: R1 = R2
* copy register state to dest reg
*/
+ if (src_reg->type == SCALAR_VALUE && !src_reg->id)
+ /* Assign src and dst registers the same ID
+ * that will be used by find_equal_scalars()
+ * to propagate min/max range.
+ */
+ src_reg->id = ++env->id_gen;
*dst_reg = *src_reg;
dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = DEF_NOT_SUBREG;
@@ -6256,6 +6612,11 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
*dst_reg = *src_reg;
+ /* Make sure the ID is cleared, otherwise
+ * dst_reg min/max could be incorrectly
+ * propagated into src_reg by find_equal_scalars()
+ */
+ dst_reg->id = 0;
dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = env->insn_idx + 1;
} else {
@@ -6646,14 +7007,18 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
struct bpf_reg_state *reg =
opcode == BPF_JEQ ? true_reg : false_reg;
- /* For BPF_JEQ, if this is false we know nothing Jon Snow, but
- * if it is true we know the value for sure. Likewise for
- * BPF_JNE.
+ /* JEQ/JNE comparison doesn't change the register equivalence.
+ * r1 = r2;
+ * if (r1 == 42) goto label;
+ * ...
+ * label: // here both r1 and r2 are known to be 42.
+ *
+ * Hence when marking a register as known, preserve its ID.
*/
if (is_jmp32)
__mark_reg32_known(reg, val32);
else
- __mark_reg_known(reg, val);
+ ___mark_reg_known(reg, val);
break;
}
case BPF_JSET:
@@ -7044,6 +7409,30 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn,
return true;
}
+static void find_equal_scalars(struct bpf_verifier_state *vstate,
+ struct bpf_reg_state *known_reg)
+{
+ struct bpf_func_state *state;
+ struct bpf_reg_state *reg;
+ int i, j;
+
+ for (i = 0; i <= vstate->curframe; i++) {
+ state = vstate->frame[i];
+ for (j = 0; j < MAX_BPF_REG; j++) {
+ reg = &state->regs[j];
+ if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
+ *reg = *known_reg;
+ }
+
+ bpf_for_each_spilled_reg(j, state, reg) {
+ if (!reg)
+ continue;
+ if (reg->type == SCALAR_VALUE && reg->id == known_reg->id)
+ *reg = *known_reg;
+ }
+ }
+}
+
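
A hedged BPF C illustration of what the propagation buys (whether 'a' and 'b' land in distinct registers is compiler-dependent):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int bounds_propagate(struct __sk_buff *skb)
{
	__u32 a = skb->len;
	__u32 b = a;		/* copy: 'a' and 'b' now share one scalar ID */

	if (a < 64)
		/* the range proven for 'a' is copied to 'b' by
		 * find_equal_scalars(), so 'b' is bounded here too
		 */
		return b;	/* provably in [0, 63] */
	return 0;
}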
static int check_cond_jmp_op(struct bpf_verifier_env *env,
struct bpf_insn *insn, int *insn_idx)
{
@@ -7172,6 +7561,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
reg_combine_min_max(&other_branch_regs[insn->src_reg],
&other_branch_regs[insn->dst_reg],
src_reg, dst_reg, opcode);
+ if (src_reg->id &&
+ !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) {
+ find_equal_scalars(this_branch, src_reg);
+ find_equal_scalars(other_branch, &other_branch_regs[insn->src_reg]);
+ }
+
}
} else if (dst_reg->type == SCALAR_VALUE) {
reg_set_min_max(&other_branch_regs[insn->dst_reg],
@@ -7179,6 +7574,12 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
opcode, is_jmp32);
}
+ if (dst_reg->type == SCALAR_VALUE && dst_reg->id &&
+ !WARN_ON_ONCE(dst_reg->id != other_branch_regs[insn->dst_reg].id)) {
+ find_equal_scalars(this_branch, dst_reg);
+ find_equal_scalars(other_branch, &other_branch_regs[insn->dst_reg]);
+ }
+
/* detect if R == 0 where R is returned from bpf_map_lookup_elem().
* NOTE: these optimizations below are related with pointer comparison
* which will never be JMP32.
@@ -7210,6 +7611,7 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
{
struct bpf_insn_aux_data *aux = cur_aux(env);
struct bpf_reg_state *regs = cur_regs(env);
+ struct bpf_reg_state *dst_reg;
struct bpf_map *map;
int err;
@@ -7226,25 +7628,45 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (err)
return err;
+ dst_reg = &regs[insn->dst_reg];
if (insn->src_reg == 0) {
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
- regs[insn->dst_reg].type = SCALAR_VALUE;
+ dst_reg->type = SCALAR_VALUE;
__mark_reg_known(&regs[insn->dst_reg], imm);
return 0;
}
+ if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
+ mark_reg_known_zero(env, regs, insn->dst_reg);
+
+ dst_reg->type = aux->btf_var.reg_type;
+ switch (dst_reg->type) {
+ case PTR_TO_MEM:
+ dst_reg->mem_size = aux->btf_var.mem_size;
+ break;
+ case PTR_TO_BTF_ID:
+ case PTR_TO_PERCPU_BTF_ID:
+ dst_reg->btf_id = aux->btf_var.btf_id;
+ break;
+ default:
+ verbose(env, "bpf verifier is misconfigured\n");
+ return -EFAULT;
+ }
+ return 0;
+ }
+
map = env->used_maps[aux->map_index];
mark_reg_known_zero(env, regs, insn->dst_reg);
- regs[insn->dst_reg].map_ptr = map;
+ dst_reg->map_ptr = map;
if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
- regs[insn->dst_reg].type = PTR_TO_MAP_VALUE;
- regs[insn->dst_reg].off = aux->map_off;
+ dst_reg->type = PTR_TO_MAP_VALUE;
+ dst_reg->off = aux->map_off;
if (map_value_has_spin_lock(map))
- regs[insn->dst_reg].id = ++env->id_gen;
+ dst_reg->id = ++env->id_gen;
} else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
- regs[insn->dst_reg].type = CONST_PTR_TO_MAP;
+ dst_reg->type = CONST_PTR_TO_MAP;
} else {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
@@ -7287,7 +7709,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
u8 mode = BPF_MODE(insn->code);
int i, err;
- if (!may_access_skb(env->prog->type)) {
+ if (!may_access_skb(resolve_prog_type(env->prog))) {
verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n");
return -EINVAL;
}
@@ -7297,18 +7719,6 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EINVAL;
}
- if (env->subprog_cnt > 1) {
- /* when program has LD_ABS insn JITs and interpreter assume
- * that r1 == ctx == skb which is not the case for callees
- * that can have arbitrary arguments. It's problematic
- * for main prog as well since JITs would need to analyze
- * all functions in order to make proper register save/restore
- * decisions in the main prog. Hence disallow LD_ABS with calls
- */
- verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n");
- return -EINVAL;
- }
-
if (insn->dst_reg != BPF_REG_0 || insn->off != 0 ||
BPF_SIZE(insn->code) == BPF_DW ||
(mode == BPF_ABS && insn->src_reg != BPF_REG_0)) {
@@ -7375,11 +7785,12 @@ static int check_return_code(struct bpf_verifier_env *env)
const struct bpf_prog *prog = env->prog;
struct bpf_reg_state *reg;
struct tnum range = tnum_range(0, 1);
+ enum bpf_prog_type prog_type = resolve_prog_type(env->prog);
int err;
/* LSM and struct_ops func-ptr's return type could be "void" */
- if ((env->prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
- env->prog->type == BPF_PROG_TYPE_LSM) &&
+ if ((prog_type == BPF_PROG_TYPE_STRUCT_OPS ||
+ prog_type == BPF_PROG_TYPE_LSM) &&
!prog->aux->attach_func_proto->type)
return 0;
@@ -7398,7 +7809,7 @@ static int check_return_code(struct bpf_verifier_env *env)
return -EACCES;
}
- switch (env->prog->type) {
+ switch (prog_type) {
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG ||
@@ -7718,6 +8129,23 @@ err_free:
return ret;
}
+static int check_abnormal_return(struct bpf_verifier_env *env)
+{
+ int i;
+
+ for (i = 1; i < env->subprog_cnt; i++) {
+ if (env->subprog_info[i].has_ld_abs) {
+ verbose(env, "LD_ABS is not allowed in subprogs without BTF\n");
+ return -EINVAL;
+ }
+ if (env->subprog_info[i].has_tail_call) {
+ verbose(env, "tail_call is not allowed in subprogs without BTF\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
/* The minimum supported BTF func info size */
#define MIN_BPF_FUNCINFO_SIZE 8
#define MAX_FUNCINFO_REC_SIZE 252
@@ -7726,20 +8154,24 @@ static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
+ const struct btf_type *type, *func_proto, *ret_type;
u32 i, nfuncs, urec_size, min_size;
u32 krec_size = sizeof(struct bpf_func_info);
struct bpf_func_info *krecord;
struct bpf_func_info_aux *info_aux = NULL;
- const struct btf_type *type;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
u32 prev_offset = 0;
+ bool scalar_return;
int ret = -ENOMEM;
nfuncs = attr->func_info_cnt;
- if (!nfuncs)
+ if (!nfuncs) {
+ if (check_abnormal_return(env))
+ return -EINVAL;
return 0;
+ }
if (nfuncs != env->subprog_cnt) {
verbose(env, "number of funcs in func_info doesn't match number of subprogs\n");
@@ -7787,25 +8219,23 @@ static int check_btf_func(struct bpf_verifier_env *env,
}
/* check insn_off */
+ ret = -EINVAL;
if (i == 0) {
if (krecord[i].insn_off) {
verbose(env,
"nonzero insn_off %u for the first func info record",
krecord[i].insn_off);
- ret = -EINVAL;
goto err_free;
}
} else if (krecord[i].insn_off <= prev_offset) {
verbose(env,
"same or smaller insn offset (%u) than previous func info record (%u)",
krecord[i].insn_off, prev_offset);
- ret = -EINVAL;
goto err_free;
}
if (env->subprog_info[i].start != krecord[i].insn_off) {
verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
- ret = -EINVAL;
goto err_free;
}
@@ -7814,10 +8244,26 @@ static int check_btf_func(struct bpf_verifier_env *env,
if (!type || !btf_type_is_func(type)) {
verbose(env, "invalid type id %d in func info",
krecord[i].type_id);
- ret = -EINVAL;
goto err_free;
}
info_aux[i].linkage = BTF_INFO_VLEN(type->info);
+
+ func_proto = btf_type_by_id(btf, type->type);
+ if (unlikely(!func_proto || !btf_type_is_func_proto(func_proto)))
+ /* btf_func_check() already verified it during BTF load */
+ goto err_free;
+ ret_type = btf_type_skip_modifiers(btf, func_proto->type, NULL);
+ scalar_return =
+ btf_type_is_small_int(ret_type) || btf_type_is_enum(ret_type);
+ if (i && !scalar_return && env->subprog_info[i].has_ld_abs) {
+ verbose(env, "LD_ABS is only allowed in functions that return 'int'.\n");
+ goto err_free;
+ }
+ if (i && !scalar_return && env->subprog_info[i].has_tail_call) {
+ verbose(env, "tail_call is only allowed in functions that return 'int'.\n");
+ goto err_free;
+ }
+
prev_offset = krecord[i].insn_off;
urecord += urec_size;
}
@@ -7978,8 +8424,11 @@ static int check_btf_info(struct bpf_verifier_env *env,
struct btf *btf;
int err;
- if (!attr->func_info_cnt && !attr->line_info_cnt)
+ if (!attr->func_info_cnt && !attr->line_info_cnt) {
+ if (check_abnormal_return(env))
+ return -EINVAL;
return 0;
+ }
btf = btf_get_by_fd(attr->prog_btf_fd);
if (IS_ERR(btf))
@@ -9119,6 +9568,92 @@ process_bpf_exit:
return 0;
}
+/* replace pseudo btf_id with kernel symbol address */
+static int check_pseudo_btf_id(struct bpf_verifier_env *env,
+ struct bpf_insn *insn,
+ struct bpf_insn_aux_data *aux)
+{
+ u32 datasec_id, type, id = insn->imm;
+ const struct btf_var_secinfo *vsi;
+ const struct btf_type *datasec;
+ const struct btf_type *t;
+ const char *sym_name;
+ bool percpu = false;
+ u64 addr;
+ int i;
+
+ if (!btf_vmlinux) {
+ verbose(env, "kernel is missing BTF, make sure CONFIG_DEBUG_INFO_BTF=y is specified in Kconfig.\n");
+ return -EINVAL;
+ }
+
+ if (insn[1].imm != 0) {
+ verbose(env, "reserved field (insn[1].imm) is used in pseudo_btf_id ldimm64 insn.\n");
+ return -EINVAL;
+ }
+
+ t = btf_type_by_id(btf_vmlinux, id);
+ if (!t) {
+ verbose(env, "ldimm64 insn specifies invalid btf_id %d.\n", id);
+ return -ENOENT;
+ }
+
+ if (!btf_type_is_var(t)) {
+ verbose(env, "pseudo btf_id %d in ldimm64 isn't KIND_VAR.\n",
+ id);
+ return -EINVAL;
+ }
+
+ sym_name = btf_name_by_offset(btf_vmlinux, t->name_off);
+ addr = kallsyms_lookup_name(sym_name);
+ if (!addr) {
+ verbose(env, "ldimm64 failed to find the address for kernel symbol '%s'.\n",
+ sym_name);
+ return -ENOENT;
+ }
+
+ datasec_id = btf_find_by_name_kind(btf_vmlinux, ".data..percpu",
+ BTF_KIND_DATASEC);
+ if (datasec_id > 0) {
+ datasec = btf_type_by_id(btf_vmlinux, datasec_id);
+ for_each_vsi(i, datasec, vsi) {
+ if (vsi->type == id) {
+ percpu = true;
+ break;
+ }
+ }
+ }
+
+ insn[0].imm = (u32)addr;
+ insn[1].imm = addr >> 32;
+
+ type = t->type;
+ t = btf_type_skip_modifiers(btf_vmlinux, type, NULL);
+ if (percpu) {
+ aux->btf_var.reg_type = PTR_TO_PERCPU_BTF_ID;
+ aux->btf_var.btf_id = type;
+ } else if (!btf_type_is_struct(t)) {
+ const struct btf_type *ret;
+ const char *tname;
+ u32 tsize;
+
+ /* resolve the type size of ksym. */
+ ret = btf_resolve_size(btf_vmlinux, t, &tsize);
+ if (IS_ERR(ret)) {
+ tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+ verbose(env, "ldimm64 unable to resolve the size of type '%s': %ld\n",
+ tname, PTR_ERR(ret));
+ return -EINVAL;
+ }
+ aux->btf_var.reg_type = PTR_TO_MEM;
+ aux->btf_var.mem_size = tsize;
+ } else {
+ aux->btf_var.reg_type = PTR_TO_BTF_ID;
+ aux->btf_var.btf_id = type;
+ }
+ return 0;
+}
+
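
This is the kernel half of typed ksyms: libbpf emits a BPF_PSEUDO_BTF_ID ldimm64 for a __ksym extern and the verifier resolves it here. A hedged sketch of the BPF-side usage, assuming vmlinux.h declares struct rq and the bpf_per_cpu_ptr() helper from this series is available:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern const struct rq runqueues __ksym;	/* lives in .data..percpu */

__u32 cpu0_nr_running;

SEC("raw_tp/sys_enter")
int read_rq(const void *ctx)
{
	struct rq *rq;

	/* &runqueues loads as PTR_TO_PERCPU_BTF_ID via the ldimm64 rewrite
	 * above; bpf_per_cpu_ptr() turns it into PTR_TO_BTF_ID or NULL
	 */
	rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
	if (rq)
		cpu0_nr_running = rq->nr_running;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";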
static int check_map_prealloc(struct bpf_map *map)
{
return (map->map_type != BPF_MAP_TYPE_HASH &&
@@ -9154,6 +9689,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
struct bpf_prog *prog)
{
+ enum bpf_prog_type prog_type = resolve_prog_type(prog);
/*
* Validate that trace type programs use preallocated hash maps.
*
@@ -9171,8 +9707,8 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
* now, but warnings are emitted so developers are made aware of
* the unsafety and can fix their programs before this is enforced.
*/
- if (is_tracing_prog_type(prog->type) && !is_preallocated_map(map)) {
- if (prog->type == BPF_PROG_TYPE_PERF_EVENT) {
+ if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
+ if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
@@ -9184,8 +9720,8 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
}
- if ((is_tracing_prog_type(prog->type) ||
- prog->type == BPF_PROG_TYPE_SOCKET_FILTER) &&
+ if ((is_tracing_prog_type(prog_type) ||
+ prog_type == BPF_PROG_TYPE_SOCKET_FILTER) &&
map_value_has_spin_lock(map)) {
verbose(env, "tracing progs cannot use bpf_spin_lock yet\n");
return -EINVAL;
@@ -9202,6 +9738,23 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
return -EINVAL;
}
+ if (prog->aux->sleepable)
+ switch (map->map_type) {
+ case BPF_MAP_TYPE_HASH:
+ case BPF_MAP_TYPE_LRU_HASH:
+ case BPF_MAP_TYPE_ARRAY:
+ if (!is_preallocated_map(map)) {
+ verbose(env,
+ "Sleepable programs can only use preallocated hash maps\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ verbose(env,
+ "Sleepable programs can only use array and hash maps\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -9211,10 +9764,14 @@ static bool bpf_map_is_cgroup_storage(struct bpf_map *map)
map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
}
-/* look for pseudo eBPF instructions that access map FDs and
- * replace them with actual map pointers
+/* find and rewrite pseudo imm in ld_imm64 instructions:
+ *
+ * 1. if it accesses a map FD, replace it with the actual map pointer.
+ * 2. if it accesses the btf_id of a VAR, replace it with a pointer to the var.
+ *
+ * NOTE: btf_vmlinux is required for converting pseudo btf_id.
*/
-static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
+static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
{
struct bpf_insn *insn = env->prog->insnsi;
int insn_cnt = env->prog->len;
@@ -9255,6 +9812,14 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
/* valid generic load 64-bit imm */
goto next_insn;
+ if (insn[0].src_reg == BPF_PSEUDO_BTF_ID) {
+ aux = &env->insn_aux_data[i];
+ err = check_pseudo_btf_id(env, insn, aux);
+ if (err)
+ return err;
+ goto next_insn;
+ }
+
/* In final convert_pseudo_ld_imm64() step, this is
* converted into regular 64-bit imm load insn.
*/
@@ -9436,6 +10001,18 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
}
}
+static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
+{
+ struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
+ int i, sz = prog->aux->size_poke_tab;
+ struct bpf_jit_poke_descriptor *desc;
+
+ for (i = 0; i < sz; i++) {
+ desc = &tab[i];
+ desc->insn_idx += len - 1;
+ }
+}
+
static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off,
const struct bpf_insn *patch, u32 len)
{
@@ -9452,6 +10029,7 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
if (adjust_insn_aux_data(env, new_prog, off, len))
return NULL;
adjust_subprog_starts(env, off, len);
+ adjust_poke_descs(new_prog, len);
return new_prog;
}
@@ -9897,7 +10475,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn->code = BPF_LDX | BPF_PROBE_MEM |
BPF_SIZE((insn)->code);
env->prog->aux->num_exentries++;
- } else if (env->prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
+ } else if (resolve_prog_type(env->prog) != BPF_PROG_TYPE_STRUCT_OPS) {
verbose(env, "Writes through BTF pointers are not allowed\n");
return -EINVAL;
}
@@ -9982,6 +10560,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
{
struct bpf_prog *prog = env->prog, **func, *tmp;
int i, j, subprog_start, subprog_end = 0, len, subprog;
+ struct bpf_map *map_ptr;
struct bpf_insn *insn;
void *old_bpf_func;
int err, num_exentries;
@@ -10049,6 +10628,31 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func[i]->aux->btf = prog->aux->btf;
func[i]->aux->func_info = prog->aux->func_info;
+ for (j = 0; j < prog->aux->size_poke_tab; j++) {
+ u32 insn_idx = prog->aux->poke_tab[j].insn_idx;
+ int ret;
+
+ if (!(insn_idx >= subprog_start &&
+ insn_idx <= subprog_end))
+ continue;
+
+ ret = bpf_jit_add_poke_descriptor(func[i],
+ &prog->aux->poke_tab[j]);
+ if (ret < 0) {
+ verbose(env, "adding tail call poke descriptor failed\n");
+ goto out_free;
+ }
+
+ func[i]->insnsi[insn_idx - subprog_start].imm = ret + 1;
+
+ map_ptr = func[i]->aux->poke_tab[ret].tail_call.map;
+ ret = map_ptr->ops->map_poke_track(map_ptr, func[i]->aux);
+ if (ret < 0) {
+ verbose(env, "tracking tail call prog failed\n");
+ goto out_free;
+ }
+ }
+
/* Use bpf_prog_F_tag to indicate functions in stack traces.
* Long term would need debug info to populate names
*/
@@ -10067,6 +10671,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
num_exentries++;
}
func[i]->aux->num_exentries = num_exentries;
+ func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
func[i] = bpf_int_jit_compile(func[i]);
if (!func[i]->jited) {
err = -ENOTSUPP;
@@ -10074,6 +10679,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
}
cond_resched();
}
+
+ /* Untrack the main program's aux structs so that during map_poke_run()
+ * we will not stumble upon the unfilled poke descriptors; each
+ * of the main program's poke descs was distributed across the subprogs
+ * and tracked onto its map, so we can be sure that none of them will
+ * be missed after the operation below.
+ */
+ for (i = 0; i < prog->aux->size_poke_tab; i++) {
+ map_ptr = prog->aux->poke_tab[i].tail_call.map;
+
+ map_ptr->ops->map_poke_untrack(map_ptr, prog->aux);
+ }
+
/* at this point all bpf functions were successfully JITed
* now populate all bpf_calls with correct addresses and
* run last pass of JIT
@@ -10142,9 +10760,16 @@ static int jit_subprogs(struct bpf_verifier_env *env)
bpf_prog_free_unused_jited_linfo(prog);
return 0;
out_free:
- for (i = 0; i < env->subprog_cnt; i++)
- if (func[i])
- bpf_jit_free(func[i]);
+ for (i = 0; i < env->subprog_cnt; i++) {
+ if (!func[i])
+ continue;
+
+ for (j = 0; j < func[i]->aux->size_poke_tab; j++) {
+ map_ptr = func[i]->aux->poke_tab[j].tail_call.map;
+ map_ptr->ops->map_poke_untrack(map_ptr, func[i]->aux);
+ }
+ bpf_jit_free(func[i]);
+ }
kfree(func);
out_undo_insn:
/* cleanup main prog to be interpreted */
@@ -10178,6 +10803,13 @@ static int fixup_call_args(struct bpf_verifier_env *env)
return err;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ if (env->subprog_cnt > 1 && env->prog->aux->tail_call_reachable) {
+ /* When the JIT fails, progs with bpf2bpf calls and tail_calls
+ * have to be rejected, since the interpreter doesn't support them yet.
+ */
+ verbose(env, "tail_calls are not allowed in non-JITed programs with bpf-to-bpf calls\n");
+ return -EINVAL;
+ }
for (i = 0; i < prog->len; i++, insn++) {
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
@@ -10341,8 +10973,9 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
* the program array.
*/
prog->cb_access = 1;
- env->prog->aux->stack_depth = MAX_BPF_STACK;
- env->prog->aux->max_pkt_offset = MAX_PACKET_OFF;
+ if (!allow_tail_call_in_subprogs(env))
+ prog->aux->stack_depth = MAX_BPF_STACK;
+ prog->aux->max_pkt_offset = MAX_PACKET_OFF;
/* mark bpf_tail_call as different opcode to avoid
* conditional branch in the interpeter for every normal
@@ -10362,6 +10995,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
.reason = BPF_POKE_REASON_TAIL_CALL,
.tail_call.map = BPF_MAP_PTR(aux->map_ptr_state),
.tail_call.key = bpf_map_key_immediate(aux),
+ .insn_idx = i + delta,
};
ret = bpf_jit_add_poke_descriptor(prog, &desc);
@@ -10427,7 +11061,9 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
if (insn->imm == BPF_FUNC_map_lookup_elem &&
ops->map_gen_lookup) {
cnt = ops->map_gen_lookup(map_ptr, insn_buf);
- if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+ if (cnt == -EOPNOTSUPP)
+ goto patch_map_ops_generic;
+ if (cnt <= 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose(env, "bpf verifier is misconfigured\n");
return -EINVAL;
}
@@ -10457,7 +11093,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
(int (*)(struct bpf_map *map, void *value))NULL));
BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
(int (*)(struct bpf_map *map, void *value))NULL));
-
+patch_map_ops_generic:
switch (insn->imm) {
case BPF_FUNC_map_lookup_elem:
insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) -
@@ -10810,59 +11446,79 @@ static int check_struct_ops_btf_id(struct bpf_verifier_env *env)
}
#define SECURITY_PREFIX "security_"
-static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr)
+static int check_attach_modify_return(unsigned long addr, const char *func_name)
{
if (within_error_injection_list(addr) ||
- !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name,
- sizeof(SECURITY_PREFIX) - 1))
+ !strncmp(SECURITY_PREFIX, func_name, sizeof(SECURITY_PREFIX) - 1))
return 0;
return -EINVAL;
}
-static int check_attach_btf_id(struct bpf_verifier_env *env)
+/* non-exhaustive list of sleepable bpf_lsm_*() functions */
+BTF_SET_START(btf_sleepable_lsm_hooks)
+#ifdef CONFIG_BPF_LSM
+BTF_ID(func, bpf_lsm_bprm_committed_creds)
+#else
+BTF_ID_UNUSED
+#endif
+BTF_SET_END(btf_sleepable_lsm_hooks)
+
+static int check_sleepable_lsm_hook(u32 btf_id)
+{
+ return btf_id_set_contains(&btf_sleepable_lsm_hooks, btf_id);
+}
+
+/* list of non-sleepable functions that are otherwise on the
+ * ALLOW_ERROR_INJECTION list
+ */
+BTF_SET_START(btf_non_sleepable_error_inject)
+/* Three functions below can be called from sleepable and non-sleepable context.
+ * Assume non-sleepable from bpf safety point of view.
+ */
+BTF_ID(func, __add_to_page_cache_locked)
+BTF_ID(func, should_fail_alloc_page)
+BTF_ID(func, should_failslab)
+BTF_SET_END(btf_non_sleepable_error_inject)
+
+static int check_non_sleepable_error_inject(u32 btf_id)
+{
+ return btf_id_set_contains(&btf_non_sleepable_error_inject, btf_id);
+}
+
+int bpf_check_attach_target(struct bpf_verifier_log *log,
+ const struct bpf_prog *prog,
+ const struct bpf_prog *tgt_prog,
+ u32 btf_id,
+ struct bpf_attach_target_info *tgt_info)
{
- struct bpf_prog *prog = env->prog;
bool prog_extension = prog->type == BPF_PROG_TYPE_EXT;
- struct bpf_prog *tgt_prog = prog->aux->linked_prog;
- u32 btf_id = prog->aux->attach_btf_id;
const char prefix[] = "btf_trace_";
- struct btf_func_model fmodel;
int ret = 0, subprog = -1, i;
- struct bpf_trampoline *tr;
const struct btf_type *t;
bool conservative = true;
const char *tname;
struct btf *btf;
- long addr;
- u64 key;
-
- if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
- return check_struct_ops_btf_id(env);
-
- if (prog->type != BPF_PROG_TYPE_TRACING &&
- prog->type != BPF_PROG_TYPE_LSM &&
- !prog_extension)
- return 0;
+ long addr = 0;
if (!btf_id) {
- verbose(env, "Tracing programs must provide btf_id\n");
+ bpf_log(log, "Tracing programs must provide btf_id\n");
return -EINVAL;
}
- btf = bpf_prog_get_target_btf(prog);
+ btf = tgt_prog ? tgt_prog->aux->btf : btf_vmlinux;
if (!btf) {
- verbose(env,
+ bpf_log(log,
"FENTRY/FEXIT program can only be attached to another program annotated with BTF\n");
return -EINVAL;
}
t = btf_type_by_id(btf, btf_id);
if (!t) {
- verbose(env, "attach_btf_id %u is invalid\n", btf_id);
+ bpf_log(log, "attach_btf_id %u is invalid\n", btf_id);
return -EINVAL;
}
tname = btf_name_by_offset(btf, t->name_off);
if (!tname) {
- verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id);
+ bpf_log(log, "attach_btf_id %u doesn't have a name\n", btf_id);
return -EINVAL;
}
if (tgt_prog) {
@@ -10874,26 +11530,24 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
break;
}
if (subprog == -1) {
- verbose(env, "Subprog %s doesn't exist\n", tname);
+ bpf_log(log, "Subprog %s doesn't exist\n", tname);
return -EINVAL;
}
conservative = aux->func_info_aux[subprog].unreliable;
if (prog_extension) {
if (conservative) {
- verbose(env,
+ bpf_log(log,
"Cannot replace static functions\n");
return -EINVAL;
}
if (!prog->jit_requested) {
- verbose(env,
+ bpf_log(log,
"Extension programs should be JITed\n");
return -EINVAL;
}
- env->ops = bpf_verifier_ops[tgt_prog->type];
- prog->expected_attach_type = tgt_prog->expected_attach_type;
}
if (!tgt_prog->jited) {
- verbose(env, "Can attach to only JITed progs\n");
+ bpf_log(log, "Can attach to only JITed progs\n");
return -EINVAL;
}
if (tgt_prog->type == prog->type) {
@@ -10901,7 +11555,7 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
* Cannot attach program extension to another extension.
* It's ok to attach fentry/fexit to extension program.
*/
- verbose(env, "Cannot recursively attach\n");
+ bpf_log(log, "Cannot recursively attach\n");
return -EINVAL;
}
if (tgt_prog->type == BPF_PROG_TYPE_TRACING &&
@@ -10923,32 +11577,30 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
* reasonable stack size. Hence extending fentry is not
* allowed.
*/
- verbose(env, "Cannot extend fentry/fexit\n");
+ bpf_log(log, "Cannot extend fentry/fexit\n");
return -EINVAL;
}
- key = ((u64)aux->id) << 32 | btf_id;
} else {
if (prog_extension) {
- verbose(env, "Cannot replace kernel functions\n");
+ bpf_log(log, "Cannot replace kernel functions\n");
return -EINVAL;
}
- key = btf_id;
}
switch (prog->expected_attach_type) {
case BPF_TRACE_RAW_TP:
if (tgt_prog) {
- verbose(env,
+ bpf_log(log,
"Only FENTRY/FEXIT progs are attachable to another BPF prog\n");
return -EINVAL;
}
if (!btf_type_is_typedef(t)) {
- verbose(env, "attach_btf_id %u is not a typedef\n",
+ bpf_log(log, "attach_btf_id %u is not a typedef\n",
btf_id);
return -EINVAL;
}
if (strncmp(prefix, tname, sizeof(prefix) - 1)) {
- verbose(env, "attach_btf_id %u points to wrong type name %s\n",
+ bpf_log(log, "attach_btf_id %u points to wrong type name %s\n",
btf_id, tname);
return -EINVAL;
}
@@ -10962,29 +11614,20 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
/* should never happen in valid vmlinux build */
return -EINVAL;
- /* remember two read only pointers that are valid for
- * the life time of the kernel
- */
- prog->aux->attach_func_name = tname;
- prog->aux->attach_func_proto = t;
- prog->aux->attach_btf_trace = true;
- return 0;
+ break;
case BPF_TRACE_ITER:
if (!btf_type_is_func(t)) {
- verbose(env, "attach_btf_id %u is not a function\n",
+ bpf_log(log, "attach_btf_id %u is not a function\n",
btf_id);
return -EINVAL;
}
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_func_proto(t))
return -EINVAL;
- prog->aux->attach_func_name = tname;
- prog->aux->attach_func_proto = t;
- if (!bpf_iter_prog_supported(prog))
- return -EINVAL;
- ret = btf_distill_func_proto(&env->log, btf, t,
- tname, &fmodel);
- return ret;
+ ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
+ if (ret)
+ return ret;
+ break;
default:
if (!prog_extension)
return -EINVAL;
@@ -10993,42 +11636,30 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
case BPF_LSM_MAC:
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
- prog->aux->attach_func_name = tname;
- if (prog->type == BPF_PROG_TYPE_LSM) {
- ret = bpf_lsm_verify_prog(&env->log, prog);
- if (ret < 0)
- return ret;
- }
-
if (!btf_type_is_func(t)) {
- verbose(env, "attach_btf_id %u is not a function\n",
+ bpf_log(log, "attach_btf_id %u is not a function\n",
btf_id);
return -EINVAL;
}
if (prog_extension &&
- btf_check_type_match(env, prog, btf, t))
+ btf_check_type_match(log, prog, btf, t))
return -EINVAL;
t = btf_type_by_id(btf, t->type);
if (!btf_type_is_func_proto(t))
return -EINVAL;
- tr = bpf_trampoline_lookup(key);
- if (!tr)
- return -ENOMEM;
- /* t is either vmlinux type or another program's type */
- prog->aux->attach_func_proto = t;
- mutex_lock(&tr->mutex);
- if (tr->func.addr) {
- prog->aux->trampoline = tr;
- goto out;
- }
- if (tgt_prog && conservative) {
- prog->aux->attach_func_proto = NULL;
+
+ if ((prog->aux->saved_dst_prog_type || prog->aux->saved_dst_attach_type) &&
+ (!tgt_prog || prog->aux->saved_dst_prog_type != tgt_prog->type ||
+ prog->aux->saved_dst_attach_type != tgt_prog->expected_attach_type))
+ return -EINVAL;
+
+ if (tgt_prog && conservative)
t = NULL;
- }
- ret = btf_distill_func_proto(&env->log, btf, t,
- tname, &tr->func.model);
+
+ ret = btf_distill_func_proto(log, btf, t, tname, &tgt_info->fmodel);
if (ret < 0)
- goto out;
+ return ret;
+
if (tgt_prog) {
if (subprog == 0)
addr = (long) tgt_prog->bpf_func;
@@ -11037,31 +11668,137 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
} else {
addr = kallsyms_lookup_name(tname);
if (!addr) {
- verbose(env,
+ bpf_log(log,
"The address of function %s cannot be found\n",
tname);
- ret = -ENOENT;
- goto out;
+ return -ENOENT;
}
}
- if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
- ret = check_attach_modify_return(prog, addr);
- if (ret)
- verbose(env, "%s() is not modifiable\n",
- prog->aux->attach_func_name);
+ if (prog->aux->sleepable) {
+ ret = -EINVAL;
+ switch (prog->type) {
+ case BPF_PROG_TYPE_TRACING:
+			/* fentry/fexit/fmod_ret progs can be sleepable only if they are
+			 * attached to ALLOW_ERROR_INJECTION functions and are not in the
+			 * denylist.
+			 */
+ if (!check_non_sleepable_error_inject(btf_id) &&
+ within_error_injection_list(addr))
+ ret = 0;
+ break;
+ case BPF_PROG_TYPE_LSM:
+ /* LSM progs check that they are attached to bpf_lsm_*() funcs.
+ * Only some of them are sleepable.
+ */
+ if (check_sleepable_lsm_hook(btf_id))
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ if (ret) {
+ bpf_log(log, "%s is not sleepable\n", tname);
+ return ret;
+ }
+ } else if (prog->expected_attach_type == BPF_MODIFY_RETURN) {
+ if (tgt_prog) {
+ bpf_log(log, "can't modify return codes of BPF programs\n");
+ return -EINVAL;
+ }
+ ret = check_attach_modify_return(addr, tname);
+ if (ret) {
+ bpf_log(log, "%s() is not modifiable\n", tname);
+ return ret;
+ }
}
- if (ret)
- goto out;
- tr->func.addr = (void *)addr;
- prog->aux->trampoline = tr;
-out:
- mutex_unlock(&tr->mutex);
- if (ret)
- bpf_trampoline_put(tr);
+ break;
+ }
+ tgt_info->tgt_addr = addr;
+ tgt_info->tgt_name = tname;
+ tgt_info->tgt_type = t;
+ return 0;
+}
+
+static int check_attach_btf_id(struct bpf_verifier_env *env)
+{
+ struct bpf_prog *prog = env->prog;
+ struct bpf_prog *tgt_prog = prog->aux->dst_prog;
+ struct bpf_attach_target_info tgt_info = {};
+ u32 btf_id = prog->aux->attach_btf_id;
+ struct bpf_trampoline *tr;
+ int ret;
+ u64 key;
+
+ if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
+ prog->type != BPF_PROG_TYPE_LSM) {
+ verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
+ return -EINVAL;
+ }
+
+ if (prog->type == BPF_PROG_TYPE_STRUCT_OPS)
+ return check_struct_ops_btf_id(env);
+
+ if (prog->type != BPF_PROG_TYPE_TRACING &&
+ prog->type != BPF_PROG_TYPE_LSM &&
+ prog->type != BPF_PROG_TYPE_EXT)
+ return 0;
+
+ ret = bpf_check_attach_target(&env->log, prog, tgt_prog, btf_id, &tgt_info);
+ if (ret)
return ret;
+
+ if (tgt_prog && prog->type == BPF_PROG_TYPE_EXT) {
+		/* to make freplace programs equivalent to their targets, they
+		 * need to inherit env->ops and expected_attach_type for the
+		 * rest of verification
+		 */
+ env->ops = bpf_verifier_ops[tgt_prog->type];
+ prog->expected_attach_type = tgt_prog->expected_attach_type;
+ }
+
+ /* store info about the attachment target that will be used later */
+ prog->aux->attach_func_proto = tgt_info.tgt_type;
+ prog->aux->attach_func_name = tgt_info.tgt_name;
+
+ if (tgt_prog) {
+ prog->aux->saved_dst_prog_type = tgt_prog->type;
+ prog->aux->saved_dst_attach_type = tgt_prog->expected_attach_type;
}
+
+ if (prog->expected_attach_type == BPF_TRACE_RAW_TP) {
+ prog->aux->attach_btf_trace = true;
+ return 0;
+ } else if (prog->expected_attach_type == BPF_TRACE_ITER) {
+ if (!bpf_iter_prog_supported(prog))
+ return -EINVAL;
+ return 0;
+ }
+
+ if (prog->type == BPF_PROG_TYPE_LSM) {
+ ret = bpf_lsm_verify_prog(&env->log, prog);
+ if (ret < 0)
+ return ret;
+ }
+
+ key = bpf_trampoline_compute_key(tgt_prog, btf_id);
+ tr = bpf_trampoline_get(key, &tgt_info);
+ if (!tr)
+ return -ENOMEM;
+
+ prog->aux->dst_trampoline = tr;
+ return 0;
+}
+
+struct btf *bpf_get_btf_vmlinux(void)
+{
+ if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
+ mutex_lock(&bpf_verifier_lock);
+ if (!btf_vmlinux)
+ btf_vmlinux = btf_parse_vmlinux();
+ mutex_unlock(&bpf_verifier_lock);
+ }
+ return btf_vmlinux;
}
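
bpf_get_btf_vmlinux() above is the usual lazily-initialized singleton: an unlocked fast-path check, then a re-check under the lock before parsing once. A minimal user-space sketch of the same pattern, assuming pthreads and a hypothetical parse_vmlinux_btf() loader standing in for btf_parse_vmlinux():

#include <pthread.h>

struct btf;                          /* opaque handle */
struct btf *parse_vmlinux_btf(void); /* hypothetical stand-in */

static struct btf *btf_cache;
static pthread_mutex_t btf_lock = PTHREAD_MUTEX_INITIALIZER;

struct btf *get_btf_vmlinux(void)
{
	if (!btf_cache) {                  /* unlocked fast path */
		pthread_mutex_lock(&btf_lock);
		if (!btf_cache)            /* re-check under the lock */
			btf_cache = parse_vmlinux_btf();
		pthread_mutex_unlock(&btf_lock);
	}
	return btf_cache;
}

As in the kernel version, the unlocked read assumes a pointer store is observed atomically; strict C11 code would use an atomic pointer for the fast path.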
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
@@ -11097,12 +11834,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->ops = bpf_verifier_ops[env->prog->type];
is_priv = bpf_capable();
- if (!btf_vmlinux && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) {
- mutex_lock(&bpf_verifier_lock);
- if (!btf_vmlinux)
- btf_vmlinux = btf_parse_vmlinux();
- mutex_unlock(&bpf_verifier_lock);
- }
+ bpf_get_btf_vmlinux();
/* grab the mutex to protect few globals used by verifier */
if (!is_priv)
@@ -11145,10 +11877,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (is_priv)
env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ;
- ret = replace_map_fd_with_map_ptr(env);
- if (ret < 0)
- goto skip_full_check;
-
if (bpf_prog_is_dev_bound(env->prog->aux)) {
ret = bpf_prog_offload_verifier_prep(env->prog);
if (ret)
@@ -11174,6 +11902,10 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
if (ret)
goto skip_full_check;
+ ret = resolve_pseudo_ldimm64(env);
+ if (ret < 0)
+ goto skip_full_check;
+
ret = check_cfg(env);
if (ret < 0)
goto skip_full_check;
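
Taken together, the verifier hunks above (per-subprog poke descriptors, the untrack/track dance, and the JIT-only restriction in fixup_call_args()) are what allow a tail call to sit inside a bpf2bpf subprogram. A minimal sketch of such a program, assuming libbpf conventions; the map name, section, and attach point are illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int subprog(struct xdp_md *ctx)
{
	/* A tail call issued from a subprogram: accepted only when the
	 * program is JITed, per the fixup_call_args() check above.
	 */
	bpf_tail_call(ctx, &jmp_table, 0);
	return XDP_PASS;
}

SEC("xdp")
int entry(struct xdp_md *ctx)
{
	return subprog(ctx);
}

char LICENSE[] SEC("license") = "GPL";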
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 05d3e1375e4c..d5d9f2d03e8a 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -28,6 +28,8 @@ typedef void (*postgp_func_t)(struct rcu_tasks *rtp);
* @kthread_ptr: This flavor's grace-period/callback-invocation kthread.
* @gp_func: This flavor's grace-period-wait function.
* @gp_state: Grace period's most recent state transition (debugging).
+ * @gp_sleep: Per-grace-period sleep to prevent CPU-bound looping.
+ * @init_fract: Initial backoff sleep interval.
* @gp_jiffies: Time of last @gp_state transition.
* @gp_start: Most recent grace-period start in jiffies.
* @n_gps: Number of grace periods completed since boot.
@@ -48,6 +50,8 @@ struct rcu_tasks {
struct wait_queue_head cbs_wq;
raw_spinlock_t cbs_lock;
int gp_state;
+ int gp_sleep;
+ int init_fract;
unsigned long gp_jiffies;
unsigned long gp_start;
unsigned long n_gps;
@@ -81,7 +85,7 @@ static struct rcu_tasks rt_name = \
DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
/* Avoid IPIing CPUs early in the grace period. */
-#define RCU_TASK_IPI_DELAY (HZ / 2)
+#define RCU_TASK_IPI_DELAY (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB) ? HZ / 2 : 0)
static int rcu_task_ipi_delay __read_mostly = RCU_TASK_IPI_DELAY;
module_param(rcu_task_ipi_delay, int, 0644);
@@ -231,7 +235,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
cond_resched();
}
/* Paranoid sleep to keep this from entering a tight loop */
- schedule_timeout_idle(HZ/10);
+ schedule_timeout_idle(rtp->gp_sleep);
set_tasks_gp_state(rtp, RTGS_WAIT_CBS);
}
@@ -329,8 +333,10 @@ static void rcu_tasks_wait_gp(struct rcu_tasks *rtp)
*/
lastreport = jiffies;
- /* Start off with HZ/10 wait and slowly back off to 1 HZ wait. */
- fract = 10;
+	// Start off with the initial wait and slowly back off to a 1-second (HZ) wait.
+ fract = rtp->init_fract;
+ if (fract > HZ)
+ fract = HZ;
for (;;) {
bool firstreport;
@@ -553,6 +559,8 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks);
static int __init rcu_spawn_tasks_kthread(void)
{
+ rcu_tasks.gp_sleep = HZ / 10;
+ rcu_tasks.init_fract = 10;
rcu_tasks.pregp_func = rcu_tasks_pregp_step;
rcu_tasks.pertask_func = rcu_tasks_pertask;
rcu_tasks.postscan_func = rcu_tasks_postscan;
@@ -685,6 +693,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_rude);
static int __init rcu_spawn_tasks_rude_kthread(void)
{
+ rcu_tasks_rude.gp_sleep = HZ / 10;
rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
return 0;
}
@@ -745,9 +754,9 @@ static DEFINE_PER_CPU(bool, trc_ipi_to_cpu);
// The number of detections of task quiescent state relying on
// heavyweight readers executing explicit memory barriers.
-unsigned long n_heavy_reader_attempts;
-unsigned long n_heavy_reader_updates;
-unsigned long n_heavy_reader_ofl_updates;
+static unsigned long n_heavy_reader_attempts;
+static unsigned long n_heavy_reader_updates;
+static unsigned long n_heavy_reader_ofl_updates;
void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func);
DEFINE_RCU_TASKS(rcu_tasks_trace, rcu_tasks_wait_gp, call_rcu_tasks_trace,
@@ -821,6 +830,12 @@ static void trc_read_check_handler(void *t_in)
WRITE_ONCE(t->trc_reader_checked, true);
goto reset_ipi;
}
+ // If we are racing with an rcu_read_unlock_trace(), try again later.
+ if (unlikely(t->trc_reader_nesting < 0)) {
+ if (WARN_ON_ONCE(atomic_dec_and_test(&trc_n_readers_need_end)))
+ wake_up(&trc_wait);
+ goto reset_ipi;
+ }
WRITE_ONCE(t->trc_reader_checked, true);
// Get here if the task is in a read-side critical section. Set
@@ -911,7 +926,8 @@ static void trc_wait_for_one_reader(struct task_struct *t,
// If currently running, send an IPI, either way, add to list.
trc_add_holdout(t, bhp);
- if (task_curr(t) && time_after(jiffies, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
+ if (task_curr(t) &&
+ time_after(jiffies + 1, rcu_tasks_trace.gp_start + rcu_task_ipi_delay)) {
// The task is currently running, so try IPIing it.
cpu = task_cpu(t);
@@ -1072,15 +1088,17 @@ static void rcu_tasks_trace_postgp(struct rcu_tasks *rtp)
if (ret)
break; // Count reached zero.
// Stall warning time, so make a list of the offenders.
+ rcu_read_lock();
for_each_process_thread(g, t)
if (READ_ONCE(t->trc_reader_special.b.need_qs))
trc_add_holdout(t, &holdouts);
+ rcu_read_unlock();
firstreport = true;
- list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list)
- if (READ_ONCE(t->trc_reader_special.b.need_qs)) {
+ list_for_each_entry_safe(t, g, &holdouts, trc_holdout_list) {
+ if (READ_ONCE(t->trc_reader_special.b.need_qs))
show_stalled_task_trace(t, &firstreport);
- trc_del_holdout(t);
- }
+ trc_del_holdout(t); // Release task_struct reference.
+ }
if (firstreport)
pr_err("INFO: rcu_tasks_trace detected stalls? (Counter/taskslist mismatch?)\n");
show_stalled_ipi_trace();
@@ -1163,6 +1181,17 @@ EXPORT_SYMBOL_GPL(rcu_barrier_tasks_trace);
static int __init rcu_spawn_tasks_trace_kthread(void)
{
+ if (IS_ENABLED(CONFIG_TASKS_TRACE_RCU_READ_MB)) {
+ rcu_tasks_trace.gp_sleep = HZ / 10;
+ rcu_tasks_trace.init_fract = 10;
+ } else {
+ rcu_tasks_trace.gp_sleep = HZ / 200;
+ if (rcu_tasks_trace.gp_sleep <= 0)
+ rcu_tasks_trace.gp_sleep = 1;
+ rcu_tasks_trace.init_fract = HZ / 5;
+ if (rcu_tasks_trace.init_fract <= 0)
+ rcu_tasks_trace.init_fract = 1;
+ }
rcu_tasks_trace.pregp_func = rcu_tasks_trace_pregp_step;
rcu_tasks_trace.pertask_func = rcu_tasks_trace_pertask;
rcu_tasks_trace.postscan_func = rcu_tasks_trace_postscan;
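
With gp_sleep and init_fract now per-flavor, each RCU-tasks variant picks its own pacing: the memory-barrier-heavy configuration keeps the old HZ/10 cadence, while the common case polls far more aggressively. A sketch of the backoff idea, under the assumption (matching the old comment) that the wait loop sleeps HZ/fract jiffies per pass and decays fract toward 1:

/* Hypothetical condensation of rcu_tasks_wait_gp()'s polling loop;
 * all_holdouts_done() and check_holdouts() are stand-ins.
 */
int fract = init_fract;          /* e.g. 10, or HZ / 5 for rcu_tasks_trace */

if (fract > HZ)
	fract = HZ;              /* never ask for less than one jiffy */
while (!all_holdouts_done()) {
	schedule_timeout_idle(HZ / fract);
	if (fract > 1)
		fract--;         /* back off toward one-second polls */
	check_holdouts();
}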
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index e2ac0e37c4ae..a2802b6ff4bb 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -34,17 +34,13 @@ struct kmem_cache *taskstats_cache;
static struct genl_family family;
-static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
+static const struct nla_policy taskstats_cmd_get_policy[] = {
[TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
-/*
- * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
- * Make sure they are always aligned.
- */
-static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
+static const struct nla_policy cgroupstats_cmd_get_policy[] = {
[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};
@@ -649,47 +645,25 @@ static const struct genl_ops taskstats_ops[] = {
.cmd = TASKSTATS_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = taskstats_user_cmd,
- /* policy enforced later */
- .flags = GENL_ADMIN_PERM | GENL_CMD_CAP_HASPOL,
+ .policy = taskstats_cmd_get_policy,
+ .maxattr = ARRAY_SIZE(taskstats_cmd_get_policy) - 1,
+ .flags = GENL_ADMIN_PERM,
},
{
.cmd = CGROUPSTATS_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = cgroupstats_user_cmd,
- /* policy enforced later */
- .flags = GENL_CMD_CAP_HASPOL,
+ .policy = cgroupstats_cmd_get_policy,
+ .maxattr = ARRAY_SIZE(cgroupstats_cmd_get_policy) - 1,
},
};
-static int taskstats_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
- struct genl_info *info)
-{
- const struct nla_policy *policy = NULL;
-
- switch (ops->cmd) {
- case TASKSTATS_CMD_GET:
- policy = taskstats_cmd_get_policy;
- break;
- case CGROUPSTATS_CMD_GET:
- policy = cgroupstats_cmd_get_policy;
- break;
- default:
- return -EINVAL;
- }
-
- return nlmsg_validate_deprecated(info->nlhdr, GENL_HDRLEN,
- TASKSTATS_CMD_ATTR_MAX, policy,
- info->extack);
-}
-
static struct genl_family family __ro_after_init = {
.name = TASKSTATS_GENL_NAME,
.version = TASKSTATS_GENL_VERSION,
- .maxattr = TASKSTATS_CMD_ATTR_MAX,
.module = THIS_MODULE,
.ops = taskstats_ops,
.n_ops = ARRAY_SIZE(taskstats_ops),
- .pre_doit = taskstats_pre_doit,
};
/* Needed early in initialization */
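
With per-op policies, the generic netlink core validates attributes against ops->policy up to ops->maxattr before ->doit runs, which is what makes the hand-rolled pre_doit hook above removable. The idiom for a hypothetical family (DEMO_* names are illustrative):

static const struct nla_policy demo_get_policy[] = {
	[DEMO_ATTR_ID]   = { .type = NLA_U32 },
	[DEMO_ATTR_NAME] = { .type = NLA_STRING },
};

static const struct genl_ops demo_ops[] = {
	{
		.cmd     = DEMO_CMD_GET,
		.doit    = demo_get_doit,
		/* validated by the genetlink core before ->doit runs */
		.policy  = demo_get_policy,
		.maxattr = ARRAY_SIZE(demo_get_policy) - 1,
	},
};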
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 2ecf7892a31b..4517c8b66518 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
+#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
@@ -16,6 +17,9 @@
#include <linux/error-injection.h>
#include <linux/btf_ids.h>
+#include <uapi/linux/bpf.h>
+#include <uapi/linux/btf.h>
+
#include <asm/tlb.h>
#include "trace_probe.h"
@@ -67,6 +71,10 @@ static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
+ u64 flags, const struct btf **btf,
+ s32 *btf_id);
+
/**
* trace_call_bpf - invoke BPF program
* @call: tracepoint event
@@ -743,19 +751,18 @@ out:
return err;
}
-BTF_ID_LIST(bpf_seq_printf_btf_ids)
-BTF_ID(struct, seq_file)
+BTF_ID_LIST_SINGLE(btf_seq_file_ids, struct, seq_file)
static const struct bpf_func_proto bpf_seq_printf_proto = {
.func = bpf_seq_printf,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM_OR_NULL,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_seq_printf_btf_ids,
};
BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
@@ -763,17 +770,39 @@ BPF_CALL_3(bpf_seq_write, struct seq_file *, m, const void *, data, u32, len)
return seq_write(m, data, len) ? -EOVERFLOW : 0;
}
-BTF_ID_LIST(bpf_seq_write_btf_ids)
-BTF_ID(struct, seq_file)
-
static const struct bpf_func_proto bpf_seq_write_proto = {
.func = bpf_seq_write,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_seq_file_ids[0],
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_seq_write_btf_ids,
+};
+
+BPF_CALL_4(bpf_seq_printf_btf, struct seq_file *, m, struct btf_ptr *, ptr,
+ u32, btf_ptr_size, u64, flags)
+{
+ const struct btf *btf;
+ s32 btf_id;
+ int ret;
+
+ ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
+ if (ret)
+ return ret;
+
+ return btf_type_seq_show_flags(btf, btf_id, ptr->ptr, m, flags);
+}
+
+static const struct bpf_func_proto bpf_seq_printf_btf_proto = {
+ .func = bpf_seq_printf_btf,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &btf_seq_file_ids[0],
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg4_type = ARG_ANYTHING,
};
static __always_inline int
@@ -1098,6 +1127,118 @@ static const struct bpf_func_proto bpf_send_signal_thread_proto = {
.arg1_type = ARG_ANYTHING,
};
+BPF_CALL_3(bpf_d_path, struct path *, path, char *, buf, u32, sz)
+{
+ long len;
+ char *p;
+
+ if (!sz)
+ return 0;
+
+ p = d_path(path, buf, sz);
+ if (IS_ERR(p)) {
+ len = PTR_ERR(p);
+ } else {
+ len = buf + sz - p;
+ memmove(buf, p, len);
+ }
+
+ return len;
+}
+
+BTF_SET_START(btf_allowlist_d_path)
+#ifdef CONFIG_SECURITY
+BTF_ID(func, security_file_permission)
+BTF_ID(func, security_inode_getattr)
+BTF_ID(func, security_file_open)
+#endif
+#ifdef CONFIG_SECURITY_PATH
+BTF_ID(func, security_path_truncate)
+#endif
+BTF_ID(func, vfs_truncate)
+BTF_ID(func, vfs_fallocate)
+BTF_ID(func, dentry_open)
+BTF_ID(func, vfs_getattr)
+BTF_ID(func, filp_close)
+BTF_SET_END(btf_allowlist_d_path)
+
+static bool bpf_d_path_allowed(const struct bpf_prog *prog)
+{
+ return btf_id_set_contains(&btf_allowlist_d_path, prog->aux->attach_btf_id);
+}
+
+BTF_ID_LIST_SINGLE(bpf_d_path_btf_ids, struct, path)
+
+static const struct bpf_func_proto bpf_d_path_proto = {
+ .func = bpf_d_path,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_d_path_btf_ids[0],
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE_OR_ZERO,
+ .allowed = bpf_d_path_allowed,
+};
+
+#define BTF_F_ALL (BTF_F_COMPACT | BTF_F_NONAME | \
+ BTF_F_PTR_RAW | BTF_F_ZERO)
+
+static int bpf_btf_printf_prepare(struct btf_ptr *ptr, u32 btf_ptr_size,
+ u64 flags, const struct btf **btf,
+ s32 *btf_id)
+{
+ const struct btf_type *t;
+
+ if (unlikely(flags & ~(BTF_F_ALL)))
+ return -EINVAL;
+
+ if (btf_ptr_size != sizeof(struct btf_ptr))
+ return -EINVAL;
+
+ *btf = bpf_get_btf_vmlinux();
+
+ if (IS_ERR_OR_NULL(*btf))
+ return PTR_ERR(*btf);
+
+ if (ptr->type_id > 0)
+ *btf_id = ptr->type_id;
+ else
+ return -EINVAL;
+
+ if (*btf_id > 0)
+ t = btf_type_by_id(*btf, *btf_id);
+ if (*btf_id <= 0 || !t)
+ return -ENOENT;
+
+ return 0;
+}
+
+BPF_CALL_5(bpf_snprintf_btf, char *, str, u32, str_size, struct btf_ptr *, ptr,
+ u32, btf_ptr_size, u64, flags)
+{
+ const struct btf *btf;
+ s32 btf_id;
+ int ret;
+
+ ret = bpf_btf_printf_prepare(ptr, btf_ptr_size, flags, &btf, &btf_id);
+ if (ret)
+ return ret;
+
+ return btf_type_snprintf_show(btf, btf_id, ptr->ptr, str, str_size,
+ flags);
+}
+
+const struct bpf_func_proto bpf_snprintf_btf_proto = {
+ .func = bpf_snprintf_btf,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_PTR_TO_MEM,
+ .arg4_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_ANYTHING,
+};
+
const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
@@ -1182,6 +1323,14 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_jiffies64_proto;
case BPF_FUNC_get_task_stack:
return &bpf_get_task_stack_proto;
+ case BPF_FUNC_copy_from_user:
+ return prog->aux->sleepable ? &bpf_copy_from_user_proto : NULL;
+ case BPF_FUNC_snprintf_btf:
+ return &bpf_snprintf_btf_proto;
+ case BPF_FUNC_bpf_per_cpu_ptr:
+ return &bpf_per_cpu_ptr_proto;
+ case BPF_FUNC_bpf_this_cpu_ptr:
+ return &bpf_this_cpu_ptr_proto;
default:
return NULL;
}
@@ -1579,6 +1728,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return prog->expected_attach_type == BPF_TRACE_ITER ?
&bpf_seq_write_proto :
NULL;
+ case BPF_FUNC_seq_printf_btf:
+ return prog->expected_attach_type == BPF_TRACE_ITER ?
+ &bpf_seq_printf_btf_proto :
+ NULL;
+ case BPF_FUNC_d_path:
+ return &bpf_d_path_proto;
default:
return raw_tp_prog_func_proto(func_id, prog);
}
@@ -1625,6 +1780,9 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = {
};
const struct bpf_prog_ops raw_tracepoint_prog_ops = {
+#ifdef CONFIG_NET
+ .test_run = bpf_prog_test_run_raw_tp,
+#endif
};
const struct bpf_verifier_ops tracing_verifier_ops = {
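
A short sketch of how the new bpf_snprintf_btf() helper is called from BPF program context; the attach point and the CO-RE macro bpf_core_type_id_kernel() are assumptions from libbpf conventions, not part of this patch:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char buf[512];

SEC("fentry/filp_close")
int BPF_PROG(show_file, struct file *file)
{
	struct btf_ptr p = {
		.ptr = file,
		.type_id = bpf_core_type_id_kernel(struct file),
	};

	/* Render *file as text using vmlinux BTF; 0 = default flags. */
	bpf_snprintf_btf(buf, sizeof(buf), &p, sizeof(p), 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";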
diff --git a/lib/nlattr.c b/lib/nlattr.c
index bc5b5cf608c4..74019c8ebf6b 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -96,8 +96,8 @@ static int nla_validate_array(const struct nlattr *head, int len, int maxtype,
continue;
if (nla_len(entry) < NLA_HDRLEN) {
- NL_SET_ERR_MSG_ATTR(extack, entry,
- "Array element too short");
+ NL_SET_ERR_MSG_ATTR_POL(extack, entry, policy,
+ "Array element too short");
return -ERANGE;
}
@@ -124,6 +124,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
range->max = U8_MAX;
break;
case NLA_U16:
+ case NLA_BINARY:
range->max = U16_MAX;
break;
case NLA_U32:
@@ -140,6 +141,7 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
switch (pt->validation_type) {
case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
range->min = pt->min;
range->max = pt->max;
break;
@@ -157,9 +159,10 @@ void nla_get_range_unsigned(const struct nla_policy *pt,
}
}
-static int nla_validate_int_range_unsigned(const struct nla_policy *pt,
- const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+static int nla_validate_range_unsigned(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
{
struct netlink_range_validation range;
u64 value;
@@ -178,15 +181,39 @@ static int nla_validate_int_range_unsigned(const struct nla_policy *pt,
case NLA_MSECS:
value = nla_get_u64(nla);
break;
+ case NLA_BINARY:
+ value = nla_len(nla);
+ break;
default:
return -EINVAL;
}
nla_get_range_unsigned(pt, &range);
+ if (pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG &&
+ pt->type == NLA_BINARY && value > range.max) {
+ pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
+ current->comm, pt->type);
+ if (validate & NL_VALIDATE_STRICT_ATTRS) {
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
+ return -EINVAL;
+ }
+
+ /* this assumes min <= max (don't validate against min) */
+ return 0;
+ }
+
if (value < range.min || value > range.max) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
+ bool binary = pt->type == NLA_BINARY;
+
+ if (binary)
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "binary attribute size out of range");
+ else
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
+
return -ERANGE;
}
@@ -264,8 +291,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt,
nla_get_range_signed(pt, &range);
if (value < range.min || value > range.max) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "integer out of range");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "integer out of range");
return -ERANGE;
}
@@ -274,7 +301,8 @@ static int nla_validate_int_range_signed(const struct nla_policy *pt,
static int nla_validate_int_range(const struct nla_policy *pt,
const struct nlattr *nla,
- struct netlink_ext_ack *extack)
+ struct netlink_ext_ack *extack,
+ unsigned int validate)
{
switch (pt->type) {
case NLA_U8:
@@ -282,7 +310,8 @@ static int nla_validate_int_range(const struct nla_policy *pt,
case NLA_U32:
case NLA_U64:
case NLA_MSECS:
- return nla_validate_int_range_unsigned(pt, nla, extack);
+ case NLA_BINARY:
+ return nla_validate_range_unsigned(pt, nla, extack, validate);
case NLA_S8:
case NLA_S16:
case NLA_S32:
@@ -294,6 +323,37 @@ static int nla_validate_int_range(const struct nla_policy *pt,
}
}
+static int nla_validate_mask(const struct nla_policy *pt,
+ const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ u64 value;
+
+ switch (pt->type) {
+ case NLA_U8:
+ value = nla_get_u8(nla);
+ break;
+ case NLA_U16:
+ value = nla_get_u16(nla);
+ break;
+ case NLA_U32:
+ value = nla_get_u32(nla);
+ break;
+ case NLA_U64:
+ value = nla_get_u64(nla);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (value & ~(u64)pt->mask) {
+ NL_SET_ERR_MSG_ATTR(extack, nla, "reserved bit set");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int validate_nla(const struct nlattr *nla, int maxtype,
const struct nla_policy *policy, unsigned int validate,
struct netlink_ext_ack *extack, unsigned int depth)
@@ -313,15 +373,12 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
BUG_ON(pt->type > NLA_TYPE_MAX);
- if ((nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) ||
- (pt->type == NLA_EXACT_LEN &&
- pt->validation_type == NLA_VALIDATE_WARN_TOO_LONG &&
- attrlen != pt->len)) {
+ if (nla_attr_len[pt->type] && attrlen != nla_attr_len[pt->type]) {
pr_warn_ratelimited("netlink: '%s': attribute type %d has an invalid length.\n",
current->comm, type);
if (validate & NL_VALIDATE_STRICT_ATTRS) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "invalid attribute length");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "invalid attribute length");
return -EINVAL;
}
}
@@ -329,14 +386,14 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
if (validate & NL_VALIDATE_NESTED) {
if ((pt->type == NLA_NESTED || pt->type == NLA_NESTED_ARRAY) &&
!(nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED is missing");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED is missing");
return -EINVAL;
}
if (pt->type != NLA_NESTED && pt->type != NLA_NESTED_ARRAY &&
pt->type != NLA_UNSPEC && (nla->nla_type & NLA_F_NESTED)) {
- NL_SET_ERR_MSG_ATTR(extack, nla,
- "NLA_F_NESTED not expected");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "NLA_F_NESTED not expected");
return -EINVAL;
}
}
@@ -449,19 +506,10 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
"Unsupported attribute");
return -EINVAL;
}
- /* fall through */
- case NLA_MIN_LEN:
if (attrlen < pt->len)
goto out_err;
break;
- case NLA_EXACT_LEN:
- if (pt->validation_type != NLA_VALIDATE_WARN_TOO_LONG) {
- if (attrlen != pt->len)
- goto out_err;
- break;
- }
- /* fall through */
default:
if (pt->len)
minlen = pt->len;
@@ -479,9 +527,15 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
break;
case NLA_VALIDATE_RANGE_PTR:
case NLA_VALIDATE_RANGE:
+ case NLA_VALIDATE_RANGE_WARN_TOO_LONG:
case NLA_VALIDATE_MIN:
case NLA_VALIDATE_MAX:
- err = nla_validate_int_range(pt, nla, extack);
+ err = nla_validate_int_range(pt, nla, extack, validate);
+ if (err)
+ return err;
+ break;
+ case NLA_VALIDATE_MASK:
+ err = nla_validate_mask(pt, nla, extack);
if (err)
return err;
break;
@@ -496,7 +550,8 @@ static int validate_nla(const struct nlattr *nla, int maxtype,
return 0;
out_err:
- NL_SET_ERR_MSG_ATTR(extack, nla, "Attribute failed policy validation");
+ NL_SET_ERR_MSG_ATTR_POL(extack, nla, pt,
+ "Attribute failed policy validation");
return err;
}
@@ -816,8 +871,7 @@ EXPORT_SYMBOL(__nla_reserve);
struct nlattr *__nla_reserve_64bit(struct sk_buff *skb, int attrtype,
int attrlen, int padattr)
{
- if (nla_need_padding_for_64bit(skb))
- nla_align_64bit(skb, padattr);
+ nla_align_64bit(skb, padattr);
return __nla_reserve(skb, attrtype, attrlen);
}
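
The new validation types slot into struct nla_policy alongside the existing range machinery. A sketch of policy entries exercising NLA_VALIDATE_MASK and a length-ranged NLA_BINARY, with illustrative DEMO_* attribute names:

static const struct nla_policy demo_policy[] = {
	/* reject any bits outside the two low flag bits */
	[DEMO_ATTR_FLAGS] = { .type = NLA_U32,
			      .validation_type = NLA_VALIDATE_MASK,
			      .mask = 0x3 },
	/* NLA_BINARY length is now range-checked like an unsigned value */
	[DEMO_ATTR_BLOB]  = { .type = NLA_BINARY,
			      .validation_type = NLA_VALIDATE_RANGE,
			      .min = 1, .max = 64 },
};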
diff --git a/mm/filemap.c b/mm/filemap.c
index 38546dca58fe..e3b8987153e6 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -827,10 +827,10 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
}
EXPORT_SYMBOL_GPL(replace_page_cache_page);
-static int __add_to_page_cache_locked(struct page *page,
- struct address_space *mapping,
- pgoff_t offset, gfp_t gfp_mask,
- void **shadowp)
+noinline int __add_to_page_cache_locked(struct page *page,
+ struct address_space *mapping,
+ pgoff_t offset, gfp_t gfp_mask,
+ void **shadowp)
{
XA_STATE(xas, &mapping->i_pages, offset);
int huge = PageHuge(page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 05fe1ddb033c..e0ff3a811ec5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3496,7 +3496,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
#endif /* CONFIG_FAIL_PAGE_ALLOC */
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
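
__add_to_page_cache_locked(), should_fail_alloc_page() and should_failslab() are made non-static (and kept noinline) so that BTF retains resolvable IDs for them, which the btf_non_sleepable_error_inject denylist in the verifier relies on. The BTF_SET machinery reduces to a build-time-sorted ID array plus a membership test; a sketch of the idiom, assuming CONFIG_DEBUG_INFO_BTF:

#include <linux/btf_ids.h>

/* IDs are resolved at build time by the resolve_btfids tool. */
BTF_SET_START(demo_denylist)
BTF_ID(func, should_failslab)
BTF_ID(func, should_fail_alloc_page)
BTF_SET_END(demo_denylist)

static bool demo_denied(u32 attach_btf_id)
{
	/* binary search over the sorted ID set */
	return btf_id_set_contains(&demo_denylist, attach_btf_id);
}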
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index d4bcfd8f95bf..f292e0267bb9 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -51,12 +51,16 @@ static int vlan_group_prealloc_vid(struct vlan_group *vg,
__be16 vlan_proto, u16 vlan_id)
{
struct net_device **array;
- unsigned int pidx, vidx;
+ unsigned int vidx;
unsigned int size;
+ int pidx;
ASSERT_RTNL();
pidx = vlan_proto_idx(vlan_proto);
+ if (pidx < 0)
+ return -EINVAL;
+
vidx = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
array = vg->vlan_devices_arrays[pidx][vidx];
if (array != NULL)
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index bb7ec1a3915d..953405362795 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -36,7 +36,7 @@ struct vlan_info {
struct rcu_head rcu;
};
-static inline unsigned int vlan_proto_idx(__be16 proto)
+static inline int vlan_proto_idx(__be16 proto)
{
switch (proto) {
case htons(ETH_P_8021Q):
@@ -44,8 +44,8 @@ static inline unsigned int vlan_proto_idx(__be16 proto)
case htons(ETH_P_8021AD):
return VLAN_PROTO_8021AD;
default:
- BUG();
- return 0;
+ WARN(1, "invalid VLAN protocol: 0x%04x\n", ntohs(proto));
+ return -EINVAL;
}
}
@@ -64,17 +64,24 @@ static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
__be16 vlan_proto,
u16 vlan_id)
{
- return __vlan_group_get_device(vg, vlan_proto_idx(vlan_proto), vlan_id);
+ int pidx = vlan_proto_idx(vlan_proto);
+
+ if (pidx < 0)
+ return NULL;
+
+ return __vlan_group_get_device(vg, pidx, vlan_id);
}
static inline void vlan_group_set_device(struct vlan_group *vg,
__be16 vlan_proto, u16 vlan_id,
struct net_device *dev)
{
+ int pidx = vlan_proto_idx(vlan_proto);
struct net_device **array;
- if (!vg)
+
+ if (!vg || pidx < 0)
return;
- array = vg->vlan_devices_arrays[vlan_proto_idx(vlan_proto)]
+ array = vg->vlan_devices_arrays[pidx]
[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
}
diff --git a/net/Kconfig b/net/Kconfig
index 3831206977a1..d6567162c1cf 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -434,7 +434,6 @@ config NET_SOCK_MSG
config NET_DEVLINK
bool
default n
- imply NET_DROP_MONITOR
config PAGE_POOL
bool
diff --git a/net/atm/lec.c b/net/atm/lec.c
index b570ef919c28..dbabb65d8b67 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -1070,7 +1070,7 @@ module_exit(lane_module_cleanup);
/*
* LANE2: 3.1.3, LE_RESOLVE.request
* Non force allocates memory and fills in *tlvs, fills in *sizeoftlvs.
- * If sizeoftlvs == NULL the default TLVs associated with with this
+ * If sizeoftlvs == NULL the default TLVs associated with this
* lec will be used.
* If dst_mac == NULL, targetless LE_ARP will be sent
*/
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index fbd0c5e7b299..5de06ab8ed75 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -52,7 +52,7 @@ static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
msg->type = as_okay;
}
/*
- * Should probably just turn around the old skb. But the, the buffer
+ * Should probably just turn around the old skb. But then, the buffer
* space accounting needs to follow the change too. Maybe later.
*/
while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a4faf5f904d9..206d0b424712 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -27,6 +27,7 @@
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/pkt_sched.h>
+#include <linux/prandom.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/rculist.h>
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index d35aca0e969a..79a7dfc32e76 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -20,6 +20,7 @@
#include <linux/kref.h>
#include <linux/netdevice.h>
#include <linux/nl80211.h>
+#include <linux/prandom.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 717fe657561d..8c1148fc73d7 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -20,6 +20,7 @@
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/prandom.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index c350ab63cd54..ba0027d1f2df 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1863,7 +1863,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function
+ /* backbone_gw is unreferenced in the report work function
* if queue_work() call was successful
*/
if (!ret)
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 9fdbe3068153..9a47ef8b95c4 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -306,7 +306,7 @@ free:
* set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
* to NULL; 3) Error: Return false and free skb.
*
- * Return: true when the packet is merged or buffered, false when skb is not not
+ * Return: true when the packet is merged or buffered, false when skb is not
* used.
*/
bool batadv_frag_skb_buffer(struct sk_buff **skb,
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index fa06b51c0144..dad99641df2a 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -599,7 +599,7 @@ out:
/* report to the other components the maximum amount of bytes that
* batman-adv can send over the wire (without considering the payload
* overhead). For example, this value is used by TT to compute the
- * maximum local table table size
+ * maximum local table size
*/
atomic_set(&bat_priv->packet_size_max, min_mtu);
@@ -977,23 +977,6 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
}
/**
- * batadv_hardif_remove_interfaces() - Remove all hard interfaces
- */
-void batadv_hardif_remove_interfaces(void)
-{
- struct batadv_hard_iface *hard_iface, *hard_iface_tmp;
-
- rtnl_lock();
- list_for_each_entry_safe(hard_iface, hard_iface_tmp,
- &batadv_hardif_list, list) {
- list_del_rcu(&hard_iface->list);
- batadv_hardif_generation++;
- batadv_hardif_remove_interface(hard_iface);
- }
- rtnl_unlock();
-}
-
-/**
* batadv_hard_if_event_softif() - Handle events for soft interfaces
* @event: NETDEV_* event to handle
* @net_dev: net_device which generated an event
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index bad2e50135e8..b1855d9d0b06 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -100,7 +100,6 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
struct net *net, const char *iface_name);
void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
enum batadv_hard_if_cleanup autodel);
-void batadv_hardif_remove_interfaces(void);
int batadv_hardif_min_mtu(struct net_device *soft_iface);
void batadv_update_min_mtu(struct net_device *soft_iface);
void batadv_hardif_release(struct kref *ref);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 519c08c2cfba..70fee9b42e25 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -137,7 +137,6 @@ static void __exit batadv_exit(void)
batadv_netlink_unregister();
rtnl_link_unregister(&batadv_link_ops);
unregister_netdevice_notifier(&batadv_hard_if_notifier);
- batadv_hardif_remove_interfaces();
flush_workqueue(batadv_event_workqueue);
destroy_workqueue(batadv_event_workqueue);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 0393bb9ed3d0..a47dc332d796 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -13,7 +13,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2020.3"
+#define BATADV_SOURCE_VERSION "2020.4"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index ca24a2e522b7..9af99c39b9fd 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -208,7 +208,7 @@ static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
/* TODO: ask the bridge if a multicast router is present (the bridge
- * is capable of performing proper RFC4286 multicast multicast router
+ * is capable of performing proper RFC4286 multicast router
* discovery) instead of searching for a ff02::2 listener here
*/
ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
@@ -221,7 +221,7 @@ static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
* address here, only IPv6 ones
*/
if (br_ip_entry->addr.proto == htons(ETH_P_IPV6) &&
- ipv6_addr_is_ll_all_routers(&br_ip_entry->addr.u.ip6))
+ ipv6_addr_is_ll_all_routers(&br_ip_entry->addr.dst.ip6))
flags &= ~BATADV_MCAST_WANT_NO_RTR6;
list_del(&br_ip_entry->list);
@@ -562,10 +562,10 @@ out:
static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
{
if (src->proto == htons(ETH_P_IP))
- ip_eth_mc_map(src->u.ip4, dst);
+ ip_eth_mc_map(src->dst.ip4, dst);
#if IS_ENABLED(CONFIG_IPV6)
else if (src->proto == htons(ETH_P_IPV6))
- ipv6_eth_mc_map(&src->u.ip6, dst);
+ ipv6_eth_mc_map(&src->dst.ip6, dst);
#endif
else
eth_zero_addr(dst);
@@ -609,11 +609,11 @@ static int batadv_mcast_mla_bridge_get(struct net_device *dev,
continue;
if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
- ipv4_is_local_multicast(br_ip_entry->addr.u.ip4))
+ ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
continue;
if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
- !ipv4_is_local_multicast(br_ip_entry->addr.u.ip4))
+ !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
continue;
}
@@ -623,11 +623,11 @@ static int batadv_mcast_mla_bridge_get(struct net_device *dev,
continue;
if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
- ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.u.ip6))
+ ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
continue;
if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
- IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.u.ip6) >
+ IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
IPV6_ADDR_SCOPE_LINKLOCAL)
continue;
}
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index dc193618a761..c7a55647b520 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1350,7 +1350,7 @@ static void batadv_post_doit(const struct genl_ops *ops, struct sk_buff *skb,
}
}
-static const struct genl_ops batadv_netlink_ops[] = {
+static const struct genl_small_ops batadv_netlink_ops[] = {
{
.cmd = BATADV_CMD_GET_MESH,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -1484,8 +1484,8 @@ struct genl_family batadv_netlink_family __ro_after_init = {
.pre_doit = batadv_pre_doit,
.post_doit = batadv_post_doit,
.module = THIS_MODULE,
- .ops = batadv_netlink_ops,
- .n_ops = ARRAY_SIZE(batadv_netlink_ops),
+ .small_ops = batadv_netlink_ops,
+ .n_small_ops = ARRAY_SIZE(batadv_netlink_ops),
.mcgrps = batadv_netlink_mcgrps,
.n_mcgrps = ARRAY_SIZE(batadv_netlink_mcgrps),
};
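
genl_small_ops is the slimmer ops table for families that carry no per-op policy; conversion is just a rename of the registration fields, as the hunk shows. The same shape for a hypothetical family:

static const struct genl_small_ops demo_small_ops[] = {
	{
		.cmd      = DEMO_CMD_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit     = demo_get_doit,
	},
};

static struct genl_family demo_family __ro_after_init = {
	.name        = "demo",
	.version     = 1,
	.module      = THIS_MODULE,
	.small_ops   = demo_small_ops,
	.n_small_ops = ARRAY_SIZE(demo_small_ops),
};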
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 48d707850f3e..61ddd6d709a0 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -26,8 +26,8 @@
#include <linux/lockdep.h>
#include <linux/net.h>
#include <linux/netdevice.h>
+#include <linux/prandom.h>
#include <linux/printk.h>
-#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
@@ -250,7 +250,7 @@ static void batadv_nc_path_put(struct batadv_nc_path *nc_path)
/**
* batadv_nc_packet_free() - frees nc packet
* @nc_packet: the nc packet to free
- * @dropped: whether the packet is freed because is is dropped
+ * @dropped: whether the packet is freed because it is dropped
*/
static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet,
bool dropped)
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index d267b94800d6..87017332b567 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -461,7 +461,7 @@ int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
/**
* batadv_forw_packet_free() - free a forwarding packet
* @forw_packet: The packet to free
+ * @dropped: whether the packet is freed because it is dropped
+ * @dropped: whether the packet is freed because is dropped
*
* This frees a forwarding packet and releases any resources it might
* have claimed.
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index cdde943c1b83..82e7ca886605 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -648,7 +648,7 @@ static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
/**
* batadv_interface_add_vid() - ndo_add_vid API implementation
* @dev: the netdev of the mesh interface
- * @proto: protocol of the the vlan id
+ * @proto: protocol of the vlan id
* @vid: identifier of the new vlan
*
* Set up all the internal structures for handling the new vlan on top of the
@@ -706,7 +706,7 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
/**
* batadv_interface_kill_vid() - ndo_kill_vid API implementation
* @dev: the netdev of the mesh interface
- * @proto: protocol of the the vlan id
+ * @proto: protocol of the vlan id
* @vid: identifier of the deleted vlan
*
* Destroy all the internal structures used to handle the vlan identified by vid
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index ed519efa3c36..965336a3b89d 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1492,7 +1492,7 @@ struct batadv_tp_vars {
/** @unacked_lock: protect unacked_list */
spinlock_t unacked_lock;
- /** @last_recv_time: time time (jiffies) a msg was received */
+ /** @last_recv_time: time (jiffies) a msg was received */
unsigned long last_recv_time;
/** @refcount: number of context where the object is used */
@@ -1996,7 +1996,7 @@ struct batadv_tt_change_node {
*/
struct batadv_tt_req_node {
/**
- * @addr: mac address address of the originator this request was sent to
+ * @addr: mac address of the originator this request was sent to
*/
u8 addr[ETH_ALEN];
diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig
index e2497d764e97..64e669acd42f 100644
--- a/net/bluetooth/Kconfig
+++ b/net/bluetooth/Kconfig
@@ -64,7 +64,6 @@ source "net/bluetooth/hidp/Kconfig"
config BT_HS
bool "Bluetooth High Speed (HS) features"
depends on BT_BREDR
- default y
help
Bluetooth High Speed includes support for off-loading
Bluetooth connections via 802.11 (wifi) physical layer
diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
index 26526be579c7..da7fd7c8c2dc 100644
--- a/net/bluetooth/a2mp.c
+++ b/net/bluetooth/a2mp.c
@@ -226,6 +226,9 @@ static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_info_req req;
found = true;
+
+ memset(&req, 0, sizeof(req));
+
req.id = cl->id;
a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr),
sizeof(req), &req);
@@ -305,6 +308,8 @@ static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb,
if (!hdev || hdev->dev_type != HCI_AMP) {
struct a2mp_info_rsp rsp;
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.id = req->id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
@@ -348,6 +353,8 @@ static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb,
if (!ctrl)
return -ENOMEM;
+ memset(&req, 0, sizeof(req));
+
req.id = rsp->id;
a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req),
&req);
@@ -376,6 +383,8 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_amp_assoc_rsp rsp;
+		memset(&rsp, 0, sizeof(rsp));
+
 		rsp.id = req->id;
if (tmp) {
rsp.status = A2MP_STATUS_COLLISION_OCCURED;
amp_mgr_put(tmp);
@@ -464,7 +473,6 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
struct a2mp_cmd *hdr)
{
struct a2mp_physlink_req *req = (void *) skb->data;
-
struct a2mp_physlink_rsp rsp;
struct hci_dev *hdev;
struct hci_conn *hcon;
@@ -475,6 +483,8 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.local_id = req->remote_id;
rsp.remote_id = req->local_id;
@@ -553,6 +563,8 @@ static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id);
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.local_id = req->remote_id;
rsp.remote_id = req->local_id;
rsp.status = A2MP_STATUS_SUCCESS;
@@ -675,6 +687,8 @@ static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
if (err) {
struct a2mp_cmd_rej rej;
+ memset(&rej, 0, sizeof(rej));
+
rej.reason = cpu_to_le16(0);
hdr = (void *) skb->data;
@@ -898,6 +912,8 @@ void a2mp_send_getinfo_rsp(struct hci_dev *hdev)
BT_DBG("%s mgr %p", hdev->name, mgr);
+ memset(&rsp, 0, sizeof(rsp));
+
rsp.id = hdev->id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
@@ -995,6 +1011,8 @@ void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status)
if (!mgr)
return;
+ memset(&rsp, 0, sizeof(rsp));
+
hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT);
if (!hs_hcon) {
rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION;
@@ -1027,6 +1045,8 @@ void a2mp_discover_amp(struct l2cap_chan *chan)
mgr->bredr_chan = chan;
+ memset(&req, 0, sizeof(req));
+
req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
req.ext_feat = 0;
a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req);
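
Every memset() added in this file closes the same class of information leak: a struct assembled on the stack and sent to the peer carries stale stack contents in its padding bytes unless it is zeroed before the fields are filled in. The safe shape, sketched with one of the response structs above (the ident value is illustrative):

struct a2mp_info_rsp rsp;

memset(&rsp, 0, sizeof(rsp));   /* clear padding before any field is set */
rsp.id     = id;
rsp.status = A2MP_STATUS_INVALID_CTRL_ID;
a2mp_send(mgr, A2MP_GETINFO_RSP, ident, sizeof(rsp), &rsp);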
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 9832f8445d43..d0c1024bf600 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -1388,7 +1388,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
return 0;
}
-/* Encrypt the the link */
+/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
BT_DBG("hcon %p", conn);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 68bfe57b6625..502552d6e9af 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -808,7 +808,7 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
* Delete Stored Link Key command. They are clearly indicating its
* absence in the bit mask of supported commands.
*
- * Check the supported commands and only if the the command is marked
+ * Check the supported commands and only if the command is marked
* as supported send it. If not supported assume that the controller
* does not have actual support for stored link keys which makes this
* command redundant anyway.
@@ -2963,7 +2963,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
sizeof(adv_instance->scan_rsp_data));
} else {
if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
- instance < 1 || instance > HCI_MAX_ADV_INSTANCES)
+ instance < 1 || instance > hdev->le_num_of_adv_sets)
return -EOVERFLOW;
adv_instance = kzalloc(sizeof(*adv_instance), GFP_KERNEL);
@@ -3061,6 +3061,7 @@ static int free_adv_monitor(int id, void *ptr, void *data)
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
hci_free_adv_monitor(monitor);
+ hdev->adv_monitors_cnt--;
return 0;
}
@@ -3077,6 +3078,7 @@ int hci_remove_adv_monitor(struct hci_dev *hdev, u16 handle)
idr_remove(&hdev->adv_monitors_idr, monitor->handle);
hci_free_adv_monitor(monitor);
+ hdev->adv_monitors_cnt--;
} else {
/* Remove all monitors if handle is 0. */
idr_for_each(&hdev->adv_monitors_idr, &free_adv_monitor, hdev);
@@ -3442,6 +3444,16 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
}
}
+static void hci_suspend_clear_tasks(struct hci_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < __SUSPEND_NUM_TASKS; i++)
+ clear_bit(i, hdev->suspend_tasks);
+
+ wake_up(&hdev->suspend_wait_q);
+}
+
static int hci_suspend_wait_event(struct hci_dev *hdev)
{
#define WAKE_COND \
@@ -3487,12 +3499,24 @@ static int hci_change_suspend_state(struct hci_dev *hdev,
return hci_suspend_wait_event(hdev);
}
+static void hci_clear_wake_reason(struct hci_dev *hdev)
+{
+ hci_dev_lock(hdev);
+
+ hdev->wake_reason = 0;
+ bacpy(&hdev->wake_addr, BDADDR_ANY);
+ hdev->wake_addr_type = 0;
+
+ hci_dev_unlock(hdev);
+}
+
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
void *data)
{
struct hci_dev *hdev =
container_of(nb, struct hci_dev, suspend_notifier);
int ret = 0;
+ u8 state = BT_RUNNING;
/* If powering down, wait for completion. */
if (mgmt_powering_down(hdev)) {
@@ -3513,15 +3537,27 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
* - Second, program event filter/whitelist and enable scan
*/
ret = hci_change_suspend_state(hdev, BT_SUSPEND_DISCONNECT);
+ if (!ret)
+ state = BT_SUSPEND_DISCONNECT;
/* Only configure whitelist if disconnect succeeded and wake
* isn't being prevented.
*/
- if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev)))
+ if (!ret && !(hdev->prevent_wake && hdev->prevent_wake(hdev))) {
ret = hci_change_suspend_state(hdev,
BT_SUSPEND_CONFIGURE_WAKE);
+ if (!ret)
+ state = BT_SUSPEND_CONFIGURE_WAKE;
+ }
+
+ hci_clear_wake_reason(hdev);
+ mgmt_suspending(hdev, state);
+
} else if (action == PM_POST_SUSPEND) {
ret = hci_change_suspend_state(hdev, BT_RUNNING);
+
+ mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
+ hdev->wake_addr_type);
}
done:
@@ -3784,6 +3820,7 @@ void hci_unregister_dev(struct hci_dev *hdev)
cancel_work_sync(&hdev->power_on);
+ hci_suspend_clear_tasks(hdev);
unregister_pm_notifier(&hdev->suspend_notifier);
cancel_work_sync(&hdev->suspend_prepare);
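Note: the suspend handling above hangs off the PM core's notifier chain, and
hci_unregister_dev() now clears any pending suspend tasks before unregistering
the notifier so that waiters cannot be left hanging. For reference, a minimal
sketch of the PM-notifier pattern being used (hypothetical driver names,
assuming <linux/suspend.h>):

    #include <linux/notifier.h>
    #include <linux/suspend.h>

    static int my_pm_notifier(struct notifier_block *nb, unsigned long action,
                              void *data)
    {
            switch (action) {
            case PM_SUSPEND_PREPARE:
                    /* quiesce the device before the system sleeps */
                    break;
            case PM_POST_SUSPEND:
                    /* restore runtime state after resume */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_pm_nb = { .notifier_call = my_pm_notifier };

    /* register_pm_notifier(&my_pm_nb) at probe,
     * unregister_pm_notifier(&my_pm_nb) at remove.
     */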
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 4b7fc430793c..f04963914366 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2569,7 +2569,6 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_ev_conn_complete *ev = (void *) skb->data;
- struct inquiry_entry *ie;
struct hci_conn *conn;
BT_DBG("%s", hdev->name);
@@ -2578,13 +2577,19 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn) {
- /* Connection may not exist if auto-connected. Check the inquiry
- * cache to see if we've already discovered this bdaddr before.
- * If found and link is an ACL type, create a connection class
+ /* Connection may not exist if auto-connected. Check the bredr
+ * allowlist to see if this device is allowed to auto connect.
+ * If link is an ACL type, create a connection class
* automatically.
+ *
+ * Auto-connect will only occur if the event filter is
+ * programmed with a given address. Right now, the event filter
+ * is only used during suspend.
*/
- ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
- if (ie && ev->link_type == ACL_LINK) {
+ if (ev->link_type == ACL_LINK &&
+ hci_bdaddr_list_lookup_with_flags(&hdev->whitelist,
+ &ev->bdaddr,
+ BDADDR_BREDR)) {
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
HCI_ROLE_SLAVE);
if (!conn) {
@@ -6012,6 +6017,75 @@ static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
return true;
}
+static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
+ struct sk_buff *skb)
+{
+ struct hci_ev_le_advertising_info *adv;
+ struct hci_ev_le_direct_adv_info *direct_adv;
+ struct hci_ev_le_ext_adv_report *ext_adv;
+ const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
+ const struct hci_ev_conn_request *conn_request = (void *)skb->data;
+
+ hci_dev_lock(hdev);
+
+ /* If we are currently suspended and this is the first BT event seen,
+ * save the wake reason associated with the event.
+ */
+ if (!hdev->suspended || hdev->wake_reason)
+ goto unlock;
+
+ /* Default to remote wake. Values for wake_reason are documented in the
+ * BlueZ mgmt API docs.
+ */
+ hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
+
+ /* Once configured for remote wakeup, we should only wake up for
+ * reconnections. It's useful to see which device is waking us up so
+ * keep track of the bdaddr of the connection event that woke us up.
+ */
+ if (event == HCI_EV_CONN_REQUEST) {
+ bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+ hdev->wake_addr_type = BDADDR_BREDR;
+ } else if (event == HCI_EV_CONN_COMPLETE) {
+ bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+ hdev->wake_addr_type = BDADDR_BREDR;
+ } else if (event == HCI_EV_LE_META) {
+ struct hci_ev_le_meta *le_ev = (void *)skb->data;
+ u8 subevent = le_ev->subevent;
+ u8 *ptr = &skb->data[sizeof(*le_ev)];
+ u8 num_reports = *ptr;
+
+ if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
+ subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
+ subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
+ num_reports) {
+ adv = (void *)(ptr + 1);
+ direct_adv = (void *)(ptr + 1);
+ ext_adv = (void *)(ptr + 1);
+
+ switch (subevent) {
+ case HCI_EV_LE_ADVERTISING_REPORT:
+ bacpy(&hdev->wake_addr, &adv->bdaddr);
+ hdev->wake_addr_type = adv->bdaddr_type;
+ break;
+ case HCI_EV_LE_DIRECT_ADV_REPORT:
+ bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
+ hdev->wake_addr_type = direct_adv->bdaddr_type;
+ break;
+ case HCI_EV_LE_EXT_ADV_REPORT:
+ bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
+ hdev->wake_addr_type = ext_adv->bdaddr_type;
+ break;
+ }
+ }
+ } else {
+ hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+}
+
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
struct hci_event_hdr *hdr = (void *) skb->data;
@@ -6045,6 +6119,9 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
skb_pull(skb, HCI_EVENT_HDR_SIZE);
+ /* Store wake reason if we're suspended */
+ hci_store_wake_reason(hdev, event, skb);
+
switch (event) {
case HCI_EV_INQUIRY_COMPLETE:
hci_inquiry_complete_evt(hdev, skb);
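The wake-reason helper above parses the raw event payload by hand: a struct
hci_ev_le_meta header, then a one-byte report count, then the first report
(hence the ptr + 1 arithmetic). Roughly the layout being walked, per the
existing hci.h definitions (field subset shown for illustration):

    /* skb->data for an LE meta event:
     *
     *   struct hci_ev_le_meta { __u8 subevent; };
     *   __u8 num_reports;
     *   then the first report, e.g.:
     *   struct hci_ev_le_advertising_info {
     *           __u8     evt_type;
     *           __u8     bdaddr_type;
     *           bdaddr_t bdaddr;
     *           __u8     length;
     *           __u8     data[];
     *   } __packed;
     */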
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
index e0269192f2e5..6f12bab4d2fa 100644
--- a/net/bluetooth/hci_request.c
+++ b/net/bluetooth/hci_request.c
@@ -1027,6 +1027,9 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
} else if (hci_is_le_conn_scanning(hdev)) {
window = hdev->le_scan_window_connect;
interval = hdev->le_scan_int_connect;
+ } else if (hci_is_adv_monitoring(hdev)) {
+ window = hdev->le_scan_window_adv_monitor;
+ interval = hdev->le_scan_int_adv_monitor;
} else {
window = hdev->le_scan_window;
interval = hdev->le_scan_interval;
@@ -1111,6 +1114,53 @@ static void hci_req_config_le_suspend_scan(struct hci_request *req)
set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
}
+static void cancel_adv_timeout(struct hci_dev *hdev)
+{
+ if (hdev->adv_instance_timeout) {
+ hdev->adv_instance_timeout = 0;
+ cancel_delayed_work(&hdev->adv_instance_expire);
+ }
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_suspend_adv_instances(struct hci_request *req)
+{
+ bt_dev_dbg(req->hdev, "Suspending advertising instances");
+
+ /* Call to disable any advertisements active on the controller.
+ * This will succeed even if no advertisements are configured.
+ */
+ __hci_req_disable_advertising(req);
+
+ /* If we are using software rotation, pause the loop */
+ if (!ext_adv_capable(req->hdev))
+ cancel_adv_timeout(req->hdev);
+}
+
+/* This function requires the caller holds hdev->lock */
+static void hci_resume_adv_instances(struct hci_request *req)
+{
+ struct adv_info *adv;
+
+ bt_dev_dbg(req->hdev, "Resuming advertising instances");
+
+ if (ext_adv_capable(req->hdev)) {
+ /* Call for each tracked instance to be re-enabled */
+ list_for_each_entry(adv, &req->hdev->adv_instances, list) {
+ __hci_req_enable_ext_advertising(req,
+ adv->instance);
+ }
+
+ } else {
+ /* Schedule for most recent instance to be restarted and begin
+ * the software rotation loop
+ */
+ __hci_req_schedule_adv_instance(req,
+ req->hdev->cur_adv_instance,
+ true);
+ }
+}
+
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
@@ -1153,7 +1203,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hdev->discovery_paused = true;
hdev->discovery_old_state = old_state;
- /* Stop advertising */
+ /* Stop directed advertising */
old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
if (old_state) {
set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
@@ -1162,6 +1212,10 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
&hdev->discov_off, 0);
}
+ /* Pause other advertisements */
+ if (hdev->adv_instance_cnt)
+ hci_suspend_adv_instances(&req);
+
hdev->advertising_paused = true;
hdev->advertising_old_state = old_state;
/* Disable page scan */
@@ -1212,7 +1266,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
/* Reset passive/background scanning to normal */
hci_req_config_le_suspend_scan(&req);
- /* Unpause advertising */
+ /* Unpause directed advertising */
hdev->advertising_paused = false;
if (hdev->advertising_old_state) {
set_bit(SUSPEND_UNPAUSE_ADVERTISING,
@@ -1223,6 +1277,10 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
hdev->advertising_old_state = 0;
}
+ /* Resume other advertisements */
+ if (hdev->adv_instance_cnt)
+ hci_resume_adv_instances(&req);
+
/* Unpause discovery */
hdev->discovery_paused = false;
if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
@@ -1533,11 +1591,14 @@ void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
memset(&cp, 0, sizeof(cp));
- if (instance)
+ /* Extended scan response data doesn't allow a response to be
+ * set if the instance isn't scannable.
+ */
+ if (get_adv_instance_scan_rsp_len(hdev, instance))
len = create_instance_scan_rsp_data(hdev, instance,
cp.data);
else
- len = create_default_scan_rsp_data(hdev, cp.data);
+ len = 0;
if (hdev->scan_rsp_data_len == len &&
!memcmp(cp.data, hdev->scan_rsp_data, len))
@@ -1824,7 +1885,13 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
if (use_rpa) {
int to;
- *own_addr_type = ADDR_LE_DEV_RANDOM;
+ /* If the controller supports LL Privacy, use own address type
+ * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
+ */
+ if (use_ll_privacy(hdev))
+ *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+ else
+ *own_addr_type = ADDR_LE_DEV_RANDOM;
if (adv_instance) {
if (!adv_instance->rpa_expired &&
@@ -2183,14 +2250,6 @@ int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
return 0;
}
-static void cancel_adv_timeout(struct hci_dev *hdev)
-{
- if (hdev->adv_instance_timeout) {
- hdev->adv_instance_timeout = 0;
- cancel_delayed_work(&hdev->adv_instance_expire);
- }
-}
-
/* For a single instance:
* - force == true: The instance will be removed even when its remaining
* lifetime is not zero.
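On the own-address-type change above: the LE own address types come from the
Core Specification's advertising/scanning parameters, with 0x02/0x03 added for
the controller-based address resolution that LL Privacy enables. As a
reference sketch, these match the ADDR_LE_DEV_* constants this series relies
on in hci.h:

    /* LE own address types: */
    #define ADDR_LE_DEV_PUBLIC              0x00
    #define ADDR_LE_DEV_RANDOM              0x01
    #define ADDR_LE_DEV_PUBLIC_RESOLVED     0x02 /* RPA if available, else public */
    #define ADDR_LE_DEV_RANDOM_RESOLVED     0x03 /* RPA if available, else random */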
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index ade83e224567..1ab27b90ddcb 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -7301,9 +7301,10 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
goto drop;
}
- if ((chan->mode == L2CAP_MODE_ERTM ||
- chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
- goto drop;
+ if (chan->ops->filter) {
+ if (chan->ops->filter(chan, skb))
+ goto drop;
+ }
if (!control->sframe) {
int err;
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index e1a3e66b1754..f1b1edd0b697 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1521,8 +1521,6 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
parent = bt_sk(sk)->parent;
- sock_set_flag(sk, SOCK_ZAPPED);
-
switch (chan->state) {
case BT_OPEN:
case BT_BOUND:
@@ -1549,8 +1547,11 @@ static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
break;
}
-
release_sock(sk);
+
+ /* Only zap after cleanup to avoid use after free race */
+ sock_set_flag(sk, SOCK_ZAPPED);
}
static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state,
@@ -1663,6 +1664,19 @@ static void l2cap_sock_suspend_cb(struct l2cap_chan *chan)
sk->sk_state_change(sk);
}
+static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct sock *sk = chan->data;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ return sk_filter(sk, skb);
+ }
+
+ return 0;
+}
+
static const struct l2cap_ops l2cap_chan_ops = {
.name = "L2CAP Socket Interface",
.new_connection = l2cap_sock_new_connection_cb,
@@ -1678,6 +1692,7 @@ static const struct l2cap_ops l2cap_chan_ops = {
.get_sndtimeo = l2cap_sock_get_sndtimeo_cb,
.get_peer_pid = l2cap_sock_get_peer_pid_cb,
.alloc_skb = l2cap_sock_alloc_skb_cb,
+ .filter = l2cap_sock_filter,
};
static void l2cap_sock_destruct(struct sock *sk)
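The two hunks above replace the direct sk_filter() call in the L2CAP core with
an optional chan->ops->filter() callback, so the core no longer assumes
chan->data points at a socket (it does not for in-kernel L2CAP users). A
minimal sketch of the pattern for a hypothetical non-socket user:

    /* Sketch: a non-socket L2CAP user can omit .filter entirely, or
     * supply its own drop policy.  Mandatory callbacks elided.
     */
    static int my_chan_filter(struct l2cap_chan *chan, struct sk_buff *skb)
    {
            return 0;       /* 0 = keep, non-zero = drop */
    }

    static const struct l2cap_ops my_chan_ops = {
            .name   = "My L2CAP user",
            /* ... */
            .filter = my_chan_filter,       /* optional; NULL means no filtering */
    };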
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 5bbe71002fb9..12d7b368b428 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -163,6 +163,8 @@ static const u16 mgmt_events[] = {
MGMT_EV_PHY_CONFIGURATION_CHANGED,
MGMT_EV_EXP_FEATURE_CHANGED,
MGMT_EV_DEVICE_FLAGS_CHANGED,
+ MGMT_EV_CONTROLLER_SUSPEND,
+ MGMT_EV_CONTROLLER_RESUME,
};
static const u16 mgmt_untrusted_commands[] = {
@@ -782,7 +784,8 @@ static u32 get_supported_settings(struct hci_dev *hdev)
if (lmp_ssp_capable(hdev)) {
settings |= MGMT_SETTING_SSP;
- settings |= MGMT_SETTING_HS;
+ if (IS_ENABLED(CONFIG_BT_HS))
+ settings |= MGMT_SETTING_HS;
}
if (lmp_sc_capable(hdev))
@@ -1815,6 +1818,10 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
bt_dev_dbg(hdev, "sock %p", sk);
+ if (!IS_ENABLED(CONFIG_BT_HS))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
status = mgmt_bredr_support(hdev);
if (status)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
@@ -4157,7 +4164,7 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
{
struct adv_monitor *monitor = NULL;
struct mgmt_rp_read_adv_monitor_features *rp = NULL;
- int handle;
+ int handle, err;
size_t rp_size = 0;
__u32 supported = 0;
__u16 num_handles = 0;
@@ -4192,9 +4199,13 @@ static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
if (num_handles)
memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));
- return mgmt_cmd_complete(sk, hdev->id,
- MGMT_OP_READ_ADV_MONITOR_FEATURES,
- MGMT_STATUS_SUCCESS, rp, rp_size);
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_READ_ADV_MONITOR_FEATURES,
+ MGMT_STATUS_SUCCESS, rp, rp_size);
+
+ kfree(rp);
+
+ return err;
}
static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
@@ -7202,6 +7213,8 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)
if (ext_adv_capable(hdev)) {
flags |= MGMT_ADV_FLAG_SEC_1M;
+ flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
+ flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
if (hdev->le_features[1] & HCI_LE_PHY_2M)
flags |= MGMT_ADV_FLAG_SEC_2M;
@@ -7250,7 +7263,7 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
rp->supported_flags = cpu_to_le32(supported_flags);
rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
- rp->max_instances = HCI_MAX_ADV_INSTANCES;
+ rp->max_instances = hdev->le_num_of_adv_sets;
rp->num_instances = hdev->adv_instance_cnt;
instance = rp->instance;
@@ -7446,7 +7459,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
MGMT_STATUS_NOT_SUPPORTED);
- if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
+ if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
MGMT_STATUS_INVALID_PARAMS);
@@ -7699,7 +7712,7 @@ static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_STATUS_REJECTED);
- if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
+ if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
MGMT_STATUS_INVALID_PARAMS);
@@ -8262,6 +8275,10 @@ void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
ev.addr.type = link_to_bdaddr(link_type, addr_type);
ev.reason = reason;
+ /* Report disconnects due to suspend */
+ if (hdev->suspended)
+ ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;
+
mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
if (sk)
@@ -8868,6 +8885,30 @@ void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
}
+void mgmt_suspending(struct hci_dev *hdev, u8 state)
+{
+ struct mgmt_ev_controller_suspend ev;
+
+ ev.suspend_state = state;
+ mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
+}
+
+void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
+ u8 addr_type)
+{
+ struct mgmt_ev_controller_resume ev;
+
+ ev.wake_reason = reason;
+ if (bdaddr) {
+ bacpy(&ev.addr.bdaddr, bdaddr);
+ ev.addr.type = addr_type;
+ } else {
+ memset(&ev.addr, 0, sizeof(ev.addr));
+ }
+
+ mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
+}
+
static struct hci_mgmt_chan chan = {
.channel = HCI_CHANNEL_CONTROL,
.handler_count = ARRAY_SIZE(mgmt_handlers),
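mgmt_suspending()/mgmt_resuming() emit the new Controller Suspend and
Controller Resume mgmt events. Their wire format is small; as presumably
declared in include/net/bluetooth/mgmt.h for this series (shown for
illustration, the BlueZ mgmt-api documentation is authoritative):

    struct mgmt_ev_controller_suspend {
            __u8    suspend_state;  /* BT_RUNNING / BT_SUSPEND_* */
    } __packed;

    struct mgmt_ev_controller_resume {
            __u8    wake_reason;    /* MGMT_WAKE_REASON_* */
            struct mgmt_addr_info addr;
    } __packed;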
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index dcf7f96ff417..79ffcdef0b7a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1001,6 +1001,12 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
err = -EFAULT;
break;
+ case BT_SNDMTU:
+ case BT_RCVMTU:
+ if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval))
+ err = -EFAULT;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
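This lets user space read the SCO MTU through the generic BT_SNDMTU/BT_RCVMTU
options rather than the SCO-specific SCO_OPTIONS. A minimal usage sketch
(assuming BlueZ's <bluetooth/bluetooth.h> and an already-connected SCO
socket):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/socket.h>
    #include <bluetooth/bluetooth.h>

    static void print_sco_mtu(int sk)
    {
            uint32_t mtu;
            socklen_t len = sizeof(mtu);

            if (getsockopt(sk, SOL_BLUETOOTH, BT_SNDMTU, &mtu, &len) == 0)
                    printf("SCO send MTU: %u\n", mtu);
    }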
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index a66f211726e7..c1c30a9f76f3 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -11,6 +11,7 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/error-injection.h>
+#include <linux/smp.h>
#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>
@@ -204,6 +205,9 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
int b = 2, err = -EFAULT;
u32 retval = 0;
+ if (kattr->test.flags || kattr->test.cpu)
+ return -EINVAL;
+
switch (prog->expected_attach_type) {
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
@@ -236,6 +240,84 @@ out:
return err;
}
+struct bpf_raw_tp_test_run_info {
+ struct bpf_prog *prog;
+ void *ctx;
+ u32 retval;
+};
+
+static void
+__bpf_prog_test_run_raw_tp(void *data)
+{
+ struct bpf_raw_tp_test_run_info *info = data;
+
+ rcu_read_lock();
+ info->retval = BPF_PROG_RUN(info->prog, info->ctx);
+ rcu_read_unlock();
+}
+
+int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
+ const union bpf_attr *kattr,
+ union bpf_attr __user *uattr)
+{
+ void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
+ __u32 ctx_size_in = kattr->test.ctx_size_in;
+ struct bpf_raw_tp_test_run_info info;
+ int cpu = kattr->test.cpu, err = 0;
+ int current_cpu;
+
+ /* doesn't support data_in/out, ctx_out, duration, or repeat */
+ if (kattr->test.data_in || kattr->test.data_out ||
+ kattr->test.ctx_out || kattr->test.duration ||
+ kattr->test.repeat)
+ return -EINVAL;
+
+ if (ctx_size_in < prog->aux->max_ctx_offset)
+ return -EINVAL;
+
+ if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
+ return -EINVAL;
+
+ if (ctx_size_in) {
+ info.ctx = kzalloc(ctx_size_in, GFP_USER);
+ if (!info.ctx)
+ return -ENOMEM;
+ if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
+ err = -EFAULT;
+ goto out;
+ }
+ } else {
+ info.ctx = NULL;
+ }
+
+ info.prog = prog;
+
+ current_cpu = get_cpu();
+ if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
+ cpu == current_cpu) {
+ __bpf_prog_test_run_raw_tp(&info);
+ } else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
+ /* smp_call_function_single() also checks cpu_online()
+ * after csd_lock(). However, since cpu is from user
+ * space, let's do an extra quick check to filter out
+ * invalid values before smp_call_function_single().
+ */
+ err = -ENXIO;
+ } else {
+ err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
+ &info, 1);
+ }
+ put_cpu();
+
+ if (!err &&
+ copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
+ err = -EFAULT;
+
+out:
+ kfree(info.ctx);
+ return err;
+}
+
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
@@ -410,6 +492,9 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
void *data;
int ret;
+ if (kattr->test.flags || kattr->test.cpu)
+ return -EINVAL;
+
data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (IS_ERR(data))
@@ -607,6 +692,9 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
return -EINVAL;
+ if (kattr->test.flags || kattr->test.cpu)
+ return -EINVAL;
+
if (size < ETH_HLEN)
return -EINVAL;
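bpf_prog_test_run_raw_tp() is the other significant piece here: BPF_PROG_TEST_RUN
gains a flags/cpu pair so a raw tracepoint program can be test-run on a chosen
CPU via smp_call_function_single(). A minimal raw-syscall sketch of the
user-space side (assuming a loaded raw_tp prog_fd and a uapi header defining
BPF_F_TEST_RUN_ON_CPU):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int test_run_on_cpu(int prog_fd, void *ctx, __u32 ctx_size, int cpu)
    {
            union bpf_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.test.prog_fd     = prog_fd;
            attr.test.ctx_in      = (__u64)(unsigned long)ctx;
            attr.test.ctx_size_in = ctx_size;
            attr.test.flags       = BPF_F_TEST_RUN_ON_CPU;
            attr.test.cpu         = cpu;

            if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) < 0)
                    return -1;

            return attr.test.retval;
    }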
diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig
index 73d0b12789f1..8ad0233ce497 100644
--- a/net/bpfilter/Kconfig
+++ b/net/bpfilter/Kconfig
@@ -2,6 +2,7 @@
menuconfig BPFILTER
bool "BPF based packet filtering framework (BPFILTER)"
depends on NET && BPF && INET
+ select USERMODE_DRIVER
help
This builds the experimental bpfilter framework that aims to
provide netfilter-compatible functionality via BPF.
diff --git a/net/bridge/br.c b/net/bridge/br.c
index b6fe30e3768f..401eeb9142eb 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -183,6 +183,11 @@ static int br_switchdev_event(struct notifier_block *unused,
br_fdb_offloaded_set(br, p, fdb_info->addr,
fdb_info->vid, fdb_info->offloaded);
break;
+ case SWITCHDEV_FDB_FLUSH_TO_BRIDGE:
+ fdb_info = ptr;
+ /* Don't delete static entries */
+ br_fdb_delete_by_port(br, p, fdb_info->vid, 0);
+ break;
}
out:
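SWITCHDEV_FDB_FLUSH_TO_BRIDGE gives switchdev drivers a way to ask the bridge
to drop the dynamic FDB entries learned on a port (static entries are
deliberately kept, per the comment above). Roughly how a driver would raise it
(hypothetical sketch):

    #include <net/switchdev.h>

    static void my_drv_flush_brport_fdb(struct net_device *brport_dev, u16 vid)
    {
            struct switchdev_notifier_fdb_info info = {
                    .vid = vid,
            };

            /* the bridge deletes the port's dynamic entries for this VLAN */
            call_switchdev_notifiers(SWITCHDEV_FDB_FLUSH_TO_BRIDGE,
                                     brport_dev, &info.info, NULL);
    }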
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 9a2fb4aa1a10..6f742fee874a 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -206,27 +206,8 @@ static void br_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct net_bridge *br = netdev_priv(dev);
- struct pcpu_sw_netstats tmp, sum = { 0 };
- unsigned int cpu;
-
- for_each_possible_cpu(cpu) {
- unsigned int start;
- const struct pcpu_sw_netstats *bstats
- = per_cpu_ptr(br->stats, cpu);
- do {
- start = u64_stats_fetch_begin_irq(&bstats->syncp);
- memcpy(&tmp, bstats, sizeof(tmp));
- } while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
- sum.tx_bytes += tmp.tx_bytes;
- sum.tx_packets += tmp.tx_packets;
- sum.rx_bytes += tmp.rx_bytes;
- sum.rx_packets += tmp.rx_packets;
- }
- stats->tx_bytes = sum.tx_bytes;
- stats->tx_packets = sum.tx_packets;
- stats->rx_bytes = sum.rx_bytes;
- stats->rx_packets = sum.rx_packets;
+ dev_fetch_sw_netstats(stats, br->stats);
}
static int br_change_mtu(struct net_device *dev, int new_mtu)
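dev_fetch_sw_netstats() is a new net core helper that consolidates this
per-CPU aggregation, which many drivers open-coded exactly as the removed
lines did. Conceptually it behaves like the following (a sketch of the
behaviour, not the verbatim upstream body):

    void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
                               const struct pcpu_sw_netstats __percpu *netstats)
    {
            int cpu;

            for_each_possible_cpu(cpu) {
                    const struct pcpu_sw_netstats *stats;
                    struct pcpu_sw_netstats tmp;
                    unsigned int start;

                    stats = per_cpu_ptr(netstats, cpu);
                    do {
                            start = u64_stats_fetch_begin_irq(&stats->syncp);
                            tmp = *stats;
                    } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

                    s->rx_packets += tmp.rx_packets;
                    s->rx_bytes   += tmp.rx_bytes;
                    s->tx_packets += tmp.tx_packets;
                    s->tx_bytes   += tmp.tx_bytes;
            }
    }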
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7629b63f6f30..e28ffadd1371 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -274,14 +274,23 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *prev = NULL;
struct net_bridge_port_group *p;
+ bool allow_mode_include = true;
struct hlist_node *rp;
rp = rcu_dereference(hlist_first_rcu(&br->router_list));
- p = mdst ? rcu_dereference(mdst->ports) : NULL;
+ if (mdst) {
+ p = rcu_dereference(mdst->ports);
+ if (br_multicast_should_handle_mode(br, mdst->addr.proto) &&
+ br_multicast_is_star_g(&mdst->addr))
+ allow_mode_include = false;
+ } else {
+ p = NULL;
+ }
+
while (p || rp) {
struct net_bridge_port *port, *lport, *rport;
- lport = p ? p->port : NULL;
+ lport = p ? p->key.port : NULL;
rport = hlist_entry_safe(rp, struct net_bridge_port, rlist);
if ((unsigned long)lport > (unsigned long)rport) {
@@ -292,6 +301,10 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
local_orig);
goto delivered;
}
+ if ((!allow_mode_include &&
+ p->filter_mode == MCAST_INCLUDE) ||
+ (p->flags & MDB_PG_FLAGS_BLOCKED))
+ goto delivered;
} else {
port = rport;
}
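The effect with IGMPv3/MLDv2 snooping enabled: a port group in INCLUDE mode is
served purely by its (S,G) entries, so (*,G) flooding now skips it (and any
BLOCKED port group) to avoid duplicating traffic. An illustrative, not
verbatim, view of the resulting state:

    # bridge mdb show dev br0
    dev br0 port swp1 grp 239.1.1.1 src 192.0.2.1 temp filter_mode include
    dev br0 port swp2 grp 239.1.1.1 temp filter_mode exclude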
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 5e71fc8b826f..2db800fc27ca 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -103,7 +103,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
/*
* Legacy ioctl's through SIOCDEVPRIVATE
- * This interface is deprecated because it was too difficult to
+ * This interface is deprecated because it was too difficult
* to do the translation for 32/64bit ioctl compatibility.
*/
static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index da5ed4cf9233..e15bab19a012 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -62,25 +62,96 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
e->flags |= MDB_FLAGS_OFFLOAD;
if (flags & MDB_PG_FLAGS_FAST_LEAVE)
e->flags |= MDB_FLAGS_FAST_LEAVE;
+ if (flags & MDB_PG_FLAGS_STAR_EXCL)
+ e->flags |= MDB_FLAGS_STAR_EXCL;
+ if (flags & MDB_PG_FLAGS_BLOCKED)
+ e->flags |= MDB_FLAGS_BLOCKED;
}
-static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
+ struct nlattr **mdb_attrs)
{
memset(ip, 0, sizeof(struct br_ip));
ip->vid = entry->vid;
ip->proto = entry->addr.proto;
- if (ip->proto == htons(ETH_P_IP))
- ip->u.ip4 = entry->addr.u.ip4;
+ switch (ip->proto) {
+ case htons(ETH_P_IP):
+ ip->dst.ip4 = entry->addr.u.ip4;
+ if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
+ ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
+ break;
#if IS_ENABLED(CONFIG_IPV6)
- else
- ip->u.ip6 = entry->addr.u.ip6;
+ case htons(ETH_P_IPV6):
+ ip->dst.ip6 = entry->addr.u.ip6;
+ if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
+ ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
+ break;
+#endif
+ }
+}
+
+static int __mdb_fill_srcs(struct sk_buff *skb,
+ struct net_bridge_port_group *p)
+{
+ struct net_bridge_group_src *ent;
+ struct nlattr *nest, *nest_ent;
+
+ if (hlist_empty(&p->src_list))
+ return 0;
+
+ nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
+ if (!nest)
+ return -EMSGSIZE;
+
+ hlist_for_each_entry_rcu(ent, &p->src_list, node,
+ lockdep_is_held(&p->key.port->br->multicast_lock)) {
+ nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
+ if (!nest_ent)
+ goto out_cancel_err;
+ switch (ent->addr.proto) {
+ case htons(ETH_P_IP):
+ if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
+ ent->addr.src.ip4)) {
+ nla_nest_cancel(skb, nest_ent);
+ goto out_cancel_err;
+ }
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
+ &ent->addr.src.ip6)) {
+ nla_nest_cancel(skb, nest_ent);
+ goto out_cancel_err;
+ }
+ break;
#endif
+ default:
+ nla_nest_cancel(skb, nest_ent);
+ continue;
+ }
+ if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
+ br_timer_value(&ent->timer))) {
+ nla_nest_cancel(skb, nest_ent);
+ goto out_cancel_err;
+ }
+ nla_nest_end(skb, nest_ent);
+ }
+
+ nla_nest_end(skb, nest);
+
+ return 0;
+
+out_cancel_err:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
}
static int __mdb_fill_info(struct sk_buff *skb,
struct net_bridge_mdb_entry *mp,
struct net_bridge_port_group *p)
{
+ bool dump_srcs_mode = false;
struct timer_list *mtimer;
struct nlattr *nest_ent;
struct br_mdb_entry e;
@@ -89,7 +160,7 @@ static int __mdb_fill_info(struct sk_buff *skb,
memset(&e, 0, sizeof(e));
if (p) {
- ifindex = p->port->dev->ifindex;
+ ifindex = p->key.port->dev->ifindex;
mtimer = &p->timer;
flags = p->flags;
} else {
@@ -101,10 +172,10 @@ static int __mdb_fill_info(struct sk_buff *skb,
e.ifindex = ifindex;
e.vid = mp->addr.vid;
if (mp->addr.proto == htons(ETH_P_IP))
- e.addr.u.ip4 = mp->addr.u.ip4;
+ e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
if (mp->addr.proto == htons(ETH_P_IPV6))
- e.addr.u.ip6 = mp->addr.u.ip6;
+ e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
e.addr.proto = mp->addr.proto;
nest_ent = nla_nest_start_noflag(skb,
@@ -115,19 +186,53 @@ static int __mdb_fill_info(struct sk_buff *skb,
if (nla_put_nohdr(skb, sizeof(e), &e) ||
nla_put_u32(skb,
MDBA_MDB_EATTR_TIMER,
- br_timer_value(mtimer))) {
- nla_nest_cancel(skb, nest_ent);
- return -EMSGSIZE;
+ br_timer_value(mtimer)))
+ goto nest_err;
+
+ switch (mp->addr.proto) {
+ case htons(ETH_P_IP):
+ dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
+ if (mp->addr.src.ip4) {
+ if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
+ mp->addr.src.ip4))
+ goto nest_err;
+ break;
+ }
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
+ if (!ipv6_addr_any(&mp->addr.src.ip6)) {
+ if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
+ &mp->addr.src.ip6))
+ goto nest_err;
+ break;
+ }
+ break;
+#endif
+ }
+ if (p) {
+ if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
+ goto nest_err;
+ if (dump_srcs_mode &&
+ (__mdb_fill_srcs(skb, p) ||
+ nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
+ p->filter_mode)))
+ goto nest_err;
}
nla_nest_end(skb, nest_ent);
return 0;
+
+nest_err:
+ nla_nest_cancel(skb, nest_ent);
+ return -EMSGSIZE;
}
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
- int idx = 0, s_idx = cb->args[1], err = 0;
+ int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mdb_entry *mp;
struct nlattr *nest, *nest2;
@@ -152,7 +257,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
break;
}
- if (mp->host_joined) {
+ if (!s_pidx && mp->host_joined) {
err = __mdb_fill_info(skb, mp, NULL);
if (err) {
nla_nest_cancel(skb, nest2);
@@ -162,15 +267,21 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
- if (!p->port)
+ if (!p->key.port)
continue;
+ if (pidx < s_pidx)
+ goto skip_pg;
err = __mdb_fill_info(skb, mp, p);
if (err) {
- nla_nest_cancel(skb, nest2);
+ nla_nest_end(skb, nest2);
goto out;
}
+skip_pg:
+ pidx++;
}
+ pidx = 0;
+ s_pidx = 0;
nla_nest_end(skb, nest2);
skip:
idx++;
@@ -178,6 +289,7 @@ skip:
out:
cb->args[1] = idx;
+ cb->args[2] = pidx;
nla_nest_end(skb, nest);
return err;
}
@@ -263,14 +375,15 @@ out:
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
struct net_device *dev,
- struct br_mdb_entry *entry, u32 pid,
- u32 seq, int type, unsigned int flags)
+ struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg,
+ int type)
{
struct nlmsghdr *nlh;
struct br_port_msg *bpm;
struct nlattr *nest, *nest2;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
+ nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
@@ -285,7 +398,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
if (nest2 == NULL)
goto end;
- if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
+ if (__mdb_fill_info(skb, mp, pg))
goto end;
nla_nest_end(skb, nest2);
@@ -300,10 +413,58 @@ cancel:
return -EMSGSIZE;
}
-static inline size_t rtnl_mdb_nlmsg_size(void)
+static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
- return NLMSG_ALIGN(sizeof(struct br_port_msg))
- + nla_total_size(sizeof(struct br_mdb_entry));
+ size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
+ nla_total_size(sizeof(struct br_mdb_entry)) +
+ nla_total_size(sizeof(u32));
+ struct net_bridge_group_src *ent;
+ size_t addr_size = 0;
+
+ if (!pg)
+ goto out;
+
+ /* MDBA_MDB_EATTR_RTPROT */
+ nlmsg_size += nla_total_size(sizeof(u8));
+
+ switch (pg->key.addr.proto) {
+ case htons(ETH_P_IP):
+ /* MDBA_MDB_EATTR_SOURCE */
+ if (pg->key.addr.src.ip4)
+ nlmsg_size += nla_total_size(sizeof(__be32));
+ if (pg->key.port->br->multicast_igmp_version == 2)
+ goto out;
+ addr_size = sizeof(__be32);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ /* MDBA_MDB_EATTR_SOURCE */
+ if (!ipv6_addr_any(&pg->key.addr.src.ip6))
+ nlmsg_size += nla_total_size(sizeof(struct in6_addr));
+ if (pg->key.port->br->multicast_mld_version == 1)
+ goto out;
+ addr_size = sizeof(struct in6_addr);
+ break;
+#endif
+ }
+
+ /* MDBA_MDB_EATTR_GROUP_MODE */
+ nlmsg_size += nla_total_size(sizeof(u8));
+
+ /* MDBA_MDB_EATTR_SRC_LIST nested attr */
+ if (!hlist_empty(&pg->src_list))
+ nlmsg_size += nla_total_size(0);
+
+ hlist_for_each_entry(ent, &pg->src_list, node) {
+ /* MDBA_MDB_SRCLIST_ENTRY nested attr +
+ * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
+ */
+ nlmsg_size += nla_total_size(0) +
+ nla_total_size(addr_size) +
+ nla_total_size(sizeof(u32));
+ }
+out:
+ return nlmsg_size;
}
struct br_mdb_complete_info {
@@ -329,7 +490,7 @@ static void br_mdb_complete(struct net_device *dev, int err, void *priv)
goto out;
for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port != port)
+ if (p->key.port != port)
continue;
p->flags |= MDB_PG_FLAGS_OFFLOAD;
}
@@ -341,21 +502,22 @@ err:
static void br_mdb_switchdev_host_port(struct net_device *dev,
struct net_device *lower_dev,
- struct br_mdb_entry *entry, int type)
+ struct net_bridge_mdb_entry *mp,
+ int type)
{
struct switchdev_obj_port_mdb mdb = {
.obj = {
.id = SWITCHDEV_OBJ_ID_HOST_MDB,
.flags = SWITCHDEV_F_DEFER,
},
- .vid = entry->vid,
+ .vid = mp->addr.vid,
};
- if (entry->addr.proto == htons(ETH_P_IP))
- ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
+ if (mp->addr.proto == htons(ETH_P_IP))
+ ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
else
- ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
+ ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
mdb.obj.orig_dev = dev;
@@ -370,17 +532,19 @@ static void br_mdb_switchdev_host_port(struct net_device *dev,
}
static void br_mdb_switchdev_host(struct net_device *dev,
- struct br_mdb_entry *entry, int type)
+ struct net_bridge_mdb_entry *mp, int type)
{
struct net_device *lower_dev;
struct list_head *iter;
netdev_for_each_lower_dev(dev, lower_dev, iter)
- br_mdb_switchdev_host_port(dev, lower_dev, entry, type);
+ br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}
-static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
- struct br_mdb_entry *entry, int type)
+void br_mdb_notify(struct net_device *dev,
+ struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg,
+ int type)
{
struct br_mdb_complete_info *complete_info;
struct switchdev_obj_port_mdb mdb = {
@@ -388,44 +552,45 @@ static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
.id = SWITCHDEV_OBJ_ID_PORT_MDB,
.flags = SWITCHDEV_F_DEFER,
},
- .vid = entry->vid,
+ .vid = mp->addr.vid,
};
- struct net_device *port_dev;
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
- port_dev = __dev_get_by_index(net, entry->ifindex);
- if (entry->addr.proto == htons(ETH_P_IP))
- ip_eth_mc_map(entry->addr.u.ip4, mdb.addr);
+ if (pg) {
+ if (mp->addr.proto == htons(ETH_P_IP))
+ ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
- else
- ipv6_eth_mc_map(&entry->addr.u.ip6, mdb.addr);
+ else
+ ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
-
- mdb.obj.orig_dev = port_dev;
- if (p && port_dev && type == RTM_NEWMDB) {
- complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
- if (complete_info) {
- complete_info->port = p;
- __mdb_entry_to_br_ip(entry, &complete_info->ip);
+ mdb.obj.orig_dev = pg->key.port->dev;
+ switch (type) {
+ case RTM_NEWMDB:
+ complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+ if (!complete_info)
+ break;
+ complete_info->port = pg->key.port;
+ complete_info->ip = mp->addr;
mdb.obj.complete_priv = complete_info;
mdb.obj.complete = br_mdb_complete;
- if (switchdev_port_obj_add(port_dev, &mdb.obj, NULL))
+ if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
kfree(complete_info);
+ break;
+ case RTM_DELMDB:
+ switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
+ break;
}
- } else if (p && port_dev && type == RTM_DELMDB) {
- switchdev_port_obj_del(port_dev, &mdb.obj);
+ } else {
+ br_mdb_switchdev_host(dev, mp, type);
}
- if (!p)
- br_mdb_switchdev_host(dev, entry, type);
-
- skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
+ skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
if (!skb)
goto errout;
- err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
+ err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -437,26 +602,6 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
- struct br_ip *group, int type, u8 flags)
-{
- struct br_mdb_entry entry;
-
- memset(&entry, 0, sizeof(entry));
- if (port)
- entry.ifindex = port->dev->ifindex;
- else
- entry.ifindex = dev->ifindex;
- entry.addr.proto = group->proto;
- entry.addr.u.ip4 = group->u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
- entry.addr.u.ip6 = group->u.ip6;
-#endif
- entry.vid = group->vid;
- __mdb_entry_fill_flags(&entry, flags);
- __br_mdb_notify(dev, port, &entry, type);
-}
-
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
struct net_device *dev,
int ifindex, u32 pid,
@@ -524,33 +669,94 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
-static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
+static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
+ struct netlink_ext_ack *extack)
{
- if (entry->ifindex == 0)
+ if (entry->ifindex == 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
return false;
+ }
if (entry->addr.proto == htons(ETH_P_IP)) {
- if (!ipv4_is_multicast(entry->addr.u.ip4))
+ if (!ipv4_is_multicast(entry->addr.u.ip4)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
return false;
- if (ipv4_is_local_multicast(entry->addr.u.ip4))
+ }
+ if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
return false;
+ }
#if IS_ENABLED(CONFIG_IPV6)
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
- if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
+ if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
return false;
+ }
#endif
- } else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
+ return false;
+ }
+
+ if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
+ NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
return false;
- if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
+ }
+ if (entry->vid >= VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
return false;
- if (entry->vid >= VLAN_VID_MASK)
+ }
+
+ return true;
+}
+
+static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
+ struct netlink_ext_ack *extack)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ if (nla_len(attr) != sizeof(struct in_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
+ return false;
+ }
+ if (ipv4_is_multicast(nla_get_in_addr(attr))) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
+ return false;
+ }
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6): {
+ struct in6_addr src;
+
+ if (nla_len(attr) != sizeof(struct in6_addr)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
+ return false;
+ }
+ src = nla_get_in6_addr(attr);
+ if (ipv6_addr_is_multicast(&src)) {
+ NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
+ return false;
+ }
+ break;
+ }
+#endif
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
return false;
+ }
return true;
}
+static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
+ [MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
+ sizeof(struct in_addr),
+ sizeof(struct in6_addr)),
+};
+
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
- struct net_device **pdev, struct br_mdb_entry **pentry)
+ struct net_device **pdev, struct br_mdb_entry **pentry,
+ struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
struct br_mdb_entry *entry;
@@ -566,51 +772,86 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
bpm = nlmsg_data(nlh);
if (bpm->ifindex == 0) {
- pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
return -EINVAL;
}
dev = __dev_get_by_index(net, bpm->ifindex);
if (dev == NULL) {
- pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
+ NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
return -ENODEV;
}
if (!(dev->priv_flags & IFF_EBRIDGE)) {
- pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
+ NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
return -EOPNOTSUPP;
}
*pdev = dev;
- if (!tb[MDBA_SET_ENTRY] ||
- nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
- pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
+ if (!tb[MDBA_SET_ENTRY]) {
+ NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
+ return -EINVAL;
+ }
+ if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
return -EINVAL;
}
entry = nla_data(tb[MDBA_SET_ENTRY]);
- if (!is_valid_mdb_entry(entry)) {
- pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
+ if (!is_valid_mdb_entry(entry, extack))
return -EINVAL;
+ *pentry = entry;
+
+ if (tb[MDBA_SET_ENTRY_ATTRS]) {
+ err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
+ tb[MDBA_SET_ENTRY_ATTRS],
+ br_mdbe_attrs_pol, extack);
+ if (err)
+ return err;
+ if (mdb_attrs[MDBE_ATTR_SOURCE] &&
+ !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
+ entry->addr.proto, extack))
+ return -EINVAL;
+ } else {
+ memset(mdb_attrs, 0,
+ sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
}
- *pentry = entry;
return 0;
}
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
- struct br_ip *group, unsigned char state)
+ struct br_mdb_entry *entry,
+ struct nlattr **mdb_attrs,
+ struct netlink_ext_ack *extack)
{
- struct net_bridge_mdb_entry *mp;
+ struct net_bridge_mdb_entry *mp, *star_mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
+ struct br_ip group, star_group;
unsigned long now = jiffies;
+ u8 filter_mode;
int err;
- mp = br_mdb_ip_get(br, group);
+ __mdb_entry_to_br_ip(entry, &group, mdb_attrs);
+
+ /* host join errors which can happen before creating the group */
+ if (!port) {
+ /* don't allow any flags for host-joined groups */
+ if (entry->state) {
+ NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
+ return -EINVAL;
+ }
+ if (!br_multicast_is_star_g(&group)) {
+ NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
+ return -EINVAL;
+ }
+ }
+
+ mp = br_mdb_ip_get(br, &group);
if (!mp) {
- mp = br_multicast_new_group(br, group);
+ mp = br_multicast_new_group(br, &group);
err = PTR_ERR_OR_ZERO(mp);
if (err)
return err;
@@ -618,13 +859,13 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
/* host join */
if (!port) {
- /* don't allow any flags for host-joined groups */
- if (state)
- return -EINVAL;
- if (mp->host_joined)
+ if (mp->host_joined) {
+ NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
return -EEXIST;
+ }
br_multicast_host_join(mp, false);
+ br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);
return 0;
}
@@ -632,54 +873,69 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port == port)
+ if (p->key.port == port) {
+ NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
return -EEXIST;
- if ((unsigned long)p->port < (unsigned long)port)
+ }
+ if ((unsigned long)p->key.port < (unsigned long)port)
break;
}
- p = br_multicast_new_port_group(port, group, *pp, state, NULL);
- if (unlikely(!p))
+ filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
+ MCAST_INCLUDE;
+
+ p = br_multicast_new_port_group(port, &group, *pp, entry->state, NULL,
+ filter_mode, RTPROT_STATIC);
+ if (unlikely(!p)) {
+ NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
return -ENOMEM;
+ }
rcu_assign_pointer(*pp, p);
- if (state == MDB_TEMPORARY)
+ if (entry->state == MDB_TEMPORARY)
mod_timer(&p->timer, now + br->multicast_membership_interval);
+ br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
+ /* If we are adding a new EXCLUDE port group (*,G), it needs to also be
+ * added to all S,G entries for proper replication. If we are adding
+ * a new INCLUDE port group (S,G), then all *,G EXCLUDE ports need to
+ * be added to it for proper replication.
+ */
+ if (br_multicast_should_handle_mode(br, group.proto)) {
+ switch (filter_mode) {
+ case MCAST_EXCLUDE:
+ br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
+ break;
+ case MCAST_INCLUDE:
+ star_group = p->key.addr;
+ memset(&star_group.src, 0, sizeof(star_group.src));
+ star_mp = br_mdb_ip_get(br, &star_group);
+ if (star_mp)
+ br_multicast_sg_add_exclude_ports(star_mp, p);
+ break;
+ }
+ }
return 0;
}
static int __br_mdb_add(struct net *net, struct net_bridge *br,
- struct br_mdb_entry *entry)
+ struct net_bridge_port *p,
+ struct br_mdb_entry *entry,
+ struct nlattr **mdb_attrs,
+ struct netlink_ext_ack *extack)
{
- struct br_ip ip;
- struct net_device *dev;
- struct net_bridge_port *p = NULL;
int ret;
- if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
- return -EINVAL;
-
- if (entry->ifindex != br->dev->ifindex) {
- dev = __dev_get_by_index(net, entry->ifindex);
- if (!dev)
- return -ENODEV;
-
- p = br_port_get_rtnl(dev);
- if (!p || p->br != br || p->state == BR_STATE_DISABLED)
- return -EINVAL;
- }
-
- __mdb_entry_to_br_ip(entry, &ip);
-
spin_lock_bh(&br->multicast_lock);
- ret = br_mdb_add_group(br, p, &ip, entry->state);
+ ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
spin_unlock_bh(&br->multicast_lock);
+
return ret;
}
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
struct net *net = sock_net(skb->sk);
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
@@ -689,20 +945,43 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_bridge *br;
int err;
- err = br_mdb_parse(skb, nlh, &dev, &entry);
+ err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
if (err < 0)
return err;
br = netdev_priv(dev);
+ if (!netif_running(br->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
+ return -EINVAL;
+ }
+
+ if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
+ NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
+ return -EINVAL;
+ }
+
if (entry->ifindex != br->dev->ifindex) {
pdev = __dev_get_by_index(net, entry->ifindex);
- if (!pdev)
+ if (!pdev) {
+ NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
return -ENODEV;
+ }
p = br_port_get_rtnl(pdev);
- if (!p || p->br != br || p->state == BR_STATE_DISABLED)
+ if (!p) {
+ NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
+ return -EINVAL;
+ }
+
+ if (p->br != br) {
+ NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
return -EINVAL;
+ }
+ if (p->state == BR_STATE_DISABLED) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
+ return -EINVAL;
+ }
vg = nbp_vlan_group(p);
} else {
vg = br_vlan_group(br);
@@ -714,21 +993,19 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
- err = __br_mdb_add(net, br, entry);
+ err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
if (err)
break;
- __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
}
} else {
- err = __br_mdb_add(net, br, entry);
- if (!err)
- __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
+ err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
}
return err;
}
-static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
+static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
+ struct nlattr **mdb_attrs)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
@@ -739,7 +1016,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
return -EINVAL;
- __mdb_entry_to_br_ip(entry, &ip);
+ __mdb_entry_to_br_ip(entry, &ip, mdb_attrs);
spin_lock_bh(&br->multicast_lock);
mp = br_mdb_ip_get(br, &ip);
@@ -750,6 +1027,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
br_multicast_host_leave(mp, false);
err = 0;
+ br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
if (!mp->ports && netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
goto unlock;
@@ -758,22 +1036,14 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (!p->port || p->port->dev->ifindex != entry->ifindex)
+ if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
continue;
- if (p->port->state == BR_STATE_DISABLED)
+ if (p->key.port->state == BR_STATE_DISABLED)
goto unlock;
- __mdb_entry_fill_flags(entry, p->flags);
- rcu_assign_pointer(*pp, p->next);
- hlist_del_init(&p->mglist);
- del_timer(&p->timer);
- kfree_rcu(p, rcu);
+ br_multicast_del_pg(mp, p, pp);
err = 0;
-
- if (!mp->ports && !mp->host_joined &&
- netif_running(br->dev))
- mod_timer(&mp->timer, jiffies);
break;
}
@@ -785,6 +1055,7 @@ unlock:
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
+ struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
struct net *net = sock_net(skb->sk);
struct net_bridge_vlan_group *vg;
struct net_bridge_port *p = NULL;
@@ -794,7 +1065,7 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_bridge *br;
int err;
- err = br_mdb_parse(skb, nlh, &dev, &entry);
+ err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
if (err < 0)
return err;
@@ -819,14 +1090,10 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
- err = __br_mdb_del(br, entry);
- if (!err)
- __br_mdb_notify(dev, p, entry, RTM_DELMDB);
+ err = __br_mdb_del(br, entry, mdb_attrs);
}
} else {
- err = __br_mdb_del(br, entry);
- if (!err)
- __br_mdb_notify(dev, p, entry, RTM_DELMDB);
+ err = __br_mdb_del(br, entry, mdb_attrs);
}
return err;
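With MDBA_SET_ENTRY_ATTRS and MDBE_ATTR_SOURCE, user space can now install
(S,G) entries, and every rejection carries an extack message instead of a
pr_info() line. With a matching iproute2, usage looks roughly like
(illustrative command):

    bridge mdb add dev br0 port swp1 grp 239.1.1.1 src 192.0.2.1 permanent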
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4c4a93abde68..eae898c3cff7 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -41,6 +41,13 @@ static const struct rhashtable_params br_mdb_rht_params = {
.automatic_shrinking = true,
};
+static const struct rhashtable_params br_sg_port_rht_params = {
+ .head_offset = offsetof(struct net_bridge_port_group, rhnode),
+ .key_offset = offsetof(struct net_bridge_port_group, key),
+ .key_len = sizeof(struct net_bridge_port_group_sg_key),
+ .automatic_shrinking = true,
+};
+
static void br_multicast_start_querier(struct net_bridge *br,
struct bridge_mcast_own_query *query);
static void br_multicast_add_router(struct net_bridge *br,
@@ -50,6 +57,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
__be32 group,
__u16 vid,
const unsigned char *src);
+static void br_multicast_port_group_rexmit(struct timer_list *t);
static void __del_port_router(struct net_bridge_port *p);
#if IS_ENABLED(CONFIG_IPV6)
@@ -58,6 +66,26 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
const struct in6_addr *group,
__u16 vid, const unsigned char *src);
#endif
+static struct net_bridge_port_group *
+__br_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *group,
+ const unsigned char *src,
+ u8 filter_mode,
+ bool igmpv2_mldv1,
+ bool blocked);
+static void br_multicast_find_del_pg(struct net_bridge *br,
+ struct net_bridge_port_group *pg);
+
+static struct net_bridge_port_group *
+br_sg_port_find(struct net_bridge *br,
+ struct net_bridge_port_group_sg_key *sg_p)
+{
+ lockdep_assert_held_once(&br->multicast_lock);
+
+ return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
+ br_sg_port_rht_params);
+}
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
struct br_ip *dst)
@@ -85,7 +113,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
struct br_ip br_dst;
memset(&br_dst, 0, sizeof(br_dst));
- br_dst.u.ip4 = dst;
+ br_dst.dst.ip4 = dst;
br_dst.proto = htons(ETH_P_IP);
br_dst.vid = vid;
@@ -100,7 +128,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
struct br_ip br_dst;
memset(&br_dst, 0, sizeof(br_dst));
- br_dst.u.ip6 = *dst;
+ br_dst.dst.ip6 = *dst;
br_dst.proto = htons(ETH_P_IPV6);
br_dst.vid = vid;
@@ -125,11 +153,29 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
switch (skb->protocol) {
case htons(ETH_P_IP):
- ip.u.ip4 = ip_hdr(skb)->daddr;
+ ip.dst.ip4 = ip_hdr(skb)->daddr;
+ if (br->multicast_igmp_version == 3) {
+ struct net_bridge_mdb_entry *mdb;
+
+ ip.src.ip4 = ip_hdr(skb)->saddr;
+ mdb = br_mdb_ip_get_rcu(br, &ip);
+ if (mdb)
+ return mdb;
+ ip.src.ip4 = 0;
+ }
break;
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- ip.u.ip6 = ipv6_hdr(skb)->daddr;
+ ip.dst.ip6 = ipv6_hdr(skb)->daddr;
+ if (br->multicast_mld_version == 2) {
+ struct net_bridge_mdb_entry *mdb;
+
+ ip.src.ip6 = ipv6_hdr(skb)->saddr;
+ mdb = br_mdb_ip_get_rcu(br, &ip);
+ if (mdb)
+ return mdb;
+ memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
+ }
break;
#endif
default:
@@ -139,38 +185,438 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
return br_mdb_ip_get_rcu(br, &ip);
}
+static bool br_port_group_equal(struct net_bridge_port_group *p,
+ struct net_bridge_port *port,
+ const unsigned char *src)
+{
+ if (p->key.port != port)
+ return false;
+
+ if (!(port->flags & BR_MULTICAST_TO_UNICAST))
+ return true;
+
+ return ether_addr_equal(src, p->eth_addr);
+}
+
+static void __fwd_add_star_excl(struct net_bridge_port_group *pg,
+ struct br_ip *sg_ip)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_port_group *src_pg;
+
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.port = pg->key.port;
+ sg_key.addr = *sg_ip;
+ if (br_sg_port_find(br, &sg_key))
+ return;
+
+ src_pg = __br_multicast_add_group(br, pg->key.port, sg_ip, pg->eth_addr,
+ MCAST_INCLUDE, false, false);
+ if (IS_ERR_OR_NULL(src_pg) ||
+ src_pg->rt_protocol != RTPROT_KERNEL)
+ return;
+
+ src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
+}
+
+static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
+ struct br_ip *sg_ip)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_port_group *src_pg;
+
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.port = pg->key.port;
+ sg_key.addr = *sg_ip;
+ src_pg = br_sg_port_find(br, &sg_key);
+ if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
+ src_pg->rt_protocol != RTPROT_KERNEL)
+ return;
+
+ br_multicast_find_del_pg(br, src_pg);
+}
+
+/* When a port group transitions to (or is added as) EXCLUDE, we need to add
+ * it to all other ports' S,G entries which are not blocked by the current
+ * group for proper replication; the assumption is that any blocked S,G
+ * entries are already added, so the S,G,port lookup should skip them.
+ * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
+ * deleted, we need to remove it from all ports' S,G entries where it was
+ * automatically installed before (i.e. where MDB_PG_FLAGS_STAR_EXCL is set).
+ */
+void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
+ u8 filter_mode)
+{
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_port_group *pg_lst;
+ struct net_bridge_mdb_entry *mp;
+ struct br_ip sg_ip;
+
+ if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
+ return;
+
+ mp = br_mdb_ip_get(br, &pg->key.addr);
+ if (!mp)
+ return;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = pg->key.addr;
+ for (pg_lst = mlock_dereference(mp->ports, br);
+ pg_lst;
+ pg_lst = mlock_dereference(pg_lst->next, br)) {
+ struct net_bridge_group_src *src_ent;
+
+ if (pg_lst == pg)
+ continue;
+ hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
+ if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
+ continue;
+ sg_ip.src = src_ent->addr.src;
+ switch (filter_mode) {
+ case MCAST_INCLUDE:
+ __fwd_del_star_excl(pg, &sg_ip);
+ break;
+ case MCAST_EXCLUDE:
+ __fwd_add_star_excl(pg, &sg_ip);
+ break;
+ }
+ }
+ }
+}
+
+/* called when adding a new S,G with host_joined == false by default */
+static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
+ struct net_bridge_port_group *sg)
+{
+ struct net_bridge_mdb_entry *sg_mp;
+
+ if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
+ return;
+ if (!star_mp->host_joined)
+ return;
+
+ sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
+ if (!sg_mp)
+ return;
+ sg_mp->host_joined = true;
+}
+
+/* set the host_joined state of all of *,G's S,G entries */
+static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
+{
+ struct net_bridge *br = star_mp->br;
+ struct net_bridge_mdb_entry *sg_mp;
+ struct net_bridge_port_group *pg;
+ struct br_ip sg_ip;
+
+ if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
+ return;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = star_mp->addr;
+ for (pg = mlock_dereference(star_mp->ports, br);
+ pg;
+ pg = mlock_dereference(pg->next, br)) {
+ struct net_bridge_group_src *src_ent;
+
+ hlist_for_each_entry(src_ent, &pg->src_list, node) {
+ if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
+ continue;
+ sg_ip.src = src_ent->addr.src;
+ sg_mp = br_mdb_ip_get(br, &sg_ip);
+ if (!sg_mp)
+ continue;
+ sg_mp->host_joined = star_mp->host_joined;
+ }
+ }
+}
+
+static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
+{
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_port_group *p;
+
+ /* *,G exclude ports are only added to S,G entries */
+ if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
+ return;
+
+ /* we need the STAR_EXCLUDE ports only if there are non-STAR_EXCLUDE ports;
+ * we should ignore perm entries since they're managed by user-space
+ */
+ for (pp = &sgmp->ports;
+ (p = mlock_dereference(*pp, sgmp->br)) != NULL;
+ pp = &p->next)
+ if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
+ MDB_PG_FLAGS_PERMANENT)))
+ return;
+
+ /* currently the host can only have joined the *,G, which means
+ * we treat it as EXCLUDE {}, so for an S,G it's considered a
+ * STAR_EXCLUDE entry and we can safely leave it
+ */
+ sgmp->host_joined = false;
+
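+ /* flush all kernel-managed (non-permanent) ports from the S,G */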
+ for (pp = &sgmp->ports;
+ (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
+ if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
+ br_multicast_del_pg(sgmp, p, pp);
+ else
+ pp = &p->next;
+ }
+}
+
+void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
+ struct net_bridge_port_group *sg)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge *br = star_mp->br;
+ struct net_bridge_port_group *pg;
+
+ if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
+ return;
+ if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
+ return;
+
+ br_multicast_sg_host_state(star_mp, sg);
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.addr = sg->key.addr;
+ /* we need to add all exclude ports to the S,G */
+ for (pg = mlock_dereference(star_mp->ports, br);
+ pg;
+ pg = mlock_dereference(pg->next, br)) {
+ struct net_bridge_port_group *src_pg;
+
+ if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
+ continue;
+
+ sg_key.port = pg->key.port;
+ if (br_sg_port_find(br, &sg_key))
+ continue;
+
+ src_pg = __br_multicast_add_group(br, pg->key.port,
+ &sg->key.addr,
+ sg->eth_addr,
+ MCAST_INCLUDE, false, false);
+ if (IS_ERR_OR_NULL(src_pg) ||
+ src_pg->rt_protocol != RTPROT_KERNEL)
+ continue;
+ src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
+ }
+}
+
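+/* Install an S,G entry for this source; unless user space already added it
+ * as permanent, the kernel takes over its lifetime management
+ */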
+static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
+{
+ struct net_bridge_mdb_entry *star_mp;
+ struct net_bridge_port_group *sg;
+ struct br_ip sg_ip;
+
+ if (src->flags & BR_SGRP_F_INSTALLED)
+ return;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = src->pg->key.addr;
+ sg_ip.src = src->addr.src;
+ sg = __br_multicast_add_group(src->br, src->pg->key.port, &sg_ip,
+ src->pg->eth_addr, MCAST_INCLUDE, false,
+ !timer_pending(&src->timer));
+ if (IS_ERR_OR_NULL(sg))
+ return;
+ src->flags |= BR_SGRP_F_INSTALLED;
+ sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;
+
+ /* if it was added by user-space as perm we can skip next steps */
+ if (sg->rt_protocol != RTPROT_KERNEL &&
+ (sg->flags & MDB_PG_FLAGS_PERMANENT))
+ return;
+
+ /* the kernel is now responsible for removing this S,G */
+ del_timer(&sg->timer);
+ star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
+ if (!star_mp)
+ return;
+
+ br_multicast_sg_add_exclude_ports(star_mp, sg);
+}
+
+static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src)
+{
+ struct net_bridge_port_group *p, *pg = src->pg;
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_mdb_entry *mp;
+ struct br_ip sg_ip;
+
+ memset(&sg_ip, 0, sizeof(sg_ip));
+ sg_ip = pg->key.addr;
+ sg_ip.src = src->addr.src;
+
+ mp = br_mdb_ip_get(src->br, &sg_ip);
+ if (!mp)
+ return;
+
+ for (pp = &mp->ports;
+ (p = mlock_dereference(*pp, src->br)) != NULL;
+ pp = &p->next) {
+ if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
+ continue;
+
+ if (p->rt_protocol != RTPROT_KERNEL &&
+ (p->flags & MDB_PG_FLAGS_PERMANENT))
+ break;
+
+ br_multicast_del_pg(mp, p, pp);
+ break;
+ }
+ src->flags &= ~BR_SGRP_F_INSTALLED;
+}
+
+/* install S,G and based on src's timer enable or disable forwarding */
+static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
+{
+ struct net_bridge_port_group_sg_key sg_key;
+ struct net_bridge_port_group *sg;
+ u8 old_flags;
+
+ br_multicast_fwd_src_add(src);
+
+ memset(&sg_key, 0, sizeof(sg_key));
+ sg_key.addr = src->pg->key.addr;
+ sg_key.addr.src = src->addr.src;
+ sg_key.port = src->pg->key.port;
+
+ sg = br_sg_port_find(src->br, &sg_key);
+ if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
+ return;
+
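+ /* S,G forwarding follows the source timer: running means forward,
+ * expired means blocked
+ */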
+ old_flags = sg->flags;
+ if (timer_pending(&src->timer))
+ sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
+ else
+ sg->flags |= MDB_PG_FLAGS_BLOCKED;
+
+ if (old_flags != sg->flags) {
+ struct net_bridge_mdb_entry *sg_mp;
+
+ sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
+ if (!sg_mp)
+ return;
+ br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
+ }
+}
+
+static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
+{
+ struct net_bridge_mdb_entry *mp;
+
+ mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
+ WARN_ON(!hlist_unhashed(&mp->mdb_node));
+ WARN_ON(mp->ports);
+
+ del_timer_sync(&mp->timer);
+ kfree_rcu(mp, rcu);
+}
+
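+/* unlink the mdb entry and queue it for deferred destruction by the gc work */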
+static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
+{
+ struct net_bridge *br = mp->br;
+
+ rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
+ br_mdb_rht_params);
+ hlist_del_init_rcu(&mp->mdb_node);
+ hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
+ queue_work(system_long_wq, &br->mcast_gc_work);
+}
+
static void br_multicast_group_expired(struct timer_list *t)
{
struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
struct net_bridge *br = mp->br;
spin_lock(&br->multicast_lock);
- if (!netif_running(br->dev) || timer_pending(&mp->timer))
+ if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
+ timer_pending(&mp->timer))
goto out;
br_multicast_host_leave(mp, true);
if (mp->ports)
goto out;
+ br_multicast_del_mdb_entry(mp);
+out:
+ spin_unlock(&br->multicast_lock);
+}
- rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
- br_mdb_rht_params);
- hlist_del_rcu(&mp->mdb_node);
+static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
+{
+ struct net_bridge_group_src *src;
- kfree_rcu(mp, rcu);
+ src = container_of(gc, struct net_bridge_group_src, mcast_gc);
+ WARN_ON(!hlist_unhashed(&src->node));
-out:
- spin_unlock(&br->multicast_lock);
+ del_timer_sync(&src->timer);
+ kfree_rcu(src, rcu);
+}
+
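+/* uninstall the source's S,G entry and queue it for deferred freeing */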
+static void br_multicast_del_group_src(struct net_bridge_group_src *src)
+{
+ struct net_bridge *br = src->pg->key.port->br;
+
+ br_multicast_fwd_src_remove(src);
+ hlist_del_init_rcu(&src->node);
+ src->pg->src_ents--;
+ hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
+ queue_work(system_long_wq, &br->mcast_gc_work);
+}
+
+static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
+{
+ struct net_bridge_port_group *pg;
+
+ pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
+ WARN_ON(!hlist_unhashed(&pg->mglist));
+ WARN_ON(!hlist_empty(&pg->src_list));
+
+ del_timer_sync(&pg->rexmit_timer);
+ del_timer_sync(&pg->timer);
+ kfree_rcu(pg, rcu);
+}
+
+void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg,
+ struct net_bridge_port_group __rcu **pp)
+{
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_group_src *ent;
+ struct hlist_node *tmp;
+
+ rcu_assign_pointer(*pp, pg->next);
+ hlist_del_init(&pg->mglist);
+ hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
+ br_multicast_del_group_src(ent);
+ br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
+ if (!br_multicast_is_star_g(&mp->addr)) {
+ rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
+ br_sg_port_rht_params);
+ br_multicast_sg_del_exclude_ports(mp);
+ } else {
+ br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
+ }
+ hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
+ queue_work(system_long_wq, &br->mcast_gc_work);
+
+ if (!mp->ports && !mp->host_joined && netif_running(br->dev))
+ mod_timer(&mp->timer, jiffies);
}
-static void br_multicast_del_pg(struct net_bridge *br,
- struct net_bridge_port_group *pg)
+static void br_multicast_find_del_pg(struct net_bridge *br,
+ struct net_bridge_port_group *pg)
{
+ struct net_bridge_port_group __rcu **pp;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
- struct net_bridge_port_group __rcu **pp;
- mp = br_mdb_ip_get(br, &pg->addr);
+ mp = br_mdb_ip_get(br, &pg->key.addr);
if (WARN_ON(!mp))
return;
@@ -180,17 +626,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
if (p != pg)
continue;
- rcu_assign_pointer(*pp, p->next);
- hlist_del_init(&p->mglist);
- del_timer(&p->timer);
- br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
- p->flags);
- kfree_rcu(p, rcu);
-
- if (!mp->ports && !mp->host_joined &&
- netif_running(br->dev))
- mod_timer(&mp->timer, jiffies);
-
+ br_multicast_del_pg(mp, pg, pp);
return;
}
@@ -200,35 +636,98 @@ static void br_multicast_del_pg(struct net_bridge *br,
static void br_multicast_port_group_expired(struct timer_list *t)
{
struct net_bridge_port_group *pg = from_timer(pg, t, timer);
- struct net_bridge *br = pg->port->br;
+ struct net_bridge_group_src *src_ent;
+ struct net_bridge *br = pg->key.port->br;
+ struct hlist_node *tmp;
+ bool changed;
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
goto out;
- br_multicast_del_pg(br, pg);
+ changed = (pg->filter_mode == MCAST_EXCLUDE);
+ pg->filter_mode = MCAST_INCLUDE;
+ hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
+ if (!timer_pending(&src_ent->timer)) {
+ br_multicast_del_group_src(src_ent);
+ changed = true;
+ }
+ }
+
+ if (hlist_empty(&pg->src_list)) {
+ br_multicast_find_del_pg(br, pg);
+ } else if (changed) {
+ struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
+
+ if (br_multicast_is_star_g(&pg->key.addr))
+ br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
+
+ if (WARN_ON(!mp))
+ goto out;
+ br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
+ }
out:
spin_unlock(&br->multicast_lock);
}
-static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
- __be32 group,
- u8 *igmp_type)
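+/* run the deferred destruction of every entry queued on the gc list */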
+static void br_multicast_gc(struct hlist_head *head)
{
+ struct net_bridge_mcast_gc *gcent;
+ struct hlist_node *tmp;
+
+ hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
+ hlist_del_init(&gcent->gc_node);
+ gcent->destroy(gcent);
+ }
+}
+
+static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
+ struct net_bridge_port_group *pg,
+ __be32 ip_dst, __be32 group,
+ bool with_srcs, bool over_lmqt,
+ u8 sflag, u8 *igmp_type,
+ bool *need_rexmit)
+{
+ struct net_bridge_port *p = pg ? pg->key.port : NULL;
+ struct net_bridge_group_src *ent;
+ size_t pkt_size, igmp_hdr_size;
+ unsigned long now = jiffies;
struct igmpv3_query *ihv3;
- size_t igmp_hdr_size;
+ void *csum_start = NULL;
+ __sum16 *csum = NULL;
struct sk_buff *skb;
struct igmphdr *ih;
struct ethhdr *eth;
+ unsigned long lmqt;
struct iphdr *iph;
+ u16 lmqt_srcs = 0;
igmp_hdr_size = sizeof(*ih);
- if (br->multicast_igmp_version == 3)
+ if (br->multicast_igmp_version == 3) {
igmp_hdr_size = sizeof(*ihv3);
- skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
- igmp_hdr_size + 4);
+ if (pg && with_srcs) {
+ lmqt = now + (br->multicast_last_member_interval *
+ br->multicast_last_member_count);
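+ /* count the sources on the requested side of the LMQT which still
+ * have retransmissions pending
+ */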
+ hlist_for_each_entry(ent, &pg->src_list, node) {
+ if (over_lmqt == time_after(ent->timer.expires,
+ lmqt) &&
+ ent->src_query_rexmit_cnt > 0)
+ lmqt_srcs++;
+ }
+
+ if (!lmqt_srcs)
+ return NULL;
+ igmp_hdr_size += lmqt_srcs * sizeof(__be32);
+ }
+ }
+
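+ /* drop the query if it would not fit the egress port's or the
+ * bridge's MTU
+ */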
+ pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
+ if ((p && pkt_size > p->dev->mtu) ||
+ pkt_size > br->dev->mtu)
+ return NULL;
+
+ skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
if (!skb)
goto out;
@@ -238,29 +737,24 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
eth = eth_hdr(skb);
ether_addr_copy(eth->h_source, br->dev->dev_addr);
- eth->h_dest[0] = 1;
- eth->h_dest[1] = 0;
- eth->h_dest[2] = 0x5e;
- eth->h_dest[3] = 0;
- eth->h_dest[4] = 0;
- eth->h_dest[5] = 1;
+ ip_eth_mc_map(ip_dst, eth->h_dest);
eth->h_proto = htons(ETH_P_IP);
skb_put(skb, sizeof(*eth));
skb_set_network_header(skb, skb->len);
iph = ip_hdr(skb);
+ iph->tot_len = htons(pkt_size - sizeof(*eth));
iph->version = 4;
iph->ihl = 6;
iph->tos = 0xc0;
- iph->tot_len = htons(sizeof(*iph) + igmp_hdr_size + 4);
iph->id = 0;
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->protocol = IPPROTO_IGMP;
iph->saddr = br_opt_get(br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
inet_select_addr(br->dev, 0, RT_SCOPE_LINK) : 0;
- iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
+ iph->daddr = ip_dst;
((u8 *)&iph[1])[0] = IPOPT_RA;
((u8 *)&iph[1])[1] = 4;
((u8 *)&iph[1])[2] = 0;
@@ -280,7 +774,8 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
(HZ / IGMP_TIMER_SCALE);
ih->group = group;
ih->csum = 0;
- ih->csum = ip_compute_csum((void *)ih, sizeof(*ih));
+ csum = &ih->csum;
+ csum_start = (void *)ih;
break;
case 3:
ihv3 = igmpv3_query_hdr(skb);
@@ -290,15 +785,40 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
(HZ / IGMP_TIMER_SCALE);
ihv3->group = group;
ihv3->qqic = br->multicast_query_interval / HZ;
- ihv3->nsrcs = 0;
+ ihv3->nsrcs = htons(lmqt_srcs);
ihv3->resv = 0;
- ihv3->suppress = 0;
+ ihv3->suppress = sflag;
ihv3->qrv = 2;
ihv3->csum = 0;
- ihv3->csum = ip_compute_csum((void *)ihv3, sizeof(*ihv3));
+ csum = &ihv3->csum;
+ csum_start = (void *)ihv3;
+ if (!pg || !with_srcs)
+ break;
+
+ lmqt_srcs = 0;
+ hlist_for_each_entry(ent, &pg->src_list, node) {
+ if (over_lmqt == time_after(ent->timer.expires,
+ lmqt) &&
+ ent->src_query_rexmit_cnt > 0) {
+ ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
+ ent->src_query_rexmit_cnt--;
+ if (need_rexmit && ent->src_query_rexmit_cnt)
+ *need_rexmit = true;
+ }
+ }
+ if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
+ kfree_skb(skb);
+ return NULL;
+ }
break;
}
+ if (WARN_ON(!csum || !csum_start)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ *csum = ip_compute_csum(csum_start, igmp_hdr_size);
skb_put(skb, igmp_hdr_size);
__skb_pull(skb, sizeof(*eth));
@@ -308,23 +828,54 @@ out:
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
- const struct in6_addr *grp,
- u8 *igmp_type)
-{
+ struct net_bridge_port_group *pg,
+ const struct in6_addr *ip6_dst,
+ const struct in6_addr *group,
+ bool with_srcs, bool over_llqt,
+ u8 sflag, u8 *igmp_type,
+ bool *need_rexmit)
+{
+ struct net_bridge_port *p = pg ? pg->key.port : NULL;
+ struct net_bridge_group_src *ent;
+ size_t pkt_size, mld_hdr_size;
+ unsigned long now = jiffies;
struct mld2_query *mld2q;
+ void *csum_start = NULL;
unsigned long interval;
+ __sum16 *csum = NULL;
struct ipv6hdr *ip6h;
struct mld_msg *mldq;
- size_t mld_hdr_size;
struct sk_buff *skb;
+ unsigned long llqt;
struct ethhdr *eth;
+ u16 llqt_srcs = 0;
u8 *hopopt;
mld_hdr_size = sizeof(*mldq);
- if (br->multicast_mld_version == 2)
+ if (br->multicast_mld_version == 2) {
mld_hdr_size = sizeof(*mld2q);
- skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
- 8 + mld_hdr_size);
+ if (pg && with_srcs) {
+ llqt = now + (br->multicast_last_member_interval *
+ br->multicast_last_member_count);
+ hlist_for_each_entry(ent, &pg->src_list, node) {
+ if (over_llqt == time_after(ent->timer.expires,
+ llqt) &&
+ ent->src_query_rexmit_cnt > 0)
+ llqt_srcs++;
+ }
+
+ if (!llqt_srcs)
+ return NULL;
+ mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
+ }
+ }
+
+ pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
+ if ((p && pkt_size > p->dev->mtu) ||
+ pkt_size > br->dev->mtu)
+ return NULL;
+
+ skb = netdev_alloc_skb_ip_align(br->dev, pkt_size);
if (!skb)
goto out;
@@ -346,7 +897,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
ip6h->payload_len = htons(8 + mld_hdr_size);
ip6h->nexthdr = IPPROTO_HOPOPTS;
ip6h->hop_limit = 1;
- ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+ ip6h->daddr = *ip6_dst;
if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
&ip6h->saddr)) {
kfree_skb(skb);
@@ -371,7 +922,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
/* ICMPv6 */
skb_set_transport_header(skb, skb->len);
- interval = ipv6_addr_any(grp) ?
+ interval = ipv6_addr_any(group) ?
br->multicast_query_response_interval :
br->multicast_last_member_interval;
*igmp_type = ICMPV6_MGM_QUERY;
@@ -383,12 +934,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
mldq->mld_cksum = 0;
mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
mldq->mld_reserved = 0;
- mldq->mld_mca = *grp;
- mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- sizeof(*mldq), IPPROTO_ICMPV6,
- csum_partial(mldq,
- sizeof(*mldq),
- 0));
+ mldq->mld_mca = *group;
+ csum = &mldq->mld_cksum;
+ csum_start = (void *)mldq;
break;
case 2:
mld2q = (struct mld2_query *)icmp6_hdr(skb);
@@ -398,21 +946,43 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
mld2q->mld2q_cksum = 0;
mld2q->mld2q_resv1 = 0;
mld2q->mld2q_resv2 = 0;
- mld2q->mld2q_suppress = 0;
+ mld2q->mld2q_suppress = sflag;
mld2q->mld2q_qrv = 2;
- mld2q->mld2q_nsrcs = 0;
+ mld2q->mld2q_nsrcs = htons(llqt_srcs);
mld2q->mld2q_qqic = br->multicast_query_interval / HZ;
- mld2q->mld2q_mca = *grp;
- mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
- sizeof(*mld2q),
- IPPROTO_ICMPV6,
- csum_partial(mld2q,
- sizeof(*mld2q),
- 0));
+ mld2q->mld2q_mca = *group;
+ csum = &mld2q->mld2q_cksum;
+ csum_start = (void *)mld2q;
+ if (!pg || !with_srcs)
+ break;
+
+ llqt_srcs = 0;
+ hlist_for_each_entry(ent, &pg->src_list, node) {
+ if (over_llqt == time_after(ent->timer.expires,
+ llqt) &&
+ ent->src_query_rexmit_cnt > 0) {
+ mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
+ ent->src_query_rexmit_cnt--;
+ if (need_rexmit && ent->src_query_rexmit_cnt)
+ *need_rexmit = true;
+ }
+ }
+ if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
+ kfree_skb(skb);
+ return NULL;
+ }
break;
}
- skb_put(skb, mld_hdr_size);
+ if (WARN_ON(!csum || !csum_start)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ *csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
+ IPPROTO_ICMPV6,
+ csum_partial(csum_start, mld_hdr_size, 0));
+ skb_put(skb, mld_hdr_size);
__skb_pull(skb, sizeof(*eth));
out:
@@ -421,16 +991,39 @@ out:
#endif
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
- struct br_ip *addr,
- u8 *igmp_type)
+ struct net_bridge_port_group *pg,
+ struct br_ip *ip_dst,
+ struct br_ip *group,
+ bool with_srcs, bool over_lmqt,
+ u8 sflag, u8 *igmp_type,
+ bool *need_rexmit)
{
- switch (addr->proto) {
+ __be32 ip4_dst;
+
+ switch (group->proto) {
case htons(ETH_P_IP):
- return br_ip4_multicast_alloc_query(br, addr->u.ip4, igmp_type);
+ ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
+ return br_ip4_multicast_alloc_query(br, pg,
+ ip4_dst, group->dst.ip4,
+ with_srcs, over_lmqt,
+ sflag, igmp_type,
+ need_rexmit);
#if IS_ENABLED(CONFIG_IPV6)
- case htons(ETH_P_IPV6):
- return br_ip6_multicast_alloc_query(br, &addr->u.ip6,
- igmp_type);
+ case htons(ETH_P_IPV6): {
+ struct in6_addr ip6_dst;
+
+ if (ip_dst)
+ ip6_dst = ip_dst->dst.ip6;
+ else
+ ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
+ htonl(1));
+
+ return br_ip6_multicast_alloc_query(br, pg,
+ &ip6_dst, &group->dst.ip6,
+ with_srcs, over_lmqt,
+ sflag, igmp_type,
+ need_rexmit);
+ }
#endif
}
return NULL;
@@ -457,6 +1050,7 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
mp->br = br;
mp->addr = *group;
+ mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
timer_setup(&mp->timer, br_multicast_group_expired, 0);
err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
br_mdb_rht_params);
@@ -470,12 +1064,101 @@ struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
return mp;
}
+static void br_multicast_group_src_expired(struct timer_list *t)
+{
+ struct net_bridge_group_src *src = from_timer(src, t, timer);
+ struct net_bridge_port_group *pg;
+ struct net_bridge *br = src->br;
+
+ spin_lock(&br->multicast_lock);
+ if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
+ timer_pending(&src->timer))
+ goto out;
+
+ pg = src->pg;
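+ /* INCLUDE: an expired source is deleted (and the pg too once its
+ * source list is empty); EXCLUDE: the entry is kept but its S,G
+ * forwarding is blocked
+ */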
+ if (pg->filter_mode == MCAST_INCLUDE) {
+ br_multicast_del_group_src(src);
+ if (!hlist_empty(&pg->src_list))
+ goto out;
+ br_multicast_find_del_pg(br, pg);
+ } else {
+ br_multicast_fwd_src_handle(src);
+ }
+
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
+static struct net_bridge_group_src *
+br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
+{
+ struct net_bridge_group_src *ent;
+
+ switch (ip->proto) {
+ case htons(ETH_P_IP):
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ if (ip->src.ip4 == ent->addr.src.ip4)
+ return ent;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
+ return ent;
+ break;
+#endif
+ }
+
+ return NULL;
+}
+
+static struct net_bridge_group_src *
+br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
+{
+ struct net_bridge_group_src *grp_src;
+
+ if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
+ return NULL;
+
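+ /* reject invalid source addresses */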
+ switch (src_ip->proto) {
+ case htons(ETH_P_IP):
+ if (ipv4_is_zeronet(src_ip->src.ip4) ||
+ ipv4_is_multicast(src_ip->src.ip4))
+ return NULL;
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ if (ipv6_addr_any(&src_ip->src.ip6) ||
+ ipv6_addr_is_multicast(&src_ip->src.ip6))
+ return NULL;
+ break;
+#endif
+ }
+
+ grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
+ if (unlikely(!grp_src))
+ return NULL;
+
+ grp_src->pg = pg;
+ grp_src->br = pg->key.port->br;
+ grp_src->addr = *src_ip;
+ grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
+ timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);
+
+ hlist_add_head_rcu(&grp_src->node, &pg->src_list);
+ pg->src_ents++;
+
+ return grp_src;
+}
+
struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
struct net_bridge_port_group __rcu *next,
unsigned char flags,
- const unsigned char *src)
+ const unsigned char *src,
+ u8 filter_mode,
+ u8 rt_protocol)
{
struct net_bridge_port_group *p;
@@ -483,12 +1166,25 @@ struct net_bridge_port_group *br_multicast_new_port_group(
if (unlikely(!p))
return NULL;
- p->addr = *group;
- p->port = port;
+ p->key.addr = *group;
+ p->key.port = port;
p->flags = flags;
+ p->filter_mode = filter_mode;
+ p->rt_protocol = rt_protocol;
+ p->mcast_gc.destroy = br_multicast_destroy_port_group;
+ INIT_HLIST_HEAD(&p->src_list);
+
+ if (!br_multicast_is_star_g(group) &&
+ rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
+ br_sg_port_rht_params)) {
+ kfree(p);
+ return NULL;
+ }
+
rcu_assign_pointer(p->next, next);
- hlist_add_head(&p->mglist, &port->mglist);
timer_setup(&p->timer, br_multicast_port_group_expired, 0);
+ timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
+ hlist_add_head(&p->mglist, &port->mglist);
if (src)
memcpy(p->eth_addr, src, ETH_ALEN);
@@ -498,26 +1194,14 @@ struct net_bridge_port_group *br_multicast_new_port_group(
return p;
}
-static bool br_port_group_equal(struct net_bridge_port_group *p,
- struct net_bridge_port *port,
- const unsigned char *src)
-{
- if (p->port != port)
- return false;
-
- if (!(port->flags & BR_MULTICAST_TO_UNICAST))
- return true;
-
- return ether_addr_equal(src, p->eth_addr);
-}
-
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify)
{
if (!mp->host_joined) {
mp->host_joined = true;
+ if (br_multicast_is_star_g(&mp->addr))
+ br_multicast_star_g_host_state(mp);
if (notify)
- br_mdb_notify(mp->br->dev, NULL, &mp->addr,
- RTM_NEWMDB, 0);
+ br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
}
mod_timer(&mp->timer, jiffies + mp->br->multicast_membership_interval);
}
@@ -528,30 +1212,33 @@ void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
return;
mp->host_joined = false;
+ if (br_multicast_is_star_g(&mp->addr))
+ br_multicast_star_g_host_state(mp);
if (notify)
- br_mdb_notify(mp->br->dev, NULL, &mp->addr, RTM_DELMDB, 0);
+ br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
-static int br_multicast_add_group(struct net_bridge *br,
- struct net_bridge_port *port,
- struct br_ip *group,
- const unsigned char *src)
+static struct net_bridge_port_group *
+__br_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *group,
+ const unsigned char *src,
+ u8 filter_mode,
+ bool igmpv2_mldv1,
+ bool blocked)
{
struct net_bridge_port_group __rcu **pp;
- struct net_bridge_port_group *p;
+ struct net_bridge_port_group *p = NULL;
struct net_bridge_mdb_entry *mp;
unsigned long now = jiffies;
- int err;
- spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED))
goto out;
mp = br_multicast_new_group(br, group);
- err = PTR_ERR(mp);
if (IS_ERR(mp))
- goto err;
+ return ERR_PTR(PTR_ERR(mp));
if (!port) {
br_multicast_host_join(mp, true);
@@ -563,23 +1250,46 @@ static int br_multicast_add_group(struct net_bridge *br,
pp = &p->next) {
if (br_port_group_equal(p, port, src))
goto found;
- if ((unsigned long)p->port < (unsigned long)port)
+ if ((unsigned long)p->key.port < (unsigned long)port)
break;
}
- p = br_multicast_new_port_group(port, group, *pp, 0, src);
- if (unlikely(!p))
- goto err;
+ p = br_multicast_new_port_group(port, group, *pp, 0, src, filter_mode,
+ RTPROT_KERNEL);
+ if (unlikely(!p)) {
+ p = ERR_PTR(-ENOMEM);
+ goto out;
+ }
rcu_assign_pointer(*pp, p);
- br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
+ if (blocked)
+ p->flags |= MDB_PG_FLAGS_BLOCKED;
+ br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
found:
- mod_timer(&p->timer, now + br->multicast_membership_interval);
+ if (igmpv2_mldv1)
+ mod_timer(&p->timer, now + br->multicast_membership_interval);
+
out:
- err = 0;
+ return p;
+}
+
+static int br_multicast_add_group(struct net_bridge *br,
+ struct net_bridge_port *port,
+ struct br_ip *group,
+ const unsigned char *src,
+ u8 filter_mode,
+ bool igmpv2_mldv1)
+{
+ struct net_bridge_port_group *pg;
+ int err;
-err:
+ spin_lock(&br->multicast_lock);
+ pg = __br_multicast_add_group(br, port, group, src, filter_mode,
+ igmpv2_mldv1, false);
+ /* NULL is considered valid for host joined groups */
+ err = IS_ERR(pg) ? PTR_ERR(pg) : 0;
spin_unlock(&br->multicast_lock);
+
return err;
}
@@ -587,19 +1297,23 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
__u16 vid,
- const unsigned char *src)
+ const unsigned char *src,
+ bool igmpv2)
{
struct br_ip br_group;
+ u8 filter_mode;
if (ipv4_is_local_multicast(group))
return 0;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip4 = group;
+ br_group.dst.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
+ filter_mode = igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE;
- return br_multicast_add_group(br, port, &br_group, src);
+ return br_multicast_add_group(br, port, &br_group, src, filter_mode,
+ igmpv2);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -607,19 +1321,23 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
__u16 vid,
- const unsigned char *src)
+ const unsigned char *src,
+ bool mldv1)
{
struct br_ip br_group;
+ u8 filter_mode;
if (ipv6_addr_is_ll_all_nodes(group))
return 0;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip6 = *group;
+ br_group.dst.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
+ filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
- return br_multicast_add_group(br, port, &br_group, src);
+ return br_multicast_add_group(br, port, &br_group, src, filter_mode,
+ mldv1);
}
#endif
@@ -702,21 +1420,30 @@ static void br_multicast_select_own_querier(struct net_bridge *br,
struct sk_buff *skb)
{
if (ip->proto == htons(ETH_P_IP))
- br->ip4_querier.addr.u.ip4 = ip_hdr(skb)->saddr;
+ br->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
else
- br->ip6_querier.addr.u.ip6 = ipv6_hdr(skb)->saddr;
+ br->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
static void __br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
- struct br_ip *ip)
-{
+ struct net_bridge_port_group *pg,
+ struct br_ip *ip_dst,
+ struct br_ip *group,
+ bool with_srcs,
+ u8 sflag,
+ bool *need_rexmit)
+{
+ bool over_lmqt = !!sflag;
struct sk_buff *skb;
u8 igmp_type;
- skb = br_multicast_alloc_query(br, ip, &igmp_type);
+again_under_lmqt:
+ skb = br_multicast_alloc_query(br, pg, ip_dst, group, with_srcs,
+ over_lmqt, sflag, &igmp_type,
+ need_rexmit);
if (!skb)
return;
@@ -727,8 +1454,13 @@ static void __br_multicast_send_query(struct net_bridge *br,
NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
dev_net(port->dev), NULL, skb, NULL, skb->dev,
br_dev_queue_push_xmit);
+
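+ /* with suppression requested the first pass covered the sources
+ * over the LMQT, go around once more for the ones under it
+ */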
+ if (over_lmqt && with_srcs && sflag) {
+ over_lmqt = false;
+ goto again_under_lmqt;
+ }
} else {
- br_multicast_select_own_querier(br, ip, skb);
+ br_multicast_select_own_querier(br, group, skb);
br_multicast_count(br, port, skb, igmp_type,
BR_MCAST_DIR_RX);
netif_rx(skb);
@@ -748,7 +1480,7 @@ static void br_multicast_send_query(struct net_bridge *br,
!br_opt_get(br, BROPT_MULTICAST_QUERIER))
return;
- memset(&br_group.u, 0, sizeof(br_group.u));
+ memset(&br_group.dst, 0, sizeof(br_group.dst));
if (port ? (own_query == &port->ip4_own_query) :
(own_query == &br->ip4_own_query)) {
@@ -764,7 +1496,8 @@ static void br_multicast_send_query(struct net_bridge *br,
if (!other_query || timer_pending(&other_query->timer))
return;
- __br_multicast_send_query(br, port, &br_group);
+ __br_multicast_send_query(br, port, NULL, NULL, &br_group, false, 0,
+ NULL);
time = jiffies;
time += own_query->startup_sent < br->multicast_startup_query_count ?
@@ -809,6 +1542,44 @@ static void br_ip6_multicast_port_query_expired(struct timer_list *t)
}
#endif
+static void br_multicast_port_group_rexmit(struct timer_list *t)
+{
+ struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
+ struct bridge_mcast_other_query *other_query = NULL;
+ struct net_bridge *br = pg->key.port->br;
+ bool need_rexmit = false;
+
+ spin_lock(&br->multicast_lock);
+ if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
+ !br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
+ !br_opt_get(br, BROPT_MULTICAST_QUERIER))
+ goto out;
+
+ if (pg->key.addr.proto == htons(ETH_P_IP))
+ other_query = &br->ip4_other_query;
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ other_query = &br->ip6_other_query;
+#endif
+
+ if (!other_query || timer_pending(&other_query->timer))
+ goto out;
+
+ if (pg->grp_query_rexmit_cnt) {
+ pg->grp_query_rexmit_cnt--;
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, false, 1, NULL);
+ }
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, true, 0, &need_rexmit);
+
+ if (pg->grp_query_rexmit_cnt || need_rexmit)
+ mod_timer(&pg->rexmit_timer, jiffies +
+ br->multicast_last_member_interval);
+out:
+ spin_unlock(&br->multicast_lock);
+}
+
static void br_mc_disabled_update(struct net_device *dev, bool value)
{
struct switchdev_attr attr = {
@@ -847,13 +1618,16 @@ void br_multicast_del_port(struct net_bridge_port *port)
{
struct net_bridge *br = port->br;
struct net_bridge_port_group *pg;
+ HLIST_HEAD(deleted_head);
struct hlist_node *n;
/* Take care of the remaining groups, only perm ones should be left */
spin_lock_bh(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
- br_multicast_del_pg(br, pg);
+ br_multicast_find_del_pg(br, pg);
+ hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
+ br_multicast_gc(&deleted_head);
del_timer_sync(&port->multicast_router_timer);
free_percpu(port->mcast_stats);
}
@@ -901,7 +1675,7 @@ void br_multicast_disable_port(struct net_bridge_port *port)
spin_lock(&br->multicast_lock);
hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
if (!(pg->flags & MDB_PG_FLAGS_PERMANENT))
- br_multicast_del_pg(br, pg);
+ br_multicast_find_del_pg(br, pg);
__del_port_router(port);
@@ -913,20 +1687,574 @@ void br_multicast_disable_port(struct net_bridge_port *port)
spin_unlock(&br->multicast_lock);
}
+static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
+{
+ struct net_bridge_group_src *ent;
+ struct hlist_node *tmp;
+ int deleted = 0;
+
+ hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
+ if (ent->flags & BR_SGRP_F_DELETE) {
+ br_multicast_del_group_src(ent);
+ deleted++;
+ }
+
+ return deleted;
+}
+
+static void __grp_src_mod_timer(struct net_bridge_group_src *src,
+ unsigned long expires)
+{
+ mod_timer(&src->timer, expires);
+ br_multicast_fwd_src_handle(src);
+}
+
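+/* Send group-and-source specific queries for all sources marked with
+ * BR_SGRP_F_SEND and lower their timers to the LMQT
+ */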
+static void __grp_src_query_marked_and_rexmit(struct net_bridge_port_group *pg)
+{
+ struct bridge_mcast_other_query *other_query = NULL;
+ struct net_bridge *br = pg->key.port->br;
+ u32 lmqc = br->multicast_last_member_count;
+ unsigned long lmqt, lmi, now = jiffies;
+ struct net_bridge_group_src *ent;
+
+ if (!netif_running(br->dev) ||
+ !br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ return;
+
+ if (pg->key.addr.proto == htons(ETH_P_IP))
+ other_query = &br->ip4_other_query;
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ other_query = &br->ip6_other_query;
+#endif
+
+ lmqt = now + br_multicast_lmqt(br);
+ hlist_for_each_entry(ent, &pg->src_list, node) {
+ if (ent->flags & BR_SGRP_F_SEND) {
+ ent->flags &= ~BR_SGRP_F_SEND;
+ if (ent->timer.expires > lmqt) {
+ if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
+ other_query &&
+ !timer_pending(&other_query->timer))
+ ent->src_query_rexmit_cnt = lmqc;
+ __grp_src_mod_timer(ent, lmqt);
+ }
+ }
+ }
+
+ if (!br_opt_get(br, BROPT_MULTICAST_QUERIER) ||
+ !other_query || timer_pending(&other_query->timer))
+ return;
+
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, true, 1, NULL);
+
+ lmi = now + br->multicast_last_member_interval;
+ if (!timer_pending(&pg->rexmit_timer) ||
+ time_after(pg->rexmit_timer.expires, lmi))
+ mod_timer(&pg->rexmit_timer, lmi);
+}
+
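+/* Send a group specific query and, when in EXCLUDE mode, lower the group
+ * timer to the LMQT
+ */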
+static void __grp_send_query_and_rexmit(struct net_bridge_port_group *pg)
+{
+ struct bridge_mcast_other_query *other_query = NULL;
+ struct net_bridge *br = pg->key.port->br;
+ unsigned long now = jiffies, lmi;
+
+ if (!netif_running(br->dev) ||
+ !br_opt_get(br, BROPT_MULTICAST_ENABLED))
+ return;
+
+ if (pg->key.addr.proto == htons(ETH_P_IP))
+ other_query = &br->ip4_other_query;
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ other_query = &br->ip6_other_query;
+#endif
+
+ if (br_opt_get(br, BROPT_MULTICAST_QUERIER) &&
+ other_query && !timer_pending(&other_query->timer)) {
+ lmi = now + br->multicast_last_member_interval;
+ pg->grp_query_rexmit_cnt = br->multicast_last_member_count - 1;
+ __br_multicast_send_query(br, pg->key.port, pg, &pg->key.addr,
+ &pg->key.addr, false, 0, NULL);
+ if (!timer_pending(&pg->rexmit_timer) ||
+ time_after(pg->rexmit_timer.expires, lmi))
+ mod_timer(&pg->rexmit_timer, lmi);
+ }
+
+ if (pg->filter_mode == MCAST_EXCLUDE &&
+ (!timer_pending(&pg->timer) ||
+ time_after(pg->timer.expires, now + br_multicast_lmqt(br))))
+ mod_timer(&pg->timer, now + br_multicast_lmqt(br));
+}
+
+/* State Msg type New state Actions
+ * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
+ * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
+ * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ */
+static bool br_multicast_isinc_allow(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_group_src *ent;
+ unsigned long now = jiffies;
+ bool changed = false;
+ struct br_ip src_ip;
+ u32 src_idx;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (!ent) {
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent)
+ changed = true;
+ }
+
+ if (ent)
+ __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
+ srcs += src_size;
+ }
+
+ return changed;
+}
+
+/* State Msg type New state Actions
+ * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
+ * Delete (A-B)
+ * Group Timer=GMI
+ */
+static void __grp_src_isexc_incl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge_group_src *ent;
+ struct br_ip src_ip;
+ u32 src_idx;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ ent->flags |= BR_SGRP_F_DELETE;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (ent)
+ ent->flags &= ~BR_SGRP_F_DELETE;
+ else
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent)
+ br_multicast_fwd_src_handle(ent);
+ srcs += src_size;
+ }
+
+ __grp_src_delete_marked(pg);
+}
+
+/* State Msg type New state Actions
+ * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
+ * Delete (X-A)
+ * Delete (Y-A)
+ * Group Timer=GMI
+ */
+static bool __grp_src_isexc_excl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge *br = pg->key.port->br;
+ struct net_bridge_group_src *ent;
+ unsigned long now = jiffies;
+ bool changed = false;
+ struct br_ip src_ip;
+ u32 src_idx;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ ent->flags |= BR_SGRP_F_DELETE;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (ent) {
+ ent->flags &= ~BR_SGRP_F_DELETE;
+ } else {
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent) {
+ __grp_src_mod_timer(ent,
+ now + br_multicast_gmi(br));
+ changed = true;
+ }
+ }
+ srcs += src_size;
+ }
+
+ if (__grp_src_delete_marked(pg))
+ changed = true;
+
+ return changed;
+}
+
+static bool br_multicast_isexc(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge *br = pg->key.port->br;
+ bool changed = false;
+
+ switch (pg->filter_mode) {
+ case MCAST_INCLUDE:
+ __grp_src_isexc_incl(pg, srcs, nsrcs, src_size);
+ br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
+ changed = true;
+ break;
+ case MCAST_EXCLUDE:
+ changed = __grp_src_isexc_excl(pg, srcs, nsrcs, src_size);
+ break;
+ }
+
+ pg->filter_mode = MCAST_EXCLUDE;
+ mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
+
+ return changed;
+}
+
+/* State Msg type New state Actions
+ * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
+ * Send Q(G,A-B)
+ */
+static bool __grp_src_toin_incl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge *br = pg->key.port->br;
+ u32 src_idx, to_send = pg->src_ents;
+ struct net_bridge_group_src *ent;
+ unsigned long now = jiffies;
+ bool changed = false;
+ struct br_ip src_ip;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ ent->flags |= BR_SGRP_F_SEND;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (ent) {
+ ent->flags &= ~BR_SGRP_F_SEND;
+ to_send--;
+ } else {
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent)
+ changed = true;
+ }
+ if (ent)
+ __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
+ srcs += src_size;
+ }
+
+ if (to_send)
+ __grp_src_query_marked_and_rexmit(pg);
+
+ return changed;
+}
+
+/* State Msg type New state Actions
+ * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ * Send Q(G,X-A)
+ * Send Q(G)
+ */
+static bool __grp_src_toin_excl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge *br = pg->key.port->br;
+ u32 src_idx, to_send = pg->src_ents;
+ struct net_bridge_group_src *ent;
+ unsigned long now = jiffies;
+ bool changed = false;
+ struct br_ip src_ip;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ if (timer_pending(&ent->timer))
+ ent->flags |= BR_SGRP_F_SEND;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (ent) {
+ if (timer_pending(&ent->timer)) {
+ ent->flags &= ~BR_SGRP_F_SEND;
+ to_send--;
+ }
+ } else {
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent)
+ changed = true;
+ }
+ if (ent)
+ __grp_src_mod_timer(ent, now + br_multicast_gmi(br));
+ srcs += src_size;
+ }
+
+ if (to_send)
+ __grp_src_query_marked_and_rexmit(pg);
+
+ __grp_send_query_and_rexmit(pg);
+
+ return changed;
+}
+
+static bool br_multicast_toin(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ bool changed = false;
+
+ switch (pg->filter_mode) {
+ case MCAST_INCLUDE:
+ changed = __grp_src_toin_incl(pg, srcs, nsrcs, src_size);
+ break;
+ case MCAST_EXCLUDE:
+ changed = __grp_src_toin_excl(pg, srcs, nsrcs, src_size);
+ break;
+ }
+
+ return changed;
+}
+
+/* State Msg type New state Actions
+ * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
+ * Delete (A-B)
+ * Send Q(G,A*B)
+ * Group Timer=GMI
+ */
+static void __grp_src_toex_incl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge_group_src *ent;
+ u32 src_idx, to_send = 0;
+ struct br_ip src_ip;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (ent) {
+ ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
+ BR_SGRP_F_SEND;
+ to_send++;
+ } else {
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ }
+ if (ent)
+ br_multicast_fwd_src_handle(ent);
+ srcs += src_size;
+ }
+
+ __grp_src_delete_marked(pg);
+ if (to_send)
+ __grp_src_query_marked_and_rexmit(pg);
+}
+
+/* State Msg type New state Actions
+ * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
+ * Delete (X-A)
+ * Delete (Y-A)
+ * Send Q(G,A-Y)
+ * Group Timer=GMI
+ */
+static bool __grp_src_toex_excl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge_group_src *ent;
+ u32 src_idx, to_send = 0;
+ bool changed = false;
+ struct br_ip src_ip;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (ent) {
+ ent->flags &= ~BR_SGRP_F_DELETE;
+ } else {
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent) {
+ __grp_src_mod_timer(ent, pg->timer.expires);
+ changed = true;
+ }
+ }
+ if (ent && timer_pending(&ent->timer)) {
+ ent->flags |= BR_SGRP_F_SEND;
+ to_send++;
+ }
+ srcs += src_size;
+ }
+
+ if (__grp_src_delete_marked(pg))
+ changed = true;
+ if (to_send)
+ __grp_src_query_marked_and_rexmit(pg);
+
+ return changed;
+}
+
+static bool br_multicast_toex(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge *br = pg->key.port->br;
+ bool changed = false;
+
+ switch (pg->filter_mode) {
+ case MCAST_INCLUDE:
+ __grp_src_toex_incl(pg, srcs, nsrcs, src_size);
+ br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
+ changed = true;
+ break;
+ case MCAST_EXCLUDE:
+ changed = __grp_src_toex_excl(pg, srcs, nsrcs, src_size);
+ break;
+ }
+
+ pg->filter_mode = MCAST_EXCLUDE;
+ mod_timer(&pg->timer, jiffies + br_multicast_gmi(br));
+
+ return changed;
+}
+
+/* State Msg type New state Actions
+ * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
+ */
+static void __grp_src_block_incl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge_group_src *ent;
+ u32 src_idx, to_send = 0;
+ struct br_ip src_ip;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ ent->flags &= ~BR_SGRP_F_SEND;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (ent) {
+ ent->flags |= BR_SGRP_F_SEND;
+ to_send++;
+ }
+ srcs += src_size;
+ }
+
+ if (to_send)
+ __grp_src_query_marked_and_rexmit(pg);
+
+ if (pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list))
+ br_multicast_find_del_pg(pg->key.port->br, pg);
+}
+
+/* State Msg type New state Actions
+ * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
+ * Send Q(G,A-Y)
+ */
+static bool __grp_src_block_excl(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ struct net_bridge_group_src *ent;
+ u32 src_idx, to_send = 0;
+ bool changed = false;
+ struct br_ip src_ip;
+
+ hlist_for_each_entry(ent, &pg->src_list, node)
+ ent->flags &= ~BR_SGRP_F_SEND;
+
+ memset(&src_ip, 0, sizeof(src_ip));
+ src_ip.proto = pg->key.addr.proto;
+ for (src_idx = 0; src_idx < nsrcs; src_idx++) {
+ memcpy(&src_ip.src, srcs, src_size);
+ ent = br_multicast_find_group_src(pg, &src_ip);
+ if (!ent) {
+ ent = br_multicast_new_group_src(pg, &src_ip);
+ if (ent) {
+ __grp_src_mod_timer(ent, pg->timer.expires);
+ changed = true;
+ }
+ }
+ if (ent && timer_pending(&ent->timer)) {
+ ent->flags |= BR_SGRP_F_SEND;
+ to_send++;
+ }
+ srcs += src_size;
+ }
+
+ if (to_send)
+ __grp_src_query_marked_and_rexmit(pg);
+
+ return changed;
+}
+
+static bool br_multicast_block(struct net_bridge_port_group *pg,
+ void *srcs, u32 nsrcs, size_t src_size)
+{
+ bool changed = false;
+
+ switch (pg->filter_mode) {
+ case MCAST_INCLUDE:
+ __grp_src_block_incl(pg, srcs, nsrcs, src_size);
+ break;
+ case MCAST_EXCLUDE:
+ changed = __grp_src_block_excl(pg, srcs, nsrcs, src_size);
+ break;
+ }
+
+ return changed;
+}
+
+static struct net_bridge_port_group *
+br_multicast_find_port(struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port *p,
+ const unsigned char *src)
+{
+ struct net_bridge *br __maybe_unused = mp->br;
+ struct net_bridge_port_group *pg;
+
+ for (pg = mlock_dereference(mp->ports, br);
+ pg;
+ pg = mlock_dereference(pg->next, br))
+ if (br_port_group_equal(pg, p, src))
+ return pg;
+
+ return NULL;
+}
+
static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
struct net_bridge_port *port,
struct sk_buff *skb,
u16 vid)
{
+ bool igmpv2 = br->multicast_igmp_version == 2;
+ struct net_bridge_mdb_entry *mdst;
+ struct net_bridge_port_group *pg;
const unsigned char *src;
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
- int i;
- int len;
- int num;
- int type;
- int err = 0;
+ int i, len, num, type;
+ bool changed = false;
__be32 group;
+ int err = 0;
u16 nsrcs;
ih = igmpv3_report_hdr(skb);
@@ -947,7 +2275,6 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
if (!ip_mc_may_pull(skb, len))
return -EINVAL;
- /* We treat this as an IGMPv2 report for now. */
switch (type) {
case IGMPV3_MODE_IS_INCLUDE:
case IGMPV3_MODE_IS_EXCLUDE:
@@ -962,16 +2289,62 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
}
src = eth_hdr(skb)->h_source;
- if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
- type == IGMPV3_MODE_IS_INCLUDE) &&
- nsrcs == 0) {
- br_ip4_multicast_leave_group(br, port, group, vid, src);
+ if (nsrcs == 0 &&
+ (type == IGMPV3_CHANGE_TO_INCLUDE ||
+ type == IGMPV3_MODE_IS_INCLUDE)) {
+ if (!port || igmpv2) {
+ br_ip4_multicast_leave_group(br, port, group, vid, src);
+ continue;
+ }
} else {
err = br_ip4_multicast_add_group(br, port, group, vid,
- src);
+ src, igmpv2);
if (err)
break;
}
+
+ if (!port || igmpv2)
+ continue;
+
+ spin_lock_bh(&br->multicast_lock);
+ mdst = br_mdb_ip4_get(br, group, vid);
+ if (!mdst)
+ goto unlock_continue;
+ pg = br_multicast_find_port(mdst, port, src);
+ if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
+ goto unlock_continue;
+ /* reload grec, ip_mc_may_pull() above may have moved skb->data */
+ grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
+ switch (type) {
+ case IGMPV3_ALLOW_NEW_SOURCES:
+ case IGMPV3_MODE_IS_INCLUDE:
+ changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
+ sizeof(__be32));
+ break;
+ case IGMPV3_MODE_IS_EXCLUDE:
+ changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
+ sizeof(__be32));
+ break;
+ case IGMPV3_CHANGE_TO_INCLUDE:
+ changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
+ sizeof(__be32));
+ break;
+ case IGMPV3_CHANGE_TO_EXCLUDE:
+ changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
+ sizeof(__be32));
+ break;
+ case IGMPV3_BLOCK_OLD_SOURCES:
+ changed = br_multicast_block(pg, grec->grec_src, nsrcs,
+ sizeof(__be32));
+ break;
+ }
+ if (changed)
+ br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
+unlock_continue:
+ spin_unlock_bh(&br->multicast_lock);
}
return err;
@@ -983,14 +2356,16 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ bool mldv1 = br->multicast_mld_version == 1;
+ struct net_bridge_mdb_entry *mdst;
+ struct net_bridge_port_group *pg;
unsigned int nsrcs_offset;
const unsigned char *src;
struct icmp6hdr *icmp6h;
struct mld2_grec *grec;
unsigned int grec_len;
- int i;
- int len;
- int num;
+ bool changed = false;
+ int i, len, num;
int err = 0;
if (!ipv6_mc_may_pull(skb, sizeof(*icmp6h)))
@@ -1024,7 +2399,6 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
grec = (struct mld2_grec *)(skb->data + len);
len += grec_len;
- /* We treat these as MLDv1 reports for now. */
switch (grec->grec_type) {
case MLD2_MODE_IS_INCLUDE:
case MLD2_MODE_IS_EXCLUDE:
@@ -1042,15 +2416,61 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
nsrcs == 0) {
- br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
- vid, src);
+ if (!port || mldv1) {
+ br_ip6_multicast_leave_group(br, port,
+ &grec->grec_mca,
+ vid, src);
+ continue;
+ }
} else {
err = br_ip6_multicast_add_group(br, port,
&grec->grec_mca, vid,
- src);
+ src, mldv1);
if (err)
break;
}
+
+ if (!port || mldv1)
+ continue;
+
+ spin_lock_bh(&br->multicast_lock);
+ mdst = br_mdb_ip6_get(br, &grec->grec_mca, vid);
+ if (!mdst)
+ goto unlock_continue;
+ pg = br_multicast_find_port(mdst, port, src);
+ if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
+ goto unlock_continue;
+ switch (grec->grec_type) {
+ case MLD2_ALLOW_NEW_SOURCES:
+ case MLD2_MODE_IS_INCLUDE:
+ changed = br_multicast_isinc_allow(pg, grec->grec_src, nsrcs,
+ sizeof(struct in6_addr));
+ break;
+ case MLD2_MODE_IS_EXCLUDE:
+ changed = br_multicast_isexc(pg, grec->grec_src, nsrcs,
+ sizeof(struct in6_addr));
+ break;
+ case MLD2_CHANGE_TO_INCLUDE:
+ changed = br_multicast_toin(pg, grec->grec_src, nsrcs,
+ sizeof(struct in6_addr));
+ break;
+ case MLD2_CHANGE_TO_EXCLUDE:
+ changed = br_multicast_toex(pg, grec->grec_src, nsrcs,
+ sizeof(struct in6_addr));
+ break;
+ case MLD2_BLOCK_OLD_SOURCES:
+ changed = br_multicast_block(pg, grec->grec_src, nsrcs,
+ sizeof(struct in6_addr));
+ break;
+ }
+ if (changed)
+ br_mdb_notify(br->dev, mdst, pg, RTM_NEWMDB);
+unlock_continue:
+ spin_unlock_bh(&br->multicast_lock);
}
return err;
@@ -1065,16 +2485,16 @@ static bool br_ip4_multicast_select_querier(struct net_bridge *br,
!timer_pending(&br->ip4_other_query.timer))
goto update;
- if (!br->ip4_querier.addr.u.ip4)
+ if (!br->ip4_querier.addr.src.ip4)
goto update;
- if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.u.ip4))
+ if (ntohl(saddr) <= ntohl(br->ip4_querier.addr.src.ip4))
goto update;
return false;
update:
- br->ip4_querier.addr.u.ip4 = saddr;
+ br->ip4_querier.addr.src.ip4 = saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip4_querier.port, port);
@@ -1091,13 +2511,13 @@ static bool br_ip6_multicast_select_querier(struct net_bridge *br,
!timer_pending(&br->ip6_other_query.timer))
goto update;
- if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.u.ip6) <= 0)
+ if (ipv6_addr_cmp(saddr, &br->ip6_querier.addr.src.ip6) <= 0)
goto update;
return false;
update:
- br->ip6_querier.addr.u.ip6 = *saddr;
+ br->ip6_querier.addr.src.ip6 = *saddr;
/* update protected by general multicast_lock by caller */
rcu_assign_pointer(br->ip6_querier.port, port);
@@ -1112,10 +2532,10 @@ static bool br_multicast_select_querier(struct net_bridge *br,
{
switch (saddr->proto) {
case htons(ETH_P_IP):
- return br_ip4_multicast_select_querier(br, port, saddr->u.ip4);
+ return br_ip4_multicast_select_querier(br, port, saddr->src.ip4);
#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6):
- return br_ip6_multicast_select_querier(br, port, &saddr->u.ip6);
+ return br_ip6_multicast_select_querier(br, port, &saddr->src.ip6);
#endif
}
@@ -1245,7 +2665,8 @@ static void br_ip4_multicast_query(struct net_bridge *br,
}
} else if (transport_len >= sizeof(*ih3)) {
ih3 = igmpv3_query_hdr(skb);
- if (ih3->nsrcs)
+ if (ih3->nsrcs ||
+ (br->multicast_igmp_version == 3 && group && ih3->suppress))
goto out;
max_delay = ih3->code ?
@@ -1256,7 +2677,7 @@ static void br_ip4_multicast_query(struct net_bridge *br,
if (!group) {
saddr.proto = htons(ETH_P_IP);
- saddr.u.ip4 = iph->saddr;
+ saddr.src.ip4 = iph->saddr;
br_multicast_query_received(br, port, &br->ip4_other_query,
&saddr, max_delay);
@@ -1280,7 +2701,9 @@ static void br_ip4_multicast_query(struct net_bridge *br,
pp = &p->next) {
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
- try_to_del_timer_sync(&p->timer) >= 0)
+ try_to_del_timer_sync(&p->timer) >= 0 &&
+ (br->multicast_igmp_version == 2 ||
+ p->filter_mode == MCAST_EXCLUDE))
mod_timer(&p->timer, now + max_delay);
}
@@ -1330,6 +2753,10 @@ static int br_ip6_multicast_query(struct net_bridge *br,
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
+ if (br->multicast_mld_version == 2 &&
+ !ipv6_addr_any(&mld2q->mld2q_mca) &&
+ mld2q->mld2q_suppress)
+ goto out;
max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
@@ -1338,7 +2765,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (is_general_query) {
saddr.proto = htons(ETH_P_IPV6);
- saddr.u.ip6 = ipv6_hdr(skb)->saddr;
+ saddr.src.ip6 = ipv6_hdr(skb)->saddr;
br_multicast_query_received(br, port, &br->ip6_other_query,
&saddr, max_delay);
@@ -1363,7 +2790,9 @@ static int br_ip6_multicast_query(struct net_bridge *br,
pp = &p->next) {
if (timer_pending(&p->timer) ?
time_after(p->timer.expires, now + max_delay) :
- try_to_del_timer_sync(&p->timer) >= 0)
+ try_to_del_timer_sync(&p->timer) >= 0 &&
+ (br->multicast_mld_version == 1 ||
+ p->filter_mode == MCAST_EXCLUDE))
mod_timer(&p->timer, now + max_delay);
}
@@ -1407,16 +2836,8 @@ br_multicast_leave_group(struct net_bridge *br,
if (p->flags & MDB_PG_FLAGS_PERMANENT)
break;
- rcu_assign_pointer(*pp, p->next);
- hlist_del_init(&p->mglist);
- del_timer(&p->timer);
- kfree_rcu(p, rcu);
- br_mdb_notify(br->dev, port, group, RTM_DELMDB,
- p->flags | MDB_PG_FLAGS_FAST_LEAVE);
-
- if (!mp->ports && !mp->host_joined &&
- netif_running(br->dev))
- mod_timer(&mp->timer, jiffies);
+ p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
+ br_multicast_del_pg(mp, p, pp);
}
goto out;
}
@@ -1425,7 +2846,8 @@ br_multicast_leave_group(struct net_bridge *br,
goto out;
if (br_opt_get(br, BROPT_MULTICAST_QUERIER)) {
- __br_multicast_send_query(br, port, &mp->addr);
+ __br_multicast_send_query(br, port, NULL, NULL, &mp->addr,
+ false, 0, NULL);
time = jiffies + br->multicast_last_member_count *
br->multicast_last_member_interval;
@@ -1467,7 +2889,7 @@ br_multicast_leave_group(struct net_bridge *br,
for (p = mlock_dereference(mp->ports, br);
p != NULL;
p = mlock_dereference(p->next, br)) {
- if (p->port != port)
+ if (p->key.port != port)
continue;
if (!hlist_unhashed(&p->mglist) &&
@@ -1498,7 +2920,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip4 = group;
+ br_group.dst.ip4 = group;
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
@@ -1522,7 +2944,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
memset(&br_group, 0, sizeof(br_group));
- br_group.u.ip6 = *group;
+ br_group.dst.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
@@ -1627,7 +3049,8 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
- err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
+ err = br_ip4_multicast_add_group(br, port, ih->group, vid, src,
+ true);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
err = br_ip4_multicast_igmp3_report(br, port, skb, vid);
@@ -1706,7 +3129,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
src = eth_hdr(skb)->h_source;
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
- src);
+ src, true);
break;
case ICMPV6_MLD2_REPORT:
err = br_ip6_multicast_mld2_report(br, port, skb, vid);
@@ -1781,6 +3204,19 @@ static void br_ip6_multicast_query_expired(struct timer_list *t)
}
#endif
+static void br_multicast_gc_work(struct work_struct *work)
+{
+ struct net_bridge *br = container_of(work, struct net_bridge,
+ mcast_gc_work);
+ HLIST_HEAD(deleted_head);
+
+ spin_lock_bh(&br->multicast_lock);
+ hlist_move_list(&br->mcast_gc_list, &deleted_head);
+ spin_unlock_bh(&br->multicast_lock);
+
+ br_multicast_gc(&deleted_head);
+}
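
A brief gloss on the locking pattern above (editor's note, not part of the patch):

/* Editor's illustration: the work handler detaches the whole gc list
 * under multicast_lock via hlist_move_list(), then runs the destroy
 * callbacks in br_multicast_gc() with the spinlock released - the
 * classic "splice under lock, free outside the lock" pattern.
 */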
+
void br_multicast_init(struct net_bridge *br)
{
br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;
@@ -1821,6 +3257,8 @@ void br_multicast_init(struct net_bridge *br)
br_ip6_multicast_query_expired, 0);
#endif
INIT_HLIST_HEAD(&br->mdb_list);
+ INIT_HLIST_HEAD(&br->mcast_gc_list);
+ INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
@@ -1924,18 +3362,18 @@ void br_multicast_stop(struct net_bridge *br)
void br_multicast_dev_del(struct net_bridge *br)
{
struct net_bridge_mdb_entry *mp;
+ HLIST_HEAD(deleted_head);
struct hlist_node *tmp;
spin_lock_bh(&br->multicast_lock);
- hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node) {
- del_timer(&mp->timer);
- rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
- br_mdb_rht_params);
- hlist_del_rcu(&mp->mdb_node);
- kfree_rcu(mp, rcu);
- }
+ hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
+ br_multicast_del_mdb_entry(mp);
+ hlist_move_list(&br->mcast_gc_list, &deleted_head);
spin_unlock_bh(&br->multicast_lock);
+ br_multicast_gc(&deleted_head);
+ cancel_work_sync(&br->mcast_gc_work);
+
rcu_barrier();
}
@@ -2211,7 +3649,7 @@ int br_multicast_list_adjacent(struct net_device *dev,
if (!entry)
goto unlock;
- entry->addr = group->addr;
+ entry->addr = group->key.addr;
list_add(&entry->list, br_ip_list);
count++;
}
@@ -2468,10 +3906,23 @@ void br_multicast_get_stats(const struct net_bridge *br,
int br_mdb_hash_init(struct net_bridge *br)
{
- return rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
+ int err;
+
+ err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
+ if (err)
+ return err;
+
+ err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
+ if (err) {
+ rhashtable_destroy(&br->sg_port_tbl);
+ return err;
+ }
+
+ return 0;
}
void br_mdb_hash_fini(struct net_bridge *br)
{
+ rhashtable_destroy(&br->sg_port_tbl);
rhashtable_destroy(&br->mdb_hash_tbl);
}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index da310f0ca725..92d64abffa87 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1091,8 +1091,8 @@ static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
[IFLA_BR_VLAN_STATS_PER_PORT] = { .type = NLA_U8 },
- [IFLA_BR_MULTI_BOOLOPT] = { .type = NLA_EXACT_LEN,
- .len = sizeof(struct br_boolopt_multi) },
+ [IFLA_BR_MULTI_BOOLOPT] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct br_boolopt_multi)),
};
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index baa1500f384f..345118e35c42 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -213,27 +213,71 @@ struct net_bridge_fdb_entry {
#define MDB_PG_FLAGS_PERMANENT BIT(0)
#define MDB_PG_FLAGS_OFFLOAD BIT(1)
#define MDB_PG_FLAGS_FAST_LEAVE BIT(2)
+#define MDB_PG_FLAGS_STAR_EXCL BIT(3)
+#define MDB_PG_FLAGS_BLOCKED BIT(4)
-struct net_bridge_port_group {
- struct net_bridge_port *port;
- struct net_bridge_port_group __rcu *next;
- struct hlist_node mglist;
- struct rcu_head rcu;
+#define PG_SRC_ENT_LIMIT 32
+
+#define BR_SGRP_F_DELETE BIT(0)
+#define BR_SGRP_F_SEND BIT(1)
+#define BR_SGRP_F_INSTALLED BIT(2)
+
+struct net_bridge_mcast_gc {
+ struct hlist_node gc_node;
+ void (*destroy)(struct net_bridge_mcast_gc *gc);
+};
+
+struct net_bridge_group_src {
+ struct hlist_node node;
+
+ struct br_ip addr;
+ struct net_bridge_port_group *pg;
+ u8 flags;
+ u8 src_query_rexmit_cnt;
struct timer_list timer;
+
+ struct net_bridge *br;
+ struct net_bridge_mcast_gc mcast_gc;
+ struct rcu_head rcu;
+};
+
+struct net_bridge_port_group_sg_key {
+ struct net_bridge_port *port;
struct br_ip addr;
+};
+
+struct net_bridge_port_group {
+ struct net_bridge_port_group __rcu *next;
+ struct net_bridge_port_group_sg_key key;
unsigned char eth_addr[ETH_ALEN] __aligned(2);
unsigned char flags;
+ unsigned char filter_mode;
+ unsigned char grp_query_rexmit_cnt;
+ unsigned char rt_protocol;
+
+ struct hlist_head src_list;
+ unsigned int src_ents;
+ struct timer_list timer;
+ struct timer_list rexmit_timer;
+ struct hlist_node mglist;
+
+ struct rhash_head rhnode;
+ struct net_bridge_mcast_gc mcast_gc;
+ struct rcu_head rcu;
};
struct net_bridge_mdb_entry {
struct rhash_head rhnode;
struct net_bridge *br;
struct net_bridge_port_group __rcu *ports;
- struct rcu_head rcu;
- struct timer_list timer;
struct br_ip addr;
bool host_joined;
+
+ struct timer_list timer;
struct hlist_node mdb_node;
+
+ struct net_bridge_mcast_gc mcast_gc;
+ struct rcu_head rcu;
};
struct net_bridge_port {
@@ -405,7 +449,9 @@ struct net_bridge {
unsigned long multicast_startup_query_interval;
struct rhashtable mdb_hash_tbl;
+ struct rhashtable sg_port_tbl;
+ struct hlist_head mcast_gc_list;
struct hlist_head mdb_list;
struct hlist_head router_list;
@@ -419,6 +465,7 @@ struct net_bridge {
struct bridge_mcast_own_query ip6_own_query;
struct bridge_mcast_querier ip6_querier;
#endif /* IS_ENABLED(CONFIG_IPV6) */
+ struct work_struct mcast_gc_work;
#endif
struct timer_list hello_timer;
@@ -766,13 +813,17 @@ br_multicast_new_group(struct net_bridge *br, struct br_ip *group);
struct net_bridge_port_group *
br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
struct net_bridge_port_group __rcu *next,
- unsigned char flags, const unsigned char *src);
+ unsigned char flags, const unsigned char *src,
+ u8 filter_mode, u8 rt_protocol);
int br_mdb_hash_init(struct net_bridge *br);
void br_mdb_hash_fini(struct net_bridge *br);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
- struct br_ip *group, int type, u8 flags);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg, int type);
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
int type);
+void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
+ struct net_bridge_port_group *pg,
+ struct net_bridge_port_group __rcu **pp);
void br_multicast_count(struct net_bridge *br, const struct net_bridge_port *p,
const struct sk_buff *skb, u8 type, u8 dir);
int br_multicast_init_stats(struct net_bridge *br);
@@ -784,6 +835,10 @@ void br_mdb_init(void);
void br_mdb_uninit(void);
void br_multicast_host_join(struct net_bridge_mdb_entry *mp, bool notify);
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify);
+void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
+ u8 filter_mode);
+void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
+ struct net_bridge_port_group *sg);
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
@@ -832,10 +887,52 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
}
}
+static inline bool br_multicast_is_star_g(const struct br_ip *ip)
+{
+ switch (ip->proto) {
+ case htons(ETH_P_IP):
+ return ipv4_is_zeronet(ip->src.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ return ipv6_addr_any(&ip->src.ip6);
+#endif
+ default:
+ return false;
+ }
+}
+
+static inline bool br_multicast_should_handle_mode(const struct net_bridge *br,
+ __be16 proto)
+{
+ switch (proto) {
+ case htons(ETH_P_IP):
+ return !!(br->multicast_igmp_version == 3);
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ return !!(br->multicast_mld_version == 2);
+#endif
+ default:
+ return false;
+ }
+}
+
static inline int br_multicast_igmp_type(const struct sk_buff *skb)
{
return BR_INPUT_SKB_CB(skb)->igmp;
}
+
+static inline unsigned long br_multicast_lmqt(const struct net_bridge *br)
+{
+ return br->multicast_last_member_interval *
+ br->multicast_last_member_count;
+}
+
+static inline unsigned long br_multicast_gmi(const struct net_bridge *br)
+{
+ /* use the RFC default of 2 for QRV */
+ return 2 * br->multicast_query_interval +
+ br->multicast_query_response_interval;
+}
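
A worked example of the two helpers above (a hedged gloss, assuming the bridge's usual default timers of a 125 s query interval, 10 s query response interval, 1 s last member interval and a last member count of 2):

/* Editor's illustration, assuming default bridge timers:
 *
 *   lmqt = last_member_interval * last_member_count = 1 s * 2 = 2 s
 *   gmi  = 2 * query_interval + query_response_interval
 *        = 2 * 125 s + 10 s = 260 s  (RFC 3376/3810 GMI with QRV = 2)
 */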
#else
static inline int br_multicast_rcv(struct net_bridge *br,
struct net_bridge_port *port,
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index ee8780080be5..3e493eb85bb2 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -140,7 +140,7 @@ static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
return err == -EOPNOTSUPP ? 0 : err;
}
-/* Returns a master vlan, if it didn't exist it gets created. In all cases a
+/* Returns a master vlan, if it didn't exist it gets created. In all cases
* a reference is taken to the master vlan before returning.
*/
static struct net_bridge_vlan *
@@ -1897,8 +1897,8 @@ out_err:
}
static const struct nla_policy br_vlan_db_policy[BRIDGE_VLANDB_ENTRY_MAX + 1] = {
- [BRIDGE_VLANDB_ENTRY_INFO] = { .type = NLA_EXACT_LEN,
- .len = sizeof(struct bridge_vlan_info) },
+ [BRIDGE_VLANDB_ENTRY_INFO] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct bridge_vlan_info)),
[BRIDGE_VLANDB_ENTRY_RANGE] = { .type = NLA_U16 },
[BRIDGE_VLANDB_ENTRY_STATE] = { .type = NLA_U8 },
[BRIDGE_VLANDB_ENTRY_TUNNEL_INFO] = { .type = NLA_NESTED },
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 0d6d20c9105e..8f68afda5f81 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -15,7 +15,6 @@
#include <linux/netfilter_bridge/ebt_stp.h>
#define BPDU_TYPE_CONFIG 0
-#define BPDU_TYPE_TCN 0x80
struct stp_header {
u8 dsap;
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index d0a4d0ac7045..9cef9496a707 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -21,7 +21,6 @@
#define SRVL_FLOW_OFF 0x81
#define SRVL_FLOW_ON 0x80
#define SRVL_SET_PIN 0x82
-#define SRVL_CTRL_PKT_SIZE 1
#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
diff --git a/net/can/Kconfig b/net/can/Kconfig
index 25436a715db3..224e5e0283a9 100644
--- a/net/can/Kconfig
+++ b/net/can/Kconfig
@@ -55,6 +55,20 @@ config CAN_GW
source "net/can/j1939/Kconfig"
+config CAN_ISOTP
+ tristate "ISO 15765-2:2016 CAN transport protocol"
+ help
+ CAN Transport Protocols offer support for segmented Point-to-Point
+ communication between CAN nodes via two defined CAN Identifiers.
+	  As CAN frames can only transport a limited number of data bytes
+	  (max. 8 bytes for 'classic' CAN and max. 64 bytes for CAN FD), this
+	  segmentation is needed to transport longer PDUs, e.g. for
+	  vehicle diagnostics (UDS, ISO 14229) or IP-over-CAN traffic.
+ This protocol driver implements data transfers according to
+ ISO 15765-2:2016 for 'classic' CAN and CAN FD frame types.
+ If you want to perform automotive vehicle diagnostic services (UDS),
+ say 'y'.
+
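
To make the new protocol concrete, a minimal user-space sketch (not part of this patch; the UDS-style IDs 0x7e0/0x7e8 and the interface name "can0" are placeholders):

#include <stdio.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/isotp.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	unsigned char req[] = { 0x22, 0xf1, 0x90 };	/* example UDS request */
	unsigned char resp[4096];
	int s, n;

	s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
	if (s < 0)
		return 1;

	addr.can_ifindex = if_nametoindex("can0");
	addr.can_addr.tp.tx_id = 0x7e0;
	addr.can_addr.tp.rx_id = 0x7e8;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	write(s, req, sizeof(req));		/* segmented by the kernel */
	n = read(s, resp, sizeof(resp));	/* reassembled PDU */
	printf("received %d byte PDU\n", n);
	close(s);
	return 0;
}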
source "drivers/net/can/Kconfig"
endif
diff --git a/net/can/Makefile b/net/can/Makefile
index 08bd217fc051..58f2c31c1ef3 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -17,3 +17,6 @@ obj-$(CONFIG_CAN_GW) += can-gw.o
can-gw-y := gw.o
obj-$(CONFIG_CAN_J1939) += j1939/
+
+obj-$(CONFIG_CAN_ISOTP) += can-isotp.o
+can-isotp-y := isotp.o
diff --git a/net/can/af_can.c b/net/can/af_can.c
index 5c06404bdf3e..6373ab9c5507 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -338,7 +338,7 @@ static unsigned int effhash(canid_t can_id)
* can_rcv_list_find - determine optimal filterlist inside device filter struct
* @can_id: pointer to CAN identifier of a given can_filter
* @mask: pointer to CAN mask of a given can_filter
- * @d: pointer to the device filter struct
+ * @dev_rcv_lists: pointer to the device filter struct
*
* Description:
* Returns the optimal filterlist to reduce the filter handling in the
@@ -358,7 +358,7 @@ static unsigned int effhash(canid_t can_id)
*
* Return:
* Pointer to optimal filterlist for the given can_id/mask pair.
- * Constistency checked mask.
+ * Consistency checked mask.
* Reduced can_id to have a preprocessed filter compare value.
*/
static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
@@ -411,7 +411,7 @@ static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask,
/**
* can_rx_register - subscribe CAN frames from a specific interface
* @net: the applicable net namespace
- * @dev: pointer to netdevice (NULL => subcribe from 'all' CAN devices list)
+ * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
* @can_id: CAN identifier (see description)
* @mask: CAN mask (see description)
* @func: callback function on filter match
@@ -875,7 +875,7 @@ static __init int can_init(void)
offsetof(struct can_frame, data) !=
offsetof(struct canfd_frame, data));
- pr_info("can: controller area network core (" CAN_VERSION_STRING ")\n");
+ pr_info("can: controller area network core\n");
rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
0, 0, NULL);
diff --git a/net/can/bcm.c b/net/can/bcm.c
index d14ea12affb1..0e5c37be4a2b 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
*
@@ -81,8 +81,6 @@
(CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
(CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
-#define CAN_BCM_VERSION "20170425"
-
MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
@@ -1696,7 +1694,7 @@ static int __init bcm_module_init(void)
{
int err;
- pr_info("can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n");
+ pr_info("can: broadcast manager protocol\n");
err = can_proto_register(&bcm_can_proto);
if (err < 0) {
diff --git a/net/can/gw.c b/net/can/gw.c
index 65d60c93af29..6b790b6ff8d2 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
*
* Copyright (c) 2019 Volkswagen Group Electronic Research
@@ -59,7 +59,6 @@
#include <net/net_namespace.h>
#include <net/sock.h>
-#define CAN_GW_VERSION "20190810"
#define CAN_GW_NAME "can-gw"
MODULE_DESCRIPTION("PF_CAN netlink gateway");
@@ -1194,8 +1193,7 @@ static __init int cgw_module_init(void)
/* sanitize given module parameter */
max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);
- pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
- max_hops);
+ pr_info("can: netlink gateway - max_hops=%d\n", max_hops);
ret = register_pernet_subsys(&cangw_pernet_ops);
if (ret)
diff --git a/net/can/isotp.c b/net/can/isotp.c
new file mode 100644
index 000000000000..4c2062875893
--- /dev/null
+++ b/net/can/isotp.c
@@ -0,0 +1,1424 @@
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
+/* isotp.c - ISO 15765-2 CAN transport protocol for protocol family CAN
+ *
+ * This implementation does not provide ISO-TP specific return values to the
+ * userspace.
+ *
+ * - RX path timeout of data reception leads to -ETIMEDOUT
+ * - RX path SN mismatch leads to -EILSEQ
+ * - RX path data reception with wrong padding leads to -EBADMSG
+ * - TX path flowcontrol reception timeout leads to -ECOMM
+ * - TX path flowcontrol reception overflow leads to -EMSGSIZE
+ * - TX path flowcontrol reception with wrong layout/padding leads to -EBADMSG
+ * - while a transfer (tx) is in progress the next write() blocks until it's done
+ * - use CAN_ISOTP_WAIT_TX_DONE flag to block the caller until the PDU is sent
+ * - as we have static buffers, the check whether the PDU fits into the
+ *   buffer is done at FF reception time (no support for sending 'wait frames')
+ * - take care of the tx-queue-len as traffic shaping is still on the TODO list
+ *
+ * Copyright (c) 2020 Volkswagen Group Electronic Research
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/hrtimer.h>
+#include <linux/wait.h>
+#include <linux/uio.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+#include <linux/if_arp.h>
+#include <linux/skbuff.h>
+#include <linux/can.h>
+#include <linux/can/core.h>
+#include <linux/can/skb.h>
+#include <linux/can/isotp.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <net/net_namespace.h>
+
+MODULE_DESCRIPTION("PF_CAN isotp 15765-2:2016 protocol");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+MODULE_ALIAS("can-proto-6");
+
+#define SINGLE_MASK(id) (((id) & CAN_EFF_FLAG) ? \
+ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \
+ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG))
+
+/* ISO 15765-2:2016 supports more than 4095 bytes per ISO PDU as the FF_DL can
+ * take full 32 bit values (4 Gbyte). We would need a good concept to handle
+ * this between user space and kernel space. For now increase the static buffer
+ * to about 8 kbyte to be able to test this new functionality.
+ */
+#define MAX_MSG_LENGTH 8200
+
+/* N_PCI type values in bits 7-4 of N_PCI bytes */
+#define N_PCI_SF 0x00 /* single frame */
+#define N_PCI_FF 0x10 /* first frame */
+#define N_PCI_CF 0x20 /* consecutive frame */
+#define N_PCI_FC 0x30 /* flow control */
+
+#define N_PCI_SZ 1 /* size of the PCI byte #1 */
+#define SF_PCI_SZ4 1 /* size of SingleFrame PCI including 4 bit SF_DL */
+#define SF_PCI_SZ8 2 /* size of SingleFrame PCI including 8 bit SF_DL */
+#define FF_PCI_SZ12 2 /* size of FirstFrame PCI including 12 bit FF_DL */
+#define FF_PCI_SZ32 6 /* size of FirstFrame PCI including 32 bit FF_DL */
+#define FC_CONTENT_SZ 3 /* flow control content size in byte (FS/BS/STmin) */
+
+#define ISOTP_CHECK_PADDING (CAN_ISOTP_CHK_PAD_LEN | CAN_ISOTP_CHK_PAD_DATA)
+
+/* Flow Status given in FC frame */
+#define ISOTP_FC_CTS 0 /* clear to send */
+#define ISOTP_FC_WT 1 /* wait */
+#define ISOTP_FC_OVFLW 2 /* overflow */
+
+enum {
+ ISOTP_IDLE = 0,
+ ISOTP_WAIT_FIRST_FC,
+ ISOTP_WAIT_FC,
+ ISOTP_WAIT_DATA,
+ ISOTP_SENDING
+};
+
+struct tpcon {
+ int idx;
+ int len;
+ u8 state;
+ u8 bs;
+ u8 sn;
+ u8 ll_dl;
+ u8 buf[MAX_MSG_LENGTH + 1];
+};
+
+struct isotp_sock {
+ struct sock sk;
+ int bound;
+ int ifindex;
+ canid_t txid;
+ canid_t rxid;
+ ktime_t tx_gap;
+ ktime_t lastrxcf_tstamp;
+ struct hrtimer rxtimer, txtimer;
+ struct can_isotp_options opt;
+ struct can_isotp_fc_options rxfc, txfc;
+ struct can_isotp_ll_options ll;
+ u32 force_tx_stmin;
+ u32 force_rx_stmin;
+ struct tpcon rx, tx;
+ struct notifier_block notifier;
+ wait_queue_head_t wait;
+};
+
+static inline struct isotp_sock *isotp_sk(const struct sock *sk)
+{
+ return (struct isotp_sock *)sk;
+}
+
+static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer)
+{
+ struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+ rxtimer);
+ struct sock *sk = &so->sk;
+
+ if (so->rx.state == ISOTP_WAIT_DATA) {
+ /* we did not get new data frames in time */
+
+ /* report 'connection timed out' */
+ sk->sk_err = ETIMEDOUT;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+
+ /* reset rx state */
+ so->rx.state = ISOTP_IDLE;
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+static int isotp_send_fc(struct sock *sk, int ae, u8 flowstatus)
+{
+ struct net_device *dev;
+ struct sk_buff *nskb;
+ struct canfd_frame *ncf;
+ struct isotp_sock *so = isotp_sk(sk);
+ int can_send_ret;
+
+ nskb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv), gfp_any());
+ if (!nskb)
+ return 1;
+
+ dev = dev_get_by_index(sock_net(sk), so->ifindex);
+ if (!dev) {
+ kfree_skb(nskb);
+ return 1;
+ }
+
+ can_skb_reserve(nskb);
+ can_skb_prv(nskb)->ifindex = dev->ifindex;
+ can_skb_prv(nskb)->skbcnt = 0;
+
+ nskb->dev = dev;
+ can_skb_set_owner(nskb, sk);
+ ncf = (struct canfd_frame *)nskb->data;
+ skb_put(nskb, so->ll.mtu);
+
+ /* create & send flow control reply */
+ ncf->can_id = so->txid;
+
+ if (so->opt.flags & CAN_ISOTP_TX_PADDING) {
+ memset(ncf->data, so->opt.txpad_content, CAN_MAX_DLEN);
+ ncf->len = CAN_MAX_DLEN;
+ } else {
+ ncf->len = ae + FC_CONTENT_SZ;
+ }
+
+ ncf->data[ae] = N_PCI_FC | flowstatus;
+ ncf->data[ae + 1] = so->rxfc.bs;
+ ncf->data[ae + 2] = so->rxfc.stmin;
+
+ if (ae)
+ ncf->data[0] = so->opt.ext_address;
+
+ if (so->ll.mtu == CANFD_MTU)
+ ncf->flags = so->ll.tx_flags;
+
+ can_send_ret = can_send(nskb, 1);
+ if (can_send_ret)
+ pr_notice_once("can-isotp: %s: can_send_ret %d\n",
+ __func__, can_send_ret);
+
+ dev_put(dev);
+
+ /* reset blocksize counter */
+ so->rx.bs = 0;
+
+ /* reset last CF frame rx timestamp for rx stmin enforcement */
+ so->lastrxcf_tstamp = ktime_set(0, 0);
+
+ /* start rx timeout watchdog */
+ hrtimer_start(&so->rxtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
+ return 0;
+}
+
+static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk)
+{
+ struct sockaddr_can *addr = (struct sockaddr_can *)skb->cb;
+
+ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
+
+ memset(addr, 0, sizeof(*addr));
+ addr->can_family = AF_CAN;
+ addr->can_ifindex = skb->dev->ifindex;
+
+ if (sock_queue_rcv_skb(sk, skb) < 0)
+ kfree_skb(skb);
+}
+
+static u8 padlen(u8 datalen)
+{
+ const u8 plen[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, /* 0 - 8 */
+ 12, 12, 12, 12, /* 9 - 12 */
+ 16, 16, 16, 16, /* 13 - 16 */
+ 20, 20, 20, 20, /* 17 - 20 */
+ 24, 24, 24, 24, /* 21 - 24 */
+ 32, 32, 32, 32, 32, 32, 32, 32, /* 25 - 32 */
+ 48, 48, 48, 48, 48, 48, 48, 48, /* 33 - 40 */
+ 48, 48, 48, 48, 48, 48, 48, 48}; /* 41 - 48 */
+
+ if (datalen > 48)
+ return 64;
+
+ return plen[datalen];
+}
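
The lookup table implements the discrete CAN FD frame lengths of ISO 11898-1; a few sample values, as a hedged illustration:

/* Editor's illustration of the mapping above:
 *
 *   padlen(7)  ==  8    padlen(13) == 16    padlen(33) == 48
 *   padlen(9)  == 12    padlen(21) == 24    padlen(49) == 64
 */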
+
+/* check for length optimization and return 1/true when the check fails */
+static int check_optimized(struct canfd_frame *cf, int start_index)
+{
+ /* for CAN_DL <= 8 the start_index is equal to the CAN_DL as the
+ * padding would start at this point. E.g. if the padding would
+ * start at cf->data[7], cf->len has to be 7 to be optimal.
+ * Note: The data[] index starts with zero.
+ */
+ if (cf->len <= CAN_MAX_DLEN)
+ return (cf->len != start_index);
+
+ /* This relation is also valid in the non-linear DLC range, where
+ * we need to take care of the minimal next possible CAN_DL.
+ * The correct check would be (padlen(cf->len) != padlen(start_index)).
+ * But as cf->len can only take discrete values from 12, .., 64 at this
+ * point the padlen(cf->len) is always equal to cf->len.
+ */
+ return (cf->len != padlen(start_index));
+}
+
+/* check padding and return 1/true when the check fails */
+static int check_pad(struct isotp_sock *so, struct canfd_frame *cf,
+ int start_index, u8 content)
+{
+ int i;
+
+ /* no RX_PADDING value => check length of the optimized frame */
+ if (!(so->opt.flags & CAN_ISOTP_RX_PADDING)) {
+ if (so->opt.flags & CAN_ISOTP_CHK_PAD_LEN)
+ return check_optimized(cf, start_index);
+
+ /* no valid test against empty value => ignore frame */
+ return 1;
+ }
+
+ /* check datalength of correctly padded CAN frame */
+ if ((so->opt.flags & CAN_ISOTP_CHK_PAD_LEN) &&
+ cf->len != padlen(cf->len))
+ return 1;
+
+ /* check padding content */
+ if (so->opt.flags & CAN_ISOTP_CHK_PAD_DATA) {
+ for (i = start_index; i < cf->len; i++)
+ if (cf->data[i] != content)
+ return 1;
+ }
+ return 0;
+}
+
+static int isotp_rcv_fc(struct isotp_sock *so, struct canfd_frame *cf, int ae)
+{
+ struct sock *sk = &so->sk;
+
+ if (so->tx.state != ISOTP_WAIT_FC &&
+ so->tx.state != ISOTP_WAIT_FIRST_FC)
+ return 0;
+
+ hrtimer_cancel(&so->txtimer);
+
+ if ((cf->len < ae + FC_CONTENT_SZ) ||
+ ((so->opt.flags & ISOTP_CHECK_PADDING) &&
+ check_pad(so, cf, ae + FC_CONTENT_SZ, so->opt.rxpad_content))) {
+ /* malformed PDU - report 'not a data message' */
+ sk->sk_err = EBADMSG;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+
+ so->tx.state = ISOTP_IDLE;
+ wake_up_interruptible(&so->wait);
+ return 1;
+ }
+
+ /* get communication parameters only from the first FC frame */
+ if (so->tx.state == ISOTP_WAIT_FIRST_FC) {
+ so->txfc.bs = cf->data[ae + 1];
+ so->txfc.stmin = cf->data[ae + 2];
+
+ /* fix wrong STmin values according to the spec */
+ if (so->txfc.stmin > 0x7F &&
+ (so->txfc.stmin < 0xF1 || so->txfc.stmin > 0xF9))
+ so->txfc.stmin = 0x7F;
+
+ so->tx_gap = ktime_set(0, 0);
+ /* add transmission time for CAN frame N_As */
+ so->tx_gap = ktime_add_ns(so->tx_gap, so->opt.frame_txtime);
+ /* add waiting time for consecutive frames N_Cs */
+ if (so->opt.flags & CAN_ISOTP_FORCE_TXSTMIN)
+ so->tx_gap = ktime_add_ns(so->tx_gap,
+ so->force_tx_stmin);
+ else if (so->txfc.stmin < 0x80)
+ so->tx_gap = ktime_add_ns(so->tx_gap,
+ so->txfc.stmin * 1000000);
+ else
+ so->tx_gap = ktime_add_ns(so->tx_gap,
+ (so->txfc.stmin - 0xF0)
+ * 100000);
+ so->tx.state = ISOTP_WAIT_FC;
+ }
+
+ switch (cf->data[ae] & 0x0F) {
+ case ISOTP_FC_CTS:
+ so->tx.bs = 0;
+ so->tx.state = ISOTP_SENDING;
+ /* start cyclic timer for sending CF frame */
+ hrtimer_start(&so->txtimer, so->tx_gap,
+ HRTIMER_MODE_REL_SOFT);
+ break;
+
+ case ISOTP_FC_WT:
+ /* start timer to wait for next FC frame */
+ hrtimer_start(&so->txtimer, ktime_set(1, 0),
+ HRTIMER_MODE_REL_SOFT);
+ break;
+
+ case ISOTP_FC_OVFLW:
+ /* overflow on receiver side - report 'message too long' */
+ sk->sk_err = EMSGSIZE;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+ fallthrough;
+
+ default:
+ /* stop this tx job */
+ so->tx.state = ISOTP_IDLE;
+ wake_up_interruptible(&so->wait);
+ }
+ return 0;
+}
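
The STmin handling in isotp_rcv_fc() above follows the ISO 15765-2 encoding; a minimal standalone sketch mirroring that logic (editor's sketch, the helper name is hypothetical):

/* Editor's sketch of the STmin byte decoding used above:
 *   0x00..0x7F -> separation time in milliseconds
 *   0xF1..0xF9 -> separation time in 100 us steps
 *   reserved   -> clamped to the 0x7F (127 ms) maximum
 */
static u64 stmin_to_ns(u8 stmin)
{
	if (stmin < 0x80)
		return stmin * 1000000ULL;		/* 0..127 ms */
	if (stmin >= 0xF1 && stmin <= 0xF9)
		return (stmin - 0xF0) * 100000ULL;	/* 100..900 us */
	return 0x7F * 1000000ULL;			/* reserved value */
}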
+
+static int isotp_rcv_sf(struct sock *sk, struct canfd_frame *cf, int pcilen,
+ struct sk_buff *skb, int len)
+{
+ struct isotp_sock *so = isotp_sk(sk);
+ struct sk_buff *nskb;
+
+ hrtimer_cancel(&so->rxtimer);
+ so->rx.state = ISOTP_IDLE;
+
+ if (!len || len > cf->len - pcilen)
+ return 1;
+
+ if ((so->opt.flags & ISOTP_CHECK_PADDING) &&
+ check_pad(so, cf, pcilen + len, so->opt.rxpad_content)) {
+ /* malformed PDU - report 'not a data message' */
+ sk->sk_err = EBADMSG;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+ return 1;
+ }
+
+ nskb = alloc_skb(len, gfp_any());
+ if (!nskb)
+ return 1;
+
+ memcpy(skb_put(nskb, len), &cf->data[pcilen], len);
+
+ nskb->tstamp = skb->tstamp;
+ nskb->dev = skb->dev;
+ isotp_rcv_skb(nskb, sk);
+ return 0;
+}
+
+static int isotp_rcv_ff(struct sock *sk, struct canfd_frame *cf, int ae)
+{
+ struct isotp_sock *so = isotp_sk(sk);
+ int i;
+ int off;
+ int ff_pci_sz;
+
+ hrtimer_cancel(&so->rxtimer);
+ so->rx.state = ISOTP_IDLE;
+
+ /* get the used sender LL_DL from the (first) CAN frame data length */
+ so->rx.ll_dl = padlen(cf->len);
+
+ /* the first frame has to use the entire frame up to LL_DL length */
+ if (cf->len != so->rx.ll_dl)
+ return 1;
+
+ /* get the FF_DL */
+ so->rx.len = (cf->data[ae] & 0x0F) << 8;
+ so->rx.len += cf->data[ae + 1];
+
+ /* Check for FF_DL escape sequence supporting 32 bit PDU length */
+ if (so->rx.len) {
+ ff_pci_sz = FF_PCI_SZ12;
+ } else {
+ /* FF_DL = 0 => get real length from next 4 bytes */
+ so->rx.len = cf->data[ae + 2] << 24;
+ so->rx.len += cf->data[ae + 3] << 16;
+ so->rx.len += cf->data[ae + 4] << 8;
+ so->rx.len += cf->data[ae + 5];
+ ff_pci_sz = FF_PCI_SZ32;
+ }
+
+ /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
+ off = (so->rx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
+
+ if (so->rx.len + ae + off + ff_pci_sz < so->rx.ll_dl)
+ return 1;
+
+ if (so->rx.len > MAX_MSG_LENGTH) {
+ /* send FC frame with overflow status */
+ isotp_send_fc(sk, ae, ISOTP_FC_OVFLW);
+ return 1;
+ }
+
+ /* copy the first received data bytes */
+ so->rx.idx = 0;
+ for (i = ae + ff_pci_sz; i < so->rx.ll_dl; i++)
+ so->rx.buf[so->rx.idx++] = cf->data[i];
+
+ /* initial setup for this pdu reception */
+ so->rx.sn = 1;
+ so->rx.state = ISOTP_WAIT_DATA;
+
+ /* no creation of flow control frames */
+ if (so->opt.flags & CAN_ISOTP_LISTEN_MODE)
+ return 0;
+
+ /* send our first FC frame */
+ isotp_send_fc(sk, ae, ISOTP_FC_CTS);
+ return 0;
+}
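
For reference, the two First Frame PCI layouts parsed above, shown for normal addressing (ae == 0); a hedged illustration, not part of the patch:

/* Editor's illustration of the FF_DL notations handled above:
 *
 *   12 bit FF_DL (1..4095):  data[0] = 0x10 | (len >> 8), data[1] = len
 *   32 bit FF_DL escape:     data[0] = 0x10, data[1] = 0x00,
 *                            data[2..5] = PDU length, most significant
 *                            byte first
 */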
+
+static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae,
+ struct sk_buff *skb)
+{
+ struct isotp_sock *so = isotp_sk(sk);
+ struct sk_buff *nskb;
+ int i;
+
+ if (so->rx.state != ISOTP_WAIT_DATA)
+ return 0;
+
+ /* drop if timestamp gap is less than force_rx_stmin nanoseconds */
+ if (so->opt.flags & CAN_ISOTP_FORCE_RXSTMIN) {
+ if (ktime_to_ns(ktime_sub(skb->tstamp, so->lastrxcf_tstamp)) <
+ so->force_rx_stmin)
+ return 0;
+
+ so->lastrxcf_tstamp = skb->tstamp;
+ }
+
+ hrtimer_cancel(&so->rxtimer);
+
+ /* CFs are never longer than the FF */
+ if (cf->len > so->rx.ll_dl)
+ return 1;
+
+ /* CFs usually have the LL_DL length */
+ if (cf->len < so->rx.ll_dl) {
+ /* this is only allowed for the last CF */
+ if (so->rx.len - so->rx.idx > so->rx.ll_dl - ae - N_PCI_SZ)
+ return 1;
+ }
+
+ if ((cf->data[ae] & 0x0F) != so->rx.sn) {
+ /* wrong sn detected - report 'illegal byte sequence' */
+ sk->sk_err = EILSEQ;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+
+ /* reset rx state */
+ so->rx.state = ISOTP_IDLE;
+ return 1;
+ }
+ so->rx.sn++;
+ so->rx.sn %= 16;
+
+ for (i = ae + N_PCI_SZ; i < cf->len; i++) {
+ so->rx.buf[so->rx.idx++] = cf->data[i];
+ if (so->rx.idx >= so->rx.len)
+ break;
+ }
+
+ if (so->rx.idx >= so->rx.len) {
+ /* we are done */
+ so->rx.state = ISOTP_IDLE;
+
+ if ((so->opt.flags & ISOTP_CHECK_PADDING) &&
+ check_pad(so, cf, i + 1, so->opt.rxpad_content)) {
+ /* malformed PDU - report 'not a data message' */
+ sk->sk_err = EBADMSG;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+ return 1;
+ }
+
+ nskb = alloc_skb(so->rx.len, gfp_any());
+ if (!nskb)
+ return 1;
+
+ memcpy(skb_put(nskb, so->rx.len), so->rx.buf,
+ so->rx.len);
+
+ nskb->tstamp = skb->tstamp;
+ nskb->dev = skb->dev;
+ isotp_rcv_skb(nskb, sk);
+ return 0;
+ }
+
+ /* no creation of flow control frames */
+ if (so->opt.flags & CAN_ISOTP_LISTEN_MODE)
+ return 0;
+
+ /* perform blocksize handling, if enabled */
+ if (!so->rxfc.bs || ++so->rx.bs < so->rxfc.bs) {
+ /* start rx timeout watchdog */
+ hrtimer_start(&so->rxtimer, ktime_set(1, 0),
+ HRTIMER_MODE_REL_SOFT);
+ return 0;
+ }
+
+ /* we reached the specified blocksize so->rxfc.bs */
+ isotp_send_fc(sk, ae, ISOTP_FC_CTS);
+ return 0;
+}
+
+static void isotp_rcv(struct sk_buff *skb, void *data)
+{
+ struct sock *sk = (struct sock *)data;
+ struct isotp_sock *so = isotp_sk(sk);
+ struct canfd_frame *cf;
+ int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
+ u8 n_pci_type, sf_dl;
+
+ /* Strictly receive only frames with the configured MTU size
+ * => clear separation of CAN2.0 / CAN FD transport channels
+ */
+ if (skb->len != so->ll.mtu)
+ return;
+
+ cf = (struct canfd_frame *)skb->data;
+
+ /* if enabled: check reception of my configured extended address */
+ if (ae && cf->data[0] != so->opt.rx_ext_address)
+ return;
+
+ n_pci_type = cf->data[ae] & 0xF0;
+
+ if (so->opt.flags & CAN_ISOTP_HALF_DUPLEX) {
+ /* check rx/tx path half duplex expectations */
+ if ((so->tx.state != ISOTP_IDLE && n_pci_type != N_PCI_FC) ||
+ (so->rx.state != ISOTP_IDLE && n_pci_type == N_PCI_FC))
+ return;
+ }
+
+ switch (n_pci_type) {
+ case N_PCI_FC:
+ /* tx path: flow control frame containing the FC parameters */
+ isotp_rcv_fc(so, cf, ae);
+ break;
+
+ case N_PCI_SF:
+ /* rx path: single frame
+ *
+ * As we do not have a rx.ll_dl configuration, we can only test
+ * if the CAN frame's payload length matches the LL_DL == 8
+ * requirements - no matter if it's CAN 2.0 or CAN FD
+ */
+
+ /* get the SF_DL from the N_PCI byte */
+ sf_dl = cf->data[ae] & 0x0F;
+
+ if (cf->len <= CAN_MAX_DLEN) {
+ isotp_rcv_sf(sk, cf, SF_PCI_SZ4 + ae, skb, sf_dl);
+ } else {
+ if (skb->len == CANFD_MTU) {
+ /* We have a CAN FD frame and CAN_DL is greater than 8:
+ * Only frames with the SF_DL == 0 ESC value are valid.
+ *
+ * If so take care of the increased SF PCI size
+ * (SF_PCI_SZ8) to point to the message content behind
+ * the extended SF PCI info and get the real SF_DL
+ * length value from the formerly first data byte.
+ */
+ if (sf_dl == 0)
+ isotp_rcv_sf(sk, cf, SF_PCI_SZ8 + ae, skb,
+ cf->data[SF_PCI_SZ4 + ae]);
+ }
+ }
+ break;
+
+ case N_PCI_FF:
+ /* rx path: first frame */
+ isotp_rcv_ff(sk, cf, ae);
+ break;
+
+ case N_PCI_CF:
+ /* rx path: consecutive frame */
+ isotp_rcv_cf(sk, cf, ae, skb);
+ break;
+ }
+}
+
+static void isotp_fill_dataframe(struct canfd_frame *cf, struct isotp_sock *so,
+ int ae, int off)
+{
+ int pcilen = N_PCI_SZ + ae + off;
+ int space = so->tx.ll_dl - pcilen;
+ int num = min_t(int, so->tx.len - so->tx.idx, space);
+ int i;
+
+ cf->can_id = so->txid;
+ cf->len = num + pcilen;
+
+ if (num < space) {
+ if (so->opt.flags & CAN_ISOTP_TX_PADDING) {
+ /* user requested padding */
+ cf->len = padlen(cf->len);
+ memset(cf->data, so->opt.txpad_content, cf->len);
+ } else if (cf->len > CAN_MAX_DLEN) {
+ /* mandatory padding for CAN FD frames */
+ cf->len = padlen(cf->len);
+ memset(cf->data, CAN_ISOTP_DEFAULT_PAD_CONTENT,
+ cf->len);
+ }
+ }
+
+ for (i = 0; i < num; i++)
+ cf->data[pcilen + i] = so->tx.buf[so->tx.idx++];
+
+ if (ae)
+ cf->data[0] = so->opt.ext_address;
+}
+
+static void isotp_create_fframe(struct canfd_frame *cf, struct isotp_sock *so,
+ int ae)
+{
+ int i;
+ int ff_pci_sz;
+
+ cf->can_id = so->txid;
+ cf->len = so->tx.ll_dl;
+ if (ae)
+ cf->data[0] = so->opt.ext_address;
+
+ /* create N_PCI bytes with 12/32 bit FF_DL data length */
+ if (so->tx.len > 4095) {
+ /* use 32 bit FF_DL notation */
+ cf->data[ae] = N_PCI_FF;
+ cf->data[ae + 1] = 0;
+ cf->data[ae + 2] = (u8)(so->tx.len >> 24) & 0xFFU;
+ cf->data[ae + 3] = (u8)(so->tx.len >> 16) & 0xFFU;
+ cf->data[ae + 4] = (u8)(so->tx.len >> 8) & 0xFFU;
+ cf->data[ae + 5] = (u8)so->tx.len & 0xFFU;
+ ff_pci_sz = FF_PCI_SZ32;
+ } else {
+ /* use 12 bit FF_DL notation */
+ cf->data[ae] = (u8)(so->tx.len >> 8) | N_PCI_FF;
+ cf->data[ae + 1] = (u8)so->tx.len & 0xFFU;
+ ff_pci_sz = FF_PCI_SZ12;
+ }
+
+ /* add first data bytes depending on ae */
+ for (i = ae + ff_pci_sz; i < so->tx.ll_dl; i++)
+ cf->data[i] = so->tx.buf[so->tx.idx++];
+
+ so->tx.sn = 1;
+ so->tx.state = ISOTP_WAIT_FIRST_FC;
+}
+
+static enum hrtimer_restart isotp_tx_timer_handler(struct hrtimer *hrtimer)
+{
+ struct isotp_sock *so = container_of(hrtimer, struct isotp_sock,
+ txtimer);
+ struct sock *sk = &so->sk;
+ struct sk_buff *skb;
+ struct net_device *dev;
+ struct canfd_frame *cf;
+ enum hrtimer_restart restart = HRTIMER_NORESTART;
+ int can_send_ret;
+ int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
+
+ switch (so->tx.state) {
+ case ISOTP_WAIT_FC:
+ case ISOTP_WAIT_FIRST_FC:
+
+ /* we did not get any flow control frame in time */
+
+ /* report 'communication error on send' */
+ sk->sk_err = ECOMM;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+
+ /* reset tx state */
+ so->tx.state = ISOTP_IDLE;
+ wake_up_interruptible(&so->wait);
+ break;
+
+ case ISOTP_SENDING:
+
+ /* push out the next segmented pdu */
+ dev = dev_get_by_index(sock_net(sk), so->ifindex);
+ if (!dev)
+ break;
+
+isotp_tx_burst:
+ skb = alloc_skb(so->ll.mtu + sizeof(struct can_skb_priv),
+ GFP_ATOMIC);
+ if (!skb) {
+ dev_put(dev);
+ break;
+ }
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ cf = (struct canfd_frame *)skb->data;
+ skb_put(skb, so->ll.mtu);
+
+ /* create consecutive frame */
+ isotp_fill_dataframe(cf, so, ae, 0);
+
+ /* place consecutive frame N_PCI in appropriate index */
+ cf->data[ae] = N_PCI_CF | so->tx.sn++;
+ so->tx.sn %= 16;
+ so->tx.bs++;
+
+ if (so->ll.mtu == CANFD_MTU)
+ cf->flags = so->ll.tx_flags;
+
+ skb->dev = dev;
+ can_skb_set_owner(skb, sk);
+
+ can_send_ret = can_send(skb, 1);
+ if (can_send_ret)
+ pr_notice_once("can-isotp: %s: can_send_ret %d\n",
+ __func__, can_send_ret);
+
+ if (so->tx.idx >= so->tx.len) {
+ /* we are done */
+ so->tx.state = ISOTP_IDLE;
+ dev_put(dev);
+ wake_up_interruptible(&so->wait);
+ break;
+ }
+
+ if (so->txfc.bs && so->tx.bs >= so->txfc.bs) {
+ /* stop and wait for FC */
+ so->tx.state = ISOTP_WAIT_FC;
+ dev_put(dev);
+ hrtimer_set_expires(&so->txtimer,
+ ktime_add(ktime_get(),
+ ktime_set(1, 0)));
+ restart = HRTIMER_RESTART;
+ break;
+ }
+
+ /* no gap between data frames needed => use burst mode */
+ if (!so->tx_gap)
+ goto isotp_tx_burst;
+
+ /* start timer to send next data frame with correct delay */
+ dev_put(dev);
+ hrtimer_set_expires(&so->txtimer,
+ ktime_add(ktime_get(), so->tx_gap));
+ restart = HRTIMER_RESTART;
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ return restart;
+}
+
+static int isotp_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+{
+ struct sock *sk = sock->sk;
+ struct isotp_sock *so = isotp_sk(sk);
+ struct sk_buff *skb;
+ struct net_device *dev;
+ struct canfd_frame *cf;
+ int ae = (so->opt.flags & CAN_ISOTP_EXTEND_ADDR) ? 1 : 0;
+ int wait_tx_done = (so->opt.flags & CAN_ISOTP_WAIT_TX_DONE) ? 1 : 0;
+ int off;
+ int err;
+
+ if (!so->bound)
+ return -EADDRNOTAVAIL;
+
+ /* we do not support multiple buffers - for now */
+ if (so->tx.state != ISOTP_IDLE || wq_has_sleeper(&so->wait)) {
+ if (msg->msg_flags & MSG_DONTWAIT)
+ return -EAGAIN;
+
+ /* wait for complete transmission of current pdu */
+ wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ }
+
+ if (!size || size > MAX_MSG_LENGTH)
+ return -EINVAL;
+
+ err = memcpy_from_msg(so->tx.buf, msg, size);
+ if (err < 0)
+ return err;
+
+ dev = dev_get_by_index(sock_net(sk), so->ifindex);
+ if (!dev)
+ return -ENXIO;
+
+ skb = sock_alloc_send_skb(sk, so->ll.mtu + sizeof(struct can_skb_priv),
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (!skb) {
+ dev_put(dev);
+ return err;
+ }
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ so->tx.state = ISOTP_SENDING;
+ so->tx.len = size;
+ so->tx.idx = 0;
+
+ cf = (struct canfd_frame *)skb->data;
+ skb_put(skb, so->ll.mtu);
+
+ /* take care of a potential SF_DL ESC offset for TX_DL > 8 */
+ off = (so->tx.ll_dl > CAN_MAX_DLEN) ? 1 : 0;
+
+ /* check for single frame transmission depending on TX_DL */
+ if (size <= so->tx.ll_dl - SF_PCI_SZ4 - ae - off) {
+ /* The message size generally fits into a SingleFrame - good.
+ *
+ * SF_DL ESC offset optimization:
+ *
+ * When TX_DL is greater than 8 but the message would still fit
+ * into an 8 byte CAN frame, we can omit the offset.
+ * This prevents a protocol caused length extension from
+ * CAN_DL = 8 to CAN_DL = 12 due to the SF_DL ESC handling.
+ */
+ if (size <= CAN_MAX_DLEN - SF_PCI_SZ4 - ae)
+ off = 0;
+
+ isotp_fill_dataframe(cf, so, ae, off);
+
+ /* place single frame N_PCI w/o length in appropriate index */
+ cf->data[ae] = N_PCI_SF;
+
+ /* place SF_DL size value depending on the SF_DL ESC offset */
+ if (off)
+ cf->data[SF_PCI_SZ4 + ae] = size;
+ else
+ cf->data[ae] |= size;
+
+ so->tx.state = ISOTP_IDLE;
+ wake_up_interruptible(&so->wait);
+
+ /* don't enable wait queue for a single frame transmission */
+ wait_tx_done = 0;
+ } else {
+ /* send first frame and wait for FC */
+
+ isotp_create_fframe(cf, so, ae);
+
+ /* start timeout for FC */
+ hrtimer_start(&so->txtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
+ }
+
+ /* send the first or only CAN frame */
+ if (so->ll.mtu == CANFD_MTU)
+ cf->flags = so->ll.tx_flags;
+
+ skb->dev = dev;
+ skb->sk = sk;
+ err = can_send(skb, 1);
+ dev_put(dev);
+ if (err) {
+ pr_notice_once("can-isotp: %s: can_send_ret %d\n",
+ __func__, err);
+ return err;
+ }
+
+ if (wait_tx_done) {
+ /* wait for complete transmission of current pdu */
+ wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+ }
+
+ return size;
+}
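
The single frame path in isotp_sendmsg() produces one of two SF PCI layouts depending on the ESC offset; a hedged summary for normal addressing (ae == 0):

/* Editor's illustration of the Single Frame PCI variants built above:
 *
 *   payload fits in 8 bytes:  data[0] = 0x00 | SF_DL   (SF_DL 1..7)
 *   CAN FD with TX_DL > 8:    data[0] = 0x00, data[1] = SF_DL (<= 62)
 */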
+
+static int isotp_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int err = 0;
+ int noblock;
+
+ noblock = flags & MSG_DONTWAIT;
+ flags &= ~MSG_DONTWAIT;
+
+ skb = skb_recv_datagram(sk, flags, noblock, &err);
+ if (!skb)
+ return err;
+
+ if (size < skb->len)
+ msg->msg_flags |= MSG_TRUNC;
+ else
+ size = skb->len;
+
+ err = memcpy_to_msg(msg, skb->data, size);
+ if (err < 0) {
+ skb_free_datagram(sk, skb);
+ return err;
+ }
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ if (msg->msg_name) {
+ msg->msg_namelen = sizeof(struct sockaddr_can);
+ memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
+ }
+
+ skb_free_datagram(sk, skb);
+
+ return size;
+}
+
+static int isotp_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct isotp_sock *so;
+ struct net *net;
+
+ if (!sk)
+ return 0;
+
+ so = isotp_sk(sk);
+ net = sock_net(sk);
+
+ /* wait for complete transmission of current pdu */
+ wait_event_interruptible(so->wait, so->tx.state == ISOTP_IDLE);
+
+ unregister_netdevice_notifier(&so->notifier);
+
+ lock_sock(sk);
+
+ hrtimer_cancel(&so->txtimer);
+ hrtimer_cancel(&so->rxtimer);
+
+ /* remove current filters & unregister */
+ if (so->bound) {
+ if (so->ifindex) {
+ struct net_device *dev;
+
+ dev = dev_get_by_index(net, so->ifindex);
+ if (dev) {
+ can_rx_unregister(net, dev, so->rxid,
+ SINGLE_MASK(so->rxid),
+ isotp_rcv, sk);
+ dev_put(dev);
+ }
+ }
+ }
+
+ so->ifindex = 0;
+ so->bound = 0;
+
+ sock_orphan(sk);
+ sock->sk = NULL;
+
+ release_sock(sk);
+ sock_put(sk);
+
+ return 0;
+}
+
+static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len)
+{
+ struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ struct sock *sk = sock->sk;
+ struct isotp_sock *so = isotp_sk(sk);
+ struct net *net = sock_net(sk);
+ int ifindex;
+ struct net_device *dev;
+ int err = 0;
+ int notify_enetdown = 0;
+
+ if (len < CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.tp))
+ return -EINVAL;
+
+ if (addr->can_addr.tp.rx_id == addr->can_addr.tp.tx_id)
+ return -EADDRNOTAVAIL;
+
+ if ((addr->can_addr.tp.rx_id | addr->can_addr.tp.tx_id) &
+ (CAN_ERR_FLAG | CAN_RTR_FLAG))
+ return -EADDRNOTAVAIL;
+
+ if (!addr->can_ifindex)
+ return -ENODEV;
+
+ lock_sock(sk);
+
+ if (so->bound && addr->can_ifindex == so->ifindex &&
+ addr->can_addr.tp.rx_id == so->rxid &&
+ addr->can_addr.tp.tx_id == so->txid)
+ goto out;
+
+ dev = dev_get_by_index(net, addr->can_ifindex);
+ if (!dev) {
+ err = -ENODEV;
+ goto out;
+ }
+ if (dev->type != ARPHRD_CAN) {
+ dev_put(dev);
+ err = -ENODEV;
+ goto out;
+ }
+ if (dev->mtu < so->ll.mtu) {
+ dev_put(dev);
+ err = -EINVAL;
+ goto out;
+ }
+ if (!(dev->flags & IFF_UP))
+ notify_enetdown = 1;
+
+ ifindex = dev->ifindex;
+
+ can_rx_register(net, dev, addr->can_addr.tp.rx_id,
+ SINGLE_MASK(addr->can_addr.tp.rx_id), isotp_rcv, sk,
+ "isotp", sk);
+
+ dev_put(dev);
+
+ if (so->bound) {
+ /* unregister old filter */
+ if (so->ifindex) {
+ dev = dev_get_by_index(net, so->ifindex);
+ if (dev) {
+ can_rx_unregister(net, dev, so->rxid,
+ SINGLE_MASK(so->rxid),
+ isotp_rcv, sk);
+ dev_put(dev);
+ }
+ }
+ }
+
+ /* switch to new settings */
+ so->ifindex = ifindex;
+ so->rxid = addr->can_addr.tp.rx_id;
+ so->txid = addr->can_addr.tp.tx_id;
+ so->bound = 1;
+
+out:
+ release_sock(sk);
+
+ if (notify_enetdown) {
+ sk->sk_err = ENETDOWN;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+ }
+
+ return err;
+}
+
+static int isotp_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
+{
+ struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
+ struct sock *sk = sock->sk;
+ struct isotp_sock *so = isotp_sk(sk);
+
+ if (peer)
+ return -EOPNOTSUPP;
+
+ addr->can_family = AF_CAN;
+ addr->can_ifindex = so->ifindex;
+ addr->can_addr.tp.rx_id = so->rxid;
+ addr->can_addr.tp.tx_id = so->txid;
+
+ return sizeof(*addr);
+}
+
+static int isotp_setsockopt(struct socket *sock, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct sock *sk = sock->sk;
+ struct isotp_sock *so = isotp_sk(sk);
+ int ret = 0;
+
+ if (level != SOL_CAN_ISOTP)
+ return -EINVAL;
+
+ switch (optname) {
+ case CAN_ISOTP_OPTS:
+ if (optlen != sizeof(struct can_isotp_options))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&so->opt, optval, optlen))
+ return -EFAULT;
+
+ /* no separate rx_ext_address is given => use ext_address */
+ if (!(so->opt.flags & CAN_ISOTP_RX_EXT_ADDR))
+ so->opt.rx_ext_address = so->opt.ext_address;
+ break;
+
+ case CAN_ISOTP_RECV_FC:
+ if (optlen != sizeof(struct can_isotp_fc_options))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&so->rxfc, optval, optlen))
+ return -EFAULT;
+ break;
+
+ case CAN_ISOTP_TX_STMIN:
+ if (optlen != sizeof(u32))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&so->force_tx_stmin, optval, optlen))
+ return -EFAULT;
+ break;
+
+ case CAN_ISOTP_RX_STMIN:
+ if (optlen != sizeof(u32))
+ return -EINVAL;
+
+ if (copy_from_sockptr(&so->force_rx_stmin, optval, optlen))
+ return -EFAULT;
+ break;
+
+ case CAN_ISOTP_LL_OPTS:
+ if (optlen == sizeof(struct can_isotp_ll_options)) {
+ struct can_isotp_ll_options ll;
+
+ if (copy_from_sockptr(&ll, optval, optlen))
+ return -EFAULT;
+
+ /* check for correct ISO 11898-1 DLC data length */
+ if (ll.tx_dl != padlen(ll.tx_dl))
+ return -EINVAL;
+
+ if (ll.mtu != CAN_MTU && ll.mtu != CANFD_MTU)
+ return -EINVAL;
+
+ if (ll.mtu == CAN_MTU && ll.tx_dl > CAN_MAX_DLEN)
+ return -EINVAL;
+
+ memcpy(&so->ll, &ll, sizeof(ll));
+
+ /* set ll_dl for tx path to similar place as for rx */
+ so->tx.ll_dl = ll.tx_dl;
+ } else {
+ return -EINVAL;
+ }
+ break;
+
+ default:
+ ret = -ENOPROTOOPT;
+ }
+
+ return ret;
+}
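
As a follow-up to the earlier user-space sketch, configuring the link layer for CAN FD via CAN_ISOTP_LL_OPTS might look like this (a hedged sketch; "s" is the not-yet-bound socket from the example above):

struct can_isotp_ll_options ll = {
	.mtu	  = CANFD_MTU,	/* 72 byte CAN FD frames */
	.tx_dl	  = 64,		/* TX_DL: payload bytes per sent frame */
	.tx_flags = CANFD_BRS,	/* bit rate switch in the data phase */
};

if (setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_LL_OPTS, &ll, sizeof(ll)) < 0)
	perror("CAN_ISOTP_LL_OPTS");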
+
+static int isotp_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+ struct isotp_sock *so = isotp_sk(sk);
+ int len;
+ void *val;
+
+ if (level != SOL_CAN_ISOTP)
+ return -EINVAL;
+ if (get_user(len, optlen))
+ return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
+ switch (optname) {
+ case CAN_ISOTP_OPTS:
+ len = min_t(int, len, sizeof(struct can_isotp_options));
+ val = &so->opt;
+ break;
+
+ case CAN_ISOTP_RECV_FC:
+ len = min_t(int, len, sizeof(struct can_isotp_fc_options));
+ val = &so->rxfc;
+ break;
+
+ case CAN_ISOTP_TX_STMIN:
+ len = min_t(int, len, sizeof(u32));
+ val = &so->force_tx_stmin;
+ break;
+
+ case CAN_ISOTP_RX_STMIN:
+ len = min_t(int, len, sizeof(u32));
+ val = &so->force_rx_stmin;
+ break;
+
+ case CAN_ISOTP_LL_OPTS:
+ len = min_t(int, len, sizeof(struct can_isotp_ll_options));
+ val = &so->ll;
+ break;
+
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, val, len))
+ return -EFAULT;
+ return 0;
+}
+
+static int isotp_notifier(struct notifier_block *nb, unsigned long msg,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct isotp_sock *so = container_of(nb, struct isotp_sock, notifier);
+ struct sock *sk = &so->sk;
+
+ if (!net_eq(dev_net(dev), sock_net(sk)))
+ return NOTIFY_DONE;
+
+ if (dev->type != ARPHRD_CAN)
+ return NOTIFY_DONE;
+
+ if (so->ifindex != dev->ifindex)
+ return NOTIFY_DONE;
+
+ switch (msg) {
+ case NETDEV_UNREGISTER:
+ lock_sock(sk);
+ /* remove current filters & unregister */
+ if (so->bound)
+ can_rx_unregister(dev_net(dev), dev, so->rxid,
+ SINGLE_MASK(so->rxid),
+ isotp_rcv, sk);
+
+ so->ifindex = 0;
+ so->bound = 0;
+ release_sock(sk);
+
+ sk->sk_err = ENODEV;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+ break;
+
+ case NETDEV_DOWN:
+ sk->sk_err = ENETDOWN;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_error_report(sk);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int isotp_init(struct sock *sk)
+{
+ struct isotp_sock *so = isotp_sk(sk);
+
+ so->ifindex = 0;
+ so->bound = 0;
+
+ so->opt.flags = CAN_ISOTP_DEFAULT_FLAGS;
+ so->opt.ext_address = CAN_ISOTP_DEFAULT_EXT_ADDRESS;
+ so->opt.rx_ext_address = CAN_ISOTP_DEFAULT_EXT_ADDRESS;
+ so->opt.rxpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
+ so->opt.txpad_content = CAN_ISOTP_DEFAULT_PAD_CONTENT;
+ so->opt.frame_txtime = CAN_ISOTP_DEFAULT_FRAME_TXTIME;
+ so->rxfc.bs = CAN_ISOTP_DEFAULT_RECV_BS;
+ so->rxfc.stmin = CAN_ISOTP_DEFAULT_RECV_STMIN;
+ so->rxfc.wftmax = CAN_ISOTP_DEFAULT_RECV_WFTMAX;
+ so->ll.mtu = CAN_ISOTP_DEFAULT_LL_MTU;
+ so->ll.tx_dl = CAN_ISOTP_DEFAULT_LL_TX_DL;
+ so->ll.tx_flags = CAN_ISOTP_DEFAULT_LL_TX_FLAGS;
+
+ /* set ll_dl for tx path to similar place as for rx */
+ so->tx.ll_dl = so->ll.tx_dl;
+
+ so->rx.state = ISOTP_IDLE;
+ so->tx.state = ISOTP_IDLE;
+
+ hrtimer_init(&so->rxtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ so->rxtimer.function = isotp_rx_timer_handler;
+ hrtimer_init(&so->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ so->txtimer.function = isotp_tx_timer_handler;
+
+ init_waitqueue_head(&so->wait);
+
+ so->notifier.notifier_call = isotp_notifier;
+ register_netdevice_notifier(&so->notifier);
+
+ return 0;
+}
+
+static int isotp_sock_no_ioctlcmd(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ /* no ioctls for socket layer -> hand it down to NIC layer */
+ return -ENOIOCTLCMD;
+}
+
+static const struct proto_ops isotp_ops = {
+ .family = PF_CAN,
+ .release = isotp_release,
+ .bind = isotp_bind,
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = isotp_getname,
+ .poll = datagram_poll,
+ .ioctl = isotp_sock_no_ioctlcmd,
+ .gettstamp = sock_gettstamp,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = isotp_setsockopt,
+ .getsockopt = isotp_getsockopt,
+ .sendmsg = isotp_sendmsg,
+ .recvmsg = isotp_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static struct proto isotp_proto __read_mostly = {
+ .name = "CAN_ISOTP",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct isotp_sock),
+ .init = isotp_init,
+};
+
+static const struct can_proto isotp_can_proto = {
+ .type = SOCK_DGRAM,
+ .protocol = CAN_ISOTP,
+ .ops = &isotp_ops,
+ .prot = &isotp_proto,
+};
+
+static __init int isotp_module_init(void)
+{
+ int err;
+
+ pr_info("can: isotp protocol\n");
+
+ err = can_proto_register(&isotp_can_proto);
+ if (err < 0)
+ pr_err("can: registration of isotp protocol failed\n");
+
+ return err;
+}
+
+static __exit void isotp_module_exit(void)
+{
+ can_proto_unregister(&isotp_can_proto);
+}
+
+module_init(isotp_module_init);
+module_exit(isotp_module_exit);
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index 0cec4152f979..e09d087ba240 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -580,6 +580,7 @@ sk_buff *j1939_tp_tx_dat_new(struct j1939_priv *priv,
skb->dev = priv->ndev;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
/* reserve CAN header */
skb_reserve(skb, offsetof(struct can_frame, data));
@@ -1487,6 +1488,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
skb->dev = priv->ndev;
can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = priv->ndev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
skcb = j1939_skb_to_cb(skb);
memcpy(skcb, rel_skcb, sizeof(*skcb));
diff --git a/net/can/proc.c b/net/can/proc.c
index e6881bfc3ed1..550928b8b8a2 100644
--- a/net/can/proc.c
+++ b/net/can/proc.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
* proc.c - procfs support for Protocol family CAN core module
*
@@ -54,7 +54,6 @@
* proc filenames for the PF_CAN core
*/
-#define CAN_PROC_VERSION "version"
#define CAN_PROC_STATS "stats"
#define CAN_PROC_RESET_STATS "reset_stats"
#define CAN_PROC_RCVLIST_ALL "rcvlist_all"
@@ -293,12 +292,6 @@ static int can_reset_stats_proc_show(struct seq_file *m, void *v)
return 0;
}
-static int can_version_proc_show(struct seq_file *m, void *v)
-{
- seq_printf(m, "%s\n", CAN_VERSION_STRING);
- return 0;
-}
-
static inline void can_rcvlist_proc_show_one(struct seq_file *m, int idx,
struct net_device *dev,
struct can_dev_rcv_lists *dev_rcv_lists)
@@ -441,8 +434,6 @@ void can_init_proc(struct net *net)
}
/* own procfs entries from the AF_CAN core */
- net->can.pde_version = proc_create_net_single(CAN_PROC_VERSION, 0644,
- net->can.proc_dir, can_version_proc_show, NULL);
net->can.pde_stats = proc_create_net_single(CAN_PROC_STATS, 0644,
net->can.proc_dir, can_stats_proc_show, NULL);
net->can.pde_reset_stats = proc_create_net_single(CAN_PROC_RESET_STATS,
@@ -471,9 +462,6 @@ void can_init_proc(struct net *net)
*/
void can_remove_proc(struct net *net)
{
- if (net->can.pde_version)
- remove_proc_entry(CAN_PROC_VERSION, net->can.proc_dir);
-
if (net->can.pde_stats)
remove_proc_entry(CAN_PROC_STATS, net->can.proc_dir);
diff --git a/net/can/raw.c b/net/can/raw.c
index 94a9405658dc..6ec8aa1d0da4 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* raw.c - Raw sockets for protocol family CAN
*
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
@@ -55,8 +55,6 @@
#include <net/sock.h>
#include <net/net_namespace.h>
-#define CAN_RAW_VERSION CAN_VERSION
-
MODULE_DESCRIPTION("PF_CAN raw protocol");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
@@ -154,16 +152,16 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (!skb)
return;
- /* Put the datagram to the queue so that raw_recvmsg() can
- * get it from there. We need to pass the interface index to
- * raw_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
- * containing the interface index.
+ /* Put the datagram to the queue so that raw_recvmsg() can get
+ * it from there. We need to pass the interface index to
+ * raw_recvmsg(). We pass a whole struct sockaddr_can in
+ * skb->cb containing the interface index.
*/
sock_skb_cb_check_size(sizeof(struct sockaddr_can));
addr = (struct sockaddr_can *)skb->cb;
memset(addr, 0, sizeof(*addr));
- addr->can_family = AF_CAN;
+ addr->can_family = AF_CAN;
addr->can_ifindex = skb->dev->ifindex;
/* add CAN specific message flags for raw_recvmsg() */
@@ -290,8 +288,8 @@ static int raw_notifier(struct notifier_block *nb,
kfree(ro->filter);
ro->ifindex = 0;
- ro->bound = 0;
- ro->count = 0;
+ ro->bound = 0;
+ ro->count = 0;
release_sock(sk);
sk->sk_err = ENODEV;
@@ -374,8 +372,8 @@ static int raw_release(struct socket *sock)
kfree(ro->filter);
ro->ifindex = 0;
- ro->bound = 0;
- ro->count = 0;
+ ro->bound = 0;
+ ro->count = 0;
free_percpu(ro->uniq);
sock_orphan(sk);
@@ -773,7 +771,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
skb_setup_tx_timestamp(skb, sk->sk_tsflags);
skb->dev = dev;
- skb->sk = sk;
+ skb->sk = sk;
skb->priority = sk->sk_priority;
err = can_send(skb, ro->loopback);
@@ -801,8 +799,12 @@ static int raw_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int err = 0;
int noblock;
- noblock = flags & MSG_DONTWAIT;
- flags &= ~MSG_DONTWAIT;
+ noblock = flags & MSG_DONTWAIT;
+ flags &= ~MSG_DONTWAIT;
+
+ if (flags & MSG_ERRQUEUE)
+ return sock_recv_errqueue(sk, msg, size,
+ SOL_CAN_RAW, SCM_CAN_RAW_ERRQUEUE);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
@@ -881,7 +883,7 @@ static __init int raw_module_init(void)
{
int err;
- pr_info("can: raw protocol (rev " CAN_RAW_VERSION ")\n");
+ pr_info("can: raw protocol\n");
err = can_proto_register(&raw_can_proto);
if (err < 0)
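
The MSG_ERRQUEUE branch added to raw_recvmsg() above hands error-queue reads to sock_recv_errqueue() with the SOL_CAN_RAW / SCM_CAN_RAW_ERRQUEUE pair. Below is a minimal userspace sketch of draining that queue; it assumes an already created and bound CAN_RAW socket and kernel headers that provide SCM_CAN_RAW_ERRQUEUE, and how the errors get queued (e.g. via SO_TXTIME) is out of scope here.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <linux/errqueue.h>

/* Drain one message from the CAN_RAW error queue of socket "s". */
static void drain_can_errqueue(int s)
{
	char data[sizeof(struct can_frame)];
	char ctrl[CMSG_SPACE(sizeof(struct sock_extended_err))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = ctrl,
		.msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	/* MSG_ERRQUEUE now reaches sock_recv_errqueue() in raw_recvmsg() */
	if (recvmsg(s, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_CAN_RAW &&
		    cmsg->cmsg_type == SCM_CAN_RAW_ERRQUEUE) {
			struct sock_extended_err serr;

			memcpy(&serr, CMSG_DATA(cmsg), sizeof(serr));
			printf("errqueue: ee_errno=%u ee_origin=%u\n",
			       serr.ee_errno, serr.ee_origin);
		}
	}
}
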
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index b988f48153a4..c907f0dc7f87 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -7,97 +7,13 @@
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
+#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
-#define SK_STORAGE_CREATE_FLAG_MASK \
- (BPF_F_NO_PREALLOC | BPF_F_CLONE)
-
-struct bucket {
- struct hlist_head list;
- raw_spinlock_t lock;
-};
-
-/* The map is not the primary owner of a bpf_sk_storage_elem.
- * Instead, the sk->sk_bpf_storage is.
- *
- * The map (bpf_sk_storage_map) is for two purposes
- * 1. Define the size of the "sk local storage". It is
- * the map's value_size.
- *
- * 2. Maintain a list to keep track of all elems such
- * that they can be cleaned up during the map destruction.
- *
- * When a bpf local storage is being looked up for a
- * particular sk, the "bpf_map" pointer is actually used
- * as the "key" to search in the list of elem in
- * sk->sk_bpf_storage.
- *
- * Hence, consider sk->sk_bpf_storage is the mini-map
- * with the "bpf_map" pointer as the searching key.
- */
-struct bpf_sk_storage_map {
- struct bpf_map map;
- /* Lookup elem does not require accessing the map.
- *
- * Updating/Deleting requires a bucket lock to
- * link/unlink the elem from the map. Having
- * multiple buckets reduces contention.
- */
- struct bucket *buckets;
- u32 bucket_log;
- u16 elem_size;
- u16 cache_idx;
-};
-
-struct bpf_sk_storage_data {
- /* smap is used as the searching key when looking up
- * from sk->sk_bpf_storage.
- *
- * Put it in the same cacheline as the data to minimize
- * the number of cachelines access during the cache hit case.
- */
- struct bpf_sk_storage_map __rcu *smap;
- u8 data[] __aligned(8);
-};
-
-/* Linked to bpf_sk_storage and bpf_sk_storage_map */
-struct bpf_sk_storage_elem {
- struct hlist_node map_node; /* Linked to bpf_sk_storage_map */
- struct hlist_node snode; /* Linked to bpf_sk_storage */
- struct bpf_sk_storage __rcu *sk_storage;
- struct rcu_head rcu;
- /* 8 bytes hole */
- /* The data is stored in another cacheline to minimize
- * the number of cachelines access during a cache hit.
- */
- struct bpf_sk_storage_data sdata ____cacheline_aligned;
-};
-
-#define SELEM(_SDATA) container_of((_SDATA), struct bpf_sk_storage_elem, sdata)
-#define SDATA(_SELEM) (&(_SELEM)->sdata)
-#define BPF_SK_STORAGE_CACHE_SIZE 16
-
-static DEFINE_SPINLOCK(cache_idx_lock);
-static u64 cache_idx_usage_counts[BPF_SK_STORAGE_CACHE_SIZE];
-
-struct bpf_sk_storage {
- struct bpf_sk_storage_data __rcu *cache[BPF_SK_STORAGE_CACHE_SIZE];
- struct hlist_head list; /* List of bpf_sk_storage_elem */
- struct sock *sk; /* The sk that owns the above "list" of
- * bpf_sk_storage_elem.
- */
- struct rcu_head rcu;
- raw_spinlock_t lock; /* Protect adding/removing from the "list" */
-};
-
-static struct bucket *select_bucket(struct bpf_sk_storage_map *smap,
- struct bpf_sk_storage_elem *selem)
-{
- return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
-}
+DEFINE_BPF_STORAGE_CACHE(sk_cache);
static int omem_charge(struct sock *sk, unsigned int size)
{
@@ -111,445 +27,38 @@ static int omem_charge(struct sock *sk, unsigned int size)
return -ENOMEM;
}
-static bool selem_linked_to_sk(const struct bpf_sk_storage_elem *selem)
-{
- return !hlist_unhashed(&selem->snode);
-}
-
-static bool selem_linked_to_map(const struct bpf_sk_storage_elem *selem)
-{
- return !hlist_unhashed(&selem->map_node);
-}
-
-static struct bpf_sk_storage_elem *selem_alloc(struct bpf_sk_storage_map *smap,
- struct sock *sk, void *value,
- bool charge_omem)
-{
- struct bpf_sk_storage_elem *selem;
-
- if (charge_omem && omem_charge(sk, smap->elem_size))
- return NULL;
-
- selem = kzalloc(smap->elem_size, GFP_ATOMIC | __GFP_NOWARN);
- if (selem) {
- if (value)
- memcpy(SDATA(selem)->data, value, smap->map.value_size);
- return selem;
- }
-
- if (charge_omem)
- atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
-
- return NULL;
-}
-
-/* sk_storage->lock must be held and selem->sk_storage == sk_storage.
- * The caller must ensure selem->smap is still valid to be
- * dereferenced for its smap->elem_size and smap->cache_idx.
- */
-static bool __selem_unlink_sk(struct bpf_sk_storage *sk_storage,
- struct bpf_sk_storage_elem *selem,
- bool uncharge_omem)
-{
- struct bpf_sk_storage_map *smap;
- bool free_sk_storage;
- struct sock *sk;
-
- smap = rcu_dereference(SDATA(selem)->smap);
- sk = sk_storage->sk;
-
- /* All uncharging on sk->sk_omem_alloc must be done first.
- * sk may be freed once the last selem is unlinked from sk_storage.
- */
- if (uncharge_omem)
- atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
-
- free_sk_storage = hlist_is_singular_node(&selem->snode,
- &sk_storage->list);
- if (free_sk_storage) {
- atomic_sub(sizeof(struct bpf_sk_storage), &sk->sk_omem_alloc);
- sk_storage->sk = NULL;
- /* After this RCU_INIT, sk may be freed and cannot be used */
- RCU_INIT_POINTER(sk->sk_bpf_storage, NULL);
-
- /* sk_storage is not freed now. sk_storage->lock is
- * still held and raw_spin_unlock_bh(&sk_storage->lock)
- * will be done by the caller.
- *
- * Although the unlock will be done under
- * rcu_read_lock(), it is more intuitive to
- * read if kfree_rcu(sk_storage, rcu) is done
- * after the raw_spin_unlock_bh(&sk_storage->lock).
- *
- * Hence, a "bool free_sk_storage" is returned
- * to the caller which then calls the kfree_rcu()
- * after unlock.
- */
- }
- hlist_del_init_rcu(&selem->snode);
- if (rcu_access_pointer(sk_storage->cache[smap->cache_idx]) ==
- SDATA(selem))
- RCU_INIT_POINTER(sk_storage->cache[smap->cache_idx], NULL);
-
- kfree_rcu(selem, rcu);
-
- return free_sk_storage;
-}
-
-static void selem_unlink_sk(struct bpf_sk_storage_elem *selem)
-{
- struct bpf_sk_storage *sk_storage;
- bool free_sk_storage = false;
-
- if (unlikely(!selem_linked_to_sk(selem)))
- /* selem has already been unlinked from sk */
- return;
-
- sk_storage = rcu_dereference(selem->sk_storage);
- raw_spin_lock_bh(&sk_storage->lock);
- if (likely(selem_linked_to_sk(selem)))
- free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
- raw_spin_unlock_bh(&sk_storage->lock);
-
- if (free_sk_storage)
- kfree_rcu(sk_storage, rcu);
-}
-
-static void __selem_link_sk(struct bpf_sk_storage *sk_storage,
- struct bpf_sk_storage_elem *selem)
-{
- RCU_INIT_POINTER(selem->sk_storage, sk_storage);
- hlist_add_head(&selem->snode, &sk_storage->list);
-}
-
-static void selem_unlink_map(struct bpf_sk_storage_elem *selem)
-{
- struct bpf_sk_storage_map *smap;
- struct bucket *b;
-
- if (unlikely(!selem_linked_to_map(selem)))
- /* selem has already been unlinked from smap */
- return;
-
- smap = rcu_dereference(SDATA(selem)->smap);
- b = select_bucket(smap, selem);
- raw_spin_lock_bh(&b->lock);
- if (likely(selem_linked_to_map(selem)))
- hlist_del_init_rcu(&selem->map_node);
- raw_spin_unlock_bh(&b->lock);
-}
-
-static void selem_link_map(struct bpf_sk_storage_map *smap,
- struct bpf_sk_storage_elem *selem)
-{
- struct bucket *b = select_bucket(smap, selem);
-
- raw_spin_lock_bh(&b->lock);
- RCU_INIT_POINTER(SDATA(selem)->smap, smap);
- hlist_add_head_rcu(&selem->map_node, &b->list);
- raw_spin_unlock_bh(&b->lock);
-}
-
-static void selem_unlink(struct bpf_sk_storage_elem *selem)
-{
- /* Always unlink from map before unlinking from sk_storage
- * because selem will be freed after successfully unlinked from
- * the sk_storage.
- */
- selem_unlink_map(selem);
- selem_unlink_sk(selem);
-}
-
-static struct bpf_sk_storage_data *
-__sk_storage_lookup(struct bpf_sk_storage *sk_storage,
- struct bpf_sk_storage_map *smap,
- bool cacheit_lockit)
-{
- struct bpf_sk_storage_data *sdata;
- struct bpf_sk_storage_elem *selem;
-
- /* Fast path (cache hit) */
- sdata = rcu_dereference(sk_storage->cache[smap->cache_idx]);
- if (sdata && rcu_access_pointer(sdata->smap) == smap)
- return sdata;
-
- /* Slow path (cache miss) */
- hlist_for_each_entry_rcu(selem, &sk_storage->list, snode)
- if (rcu_access_pointer(SDATA(selem)->smap) == smap)
- break;
-
- if (!selem)
- return NULL;
-
- sdata = SDATA(selem);
- if (cacheit_lockit) {
- /* spinlock is needed to avoid racing with the
- * parallel delete. Otherwise, publishing an already
- * deleted sdata to the cache will become a use-after-free
- * problem in the next __sk_storage_lookup().
- */
- raw_spin_lock_bh(&sk_storage->lock);
- if (selem_linked_to_sk(selem))
- rcu_assign_pointer(sk_storage->cache[smap->cache_idx],
- sdata);
- raw_spin_unlock_bh(&sk_storage->lock);
- }
-
- return sdata;
-}
-
-static struct bpf_sk_storage_data *
+static struct bpf_local_storage_data *
sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
- struct bpf_sk_storage *sk_storage;
- struct bpf_sk_storage_map *smap;
+ struct bpf_local_storage *sk_storage;
+ struct bpf_local_storage_map *smap;
sk_storage = rcu_dereference(sk->sk_bpf_storage);
if (!sk_storage)
return NULL;
- smap = (struct bpf_sk_storage_map *)map;
- return __sk_storage_lookup(sk_storage, smap, cacheit_lockit);
-}
-
-static int check_flags(const struct bpf_sk_storage_data *old_sdata,
- u64 map_flags)
-{
- if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
- /* elem already exists */
- return -EEXIST;
-
- if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
- /* elem doesn't exist, cannot update it */
- return -ENOENT;
-
- return 0;
-}
-
-static int sk_storage_alloc(struct sock *sk,
- struct bpf_sk_storage_map *smap,
- struct bpf_sk_storage_elem *first_selem)
-{
- struct bpf_sk_storage *prev_sk_storage, *sk_storage;
- int err;
-
- err = omem_charge(sk, sizeof(*sk_storage));
- if (err)
- return err;
-
- sk_storage = kzalloc(sizeof(*sk_storage), GFP_ATOMIC | __GFP_NOWARN);
- if (!sk_storage) {
- err = -ENOMEM;
- goto uncharge;
- }
- INIT_HLIST_HEAD(&sk_storage->list);
- raw_spin_lock_init(&sk_storage->lock);
- sk_storage->sk = sk;
-
- __selem_link_sk(sk_storage, first_selem);
- selem_link_map(smap, first_selem);
- /* Publish sk_storage to sk. sk->sk_lock cannot be acquired.
- * Hence, atomic ops is used to set sk->sk_bpf_storage
- * from NULL to the newly allocated sk_storage ptr.
- *
- * From now on, the sk->sk_bpf_storage pointer is protected
- * by the sk_storage->lock. Hence, when freeing
- * the sk->sk_bpf_storage, the sk_storage->lock must
- * be held before setting sk->sk_bpf_storage to NULL.
- */
- prev_sk_storage = cmpxchg((struct bpf_sk_storage **)&sk->sk_bpf_storage,
- NULL, sk_storage);
- if (unlikely(prev_sk_storage)) {
- selem_unlink_map(first_selem);
- err = -EAGAIN;
- goto uncharge;
-
- /* Note that even though first_selem was linked to smap's
- * bucket->list, first_selem can be freed immediately
- * (instead of kfree_rcu) because
- * bpf_sk_storage_map_free() does a
- * synchronize_rcu() before walking the bucket->list.
- * Hence, no one is accessing selem from the
- * bucket->list under rcu_read_lock().
- */
- }
-
- return 0;
-
-uncharge:
- kfree(sk_storage);
- atomic_sub(sizeof(*sk_storage), &sk->sk_omem_alloc);
- return err;
-}
-
-/* sk cannot be going away because it is linking new elem
- * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
- * Otherwise, it will become a leak (and other memory issues
- * during map destruction).
- */
-static struct bpf_sk_storage_data *sk_storage_update(struct sock *sk,
- struct bpf_map *map,
- void *value,
- u64 map_flags)
-{
- struct bpf_sk_storage_data *old_sdata = NULL;
- struct bpf_sk_storage_elem *selem;
- struct bpf_sk_storage *sk_storage;
- struct bpf_sk_storage_map *smap;
- int err;
-
- /* BPF_EXIST and BPF_NOEXIST cannot be both set */
- if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
- /* BPF_F_LOCK can only be used in a value with spin_lock */
- unlikely((map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
- return ERR_PTR(-EINVAL);
-
- smap = (struct bpf_sk_storage_map *)map;
- sk_storage = rcu_dereference(sk->sk_bpf_storage);
- if (!sk_storage || hlist_empty(&sk_storage->list)) {
- /* Very first elem for this sk */
- err = check_flags(NULL, map_flags);
- if (err)
- return ERR_PTR(err);
-
- selem = selem_alloc(smap, sk, value, true);
- if (!selem)
- return ERR_PTR(-ENOMEM);
-
- err = sk_storage_alloc(sk, smap, selem);
- if (err) {
- kfree(selem);
- atomic_sub(smap->elem_size, &sk->sk_omem_alloc);
- return ERR_PTR(err);
- }
-
- return SDATA(selem);
- }
-
- if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
- /* Hoping to find an old_sdata to do inline update
- * such that it can avoid taking the sk_storage->lock
- * and changing the lists.
- */
- old_sdata = __sk_storage_lookup(sk_storage, smap, false);
- err = check_flags(old_sdata, map_flags);
- if (err)
- return ERR_PTR(err);
- if (old_sdata && selem_linked_to_sk(SELEM(old_sdata))) {
- copy_map_value_locked(map, old_sdata->data,
- value, false);
- return old_sdata;
- }
- }
-
- raw_spin_lock_bh(&sk_storage->lock);
-
- /* Recheck sk_storage->list under sk_storage->lock */
- if (unlikely(hlist_empty(&sk_storage->list))) {
- /* A parallel del is happening and sk_storage is going
- * away. It has just been checked before, so very
- * unlikely. Return instead of retry to keep things
- * simple.
- */
- err = -EAGAIN;
- goto unlock_err;
- }
-
- old_sdata = __sk_storage_lookup(sk_storage, smap, false);
- err = check_flags(old_sdata, map_flags);
- if (err)
- goto unlock_err;
-
- if (old_sdata && (map_flags & BPF_F_LOCK)) {
- copy_map_value_locked(map, old_sdata->data, value, false);
- selem = SELEM(old_sdata);
- goto unlock;
- }
-
- /* sk_storage->lock is held. Hence, we are sure
- * we can unlink and uncharge the old_sdata successfully
- * later. Hence, instead of charging the new selem now
- * and then uncharge the old selem later (which may cause
- * a potential but unnecessary charge failure), avoid taking
- * a charge at all here (the "!old_sdata" check) and the
- * old_sdata will not be uncharged later during __selem_unlink_sk().
- */
- selem = selem_alloc(smap, sk, value, !old_sdata);
- if (!selem) {
- err = -ENOMEM;
- goto unlock_err;
- }
-
- /* First, link the new selem to the map */
- selem_link_map(smap, selem);
-
- /* Second, link (and publish) the new selem to sk_storage */
- __selem_link_sk(sk_storage, selem);
-
- /* Third, remove old selem, SELEM(old_sdata) */
- if (old_sdata) {
- selem_unlink_map(SELEM(old_sdata));
- __selem_unlink_sk(sk_storage, SELEM(old_sdata), false);
- }
-
-unlock:
- raw_spin_unlock_bh(&sk_storage->lock);
- return SDATA(selem);
-
-unlock_err:
- raw_spin_unlock_bh(&sk_storage->lock);
- return ERR_PTR(err);
+ smap = (struct bpf_local_storage_map *)map;
+ return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}
static int sk_storage_delete(struct sock *sk, struct bpf_map *map)
{
- struct bpf_sk_storage_data *sdata;
+ struct bpf_local_storage_data *sdata;
sdata = sk_storage_lookup(sk, map, false);
if (!sdata)
return -ENOENT;
- selem_unlink(SELEM(sdata));
+ bpf_selem_unlink(SELEM(sdata));
return 0;
}
-static u16 cache_idx_get(void)
-{
- u64 min_usage = U64_MAX;
- u16 i, res = 0;
-
- spin_lock(&cache_idx_lock);
-
- for (i = 0; i < BPF_SK_STORAGE_CACHE_SIZE; i++) {
- if (cache_idx_usage_counts[i] < min_usage) {
- min_usage = cache_idx_usage_counts[i];
- res = i;
-
- /* Found a free cache_idx */
- if (!min_usage)
- break;
- }
- }
- cache_idx_usage_counts[res]++;
-
- spin_unlock(&cache_idx_lock);
-
- return res;
-}
-
-static void cache_idx_free(u16 idx)
-{
- spin_lock(&cache_idx_lock);
- cache_idx_usage_counts[idx]--;
- spin_unlock(&cache_idx_lock);
-}
-
/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
- struct bpf_sk_storage_elem *selem;
- struct bpf_sk_storage *sk_storage;
+ struct bpf_local_storage_elem *selem;
+ struct bpf_local_storage *sk_storage;
bool free_sk_storage = false;
struct hlist_node *n;
@@ -565,7 +74,7 @@ void bpf_sk_storage_free(struct sock *sk)
* Thus, no elem can be added-to or deleted-from the
* sk_storage->list by the bpf_prog or by the bpf-map's syscall.
*
- * It is racing with bpf_sk_storage_map_free() alone
+ * It is racing with bpf_local_storage_map_free() alone
* when unlinking elem from the sk_storage->list and
* the map's bucket->list.
*/
@@ -574,8 +83,9 @@ void bpf_sk_storage_free(struct sock *sk)
/* Always unlink from map before unlinking from
* sk_storage.
*/
- selem_unlink_map(selem);
- free_sk_storage = __selem_unlink_sk(sk_storage, selem, true);
+ bpf_selem_unlink_map(selem);
+ free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
+ selem, true);
}
raw_spin_unlock_bh(&sk_storage->lock);
rcu_read_unlock();
@@ -584,132 +94,24 @@ void bpf_sk_storage_free(struct sock *sk)
kfree_rcu(sk_storage, rcu);
}
-static void bpf_sk_storage_map_free(struct bpf_map *map)
-{
- struct bpf_sk_storage_elem *selem;
- struct bpf_sk_storage_map *smap;
- struct bucket *b;
- unsigned int i;
-
- smap = (struct bpf_sk_storage_map *)map;
-
- cache_idx_free(smap->cache_idx);
-
- /* Note that this map might be concurrently cloned from
- * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
- * RCU read section to finish before proceeding. New RCU
- * read sections should be prevented via bpf_map_inc_not_zero.
- */
- synchronize_rcu();
-
- /* bpf prog and the userspace can no longer access this map
- * now. No new selem (of this map) can be added
- * to the sk->sk_bpf_storage or to the map bucket's list.
- *
- * The elem of this map can be cleaned up here
- * or
- * by bpf_sk_storage_free() during __sk_destruct().
- */
- for (i = 0; i < (1U << smap->bucket_log); i++) {
- b = &smap->buckets[i];
-
- rcu_read_lock();
- /* No one is adding to b->list now */
- while ((selem = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(&b->list)),
- struct bpf_sk_storage_elem,
- map_node))) {
- selem_unlink(selem);
- cond_resched_rcu();
- }
- rcu_read_unlock();
- }
-
- /* bpf_sk_storage_free() may still need to access the map.
- * e.g. bpf_sk_storage_free() has unlinked selem from the map
- * which then made the above while((selem = ...)) loop
- * exited immediately.
- *
- * However, the bpf_sk_storage_free() still needs to access
- * the smap->elem_size to do the uncharging in
- * __selem_unlink_sk().
- *
- * Hence, wait another rcu grace period for the
- * bpf_sk_storage_free() to finish.
- */
- synchronize_rcu();
-
- kvfree(smap->buckets);
- kfree(map);
-}
-
-/* U16_MAX is much more than enough for sk local storage
- * considering a tcp_sock is ~2k.
- */
-#define MAX_VALUE_SIZE \
- min_t(u32, \
- (KMALLOC_MAX_SIZE - MAX_BPF_STACK - sizeof(struct bpf_sk_storage_elem)), \
- (U16_MAX - sizeof(struct bpf_sk_storage_elem)))
-
-static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr)
+static void sk_storage_map_free(struct bpf_map *map)
{
- if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK ||
- !(attr->map_flags & BPF_F_NO_PREALLOC) ||
- attr->max_entries ||
- attr->key_size != sizeof(int) || !attr->value_size ||
- /* Enforce BTF for userspace sk dumping */
- !attr->btf_key_type_id || !attr->btf_value_type_id)
- return -EINVAL;
-
- if (!bpf_capable())
- return -EPERM;
-
- if (attr->value_size > MAX_VALUE_SIZE)
- return -E2BIG;
+ struct bpf_local_storage_map *smap;
- return 0;
+ smap = (struct bpf_local_storage_map *)map;
+ bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
+ bpf_local_storage_map_free(smap);
}
-static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
+static struct bpf_map *sk_storage_map_alloc(union bpf_attr *attr)
{
- struct bpf_sk_storage_map *smap;
- unsigned int i;
- u32 nbuckets;
- u64 cost;
- int ret;
-
- smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN);
- if (!smap)
- return ERR_PTR(-ENOMEM);
- bpf_map_init_from_attr(&smap->map, attr);
-
- nbuckets = roundup_pow_of_two(num_possible_cpus());
- /* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
- nbuckets = max_t(u32, 2, nbuckets);
- smap->bucket_log = ilog2(nbuckets);
- cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
-
- ret = bpf_map_charge_init(&smap->map.memory, cost);
- if (ret < 0) {
- kfree(smap);
- return ERR_PTR(ret);
- }
+ struct bpf_local_storage_map *smap;
- smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
- GFP_USER | __GFP_NOWARN);
- if (!smap->buckets) {
- bpf_map_charge_finish(&smap->map.memory);
- kfree(smap);
- return ERR_PTR(-ENOMEM);
- }
-
- for (i = 0; i < nbuckets; i++) {
- INIT_HLIST_HEAD(&smap->buckets[i].list);
- raw_spin_lock_init(&smap->buckets[i].lock);
- }
-
- smap->elem_size = sizeof(struct bpf_sk_storage_elem) + attr->value_size;
- smap->cache_idx = cache_idx_get();
+ smap = bpf_local_storage_map_alloc(attr);
+ if (IS_ERR(smap))
+ return ERR_CAST(smap);
+ smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
return &smap->map;
}
@@ -719,26 +121,9 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key,
return -ENOTSUPP;
}
-static int bpf_sk_storage_map_check_btf(const struct bpf_map *map,
- const struct btf *btf,
- const struct btf_type *key_type,
- const struct btf_type *value_type)
-{
- u32 int_data;
-
- if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
- return -EINVAL;
-
- int_data = *(u32 *)(key_type + 1);
- if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
- return -EINVAL;
-
- return 0;
-}
-
static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
- struct bpf_sk_storage_data *sdata;
+ struct bpf_local_storage_data *sdata;
struct socket *sock;
int fd, err;
@@ -756,14 +141,16 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
void *value, u64 map_flags)
{
- struct bpf_sk_storage_data *sdata;
+ struct bpf_local_storage_data *sdata;
struct socket *sock;
int fd, err;
fd = *(int *)key;
sock = sockfd_lookup(fd, &err);
if (sock) {
- sdata = sk_storage_update(sock->sk, map, value, map_flags);
+ sdata = bpf_local_storage_update(
+ sock->sk, (struct bpf_local_storage_map *)map, value,
+ map_flags);
sockfd_put(sock);
return PTR_ERR_OR_ZERO(sdata);
}
@@ -787,14 +174,14 @@ static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
return err;
}
-static struct bpf_sk_storage_elem *
+static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
- struct bpf_sk_storage_map *smap,
- struct bpf_sk_storage_elem *selem)
+ struct bpf_local_storage_map *smap,
+ struct bpf_local_storage_elem *selem)
{
- struct bpf_sk_storage_elem *copy_selem;
+ struct bpf_local_storage_elem *copy_selem;
- copy_selem = selem_alloc(smap, newsk, NULL, true);
+ copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
if (!copy_selem)
return NULL;
@@ -810,9 +197,9 @@ bpf_sk_storage_clone_elem(struct sock *newsk,
int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
- struct bpf_sk_storage *new_sk_storage = NULL;
- struct bpf_sk_storage *sk_storage;
- struct bpf_sk_storage_elem *selem;
+ struct bpf_local_storage *new_sk_storage = NULL;
+ struct bpf_local_storage *sk_storage;
+ struct bpf_local_storage_elem *selem;
int ret = 0;
RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);
@@ -824,8 +211,8 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
goto out;
hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
- struct bpf_sk_storage_elem *copy_selem;
- struct bpf_sk_storage_map *smap;
+ struct bpf_local_storage_elem *copy_selem;
+ struct bpf_local_storage_map *smap;
struct bpf_map *map;
smap = rcu_dereference(SDATA(selem)->smap);
@@ -833,7 +220,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
continue;
/* Note that for lockless listeners adding new element
- * here can race with cleanup in bpf_sk_storage_map_free.
+ * here can race with cleanup in bpf_local_storage_map_free.
* Try to grab map refcnt to make sure that it's still
* alive and prevent concurrent removal.
*/
@@ -849,10 +236,10 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
}
if (new_sk_storage) {
- selem_link_map(smap, copy_selem);
- __selem_link_sk(new_sk_storage, copy_selem);
+ bpf_selem_link_map(smap, copy_selem);
+ bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
} else {
- ret = sk_storage_alloc(newsk, smap, copy_selem);
+ ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
if (ret) {
kfree(copy_selem);
atomic_sub(smap->elem_size,
@@ -861,7 +248,8 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
goto out;
}
- new_sk_storage = rcu_dereference(copy_selem->sk_storage);
+ new_sk_storage =
+ rcu_dereference(copy_selem->local_storage);
}
bpf_map_put(map);
}
@@ -879,9 +267,9 @@ out:
BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
void *, value, u64, flags)
{
- struct bpf_sk_storage_data *sdata;
+ struct bpf_local_storage_data *sdata;
- if (flags > BPF_SK_STORAGE_GET_F_CREATE)
+ if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
return (unsigned long)NULL;
sdata = sk_storage_lookup(sk, map, true);
@@ -895,7 +283,9 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
* destruction).
*/
refcount_inc_not_zero(&sk->sk_refcnt)) {
- sdata = sk_storage_update(sk, map, value, BPF_NOEXIST);
+ sdata = bpf_local_storage_update(
+ sk, (struct bpf_local_storage_map *)map, value,
+ BPF_NOEXIST);
/* sk must be a fullsock (guaranteed by verifier),
* so sock_gen_put() is unnecessary.
*/
@@ -909,6 +299,9 @@ BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
+ if (!sk || !sk_fullsock(sk))
+ return -EINVAL;
+
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
int err;
@@ -920,18 +313,44 @@ BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
return -ENOENT;
}
+static int sk_storage_charge(struct bpf_local_storage_map *smap,
+ void *owner, u32 size)
+{
+ return omem_charge(owner, size);
+}
+
+static void sk_storage_uncharge(struct bpf_local_storage_map *smap,
+ void *owner, u32 size)
+{
+ struct sock *sk = owner;
+
+ atomic_sub(size, &sk->sk_omem_alloc);
+}
+
+static struct bpf_local_storage __rcu **
+sk_storage_ptr(void *owner)
+{
+ struct sock *sk = owner;
+
+ return &sk->sk_bpf_storage;
+}
+
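
Together with sk_storage_charge() and sk_storage_uncharge(), sk_storage_ptr() above is the only socket-specific glue left after the move to the generic bpf_local_storage core. As a sketch of what the abstraction buys, a hypothetical owner type could plug into the same core like this (struct my_obj and its field are invented for illustration and exist nowhere in the kernel):

struct my_obj {
	struct bpf_local_storage __rcu *bpf_storage;
	/* ... other owner state ... */
};

/* No memory accounting for this hypothetical owner. */
static int my_storage_charge(struct bpf_local_storage_map *smap,
			     void *owner, u32 size)
{
	return 0;
}

static void my_storage_uncharge(struct bpf_local_storage_map *smap,
				void *owner, u32 size)
{
}

/* Tell the core where this owner keeps its storage pointer. */
static struct bpf_local_storage __rcu **my_storage_ptr(void *owner)
{
	struct my_obj *obj = owner;

	return &obj->bpf_storage;
}

Only the charging policy and the location of the storage pointer differ per owner type; the element and map lifecycle is handled by the shared core.
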
static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
- .map_alloc_check = bpf_sk_storage_map_alloc_check,
- .map_alloc = bpf_sk_storage_map_alloc,
- .map_free = bpf_sk_storage_map_free,
+ .map_meta_equal = bpf_map_meta_equal,
+ .map_alloc_check = bpf_local_storage_map_alloc_check,
+ .map_alloc = sk_storage_map_alloc,
+ .map_free = sk_storage_map_free,
.map_get_next_key = notsupp_get_next_key,
.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
.map_update_elem = bpf_fd_sk_storage_update_elem,
.map_delete_elem = bpf_fd_sk_storage_delete_elem,
- .map_check_btf = bpf_sk_storage_map_check_btf,
- .map_btf_name = "bpf_sk_storage_map",
+ .map_check_btf = bpf_local_storage_map_check_btf,
+ .map_btf_name = "bpf_local_storage_map",
.map_btf_id = &sk_storage_map_btf_id,
+ .map_local_storage_charge = sk_storage_charge,
+ .map_local_storage_uncharge = sk_storage_uncharge,
+ .map_owner_storage_ptr = sk_storage_ptr,
};
const struct bpf_func_proto bpf_sk_storage_get_proto = {
@@ -939,7 +358,7 @@ const struct bpf_func_proto bpf_sk_storage_get_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_PTR_TO_SOCKET,
+ .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
};
@@ -959,7 +378,7 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
- .arg2_type = ARG_PTR_TO_SOCKET,
+ .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
struct bpf_sk_storage_diag {
@@ -1022,7 +441,7 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
u32 nr_maps = 0;
int rem, err;
- /* bpf_sk_storage_map is currently limited to CAP_SYS_ADMIN as
+ /* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN as
* the map_alloc_check() side also does.
*/
if (!bpf_capable())
@@ -1072,13 +491,13 @@ err_free:
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
-static int diag_get(struct bpf_sk_storage_data *sdata, struct sk_buff *skb)
+static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
struct nlattr *nla_stg, *nla_value;
- struct bpf_sk_storage_map *smap;
+ struct bpf_local_storage_map *smap;
/* It cannot exceed max nlattr's payload */
- BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < MAX_VALUE_SIZE);
+ BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);
nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
if (!nla_stg)
@@ -1114,9 +533,9 @@ static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
{
/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
unsigned int diag_size = nla_total_size(0);
- struct bpf_sk_storage *sk_storage;
- struct bpf_sk_storage_elem *selem;
- struct bpf_sk_storage_map *smap;
+ struct bpf_local_storage *sk_storage;
+ struct bpf_local_storage_elem *selem;
+ struct bpf_local_storage_map *smap;
struct nlattr *nla_stgs;
unsigned int saved_len;
int err = 0;
@@ -1169,8 +588,8 @@ int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
{
/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
unsigned int diag_size = nla_total_size(0);
- struct bpf_sk_storage *sk_storage;
- struct bpf_sk_storage_data *sdata;
+ struct bpf_local_storage *sk_storage;
+ struct bpf_local_storage_data *sdata;
struct nlattr *nla_stgs;
unsigned int saved_len;
int err = 0;
@@ -1197,8 +616,8 @@ int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
saved_len = skb->len;
for (i = 0; i < diag->nr_maps; i++) {
- sdata = __sk_storage_lookup(sk_storage,
- (struct bpf_sk_storage_map *)diag->maps[i],
+ sdata = bpf_local_storage_lookup(sk_storage,
+ (struct bpf_local_storage_map *)diag->maps[i],
false);
if (!sdata)
@@ -1235,19 +654,20 @@ struct bpf_iter_seq_sk_storage_map_info {
unsigned skip_elems;
};
-static struct bpf_sk_storage_elem *
+static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
- struct bpf_sk_storage_elem *prev_selem)
+ struct bpf_local_storage_elem *prev_selem)
+ __acquires(RCU) __releases(RCU)
{
- struct bpf_sk_storage *sk_storage;
- struct bpf_sk_storage_elem *selem;
+ struct bpf_local_storage *sk_storage;
+ struct bpf_local_storage_elem *selem;
u32 skip_elems = info->skip_elems;
- struct bpf_sk_storage_map *smap;
+ struct bpf_local_storage_map *smap;
u32 bucket_id = info->bucket_id;
u32 i, count, n_buckets;
- struct bucket *b;
+ struct bpf_local_storage_map_bucket *b;
- smap = (struct bpf_sk_storage_map *)info->map;
+ smap = (struct bpf_local_storage_map *)info->map;
n_buckets = 1U << smap->bucket_log;
if (bucket_id >= n_buckets)
return NULL;
@@ -1256,16 +676,16 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
selem = prev_selem;
count = 0;
while (selem) {
- selem = hlist_entry_safe(selem->map_node.next,
- struct bpf_sk_storage_elem, map_node);
+ selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
+ struct bpf_local_storage_elem, map_node);
if (!selem) {
/* not found, unlock and go to the next bucket */
b = &smap->buckets[bucket_id++];
- raw_spin_unlock_bh(&b->lock);
+ rcu_read_unlock();
skip_elems = 0;
break;
}
- sk_storage = rcu_dereference_raw(selem->sk_storage);
+ sk_storage = rcu_dereference(selem->local_storage);
if (sk_storage) {
info->skip_elems = skip_elems + count;
return selem;
@@ -1275,10 +695,10 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
b = &smap->buckets[i];
- raw_spin_lock_bh(&b->lock);
+ rcu_read_lock();
count = 0;
- hlist_for_each_entry(selem, &b->list, map_node) {
- sk_storage = rcu_dereference_raw(selem->sk_storage);
+ hlist_for_each_entry_rcu(selem, &b->list, map_node) {
+ sk_storage = rcu_dereference(selem->local_storage);
if (sk_storage && count >= skip_elems) {
info->bucket_id = i;
info->skip_elems = count;
@@ -1286,7 +706,7 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
}
count++;
}
- raw_spin_unlock_bh(&b->lock);
+ rcu_read_unlock();
skip_elems = 0;
}
@@ -1297,7 +717,7 @@ bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct bpf_sk_storage_elem *selem;
+ struct bpf_local_storage_elem *selem;
selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
if (!selem)
@@ -1330,11 +750,11 @@ DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
void *value)
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
- struct bpf_sk_storage_elem *selem)
+ struct bpf_local_storage_elem *selem)
{
struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
struct bpf_iter__bpf_sk_storage_map ctx = {};
- struct bpf_sk_storage *sk_storage;
+ struct bpf_local_storage *sk_storage;
struct bpf_iter_meta meta;
struct bpf_prog *prog;
int ret = 0;
@@ -1345,8 +765,8 @@ static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
ctx.meta = &meta;
ctx.map = info->map;
if (selem) {
- sk_storage = rcu_dereference_raw(selem->sk_storage);
- ctx.sk = sk_storage->sk;
+ sk_storage = rcu_dereference(selem->local_storage);
+ ctx.sk = sk_storage->owner;
ctx.value = SDATA(selem)->data;
}
ret = bpf_iter_run_prog(prog, &ctx);
@@ -1361,18 +781,12 @@ static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
}
static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
+ __releases(RCU)
{
- struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
- struct bpf_sk_storage_map *smap;
- struct bucket *b;
-
- if (!v) {
+ if (!v)
(void)__bpf_sk_storage_map_seq_show(seq, v);
- } else {
- smap = (struct bpf_sk_storage_map *)info->map;
- b = &smap->buckets[info->bucket_id];
- raw_spin_unlock_bh(&b->lock);
- }
+ else
+ rcu_read_unlock();
}
static int bpf_iter_init_sk_storage_map(void *priv_data,
@@ -1437,6 +851,8 @@ static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
.target = "bpf_sk_storage_map",
.attach_target = bpf_iter_attach_map,
.detach_target = bpf_iter_detach_map,
+ .show_fdinfo = bpf_iter_map_show_fdinfo,
+ .fill_link_info = bpf_iter_map_fill_link_info,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
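
None of the refactoring above changes the BPF-side API. For reference, a small sketch of how a program consumes a BPF_MAP_TYPE_SK_STORAGE map, in the style of the kernel selftests (section name, value layout and counter logic are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val {
	__u64 pkts;
};

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);	/* required by alloc_check */
	__type(key, int);
	__type(value, struct val);
} sk_stg SEC(".maps");

SEC("sockops")
int count_events(struct bpf_sock_ops *ctx)
{
	struct bpf_sock *sk = ctx->sk;
	struct val *v;

	if (!sk)
		return 0;

	/* creates the per-socket element on first use */
	v = bpf_sk_storage_get(&sk_stg, sk, NULL, BPF_SK_STORAGE_GET_F_CREATE);
	if (v)
		__sync_fetch_and_add(&v->pkts, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";
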
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 639745d4f3b9..9fcaa544f11a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -623,10 +623,11 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
while (length && iov_iter_count(from)) {
struct page *pages[MAX_SKB_FRAGS];
+ struct page *last_head = NULL;
size_t start;
ssize_t copied;
unsigned long truesize;
- int n = 0;
+ int refs, n = 0;
if (frag == MAX_SKB_FRAGS)
return -EMSGSIZE;
@@ -649,13 +650,37 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
} else {
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
}
- while (copied) {
+ for (refs = 0; copied != 0; start = 0) {
int size = min_t(int, copied, PAGE_SIZE - start);
- skb_fill_page_desc(skb, frag++, pages[n], start, size);
- start = 0;
+ struct page *head = compound_head(pages[n]);
+
+ start += (pages[n] - head) << PAGE_SHIFT;
copied -= size;
n++;
+ if (frag) {
+ skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];
+
+ if (head == skb_frag_page(last) &&
+ start == skb_frag_off(last) + skb_frag_size(last)) {
+ skb_frag_size_add(last, size);
+ /* We combined this page, so we need to release
+ * a reference. Since a compound page's refcount
+ * is shared among all of its sub-pages, batch the
+ * refcount adjustments to limit false sharing.
+ */
+ last_head = head;
+ refs++;
+ continue;
+ }
+ }
+ if (refs) {
+ page_ref_sub(last_head, refs);
+ refs = 0;
+ }
+ skb_fill_page_desc(skb, frag++, head, start, size);
}
+ if (refs)
+ page_ref_sub(last_head, refs);
}
return 0;
}
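
The coalescing added above merges a new chunk into the previous frag when a single two-part test holds; a reduced sketch of that predicate (plain integers instead of skb_frag_t, for illustration only):

/* A chunk can extend the previous frag iff it sits on the same
 * (compound head) page and starts exactly where that frag ends.
 */
struct frag {
	const void *page;	/* compound head page */
	unsigned int off;	/* offset into that page */
	unsigned int size;	/* bytes covered */
};

static int can_coalesce(const struct frag *last,
			const void *head, unsigned int start)
{
	return last->page == head && start == last->off + last->size;
}

On a merge, only the frag size grows; the surplus page reference is dropped later in one batched page_ref_sub(), since every sub-page of a compound page shares the head page's refcount.
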
diff --git a/net/core/dev.c b/net/core/dev.c
index 4906b44af850..751e5264fd49 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -98,6 +98,7 @@
#include <net/busy_poll.h>
#include <linux/rtnetlink.h>
#include <linux/stat.h>
+#include <net/dsa.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/pkt_sched.h>
@@ -1130,7 +1131,7 @@ EXPORT_SYMBOL(__dev_get_by_flags);
* @name: name string
*
* Network device names need to be valid file names to
- * to allow sysfs to work. We also disallow any kind of
+ * allow sysfs to work. We also disallow any kind of
* whitespace.
*/
bool dev_valid_name(const char *name)
@@ -4840,6 +4841,21 @@ int netif_rx_ni(struct sk_buff *skb)
}
EXPORT_SYMBOL(netif_rx_ni);
+int netif_rx_any_context(struct sk_buff *skb)
+{
+ /*
+ * If invoked from contexts which do not invoke bottom half
+ * processing either at return from interrupt or when softirqs are
+ * reenabled, use netif_rx_ni() which invokes bottom half processing
+ * directly.
+ */
+ if (in_interrupt())
+ return netif_rx(skb);
+ else
+ return netif_rx_ni(skb);
+}
+EXPORT_SYMBOL(netif_rx_any_context);
+
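
A hypothetical driver-side sketch of the new helper: a completion path that may run in either hard-irq or process context can now hand the skb up without open-coding the in_interrupt() test itself (struct my_drv is invented for illustration):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

struct my_drv {
	struct net_device *netdev;
};

static void my_drv_rx_complete(struct my_drv *priv, struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, priv->netdev);

	/* picks netif_rx() vs netif_rx_ni() based on calling context */
	netif_rx_any_context(skb);
}
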
static __latent_entropy void net_tx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4914,7 +4930,7 @@ EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
static inline struct sk_buff *
sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
- struct net_device *orig_dev)
+ struct net_device *orig_dev, bool *another)
{
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc *miniq = rcu_dereference_bh(skb->dev->miniq_ingress);
@@ -4958,7 +4974,11 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret,
* redirecting to another netdev
*/
__skb_push(skb, skb->mac_len);
- skb_do_redirect(skb);
+ if (skb_do_redirect(skb) == -EAGAIN) {
+ __skb_pull(skb, skb->mac_len);
+ *another = true;
+ break;
+ }
return NULL;
case TC_ACT_CONSUMED:
return NULL;
@@ -5147,7 +5167,12 @@ another_round:
skip_taps:
#ifdef CONFIG_NET_INGRESS
if (static_branch_unlikely(&ingress_needed_key)) {
- skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
+ bool another = false;
+
+ skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev,
+ &another);
+ if (another)
+ goto another_round;
if (!skb)
goto out;
@@ -5192,7 +5217,7 @@ skip_classify:
}
}
- if (unlikely(skb_vlan_tag_present(skb))) {
+ if (unlikely(skb_vlan_tag_present(skb)) && !netdev_uses_dsa(skb->dev)) {
check_vlan_id:
if (skb_vlan_tag_get_id(skb)) {
/* Vlan id is non 0 and vlan_do_receive() above couldn't
@@ -5441,15 +5466,20 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_bpf *xdp)
if (new) {
u32 i;
+ mutex_lock(&new->aux->used_maps_mutex);
+
/* generic XDP does not work with DEVMAPs that can
* have a bpf_prog installed on an entry
*/
for (i = 0; i < new->aux->used_map_cnt; i++) {
- if (dev_map_can_have_prog(new->aux->used_maps[i]))
- return -EINVAL;
- if (cpu_map_prog_allowed(new->aux->used_maps[i]))
+ if (dev_map_can_have_prog(new->aux->used_maps[i]) ||
+ cpu_map_prog_allowed(new->aux->used_maps[i])) {
+ mutex_unlock(&new->aux->used_maps_mutex);
return -EINVAL;
+ }
}
+
+ mutex_unlock(&new->aux->used_maps_mutex);
}
switch (xdp->command) {
@@ -5621,17 +5651,60 @@ static void flush_backlog(struct work_struct *work)
local_bh_enable();
}
+static bool flush_required(int cpu)
+{
+#if IS_ENABLED(CONFIG_RPS)
+ struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+ bool do_flush;
+
+ local_irq_disable();
+ rps_lock(sd);
+
+ /* as insertion into process_queue happens with the rps lock held,
+ * process_queue access may race only with dequeue
+ */
+ do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
+ !skb_queue_empty_lockless(&sd->process_queue);
+ rps_unlock(sd);
+ local_irq_enable();
+
+ return do_flush;
+#endif
+ /* without RPS we can't safely check input_pkt_queue: during a
+ * concurrent remote skb_queue_splice() we can detect as empty both
+ * input_pkt_queue and process_queue even if the latter could end-up
+ * containing a lot of packets.
+ */
+ return true;
+}
+
static void flush_all_backlogs(void)
{
+ static cpumask_t flush_cpus;
unsigned int cpu;
+ /* since we are under rtnl lock protection we can use static data
+ * for the cpumask and avoid allocating on stack the possibly
+ * large mask
+ */
+ ASSERT_RTNL();
+
get_online_cpus();
- for_each_online_cpu(cpu)
- queue_work_on(cpu, system_highpri_wq,
- per_cpu_ptr(&flush_works, cpu));
+ cpumask_clear(&flush_cpus);
+ for_each_online_cpu(cpu) {
+ if (flush_required(cpu)) {
+ queue_work_on(cpu, system_highpri_wq,
+ per_cpu_ptr(&flush_works, cpu));
+ cpumask_set_cpu(cpu, &flush_cpus);
+ }
+ }
- for_each_online_cpu(cpu)
+ /* we can have in flight packet[s] on the cpus we are not flushing,
+ * synchronize_net() in rollback_registered_many() will take care of
+ * them
+ */
+ for_each_cpu(cpu, &flush_cpus)
flush_work(per_cpu_ptr(&flush_works, cpu));
put_online_cpus();
@@ -6293,7 +6366,7 @@ EXPORT_SYMBOL(__napi_schedule);
* @n: napi context
*
* Test if NAPI routine is already running, and if not mark
- * it as running. This is used as a condition variable
+ * it as running. This is used as a condition variable to
ensure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
@@ -6533,8 +6606,7 @@ EXPORT_SYMBOL(napi_busy_loop);
static void napi_hash_add(struct napi_struct *napi)
{
- if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state) ||
- test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+ if (test_bit(NAPI_STATE_NO_BUSY_POLL, &napi->state))
return;
spin_lock(&napi_hash_lock);
@@ -6555,20 +6627,14 @@ static void napi_hash_add(struct napi_struct *napi)
/* Warning : caller is responsible to make sure rcu grace period
* is respected before freeing memory containing @napi
*/
-bool napi_hash_del(struct napi_struct *napi)
+static void napi_hash_del(struct napi_struct *napi)
{
- bool rcu_sync_needed = false;
-
spin_lock(&napi_hash_lock);
- if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state)) {
- rcu_sync_needed = true;
- hlist_del_rcu(&napi->napi_hash_node);
- }
+ hlist_del_init_rcu(&napi->napi_hash_node);
+
spin_unlock(&napi_hash_lock);
- return rcu_sync_needed;
}
-EXPORT_SYMBOL_GPL(napi_hash_del);
static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
{
@@ -6600,7 +6666,11 @@ static void init_gro_hash(struct napi_struct *napi)
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight)
{
+ if (WARN_ON(test_and_set_bit(NAPI_STATE_LISTED, &napi->state)))
+ return;
+
INIT_LIST_HEAD(&napi->poll_list);
+ INIT_HLIST_NODE(&napi->napi_hash_node);
hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
napi->timer.function = napi_watchdog;
init_gro_hash(napi);
@@ -6653,18 +6723,19 @@ static void flush_gro_hash(struct napi_struct *napi)
}
/* Must be called in process context */
-void netif_napi_del(struct napi_struct *napi)
+void __netif_napi_del(struct napi_struct *napi)
{
- might_sleep();
- if (napi_hash_del(napi))
- synchronize_net();
- list_del_init(&napi->dev_list);
+ if (!test_and_clear_bit(NAPI_STATE_LISTED, &napi->state))
+ return;
+
+ napi_hash_del(napi);
+ list_del_rcu(&napi->dev_list);
napi_free_frags(napi);
flush_gro_hash(napi);
napi->gro_bitmask = 0;
}
-EXPORT_SYMBOL(netif_napi_del);
+EXPORT_SYMBOL(__netif_napi_del);
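
With NAPI_STATE_LISTED tracking and RCU-deferred unlisting, netif_napi_del() becomes a thin wrapper around __netif_napi_del() plus a grace period, so the usual driver teardown ordering keeps working unchanged. A hypothetical sketch (struct my_drv and its napi member are illustrative):

struct my_drv {
	struct napi_struct napi;
	/* ... rings, irq bookkeeping ... */
};

static void my_drv_down(struct my_drv *priv)
{
	napi_disable(&priv->napi);	/* stop polling first */
	netif_napi_del(&priv->napi);	/* unhash + unlist, RCU-deferred */
	/* memory backing the napi context may be reused only after this */
}
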
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
@@ -9533,7 +9604,7 @@ int __netdev_update_features(struct net_device *dev)
/* driver might be less strict about feature dependencies */
features = netdev_fix_features(dev, features);
- /* some features can't be enabled if they're off an an upper device */
+ /* some features can't be enabled if they're off on an upper device */
netdev_for_each_upper_dev_rcu(dev, upper, iter)
features = netdev_sync_upper_features(dev, upper, features);
@@ -10037,6 +10108,8 @@ int netdev_refcnt_read(const struct net_device *dev)
}
EXPORT_SYMBOL(netdev_refcnt_read);
+#define WAIT_REFS_MIN_MSECS 1
+#define WAIT_REFS_MAX_MSECS 250
/**
* netdev_wait_allrefs - wait until all references are gone.
* @dev: target net_device
@@ -10052,7 +10125,7 @@ EXPORT_SYMBOL(netdev_refcnt_read);
static void netdev_wait_allrefs(struct net_device *dev)
{
unsigned long rebroadcast_time, warning_time;
- int refcnt;
+ int wait = 0, refcnt;
linkwatch_forget_dev(dev);
@@ -10086,7 +10159,13 @@ static void netdev_wait_allrefs(struct net_device *dev)
rebroadcast_time = jiffies;
}
- msleep(250);
+ if (!wait) {
+ rcu_barrier();
+ wait = WAIT_REFS_MIN_MSECS;
+ } else {
+ msleep(wait);
+ wait = min(wait << 1, WAIT_REFS_MAX_MSECS);
+ }
refcnt = netdev_refcnt_read(dev);
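
The fixed msleep(250) is replaced by an rcu_barrier() on the first pass and then an exponential backoff capped at WAIT_REFS_MAX_MSECS. A plain C demo of just the wait arithmetic, assuming nothing beyond the two constants defined above:

#include <stdio.h>

#define WAIT_REFS_MIN_MSECS 1
#define WAIT_REFS_MAX_MSECS 250

int main(void)
{
	int wait = 0, i;

	for (i = 0; i < 12; i++) {
		if (!wait) {
			printf("iteration %d: rcu_barrier()\n", i);
			wait = WAIT_REFS_MIN_MSECS;
			continue;
		}
		printf("iteration %d: msleep(%d)\n", i, wait);
		wait = wait << 1 < WAIT_REFS_MAX_MSECS ?
		       wait << 1 : WAIT_REFS_MAX_MSECS;
	}
	return 0;	/* sleeps 1, 2, 4, ..., 128, then 250 ms repeatedly */
}
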
@@ -10249,6 +10328,40 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
}
EXPORT_SYMBOL(dev_get_stats);
+/**
+ * dev_fetch_sw_netstats - get per-cpu network device statistics
+ * @s: place to store stats
+ * @netstats: per-cpu network stats to read from
+ *
+ * Read per-cpu network statistics and populate the related fields in @s.
+ */
+void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
+ const struct pcpu_sw_netstats __percpu *netstats)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ const struct pcpu_sw_netstats *stats;
+ struct pcpu_sw_netstats tmp;
+ unsigned int start;
+
+ stats = per_cpu_ptr(netstats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ tmp.rx_packets = stats->rx_packets;
+ tmp.rx_bytes = stats->rx_bytes;
+ tmp.tx_packets = stats->tx_packets;
+ tmp.tx_bytes = stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ s->rx_packets += tmp.rx_packets;
+ s->rx_bytes += tmp.rx_bytes;
+ s->tx_packets += tmp.tx_packets;
+ s->tx_bytes += tmp.tx_bytes;
+ }
+}
+EXPORT_SYMBOL_GPL(dev_fetch_sw_netstats);
+
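
A hypothetical sketch of the intended caller: a driver that keeps its fast-path counters in per-cpu struct pcpu_sw_netstats can now implement ndo_get_stats64 without the open-coded summing loop (struct my_drv and its tstats field are illustrative):

struct my_drv {
	struct pcpu_sw_netstats __percpu *tstats;
};

static void my_drv_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	struct my_drv *priv = netdev_priv(dev);

	/* sums rx/tx packet and byte counters across all possible CPUs */
	dev_fetch_sw_netstats(s, priv->tstats);
}
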
struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
{
struct netdev_queue *queue = dev_ingress_queue(dev);
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 80ec1cd81c64..a578634052a3 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -27,7 +27,6 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/devlink.h>
-#include <net/drop_monitor.h>
#define CREATE_TRACE_POINTS
#include <trace/events/devlink.h>
@@ -84,6 +83,7 @@ EXPORT_SYMBOL(devlink_dpipe_header_ipv6);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwmsg);
EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_hwerr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(devlink_trap_report);
static const struct nla_policy devlink_function_nl_policy[DEVLINK_PORT_FUNCTION_ATTR_MAX + 1] = {
[DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR] = { .type = NLA_BINARY },
@@ -347,8 +347,12 @@ devlink_sb_tc_index_get_from_info(struct devlink_sb *devlink_sb,
struct devlink_region {
struct devlink *devlink;
+ struct devlink_port *port;
struct list_head list;
- const struct devlink_region_ops *ops;
+ union {
+ const struct devlink_region_ops *ops;
+ const struct devlink_port_region_ops *port_ops;
+ };
struct list_head snapshot_list;
u32 max_snapshots;
u32 cur_snapshots;
@@ -374,6 +378,19 @@ devlink_region_get_by_name(struct devlink *devlink, const char *region_name)
return NULL;
}
+static struct devlink_region *
+devlink_port_region_get_by_name(struct devlink_port *port,
+ const char *region_name)
+{
+ struct devlink_region *region;
+
+ list_for_each_entry(region, &port->region_list, list)
+ if (!strcmp(region->ops->name, region_name))
+ return region;
+
+ return NULL;
+}
+
static struct devlink_snapshot *
devlink_region_snapshot_get_by_id(struct devlink_region *region, u32 id)
{
@@ -462,10 +479,115 @@ static int devlink_nl_put_handle(struct sk_buff *msg, struct devlink *devlink)
return 0;
}
+struct devlink_reload_combination {
+ enum devlink_reload_action action;
+ enum devlink_reload_limit limit;
+};
+
+static const struct devlink_reload_combination devlink_reload_invalid_combinations[] = {
+ {
+ /* can't reinitialize driver with no down time */
+ .action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+ .limit = DEVLINK_RELOAD_LIMIT_NO_RESET,
+ },
+};
+
+static bool
+devlink_reload_combination_is_invalid(enum devlink_reload_action action,
+ enum devlink_reload_limit limit)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++)
+ if (devlink_reload_invalid_combinations[i].action == action &&
+ devlink_reload_invalid_combinations[i].limit == limit)
+ return true;
+ return false;
+}
+
+static bool
+devlink_reload_action_is_supported(struct devlink *devlink, enum devlink_reload_action action)
+{
+ return test_bit(action, &devlink->ops->reload_actions);
+}
+
+static bool
+devlink_reload_limit_is_supported(struct devlink *devlink, enum devlink_reload_limit limit)
+{
+ return test_bit(limit, &devlink->ops->reload_limits);
+}
+
+static int devlink_reload_stat_put(struct sk_buff *msg, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 value)
+{
+ struct nlattr *reload_stats_entry;
+
+ reload_stats_entry = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS_ENTRY);
+ if (!reload_stats_entry)
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, action) ||
+ nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
+ nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
+ goto nla_put_failure;
+ nla_nest_end(msg, reload_stats_entry);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, reload_stats_entry);
+ return -EMSGSIZE;
+}
+
+static int devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
+{
+ struct nlattr *reload_stats_attr;
+ int i, j, stat_idx;
+ u32 value;
+
+ if (!is_remote)
+ reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_STATS);
+ else
+ reload_stats_attr = nla_nest_start(msg, DEVLINK_ATTR_REMOTE_RELOAD_STATS);
+
+ if (!reload_stats_attr)
+ return -EMSGSIZE;
+
+ for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
+ /* Remote stats are shown even if not locally supported. Stats
+ * of actions with an unspecified limit are shown even though
+ * drivers don't need to register the unspecified limit.
+ */
+ if (!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
+ !devlink_reload_limit_is_supported(devlink, j))
+ continue;
+ for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
+ if ((!is_remote && !devlink_reload_action_is_supported(devlink, i)) ||
+ i == DEVLINK_RELOAD_ACTION_UNSPEC ||
+ devlink_reload_combination_is_invalid(i, j))
+ continue;
+
+ stat_idx = j * __DEVLINK_RELOAD_ACTION_MAX + i;
+ if (!is_remote)
+ value = devlink->stats.reload_stats[stat_idx];
+ else
+ value = devlink->stats.remote_reload_stats[stat_idx];
+ if (devlink_reload_stat_put(msg, i, j, value))
+ goto nla_put_failure;
+ }
+ }
+ nla_nest_end(msg, reload_stats_attr);
+ return 0;
+
+nla_put_failure:
+ nla_nest_cancel(msg, reload_stats_attr);
+ return -EMSGSIZE;
+}
+
static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
enum devlink_command cmd, u32 portid,
u32 seq, int flags)
{
+ struct nlattr *dev_stats;
void *hdr;
hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
@@ -477,9 +599,21 @@ static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
goto nla_put_failure;
+ dev_stats = nla_nest_start(msg, DEVLINK_ATTR_DEV_STATS);
+ if (!dev_stats)
+ goto nla_put_failure;
+
+ if (devlink_reload_stats_put(msg, devlink, false))
+ goto dev_stats_nest_cancel;
+ if (devlink_reload_stats_put(msg, devlink, true))
+ goto dev_stats_nest_cancel;
+
+ nla_nest_end(msg, dev_stats);
genlmsg_end(msg, hdr);
return 0;
+dev_stats_nest_cancel:
+ nla_nest_cancel(msg, dev_stats);
nla_put_failure:
genlmsg_cancel(msg, hdr);
return -EMSGSIZE;
@@ -523,15 +657,20 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg,
return -EMSGSIZE;
switch (devlink_port->attrs.flavour) {
case DEVLINK_PORT_FLAVOUR_PCI_PF:
- if (nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER,
- attrs->pci_pf.pf))
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+ attrs->pci_pf.controller) ||
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_pf.pf))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_pf.external))
return -EMSGSIZE;
break;
case DEVLINK_PORT_FLAVOUR_PCI_VF:
- if (nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER,
- attrs->pci_vf.pf) ||
- nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER,
- attrs->pci_vf.vf))
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_CONTROLLER_NUMBER,
+ attrs->pci_vf.controller) ||
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_PF_NUMBER, attrs->pci_vf.pf) ||
+ nla_put_u16(msg, DEVLINK_ATTR_PORT_PCI_VF_NUMBER, attrs->pci_vf.vf))
+ return -EMSGSIZE;
+ if (nla_put_u8(msg, DEVLINK_ATTR_PORT_EXTERNAL, attrs->pci_vf.external))
return -EMSGSIZE;
break;
case DEVLINK_PORT_FLAVOUR_PHYSICAL:
@@ -806,8 +945,6 @@ static int devlink_port_type_set(struct devlink *devlink,
int err;
if (devlink->ops->port_type_set) {
- if (port_type == DEVLINK_PORT_TYPE_NOTSET)
- return -EINVAL;
if (port_type == devlink_port->type)
return 0;
err = devlink->ops->port_type_set(devlink_port, port_type);
@@ -2943,9 +3080,9 @@ static void devlink_reload_netns_change(struct devlink *devlink,
DEVLINK_CMD_PARAM_NEW);
}
-static bool devlink_reload_supported(const struct devlink *devlink)
+static bool devlink_reload_supported(const struct devlink_ops *ops)
{
- return devlink->ops->reload_down && devlink->ops->reload_up;
+ return ops->reload_down && ops->reload_up;
}
static void devlink_reload_failed_set(struct devlink *devlink,
@@ -2963,33 +3100,132 @@ bool devlink_is_reload_failed(const struct devlink *devlink)
}
EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
+static void
+__devlink_reload_stats_update(struct devlink *devlink, u32 *reload_stats,
+ enum devlink_reload_limit limit, u32 actions_performed)
+{
+ unsigned long actions = actions_performed;
+ int stat_idx;
+ int action;
+
+ for_each_set_bit(action, &actions, __DEVLINK_RELOAD_ACTION_MAX) {
+ stat_idx = limit * __DEVLINK_RELOAD_ACTION_MAX + action;
+ reload_stats[stat_idx]++;
+ }
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+}
+
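
The reload statistics above live in a flat array indexed limit-major, as the stat_idx computation shows. A small illustrative accessor (not part of the patch) makes the layout explicit:

/* One row per reload limit, one column per reload action:
 * array size = (DEVLINK_RELOAD_LIMIT_MAX + 1) * __DEVLINK_RELOAD_ACTION_MAX
 */
static u32 reload_stat_read(const u32 *stats,
			    enum devlink_reload_limit limit,
			    enum devlink_reload_action action)
{
	return stats[limit * __DEVLINK_RELOAD_ACTION_MAX + action];
}
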
+static void
+devlink_reload_stats_update(struct devlink *devlink, enum devlink_reload_limit limit,
+ u32 actions_performed)
+{
+ __devlink_reload_stats_update(devlink, devlink->stats.reload_stats, limit,
+ actions_performed);
+}
+
+/**
+ * devlink_remote_reload_actions_performed - Update devlink on reload actions
+ * performed which are not a direct result of a devlink reload call.
+ *
+ * This should be called by a driver after performing reload actions that were
+ * not the result of a devlink reload call. For example, fw_activate was performed
+ * as a result of a devlink reload on another host triggering fw_activate here.
+ * The motivation for this function is to keep track of the reload actions
+ * performed on this device, whether they were due to a direct devlink reload
+ * call or not.
+ *
+ * @devlink: devlink
+ * @limit: reload limit
+ * @actions_performed: bitmask of actions performed
+ */
+void devlink_remote_reload_actions_performed(struct devlink *devlink,
+ enum devlink_reload_limit limit,
+ u32 actions_performed)
+{
+ if (WARN_ON(!actions_performed ||
+ actions_performed & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
+ actions_performed >= BIT(__DEVLINK_RELOAD_ACTION_MAX) ||
+ limit > DEVLINK_RELOAD_LIMIT_MAX))
+ return;
+
+ __devlink_reload_stats_update(devlink, devlink->stats.remote_reload_stats, limit,
+ actions_performed);
+}
+EXPORT_SYMBOL_GPL(devlink_remote_reload_actions_performed);
+
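
A hypothetical driver-side sketch of the new export: after noticing that firmware was activated by a peer host rather than by a local devlink reload, the driver records it so the remote reload stats stay accurate (struct my_drv and the event hook are invented for illustration):

struct my_drv {
	struct devlink *devlink;
};

static void my_drv_fw_activated_by_peer(struct my_drv *priv)
{
	devlink_remote_reload_actions_performed(priv->devlink,
						DEVLINK_RELOAD_LIMIT_UNSPEC,
						BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
}
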
static int devlink_reload(struct devlink *devlink, struct net *dest_net,
- struct netlink_ext_ack *extack)
+ enum devlink_reload_action action, enum devlink_reload_limit limit,
+ u32 *actions_performed, struct netlink_ext_ack *extack)
{
+ u32 remote_reload_stats[DEVLINK_RELOAD_STATS_ARRAY_SIZE];
int err;
if (!devlink->reload_enabled)
return -EOPNOTSUPP;
- err = devlink->ops->reload_down(devlink, !!dest_net, extack);
+ memcpy(remote_reload_stats, devlink->stats.remote_reload_stats,
+ sizeof(remote_reload_stats));
+ err = devlink->ops->reload_down(devlink, !!dest_net, action, limit, extack);
if (err)
return err;
if (dest_net && !net_eq(dest_net, devlink_net(devlink)))
devlink_reload_netns_change(devlink, dest_net);
- err = devlink->ops->reload_up(devlink, extack);
+ err = devlink->ops->reload_up(devlink, action, limit, actions_performed, extack);
devlink_reload_failed_set(devlink, !!err);
- return err;
+ if (err)
+ return err;
+
+ WARN_ON(!(*actions_performed & BIT(action)));
+ /* Catch drivers that update the remote reload stats from within a devlink reload */
+ WARN_ON(memcmp(remote_reload_stats, devlink->stats.remote_reload_stats,
+ sizeof(remote_reload_stats)));
+ devlink_reload_stats_update(devlink, limit, *actions_performed);
+ return 0;
+}
+
+static int
+devlink_nl_reload_actions_performed_snd(struct devlink *devlink, u32 actions_performed,
+ enum devlink_command cmd, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq, &devlink_nl_family, 0, cmd);
+ if (!hdr)
+ goto free_msg;
+
+ if (devlink_nl_put_handle(msg, devlink))
+ goto nla_put_failure;
+
+ if (nla_put_bitfield32(msg, DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED, actions_performed,
+ actions_performed))
+ goto nla_put_failure;
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+free_msg:
+ nlmsg_free(msg);
+ return -EMSGSIZE;
}
static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
+ enum devlink_reload_action action;
+ enum devlink_reload_limit limit;
struct net *dest_net = NULL;
+ u32 actions_performed;
int err;
- if (!devlink_reload_supported(devlink))
+ if (!devlink_reload_supported(devlink->ops))
return -EOPNOTSUPP;
err = devlink_resources_validate(devlink, NULL, info);
@@ -3006,20 +3242,67 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dest_net);
}
- err = devlink_reload(devlink, dest_net, info->extack);
+ if (info->attrs[DEVLINK_ATTR_RELOAD_ACTION])
+ action = nla_get_u8(info->attrs[DEVLINK_ATTR_RELOAD_ACTION]);
+ else
+ action = DEVLINK_RELOAD_ACTION_DRIVER_REINIT;
+
+ if (!devlink_reload_action_is_supported(devlink, action)) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "Requested reload action is not supported by the driver");
+ return -EOPNOTSUPP;
+ }
+
+ limit = DEVLINK_RELOAD_LIMIT_UNSPEC;
+ if (info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]) {
+ struct nla_bitfield32 limits;
+ u32 limits_selected;
+
+ limits = nla_get_bitfield32(info->attrs[DEVLINK_ATTR_RELOAD_LIMITS]);
+ limits_selected = limits.value & limits.selector;
+ if (!limits_selected) {
+ NL_SET_ERR_MSG_MOD(info->extack, "Invalid limit selected");
+ return -EINVAL;
+ }
+ for (limit = 0; limit <= DEVLINK_RELOAD_LIMIT_MAX; limit++)
+ if (limits_selected & BIT(limit))
+ break;
+ /* The UAPI allows selecting several limits, but multi-selection is not yet supported */
+ if (limits_selected != BIT(limit)) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "Multiselection of limit is not supported");
+ return -EOPNOTSUPP;
+ }
+ if (!devlink_reload_limit_is_supported(devlink, limit)) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "Requested limit is not supported by the driver");
+ return -EOPNOTSUPP;
+ }
+ if (devlink_reload_combination_is_invalid(action, limit)) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "Requested limit is invalid for this action");
+ return -EINVAL;
+ }
+ }
+ err = devlink_reload(devlink, dest_net, action, limit, &actions_performed, info->extack);
if (dest_net)
put_net(dest_net);
- return err;
+ if (err)
+ return err;
+ /* For backward compatibility, generate a reply only if the user passed the new attributes */
+ if (!info->attrs[DEVLINK_ATTR_RELOAD_ACTION] && !info->attrs[DEVLINK_ATTR_RELOAD_LIMITS])
+ return 0;
+
+ return devlink_nl_reload_actions_performed_snd(devlink, actions_performed,
+ DEVLINK_CMD_RELOAD, info);
}
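For reference, a hedged sketch of a driver's devlink_ops against the reworked reload interface; the foo_* callbacks are illustrative, while the field names and signatures follow the calls made by devlink_reload() above:

	static int foo_reload_down(struct devlink *devlink, bool netns_change,
				   enum devlink_reload_action action,
				   enum devlink_reload_limit limit,
				   struct netlink_ext_ack *extack);
	static int foo_reload_up(struct devlink *devlink,
				 enum devlink_reload_action action,
				 enum devlink_reload_limit limit,
				 u32 *actions_performed,
				 struct netlink_ext_ack *extack);

	static const struct devlink_ops foo_devlink_ops = {
		/* Validated at devlink_alloc() time by devlink_reload_actions_valid() */
		.reload_actions	= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
				  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
		.reload_limits	= BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
		.reload_down	= foo_reload_down,
		.reload_up	= foo_reload_up,
	};

reload_up() must set *actions_performed to the bitmask of actions it actually carried out; devlink_reload() warns if the requested action is missing from that mask.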
static int devlink_nl_flash_update_fill(struct sk_buff *msg,
struct devlink *devlink,
enum devlink_command cmd,
- const char *status_msg,
- const char *component,
- unsigned long done, unsigned long total)
+ struct devlink_flash_notify *params)
{
void *hdr;
@@ -3033,19 +3316,22 @@ static int devlink_nl_flash_update_fill(struct sk_buff *msg,
if (cmd != DEVLINK_CMD_FLASH_UPDATE_STATUS)
goto out;
- if (status_msg &&
+ if (params->status_msg &&
nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG,
- status_msg))
+ params->status_msg))
goto nla_put_failure;
- if (component &&
+ if (params->component &&
nla_put_string(msg, DEVLINK_ATTR_FLASH_UPDATE_COMPONENT,
- component))
+ params->component))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE,
- done, DEVLINK_ATTR_PAD))
+ params->done, DEVLINK_ATTR_PAD))
goto nla_put_failure;
if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL,
- total, DEVLINK_ATTR_PAD))
+ params->total, DEVLINK_ATTR_PAD))
+ goto nla_put_failure;
+ if (nla_put_u64_64bit(msg, DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT,
+ params->timeout, DEVLINK_ATTR_PAD))
goto nla_put_failure;
out:
@@ -3059,10 +3345,7 @@ nla_put_failure:
static void __devlink_flash_update_notify(struct devlink *devlink,
enum devlink_command cmd,
- const char *status_msg,
- const char *component,
- unsigned long done,
- unsigned long total)
+ struct devlink_flash_notify *params)
{
struct sk_buff *msg;
int err;
@@ -3075,8 +3358,7 @@ static void __devlink_flash_update_notify(struct devlink *devlink,
if (!msg)
return;
- err = devlink_nl_flash_update_fill(msg, devlink, cmd, status_msg,
- component, done, total);
+ err = devlink_nl_flash_update_fill(msg, devlink, cmd, params);
if (err)
goto out_free_msg;
@@ -3090,17 +3372,21 @@ out_free_msg:
void devlink_flash_update_begin_notify(struct devlink *devlink)
{
+ struct devlink_flash_notify params = { 0 };
+
__devlink_flash_update_notify(devlink,
DEVLINK_CMD_FLASH_UPDATE,
- NULL, NULL, 0, 0);
+ &params);
}
EXPORT_SYMBOL_GPL(devlink_flash_update_begin_notify);
void devlink_flash_update_end_notify(struct devlink *devlink)
{
+ struct devlink_flash_notify params = { 0 };
+
__devlink_flash_update_notify(devlink,
DEVLINK_CMD_FLASH_UPDATE_END,
- NULL, NULL, 0, 0);
+ &params);
}
EXPORT_SYMBOL_GPL(devlink_flash_update_end_notify);
@@ -3110,31 +3396,78 @@ void devlink_flash_update_status_notify(struct devlink *devlink,
unsigned long done,
unsigned long total)
{
+ struct devlink_flash_notify params = {
+ .status_msg = status_msg,
+ .component = component,
+ .done = done,
+ .total = total,
+ };
+
__devlink_flash_update_notify(devlink,
DEVLINK_CMD_FLASH_UPDATE_STATUS,
- status_msg, component, done, total);
+ &params);
}
EXPORT_SYMBOL_GPL(devlink_flash_update_status_notify);
+void devlink_flash_update_timeout_notify(struct devlink *devlink,
+ const char *status_msg,
+ const char *component,
+ unsigned long timeout)
+{
+ struct devlink_flash_notify params = {
+ .status_msg = status_msg,
+ .component = component,
+ .timeout = timeout,
+ };
+
+ __devlink_flash_update_notify(devlink,
+ DEVLINK_CMD_FLASH_UPDATE_STATUS,
+ &params);
+}
+EXPORT_SYMBOL_GPL(devlink_flash_update_timeout_notify);
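A hedged sketch of the call pattern the new timeout notification enables; the component name, timeout value and the bytes_done/bytes_total counters are assumed driver-local values, only the two exported helpers come from this code:

	/* Tell user space a reset of the "fw.mgmt" component may take up to
	 * 60 seconds, then keep reporting byte progress as before.
	 */
	devlink_flash_update_timeout_notify(devlink, "Resetting component",
					    "fw.mgmt", 60);
	devlink_flash_update_status_notify(devlink, "Flashing", "fw.mgmt",
					   bytes_done, bytes_total);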
+
static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
struct genl_info *info)
{
+ struct nlattr *nla_component, *nla_overwrite_mask;
+ struct devlink_flash_update_params params = {};
struct devlink *devlink = info->user_ptr[0];
- const char *file_name, *component;
- struct nlattr *nla_component;
+ u32 supported_params;
if (!devlink->ops->flash_update)
return -EOPNOTSUPP;
if (!info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME])
return -EINVAL;
- file_name = nla_data(info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]);
+
+ supported_params = devlink->ops->supported_flash_update_params;
+
+ params.file_name = nla_data(info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]);
nla_component = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT];
- component = nla_component ? nla_data(nla_component) : NULL;
+ if (nla_component) {
+ if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT)) {
+ NL_SET_ERR_MSG_ATTR(info->extack, nla_component,
+ "component update is not supported by this device");
+ return -EOPNOTSUPP;
+ }
+ params.component = nla_data(nla_component);
+ }
+
+ nla_overwrite_mask = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK];
+ if (nla_overwrite_mask) {
+ struct nla_bitfield32 sections;
+
+ if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK)) {
+ NL_SET_ERR_MSG_ATTR(info->extack, nla_overwrite_mask,
+ "overwrite settings are not supported by this device");
+ return -EOPNOTSUPP;
+ }
+ sections = nla_get_bitfield32(nla_overwrite_mask);
+ params.overwrite_mask = sections.value & sections.selector;
+ }
- return devlink->ops->flash_update(devlink, file_name, component,
- info->extack);
+ return devlink->ops->flash_update(devlink, &params, info->extack);
}
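On the driver side, the request now arrives as a struct devlink_flash_update_params together with a declaration of which optional parameters the driver parses; a minimal hedged sketch with hypothetical foo_* names:

	static int foo_flash_update(struct devlink *devlink,
				    struct devlink_flash_update_params *params,
				    struct netlink_ext_ack *extack)
	{
		/* params->component is only non-NULL when the matching
		 * DEVLINK_SUPPORT_FLASH_UPDATE_* bit is advertised below.
		 */
		return foo_fw_flash(devlink_priv(devlink), params->file_name,
				    params->component, extack);
	}

	static const struct devlink_ops foo_devlink_ops = {
		.supported_flash_update_params =
			DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT,
		.flash_update = foo_flash_update,
	};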
static const struct devlink_param devlink_param_generic[] = {
@@ -3188,6 +3521,11 @@ static const struct devlink_param devlink_param_generic[] = {
.name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
.type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
},
+ {
+ .id = DEVLINK_PARAM_GENERIC_ID_ENABLE_REMOTE_DEV_RESET,
+ .name = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_NAME,
+ .type = DEVLINK_PARAM_GENERIC_ENABLE_REMOTE_DEV_RESET_TYPE,
+ },
};
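A driver can expose the new generic parameter like any other; a hedged sketch assuming runtime cmode and hypothetical foo_* accessors:

	static const struct devlink_param foo_params[] = {
		DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
				      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
				      foo_remote_reset_get,
				      foo_remote_reset_set, NULL),
	};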
static int devlink_param_generic_verify(const struct devlink_param *param)
@@ -3875,6 +4213,11 @@ static int devlink_nl_region_fill(struct sk_buff *msg, struct devlink *devlink,
if (err)
goto nla_put_failure;
+ if (region->port)
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index))
+ goto nla_put_failure;
+
err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
if (err)
goto nla_put_failure;
@@ -3922,6 +4265,11 @@ devlink_nl_region_notify_build(struct devlink_region *region,
if (err)
goto out_cancel_msg;
+ if (region->port)
+ if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index))
+ goto out_cancel_msg;
+
err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
region->ops->name);
if (err)
@@ -4168,16 +4516,30 @@ static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
+ struct devlink_port *port = NULL;
struct devlink_region *region;
const char *region_name;
struct sk_buff *msg;
+ unsigned int index;
int err;
if (!info->attrs[DEVLINK_ATTR_REGION_NAME])
return -EINVAL;
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port)
+ return -ENODEV;
+ }
+
region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
- region = devlink_region_get_by_name(devlink, region_name);
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
if (!region)
return -EINVAL;
@@ -4196,10 +4558,75 @@ static int devlink_nl_cmd_region_get_doit(struct sk_buff *skb,
return genlmsg_reply(msg, info);
}
+static int devlink_nl_cmd_region_get_port_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb,
+ struct devlink_port *port,
+ int *idx,
+ int start)
+{
+ struct devlink_region *region;
+ int err = 0;
+
+ list_for_each_entry(region, &port->region_list, list) {
+ if (*idx < start) {
+ (*idx)++;
+ continue;
+ }
+ err = devlink_nl_region_fill(msg, port->devlink,
+ DEVLINK_CMD_REGION_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, region);
+ if (err)
+ goto out;
+ (*idx)++;
+ }
+
+out:
+ return err;
+}
+
+static int devlink_nl_cmd_region_get_devlink_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb,
+ struct devlink *devlink,
+ int *idx,
+ int start)
+{
+ struct devlink_region *region;
+ struct devlink_port *port;
+ int err = 0;
+
+ mutex_lock(&devlink->lock);
+ list_for_each_entry(region, &devlink->region_list, list) {
+ if (*idx < start) {
+ (*idx)++;
+ continue;
+ }
+ err = devlink_nl_region_fill(msg, devlink,
+ DEVLINK_CMD_REGION_GET,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ NLM_F_MULTI, region);
+ if (err)
+ goto out;
+ (*idx)++;
+ }
+
+ list_for_each_entry(port, &devlink->port_list, list) {
+ err = devlink_nl_cmd_region_get_port_dumpit(msg, cb, port, idx,
+ start);
+ if (err)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&devlink->lock);
+ return err;
+}
+
static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
struct netlink_callback *cb)
{
- struct devlink_region *region;
struct devlink *devlink;
int start = cb->args[0];
int idx = 0;
@@ -4209,25 +4636,10 @@ static int devlink_nl_cmd_region_get_dumpit(struct sk_buff *msg,
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
-
- mutex_lock(&devlink->lock);
- list_for_each_entry(region, &devlink->region_list, list) {
- if (idx < start) {
- idx++;
- continue;
- }
- err = devlink_nl_region_fill(msg, devlink,
- DEVLINK_CMD_REGION_GET,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- NLM_F_MULTI, region);
- if (err) {
- mutex_unlock(&devlink->lock);
- goto out;
- }
- idx++;
- }
- mutex_unlock(&devlink->lock);
+ err = devlink_nl_cmd_region_get_devlink_dumpit(msg, cb, devlink,
+ &idx, start);
+ if (err)
+ goto out;
}
out:
mutex_unlock(&devlink_mutex);
@@ -4240,8 +4652,10 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb,
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_snapshot *snapshot;
+ struct devlink_port *port = NULL;
struct devlink_region *region;
const char *region_name;
+ unsigned int index;
u32 snapshot_id;
if (!info->attrs[DEVLINK_ATTR_REGION_NAME] ||
@@ -4251,7 +4665,19 @@ static int devlink_nl_cmd_region_del(struct sk_buff *skb,
region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
snapshot_id = nla_get_u32(info->attrs[DEVLINK_ATTR_REGION_SNAPSHOT_ID]);
- region = devlink_region_get_by_name(devlink, region_name);
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port)
+ return -ENODEV;
+ }
+
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
if (!region)
return -EINVAL;
@@ -4268,9 +4694,11 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
struct devlink_snapshot *snapshot;
+ struct devlink_port *port = NULL;
struct nlattr *snapshot_id_attr;
struct devlink_region *region;
const char *region_name;
+ unsigned int index;
u32 snapshot_id;
u8 *data;
int err;
@@ -4281,7 +4709,20 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
}
region_name = nla_data(info->attrs[DEVLINK_ATTR_REGION_NAME]);
- region = devlink_region_get_by_name(devlink, region_name);
+
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port)
+ return -ENODEV;
+ }
+
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
if (!region) {
NL_SET_ERR_MSG_MOD(info->extack, "The requested region does not exist");
return -EINVAL;
@@ -4317,7 +4758,12 @@ devlink_nl_cmd_region_new(struct sk_buff *skb, struct genl_info *info)
}
}
- err = region->ops->snapshot(devlink, info->extack, &data);
+ if (port)
+ err = region->port_ops->snapshot(port, region->port_ops,
+ info->extack, &data);
+ else
+ err = region->ops->snapshot(devlink, region->ops,
+ info->extack, &data);
if (err)
goto err_snapshot_capture;
@@ -4439,10 +4885,12 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
u64 ret_offset, start_offset, end_offset = U64_MAX;
struct nlattr **attrs = info->attrs;
+ struct devlink_port *port = NULL;
struct devlink_region *region;
struct nlattr *chunks_attr;
const char *region_name;
struct devlink *devlink;
+ unsigned int index;
void *hdr;
int err;
@@ -4463,8 +4911,21 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
goto out_unlock;
}
+ if (info->attrs[DEVLINK_ATTR_PORT_INDEX]) {
+ index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
+
+ port = devlink_port_get_by_index(devlink, index);
+ if (!port)
+ return -ENODEV;
+ }
+
region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
- region = devlink_region_get_by_name(devlink, region_name);
+
+ if (port)
+ region = devlink_port_region_get_by_name(port, region_name);
+ else
+ region = devlink_region_get_by_name(devlink, region_name);
+
if (!region) {
err = -EINVAL;
goto out_unlock;
@@ -4501,6 +4962,11 @@ static int devlink_nl_cmd_region_read_dumpit(struct sk_buff *skb,
if (err)
goto nla_put_failure;
+ if (region->port)
+ if (nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index))
+ goto nla_put_failure;
+
err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
if (err)
goto nla_put_failure;
@@ -5895,6 +6361,7 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
list_for_each_entry(devlink, &devlink_list, list) {
if (!net_eq(devlink_net(devlink), sock_net(msg->sk)))
continue;
+ mutex_lock(&devlink->lock);
list_for_each_entry(port, &devlink->port_list, list) {
mutex_lock(&port->reporters_lock);
list_for_each_entry(reporter, &port->reporter_list, list) {
@@ -5909,12 +6376,14 @@ devlink_nl_cmd_health_reporter_get_dumpit(struct sk_buff *msg,
NLM_F_MULTI);
if (err) {
mutex_unlock(&port->reporters_lock);
+ mutex_unlock(&devlink->lock);
goto out;
}
idx++;
}
mutex_unlock(&port->reporters_lock);
}
+ mutex_unlock(&devlink->lock);
}
out:
mutex_unlock(&devlink_mutex);
@@ -6088,6 +6557,28 @@ devlink_nl_cmd_health_reporter_dump_clear_doit(struct sk_buff *skb,
return 0;
}
+static int devlink_nl_cmd_health_reporter_test_doit(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct devlink *devlink = info->user_ptr[0];
+ struct devlink_health_reporter *reporter;
+ int err;
+
+ reporter = devlink_health_reporter_get_from_info(devlink, info);
+ if (!reporter)
+ return -EINVAL;
+
+ if (!reporter->ops->test) {
+ devlink_health_reporter_put(reporter);
+ return -EOPNOTSUPP;
+ }
+
+ err = reporter->ops->test(reporter, info->extack);
+
+ devlink_health_reporter_put(reporter);
+ return err;
+}
+
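A hedged sketch of wiring up the new test callback; the fault-injection helper is hypothetical, while the ops field and accessor are the ones used above:

	static int foo_reporter_test(struct devlink_health_reporter *reporter,
				     struct netlink_ext_ack *extack)
	{
		struct foo_dev *fdev = devlink_health_reporter_priv(reporter);

		/* Inject a benign fault so the recover/dump paths can be
		 * exercised on demand from user space.
		 */
		return foo_inject_test_event(fdev);
	}

	static const struct devlink_health_reporter_ops foo_reporter_ops = {
		.name = "foo",
		.test = foo_reporter_test,
		/* .recover, .dump, .diagnose unchanged */
	};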
struct devlink_stats {
u64 rx_bytes;
u64 rx_packets;
@@ -6644,6 +7135,24 @@ __devlink_trap_group_action_set(struct devlink *devlink,
struct devlink_trap_item *trap_item;
int err;
+ if (devlink->ops->trap_group_action_set) {
+ err = devlink->ops->trap_group_action_set(devlink, group_item->group,
+ trap_action, extack);
+ if (err)
+ return err;
+
+ list_for_each_entry(trap_item, &devlink->trap_list, list) {
+ if (strcmp(trap_item->group_item->group->name, group_name))
+ continue;
+ if (trap_item->action != trap_action &&
+ trap_item->trap->type != DEVLINK_TRAP_TYPE_DROP)
+ continue;
+ trap_item->action = trap_action;
+ }
+
+ return 0;
+ }
+
list_for_each_entry(trap_item, &devlink->trap_list, list) {
if (strcmp(trap_item->group_item->group->name, group_name))
continue;
@@ -7000,7 +7509,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_BUS_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_DEV_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_PORT_INDEX] = { .type = NLA_U32 },
- [DEVLINK_ATTR_PORT_TYPE] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_PORT_TYPE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_PORT_TYPE_AUTO,
+ DEVLINK_PORT_TYPE_IB),
[DEVLINK_ATTR_PORT_SPLIT_COUNT] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_INDEX] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_POOL_INDEX] = { .type = NLA_U16 },
@@ -7009,7 +7519,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE] = { .type = NLA_U8 },
[DEVLINK_ATTR_SB_THRESHOLD] = { .type = NLA_U32 },
[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
- [DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
+ [DEVLINK_ATTR_ESWITCH_MODE] = NLA_POLICY_RANGE(NLA_U16, DEVLINK_ESWITCH_MODE_LEGACY,
+ DEVLINK_ESWITCH_MODE_SWITCHDEV),
[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
@@ -7028,6 +7539,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT] = { .type = NLA_NUL_STRING },
+ [DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK] =
+ NLA_POLICY_BITFIELD32(DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS),
[DEVLINK_ATTR_TRAP_NAME] = { .type = NLA_NUL_STRING },
[DEVLINK_ATTR_TRAP_ACTION] = { .type = NLA_U8 },
[DEVLINK_ATTR_TRAP_GROUP_NAME] = { .type = NLA_NUL_STRING },
@@ -7039,9 +7552,12 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
[DEVLINK_ATTR_TRAP_POLICER_RATE] = { .type = NLA_U64 },
[DEVLINK_ATTR_TRAP_POLICER_BURST] = { .type = NLA_U64 },
[DEVLINK_ATTR_PORT_FUNCTION] = { .type = NLA_NESTED },
+ [DEVLINK_ATTR_RELOAD_ACTION] = NLA_POLICY_RANGE(NLA_U8, DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+ DEVLINK_RELOAD_ACTION_MAX),
+ [DEVLINK_ATTR_RELOAD_LIMITS] = NLA_POLICY_BITFIELD32(DEVLINK_RELOAD_LIMITS_VALID_MASK),
};
-static const struct genl_ops devlink_nl_ops[] = {
+static const struct genl_small_ops devlink_nl_ops[] = {
{
.cmd = DEVLINK_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -7309,6 +7825,14 @@ static const struct genl_ops devlink_nl_ops[] = {
DEVLINK_NL_FLAG_NO_LOCK,
},
{
+ .cmd = DEVLINK_CMD_HEALTH_REPORTER_TEST,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = devlink_nl_cmd_health_reporter_test_doit,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = DEVLINK_NL_FLAG_NEED_DEVLINK_OR_PORT |
+ DEVLINK_NL_FLAG_NO_LOCK,
+ },
+ {
.cmd = DEVLINK_CMD_FLASH_UPDATE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.doit = devlink_nl_cmd_flash_update,
@@ -7358,12 +7882,41 @@ static struct genl_family devlink_nl_family __ro_after_init = {
.pre_doit = devlink_nl_pre_doit,
.post_doit = devlink_nl_post_doit,
.module = THIS_MODULE,
- .ops = devlink_nl_ops,
- .n_ops = ARRAY_SIZE(devlink_nl_ops),
+ .small_ops = devlink_nl_ops,
+ .n_small_ops = ARRAY_SIZE(devlink_nl_ops),
.mcgrps = devlink_nl_mcgrps,
.n_mcgrps = ARRAY_SIZE(devlink_nl_mcgrps),
};
+static bool devlink_reload_actions_valid(const struct devlink_ops *ops)
+{
+ const struct devlink_reload_combination *comb;
+ int i;
+
+ if (!devlink_reload_supported(ops)) {
+ if (WARN_ON(ops->reload_actions))
+ return false;
+ return true;
+ }
+
+ if (WARN_ON(!ops->reload_actions ||
+ ops->reload_actions & BIT(DEVLINK_RELOAD_ACTION_UNSPEC) ||
+ ops->reload_actions >= BIT(__DEVLINK_RELOAD_ACTION_MAX)))
+ return false;
+
+ if (WARN_ON(ops->reload_limits & BIT(DEVLINK_RELOAD_LIMIT_UNSPEC) ||
+ ops->reload_limits >= BIT(__DEVLINK_RELOAD_LIMIT_MAX)))
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(devlink_reload_invalid_combinations); i++) {
+ comb = &devlink_reload_invalid_combinations[i];
+ if (ops->reload_actions == BIT(comb->action) &&
+ ops->reload_limits == BIT(comb->limit))
+ return false;
+ }
+ return true;
+}
+
/**
* devlink_alloc - Allocate new devlink instance resources
*
@@ -7380,6 +7933,9 @@ struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size)
if (WARN_ON(!ops))
return NULL;
+ if (!devlink_reload_actions_valid(ops))
+ return NULL;
+
devlink = kzalloc(sizeof(*devlink) + priv_size, GFP_KERNEL);
if (!devlink)
return NULL;
@@ -7428,7 +7984,7 @@ EXPORT_SYMBOL_GPL(devlink_register);
void devlink_unregister(struct devlink *devlink)
{
mutex_lock(&devlink_mutex);
- WARN_ON(devlink_reload_supported(devlink) &&
+ WARN_ON(devlink_reload_supported(devlink->ops) &&
devlink->reload_enabled);
devlink_notify(devlink, DEVLINK_CMD_DEL);
list_del(&devlink->list);
@@ -7506,7 +8062,8 @@ static bool devlink_port_type_should_warn(struct devlink_port *devlink_port)
{
/* Ignore CPU and DSA flavours. */
return devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_CPU &&
- devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA;
+ devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_DSA &&
+ devlink_port->attrs.flavour != DEVLINK_PORT_FLAVOUR_UNUSED;
}
#define DEVLINK_PORT_TYPE_WARN_TIMEOUT (HZ * 3600)
@@ -7555,11 +8112,12 @@ int devlink_port_register(struct devlink *devlink,
devlink_port->index = port_index;
devlink_port->registered = true;
spin_lock_init(&devlink_port->type_lock);
+ INIT_LIST_HEAD(&devlink_port->reporter_list);
+ mutex_init(&devlink_port->reporters_lock);
list_add_tail(&devlink_port->list, &devlink->port_list);
INIT_LIST_HEAD(&devlink_port->param_list);
+ INIT_LIST_HEAD(&devlink_port->region_list);
mutex_unlock(&devlink->lock);
- INIT_LIST_HEAD(&devlink_port->reporter_list);
- mutex_init(&devlink_port->reporters_lock);
INIT_DELAYED_WORK(&devlink_port->type_warn_dw, &devlink_port_type_warn);
devlink_port_type_warn_schedule(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
@@ -7576,13 +8134,14 @@ void devlink_port_unregister(struct devlink_port *devlink_port)
{
struct devlink *devlink = devlink_port->devlink;
- WARN_ON(!list_empty(&devlink_port->reporter_list));
- mutex_destroy(&devlink_port->reporters_lock);
devlink_port_type_warn_cancel(devlink_port);
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL);
mutex_lock(&devlink->lock);
list_del(&devlink_port->list);
mutex_unlock(&devlink->lock);
+ WARN_ON(!list_empty(&devlink_port->reporter_list));
+ WARN_ON(!list_empty(&devlink_port->region_list));
+ mutex_destroy(&devlink_port->reporters_lock);
}
EXPORT_SYMBOL_GPL(devlink_port_unregister);
@@ -7600,14 +8159,8 @@ static void __devlink_port_type_set(struct devlink_port *devlink_port,
devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_NEW);
}
-/**
- * devlink_port_type_eth_set - Set port type to Ethernet
- *
- * @devlink_port: devlink port
- * @netdev: related netdevice
- */
-void devlink_port_type_eth_set(struct devlink_port *devlink_port,
- struct net_device *netdev)
+static void devlink_port_type_netdev_checks(struct devlink_port *devlink_port,
+ struct net_device *netdev)
{
const struct net_device_ops *ops = netdev->netdev_ops;
@@ -7641,6 +8194,24 @@ void devlink_port_type_eth_set(struct devlink_port *devlink_port,
err = ops->ndo_get_port_parent_id(netdev, &ppid);
WARN_ON(err != -EOPNOTSUPP);
}
+}
+
+/**
+ * devlink_port_type_eth_set - Set port type to Ethernet
+ *
+ * @devlink_port: devlink port
+ * @netdev: related netdevice
+ */
+void devlink_port_type_eth_set(struct devlink_port *devlink_port,
+ struct net_device *netdev)
+{
+ if (netdev)
+ devlink_port_type_netdev_checks(devlink_port, netdev);
+ else
+ dev_warn(devlink_port->devlink->dev,
+ "devlink port type for port %d set to Ethernet without a software interface reference, device type not supported by the kernel?\n",
+ devlink_port->index);
+
__devlink_port_type_set(devlink_port, DEVLINK_PORT_TYPE_ETH, netdev);
}
EXPORT_SYMBOL_GPL(devlink_port_type_eth_set);
@@ -7712,9 +8283,12 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_set);
* devlink_port_attrs_pci_pf_set - Set PCI PF port attributes
*
* @devlink_port: devlink port
+ * @controller: associated controller number for the devlink port instance
* @pf: associated PF for the devlink port instance
+ * @external: indicates if the port is for an external controller
*/
-void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf)
+void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, bool external)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int ret;
@@ -7723,8 +8297,9 @@ void devlink_port_attrs_pci_pf_set(struct devlink_port *devlink_port, u16 pf)
DEVLINK_PORT_FLAVOUR_PCI_PF);
if (ret)
return;
-
+ attrs->pci_pf.controller = controller;
attrs->pci_pf.pf = pf;
+ attrs->pci_pf.external = external;
}
EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
@@ -7732,11 +8307,13 @@ EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_pf_set);
* devlink_port_attrs_pci_vf_set - Set PCI VF port attributes
*
* @devlink_port: devlink port
+ * @controller: associated controller number for the devlink port instance
* @pf: associated PF for the devlink port instance
* @vf: associated VF of a PF for the devlink port instance
+ * @external: indicates if the port is for an external controller
*/
-void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
- u16 pf, u16 vf)
+void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port, u32 controller,
+ u16 pf, u16 vf, bool external)
{
struct devlink_port_attrs *attrs = &devlink_port->attrs;
int ret;
@@ -7745,8 +8322,10 @@ void devlink_port_attrs_pci_vf_set(struct devlink_port *devlink_port,
DEVLINK_PORT_FLAVOUR_PCI_VF);
if (ret)
return;
+ attrs->pci_vf.controller = controller;
attrs->pci_vf.pf = pf;
attrs->pci_vf.vf = vf;
+ attrs->pci_vf.external = external;
}
EXPORT_SYMBOL_GPL(devlink_port_attrs_pci_vf_set);
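A hedged example of the updated setters as a driver would call them while registering ports; controller 0 with external=false describes the local host, and the indices are illustrative:

	/* PF 0 on the local controller: phys_port_name stays "pf0". */
	devlink_port_attrs_pci_pf_set(pf_port, 0, 0, false);

	/* VF 3 of PF 0 owned by external controller 1: the code below
	 * renders phys_port_name as "c1pf0vf3".
	 */
	devlink_port_attrs_pci_vf_set(vf_port, 1, 0, 3, true);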
@@ -7771,15 +8350,30 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
break;
case DEVLINK_PORT_FLAVOUR_CPU:
case DEVLINK_PORT_FLAVOUR_DSA:
+ case DEVLINK_PORT_FLAVOUR_UNUSED:
/* As CPU and DSA ports do not have a netdevice associated,
* this case should never happen.
*/
WARN_ON(1);
return -EINVAL;
case DEVLINK_PORT_FLAVOUR_PCI_PF:
+ if (attrs->pci_pf.external) {
+ n = snprintf(name, len, "c%u", attrs->pci_pf.controller);
+ if (n >= len)
+ return -EINVAL;
+ len -= n;
+ name += n;
+ }
n = snprintf(name, len, "pf%u", attrs->pci_pf.pf);
break;
case DEVLINK_PORT_FLAVOUR_PCI_VF:
+ if (attrs->pci_vf.external) {
+ n = snprintf(name, len, "c%u", attrs->pci_vf.controller);
+ if (n >= len)
+ return -EINVAL;
+ len -= n;
+ name += n;
+ }
n = snprintf(name, len, "pf%uvf%u",
attrs->pci_vf.pf, attrs->pci_vf.vf);
break;
@@ -8431,7 +9025,7 @@ __devlink_param_driverinit_value_set(struct devlink *devlink,
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val)
{
- if (!devlink_reload_supported(devlink))
+ if (!devlink_reload_supported(devlink->ops))
return -EOPNOTSUPP;
return __devlink_param_driverinit_value_get(&devlink->param_list,
@@ -8478,7 +9072,7 @@ int devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
{
struct devlink *devlink = devlink_port->devlink;
- if (!devlink_reload_supported(devlink))
+ if (!devlink_reload_supported(devlink->ops))
return -EOPNOTSUPP;
return __devlink_param_driverinit_value_get(&devlink_port->param_list,
@@ -8627,6 +9221,57 @@ unlock:
EXPORT_SYMBOL_GPL(devlink_region_create);
/**
+ * devlink_port_region_create - create a new address region for a port
+ *
+ * @port: devlink port
+ * @ops: region operations and name
+ * @region_max_snapshots: Maximum supported number of snapshots for region
+ * @region_size: size of region
+ */
+struct devlink_region *
+devlink_port_region_create(struct devlink_port *port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ struct devlink *devlink = port->devlink;
+ struct devlink_region *region;
+ int err = 0;
+
+ if (WARN_ON(!ops) || WARN_ON(!ops->destructor))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&devlink->lock);
+
+ if (devlink_port_region_get_by_name(port, ops->name)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ region->devlink = devlink;
+ region->port = port;
+ region->max_snapshots = region_max_snapshots;
+ region->port_ops = ops;
+ region->size = region_size;
+ INIT_LIST_HEAD(&region->snapshot_list);
+ list_add_tail(&region->list, &port->region_list);
+ devlink_nl_region_notify(region, NULL, DEVLINK_CMD_REGION_NEW);
+
+ mutex_unlock(&devlink->lock);
+ return region;
+
+unlock:
+ mutex_unlock(&devlink->lock);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(devlink_port_region_create);
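A hedged usage sketch for per-port regions; FOO_REGION_SIZE, foo_read_port_regs() and the snapshot count are hypothetical, while the snapshot signature and the mandatory destructor match the checks above:

	static int foo_port_snapshot(struct devlink_port *port,
				     const struct devlink_port_region_ops *ops,
				     struct netlink_ext_ack *extack, u8 **data)
	{
		u8 *dump = kzalloc(FOO_REGION_SIZE, GFP_KERNEL);

		if (!dump)
			return -ENOMEM;
		foo_read_port_regs(port, dump);	/* hypothetical register read */
		*data = dump;
		return 0;
	}

	static const struct devlink_port_region_ops foo_port_region_ops = {
		.name = "foo-regs",
		.snapshot = foo_port_snapshot,
		.destructor = kfree,
	};

	region = devlink_port_region_create(port, &foo_port_region_ops,
					    8, FOO_REGION_SIZE);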
+
+/**
* devlink_region_destroy - destroy address region
*
* @region: devlink region to destroy
@@ -8803,6 +9448,22 @@ static const struct devlink_trap devlink_trap_generic[] = {
DEVLINK_TRAP(FLOW_ACTION_SAMPLE, CONTROL),
DEVLINK_TRAP(FLOW_ACTION_TRAP, CONTROL),
DEVLINK_TRAP(EARLY_DROP, DROP),
+ DEVLINK_TRAP(VXLAN_PARSING, DROP),
+ DEVLINK_TRAP(LLC_SNAP_PARSING, DROP),
+ DEVLINK_TRAP(VLAN_PARSING, DROP),
+ DEVLINK_TRAP(PPPOE_PPP_PARSING, DROP),
+ DEVLINK_TRAP(MPLS_PARSING, DROP),
+ DEVLINK_TRAP(ARP_PARSING, DROP),
+ DEVLINK_TRAP(IP_1_PARSING, DROP),
+ DEVLINK_TRAP(IP_N_PARSING, DROP),
+ DEVLINK_TRAP(GRE_PARSING, DROP),
+ DEVLINK_TRAP(UDP_PARSING, DROP),
+ DEVLINK_TRAP(TCP_PARSING, DROP),
+ DEVLINK_TRAP(IPSEC_PARSING, DROP),
+ DEVLINK_TRAP(SCTP_PARSING, DROP),
+ DEVLINK_TRAP(DCCP_PARSING, DROP),
+ DEVLINK_TRAP(GTP_PARSING, DROP),
+ DEVLINK_TRAP(ESP_PARSING, DROP),
};
#define DEVLINK_TRAP_GROUP(_id) \
@@ -8837,6 +9498,7 @@ static const struct devlink_trap_group devlink_trap_group_generic[] = {
DEVLINK_TRAP_GROUP(PTP_GENERAL),
DEVLINK_TRAP_GROUP(ACL_SAMPLE),
DEVLINK_TRAP_GROUP(ACL_TRAP),
+ DEVLINK_TRAP_GROUP(PARSER_ERROR_DROPS),
};
static int devlink_trap_generic_verify(const struct devlink_trap *trap)
@@ -9139,20 +9801,19 @@ devlink_trap_stats_update(struct devlink_stats __percpu *trap_stats,
}
static void
-devlink_trap_report_metadata_fill(struct net_dm_hw_metadata *hw_metadata,
- const struct devlink_trap_item *trap_item,
- struct devlink_port *in_devlink_port,
- const struct flow_action_cookie *fa_cookie)
+devlink_trap_report_metadata_set(struct devlink_trap_metadata *metadata,
+ const struct devlink_trap_item *trap_item,
+ struct devlink_port *in_devlink_port,
+ const struct flow_action_cookie *fa_cookie)
{
- struct devlink_trap_group_item *group_item = trap_item->group_item;
-
- hw_metadata->trap_group_name = group_item->group->name;
- hw_metadata->trap_name = trap_item->trap->name;
- hw_metadata->fa_cookie = fa_cookie;
+ metadata->trap_name = trap_item->trap->name;
+ metadata->trap_group_name = trap_item->group_item->group->name;
+ metadata->fa_cookie = fa_cookie;
+ metadata->trap_type = trap_item->trap->type;
spin_lock(&in_devlink_port->type_lock);
if (in_devlink_port->type == DEVLINK_PORT_TYPE_ETH)
- hw_metadata->input_dev = in_devlink_port->type_dev;
+ metadata->input_dev = in_devlink_port->type_dev;
spin_unlock(&in_devlink_port->type_lock);
}
@@ -9170,21 +9831,17 @@ void devlink_trap_report(struct devlink *devlink, struct sk_buff *skb,
{
struct devlink_trap_item *trap_item = trap_ctx;
- struct net_dm_hw_metadata hw_metadata = {};
devlink_trap_stats_update(trap_item->stats, skb->len);
devlink_trap_stats_update(trap_item->group_item->stats, skb->len);
- /* Control packets were not dropped by the device or encountered an
- * exception during forwarding and therefore should not be reported to
- * the kernel's drop monitor.
- */
- if (trap_item->trap->type == DEVLINK_TRAP_TYPE_CONTROL)
- return;
+ if (trace_devlink_trap_report_enabled()) {
+ struct devlink_trap_metadata metadata = {};
- devlink_trap_report_metadata_fill(&hw_metadata, trap_item,
- in_devlink_port, fa_cookie);
- net_dm_hw_report(skb, &hw_metadata);
+ devlink_trap_report_metadata_set(&metadata, trap_item,
+ in_devlink_port, fa_cookie);
+ trace_devlink_trap_report(devlink, skb, &metadata);
+ }
}
EXPORT_SYMBOL_GPL(devlink_trap_report);
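For context, the driver-facing entry point is unchanged; a hedged one-line sketch of a trapped-packet RX path (trap_ctx is the cookie devlink returned at trap registration, fa_cookie may be NULL):

	devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port, fa_cookie);

The difference after this patch is that metadata is only materialized, and the report only fires, when the devlink_trap_report tracepoint has a registered consumer such as drop_monitor below.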
@@ -9543,6 +10200,7 @@ out:
int devlink_compat_flash_update(struct net_device *dev, const char *file_name)
{
+ struct devlink_flash_update_params params = {};
struct devlink *devlink;
int ret;
@@ -9555,8 +10213,10 @@ int devlink_compat_flash_update(struct net_device *dev, const char *file_name)
goto out;
}
+ params.file_name = file_name;
+
mutex_lock(&devlink->lock);
- ret = devlink->ops->flash_update(devlink, file_name, NULL, NULL);
+ ret = devlink->ops->flash_update(devlink, &params, NULL);
mutex_unlock(&devlink->lock);
out:
@@ -9605,6 +10265,7 @@ int devlink_compat_switch_id_get(struct net_device *dev,
static void __net_exit devlink_pernet_pre_exit(struct net *net)
{
struct devlink *devlink;
+ u32 actions_performed;
int err;
/* In case network namespace is getting destroyed, reload
@@ -9613,9 +10274,12 @@ static void __net_exit devlink_pernet_pre_exit(struct net *net)
mutex_lock(&devlink_mutex);
list_for_each_entry(devlink, &devlink_list, list) {
if (net_eq(devlink_net(devlink), net)) {
- if (WARN_ON(!devlink_reload_supported(devlink)))
+ if (WARN_ON(!devlink_reload_supported(devlink->ops)))
continue;
- err = devlink_reload(devlink, &init_net, NULL);
+ err = devlink_reload(devlink, &init_net,
+ DEVLINK_RELOAD_ACTION_DRIVER_REINIT,
+ DEVLINK_RELOAD_LIMIT_UNSPEC,
+ &actions_performed, NULL);
if (err && err != -EOPNOTSUPP)
pr_warn("Failed to reload devlink instance into init_net\n");
}
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9704522b0872..571f191c06d9 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -26,13 +26,14 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/module.h>
-#include <net/drop_monitor.h>
#include <net/genetlink.h>
#include <net/netevent.h>
#include <net/flow_offload.h>
+#include <net/devlink.h>
#include <trace/events/skb.h>
#include <trace/events/napi.h>
+#include <trace/events/devlink.h>
#include <asm/unaligned.h>
@@ -114,13 +115,14 @@ struct net_dm_alert_ops {
int work, int budget);
void (*work_item_func)(struct work_struct *work);
void (*hw_work_item_func)(struct work_struct *work);
- void (*hw_probe)(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata);
+ void (*hw_trap_probe)(void *ignore, const struct devlink *devlink,
+ struct sk_buff *skb,
+ const struct devlink_trap_metadata *metadata);
};
struct net_dm_skb_cb {
union {
- struct net_dm_hw_metadata *hw_metadata;
+ struct devlink_trap_metadata *hw_metadata;
void *pc;
};
};
@@ -432,8 +434,9 @@ out:
}
static void
-net_dm_hw_summary_probe(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata)
+net_dm_hw_trap_summary_probe(void *ignore, const struct devlink *devlink,
+ struct sk_buff *skb,
+ const struct devlink_trap_metadata *metadata)
{
struct net_dm_hw_entries *hw_entries;
struct net_dm_hw_entry *hw_entry;
@@ -441,6 +444,9 @@ net_dm_hw_summary_probe(struct sk_buff *skb,
unsigned long flags;
int i;
+ if (metadata->trap_type == DEVLINK_TRAP_TYPE_CONTROL)
+ return;
+
hw_data = this_cpu_ptr(&dm_hw_cpu_data);
spin_lock_irqsave(&hw_data->lock, flags);
hw_entries = hw_data->hw_entries;
@@ -450,7 +456,7 @@ net_dm_hw_summary_probe(struct sk_buff *skb,
for (i = 0; i < hw_entries->num_entries; i++) {
hw_entry = &hw_entries->entries[i];
- if (!strncmp(hw_entry->trap_name, hw_metadata->trap_name,
+ if (!strncmp(hw_entry->trap_name, metadata->trap_name,
NET_DM_MAX_HW_TRAP_NAME_LEN - 1)) {
hw_entry->count++;
goto out;
@@ -460,7 +466,7 @@ net_dm_hw_summary_probe(struct sk_buff *skb,
goto out;
hw_entry = &hw_entries->entries[hw_entries->num_entries];
- strlcpy(hw_entry->trap_name, hw_metadata->trap_name,
+ strlcpy(hw_entry->trap_name, metadata->trap_name,
NET_DM_MAX_HW_TRAP_NAME_LEN - 1);
hw_entry->count = 1;
hw_entries->num_entries++;
@@ -479,7 +485,7 @@ static const struct net_dm_alert_ops net_dm_alert_summary_ops = {
.napi_poll_probe = trace_napi_poll_hit,
.work_item_func = send_dm_alert,
.hw_work_item_func = net_dm_hw_summary_work,
- .hw_probe = net_dm_hw_summary_probe,
+ .hw_trap_probe = net_dm_hw_trap_summary_probe,
};
static void net_dm_packet_trace_kfree_skb_hit(void *ignore,
@@ -705,7 +711,7 @@ static void net_dm_packet_work(struct work_struct *work)
}
static size_t
-net_dm_flow_action_cookie_size(const struct net_dm_hw_metadata *hw_metadata)
+net_dm_flow_action_cookie_size(const struct devlink_trap_metadata *hw_metadata)
{
return hw_metadata->fa_cookie ?
nla_total_size(hw_metadata->fa_cookie->cookie_len) : 0;
@@ -713,7 +719,7 @@ net_dm_flow_action_cookie_size(const struct net_dm_hw_metadata *hw_metadata)
static size_t
net_dm_hw_packet_report_size(size_t payload_len,
- const struct net_dm_hw_metadata *hw_metadata)
+ const struct devlink_trap_metadata *hw_metadata)
{
size_t size;
@@ -743,7 +749,7 @@ net_dm_hw_packet_report_size(size_t payload_len,
static int net_dm_hw_packet_report_fill(struct sk_buff *msg,
struct sk_buff *skb, size_t payload_len)
{
- struct net_dm_hw_metadata *hw_metadata;
+ struct devlink_trap_metadata *hw_metadata;
struct nlattr *attr;
void *hdr;
@@ -810,56 +816,56 @@ nla_put_failure:
return -EMSGSIZE;
}
-static struct net_dm_hw_metadata *
-net_dm_hw_metadata_clone(const struct net_dm_hw_metadata *hw_metadata)
+static struct devlink_trap_metadata *
+net_dm_hw_metadata_copy(const struct devlink_trap_metadata *metadata)
{
const struct flow_action_cookie *fa_cookie;
- struct net_dm_hw_metadata *n_hw_metadata;
+ struct devlink_trap_metadata *hw_metadata;
const char *trap_group_name;
const char *trap_name;
- n_hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
- if (!n_hw_metadata)
+ hw_metadata = kzalloc(sizeof(*hw_metadata), GFP_ATOMIC);
+ if (!hw_metadata)
return NULL;
- trap_group_name = kstrdup(hw_metadata->trap_group_name, GFP_ATOMIC);
+ trap_group_name = kstrdup(metadata->trap_group_name, GFP_ATOMIC);
if (!trap_group_name)
goto free_hw_metadata;
- n_hw_metadata->trap_group_name = trap_group_name;
+ hw_metadata->trap_group_name = trap_group_name;
- trap_name = kstrdup(hw_metadata->trap_name, GFP_ATOMIC);
+ trap_name = kstrdup(metadata->trap_name, GFP_ATOMIC);
if (!trap_name)
goto free_trap_group;
- n_hw_metadata->trap_name = trap_name;
+ hw_metadata->trap_name = trap_name;
- if (hw_metadata->fa_cookie) {
+ if (metadata->fa_cookie) {
size_t cookie_size = sizeof(*fa_cookie) +
- hw_metadata->fa_cookie->cookie_len;
+ metadata->fa_cookie->cookie_len;
- fa_cookie = kmemdup(hw_metadata->fa_cookie, cookie_size,
+ fa_cookie = kmemdup(metadata->fa_cookie, cookie_size,
GFP_ATOMIC);
if (!fa_cookie)
goto free_trap_name;
- n_hw_metadata->fa_cookie = fa_cookie;
+ hw_metadata->fa_cookie = fa_cookie;
}
- n_hw_metadata->input_dev = hw_metadata->input_dev;
- if (n_hw_metadata->input_dev)
- dev_hold(n_hw_metadata->input_dev);
+ hw_metadata->input_dev = metadata->input_dev;
+ if (hw_metadata->input_dev)
+ dev_hold(hw_metadata->input_dev);
- return n_hw_metadata;
+ return hw_metadata;
free_trap_name:
kfree(trap_name);
free_trap_group:
kfree(trap_group_name);
free_hw_metadata:
- kfree(n_hw_metadata);
+ kfree(hw_metadata);
return NULL;
}
static void
-net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
+net_dm_hw_metadata_free(const struct devlink_trap_metadata *hw_metadata)
{
if (hw_metadata->input_dev)
dev_put(hw_metadata->input_dev);
@@ -871,7 +877,7 @@ net_dm_hw_metadata_free(const struct net_dm_hw_metadata *hw_metadata)
static void net_dm_hw_packet_report(struct sk_buff *skb)
{
- struct net_dm_hw_metadata *hw_metadata;
+ struct devlink_trap_metadata *hw_metadata;
struct sk_buff *msg;
size_t payload_len;
int rc;
@@ -924,15 +930,19 @@ static void net_dm_hw_packet_work(struct work_struct *work)
}
static void
-net_dm_hw_packet_probe(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata)
+net_dm_hw_trap_packet_probe(void *ignore, const struct devlink *devlink,
+ struct sk_buff *skb,
+ const struct devlink_trap_metadata *metadata)
{
- struct net_dm_hw_metadata *n_hw_metadata;
+ struct devlink_trap_metadata *n_hw_metadata;
ktime_t tstamp = ktime_get_real();
struct per_cpu_dm_data *hw_data;
struct sk_buff *nskb;
unsigned long flags;
+ if (metadata->trap_type == DEVLINK_TRAP_TYPE_CONTROL)
+ return;
+
if (!skb_mac_header_was_set(skb))
return;
@@ -940,7 +950,7 @@ net_dm_hw_packet_probe(struct sk_buff *skb,
if (!nskb)
return;
- n_hw_metadata = net_dm_hw_metadata_clone(hw_metadata);
+ n_hw_metadata = net_dm_hw_metadata_copy(metadata);
if (!n_hw_metadata)
goto free;
@@ -975,7 +985,7 @@ static const struct net_dm_alert_ops net_dm_alert_packet_ops = {
.napi_poll_probe = net_dm_packet_trace_napi_poll_hit,
.work_item_func = net_dm_packet_work,
.hw_work_item_func = net_dm_hw_packet_work,
- .hw_probe = net_dm_hw_packet_probe,
+ .hw_trap_probe = net_dm_hw_trap_packet_probe,
};
static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
@@ -983,25 +993,32 @@ static const struct net_dm_alert_ops *net_dm_alert_ops_arr[] = {
[NET_DM_ALERT_MODE_PACKET] = &net_dm_alert_packet_ops,
};
-void net_dm_hw_report(struct sk_buff *skb,
- const struct net_dm_hw_metadata *hw_metadata)
+#if IS_ENABLED(CONFIG_NET_DEVLINK)
+static int net_dm_hw_probe_register(const struct net_dm_alert_ops *ops)
{
- rcu_read_lock();
-
- if (!monitor_hw)
- goto out;
+ return register_trace_devlink_trap_report(ops->hw_trap_probe, NULL);
+}
- net_dm_alert_ops_arr[net_dm_alert_mode]->hw_probe(skb, hw_metadata);
+static void net_dm_hw_probe_unregister(const struct net_dm_alert_ops *ops)
+{
+ unregister_trace_devlink_trap_report(ops->hw_trap_probe, NULL);
+ tracepoint_synchronize_unregister();
+}
+#else
+static int net_dm_hw_probe_register(const struct net_dm_alert_ops *ops)
+{
+ return -EOPNOTSUPP;
+}
-out:
- rcu_read_unlock();
+static void net_dm_hw_probe_unregister(const struct net_dm_alert_ops *ops)
+{
}
-EXPORT_SYMBOL_GPL(net_dm_hw_report);
+#endif
static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
{
const struct net_dm_alert_ops *ops;
- int cpu;
+ int cpu, rc;
if (monitor_hw) {
NL_SET_ERR_MSG_MOD(extack, "Hardware monitoring already enabled");
@@ -1025,13 +1042,24 @@ static int net_dm_hw_monitor_start(struct netlink_ext_ack *extack)
kfree(hw_entries);
}
+ rc = net_dm_hw_probe_register(ops);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to connect probe to devlink_trap_probe() tracepoint");
+ goto err_module_put;
+ }
+
monitor_hw = true;
return 0;
+
+err_module_put:
+ module_put(THIS_MODULE);
+ return rc;
}
static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
{
+ const struct net_dm_alert_ops *ops;
int cpu;
if (!monitor_hw) {
@@ -1039,12 +1067,11 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
return;
}
+ ops = net_dm_alert_ops_arr[net_dm_alert_mode];
+
monitor_hw = false;
- /* After this call returns we are guaranteed that no CPU is processing
- * any hardware drops.
- */
- synchronize_rcu();
+ net_dm_hw_probe_unregister(ops);
for_each_possible_cpu(cpu) {
struct per_cpu_dm_data *hw_data = &per_cpu(dm_hw_cpu_data, cpu);
@@ -1053,7 +1080,7 @@ static void net_dm_hw_monitor_stop(struct netlink_ext_ack *extack)
del_timer_sync(&hw_data->send_timer);
cancel_work_sync(&hw_data->dm_alert_work);
while ((skb = __skb_dequeue(&hw_data->drop_queue))) {
- struct net_dm_hw_metadata *hw_metadata;
+ struct devlink_trap_metadata *hw_metadata;
hw_metadata = NET_DM_SKB_CB(skb)->hw_metadata;
net_dm_hw_metadata_free(hw_metadata);
@@ -1548,7 +1575,7 @@ static const struct nla_policy net_dm_nl_policy[NET_DM_ATTR_MAX + 1] = {
[NET_DM_ATTR_HW_DROPS] = {. type = NLA_FLAG },
};
-static const struct genl_ops dropmon_ops[] = {
+static const struct genl_small_ops dropmon_ops[] = {
{
.cmd = NET_DM_CMD_CONFIG,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -1598,8 +1625,8 @@ static struct genl_family net_drop_monitor_family __ro_after_init = {
.pre_doit = net_dm_nl_pre_doit,
.post_doit = net_dm_nl_post_doit,
.module = THIS_MODULE,
- .ops = dropmon_ops,
- .n_ops = ARRAY_SIZE(dropmon_ops),
+ .small_ops = dropmon_ops,
+ .n_small_ops = ARRAY_SIZE(dropmon_ops),
.mcgrps = dropmon_mcgrps,
.n_mcgrps = ARRAY_SIZE(dropmon_mcgrps),
};
diff --git a/net/core/filter.c b/net/core/filter.c
index b5f3faac5e3b..c5e2a1c5fd8d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -76,6 +76,10 @@
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
+#include <net/tls.h>
+
+static const struct bpf_func_proto *
+bpf_sk_base_func_proto(enum bpf_func_id func_id);
int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
@@ -2160,13 +2164,234 @@ static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
return __bpf_redirect_no_mac(skb, dev, flags);
}
+#if IS_ENABLED(CONFIG_IPV6)
+static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct net_device *dev = dst->dev;
+ u32 hh_len = LL_RESERVED_SPACE(dev);
+ const struct in6_addr *nexthop;
+ struct neighbour *neigh;
+
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+ goto out_drop;
+ }
+
+ skb->dev = dev;
+ skb->tstamp = 0;
+
+ if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_realloc_headroom(skb, hh_len);
+ if (unlikely(!skb2)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+ consume_skb(skb);
+ skb = skb2;
+ }
+
+ rcu_read_lock_bh();
+ nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
+ &ipv6_hdr(skb)->daddr);
+ neigh = ip_neigh_gw6(dev, nexthop);
+ if (likely(!IS_ERR(neigh))) {
+ int ret;
+
+ sock_confirm_neigh(skb, neigh);
+ dev_xmit_recursion_inc();
+ ret = neigh_output(neigh, skb, false);
+ dev_xmit_recursion_dec();
+ rcu_read_unlock_bh();
+ return ret;
+ }
+ rcu_read_unlock_bh();
+ IP6_INC_STATS(dev_net(dst->dev),
+ ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
+out_drop:
+ kfree_skb(skb);
+ return -ENETDOWN;
+}
+
+static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct net *net = dev_net(dev);
+ int err, ret = NET_XMIT_DROP;
+ struct dst_entry *dst;
+ struct flowi6 fl6 = {
+ .flowi6_flags = FLOWI_FLAG_ANYSRC,
+ .flowi6_mark = skb->mark,
+ .flowlabel = ip6_flowinfo(ip6h),
+ .flowi6_oif = dev->ifindex,
+ .flowi6_proto = ip6h->nexthdr,
+ .daddr = ip6h->daddr,
+ .saddr = ip6h->saddr,
+ };
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
+ if (IS_ERR(dst))
+ goto out_drop;
+
+ skb_dst_set(skb, dst);
+
+ err = bpf_out_neigh_v6(net, skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out_xmit;
+out_drop:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out_xmit:
+ return ret;
+}
+#else
+static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev)
+{
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+#endif /* CONFIG_IPV6 */
+
+#if IS_ENABLED(CONFIG_INET)
+static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ struct rtable *rt = container_of(dst, struct rtable, dst);
+ struct net_device *dev = dst->dev;
+ u32 hh_len = LL_RESERVED_SPACE(dev);
+ struct neighbour *neigh;
+ bool is_v6gw = false;
+
+ if (dev_xmit_recursion()) {
+ net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+ goto out_drop;
+ }
+
+ skb->dev = dev;
+ skb->tstamp = 0;
+
+ if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_realloc_headroom(skb, hh_len);
+ if (unlikely(!skb2)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+ consume_skb(skb);
+ skb = skb2;
+ }
+
+ rcu_read_lock_bh();
+ neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
+ if (likely(!IS_ERR(neigh))) {
+ int ret;
+
+ sock_confirm_neigh(skb, neigh);
+ dev_xmit_recursion_inc();
+ ret = neigh_output(neigh, skb, is_v6gw);
+ dev_xmit_recursion_dec();
+ rcu_read_unlock_bh();
+ return ret;
+ }
+ rcu_read_unlock_bh();
+out_drop:
+ kfree_skb(skb);
+ return -ENETDOWN;
+}
+
+static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev)
+{
+ const struct iphdr *ip4h = ip_hdr(skb);
+ struct net *net = dev_net(dev);
+ int err, ret = NET_XMIT_DROP;
+ struct rtable *rt;
+ struct flowi4 fl4 = {
+ .flowi4_flags = FLOWI_FLAG_ANYSRC,
+ .flowi4_mark = skb->mark,
+ .flowi4_tos = RT_TOS(ip4h->tos),
+ .flowi4_oif = dev->ifindex,
+ .flowi4_proto = ip4h->protocol,
+ .daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
+ };
+
+ rt = ip_route_output_flow(net, &fl4, NULL);
+ if (IS_ERR(rt))
+ goto out_drop;
+ if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
+ ip_rt_put(rt);
+ goto out_drop;
+ }
+
+ skb_dst_set(skb, &rt->dst);
+
+ err = bpf_out_neigh_v4(net, skb);
+ if (unlikely(net_xmit_eval(err)))
+ dev->stats.tx_errors++;
+ else
+ ret = NET_XMIT_SUCCESS;
+ goto out_xmit;
+out_drop:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+out_xmit:
+ return ret;
+}
+#else
+static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev)
+{
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+}
+#endif /* CONFIG_INET */
+
+static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ethhdr *ethh = eth_hdr(skb);
+
+ if (unlikely(skb->mac_header >= skb->network_header))
+ goto out;
+ bpf_push_mac_rcsum(skb);
+ if (is_multicast_ether_addr(ethh->h_dest))
+ goto out;
+
+ skb_pull(skb, sizeof(*ethh));
+ skb_unset_mac_header(skb);
+ skb_reset_network_header(skb);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return __bpf_redirect_neigh_v4(skb, dev);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ return __bpf_redirect_neigh_v6(skb, dev);
+out:
+ kfree_skb(skb);
+ return -ENOTSUPP;
+}
+
+/* Internal, non-exposed redirect flags. */
+enum {
+ BPF_F_NEIGH = (1ULL << 1),
+ BPF_F_PEER = (1ULL << 2),
+#define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER)
+};
+
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
struct net_device *dev;
struct sk_buff *clone;
int ret;
- if (unlikely(flags & ~(BPF_F_INGRESS)))
+ if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return -EINVAL;
dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
@@ -2203,11 +2428,45 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = {
DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
+int skb_do_redirect(struct sk_buff *skb)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+ struct net *net = dev_net(skb->dev);
+ struct net_device *dev;
+ u32 flags = ri->flags;
+
+ dev = dev_get_by_index_rcu(net, ri->tgt_index);
+ ri->tgt_index = 0;
+ ri->flags = 0;
+ if (unlikely(!dev))
+ goto out_drop;
+ if (flags & BPF_F_PEER) {
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (unlikely(!ops->ndo_get_peer_dev ||
+ !skb_at_tc_ingress(skb)))
+ goto out_drop;
+ dev = ops->ndo_get_peer_dev(dev);
+ if (unlikely(!dev ||
+ !is_skb_forwardable(dev, skb) ||
+ net_eq(net, dev_net(dev))))
+ goto out_drop;
+ skb->dev = dev;
+ return -EAGAIN;
+ }
+ return flags & BPF_F_NEIGH ?
+ __bpf_redirect_neigh(skb, dev) :
+ __bpf_redirect(skb, dev, flags);
+out_drop:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
- if (unlikely(flags & ~(BPF_F_INGRESS)))
+ if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
return TC_ACT_SHOT;
ri->flags = flags;
@@ -2216,29 +2475,56 @@ BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
return TC_ACT_REDIRECT;
}
-int skb_do_redirect(struct sk_buff *skb)
+static const struct bpf_func_proto bpf_redirect_proto = {
+ .func = bpf_redirect,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_ANYTHING,
+ .arg2_type = ARG_ANYTHING,
+};
+
+BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
{
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
- struct net_device *dev;
- dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
- ri->tgt_index = 0;
- if (unlikely(!dev)) {
- kfree_skb(skb);
- return -EINVAL;
- }
+ if (unlikely(flags))
+ return TC_ACT_SHOT;
- return __bpf_redirect(skb, dev, ri->flags);
+ ri->flags = BPF_F_PEER;
+ ri->tgt_index = ifindex;
+
+ return TC_ACT_REDIRECT;
}
-static const struct bpf_func_proto bpf_redirect_proto = {
- .func = bpf_redirect,
+static const struct bpf_func_proto bpf_redirect_peer_proto = {
+ .func = bpf_redirect_peer,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
.arg2_type = ARG_ANYTHING,
};
+BPF_CALL_2(bpf_redirect_neigh, u32, ifindex, u64, flags)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ if (unlikely(flags))
+ return TC_ACT_SHOT;
+
+ ri->flags = BPF_F_NEIGH;
+ ri->tgt_index = ifindex;
+
+ return TC_ACT_REDIRECT;
+}
+
+static const struct bpf_func_proto bpf_redirect_neigh_proto = {
+ .func = bpf_redirect_neigh,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_ANYTHING,
+ .arg2_type = ARG_ANYTHING,
+};
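A hedged tc BPF sketch exercising the two new helpers; the section names and ifindex values are illustrative, and the flags argument must be zero as enforced above:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("classifier")
	int tc_redirect_neigh(struct __sk_buff *skb)
	{
		/* L3 redirect out of ifindex 4; the stack fills in the
		 * Ethernet header via neighbour resolution.
		 */
		return bpf_redirect_neigh(4, 0);
	}

	SEC("classifier")
	int tc_redirect_peer(struct __sk_buff *skb)
	{
		/* At tc ingress on a veth, hop straight to the peer device
		 * in the other netns without re-entering the qdisc layer.
		 */
		return bpf_redirect_peer(5, 0);
	}

	char LICENSE[] SEC("license") = "GPL";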
+
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
msg->apply_bytes = bytes;
@@ -2704,6 +2990,23 @@ static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
};
+
+BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb)
+{
+ struct sock *sk = skb_to_full_sk(skb);
+
+ if (!sk || !sk_fullsock(sk))
+ return 0;
+
+ return sock_cgroup_classid(&sk->sk_cgrp_data);
+}
+
+static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = {
+ .func = bpf_skb_cgroup_classid,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+};
#endif
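A tc-side sketch of consuming the new classid helper; the classid value compared against is a made-up example:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int match_classid(struct __sk_buff *skb)
{
	/* net_cls classid of the socket behind this skb; 0 if none. */
	return bpf_skb_cgroup_classid(skb) == 0x100001 ?
	       TC_ACT_OK : TC_ACT_UNSPEC;
}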
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
@@ -3215,6 +3518,48 @@ static u32 __bpf_skb_max_len(const struct sk_buff *skb)
SKB_MAX_ALLOC;
}
+BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
+ u32, mode, u64, flags)
+{
+ u32 len_diff_abs = abs(len_diff);
+ bool shrink = len_diff < 0;
+ int ret = 0;
+
+ if (unlikely(flags || mode))
+ return -EINVAL;
+ if (unlikely(len_diff_abs > 0xfffU))
+ return -EFAULT;
+
+ if (!shrink) {
+ ret = skb_cow(skb, len_diff);
+ if (unlikely(ret < 0))
+ return ret;
+ __skb_push(skb, len_diff_abs);
+ memset(skb->data, 0, len_diff_abs);
+ } else {
+ if (unlikely(!pskb_may_pull(skb, len_diff_abs)))
+ return -ENOMEM;
+ __skb_pull(skb, len_diff_abs);
+ }
+ bpf_compute_data_end_sk_skb(skb);
+ if (tls_sw_has_ctx_rx(skb->sk)) {
+ struct strp_msg *rxm = strp_msg(skb);
+
+ rxm->full_len += len_diff;
+ }
+ return ret;
+}
+
+static const struct bpf_func_proto sk_skb_adjust_room_proto = {
+ .func = sk_skb_adjust_room,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+ .arg4_type = ARG_ANYTHING,
+};
+
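As a sketch, an SK_SKB verdict program could use the new flavour to strip a fixed-size framing header; the 4-byte frame length is an assumed application framing, not something from this patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_skb/stream_verdict")
int strip_frame_hdr(struct __sk_buff *skb)
{
	/* mode and flags must be 0 for the sk_skb flavour; a negative
	 * len_diff trims bytes from the front of the skb.
	 */
	if (bpf_skb_adjust_room(skb, -4, 0, 0))
		return SK_DROP;
	return SK_PASS;
}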
BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
u32, mode, u64, flags)
{
@@ -3803,19 +4148,18 @@ static const struct bpf_func_proto bpf_skb_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
-BTF_ID_LIST(bpf_skb_output_btf_ids)
-BTF_ID(struct, sk_buff)
+BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
const struct bpf_func_proto bpf_skb_output_proto = {
.func = bpf_skb_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_skb_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_skb_output_btf_ids,
};
static unsigned short bpf_tunnel_key_af(u64 flags)
@@ -4086,18 +4430,17 @@ static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
{
struct cgroup *cgrp;
+ sk = sk_to_full_sk(sk);
+ if (!sk || !sk_fullsock(sk))
+ return 0;
+
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
return cgroup_id(cgrp);
}
BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
{
- struct sock *sk = skb_to_full_sk(skb);
-
- if (!sk || !sk_fullsock(sk))
- return 0;
-
- return __bpf_sk_cgroup_id(sk);
+ return __bpf_sk_cgroup_id(skb->sk);
}
static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
@@ -4113,6 +4456,10 @@ static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
struct cgroup *ancestor;
struct cgroup *cgrp;
+ sk = sk_to_full_sk(sk);
+ if (!sk || !sk_fullsock(sk))
+ return 0;
+
cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
ancestor = cgroup_ancestor(cgrp, ancestor_level);
if (!ancestor)
@@ -4124,12 +4471,7 @@ static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
ancestor_level)
{
- struct sock *sk = skb_to_full_sk(skb);
-
- if (!sk || !sk_fullsock(sk))
- return 0;
-
- return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
+ return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level);
}
static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
@@ -4149,7 +4491,7 @@ static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
.func = bpf_sk_cgroup_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_SOCKET,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
@@ -4161,7 +4503,7 @@ static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
.func = bpf_sk_ancestor_cgroup_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_SOCKET,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_ANYTHING,
};
#endif
@@ -4199,24 +4541,23 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
-BTF_ID_LIST(bpf_xdp_output_btf_ids)
-BTF_ID(struct, xdp_buff)
+BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
const struct bpf_func_proto bpf_xdp_output_proto = {
.func = bpf_xdp_event_output,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &bpf_xdp_output_btf_ids[0],
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
- .btf_id = bpf_xdp_output_btf_ids,
};
BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
- return skb->sk ? sock_gen_cookie(skb->sk) : 0;
+ return skb->sk ? __sock_gen_cookie(skb->sk) : 0;
}
static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
@@ -4228,7 +4569,7 @@ static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
{
- return sock_gen_cookie(ctx->sk);
+ return __sock_gen_cookie(ctx->sk);
}
static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
@@ -4240,7 +4581,7 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
{
- return sock_gen_cookie(ctx);
+ return __sock_gen_cookie(ctx);
}
static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
@@ -4252,7 +4593,7 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
{
- return sock_gen_cookie(ctx->sk);
+ return __sock_gen_cookie(ctx->sk);
}
static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
@@ -4265,7 +4606,7 @@ static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
static u64 __bpf_get_netns_cookie(struct sock *sk)
{
#ifdef CONFIG_NET_NS
- return net_gen_cookie(sk ? sk->sk_net.net : &init_net);
+ return __net_gen_cookie(sk ? sk->sk_net.net : &init_net);
#else
return 0;
#endif
@@ -4313,10 +4654,8 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
.arg1_type = ARG_PTR_TO_CTX,
};
-#define SOCKOPT_CC_REINIT (1 << 0)
-
static int _bpf_setsockopt(struct sock *sk, int level, int optname,
- char *optval, int optlen, u32 flags)
+ char *optval, int optlen)
{
char devname[IFNAMSIZ];
int val, valbool;
@@ -4449,16 +4788,15 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
sk->sk_prot->setsockopt == tcp_setsockopt) {
if (optname == TCP_CONGESTION) {
char name[TCP_CA_NAME_MAX];
- bool reinit = flags & SOCKOPT_CC_REINIT;
strncpy(name, optval, min_t(long, optlen,
TCP_CA_NAME_MAX-1));
name[TCP_CA_NAME_MAX-1] = 0;
- ret = tcp_set_congestion_control(sk, name, false,
- reinit, true);
+ ret = tcp_set_congestion_control(sk, name, false, true);
} else {
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
+ unsigned long timeout;
if (optlen != sizeof(int))
return -EINVAL;
@@ -4480,6 +4818,20 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
tp->snd_ssthresh = val;
}
break;
+ case TCP_BPF_DELACK_MAX:
+ timeout = usecs_to_jiffies(val);
+ if (timeout > TCP_DELACK_MAX ||
+ timeout < TCP_TIMEOUT_MIN)
+ return -EINVAL;
+ inet_csk(sk)->icsk_delack_max = timeout;
+ break;
+ case TCP_BPF_RTO_MIN:
+ timeout = usecs_to_jiffies(val);
+ if (timeout > TCP_RTO_MIN ||
+ timeout < TCP_TIMEOUT_MIN)
+ return -EINVAL;
+ inet_csk(sk)->icsk_rto_min = timeout;
+ break;
case TCP_SAVE_SYN:
if (val < 0 || val > 1)
ret = -EINVAL;
@@ -4513,6 +4865,10 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
else
icsk->icsk_user_timeout = val;
break;
+ case TCP_NOTSENT_LOWAT:
+ tp->notsent_lowat = val;
+ sk->sk_write_space(sk);
+ break;
default:
ret = -EINVAL;
}
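A sockops-side sketch of the two new knobs; the microsecond values and the hook point are arbitrary illustrations:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_TCP 6	/* assumed here; normally from <netinet/tcp.h> */

SEC("sockops")
int tune_tcp_timers(struct bpf_sock_ops *skops)
{
	int delack = 20000;	/* usec, must lie in [TCP_TIMEOUT_MIN, TCP_DELACK_MAX] */
	int rto = 30000;	/* usec, must lie in [TCP_TIMEOUT_MIN, TCP_RTO_MIN] */

	if (skops->op == BPF_SOCK_OPS_TCP_CONNECT_CB) {
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_DELACK_MAX,
			       &delack, sizeof(delack));
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_RTO_MIN,
			       &rto, sizeof(rto));
	}
	return 1;
}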
@@ -4550,9 +4906,9 @@ static int _bpf_getsockopt(struct sock *sk, int level, int optname,
tp = tcp_sk(sk);
if (optlen <= 0 || !tp->saved_syn ||
- optlen > tp->saved_syn[0])
+ optlen > tcp_saved_syn_len(tp->saved_syn))
goto err_clear;
- memcpy(optval, tp->saved_syn + 1, optlen);
+ memcpy(optval, tp->saved_syn->data, optlen);
break;
default:
goto err_clear;
@@ -4600,9 +4956,7 @@ err_clear:
BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
int, level, int, optname, char *, optval, int, optlen)
{
- u32 flags = 0;
- return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen,
- flags);
+ return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
@@ -4636,11 +4990,7 @@ static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
- u32 flags = 0;
- if (bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
- flags |= SOCKOPT_CC_REINIT;
- return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen,
- flags);
+ return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
}
static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
@@ -4654,9 +5004,99 @@ static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
.arg5_type = ARG_CONST_SIZE,
};
+static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock,
+ int optname, const u8 **start)
+{
+ struct sk_buff *syn_skb = bpf_sock->syn_skb;
+ const u8 *hdr_start;
+ int ret;
+
+ if (syn_skb) {
+ /* sk is a request_sock here */
+
+ if (optname == TCP_BPF_SYN) {
+ hdr_start = syn_skb->data;
+ ret = tcp_hdrlen(syn_skb);
+ } else if (optname == TCP_BPF_SYN_IP) {
+ hdr_start = skb_network_header(syn_skb);
+ ret = skb_network_header_len(syn_skb) +
+ tcp_hdrlen(syn_skb);
+ } else {
+ /* optname == TCP_BPF_SYN_MAC */
+ hdr_start = skb_mac_header(syn_skb);
+ ret = skb_mac_header_len(syn_skb) +
+ skb_network_header_len(syn_skb) +
+ tcp_hdrlen(syn_skb);
+ }
+ } else {
+ struct sock *sk = bpf_sock->sk;
+ struct saved_syn *saved_syn;
+
+ if (sk->sk_state == TCP_NEW_SYN_RECV)
+ /* SYNACK retransmit: bpf_sock->syn_skb is not
+ * available here, so fall back to saved_syn
+ * (if it was saved).
+ */
+ saved_syn = inet_reqsk(sk)->saved_syn;
+ else
+ saved_syn = tcp_sk(sk)->saved_syn;
+
+ if (!saved_syn)
+ return -ENOENT;
+
+ if (optname == TCP_BPF_SYN) {
+ hdr_start = saved_syn->data +
+ saved_syn->mac_hdrlen +
+ saved_syn->network_hdrlen;
+ ret = saved_syn->tcp_hdrlen;
+ } else if (optname == TCP_BPF_SYN_IP) {
+ hdr_start = saved_syn->data +
+ saved_syn->mac_hdrlen;
+ ret = saved_syn->network_hdrlen +
+ saved_syn->tcp_hdrlen;
+ } else {
+ /* optname == TCP_BPF_SYN_MAC */
+
+ /* TCP_SAVE_SYN may not have saved the mac hdr */
+ if (!saved_syn->mac_hdrlen)
+ return -ENOENT;
+
+ hdr_start = saved_syn->data;
+ ret = saved_syn->mac_hdrlen +
+ saved_syn->network_hdrlen +
+ saved_syn->tcp_hdrlen;
+ }
+ }
+
+ *start = hdr_start;
+ return ret;
+}
+
BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
int, level, int, optname, char *, optval, int, optlen)
{
+ if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
+ optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
+ int ret, copy_len = 0;
+ const u8 *start;
+
+ ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start);
+ if (ret > 0) {
+ copy_len = ret;
+ if (optlen < copy_len) {
+ copy_len = optlen;
+ ret = -ENOSPC;
+ }
+
+ memcpy(optval, start, copy_len);
+ }
+
+ /* Zero out unused buffer at the end */
+ memset(optval + copy_len, 0, optlen - copy_len);
+
+ return ret;
+ }
+
return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen);
}
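Sketch of pulling the headers of the original SYN from a sockops program; the buffer size is an assumption, and TCP_SAVE_SYN must have been set for saved_syn to exist on this path:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_TCP 6	/* assumed here; normally from <netinet/tcp.h> */

SEC("sockops")
int read_syn(struct bpf_sock_ops *skops)
{
	char syn[80];	/* assumed large enough for TCP hdr + options */
	int ret;

	if (skops->op != BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB)
		return 1;

	/* ret > 0: bytes copied; -ENOSPC: syn[] too small (truncated);
	 * -ENOENT: the SYN was not saved.
	 */
	ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN,
			     syn, sizeof(syn));
	return 1;
}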
@@ -4794,7 +5234,6 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
params->h_vlan_TCI = 0;
params->h_vlan_proto = 0;
- params->ifindex = dev->ifindex;
return 0;
}
@@ -4891,6 +5330,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
dev = nhc->nhc_dev;
params->rt_metric = res.fi->fib_priority;
+ params->ifindex = dev->ifindex;
/* xdp and cls_bpf programs are run in RCU-bh so
* rcu_read_lock_bh is not needed here
@@ -5016,6 +5456,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
dev = res.nh->fib_nh_dev;
params->rt_metric = res.f6i->fib6_metric;
+ params->ifindex = dev->ifindex;
/* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
* not needed here.
@@ -5601,7 +6042,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
BPF_CALL_1(bpf_sk_release, struct sock *, sk)
{
- if (sk_is_refcounted(sk))
+ if (sk && sk_is_refcounted(sk))
sock_gen_put(sk);
return 0;
}
@@ -5610,7 +6051,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
.func = bpf_sk_release,
.gpl_only = false,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};
BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
@@ -5992,7 +6433,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len
u32 cookie;
int ret;
- if (unlikely(th_len < sizeof(*th)))
+ if (unlikely(!sk || th_len < sizeof(*th)))
return -EINVAL;
/* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
@@ -6045,7 +6486,7 @@ static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
.gpl_only = true,
.pkt_access = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM,
@@ -6059,7 +6500,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
u32 cookie;
u16 mss;
- if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
+ if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4))
return -EINVAL;
if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
@@ -6114,7 +6555,7 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
.gpl_only = true, /* __cookie_v*_init_sequence() is GPL */
.pkt_access = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_SOCK_COMMON,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
.arg4_type = ARG_PTR_TO_MEM,
@@ -6123,7 +6564,7 @@ static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
{
- if (flags != 0)
+ if (!sk || flags != 0)
return -EINVAL;
if (!skb_at_tc_ingress(skb))
return -EOPNOTSUPP;
@@ -6147,7 +6588,233 @@ static const struct bpf_func_proto bpf_sk_assign_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
- .arg2_type = ARG_PTR_TO_SOCK_COMMON,
+ .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
+ u8 search_kind, const u8 *magic,
+ u8 magic_len, bool *eol)
+{
+ u8 kind, kind_len;
+
+ *eol = false;
+
+ while (op < opend) {
+ kind = op[0];
+
+ if (kind == TCPOPT_EOL) {
+ *eol = true;
+ return ERR_PTR(-ENOMSG);
+ } else if (kind == TCPOPT_NOP) {
+ op++;
+ continue;
+ }
+
+ if (opend - op < 2 || opend - op < op[1] || op[1] < 2)
+ /* Something is wrong in the received header.
+ * Follow the TCP stack's tcp_parse_options()
+ * and just bail here.
+ */
+ return ERR_PTR(-EFAULT);
+
+ kind_len = op[1];
+ if (search_kind == kind) {
+ if (!magic_len)
+ return op;
+
+ if (magic_len > kind_len - 2)
+ return ERR_PTR(-ENOMSG);
+
+ if (!memcmp(&op[2], magic, magic_len))
+ return op;
+ }
+
+ op += kind_len;
+ }
+
+ return ERR_PTR(-ENOMSG);
+}
+
+BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
+ void *, search_res, u32, len, u64, flags)
+{
+ bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN;
+ const u8 *op, *opend, *magic, *search = search_res;
+ u8 search_kind, search_len, copy_len, magic_len;
+ int ret;
+
+ /* Two bytes is the minimum option length, excluding TCPOPT_NOP
+ * and TCPOPT_EOL, which are useless for a bpf prog to learn;
+ * this helper disallows loading them as well.
+ */
+ if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN)
+ return -EINVAL;
+
+ search_kind = search[0];
+ search_len = search[1];
+
+ if (search_len > len || search_kind == TCPOPT_NOP ||
+ search_kind == TCPOPT_EOL)
+ return -EINVAL;
+
+ if (search_kind == TCPOPT_EXP || search_kind == 253) {
+ /* 16 or 32 bit magic. +2 for kind and kind length */
+ if (search_len != 4 && search_len != 6)
+ return -EINVAL;
+ magic = &search[2];
+ magic_len = search_len - 2;
+ } else {
+ if (search_len)
+ return -EINVAL;
+ magic = NULL;
+ magic_len = 0;
+ }
+
+ if (load_syn) {
+ ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op);
+ if (ret < 0)
+ return ret;
+
+ opend = op + ret;
+ op += sizeof(struct tcphdr);
+ } else {
+ if (!bpf_sock->skb ||
+ bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB)
+ /* This bpf_sock->op cannot call this helper */
+ return -EPERM;
+
+ opend = bpf_sock->skb_data_end;
+ op = bpf_sock->skb->data + sizeof(struct tcphdr);
+ }
+
+ op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len,
+ &eol);
+ if (IS_ERR(op))
+ return PTR_ERR(op);
+
+ copy_len = op[1];
+ ret = copy_len;
+ if (copy_len > len) {
+ ret = -ENOSPC;
+ copy_len = len;
+ }
+
+ memcpy(search_res, op, copy_len);
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
+ .func = bpf_sock_ops_load_hdr_opt,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+};
+
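A sketch of searching for an experimental option from the parse callback; kind 254 with the 0xeB9F magic is purely illustrative:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int find_exp_opt(struct bpf_sock_ops *skops)
{
	/* Search key: kind 254 (experimental), total length 4
	 * (kind + len + 2-byte magic). On success the whole found
	 * option is copied back into opt[].
	 */
	__u8 opt[8] = { 254, 4, 0xeb, 0x9f };

	if (skops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB)
		bpf_load_hdr_opt(skops, opt, sizeof(opt), 0);
	return 1;
}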
+BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
+ const void *, from, u32, len, u64, flags)
+{
+ u8 new_kind, new_kind_len, magic_len = 0, *opend;
+ const u8 *op, *new_op, *magic = NULL;
+ struct sk_buff *skb;
+ bool eol;
+
+ if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB)
+ return -EPERM;
+
+ if (len < 2 || flags)
+ return -EINVAL;
+
+ new_op = from;
+ new_kind = new_op[0];
+ new_kind_len = new_op[1];
+
+ if (new_kind_len > len || new_kind == TCPOPT_NOP ||
+ new_kind == TCPOPT_EOL)
+ return -EINVAL;
+
+ if (new_kind_len > bpf_sock->remaining_opt_len)
+ return -ENOSPC;
+
+ /* 253 is another experimental kind */
+ if (new_kind == TCPOPT_EXP || new_kind == 253) {
+ if (new_kind_len < 4)
+ return -EINVAL;
+ /* Match on the 2 byte magic as well.
+ * RFC 6994: the magic may be 2 or 4 bytes.
+ * Matching on only 2 bytes is therefore the
+ * conservative choice, but it is the right one
+ * for the 'search-for-duplication' purpose.
+ */
+ magic = &new_op[2];
+ magic_len = 2;
+ }
+
+ /* Check for duplication */
+ skb = bpf_sock->skb;
+ op = skb->data + sizeof(struct tcphdr);
+ opend = bpf_sock->skb_data_end;
+
+ op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len,
+ &eol);
+ if (!IS_ERR(op))
+ return -EEXIST;
+
+ if (PTR_ERR(op) != -ENOMSG)
+ return PTR_ERR(op);
+
+ if (eol)
+ /* The option list has been terminated (TCPOPT_EOL);
+ * treat it as though no more header options can be
+ * written.
+ */
+ return -ENOSPC;
+
+ /* No duplication found. Store the header option. */
+ memcpy(opend, from, new_kind_len);
+
+ bpf_sock->remaining_opt_len -= new_kind_len;
+ bpf_sock->skb_data_end += new_kind_len;
+
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
+ .func = bpf_sock_ops_store_hdr_opt,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_CONST_SIZE,
+ .arg4_type = ARG_ANYTHING,
+};
+
+BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
+ u32, len, u64, flags)
+{
+ if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB)
+ return -EPERM;
+
+ if (flags || len < 2)
+ return -EINVAL;
+
+ if (len > bpf_sock->remaining_opt_len)
+ return -ENOSPC;
+
+ bpf_sock->remaining_opt_len -= len;
+
+ return 0;
+}
+
+static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = {
+ .func = bpf_sock_ops_reserve_hdr_opt,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
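The reserve/store pair spans two callbacks; a sketch, reusing the same illustrative experimental option as above:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int write_exp_opt(struct bpf_sock_ops *skops)
{
	__u8 opt[4] = { 254, 4, 0xeb, 0x9f };

	switch (skops->op) {
	case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
		/* Ask the stack to leave room for our 4 bytes. */
		bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
		break;
	case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
		bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
		break;
	}
	return 1;
}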
@@ -6164,6 +6831,7 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_skb_change_tail ||
func == sk_skb_change_tail ||
func == bpf_skb_adjust_room ||
+ func == sk_skb_adjust_room ||
func == bpf_skb_pull_data ||
func == sk_skb_pull_data ||
func == bpf_clone_redirect ||
@@ -6180,6 +6848,9 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_lwt_seg6_adjust_srh ||
func == bpf_lwt_seg6_action ||
#endif
+#ifdef CONFIG_INET
+ func == bpf_sock_ops_store_hdr_opt ||
+#endif
func == bpf_lwt_in_push_encap ||
func == bpf_lwt_xmit_push_encap)
return true;
@@ -6298,7 +6969,7 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return NULL;
}
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6317,7 +6988,7 @@ sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_perf_event_output:
return &bpf_skb_event_output_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6419,6 +7090,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return bpf_get_skb_set_tunnel_proto(func_id);
case BPF_FUNC_redirect:
return &bpf_redirect_proto;
+ case BPF_FUNC_redirect_neigh:
+ return &bpf_redirect_neigh_proto;
+ case BPF_FUNC_redirect_peer:
+ return &bpf_redirect_peer_proto;
case BPF_FUNC_get_route_realm:
return &bpf_get_route_realm_proto;
case BPF_FUNC_get_hash_recalc:
@@ -6449,6 +7124,10 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_get_xfrm_state:
return &bpf_skb_get_xfrm_state_proto;
#endif
+#ifdef CONFIG_CGROUP_NET_CLASSID
+ case BPF_FUNC_skb_cgroup_classid:
+ return &bpf_skb_cgroup_classid_proto;
+#endif
#ifdef CONFIG_SOCK_CGROUP_DATA
case BPF_FUNC_skb_cgroup_id:
return &bpf_skb_cgroup_id_proto;
@@ -6478,7 +7157,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_sk_assign_proto;
#endif
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6519,7 +7198,7 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_tcp_gen_syncookie_proto;
#endif
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6551,11 +7230,17 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_sk_storage_delete:
return &bpf_sk_storage_delete_proto;
#ifdef CONFIG_INET
+ case BPF_FUNC_load_hdr_opt:
+ return &bpf_sock_ops_load_hdr_opt_proto;
+ case BPF_FUNC_store_hdr_opt:
+ return &bpf_sock_ops_store_hdr_opt_proto;
+ case BPF_FUNC_reserve_hdr_opt:
+ return &bpf_sock_ops_reserve_hdr_opt_proto;
case BPF_FUNC_tcp_sock:
return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6601,7 +7286,7 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_cgroup_classid_curr_proto;
#endif
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6622,6 +7307,8 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &sk_skb_change_tail_proto;
case BPF_FUNC_skb_change_head:
return &sk_skb_change_head_proto;
+ case BPF_FUNC_skb_adjust_room:
+ return &sk_skb_adjust_room_proto;
case BPF_FUNC_get_socket_cookie:
return &bpf_get_socket_cookie_proto;
case BPF_FUNC_get_socket_uid:
@@ -6643,7 +7330,7 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_skc_lookup_tcp_proto;
#endif
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6654,7 +7341,7 @@ flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_load_bytes:
return &bpf_flow_dissector_load_bytes_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -6681,7 +7368,7 @@ lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -7350,6 +8037,20 @@ static bool sock_ops_is_valid_access(int off, int size,
return false;
info->reg_type = PTR_TO_SOCKET_OR_NULL;
break;
+ case offsetof(struct bpf_sock_ops, skb_data):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_PACKET;
+ break;
+ case offsetof(struct bpf_sock_ops, skb_data_end):
+ if (size != sizeof(__u64))
+ return false;
+ info->reg_type = PTR_TO_PACKET_END;
+ break;
+ case offsetof(struct bpf_sock_ops, skb_tcp_flags):
+ bpf_ctx_record_field_size(info, size_default);
+ return bpf_ctx_narrow_access_ok(off, size,
+ size_default);
default:
if (size != size_default)
return false;
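The new fields give sockops programs direct, verifier-checked packet access; a sketch, assuming skb_data points at the TCP header in the header-option callbacks:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int syn_only(struct bpf_sock_ops *skops)
{
	__u8 *data = (__u8 *)(long)skops->skb_data;
	__u8 *data_end = (__u8 *)(long)skops->skb_data_end;

	/* Bounds check before any direct read past the fixed header. */
	if (data + 20 /* sizeof(struct tcphdr) */ > data_end)
		return 1;

	/* TCPHDR_SYN == 0x02; readable via narrow loads per the
	 * bpf_ctx_narrow_access_ok() check above.
	 */
	if (!(skops->skb_tcp_flags & 0x02))
		return 1;

	/* ...parse or write header options only for SYN packets... */
	return 1;
}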
@@ -8451,17 +9152,22 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
return insn - insn_buf;
switch (si->off) {
- case offsetof(struct bpf_sock_ops, op) ...
+ case offsetof(struct bpf_sock_ops, op):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
+ op),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern, op));
+ break;
+
+ case offsetof(struct bpf_sock_ops, replylong[0]) ...
offsetof(struct bpf_sock_ops, replylong[3]):
- BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) !=
- sizeof_field(struct bpf_sock_ops_kern, op));
BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
sizeof_field(struct bpf_sock_ops_kern, reply));
BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
sizeof_field(struct bpf_sock_ops_kern, replylong));
off = si->off;
- off -= offsetof(struct bpf_sock_ops, op);
- off += offsetof(struct bpf_sock_ops_kern, op);
+ off -= offsetof(struct bpf_sock_ops, replylong[0]);
+ off += offsetof(struct bpf_sock_ops_kern, replylong[0]);
if (type == BPF_WRITE)
*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
off);
@@ -8682,6 +9388,49 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
case offsetof(struct bpf_sock_ops, sk):
SOCK_OPS_GET_SK();
break;
+ case offsetof(struct bpf_sock_ops, skb_data_end):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
+ skb_data_end),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern,
+ skb_data_end));
+ break;
+ case offsetof(struct bpf_sock_ops, skb_data):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
+ skb),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern,
+ skb));
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
+ si->dst_reg, si->dst_reg,
+ offsetof(struct sk_buff, data));
+ break;
+ case offsetof(struct bpf_sock_ops, skb_len):
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
+ skb),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern,
+ skb));
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
+ si->dst_reg, si->dst_reg,
+ offsetof(struct sk_buff, len));
+ break;
+ case offsetof(struct bpf_sock_ops, skb_tcp_flags):
+ off = offsetof(struct sk_buff, cb);
+ off += offsetof(struct tcp_skb_cb, tcp_flags);
+ *target_size = sizeof_field(struct tcp_skb_cb, tcp_flags);
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
+ skb),
+ si->dst_reg, si->src_reg,
+ offsetof(struct bpf_sock_ops_kern,
+ skb));
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb,
+ tcp_flags),
+ si->dst_reg, si->dst_reg, off);
+ break;
}
return insn - insn_buf;
}
@@ -9356,7 +10105,7 @@ sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_sk_release:
return &bpf_sk_release_proto;
default:
- return bpf_base_func_proto(func_id);
+ return bpf_sk_base_func_proto(func_id);
}
}
@@ -9506,17 +10255,6 @@ BTF_SOCK_TYPE_xxx
u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
#endif
-static bool check_arg_btf_id(u32 btf_id, u32 arg)
-{
- int i;
-
- /* only one argument, no need to check arg */
- for (i = 0; i < MAX_BTF_SOCK_TYPE; i++)
- if (btf_sock_ids[i] == btf_id)
- return true;
- return false;
-}
-
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{
/* tcp6_sock type is not generated in dwarf and hence btf,
@@ -9534,8 +10272,7 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
.func = bpf_skc_to_tcp6_sock,
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
- .arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
};
@@ -9551,8 +10288,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
.func = bpf_skc_to_tcp_sock,
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
- .arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
};
@@ -9581,8 +10317,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
.func = bpf_skc_to_tcp_timewait_sock,
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
- .arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
};
@@ -9605,8 +10340,7 @@ const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
.func = bpf_skc_to_tcp_request_sock,
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
- .arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
};
@@ -9627,7 +10361,37 @@ const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
.func = bpf_skc_to_udp6_sock,
.gpl_only = false,
.ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
- .arg1_type = ARG_PTR_TO_BTF_ID,
- .check_btf_id = check_arg_btf_id,
+ .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
};
+
+static const struct bpf_func_proto *
+bpf_sk_base_func_proto(enum bpf_func_id func_id)
+{
+ const struct bpf_func_proto *func;
+
+ switch (func_id) {
+ case BPF_FUNC_skc_to_tcp6_sock:
+ func = &bpf_skc_to_tcp6_sock_proto;
+ break;
+ case BPF_FUNC_skc_to_tcp_sock:
+ func = &bpf_skc_to_tcp_sock_proto;
+ break;
+ case BPF_FUNC_skc_to_tcp_timewait_sock:
+ func = &bpf_skc_to_tcp_timewait_sock_proto;
+ break;
+ case BPF_FUNC_skc_to_tcp_request_sock:
+ func = &bpf_skc_to_tcp_request_sock_proto;
+ break;
+ case BPF_FUNC_skc_to_udp6_sock:
+ func = &bpf_skc_to_udp6_sock_proto;
+ break;
+ default:
+ return bpf_base_func_proto(func_id);
+ }
+
+ if (!perfmon_capable())
+ return NULL;
+
+ return func;
+}
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 29806eb765cf..e21950a2c897 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -932,8 +932,14 @@ bool __skb_flow_dissect(const struct net *net,
int offset = 0;
ops = skb->dev->dsa_ptr->tag_ops;
- if (ops->flow_dissect &&
- !ops->flow_dissect(skb, &proto, &offset)) {
+ /* Tail taggers don't break flow dissection */
+ if (!ops->tail_tag) {
+ if (ops->flow_dissect)
+ ops->flow_dissect(skb, &proto, &offset);
+ else
+ dsa_tag_generic_flow_dissect(skb,
+ &proto,
+ &offset);
hlen -= offset;
nhoff += offset;
}
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 6bbd06f7dc7d..c714e6a9dad4 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -116,6 +116,12 @@ static int dev_seq_show(struct seq_file *seq, void *v)
return 0;
}
+static u32 softnet_backlog_len(struct softnet_data *sd)
+{
+ return skb_queue_len_lockless(&sd->input_pkt_queue) +
+ skb_queue_len_lockless(&sd->process_queue);
+}
+
static struct softnet_data *softnet_get_online(loff_t *pos)
{
struct softnet_data *sd = NULL;
@@ -159,12 +165,17 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
rcu_read_unlock();
#endif
+ /* The index is the CPU id owning this sd. Since offline CPUs are
+ * not displayed, it would otherwise not be trivial for user-space
+ * to map the data to a specific CPU.
+ */
seq_printf(seq,
- "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
sd->processed, sd->dropped, sd->time_squeeze, 0,
0, 0, 0, 0, /* was fastroute */
0, /* was cpu_collision */
- sd->received_rps, flow_limit_count);
+ sd->received_rps, flow_limit_count,
+ softnet_backlog_len(sd), (int)seq->index);
return 0;
}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index efec66fa78b7..94fff0700bdd 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1158,8 +1158,8 @@ static ssize_t traffic_class_show(struct netdev_queue *queue,
* belongs to the root device it will be reported with just the
* traffic class, so just "0" for TC 0 for example.
*/
- return dev->num_tc < 0 ? sprintf(buf, "%u%d\n", tc, dev->num_tc) :
- sprintf(buf, "%u\n", tc);
+ return dev->num_tc < 0 ? sprintf(buf, "%d%d\n", tc, dev->num_tc) :
+ sprintf(buf, "%d\n", tc);
}
#ifdef CONFIG_XPS
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 944ab214e5ae..dbc66b896287 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -19,6 +19,7 @@
#include <linux/net_namespace.h>
#include <linux/sched/task.h>
#include <linux/uidgid.h>
+#include <linux/cookie.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -69,16 +70,16 @@ EXPORT_SYMBOL_GPL(pernet_ops_rwsem);
static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
-static atomic64_t cookie_gen;
+DEFINE_COOKIE(net_cookie);
-u64 net_gen_cookie(struct net *net)
+u64 __net_gen_cookie(struct net *net)
{
while (1) {
u64 res = atomic64_read(&net->net_cookie);
if (res)
return res;
- res = atomic64_inc_return(&cookie_gen);
+ res = gen_cookie_next(&net_cookie);
atomic64_cmpxchg(&net->net_cookie, 0, res);
}
}
@@ -1101,7 +1102,10 @@ static int __init net_ns_init(void)
panic("Could not allocate generic netns");
rcu_assign_pointer(init_net.gen, ng);
- net_gen_cookie(&init_net);
+
+ preempt_disable();
+ __net_gen_cookie(&init_net);
+ preempt_enable();
down_write(&pernet_ops_rwsem);
if (setup_net(&init_net, &init_user_ns))
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2338753e936b..c310c7c1cef7 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -297,7 +297,7 @@ static int netpoll_owner_active(struct net_device *dev)
{
struct napi_struct *napi;
- list_for_each_entry(napi, &dev->napi_list, dev_list) {
+ list_for_each_entry_rcu(napi, &dev->napi_list, dev_list) {
if (napi->poll_owner == smp_processor_id())
return 1;
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 44fdbb9c6e53..105978604ffd 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -922,7 +922,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->min_pkt_size = value;
pkt_dev->cur_pkt_size = value;
}
- sprintf(pg_result, "OK: min_pkt_size=%u",
+ sprintf(pg_result, "OK: min_pkt_size=%d",
pkt_dev->min_pkt_size);
return count;
}
@@ -939,7 +939,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->max_pkt_size = value;
pkt_dev->cur_pkt_size = value;
}
- sprintf(pg_result, "OK: max_pkt_size=%u",
+ sprintf(pg_result, "OK: max_pkt_size=%d",
pkt_dev->max_pkt_size);
return count;
}
@@ -959,7 +959,7 @@ static ssize_t pktgen_if_write(struct file *file,
pkt_dev->max_pkt_size = value;
pkt_dev->cur_pkt_size = value;
}
- sprintf(pg_result, "OK: pkt_size=%u", pkt_dev->min_pkt_size);
+ sprintf(pg_result, "OK: pkt_size=%d", pkt_dev->min_pkt_size);
return count;
}
@@ -981,7 +981,7 @@ static ssize_t pktgen_if_write(struct file *file,
i += len;
pkt_dev->nfrags = value;
- sprintf(pg_result, "OK: frags=%u", pkt_dev->nfrags);
+ sprintf(pg_result, "OK: frags=%d", pkt_dev->nfrags);
return count;
}
if (!strcmp(name, "delay")) {
@@ -1146,7 +1146,7 @@ static ssize_t pktgen_if_write(struct file *file,
(!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))))
return -ENOTSUPP;
pkt_dev->burst = value < 1 ? 1 : value;
- sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
+ sprintf(pg_result, "OK: burst=%u", pkt_dev->burst);
return count;
}
if (!strcmp(name, "node")) {
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index d964a5147f22..e33fde06d528 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -107,6 +107,36 @@ unsigned int ptp_classify_raw(const struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(ptp_classify_raw);
+struct ptp_header *ptp_parse_header(struct sk_buff *skb, unsigned int type)
+{
+ u8 *ptr = skb_mac_header(skb);
+
+ if (type & PTP_CLASS_VLAN)
+ ptr += VLAN_HLEN;
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ ptr += IPV4_HLEN(ptr) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ ptr += IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ break;
+ default:
+ return NULL;
+ }
+
+ ptr += ETH_HLEN;
+
+ /* Ensure that the entire header is present in this packet. */
+ if (ptr + sizeof(struct ptp_header) > skb->data + skb->len)
+ return NULL;
+
+ return (struct ptp_header *)ptr;
+}
+EXPORT_SYMBOL_GPL(ptp_parse_header);
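A driver-side sketch of the intended call sequence; filtering for SYNC messages is an illustrative use, not something mandated by this patch:

#include <linux/ptp_classify.h>

static void rx_ptp_check(struct sk_buff *skb)
{
	unsigned int type = ptp_classify_raw(skb);
	struct ptp_header *hdr;

	if (type == PTP_CLASS_NONE)
		return;

	hdr = ptp_parse_header(skb, type);
	if (!hdr)
		return;

	/* The low nibble of tsmt carries the PTP messageType;
	 * 0x0 is SYNC.
	 */
	if ((hdr->tsmt & 0x0f) != 0x0)
		return;

	/* e.g. take an RX timestamp for this frame */
}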
+
void __init ptp_classifier_init(void)
{
static struct sock_filter ptp_filter[] __initdata = {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5cd6d48bb77b..1ba8f0163744 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -712,11 +712,10 @@ EXPORT_SYMBOL(kfree_skb_list);
*
* Must only be called from net_ratelimit()-ed paths.
*
- * Dumps up to can_dump_full whole packets if full_pkt, headers otherwise.
+ * Dumps whole packets if full_pkt, only headers otherwise.
*/
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
- static atomic_t can_dump_full = ATOMIC_INIT(5);
struct skb_shared_info *sh = skb_shinfo(skb);
struct net_device *dev = skb->dev;
struct sock *sk = skb->sk;
@@ -726,9 +725,6 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
int i, len, seg_len;
if (full_pkt)
- full_pkt = atomic_dec_if_positive(&can_dump_full) >= 0;
-
- if (full_pkt)
len = skb->len;
else
len = min_t(int, skb->len, MAX_HEADER + 128);
@@ -895,9 +891,6 @@ void __kfree_skb_defer(struct sk_buff *skb)
void napi_consume_skb(struct sk_buff *skb, int budget)
{
- if (unlikely(!skb))
- return;
-
/* Zero budget indicate non-NAPI context called us, like netpoll */
if (unlikely(!budget)) {
dev_consume_skb_any(skb);
@@ -5562,6 +5555,73 @@ int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
}
EXPORT_SYMBOL(skb_vlan_push);
+/**
+ * skb_eth_pop() - Drop the Ethernet header at the head of a packet
+ *
+ * @skb: Socket buffer to modify
+ *
+ * Drop the Ethernet header of @skb.
+ *
+ * Expects that skb->data points to the mac header and that no VLAN tags are
+ * present.
+ *
+ * Returns 0 on success, -errno otherwise.
+ */
+int skb_eth_pop(struct sk_buff *skb)
+{
+ if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
+ skb_network_offset(skb) < ETH_HLEN)
+ return -EPROTO;
+
+ skb_pull_rcsum(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+
+ return 0;
+}
+EXPORT_SYMBOL(skb_eth_pop);
+
+/**
+ * skb_eth_push() - Add a new Ethernet header at the head of a packet
+ *
+ * @skb: Socket buffer to modify
+ * @dst: Destination MAC address of the new header
+ * @src: Source MAC address of the new header
+ *
+ * Prepend @skb with a new Ethernet header.
+ *
+ * Expects that skb->data points to the mac header, which must be empty.
+ *
+ * Returns 0 on success, -errno otherwise.
+ */
+int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
+ const unsigned char *src)
+{
+ struct ethhdr *eth;
+ int err;
+
+ if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
+ return -EPROTO;
+
+ err = skb_cow_head(skb, sizeof(*eth));
+ if (err < 0)
+ return err;
+
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ skb_reset_mac_len(skb);
+
+ eth = eth_hdr(skb);
+ ether_addr_copy(eth->h_dest, dst);
+ ether_addr_copy(eth->h_source, src);
+ eth->h_proto = skb->protocol;
+
+ skb_postpush_rcsum(skb, eth, sizeof(*eth));
+
+ return 0;
+}
+EXPORT_SYMBOL(skb_eth_push);
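Taken together, a caller could swap the Ethernet header in place; this sketch uses placeholder locally administered MAC addresses:

#include <linux/etherdevice.h>

static int rewrite_eth(struct sk_buff *skb)
{
	static const unsigned char dst[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	static const unsigned char src[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x02 };
	int err;

	/* skb->data must sit at the mac header, with no VLAN tags. */
	err = skb_eth_pop(skb);
	if (err)
		return err;
	return skb_eth_push(skb, dst, src);
}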
+
/* Update the ethertype of hdr and the skb csum value if required. */
static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
__be16 ethertype)
@@ -5956,8 +6016,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
size = SKB_WITH_OVERHEAD(ksize(data));
memcpy((struct skb_shared_info *)(data + size),
- skb_shinfo(skb), offsetof(struct skb_shared_info,
- frags[skb_shinfo(skb)->nr_frags]));
+ skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
if (skb_orphan_frags(skb, gfp_mask)) {
kfree(data);
return -ENOMEM;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index 649583158983..654182ecf87b 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -433,10 +433,12 @@ static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
u32 off, u32 len, bool ingress)
{
- if (ingress)
- return sk_psock_skb_ingress(psock, skb);
- else
+ if (!ingress) {
+ if (!sock_writeable(psock->sk))
+ return -EAGAIN;
return skb_send_sock_locked(psock->sk, skb, off, len);
+ }
+ return sk_psock_skb_ingress(psock, skb);
}
static void sk_psock_backlog(struct work_struct *work)
@@ -494,14 +496,34 @@ end:
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
- struct sk_psock *psock = kzalloc_node(sizeof(*psock),
- GFP_ATOMIC | __GFP_NOWARN,
- node);
- if (!psock)
- return NULL;
+ struct sk_psock *psock;
+ struct proto *prot;
+
+ write_lock_bh(&sk->sk_callback_lock);
+
+ if (inet_csk_has_ulp(sk)) {
+ psock = ERR_PTR(-EINVAL);
+ goto out;
+ }
+
+ if (sk->sk_user_data) {
+ psock = ERR_PTR(-EBUSY);
+ goto out;
+ }
+
+ psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
+ if (!psock) {
+ psock = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+ prot = READ_ONCE(sk->sk_prot);
psock->sk = sk;
- psock->eval = __SK_NONE;
+ psock->eval = __SK_NONE;
+ psock->sk_proto = prot;
+ psock->saved_unhash = prot->unhash;
+ psock->saved_close = prot->close;
+ psock->saved_write_space = sk->sk_write_space;
INIT_LIST_HEAD(&psock->link);
spin_lock_init(&psock->link_lock);
@@ -516,6 +538,8 @@ struct sk_psock *sk_psock_init(struct sock *sk, int node)
rcu_assign_sk_user_data_nocopy(sk, psock);
sock_hold(sk);
+out:
+ write_unlock_bh(&sk->sk_callback_lock);
return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);
@@ -603,6 +627,8 @@ void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
rcu_assign_sk_user_data(sk, NULL);
if (psock->progs.skb_parser)
sk_psock_stop_strp(sk, psock);
+ else if (psock->progs.skb_verdict)
+ sk_psock_stop_verdict(sk, psock);
write_unlock_bh(&sk->sk_callback_lock);
sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
@@ -660,19 +686,8 @@ EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
struct sk_buff *skb)
{
- int ret;
-
- skb->sk = psock->sk;
bpf_compute_data_end_sk_skb(skb);
- ret = bpf_prog_run_pin_on_cpu(prog, skb);
- /* strparser clones the skb before handing it to a upper layer,
- * meaning skb_orphan has been called. We NULL sk on the way out
- * to ensure we don't trigger a BUG_ON() in skb/sk operations
- * later and because we are not charging the memory of this skb
- * to any socket yet.
- */
- skb->sk = NULL;
- return ret;
+ return bpf_prog_run_pin_on_cpu(prog, skb);
}
static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
@@ -687,38 +702,35 @@ static void sk_psock_skb_redirect(struct sk_buff *skb)
{
struct sk_psock *psock_other;
struct sock *sk_other;
- bool ingress;
sk_other = tcp_skb_bpf_redirect_fetch(skb);
+ /* This error indicates a buggy BPF program: it returned a
+ * redirect verdict but then did not set a redirect destination
+ * socket.
+ */
if (unlikely(!sk_other)) {
kfree_skb(skb);
return;
}
psock_other = sk_psock(sk_other);
+ /* This error indicates the socket is being torn down or had another
+ * error that caused the pipe to break. We can't send a packet on
+ * a socket that is in this state so we drop the skb.
+ */
if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
kfree_skb(skb);
return;
}
- ingress = tcp_skb_bpf_ingress(skb);
- if ((!ingress && sock_writeable(sk_other)) ||
- (ingress &&
- atomic_read(&sk_other->sk_rmem_alloc) <=
- sk_other->sk_rcvbuf)) {
- if (!ingress)
- skb_set_owner_w(skb, sk_other);
- skb_queue_tail(&psock_other->ingress_skb, skb);
- schedule_work(&psock_other->work);
- } else {
- kfree_skb(skb);
- }
+ skb_queue_tail(&psock_other->ingress_skb, skb);
+ schedule_work(&psock_other->work);
}
-static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
{
switch (verdict) {
case __SK_REDIRECT:
+ skb_set_owner_r(skb, sk);
sk_psock_skb_redirect(skb);
break;
case __SK_PASS:
@@ -736,11 +748,17 @@ int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
rcu_read_lock();
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
+ /* We skip the full skb_set_owner_r() here because on a SK_PASS
+ * or SK_DROP we can skip skb memory accounting and use the
+ * TLS context instead.
+ */
+ skb->sk = psock->sk;
tcp_skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+ skb->sk = NULL;
}
- sk_psock_tls_verdict_apply(skb, ret);
+ sk_psock_tls_verdict_apply(skb, psock->sk, ret);
rcu_read_unlock();
return ret;
}
@@ -749,7 +767,9 @@ EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
static void sk_psock_verdict_apply(struct sk_psock *psock,
struct sk_buff *skb, int verdict)
{
+ struct tcp_skb_cb *tcp;
struct sock *sk_other;
+ int err = -EIO;
switch (verdict) {
case __SK_PASS:
@@ -758,16 +778,24 @@ static void sk_psock_verdict_apply(struct sk_psock *psock,
!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
goto out_free;
}
- if (atomic_read(&sk_other->sk_rmem_alloc) <=
- sk_other->sk_rcvbuf) {
- struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);
- tcp->bpf.flags |= BPF_F_INGRESS;
+ tcp = TCP_SKB_CB(skb);
+ tcp->bpf.flags |= BPF_F_INGRESS;
+
+ /* If the queue is empty we can submit directly into the msg
+ * queue. If it is not empty we have to queue work, otherwise
+ * we may get out-of-order data. Errors from sk_psock_skb_ingress()
+ * are then handled by retrying later from the workqueue.
+ */
+ if (skb_queue_empty(&psock->ingress_skb)) {
+ err = sk_psock_skb_ingress(psock, skb);
+ }
+ if (err < 0) {
skb_queue_tail(&psock->ingress_skb, skb);
schedule_work(&psock->work);
- break;
}
- goto out_free;
+ break;
case __SK_REDIRECT:
sk_psock_skb_redirect(skb);
break;
@@ -792,9 +820,9 @@ static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
kfree_skb(skb);
goto out;
}
+ skb_set_owner_r(skb, sk);
prog = READ_ONCE(psock->progs.skb_verdict);
if (likely(prog)) {
- skb_orphan(skb);
tcp_skb_bpf_redirect_clear(skb);
ret = sk_psock_bpf_run(psock, prog, skb);
ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
@@ -817,8 +845,11 @@ static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
rcu_read_lock();
prog = READ_ONCE(psock->progs.skb_parser);
- if (likely(prog))
+ if (likely(prog)) {
+ skb->sk = psock->sk;
ret = sk_psock_bpf_run(psock, prog, skb);
+ skb->sk = NULL;
+ }
rcu_read_unlock();
return ret;
}
@@ -842,6 +873,57 @@ static void sk_psock_strp_data_ready(struct sock *sk)
rcu_read_unlock();
}
+static int sk_psock_verdict_recv(read_descriptor_t *desc, struct sk_buff *skb,
+ unsigned int offset, size_t orig_len)
+{
+ struct sock *sk = (struct sock *)desc->arg.data;
+ struct sk_psock *psock;
+ struct bpf_prog *prog;
+ int ret = __SK_DROP;
+ int len = skb->len;
+
+ /* clone here so sk_eat_skb() in tcp_read_sock does not drop our data */
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb) {
+ desc->error = -ENOMEM;
+ return 0;
+ }
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+ if (unlikely(!psock)) {
+ len = 0;
+ kfree_skb(skb);
+ goto out;
+ }
+ skb_set_owner_r(skb, sk);
+ prog = READ_ONCE(psock->progs.skb_verdict);
+ if (likely(prog)) {
+ tcp_skb_bpf_redirect_clear(skb);
+ ret = sk_psock_bpf_run(psock, prog, skb);
+ ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
+ }
+ sk_psock_verdict_apply(psock, skb, ret);
+out:
+ rcu_read_unlock();
+ return len;
+}
+
+static void sk_psock_verdict_data_ready(struct sock *sk)
+{
+ struct socket *sock = sk->sk_socket;
+ read_descriptor_t desc;
+
+ if (unlikely(!sock || !sock->ops || !sock->ops->read_sock))
+ return;
+
+ desc.arg.data = sk;
+ desc.error = 0;
+ desc.count = 1;
+
+ sock->ops->read_sock(sk, &desc, sk_psock_verdict_recv);
+}
+
static void sk_psock_write_space(struct sock *sk)
{
struct sk_psock *psock;
@@ -871,6 +953,19 @@ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
return strp_init(&psock->parser.strp, sk, &cb);
}
+void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
+{
+ struct sk_psock_parser *parser = &psock->parser;
+
+ if (parser->enabled)
+ return;
+
+ parser->saved_data_ready = sk->sk_data_ready;
+ sk->sk_data_ready = sk_psock_verdict_data_ready;
+ sk->sk_write_space = sk_psock_write_space;
+ parser->enabled = true;
+}
+
void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
struct sk_psock_parser *parser = &psock->parser;
@@ -896,3 +991,15 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
strp_stop(&parser->strp);
parser->enabled = false;
}
+
+void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
+{
+ struct sk_psock_parser *parser = &psock->parser;
+
+ if (!parser->enabled)
+ return;
+
+ sk->sk_data_ready = parser->saved_data_ready;
+ parser->saved_data_ready = NULL;
+ parser->enabled = false;
+}
diff --git a/net/core/sock.c b/net/core/sock.c
index 6c5c6b18eff4..4e8729357122 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -413,18 +413,6 @@ static int sock_set_timeout(long *timeo_p, sockptr_t optval, int optlen,
return 0;
}
-static void sock_warn_obsolete_bsdism(const char *name)
-{
- static int warned;
- static char warncomm[TASK_COMM_LEN];
- if (strcmp(warncomm, current->comm) && warned < 5) {
- strcpy(warncomm, current->comm);
- pr_warn("process `%s' is using obsolete %s SO_BSDCOMPAT\n",
- warncomm, name);
- warned++;
- }
-}
-
static bool sock_needs_netstamp(const struct sock *sk)
{
switch (sk->sk_family) {
@@ -769,7 +757,6 @@ static void __sock_set_timestamps(struct sock *sk, bool val, bool new, bool ns)
} else {
sock_reset_flag(sk, SOCK_RCVTSTAMP);
sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
}
}
@@ -984,7 +971,6 @@ set_sndbuf:
break;
case SO_BSDCOMPAT:
- sock_warn_obsolete_bsdism("setsockopt");
break;
case SO_PASSCRED:
@@ -1007,8 +993,6 @@ set_sndbuf:
__sock_set_timestamps(sk, valbool, true, true);
break;
case SO_TIMESTAMPING_NEW:
- sock_set_flag(sk, SOCK_TSTAMP_NEW);
- fallthrough;
case SO_TIMESTAMPING_OLD:
if (val & ~SOF_TIMESTAMPING_MASK) {
ret = -EINVAL;
@@ -1037,16 +1021,14 @@ set_sndbuf:
}
sk->sk_tsflags = val;
+ sock_valbool_flag(sk, SOCK_TSTAMP_NEW, optname == SO_TIMESTAMPING_NEW);
+
if (val & SOF_TIMESTAMPING_RX_SOFTWARE)
sock_enable_timestamp(sk,
SOCK_TIMESTAMPING_RX_SOFTWARE);
- else {
- if (optname == SO_TIMESTAMPING_NEW)
- sock_reset_flag(sk, SOCK_TSTAMP_NEW);
-
+ else
sock_disable_timestamp(sk,
(1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
- }
break;
case SO_RCVLOWAT:
@@ -1387,7 +1369,6 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
break;
case SO_BSDCOMPAT:
- sock_warn_obsolete_bsdism("getsockopt");
break;
case SO_TIMESTAMP_OLD:
@@ -2961,6 +2942,13 @@ void sk_stop_timer(struct sock *sk, struct timer_list* timer)
}
EXPORT_SYMBOL(sk_stop_timer);
+void sk_stop_timer_sync(struct sock *sk, struct timer_list *timer)
+{
+ if (del_timer_sync(timer))
+ __sock_put(sk);
+}
+EXPORT_SYMBOL(sk_stop_timer_sync);
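Usage sketch for teardown paths; the my_proto_sk() accessor and retry_timer field are made-up protocol state, shown only for illustration:

	/* Unlike sk_stop_timer(), the _sync variant guarantees the
	 * callback is not still running on another CPU on return,
	 * so resources it touches can safely be freed afterwards.
	 */
	sk_stop_timer_sync(sk, &my_proto_sk(sk)->retry_timer);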
+
void sock_init_data(struct socket *sock, struct sock *sk)
{
sk_init_common(sk);
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index c13ffbd33d8d..c9c45b935f99 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -11,7 +11,7 @@
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/nospec.h>
-
+#include <linux/cookie.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>
@@ -19,16 +19,17 @@ static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
static DEFINE_MUTEX(sock_diag_table_mutex);
static struct workqueue_struct *broadcast_wq;
-static atomic64_t cookie_gen;
-u64 sock_gen_cookie(struct sock *sk)
+DEFINE_COOKIE(sock_cookie);
+
+u64 __sock_gen_cookie(struct sock *sk)
{
while (1) {
u64 res = atomic64_read(&sk->sk_cookie);
if (res)
return res;
- res = atomic64_inc_return(&cookie_gen);
+ res = gen_cookie_next(&sock_cookie);
atomic64_cmpxchg(&sk->sk_cookie, 0, res);
}
}
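gen_cookie_next() uses per-cpu state, so preemptible callers need to pin the CPU first, mirroring the net_ns init path earlier in this diff; a sketch of such a wrapper (an assumption, not quoted from the patch):

u64 sock_gen_cookie(struct sock *sk)
{
	u64 res;

	preempt_disable();
	res = __sock_gen_cookie(sk);
	preempt_enable();

	return res;
}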
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 119f52a99dc1..ddc899e83313 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */
#include <linux/bpf.h>
+#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <linux/errno.h>
#include <linux/file.h>
@@ -147,8 +148,8 @@ static void sock_map_add_link(struct sk_psock *psock,
static void sock_map_del_link(struct sock *sk,
struct sk_psock *psock, void *link_raw)
{
+ bool strp_stop = false, verdict_stop = false;
struct sk_psock_link *link, *tmp;
- bool strp_stop = false;
spin_lock_bh(&psock->link_lock);
list_for_each_entry_safe(link, tmp, &psock->link, list) {
@@ -158,14 +159,19 @@ static void sock_map_del_link(struct sock *sk,
map);
if (psock->parser.enabled && stab->progs.skb_parser)
strp_stop = true;
+ if (psock->parser.enabled && stab->progs.skb_verdict)
+ verdict_stop = true;
list_del(&link->list);
sk_psock_free_link(link);
}
}
spin_unlock_bh(&psock->link_lock);
- if (strp_stop) {
+ if (strp_stop || verdict_stop) {
write_lock_bh(&sk->sk_callback_lock);
- sk_psock_stop_strp(sk, psock);
+ if (strp_stop)
+ sk_psock_stop_strp(sk, psock);
+ else
+ sk_psock_stop_verdict(sk, psock);
write_unlock_bh(&sk->sk_callback_lock);
}
}
@@ -184,8 +190,6 @@ static int sock_map_init_proto(struct sock *sk, struct sk_psock *psock)
{
struct proto *prot;
- sock_owned_by_me(sk);
-
switch (sk->sk_type) {
case SOCK_STREAM:
prot = tcp_bpf_get_proto(sk, psock);
@@ -231,20 +235,21 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
{
struct bpf_prog *msg_parser, *skb_parser, *skb_verdict;
struct sk_psock *psock;
- bool skb_progs;
int ret;
skb_verdict = READ_ONCE(progs->skb_verdict);
- skb_parser = READ_ONCE(progs->skb_parser);
- skb_progs = skb_parser && skb_verdict;
- if (skb_progs) {
+ if (skb_verdict) {
skb_verdict = bpf_prog_inc_not_zero(skb_verdict);
if (IS_ERR(skb_verdict))
return PTR_ERR(skb_verdict);
+ }
+
+ skb_parser = READ_ONCE(progs->skb_parser);
+ if (skb_parser) {
skb_parser = bpf_prog_inc_not_zero(skb_parser);
if (IS_ERR(skb_parser)) {
- bpf_prog_put(skb_verdict);
- return PTR_ERR(skb_parser);
+ ret = PTR_ERR(skb_parser);
+ goto out_put_skb_verdict;
}
}
@@ -253,7 +258,7 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
msg_parser = bpf_prog_inc_not_zero(msg_parser);
if (IS_ERR(msg_parser)) {
ret = PTR_ERR(msg_parser);
- goto out;
+ goto out_put_skb_parser;
}
}
@@ -265,15 +270,16 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
if (psock) {
if ((msg_parser && READ_ONCE(psock->progs.msg_parser)) ||
- (skb_progs && READ_ONCE(psock->progs.skb_parser))) {
+ (skb_parser && READ_ONCE(psock->progs.skb_parser)) ||
+ (skb_verdict && READ_ONCE(psock->progs.skb_verdict))) {
sk_psock_put(sk, psock);
ret = -EBUSY;
goto out_progs;
}
} else {
psock = sk_psock_init(sk, map->numa_node);
- if (!psock) {
- ret = -ENOMEM;
+ if (IS_ERR(psock)) {
+ ret = PTR_ERR(psock);
goto out_progs;
}
}
@@ -286,28 +292,32 @@ static int sock_map_link(struct bpf_map *map, struct sk_psock_progs *progs,
goto out_drop;
write_lock_bh(&sk->sk_callback_lock);
- if (skb_progs && !psock->parser.enabled) {
+ if (skb_parser && skb_verdict && !psock->parser.enabled) {
ret = sk_psock_init_strp(sk, psock);
- if (ret) {
- write_unlock_bh(&sk->sk_callback_lock);
- goto out_drop;
- }
+ if (ret)
+ goto out_unlock_drop;
psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
psock_set_prog(&psock->progs.skb_parser, skb_parser);
sk_psock_start_strp(sk, psock);
+ } else if (!skb_parser && skb_verdict && !psock->parser.enabled) {
+ psock_set_prog(&psock->progs.skb_verdict, skb_verdict);
+ sk_psock_start_verdict(sk, psock);
}
write_unlock_bh(&sk->sk_callback_lock);
return 0;
+out_unlock_drop:
+ write_unlock_bh(&sk->sk_callback_lock);
out_drop:
sk_psock_put(sk, psock);
out_progs:
if (msg_parser)
bpf_prog_put(msg_parser);
-out:
- if (skb_progs) {
- bpf_prog_put(skb_verdict);
+out_put_skb_parser:
+ if (skb_parser)
bpf_prog_put(skb_parser);
- }
+out_put_skb_verdict:
+ if (skb_verdict)
+ bpf_prog_put(skb_verdict);
return ret;
}
@@ -322,8 +332,8 @@ static int sock_map_link_no_progs(struct bpf_map *map, struct sock *sk)
if (!psock) {
psock = sk_psock_init(sk, map->numa_node);
- if (!psock)
- return -ENOMEM;
+ if (IS_ERR(psock))
+ return PTR_ERR(psock);
}
ret = sock_map_init_proto(sk, psock);
@@ -384,7 +394,7 @@ static void *sock_map_lookup(struct bpf_map *map, void *key)
struct sock *sk;
sk = __sock_map_lookup_elem(map, *(u32 *)key);
- if (!sk || !sk_fullsock(sk))
+ if (!sk)
return NULL;
if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;
@@ -402,7 +412,7 @@ static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
if (!sk)
return ERR_PTR(-ENOENT);
- sock_gen_cookie(sk);
+ __sock_gen_cookie(sk);
return &sk->sk_cookie;
}
@@ -478,8 +488,6 @@ static int sock_map_update_common(struct bpf_map *map, u32 idx,
return -EINVAL;
if (unlikely(idx >= map->max_entries))
return -E2BIG;
- if (inet_csk_has_ulp(sk))
- return -EINVAL;
link = sk_psock_init_link();
if (!link)
@@ -563,10 +571,12 @@ static bool sock_map_sk_state_allowed(const struct sock *sk)
return false;
}
-static int sock_map_update_elem(struct bpf_map *map, void *key,
- void *value, u64 flags)
+static int sock_hash_update_common(struct bpf_map *map, void *key,
+ struct sock *sk, u64 flags);
+
+int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value,
+ u64 flags)
{
- u32 idx = *(u32 *)key;
struct socket *sock;
struct sock *sk;
int ret;
@@ -595,14 +605,41 @@ static int sock_map_update_elem(struct bpf_map *map, void *key,
sock_map_sk_acquire(sk);
if (!sock_map_sk_state_allowed(sk))
ret = -EOPNOTSUPP;
+ else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
+ ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
else
- ret = sock_map_update_common(map, idx, sk, flags);
+ ret = sock_hash_update_common(map, key, sk, flags);
sock_map_sk_release(sk);
out:
fput(sock->file);
return ret;
}
+static int sock_map_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 flags)
+{
+ struct sock *sk = (struct sock *)value;
+ int ret;
+
+ if (unlikely(!sk || !sk_fullsock(sk)))
+ return -EINVAL;
+
+ if (!sock_map_sk_is_suitable(sk))
+ return -EOPNOTSUPP;
+
+ local_bh_disable();
+ bh_lock_sock(sk);
+ if (!sock_map_sk_state_allowed(sk))
+ ret = -EOPNOTSUPP;
+ else if (map->map_type == BPF_MAP_TYPE_SOCKMAP)
+ ret = sock_map_update_common(map, *(u32 *)key, sk, flags);
+ else
+ ret = sock_hash_update_common(map, key, sk, flags);
+ bh_unlock_sock(sk);
+ local_bh_enable();
+ return ret;
+}
+
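A hedged sketch of what this split enables: the syscall path above (sock_map_update_elem_sys) keeps taking a socket fd through *value, while a BPF program that already holds a struct sock pointer, such as the sockmap iterator added further below, can hand it to the helper directly. The map name is an assumption:

	/* BPF program side; "sock_map" is an assumed SOCKMAP definition */
	u32 key = 0;
	long err = bpf_map_update_elem(&sock_map, &key, sk, BPF_NOEXIST);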
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, sops,
struct bpf_map *, map, void *, key, u64, flags)
{
@@ -681,8 +718,116 @@ const struct bpf_func_proto bpf_msg_redirect_map_proto = {
.arg4_type = ARG_ANYTHING,
};
+struct sock_map_seq_info {
+ struct bpf_map *map;
+ struct sock *sk;
+ u32 index;
+};
+
+struct bpf_iter__sockmap {
+ __bpf_md_ptr(struct bpf_iter_meta *, meta);
+ __bpf_md_ptr(struct bpf_map *, map);
+ __bpf_md_ptr(void *, key);
+ __bpf_md_ptr(struct sock *, sk);
+};
+
+DEFINE_BPF_ITER_FUNC(sockmap, struct bpf_iter_meta *meta,
+ struct bpf_map *map, void *key,
+ struct sock *sk)
+
+static void *sock_map_seq_lookup_elem(struct sock_map_seq_info *info)
+{
+ if (unlikely(info->index >= info->map->max_entries))
+ return NULL;
+
+ info->sk = __sock_map_lookup_elem(info->map, info->index);
+
+ /* can't return sk directly, since that might be NULL */
+ return info;
+}
+
+static void *sock_map_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ struct sock_map_seq_info *info = seq->private;
+
+ if (*pos == 0)
+ ++*pos;
+
+ /* pairs with sock_map_seq_stop */
+ rcu_read_lock();
+ return sock_map_seq_lookup_elem(info);
+}
+
+static void *sock_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ __must_hold(rcu)
+{
+ struct sock_map_seq_info *info = seq->private;
+
+ ++*pos;
+ ++info->index;
+
+ return sock_map_seq_lookup_elem(info);
+}
+
+static int sock_map_seq_show(struct seq_file *seq, void *v)
+ __must_hold(rcu)
+{
+ struct sock_map_seq_info *info = seq->private;
+ struct bpf_iter__sockmap ctx = {};
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, !v);
+ if (!prog)
+ return 0;
+
+ ctx.meta = &meta;
+ ctx.map = info->map;
+ if (v) {
+ ctx.key = &info->index;
+ ctx.sk = info->sk;
+ }
+
+ return bpf_iter_run_prog(prog, &ctx);
+}
+
+static void sock_map_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ if (!v)
+ (void)sock_map_seq_show(seq, NULL);
+
+ /* pairs with sock_map_seq_start */
+ rcu_read_unlock();
+}
+
+static const struct seq_operations sock_map_seq_ops = {
+ .start = sock_map_seq_start,
+ .next = sock_map_seq_next,
+ .stop = sock_map_seq_stop,
+ .show = sock_map_seq_show,
+};
+
+static int sock_map_init_seq_private(void *priv_data,
+ struct bpf_iter_aux_info *aux)
+{
+ struct sock_map_seq_info *info = priv_data;
+
+ info->map = aux->map;
+ return 0;
+}
+
+static const struct bpf_iter_seq_info sock_map_iter_seq_info = {
+ .seq_ops = &sock_map_seq_ops,
+ .init_seq_private = sock_map_init_seq_private,
+ .seq_priv_size = sizeof(struct sock_map_seq_info),
+};
+
static int sock_map_btf_id;
const struct bpf_map_ops sock_map_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = sock_map_alloc,
.map_free = sock_map_free,
.map_get_next_key = sock_map_get_next_key,
@@ -694,6 +839,7 @@ const struct bpf_map_ops sock_map_ops = {
.map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_stab",
.map_btf_id = &sock_map_btf_id,
+ .iter_seq_info = &sock_map_iter_seq_info,
};
struct bpf_shtab_elem {
@@ -855,8 +1001,6 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
WARN_ON_ONCE(!rcu_read_lock_held());
if (unlikely(flags > BPF_EXIST))
return -EINVAL;
- if (inet_csk_has_ulp(sk))
- return -EINVAL;
link = sk_psock_init_link();
if (!link)
@@ -915,45 +1059,6 @@ out_free:
return ret;
}
-static int sock_hash_update_elem(struct bpf_map *map, void *key,
- void *value, u64 flags)
-{
- struct socket *sock;
- struct sock *sk;
- int ret;
- u64 ufd;
-
- if (map->value_size == sizeof(u64))
- ufd = *(u64 *)value;
- else
- ufd = *(u32 *)value;
- if (ufd > S32_MAX)
- return -EINVAL;
-
- sock = sockfd_lookup(ufd, &ret);
- if (!sock)
- return ret;
- sk = sock->sk;
- if (!sk) {
- ret = -EINVAL;
- goto out;
- }
- if (!sock_map_sk_is_suitable(sk)) {
- ret = -EOPNOTSUPP;
- goto out;
- }
-
- sock_map_sk_acquire(sk);
- if (!sock_map_sk_state_allowed(sk))
- ret = -EOPNOTSUPP;
- else
- ret = sock_hash_update_common(map, key, sk, flags);
- sock_map_sk_release(sk);
-out:
- fput(sock->file);
- return ret;
-}
-
static int sock_hash_get_next_key(struct bpf_map *map, void *key,
void *key_next)
{
@@ -971,7 +1076,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key,
if (!elem)
goto find_first_elem;
- elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&elem->node)),
+ elem_next = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&elem->node)),
struct bpf_shtab_elem, node);
if (elem_next) {
memcpy(key_next, elem_next->key, key_size);
@@ -983,7 +1088,7 @@ static int sock_hash_get_next_key(struct bpf_map *map, void *key,
find_first_elem:
for (; i < htab->buckets_num; i++) {
head = &sock_hash_select_bucket(htab, i)->head;
- elem_next = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),
+ elem_next = hlist_entry_safe(rcu_dereference(hlist_first_rcu(head)),
struct bpf_shtab_elem, node);
if (elem_next) {
memcpy(key_next, elem_next->key, key_size);
@@ -1119,7 +1224,7 @@ static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
if (!sk)
return ERR_PTR(-ENOENT);
- sock_gen_cookie(sk);
+ __sock_gen_cookie(sk);
return &sk->sk_cookie;
}
@@ -1128,7 +1233,7 @@ static void *sock_hash_lookup(struct bpf_map *map, void *key)
struct sock *sk;
sk = __sock_hash_lookup_elem(map, key);
- if (!sk || !sk_fullsock(sk))
+ if (!sk)
return NULL;
if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;
@@ -1217,12 +1322,128 @@ const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
.arg4_type = ARG_ANYTHING,
};
+struct sock_hash_seq_info {
+ struct bpf_map *map;
+ struct bpf_shtab *htab;
+ u32 bucket_id;
+};
+
+static void *sock_hash_seq_find_next(struct sock_hash_seq_info *info,
+ struct bpf_shtab_elem *prev_elem)
+{
+ const struct bpf_shtab *htab = info->htab;
+ struct bpf_shtab_bucket *bucket;
+ struct bpf_shtab_elem *elem;
+ struct hlist_node *node;
+
+ /* try to find next elem in the same bucket */
+ if (prev_elem) {
+ node = rcu_dereference(hlist_next_rcu(&prev_elem->node));
+ elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
+ if (elem)
+ return elem;
+
+ /* no more elements, continue in the next bucket */
+ info->bucket_id++;
+ }
+
+ for (; info->bucket_id < htab->buckets_num; info->bucket_id++) {
+ bucket = &htab->buckets[info->bucket_id];
+ node = rcu_dereference(hlist_first_rcu(&bucket->head));
+ elem = hlist_entry_safe(node, struct bpf_shtab_elem, node);
+ if (elem)
+ return elem;
+ }
+
+ return NULL;
+}
+
+static void *sock_hash_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ struct sock_hash_seq_info *info = seq->private;
+
+ if (*pos == 0)
+ ++*pos;
+
+ /* pairs with sock_hash_seq_stop */
+ rcu_read_lock();
+ return sock_hash_seq_find_next(info, NULL);
+}
+
+static void *sock_hash_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ __must_hold(rcu)
+{
+ struct sock_hash_seq_info *info = seq->private;
+
+ ++*pos;
+ return sock_hash_seq_find_next(info, v);
+}
+
+static int sock_hash_seq_show(struct seq_file *seq, void *v)
+ __must_hold(rcu)
+{
+ struct sock_hash_seq_info *info = seq->private;
+ struct bpf_iter__sockmap ctx = {};
+ struct bpf_shtab_elem *elem = v;
+ struct bpf_iter_meta meta;
+ struct bpf_prog *prog;
+
+ meta.seq = seq;
+ prog = bpf_iter_get_info(&meta, !elem);
+ if (!prog)
+ return 0;
+
+ ctx.meta = &meta;
+ ctx.map = info->map;
+ if (elem) {
+ ctx.key = elem->key;
+ ctx.sk = elem->sk;
+ }
+
+ return bpf_iter_run_prog(prog, &ctx);
+}
+
+static void sock_hash_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ if (!v)
+ (void)sock_hash_seq_show(seq, NULL);
+
+ /* pairs with sock_hash_seq_start */
+ rcu_read_unlock();
+}
+
+static const struct seq_operations sock_hash_seq_ops = {
+ .start = sock_hash_seq_start,
+ .next = sock_hash_seq_next,
+ .stop = sock_hash_seq_stop,
+ .show = sock_hash_seq_show,
+};
+
+static int sock_hash_init_seq_private(void *priv_data,
+ struct bpf_iter_aux_info *aux)
+{
+ struct sock_hash_seq_info *info = priv_data;
+
+ info->map = aux->map;
+ info->htab = container_of(aux->map, struct bpf_shtab, map);
+ return 0;
+}
+
+static const struct bpf_iter_seq_info sock_hash_iter_seq_info = {
+ .seq_ops = &sock_hash_seq_ops,
+ .init_seq_private = sock_hash_init_seq_private,
+ .seq_priv_size = sizeof(struct sock_hash_seq_info),
+};
+
static int sock_hash_map_btf_id;
const struct bpf_map_ops sock_hash_ops = {
+ .map_meta_equal = bpf_map_meta_equal,
.map_alloc = sock_hash_alloc,
.map_free = sock_hash_free,
.map_get_next_key = sock_hash_get_next_key,
- .map_update_elem = sock_hash_update_elem,
+ .map_update_elem = sock_map_update_elem,
.map_delete_elem = sock_hash_delete_elem,
.map_lookup_elem = sock_hash_lookup,
.map_lookup_elem_sys_only = sock_hash_lookup_sys,
@@ -1230,6 +1451,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_check_btf = map_check_no_btf,
.map_btf_name = "bpf_shtab",
.map_btf_id = &sock_hash_map_btf_id,
+ .iter_seq_info = &sock_hash_iter_seq_info,
};
static struct sk_psock_progs *sock_map_progs(struct bpf_map *map)
@@ -1340,3 +1562,62 @@ void sock_map_close(struct sock *sk, long timeout)
release_sock(sk);
saved_close(sk, timeout);
}
+
+static int sock_map_iter_attach_target(struct bpf_prog *prog,
+ union bpf_iter_link_info *linfo,
+ struct bpf_iter_aux_info *aux)
+{
+ struct bpf_map *map;
+ int err = -EINVAL;
+
+ if (!linfo->map.map_fd)
+ return -EBADF;
+
+ map = bpf_map_get_with_uref(linfo->map.map_fd);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ if (map->map_type != BPF_MAP_TYPE_SOCKMAP &&
+ map->map_type != BPF_MAP_TYPE_SOCKHASH)
+ goto put_map;
+
+ if (prog->aux->max_rdonly_access > map->key_size) {
+ err = -EACCES;
+ goto put_map;
+ }
+
+ aux->map = map;
+ return 0;
+
+put_map:
+ bpf_map_put_with_uref(map);
+ return err;
+}
+
+static void sock_map_iter_detach_target(struct bpf_iter_aux_info *aux)
+{
+ bpf_map_put_with_uref(aux->map);
+}
+
+static struct bpf_iter_reg sock_map_iter_reg = {
+ .target = "sockmap",
+ .attach_target = sock_map_iter_attach_target,
+ .detach_target = sock_map_iter_detach_target,
+ .show_fdinfo = bpf_iter_map_show_fdinfo,
+ .fill_link_info = bpf_iter_map_fill_link_info,
+ .ctx_arg_info_size = 2,
+ .ctx_arg_info = {
+ { offsetof(struct bpf_iter__sockmap, key),
+ PTR_TO_RDONLY_BUF_OR_NULL },
+ { offsetof(struct bpf_iter__sockmap, sk),
+ PTR_TO_BTF_ID_OR_NULL },
+ },
+};
+
+static int __init bpf_sockmap_iter_init(void)
+{
+ sock_map_iter_reg.ctx_arg_info[1].btf_id =
+ btf_sock_ids[BTF_SOCK_TYPE_SOCK];
+ return bpf_iter_reg_target(&sock_map_iter_reg);
+}
+late_initcall(bpf_sockmap_iter_init);
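For orientation, a hedged sketch of consuming this iterator; the program, counter and map_fd variables are illustrative, and the context layout follows struct bpf_iter__sockmap above:

	/* BPF side */
	__u64 elems;	/* illustrative global counter */

	SEC("iter/sockmap")
	int count_elems(struct bpf_iter__sockmap *ctx)
	{
		if (!ctx->key || !ctx->sk)
			return 0;	/* final invocation: key and sk are NULL */
		elems++;
		return 0;
	}

	/* userspace side, via libbpf; prog and map_fd are illustrative */
	union bpf_iter_link_info linfo = { .map.map_fd = map_fd };
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts,
			    .link_info = &linfo,
			    .link_info_len = sizeof(linfo));
	struct bpf_link *link = bpf_program__attach_iter(prog, &opts);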
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 6ada114bbcca..d86d8d11cfe4 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -22,7 +22,7 @@
#include <net/busy_poll.h>
#include <net/pkt_sched.h>
-static int two __maybe_unused = 2;
+static int two = 2;
static int three = 3;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
@@ -546,7 +546,7 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .extra2 = &two,
},
{
.procname = "devconf_inherit_init_net",
@@ -587,6 +587,19 @@ static struct ctl_table netns_core_table[] = {
{ }
};
+static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str)
+{
+ /* fallback tunnels for initns only */
+ if (!strncmp(str, "initns", 6))
+ sysctl_fb_tunnels_only_for_init_net = 1;
+ /* no fallback tunnels anywhere */
+ else if (!strncmp(str, "none", 4))
+ sysctl_fb_tunnels_only_for_init_net = 2;
+
+ return 1;
+}
+__setup("fb_tunnels=", fb_tunnels_only_for_init_net_sysctl_setup);
+
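As a usage note for the boot parameter added here: fb_tunnels=initns corresponds to net.core.fb_tunnels_only_for_init_net=1 and fb_tunnels=none to 2; any other value leaves the default of 0 (fallback tunnels created in every namespace) in place.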
static __net_init int sysctl_core_net_init(struct net *net)
{
struct ctl_table *tbl;
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 0a72510d5de1..8f3dd3b1d2d0 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -274,7 +274,7 @@ void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
/**
* dccp_ackvec_clear_state - Perform house-keeping / garbage-collection
* This routine is called when the peer acknowledges the receipt of Ack Vectors
- * up to and including @ackno. While based on on section A.3 of RFC 4340, here
+ * up to and including @ackno. While based on section A.3 of RFC 4340, here
* are additional precautions to prevent corrupted buffer state. In particular,
* we use tail_ackno to identify outdated records; it always marks the earliest
* packet of group (2) in 11.4.2.
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 9c28c8251125..bb3d70664dde 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -495,7 +495,8 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req
rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- rcu_dereference(ireq->ireq_opt));
+ rcu_dereference(ireq->ireq_opt),
+ inet_sk(sk)->tos);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -537,7 +538,8 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
local_bh_disable();
bh_lock_sock(ctl_sk);
err = ip_build_and_send_pkt(skb, ctl_sk,
- rxiph->daddr, rxiph->saddr, NULL);
+ rxiph->daddr, rxiph->saddr, NULL,
+ inet_sk(ctl_sk)->tos);
bh_unlock_sock(ctl_sk);
if (net_xmit_eval(err) == 0) {
@@ -731,7 +733,7 @@ int dccp_invalid_packet(struct sk_buff *skb)
return 1;
}
/*
- * If P.Data Offset is too too large for packet, drop packet and return
+ * If P.Data Offset is too large for packet, drop packet and return
*/
if (!pskb_may_pull(skb, dccph_doff * sizeof(u32))) {
DCCP_WARN("P.Data Offset(%u) too large\n", dccph_doff);
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 0e06dfc32273..a934d2932373 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -85,7 +85,7 @@ static void dccp_retransmit_timer(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
/*
- * More than than 4MSL (8 minutes) has passed, a RESET(aborted) was
+ * More than 4MSL (8 minutes) has passed, a RESET(aborted) was
* sent, no need to retransmit, this sock is dead.
*/
if (dccp_write_timeout(sk))
@@ -176,7 +176,6 @@ static void dccp_delack_timer(struct timer_list *t)
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
/* Try again later. */
- icsk->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
sk_reset_timer(sk, &icsk->icsk_delack_timer,
jiffies + TCP_DELACK_MIN);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 1ce9ba8cf545..2131bf2b3a67 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -225,6 +225,15 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, skb->dev);
+ if (unlikely(cpu_dp->ds->untag_bridge_pvid)) {
+ nskb = dsa_untag_bridge_pvid(skb);
+ if (!nskb) {
+ kfree_skb(skb);
+ return 0;
+ }
+ skb = nskb;
+ }
+
s = this_cpu_ptr(p->stats64);
u64_stats_update_begin(&s->syncp);
s->rx_packets++;
@@ -330,11 +339,7 @@ EXPORT_SYMBOL_GPL(call_dsa_notifiers);
int dsa_devlink_param_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
- struct dsa_devlink_priv *dl_priv;
- struct dsa_switch *ds;
-
- dl_priv = devlink_priv(dl);
- ds = dl_priv->ds;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_param_get)
return -EOPNOTSUPP;
@@ -346,11 +351,7 @@ EXPORT_SYMBOL_GPL(dsa_devlink_param_get);
int dsa_devlink_param_set(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx)
{
- struct dsa_devlink_priv *dl_priv;
- struct dsa_switch *ds;
-
- dl_priv = devlink_priv(dl);
- ds = dl_priv->ds;
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
if (!ds->ops->devlink_param_set)
return -EOPNOTSUPP;
@@ -412,6 +413,36 @@ void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
}
EXPORT_SYMBOL_GPL(dsa_devlink_resource_occ_get_unregister);
+struct devlink_region *
+dsa_devlink_region_create(struct dsa_switch *ds,
+ const struct devlink_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ return devlink_region_create(ds->devlink, ops, region_max_snapshots,
+ region_size);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_region_create);
+
+struct devlink_region *
+dsa_devlink_port_region_create(struct dsa_switch *ds,
+ int port,
+ const struct devlink_port_region_ops *ops,
+ u32 region_max_snapshots, u64 region_size)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+
+ return devlink_port_region_create(&dp->devlink_port, ops,
+ region_max_snapshots,
+ region_size);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_port_region_create);
+
+void dsa_devlink_region_destroy(struct devlink_region *region)
+{
+ devlink_region_destroy(region);
+}
+EXPORT_SYMBOL_GPL(dsa_devlink_region_destroy);
+
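A hedged driver-side sketch of the new region helpers; my_region_ops, my_priv and the region size are illustrative, and the snapshot callback is omitted for brevity. Since devlink ports are now registered before ds->ops->setup() runs (see the dsa2.c changes below), a driver can create regions from its setup() callback:

	static const struct devlink_region_ops my_region_ops = {
		.name = "dump",
		.destructor = kfree,
	};

	static int my_setup(struct dsa_switch *ds)
	{
		struct my_priv *priv = ds->priv;

		priv->region = dsa_devlink_region_create(ds, &my_region_ops,
							 1, 4096);
		if (IS_ERR(priv->region))
			return PTR_ERR(priv->region);

		return 0;
	}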
struct dsa_port *dsa_port_from_netdev(struct net_device *netdev)
{
if (!netdev || !dsa_slave_dev_check(netdev))
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index c0ffc7a2b65f..183003e45762 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -21,9 +21,6 @@
static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);
-static const struct devlink_ops dsa_devlink_ops = {
-};
-
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
struct dsa_switch_tree *dst;
@@ -254,22 +251,11 @@ static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
static int dsa_port_setup(struct dsa_port *dp)
{
- struct dsa_switch *ds = dp->ds;
- struct dsa_switch_tree *dst = ds->dst;
- const unsigned char *id = (const unsigned char *)&dst->index;
- const unsigned char len = sizeof(dst->index);
struct devlink_port *dlp = &dp->devlink_port;
bool dsa_port_link_registered = false;
- bool devlink_port_registered = false;
- struct devlink_port_attrs attrs = {};
- struct devlink *dl = ds->devlink;
bool dsa_port_enabled = false;
int err = 0;
- attrs.phys.port_number = dp->index;
- memcpy(attrs.switch_id.id, id, len);
- attrs.switch_id.id_len = len;
-
if (dp->setup)
return 0;
@@ -278,14 +264,6 @@ static int dsa_port_setup(struct dsa_port *dp)
dsa_port_disable(dp);
break;
case DSA_PORT_TYPE_CPU:
- memset(dlp, 0, sizeof(*dlp));
- attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
- devlink_port_attrs_set(dlp, &attrs);
- err = devlink_port_register(dl, dlp, dp->index);
- if (err)
- break;
- devlink_port_registered = true;
-
err = dsa_port_link_register_of(dp);
if (err)
break;
@@ -298,14 +276,6 @@ static int dsa_port_setup(struct dsa_port *dp)
break;
case DSA_PORT_TYPE_DSA:
- memset(dlp, 0, sizeof(*dlp));
- attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
- devlink_port_attrs_set(dlp, &attrs);
- err = devlink_port_register(dl, dlp, dp->index);
- if (err)
- break;
- devlink_port_registered = true;
-
err = dsa_port_link_register_of(dp);
if (err)
break;
@@ -318,14 +288,6 @@ static int dsa_port_setup(struct dsa_port *dp)
break;
case DSA_PORT_TYPE_USER:
- memset(dlp, 0, sizeof(*dlp));
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- devlink_port_attrs_set(dlp, &attrs);
- err = devlink_port_register(dl, dlp, dp->index);
- if (err)
- break;
- devlink_port_registered = true;
-
dp->mac = of_get_mac_address(dp->dn);
err = dsa_slave_create(dp);
if (err)
@@ -339,8 +301,6 @@ static int dsa_port_setup(struct dsa_port *dp)
dsa_port_disable(dp);
if (err && dsa_port_link_registered)
dsa_port_link_unregister_of(dp);
- if (err && devlink_port_registered)
- devlink_port_unregister(dlp);
if (err)
return err;
@@ -349,10 +309,50 @@ static int dsa_port_setup(struct dsa_port *dp)
return 0;
}
-static void dsa_port_teardown(struct dsa_port *dp)
+static int dsa_port_devlink_setup(struct dsa_port *dp)
{
struct devlink_port *dlp = &dp->devlink_port;
+ struct dsa_switch_tree *dst = dp->ds->dst;
+ struct devlink_port_attrs attrs = {};
+ struct devlink *dl = dp->ds->devlink;
+ const unsigned char *id;
+ unsigned char len;
+ int err;
+
+ id = (const unsigned char *)&dst->index;
+ len = sizeof(dst->index);
+ attrs.phys.port_number = dp->index;
+ memcpy(attrs.switch_id.id, id, len);
+ attrs.switch_id.id_len = len;
+ memset(dlp, 0, sizeof(*dlp));
+
+ switch (dp->type) {
+ case DSA_PORT_TYPE_UNUSED:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
+ break;
+ case DSA_PORT_TYPE_CPU:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
+ break;
+ case DSA_PORT_TYPE_DSA:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
+ break;
+ case DSA_PORT_TYPE_USER:
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ break;
+ }
+
+ devlink_port_attrs_set(dlp, &attrs);
+ err = devlink_port_register(dl, dlp, dp->index);
+
+ if (!err)
+ dp->devlink_port_setup = true;
+
+ return err;
+}
+
+static void dsa_port_teardown(struct dsa_port *dp)
+{
if (!dp->setup)
return;
@@ -362,16 +362,13 @@ static void dsa_port_teardown(struct dsa_port *dp)
case DSA_PORT_TYPE_CPU:
dsa_port_disable(dp);
dsa_tag_driver_put(dp->tag_ops);
- devlink_port_unregister(dlp);
dsa_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_DSA:
dsa_port_disable(dp);
- devlink_port_unregister(dlp);
dsa_port_link_unregister_of(dp);
break;
case DSA_PORT_TYPE_USER:
- devlink_port_unregister(dlp);
if (dp->slave) {
dsa_slave_destroy(dp->slave);
dp->slave = NULL;
@@ -382,9 +379,35 @@ static void dsa_port_teardown(struct dsa_port *dp)
dp->setup = false;
}
+static void dsa_port_devlink_teardown(struct dsa_port *dp)
+{
+ struct devlink_port *dlp = &dp->devlink_port;
+
+ if (dp->devlink_port_setup)
+ devlink_port_unregister(dlp);
+ dp->devlink_port_setup = false;
+}
+
+static int dsa_devlink_info_get(struct devlink *dl,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_switch *ds = dsa_devlink_to_ds(dl);
+
+ if (ds->ops->devlink_info_get)
+ return ds->ops->devlink_info_get(ds, req, extack);
+
+ return -EOPNOTSUPP;
+}
+
+static const struct devlink_ops dsa_devlink_ops = {
+ .info_get = dsa_devlink_info_get,
+};
+
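A hedged sketch of the matching driver hook, wired up through dsa_switch_ops::devlink_info_get (the dispatcher above returns -EOPNOTSUPP when it is unset); the driver name is illustrative:

	static int my_devlink_info_get(struct dsa_switch *ds,
				       struct devlink_info_req *req,
				       struct netlink_ext_ack *extack)
	{
		return devlink_info_driver_name_put(req, "my_dsa_driver");
	}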
static int dsa_switch_setup(struct dsa_switch *ds)
{
struct dsa_devlink_priv *dl_priv;
+ struct dsa_port *dp;
int err;
if (ds->setup)
@@ -410,9 +433,20 @@ static int dsa_switch_setup(struct dsa_switch *ds)
if (err)
goto free_devlink;
+ /* Setup devlink port instances now, so that the switch
+ * setup() can register regions, etc., against the ports
+ */
+ list_for_each_entry(dp, &ds->dst->ports, list) {
+ if (dp->ds == ds) {
+ err = dsa_port_devlink_setup(dp);
+ if (err)
+ goto unregister_devlink_ports;
+ }
+ }
+
err = dsa_switch_register_notifier(ds);
if (err)
- goto unregister_devlink;
+ goto unregister_devlink_ports;
err = ds->ops->setup(ds);
if (err < 0)
@@ -440,7 +474,10 @@ static int dsa_switch_setup(struct dsa_switch *ds)
unregister_notifier:
dsa_switch_unregister_notifier(ds);
-unregister_devlink:
+unregister_devlink_ports:
+ list_for_each_entry(dp, &ds->dst->ports, list)
+ if (dp->ds == ds)
+ dsa_port_devlink_teardown(dp);
devlink_unregister(ds->devlink);
free_devlink:
devlink_free(ds->devlink);
@@ -451,6 +488,8 @@ free_devlink:
static void dsa_switch_teardown(struct dsa_switch *ds)
{
+ struct dsa_port *dp;
+
if (!ds->setup)
return;
@@ -463,6 +502,9 @@ static void dsa_switch_teardown(struct dsa_switch *ds)
ds->ops->teardown(ds);
if (ds->devlink) {
+ list_for_each_entry(dp, &ds->dst->ports, list)
+ if (dp->ds == ds)
+ dsa_port_devlink_teardown(dp);
devlink_unregister(ds->devlink);
devlink_free(ds->devlink);
ds->devlink = NULL;
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 1653e3377cb3..12998bf04e55 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -7,6 +7,7 @@
#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H
+#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
@@ -164,8 +165,6 @@ int dsa_port_vlan_add(struct dsa_port *dp,
struct switchdev_trans *trans);
int dsa_port_vlan_del(struct dsa_port *dp,
const struct switchdev_obj_port_vlan *vlan);
-int dsa_port_vid_add(struct dsa_port *dp, u16 vid, u16 flags);
-int dsa_port_vid_del(struct dsa_port *dp, u16 vid);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
@@ -196,6 +195,65 @@ dsa_slave_to_master(const struct net_device *dev)
return dp->cpu_dp->master;
}
+/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
+ * frames as untagged, since the bridge will not untag them.
+ */
+static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
+{
+ struct dsa_port *dp = dsa_slave_to_port(skb->dev);
+ struct net_device *br = dp->bridge_dev;
+ struct net_device *dev = skb->dev;
+ struct net_device *upper_dev;
+ u16 vid, pvid, proto;
+ int err;
+
+ if (!br || br_vlan_enabled(br))
+ return skb;
+
+ err = br_vlan_get_proto(br, &proto);
+ if (err)
+ return skb;
+
+ /* Move VLAN tag from data to hwaccel */
+ if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
+ skb = skb_vlan_untag(skb);
+ if (!skb)
+ return NULL;
+ }
+
+ if (!skb_vlan_tag_present(skb))
+ return skb;
+
+ vid = skb_vlan_tag_get_id(skb);
+
+ /* We already run under an RCU read-side critical section since
+ * we are called from netif_receive_skb_list_internal().
+ */
+ err = br_vlan_get_pvid_rcu(dev, &pvid);
+ if (err)
+ return skb;
+
+ if (vid != pvid)
+ return skb;
+
+ /* The sad part about attempting to untag from DSA is that we
+ * don't know, unless we check, if the skb will end up in
+ * the bridge's data path - br_allowed_ingress() - or not.
+ * For example, there might be an 8021q upper for the
+ * default_pvid of the bridge, which will steal VLAN-tagged traffic
+ * from the bridge's data path. This is a configuration that DSA
+ * supports because vlan_filtering is 0. In that case, we should
+ * definitely keep the tag, to make sure it keeps working.
+ */
+ upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
+ if (upper_dev)
+ return skb;
+
+ __vlan_hwaccel_clear_tag(skb);
+
+ return skb;
+}
+
/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);
diff --git a/net/dsa/master.c b/net/dsa/master.c
index 61615ebc70e9..c91de041a91d 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -259,6 +259,18 @@ static void dsa_netdev_ops_set(struct net_device *dev,
dev->dsa_ptr->netdev_ops = ops;
}
+static void dsa_master_set_promiscuity(struct net_device *dev, int inc)
+{
+ const struct dsa_device_ops *ops = dev->dsa_ptr->tag_ops;
+
+ if (!ops->promisc_on_master)
+ return;
+
+ rtnl_lock();
+ dev_set_promiscuity(dev, inc);
+ rtnl_unlock();
+}
+
static ssize_t tagging_show(struct device *d, struct device_attribute *attr,
char *buf)
{
@@ -314,9 +326,12 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
dev->dsa_ptr = cpu_dp;
lockdep_set_class(&dev->addr_list_lock,
&dsa_master_addr_list_lock_key);
+
+ dsa_master_set_promiscuity(dev, 1);
+
ret = dsa_master_ethtool_setup(dev);
if (ret)
- return ret;
+ goto out_err_reset_promisc;
dsa_netdev_ops_set(dev, &dsa_netdev_ops);
@@ -329,6 +344,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
out_err_ndo_teardown:
dsa_netdev_ops_set(dev, NULL);
dsa_master_ethtool_teardown(dev);
+out_err_reset_promisc:
+ dsa_master_set_promiscuity(dev, -1);
return ret;
}
@@ -338,6 +355,7 @@ void dsa_master_teardown(struct net_device *dev)
dsa_netdev_ops_set(dev, NULL);
dsa_master_ethtool_teardown(dev);
dsa_master_reset_mtu(dev);
+ dsa_master_set_promiscuity(dev, -1);
dev->dsa_ptr = NULL;
diff --git a/net/dsa/port.c b/net/dsa/port.c
index e23ece229c7e..73569c9af3cc 100644
--- a/net/dsa/port.c
+++ b/net/dsa/port.c
@@ -193,11 +193,44 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
}
+/* Must be called under rcu_read_lock() */
static bool dsa_port_can_apply_vlan_filtering(struct dsa_port *dp,
bool vlan_filtering)
{
struct dsa_switch *ds = dp->ds;
- int i;
+ int err, i;
+
+ /* VLAN awareness was off, so the question is "can we turn it on".
+ * We may have had 8021q uppers, and those need to go first. Make
+ * sure we don't enter an inconsistent state: deny changing the VLAN
+ * awareness state as long as we have 8021q uppers.
+ */
+ if (vlan_filtering && dsa_is_user_port(ds, dp->index)) {
+ struct net_device *upper_dev, *slave = dp->slave;
+ struct net_device *br = dp->bridge_dev;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ struct bridge_vlan_info br_info;
+ u16 vid;
+
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+
+ /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
+ * device or the VID is not found; a return of 0 means the
+ * VLAN exists on the bridge, which is a failure for us here.
+ */
+ err = br_vlan_get_info(br, vid, &br_info);
+ if (err == 0) {
+ dev_err(ds->dev, "Must remove upper %s first\n",
+ upper_dev->name);
+ return false;
+ }
+ }
+ }
if (!ds->vlan_filtering_is_global)
return true;
@@ -232,28 +265,38 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
struct dsa_switch *ds = dp->ds;
int err;
- /* bridge skips -EOPNOTSUPP, so skip the prepare phase */
- if (switchdev_trans_ph_prepare(trans))
- return 0;
+ if (switchdev_trans_ph_prepare(trans)) {
+ bool apply;
- if (!ds->ops->port_vlan_filtering)
- return 0;
+ if (!ds->ops->port_vlan_filtering)
+ return -EOPNOTSUPP;
- if (!dsa_port_can_apply_vlan_filtering(dp, vlan_filtering))
- return -EINVAL;
+ /* We are called from dsa_slave_switchdev_blocking_event(),
+ * which is not under rcu_read_lock(), unlike
+ * dsa_slave_switchdev_event().
+ */
+ rcu_read_lock();
+ apply = dsa_port_can_apply_vlan_filtering(dp, vlan_filtering);
+ rcu_read_unlock();
+ if (!apply)
+ return -EINVAL;
+ }
if (dsa_port_is_vlan_filtering(dp) == vlan_filtering)
return 0;
- err = ds->ops->port_vlan_filtering(ds, dp->index,
- vlan_filtering);
+ err = ds->ops->port_vlan_filtering(ds, dp->index, vlan_filtering,
+ trans);
if (err)
return err;
- if (ds->vlan_filtering_is_global)
- ds->vlan_filtering = vlan_filtering;
- else
- dp->vlan_filtering = vlan_filtering;
+ if (switchdev_trans_ph_commit(trans)) {
+ if (ds->vlan_filtering_is_global)
+ ds->vlan_filtering = vlan_filtering;
+ else
+ dp->vlan_filtering = vlan_filtering;
+ }
+
return 0;
}
@@ -433,39 +476,6 @@ int dsa_port_vlan_del(struct dsa_port *dp,
return dsa_port_notify(dp, DSA_NOTIFIER_VLAN_DEL, &info);
}
-int dsa_port_vid_add(struct dsa_port *dp, u16 vid, u16 flags)
-{
- struct switchdev_obj_port_vlan vlan = {
- .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
- .flags = flags,
- .vid_begin = vid,
- .vid_end = vid,
- };
- struct switchdev_trans trans;
- int err;
-
- trans.ph_prepare = true;
- err = dsa_port_vlan_add(dp, &vlan, &trans);
- if (err)
- return err;
-
- trans.ph_prepare = false;
- return dsa_port_vlan_add(dp, &vlan, &trans);
-}
-EXPORT_SYMBOL(dsa_port_vid_add);
-
-int dsa_port_vid_del(struct dsa_port *dp, u16 vid)
-{
- struct switchdev_obj_port_vlan vlan = {
- .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
- .vid_begin = vid,
- .vid_end = vid,
- };
-
- return dsa_port_vlan_del(dp, &vlan);
-}
-EXPORT_SYMBOL(dsa_port_vid_del);
-
static struct phy_device *dsa_port_get_phy_device(struct dsa_port *dp)
{
struct device_node *phy_dn;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 16e5f98d4882..3bc5ca40c9fb 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -303,13 +303,36 @@ static int dsa_slave_port_attr_set(struct net_device *dev,
return ret;
}
+/* Must be called under rcu_read_lock() */
+static int
+dsa_slave_vlan_check_for_8021q_uppers(struct net_device *slave,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ netdev_for_each_upper_dev_rcu(slave, upper_dev, iter) {
+ u16 vid;
+
+ if (!is_vlan_dev(upper_dev))
+ continue;
+
+ vid = vlan_dev_vlan_id(upper_dev);
+ if (vid >= vlan->vid_begin && vid <= vlan->vid_end)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
static int dsa_slave_vlan_add(struct net_device *dev,
const struct switchdev_obj *obj,
struct switchdev_trans *trans)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
struct switchdev_obj_port_vlan vlan;
- int err;
+ int vid, err;
if (obj->orig_dev != dev)
return -EOPNOTSUPP;
@@ -319,6 +342,17 @@ static int dsa_slave_vlan_add(struct net_device *dev,
vlan = *SWITCHDEV_OBJ_PORT_VLAN(obj);
+ /* Deny adding a bridge VLAN when there is already an 802.1Q upper with
+ * the same VID.
+ */
+ if (trans->ph_prepare && br_vlan_enabled(dp->bridge_dev)) {
+ rcu_read_lock();
+ err = dsa_slave_vlan_check_for_8021q_uppers(dev, &vlan);
+ rcu_read_unlock();
+ if (err)
+ return err;
+ }
+
err = dsa_port_vlan_add(dp, &vlan, trans);
if (err)
return err;
@@ -333,6 +367,12 @@ static int dsa_slave_vlan_add(struct net_device *dev,
if (err)
return err;
+ for (vid = vlan.vid_begin; vid <= vlan.vid_end; vid++) {
+ err = vlan_vid_add(master, htons(ETH_P_8021Q), vid);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -376,7 +416,10 @@ static int dsa_slave_port_obj_add(struct net_device *dev,
static int dsa_slave_vlan_del(struct net_device *dev,
const struct switchdev_obj *obj)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct switchdev_obj_port_vlan *vlan;
+ int vid, err;
if (obj->orig_dev != dev)
return -EOPNOTSUPP;
@@ -384,10 +427,19 @@ static int dsa_slave_vlan_del(struct net_device *dev,
if (dsa_port_skip_vlan_configuration(dp))
return 0;
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+
/* Do not deprogram the CPU port as it may be shared with other user
* ports which can be members of this VLAN as well.
*/
- return dsa_port_vlan_del(dp, SWITCHDEV_OBJ_PORT_VLAN(obj));
+ err = dsa_port_vlan_del(dp, vlan);
+ if (err)
+ return err;
+
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
+ vlan_vid_del(master, htons(ETH_P_8021Q), vid);
+
+ return 0;
}
static int dsa_slave_port_obj_del(struct net_device *dev,
@@ -1169,28 +1221,9 @@ static void dsa_slave_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct pcpu_sw_netstats *s;
- unsigned int start;
- int i;
netdev_stats_to_stats64(stats, &dev->stats);
- for_each_possible_cpu(i) {
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes;
-
- s = per_cpu_ptr(p->stats64, i);
- do {
- start = u64_stats_fetch_begin_irq(&s->syncp);
- tx_packets = s->tx_packets;
- tx_bytes = s->tx_bytes;
- rx_packets = s->rx_packets;
- rx_bytes = s->rx_bytes;
- } while (u64_stats_fetch_retry_irq(&s->syncp, start));
-
- stats->tx_packets += tx_packets;
- stats->tx_bytes += tx_bytes;
- stats->rx_packets += rx_packets;
- stats->rx_bytes += rx_bytes;
- }
+ dev_fetch_sw_netstats(stats, p->stats64);
}
static int dsa_slave_get_rxnfc(struct net_device *dev,
@@ -1232,64 +1265,66 @@ static int dsa_slave_get_ts_info(struct net_device *dev,
static int dsa_slave_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct bridge_vlan_info info;
+ struct switchdev_obj_port_vlan vlan = {
+ .obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
+ .vid_begin = vid,
+ .vid_end = vid,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+ struct switchdev_trans trans;
int ret;
- /* Check for a possible bridge VLAN entry now since there is no
- * need to emulate the switchdev prepare + commit phase.
- */
- if (dp->bridge_dev) {
- if (dsa_port_skip_vlan_configuration(dp))
- return 0;
+ /* User port... */
+ trans.ph_prepare = true;
+ ret = dsa_port_vlan_add(dp, &vlan, &trans);
+ if (ret)
+ return ret;
- /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
- * device, respectively the VID is not found, returning
- * 0 means success, which is a failure for us here.
- */
- ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
- if (ret == 0)
- return -EBUSY;
- }
+ trans.ph_prepare = false;
+ ret = dsa_port_vlan_add(dp, &vlan, &trans);
+ if (ret)
+ return ret;
- ret = dsa_port_vid_add(dp, vid, 0);
+ /* And CPU port... */
+ trans.ph_prepare = true;
+ ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
if (ret)
return ret;
- ret = dsa_port_vid_add(dp->cpu_dp, vid, 0);
+ trans.ph_prepare = false;
+ ret = dsa_port_vlan_add(dp->cpu_dp, &vlan, &trans);
if (ret)
return ret;
- return 0;
+ return vlan_vid_add(master, proto, vid);
}
static int dsa_slave_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
u16 vid)
{
+ struct net_device *master = dsa_slave_to_master(dev);
struct dsa_port *dp = dsa_slave_to_port(dev);
- struct bridge_vlan_info info;
- int ret;
-
- /* Check for a possible bridge VLAN entry now since there is no
- * need to emulate the switchdev prepare + commit phase.
- */
- if (dp->bridge_dev) {
- if (dsa_port_skip_vlan_configuration(dp))
- return 0;
-
- /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
- * device, respectively the VID is not found, returning
- * 0 means success, which is a failure for us here.
- */
- ret = br_vlan_get_info(dp->bridge_dev, vid, &info);
- if (ret == 0)
- return -EBUSY;
- }
+ struct switchdev_obj_port_vlan vlan = {
+ .vid_begin = vid,
+ .vid_end = vid,
+ /* This API only allows programming tagged, non-PVID VIDs */
+ .flags = 0,
+ };
+ int err;
/* Do not deprogram the CPU port as it may be shared with other user
* ports which can be members of this VLAN as well.
*/
- return dsa_port_vid_del(dp, vid);
+ err = dsa_port_vlan_del(dp, &vlan);
+ if (err)
+ return err;
+
+ vlan_vid_del(master, proto, vid);
+
+ return 0;
}
struct dsa_hw_port {
@@ -1784,7 +1819,7 @@ int dsa_slave_create(struct dsa_port *port)
rtnl_lock();
ret = dsa_slave_change_mtu(slave_dev, ETH_DATA_LEN);
rtnl_unlock();
- if (ret)
+ if (ret && ret != -EOPNOTSUPP)
dev_warn(ds->dev, "nonfatal error %d setting MTU on port %d\n",
ret, port->index);
@@ -1792,8 +1827,9 @@ int dsa_slave_create(struct dsa_port *port)
ret = dsa_slave_phy_setup(slave_dev);
if (ret) {
- netdev_err(master, "error %d setting up slave PHY for %s\n",
- ret, slave_dev->name);
+ netdev_err(slave_dev,
+ "error %d setting up PHY for tree %d, switch %d, port %d\n",
+ ret, ds->dst->index, ds->index, port->index);
goto out_gcells;
}
@@ -1880,9 +1916,9 @@ static int dsa_slave_changeupper(struct net_device *dev,
return err;
}
-static int dsa_slave_upper_vlan_check(struct net_device *dev,
- struct netdev_notifier_changeupper_info *
- info)
+static int
+dsa_prevent_bridging_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *ext_ack;
struct net_device *slave;
@@ -1912,14 +1948,56 @@ static int dsa_slave_upper_vlan_check(struct net_device *dev,
return NOTIFY_DONE;
}
+static int
+dsa_slave_check_8021q_upper(struct net_device *dev,
+ struct netdev_notifier_changeupper_info *info)
+{
+ struct dsa_port *dp = dsa_slave_to_port(dev);
+ struct net_device *br = dp->bridge_dev;
+ struct bridge_vlan_info br_info;
+ struct netlink_ext_ack *extack;
+ int err = NOTIFY_DONE;
+ u16 vid;
+
+ if (!br || !br_vlan_enabled(br))
+ return NOTIFY_DONE;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+ vid = vlan_dev_vlan_id(info->upper_dev);
+
+ /* br_vlan_get_info() returns -EINVAL or -ENOENT if the
+ * device or the VID is not found; a return of 0 means the
+ * VLAN exists on the bridge, which is a failure for us here.
+ */
+ err = br_vlan_get_info(br, vid, &br_info);
+ if (err == 0) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "This VLAN is already configured by the bridge");
+ return notifier_from_errno(-EBUSY);
+ }
+
+ return NOTIFY_DONE;
+}
+
static int dsa_slave_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- if (event == NETDEV_CHANGEUPPER) {
+ switch (event) {
+ case NETDEV_PRECHANGEUPPER: {
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (!dsa_slave_dev_check(dev))
+ return dsa_prevent_bridging_8021q_upper(dev, ptr);
+
+ if (is_vlan_dev(info->upper_dev))
+ return dsa_slave_check_8021q_upper(dev, ptr);
+ break;
+ }
+ case NETDEV_CHANGEUPPER:
if (!dsa_slave_dev_check(dev))
- return dsa_slave_upper_vlan_check(dev, ptr);
+ return NOTIFY_DONE;
return dsa_slave_changeupper(dev, ptr);
}
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 86c8dc5c32a0..3fb362b6874e 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -139,8 +139,15 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
}
}
if (unset_vlan_filtering) {
- struct switchdev_trans trans = {0};
+ struct switchdev_trans trans;
+ trans.ph_prepare = true;
+ err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
+ false, &trans);
+ if (err && err != EOPNOTSUPP)
+ return err;
+
+ trans.ph_prepare = false;
err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
false, &trans);
if (err && err != EOPNOTSUPP)
@@ -232,43 +239,6 @@ static int dsa_switch_mdb_del(struct dsa_switch *ds,
return 0;
}
-static int dsa_port_vlan_device_check(struct net_device *vlan_dev,
- int vlan_dev_vid,
- void *arg)
-{
- struct switchdev_obj_port_vlan *vlan = arg;
- u16 vid;
-
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- if (vid == vlan_dev_vid)
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int dsa_port_vlan_check(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
-{
- const struct dsa_port *dp = dsa_to_port(ds, port);
- int err = 0;
-
- /* Device is not bridged, let it proceed with the VLAN device
- * creation.
- */
- if (!dp->bridge_dev)
- return err;
-
- /* dsa_slave_vlan_rx_{add,kill}_vid() cannot use the prepare phase and
- * already checks whether there is an overlapping bridge VLAN entry
- * with the same VID, so here we only need to check that if we are
- * adding a bridge VLAN entry there is not an overlapping VLAN device
- * claiming that VID.
- */
- return vlan_for_each(dp->slave, dsa_port_vlan_device_check,
- (void *)vlan);
-}
-
static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
struct dsa_notifier_vlan_info *info)
{
@@ -291,10 +261,6 @@ static int dsa_switch_vlan_prepare(struct dsa_switch *ds,
for (port = 0; port < ds->num_ports; port++) {
if (dsa_switch_vlan_match(ds, port, info)) {
- err = dsa_port_vlan_check(ds, port, info->vlan);
- if (err)
- return err;
-
err = ds->ops->port_vlan_prepare(ds, port, info->vlan);
if (err)
return err;
diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 780b2a15ac9b..8e3e8a5b8559 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -146,15 +146,15 @@ EXPORT_SYMBOL_GPL(vid_is_dsa_8021q);
* user explicitly configured this @vid through the bridge core, then the @vid
* is installed again, but this time with the flags from the bridge layer.
*/
-static int dsa_8021q_vid_apply(struct dsa_switch *ds, int port, u16 vid,
+static int dsa_8021q_vid_apply(struct dsa_8021q_context *ctx, int port, u16 vid,
u16 flags, bool enabled)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
+ struct dsa_port *dp = dsa_to_port(ctx->ds, port);
if (enabled)
- return dsa_port_vid_add(dp, vid, flags);
+ return ctx->ops->vlan_add(ctx->ds, dp->index, vid, flags);
- return dsa_port_vid_del(dp, vid);
+ return ctx->ops->vlan_del(ctx->ds, dp->index, vid);
}
/* RX VLAN tagging (left) and TX VLAN tagging (right) setup shown for a single
@@ -209,25 +209,29 @@ static int dsa_8021q_vid_apply(struct dsa_switch *ds, int port, u16 vid,
* +-+-----+-+-----+-+-----+-+-----+-+ +-+-----+-+-----+-+-----+-+-----+-+
* swp0 swp1 swp2 swp3 swp0 swp1 swp2 swp3
*/
-int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int port, bool enabled)
+static int dsa_8021q_setup_port(struct dsa_8021q_context *ctx, int port,
+ bool enabled)
{
- int upstream = dsa_upstream_port(ds, port);
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
- u16 tx_vid = dsa_8021q_tx_vid(ds, port);
- int i, err;
+ int upstream = dsa_upstream_port(ctx->ds, port);
+ u16 rx_vid = dsa_8021q_rx_vid(ctx->ds, port);
+ u16 tx_vid = dsa_8021q_tx_vid(ctx->ds, port);
+ struct net_device *master;
+ int i, err, subvlan;
/* The CPU port is implicitly configured by
* configuring the front-panel ports
*/
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_is_user_port(ctx->ds, port))
return 0;
+ master = dsa_to_port(ctx->ds, port)->cpu_dp->master;
+
/* Add this user port's RX VID to the membership list of all others
* (including itself). This is so that bridging will not be hindered.
* L2 forwarding rules still take precedence when there are no VLAN
* restrictions, so there are no concerns about leaking traffic.
*/
- for (i = 0; i < ds->num_ports; i++) {
+ for (i = 0; i < ctx->ds->num_ports; i++) {
u16 flags;
if (i == upstream)
@@ -240,9 +244,10 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int port, bool enabled)
/* The RX VID is a regular VLAN on all others */
flags = BRIDGE_VLAN_INFO_UNTAGGED;
- err = dsa_8021q_vid_apply(ds, i, rx_vid, flags, enabled);
+ err = dsa_8021q_vid_apply(ctx, i, rx_vid, flags, enabled);
if (err) {
- dev_err(ds->dev, "Failed to apply RX VID %d to port %d: %d\n",
+ dev_err(ctx->ds->dev,
+ "Failed to apply RX VID %d to port %d: %d\n",
rx_vid, port, err);
return err;
}
@@ -251,80 +256,115 @@ int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int port, bool enabled)
/* CPU port needs to see this port's RX VID
* as tagged egress.
*/
- err = dsa_8021q_vid_apply(ds, upstream, rx_vid, 0, enabled);
+ err = dsa_8021q_vid_apply(ctx, upstream, rx_vid, 0, enabled);
if (err) {
- dev_err(ds->dev, "Failed to apply RX VID %d to port %d: %d\n",
+ dev_err(ctx->ds->dev,
+ "Failed to apply RX VID %d to port %d: %d\n",
rx_vid, port, err);
return err;
}
+ /* Add to the master's RX filter not only @rx_vid, but in fact
+ * the entire subvlan range, just in case this DSA switch might
+ * want to use sub-VLANs.
+ */
+ for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++) {
+ u16 vid = dsa_8021q_rx_vid_subvlan(ctx->ds, port, subvlan);
+
+ if (enabled)
+ vlan_vid_add(master, ctx->proto, vid);
+ else
+ vlan_vid_del(master, ctx->proto, vid);
+ }
+
/* Finally apply the TX VID on this port and on the CPU port */
- err = dsa_8021q_vid_apply(ds, port, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED,
+ err = dsa_8021q_vid_apply(ctx, port, tx_vid, BRIDGE_VLAN_INFO_UNTAGGED,
enabled);
if (err) {
- dev_err(ds->dev, "Failed to apply TX VID %d on port %d: %d\n",
+ dev_err(ctx->ds->dev,
+ "Failed to apply TX VID %d on port %d: %d\n",
tx_vid, port, err);
return err;
}
- err = dsa_8021q_vid_apply(ds, upstream, tx_vid, 0, enabled);
+ err = dsa_8021q_vid_apply(ctx, upstream, tx_vid, 0, enabled);
if (err) {
- dev_err(ds->dev, "Failed to apply TX VID %d on port %d: %d\n",
+ dev_err(ctx->ds->dev,
+ "Failed to apply TX VID %d on port %d: %d\n",
tx_vid, upstream, err);
return err;
}
return err;
}
-EXPORT_SYMBOL_GPL(dsa_port_setup_8021q_tagging);
-static int dsa_8021q_crosschip_link_apply(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
+int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled)
+{
+ int rc, port;
+
+ ASSERT_RTNL();
+
+ for (port = 0; port < ctx->ds->num_ports; port++) {
+ rc = dsa_8021q_setup_port(ctx, port, enabled);
+ if (rc < 0) {
+ dev_err(ctx->ds->dev,
+ "Failed to setup VLAN tagging for port %d: %d\n",
+ port, rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dsa_8021q_setup);
+
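A hedged sketch of a driver adopting the context-based API; my_vlan_add/my_vlan_del and the allocation site are illustrative, and the caller must hold rtnl since dsa_8021q_setup() asserts it above:

	static const struct dsa_8021q_ops my_8021q_ops = {
		.vlan_add	= my_vlan_add,
		.vlan_del	= my_vlan_del,
	};

	static int my_setup_8021q(struct dsa_switch *ds)
	{
		struct dsa_8021q_context *ctx;

		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctx->ops = &my_8021q_ops;
		ctx->proto = htons(ETH_P_8021Q);
		ctx->ds = ds;
		INIT_LIST_HEAD(&ctx->crosschip_links);

		return dsa_8021q_setup(ctx, true);
	}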
+static int dsa_8021q_crosschip_link_apply(struct dsa_8021q_context *ctx,
+ int port,
+ struct dsa_8021q_context *other_ctx,
int other_port, bool enabled)
{
- u16 rx_vid = dsa_8021q_rx_vid(ds, port);
+ u16 rx_vid = dsa_8021q_rx_vid(ctx->ds, port);
/* @rx_vid of local @ds port @port goes to @other_port of
* @other_ds
*/
- return dsa_8021q_vid_apply(other_ds, other_port, rx_vid,
+ return dsa_8021q_vid_apply(other_ctx, other_port, rx_vid,
BRIDGE_VLAN_INFO_UNTAGGED, enabled);
}
-static int dsa_8021q_crosschip_link_add(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links)
+static int dsa_8021q_crosschip_link_add(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
{
struct dsa_8021q_crosschip_link *c;
- list_for_each_entry(c, crosschip_links, list) {
- if (c->port == port && c->other_ds == other_ds &&
+ list_for_each_entry(c, &ctx->crosschip_links, list) {
+ if (c->port == port && c->other_ctx == other_ctx &&
c->other_port == other_port) {
refcount_inc(&c->refcount);
return 0;
}
}
- dev_dbg(ds->dev, "adding crosschip link from port %d to %s port %d\n",
- port, dev_name(other_ds->dev), other_port);
+ dev_dbg(ctx->ds->dev,
+ "adding crosschip link from port %d to %s port %d\n",
+ port, dev_name(other_ctx->ds->dev), other_port);
c = kzalloc(sizeof(*c), GFP_KERNEL);
if (!c)
return -ENOMEM;
c->port = port;
- c->other_ds = other_ds;
+ c->other_ctx = other_ctx;
c->other_port = other_port;
refcount_set(&c->refcount, 1);
- list_add(&c->list, crosschip_links);
+ list_add(&c->list, &ctx->crosschip_links);
return 0;
}
-static void dsa_8021q_crosschip_link_del(struct dsa_switch *ds,
+static void dsa_8021q_crosschip_link_del(struct dsa_8021q_context *ctx,
struct dsa_8021q_crosschip_link *c,
- struct list_head *crosschip_links,
bool *keep)
{
*keep = !refcount_dec_and_test(&c->refcount);
@@ -332,9 +372,9 @@ static void dsa_8021q_crosschip_link_del(struct dsa_switch *ds,
if (*keep)
return;
- dev_dbg(ds->dev,
+ dev_dbg(ctx->ds->dev,
"deleting crosschip link from port %d to %s port %d\n",
- c->port, dev_name(c->other_ds->dev), c->other_port);
+ c->port, dev_name(c->other_ctx->ds->dev), c->other_port);
list_del(&c->list);
kfree(c);
@@ -347,64 +387,58 @@ static void dsa_8021q_crosschip_link_del(struct dsa_switch *ds,
* or untagged: it doesn't matter, since it should never egress a frame having
* our @rx_vid.
*/
-int dsa_8021q_crosschip_bridge_join(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links)
+int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
{
/* @other_upstream is how @other_ds reaches us. If we are part
* of disjoint trees, then we are probably connected through
* our CPU ports. If we're part of the same tree though, we should
* probably use dsa_towards_port.
*/
- int other_upstream = dsa_upstream_port(other_ds, other_port);
+ int other_upstream = dsa_upstream_port(other_ctx->ds, other_port);
int rc;
- rc = dsa_8021q_crosschip_link_add(ds, port, other_ds,
- other_port, crosschip_links);
+ rc = dsa_8021q_crosschip_link_add(ctx, port, other_ctx, other_port);
if (rc)
return rc;
- rc = dsa_8021q_crosschip_link_apply(ds, port, other_ds,
+ rc = dsa_8021q_crosschip_link_apply(ctx, port, other_ctx,
other_port, true);
if (rc)
return rc;
- rc = dsa_8021q_crosschip_link_add(ds, port, other_ds,
- other_upstream,
- crosschip_links);
+ rc = dsa_8021q_crosschip_link_add(ctx, port, other_ctx, other_upstream);
if (rc)
return rc;
- return dsa_8021q_crosschip_link_apply(ds, port, other_ds,
+ return dsa_8021q_crosschip_link_apply(ctx, port, other_ctx,
other_upstream, true);
}
EXPORT_SYMBOL_GPL(dsa_8021q_crosschip_bridge_join);
-int dsa_8021q_crosschip_bridge_leave(struct dsa_switch *ds, int port,
- struct dsa_switch *other_ds,
- int other_port,
- struct list_head *crosschip_links)
+int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port,
+ struct dsa_8021q_context *other_ctx,
+ int other_port)
{
- int other_upstream = dsa_upstream_port(other_ds, other_port);
+ int other_upstream = dsa_upstream_port(other_ctx->ds, other_port);
struct dsa_8021q_crosschip_link *c, *n;
- list_for_each_entry_safe(c, n, crosschip_links, list) {
- if (c->port == port && c->other_ds == other_ds &&
+ list_for_each_entry_safe(c, n, &ctx->crosschip_links, list) {
+ if (c->port == port && c->other_ctx == other_ctx &&
(c->other_port == other_port ||
c->other_port == other_upstream)) {
- struct dsa_switch *other_ds = c->other_ds;
+ struct dsa_8021q_context *other_ctx = c->other_ctx;
int other_port = c->other_port;
bool keep;
int rc;
- dsa_8021q_crosschip_link_del(ds, c, crosschip_links,
- &keep);
+ dsa_8021q_crosschip_link_del(ctx, c, &keep);
if (keep)
continue;
- rc = dsa_8021q_crosschip_link_apply(ds, port,
- other_ds,
+ rc = dsa_8021q_crosschip_link_apply(ctx, port,
+ other_ctx,
other_port,
false);
if (rc)
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index cc8512b5f9e2..ad72dff8d524 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -107,6 +107,18 @@ static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
return skb;
}
+/* Frames with this tag have one of these two layouts:
+ * -----------------------------------
+ * | MAC DA | MAC SA | 4b tag | Type | DSA_TAG_PROTO_BRCM
+ * -----------------------------------
+ * -----------------------------------
+ * | 4b tag | MAC DA | MAC SA | Type | DSA_TAG_PROTO_BRCM_PREPEND
+ * -----------------------------------
+ * In both cases, at receive time, skb->data points 2 bytes before the actual
+ * Ethernet type field, and there is an offset of 4 bytes between skb->data
+ * and the start of the payload, so the same low-level receive function can
+ * be used.
+ */
static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
@@ -144,27 +156,6 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
return skb;
}
-
-static int brcm_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
- int *offset)
-{
- /* We have been called on the DSA master network device after
- * eth_type_trans() which pulled the Ethernet header already.
- * Frames have one of these two layouts:
- * -----------------------------------
- * | MAC DA | MAC SA | 4b tag | Type | DSA_TAG_PROTO_BRCM
- * -----------------------------------
- * -----------------------------------
- * | 4b tag | MAC DA | MAC SA | Type | DSA_TAG_PROTO_BRCM_PREPEND
- * -----------------------------------
- * skb->data points 2 bytes before the actual Ethernet type field and
- * we have an offset of 4bytes between where skb->data and where the
- * payload starts.
- */
- *offset = BRCM_TAG_LEN;
- *proto = ((__be16 *)skb->data)[1];
- return 0;
-}
#endif
#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
@@ -200,7 +191,6 @@ static const struct dsa_device_ops brcm_netdev_ops = {
.xmit = brcm_tag_xmit,
.rcv = brcm_tag_rcv,
.overhead = BRCM_TAG_LEN,
- .flow_dissect = brcm_tag_flow_dissect,
};
DSA_TAG_DRIVER(brcm_netdev_ops);
@@ -229,7 +219,6 @@ static const struct dsa_device_ops brcm_prepend_netdev_ops = {
.xmit = brcm_tag_xmit_prepend,
.rcv = brcm_tag_rcv_prepend,
.overhead = BRCM_TAG_LEN,
- .flow_dissect = brcm_tag_flow_dissect,
};
DSA_TAG_DRIVER(brcm_prepend_netdev_ops);
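The .flow_dissect callback removed from this tagger (and from tag_dsa, tag_edsa, tag_mtk, tag_qca and tag_rtl4_a below) always derived the same two values from the tag length. A minimal sketch of a generic replacement, assuming, as holds for every tagger converted here, a header tag of ->overhead bytes that ends immediately before the encapsulated EtherType:

static void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
					 __be16 *proto, int *offset)
{
	const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
	int tag_len = ops->overhead;

	/* dissection resumes past the tag; the encapsulated EtherType
	 * occupies the last 2 bytes of the tag
	 */
	*offset = tag_len;
	*proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
}

With BRCM_TAG_LEN == 4 this yields exactly the offset/proto pair the deleted brcm_tag_flow_dissect() computed, and likewise for the other converted taggers (e.g. EDSA_HLEN == 8 gives index 3, QCA_HDR_LEN == 2 gives index 0).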
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index 7ddec9794477..0b756fae68a5 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -142,20 +142,11 @@ static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
return skb;
}
-static int dsa_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
- int *offset)
-{
- *offset = 4;
- *proto = ((__be16 *)skb->data)[1];
- return 0;
-}
-
static const struct dsa_device_ops dsa_netdev_ops = {
.name = "dsa",
.proto = DSA_TAG_PROTO_DSA,
.xmit = dsa_xmit,
.rcv = dsa_rcv,
- .flow_dissect = dsa_tag_flow_dissect,
.overhead = DSA_HLEN,
};
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index d6200ff98200..120614240319 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -192,20 +192,11 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
return skb;
}
-static int edsa_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
- int *offset)
-{
- *offset = 8;
- *proto = ((__be16 *)skb->data)[3];
- return 0;
-}
-
static const struct dsa_device_ops edsa_netdev_ops = {
.name = "edsa",
.proto = DSA_TAG_PROTO_EDSA,
.xmit = edsa_xmit,
.rcv = edsa_rcv,
- .flow_dissect = edsa_tag_flow_dissect,
.overhead = EDSA_HLEN,
};
diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
index bd1a3158d79a..945a9bd5ba35 100644
--- a/net/dsa/tag_ksz.c
+++ b/net/dsa/tag_ksz.c
@@ -237,6 +237,7 @@ static const struct dsa_device_ops ksz9893_netdev_ops = {
.xmit = ksz9893_xmit,
.rcv = ksz9477_rcv,
.overhead = KSZ_INGRESS_TAG_LEN,
+ .tail_tag = true,
};
DSA_TAG_DRIVER(ksz9893_netdev_ops);
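The new tail_tag flag (also set for tag_trailer further down) tells generic code that the switch tag sits after the payload instead of in front of the Ethernet header. A sketch of the assumed consumer on the flow-dissection side: a tail tag leaves the EtherType in place, so no offset or protocol override is needed:

const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
__be16 proto = skb->protocol;
int offset = 0;

/* tail taggers do not break flow dissection */
if (!ops->tail_tag) {
	if (ops->flow_dissect)
		ops->flow_dissect(skb, &proto, &offset);
	else
		dsa_tag_generic_flow_dissect(skb, &proto, &offset);
}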
diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
index f602fc758d68..4cdd9cf428fb 100644
--- a/net/dsa/tag_mtk.c
+++ b/net/dsa/tag_mtk.c
@@ -105,21 +105,11 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
return skb;
}
-static int mtk_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
- int *offset)
-{
- *offset = 4;
- *proto = ((__be16 *)skb->data)[1];
-
- return 0;
-}
-
static const struct dsa_device_ops mtk_netdev_ops = {
.name = "mtk",
.proto = DSA_TAG_PROTO_MTK,
.xmit = mtk_tag_xmit,
.rcv = mtk_tag_rcv,
- .flow_dissect = mtk_tag_flow_dissect,
.overhead = MTK_HDR_LEN,
};
diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c
index b4fc05cafaa6..3b468aca5c53 100644
--- a/net/dsa/tag_ocelot.c
+++ b/net/dsa/tag_ocelot.c
@@ -137,13 +137,16 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct dsa_port *dp = dsa_slave_to_port(netdev);
+ struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
struct dsa_switch *ds = dp->ds;
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port;
+ u8 *prefix, *injection;
u64 qos_class, rew_op;
- u8 *injection;
+ int err;
- if (unlikely(skb_cow_head(skb, OCELOT_TAG_LEN) < 0)) {
+ err = skb_cow_head(skb, OCELOT_TOTAL_TAG_LEN);
+ if (unlikely(err < 0)) {
netdev_err(netdev, "Cannot make room for tag.\n");
return NULL;
}
@@ -152,16 +155,18 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb,
injection = skb_push(skb, OCELOT_TAG_LEN);
- memcpy(injection, ocelot_port->xmit_template, OCELOT_TAG_LEN);
+ prefix = skb_push(skb, OCELOT_SHORT_PREFIX_LEN);
+
+ memcpy(prefix, ocelot_port->xmit_template, OCELOT_TOTAL_TAG_LEN);
+
/* Fix up the fields which are not statically determined
* in the template
*/
qos_class = skb->priority;
packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0);
- if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
-
+ /* TX timestamping was requested */
+ if (clone) {
rew_op = ocelot_port->ptp_cmd;
/* Retrieve timestamp ID populated inside skb->cb[0] of the
* clone by ocelot_port_add_txtstamp_skb
@@ -179,19 +184,24 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
struct net_device *netdev,
struct packet_type *pt)
{
+ struct dsa_port *cpu_dp = netdev->dsa_ptr;
+ struct dsa_switch *ds = cpu_dp->ds;
+ struct ocelot *ocelot = ds->priv;
u64 src_port, qos_class;
+ u64 vlan_tci, tag_type;
u8 *start = skb->data;
u8 *extraction;
+ u16 vlan_tpid;
/* Revert skb->data by the amount consumed by the DSA master,
* so it points to the beginning of the frame.
*/
skb_push(skb, ETH_HLEN);
- /* We don't care about the long prefix, it is just for easy entrance
+ /* We don't care about the short prefix, it is just for easy entrance
* into the DSA master's RX filter. Discard it now by moving it into
* the headroom.
*/
- skb_pull(skb, OCELOT_LONG_PREFIX_LEN);
+ skb_pull(skb, OCELOT_SHORT_PREFIX_LEN);
/* And skb->data now points to the extraction frame header.
* Keep a pointer to it.
*/
@@ -205,10 +215,12 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
skb_pull(skb, ETH_HLEN);
/* Remove from inet csum the extraction header */
- skb_postpull_rcsum(skb, start, OCELOT_LONG_PREFIX_LEN + OCELOT_TAG_LEN);
+ skb_postpull_rcsum(skb, start, OCELOT_TOTAL_TAG_LEN);
packing(extraction, &src_port, 46, 43, OCELOT_TAG_LEN, UNPACK, 0);
packing(extraction, &qos_class, 19, 17, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &tag_type, 16, 16, OCELOT_TAG_LEN, UNPACK, 0);
+ packing(extraction, &vlan_tci, 15, 0, OCELOT_TAG_LEN, UNPACK, 0);
skb->dev = dsa_master_find_slave(netdev, 0, src_port);
if (!skb->dev)
@@ -223,6 +235,33 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb,
skb->offload_fwd_mark = 1;
skb->priority = qos_class;
+ /* Ocelot switches copy frames unmodified to the CPU. However, it is
+ * possible for the user to request a VLAN modification through
+ * VCAP_IS1_ACT_VID_REPLACE_ENA. In this case, what will happen is that
+ * the VLAN ID field from the Extraction Header gets updated, but the
+ * 802.1Q header does not (the classified VLAN only becomes visible on
+ * egress through the "port tag" of front-panel ports).
+ * So, for traffic extracted by the CPU, we want to pick up the
+ * classified VLAN and manually replace the existing 802.1Q header from
+ * the packet with it, so that the operating system is always up to
+ * date with the result of tc-vlan actions.
+ * NOTE: In VLAN-unaware mode, we don't want to do that, we want the
+ * frame to remain unmodified, because the classified VLAN is always
+ * equal to the pvid of the ingress port and should not be used for
+ * processing.
+ */
+ vlan_tpid = tag_type ? ETH_P_8021AD : ETH_P_8021Q;
+
+ if (ocelot->ports[src_port]->vlan_aware &&
+ eth_hdr(skb)->h_proto == htons(vlan_tpid)) {
+ u16 dummy_vlan_tci;
+
+ skb_push_rcsum(skb, ETH_HLEN);
+ __skb_vlan_pop(skb, &dummy_vlan_tci);
+ skb_pull_rcsum(skb, ETH_HLEN);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_tpid), vlan_tci);
+ }
+
return skb;
}
@@ -231,7 +270,8 @@ static const struct dsa_device_ops ocelot_netdev_ops = {
.proto = DSA_TAG_PROTO_OCELOT,
.xmit = ocelot_xmit,
.rcv = ocelot_rcv,
- .overhead = OCELOT_TAG_LEN + OCELOT_LONG_PREFIX_LEN,
+ .overhead = OCELOT_TOTAL_TAG_LEN,
+ .promisc_on_master = true,
};
MODULE_LICENSE("GPL v2");
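For reference on the length arithmetic above: extracted frames now carry the short prefix rather than the long one, and OCELOT_TOTAL_TAG_LEN covers prefix plus extraction header. The assumed relationship between the macros, consistent with the skb_push()/skb_pull() math in ocelot_xmit() and ocelot_rcv() (the authoritative definitions live in the ocelot headers):

#define OCELOT_SHORT_PREFIX_LEN	4
#define OCELOT_TAG_LEN		16
#define OCELOT_TOTAL_TAG_LEN	(OCELOT_SHORT_PREFIX_LEN + OCELOT_TAG_LEN)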
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 7066f5e697d7..1b9e8507112b 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -89,21 +89,11 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
return skb;
}
-static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
- int *offset)
-{
- *offset = QCA_HDR_LEN;
- *proto = ((__be16 *)skb->data)[0];
-
- return 0;
-}
-
static const struct dsa_device_ops qca_netdev_ops = {
.name = "qca",
.proto = DSA_TAG_PROTO_QCA,
.xmit = qca_tag_xmit,
.rcv = qca_tag_rcv,
- .flow_dissect = qca_tag_flow_dissect,
.overhead = QCA_HDR_LEN,
};
diff --git a/net/dsa/tag_rtl4_a.c b/net/dsa/tag_rtl4_a.c
index 7b63010fa87b..2646abe5a69e 100644
--- a/net/dsa/tag_rtl4_a.c
+++ b/net/dsa/tag_rtl4_a.c
@@ -106,22 +106,11 @@ static struct sk_buff *rtl4a_tag_rcv(struct sk_buff *skb,
return skb;
}
-static int rtl4a_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
- int *offset)
-{
- *offset = RTL4_A_HDR_LEN;
- /* Skip past the tag and fetch the encapsulated Ethertype */
- *proto = ((__be16 *)skb->data)[1];
-
- return 0;
-}
-
static const struct dsa_device_ops rtl4a_netdev_ops = {
.name = "rtl4a",
.proto = DSA_TAG_PROTO_RTL4_A,
.xmit = rtl4a_tag_xmit,
.rcv = rtl4a_tag_rcv,
- .flow_dissect = rtl4a_tag_flow_dissect,
.overhead = RTL4_A_HDR_LEN,
};
module_dsa_tag_driver(rtl4a_netdev_ops);
diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
index 9b4a4d719291..50496013cdb7 100644
--- a/net/dsa/tag_sja1105.c
+++ b/net/dsa/tag_sja1105.c
@@ -72,14 +72,21 @@ static inline bool sja1105_is_meta_frame(const struct sk_buff *skb)
static bool sja1105_can_use_vlan_as_tags(const struct sk_buff *skb)
{
struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
+ u16 vlan_tci;
if (hdr->h_vlan_proto == htons(ETH_P_SJA1105))
return true;
- if (hdr->h_vlan_proto != htons(ETH_P_8021Q))
+ if (hdr->h_vlan_proto != htons(ETH_P_8021Q) &&
+ !skb_vlan_tag_present(skb))
return false;
- return vid_is_dsa_8021q(ntohs(hdr->h_vlan_TCI) & VLAN_VID_MASK);
+ if (skb_vlan_tag_present(skb))
+ vlan_tci = skb_vlan_tag_get(skb);
+ else
+ vlan_tci = ntohs(hdr->h_vlan_TCI);
+
+ return vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK);
}
/* This is the first time the tagger sees the frame on RX.
@@ -283,7 +290,8 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
hdr = eth_hdr(skb);
tpid = ntohs(hdr->h_proto);
- is_tagged = (tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q);
+ is_tagged = (tpid == ETH_P_SJA1105 || tpid == ETH_P_8021Q ||
+ skb_vlan_tag_present(skb));
is_link_local = sja1105_is_link_local(skb);
is_meta = sja1105_is_meta_frame(skb);
@@ -292,7 +300,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
if (is_tagged) {
/* Normal traffic path. */
skb_push_rcsum(skb, ETH_HLEN);
- __skb_vlan_pop(skb, &tci);
+ if (skb_vlan_tag_present(skb)) {
+ tci = skb_vlan_tag_get(skb);
+ __vlan_hwaccel_clear_tag(skb);
+ } else {
+ __skb_vlan_pop(skb, &tci);
+ }
skb_pull_rcsum(skb, ETH_HLEN);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
@@ -333,6 +346,16 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
is_meta);
}
+static void sja1105_flow_dissect(const struct sk_buff *skb, __be16 *proto,
+ int *offset)
+{
+ /* No tag added for management frames, all ok */
+ if (unlikely(sja1105_is_link_local(skb)))
+ return;
+
+ dsa_tag_generic_flow_dissect(skb, proto, offset);
+}
+
static const struct dsa_device_ops sja1105_netdev_ops = {
.name = "sja1105",
.proto = DSA_TAG_PROTO_SJA1105,
@@ -340,6 +363,8 @@ static const struct dsa_device_ops sja1105_netdev_ops = {
.rcv = sja1105_rcv,
.filter = sja1105_filter,
.overhead = VLAN_HLEN,
+ .flow_dissect = sja1105_flow_dissect,
+ .promisc_on_master = true,
};
MODULE_LICENSE("GPL v2");
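The skb_vlan_tag_present() checks added above cover DSA masters with VLAN RX offload enabled: such a master strips the outer 802.1Q header into skb metadata before the tagger runs, so the dsa_8021q VID must be read from there rather than from the payload. A condensed sketch of the two RX paths, assuming a master with NETIF_F_HW_VLAN_CTAG_RX:

u16 tci;

if (skb_vlan_tag_present(skb)) {
	/* the master already popped the tag into skb metadata */
	tci = skb_vlan_tag_get(skb);
	__vlan_hwaccel_clear_tag(skb);
} else {
	/* the tag is still inside the packet payload */
	__skb_vlan_pop(skb, &tci);
}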
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 4f8ab62f0208..3a1cc24a4f0a 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -83,6 +83,7 @@ static const struct dsa_device_ops trailer_netdev_ops = {
.xmit = trailer_xmit,
.rcv = trailer_rcv,
.overhead = 4,
+ .tail_tag = true,
};
MODULE_LICENSE("GPL");
diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
index dae7402eaca3..1fb3603d92ad 100644
--- a/net/ethtool/bitset.c
+++ b/net/ethtool/bitset.c
@@ -302,8 +302,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static const struct nla_policy bitset_policy[ETHTOOL_A_BITSET_MAX + 1] = {
- [ETHTOOL_A_BITSET_UNSPEC] = { .type = NLA_REJECT },
+static const struct nla_policy bitset_policy[] = {
[ETHTOOL_A_BITSET_NOMASK] = { .type = NLA_FLAG },
[ETHTOOL_A_BITSET_SIZE] = NLA_POLICY_MAX(NLA_U32,
ETHNL_MAX_BITSET_SIZE),
@@ -312,8 +311,7 @@ static const struct nla_policy bitset_policy[ETHTOOL_A_BITSET_MAX + 1] = {
[ETHTOOL_A_BITSET_MASK] = { .type = NLA_BINARY },
};
-static const struct nla_policy bit_policy[ETHTOOL_A_BITSET_BIT_MAX + 1] = {
- [ETHTOOL_A_BITSET_BIT_UNSPEC] = { .type = NLA_REJECT },
+static const struct nla_policy bit_policy[] = {
[ETHTOOL_A_BITSET_BIT_INDEX] = { .type = NLA_U32 },
[ETHTOOL_A_BITSET_BIT_NAME] = { .type = NLA_NUL_STRING },
[ETHTOOL_A_BITSET_BIT_VALUE] = { .type = NLA_FLAG },
@@ -329,10 +327,10 @@ static const struct nla_policy bit_policy[ETHTOOL_A_BITSET_BIT_MAX + 1] = {
*/
int ethnl_bitset_is_compact(const struct nlattr *bitset, bool *compact)
{
- struct nlattr *tb[ETHTOOL_A_BITSET_MAX + 1];
+ struct nlattr *tb[ARRAY_SIZE(bitset_policy)];
int ret;
- ret = nla_parse_nested(tb, ETHTOOL_A_BITSET_MAX, bitset,
+ ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, bitset,
bitset_policy, NULL);
if (ret < 0)
return ret;
@@ -381,10 +379,10 @@ static int ethnl_parse_bit(unsigned int *index, bool *val, unsigned int nbits,
ethnl_string_array_t names,
struct netlink_ext_ack *extack)
{
- struct nlattr *tb[ETHTOOL_A_BITSET_BIT_MAX + 1];
+ struct nlattr *tb[ARRAY_SIZE(bit_policy)];
int ret, idx;
- ret = nla_parse_nested(tb, ETHTOOL_A_BITSET_BIT_MAX, bit_attr,
+ ret = nla_parse_nested(tb, ARRAY_SIZE(bit_policy) - 1, bit_attr,
bit_policy, extack);
if (ret < 0)
return ret;
@@ -555,15 +553,15 @@ int ethnl_update_bitset32(u32 *bitmap, unsigned int nbits,
const struct nlattr *attr, ethnl_string_array_t names,
struct netlink_ext_ack *extack, bool *mod)
{
- struct nlattr *tb[ETHTOOL_A_BITSET_MAX + 1];
+ struct nlattr *tb[ARRAY_SIZE(bitset_policy)];
unsigned int change_bits;
bool no_mask;
int ret;
if (!attr)
return 0;
- ret = nla_parse_nested(tb, ETHTOOL_A_BITSET_MAX, attr, bitset_policy,
- extack);
+ ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, attr,
+ bitset_policy, extack);
if (ret < 0)
return ret;
@@ -608,7 +606,7 @@ int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
ethnl_string_array_t names,
struct netlink_ext_ack *extack)
{
- struct nlattr *tb[ETHTOOL_A_BITSET_MAX + 1];
+ struct nlattr *tb[ARRAY_SIZE(bitset_policy)];
const struct nlattr *bit_attr;
bool no_mask;
int rem;
@@ -616,8 +614,8 @@ int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
if (!attr)
return 0;
- ret = nla_parse_nested(tb, ETHTOOL_A_BITSET_MAX, attr, bitset_policy,
- extack);
+ ret = nla_parse_nested(tb, ARRAY_SIZE(bitset_policy) - 1, attr,
+ bitset_policy, extack);
if (ret < 0)
return ret;
no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
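The ARRAY_SIZE()-based sizing above is the pattern the rest of this patch applies throughout net/ethtool: the policy array is left unsized, the attribute table is dimensioned from it, and the maxtype handed to the parser is the last valid index, so the three can never drift apart. A condensed sketch of the idiom (example_policy is illustrative):

static const struct nla_policy example_policy[] = {
	[ETHTOOL_A_BITSET_NOMASK] = { .type = NLA_FLAG },
	/* ... */
};

struct nlattr *tb[ARRAY_SIZE(example_policy)];
int ret;

ret = nla_parse_nested(tb, ARRAY_SIZE(example_policy) - 1, attr,
		       example_policy, extack);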
diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c
index 888f6e101f34..63560bbb7d1f 100644
--- a/net/ethtool/cabletest.c
+++ b/net/ethtool/cabletest.c
@@ -11,10 +11,9 @@
*/
#define MAX_CABLE_LENGTH_CM (150 * 100)
-static const struct nla_policy
-cable_test_act_policy[ETHTOOL_A_CABLE_TEST_MAX + 1] = {
- [ETHTOOL_A_CABLE_TEST_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_CABLE_TEST_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_cable_test_act_policy[] = {
+ [ETHTOOL_A_CABLE_TEST_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int ethnl_cable_test_started(struct phy_device *phydev, u8 cmd)
@@ -56,18 +55,12 @@ out:
int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_CABLE_TEST_MAX + 1];
struct ethnl_req_info req_info = {};
const struct ethtool_phy_ops *ops;
+ struct nlattr **tb = info->attrs;
struct net_device *dev;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_CABLE_TEST_MAX,
- cable_test_act_policy, info->extack);
- if (ret < 0)
- return ret;
-
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_CABLE_TEST_HEADER],
genl_info_net(info), info->extack,
@@ -218,18 +211,16 @@ struct cable_test_tdr_req_info {
struct ethnl_req_info base;
};
-static const struct nla_policy
-cable_test_tdr_act_cfg_policy[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1] = {
+static const struct nla_policy cable_test_tdr_act_cfg_policy[] = {
[ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .type = NLA_U32 },
[ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST] = { .type = NLA_U32 },
[ETHTOOL_A_CABLE_TEST_TDR_CFG_STEP] = { .type = NLA_U32 },
[ETHTOOL_A_CABLE_TEST_TDR_CFG_PAIR] = { .type = NLA_U8 },
};
-static const struct nla_policy
-cable_test_tdr_act_policy[ETHTOOL_A_CABLE_TEST_TDR_MAX + 1] = {
- [ETHTOOL_A_CABLE_TEST_TDR_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_CABLE_TEST_TDR_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_cable_test_tdr_act_policy[] = {
+ [ETHTOOL_A_CABLE_TEST_TDR_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_CABLE_TEST_TDR_CFG] = { .type = NLA_NESTED },
};
@@ -238,7 +229,7 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
struct genl_info *info,
struct phy_tdr_config *cfg)
{
- struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX + 1];
+ struct nlattr *tb[ARRAY_SIZE(cable_test_tdr_act_cfg_policy)];
int ret;
cfg->first = 100;
@@ -249,8 +240,10 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
if (!nest)
return 0;
- ret = nla_parse_nested(tb, ETHTOOL_A_CABLE_TEST_TDR_CFG_MAX, nest,
- cable_test_tdr_act_cfg_policy, info->extack);
+ ret = nla_parse_nested(tb,
+ ARRAY_SIZE(cable_test_tdr_act_cfg_policy) - 1,
+ nest, cable_test_tdr_act_cfg_policy,
+ info->extack);
if (ret < 0)
return ret;
@@ -313,19 +306,13 @@ static int ethnl_act_cable_test_tdr_cfg(const struct nlattr *nest,
int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_CABLE_TEST_TDR_MAX + 1];
struct ethnl_req_info req_info = {};
const struct ethtool_phy_ops *ops;
+ struct nlattr **tb = info->attrs;
struct phy_tdr_config cfg;
struct net_device *dev;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_CABLE_TEST_TDR_MAX,
- cable_test_tdr_act_policy, info->extack);
- if (ret < 0)
- return ret;
-
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_CABLE_TEST_TDR_HEADER],
genl_info_net(info), info->extack,
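The nlmsg_parse() calls deleted from these handlers need no replacement: once a policy and maxattr are attached to each genl_ops entry (see the net/ethtool/netlink.c hunks below), the generic netlink core validates and parses the message before invoking the handler, and doit/dumpit simply read the pre-parsed info->attrs. The shape of such an op entry, mirroring the registrations below:

{
	.cmd	 = ETHTOOL_MSG_CABLE_TEST_ACT,
	.flags	 = GENL_UNS_ADMIN_PERM,
	.doit	 = ethnl_act_cable_test,
	.policy	 = ethnl_cable_test_act_policy,
	.maxattr = ARRAY_SIZE(ethnl_cable_test_act_policy) - 1,
},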
diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c
index 9ef54cdcf662..5635604cb9ba 100644
--- a/net/ethtool/channels.c
+++ b/net/ethtool/channels.c
@@ -17,18 +17,9 @@ struct channels_reply_data {
#define CHANNELS_REPDATA(__reply_base) \
container_of(__reply_base, struct channels_reply_data, base)
-static const struct nla_policy
-channels_get_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
- [ETHTOOL_A_CHANNELS_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_CHANNELS_RX_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_TX_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_RX_COUNT] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_TX_COUNT] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_COMBINED_COUNT] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_channels_get_policy[] = {
+ [ETHTOOL_A_CHANNELS_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int channels_prepare_data(const struct ethnl_req_info *req_base,
@@ -99,10 +90,8 @@ const struct ethnl_request_ops ethnl_channels_request_ops = {
.request_cmd = ETHTOOL_MSG_CHANNELS_GET,
.reply_cmd = ETHTOOL_MSG_CHANNELS_GET_REPLY,
.hdr_attr = ETHTOOL_A_CHANNELS_HEADER,
- .max_attr = ETHTOOL_A_CHANNELS_MAX,
.req_info_size = sizeof(struct channels_req_info),
.reply_data_size = sizeof(struct channels_reply_data),
- .request_policy = channels_get_policy,
.prepare_data = channels_prepare_data,
.reply_size = channels_reply_size,
@@ -111,14 +100,9 @@ const struct ethnl_request_ops ethnl_channels_request_ops = {
/* CHANNELS_SET */
-static const struct nla_policy
-channels_set_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
- [ETHTOOL_A_CHANNELS_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_CHANNELS_RX_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_TX_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_OTHER_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_CHANNELS_COMBINED_MAX] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_channels_set_policy[] = {
+ [ETHTOOL_A_CHANNELS_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_CHANNELS_RX_COUNT] = { .type = NLA_U32 },
[ETHTOOL_A_CHANNELS_TX_COUNT] = { .type = NLA_U32 },
[ETHTOOL_A_CHANNELS_OTHER_COUNT] = { .type = NLA_U32 },
@@ -127,22 +111,17 @@ channels_set_policy[ETHTOOL_A_CHANNELS_MAX + 1] = {
int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_CHANNELS_MAX + 1];
unsigned int from_channel, old_total, i;
bool mod = false, mod_combined = false;
struct ethtool_channels channels = {};
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
const struct nlattr *err_attr;
const struct ethtool_ops *ops;
struct net_device *dev;
u32 max_rx_in_use = 0;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_CHANNELS_MAX, channels_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_CHANNELS_HEADER],
genl_info_net(info), info->extack,
@@ -223,7 +202,7 @@ int ethnl_set_channels(struct sk_buff *skb, struct genl_info *info)
from_channel = channels.combined_count +
min(channels.rx_count, channels.tx_count);
for (i = from_channel; i < old_total; i++)
- if (xdp_get_umem_from_qid(dev, i)) {
+ if (xsk_get_pool_from_qid(dev, i)) {
GENL_SET_ERR_MSG(info, "requested channel counts are too low for existing zerocopy AF_XDP sockets");
return -EINVAL;
}
diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c
index 6afd99042d67..1d6bc132aa4d 100644
--- a/net/ethtool/coalesce.c
+++ b/net/ethtool/coalesce.c
@@ -51,32 +51,9 @@ __CHECK_SUPPORTED_OFFSET(COALESCE_TX_USECS_HIGH);
__CHECK_SUPPORTED_OFFSET(COALESCE_TX_MAX_FRAMES_HIGH);
__CHECK_SUPPORTED_OFFSET(COALESCE_RATE_SAMPLE_INTERVAL);
-static const struct nla_policy
-coalesce_get_policy[ETHTOOL_A_COALESCE_MAX + 1] = {
- [ETHTOOL_A_COALESCE_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_COALESCE_RX_USECS] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RX_MAX_FRAMES] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RX_USECS_IRQ] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_IRQ] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_USECS] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_MAX_FRAMES] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_USECS_IRQ] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_STATS_BLOCK_USECS] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_PKT_RATE_LOW] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RX_USECS_LOW] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_LOW] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_USECS_LOW] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_LOW] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_PKT_RATE_HIGH] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RX_USECS_HIGH] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RX_MAX_FRAMES_HIGH] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_USECS_HIGH] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_coalesce_get_policy[] = {
+ [ETHTOOL_A_COALESCE_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int coalesce_prepare_data(const struct ethnl_req_info *req_base,
@@ -203,10 +180,8 @@ const struct ethnl_request_ops ethnl_coalesce_request_ops = {
.request_cmd = ETHTOOL_MSG_COALESCE_GET,
.reply_cmd = ETHTOOL_MSG_COALESCE_GET_REPLY,
.hdr_attr = ETHTOOL_A_COALESCE_HEADER,
- .max_attr = ETHTOOL_A_COALESCE_MAX,
.req_info_size = sizeof(struct coalesce_req_info),
.reply_data_size = sizeof(struct coalesce_reply_data),
- .request_policy = coalesce_get_policy,
.prepare_data = coalesce_prepare_data,
.reply_size = coalesce_reply_size,
@@ -215,10 +190,9 @@ const struct ethnl_request_ops ethnl_coalesce_request_ops = {
/* COALESCE_SET */
-static const struct nla_policy
-coalesce_set_policy[ETHTOOL_A_COALESCE_MAX + 1] = {
- [ETHTOOL_A_COALESCE_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_COALESCE_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_coalesce_set_policy[] = {
+ [ETHTOOL_A_COALESCE_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_COALESCE_RX_USECS] = { .type = NLA_U32 },
[ETHTOOL_A_COALESCE_RX_MAX_FRAMES] = { .type = NLA_U32 },
[ETHTOOL_A_COALESCE_RX_USECS_IRQ] = { .type = NLA_U32 },
@@ -245,9 +219,9 @@ coalesce_set_policy[ETHTOOL_A_COALESCE_MAX + 1] = {
int ethnl_set_coalesce(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_COALESCE_MAX + 1];
struct ethtool_coalesce coalesce = {};
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
const struct ethtool_ops *ops;
struct net_device *dev;
u32 supported_params;
@@ -255,11 +229,6 @@ int ethnl_set_coalesce(struct sk_buff *skb, struct genl_info *info)
int ret;
u16 a;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_COALESCE_MAX, coalesce_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_COALESCE_HEADER],
genl_info_net(info), info->extack,
diff --git a/net/ethtool/common.c b/net/ethtool/common.c
index ed19573fccd7..24036e3055a1 100644
--- a/net/ethtool/common.c
+++ b/net/ethtool/common.c
@@ -192,6 +192,8 @@ const char link_mode_names[][ETH_GSTRING_LEN] = {
__DEFINE_LINK_MODE_NAME(400000, LR4_ER4_FR4, Full),
__DEFINE_LINK_MODE_NAME(400000, DR4, Full),
__DEFINE_LINK_MODE_NAME(400000, CR4, Full),
+ __DEFINE_LINK_MODE_NAME(100, FX, Half),
+ __DEFINE_LINK_MODE_NAME(100, FX, Full),
};
static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
diff --git a/net/ethtool/debug.c b/net/ethtool/debug.c
index 1bd026a29f3f..f99912d7957e 100644
--- a/net/ethtool/debug.c
+++ b/net/ethtool/debug.c
@@ -16,11 +16,9 @@ struct debug_reply_data {
#define DEBUG_REPDATA(__reply_base) \
container_of(__reply_base, struct debug_reply_data, base)
-static const struct nla_policy
-debug_get_policy[ETHTOOL_A_DEBUG_MAX + 1] = {
- [ETHTOOL_A_DEBUG_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_DEBUG_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_DEBUG_MSGMASK] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_debug_get_policy[] = {
+ [ETHTOOL_A_DEBUG_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int debug_prepare_data(const struct ethnl_req_info *req_base,
@@ -69,10 +67,8 @@ const struct ethnl_request_ops ethnl_debug_request_ops = {
.request_cmd = ETHTOOL_MSG_DEBUG_GET,
.reply_cmd = ETHTOOL_MSG_DEBUG_GET_REPLY,
.hdr_attr = ETHTOOL_A_DEBUG_HEADER,
- .max_attr = ETHTOOL_A_DEBUG_MAX,
.req_info_size = sizeof(struct debug_req_info),
.reply_data_size = sizeof(struct debug_reply_data),
- .request_policy = debug_get_policy,
.prepare_data = debug_prepare_data,
.reply_size = debug_reply_size,
@@ -81,27 +77,21 @@ const struct ethnl_request_ops ethnl_debug_request_ops = {
/* DEBUG_SET */
-static const struct nla_policy
-debug_set_policy[ETHTOOL_A_DEBUG_MAX + 1] = {
- [ETHTOOL_A_DEBUG_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_DEBUG_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_debug_set_policy[] = {
+ [ETHTOOL_A_DEBUG_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_DEBUG_MSGMASK] = { .type = NLA_NESTED },
};
int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_DEBUG_MAX + 1];
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
struct net_device *dev;
bool mod = false;
u32 msg_mask;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_DEBUG_MAX, debug_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_DEBUG_HEADER],
genl_info_net(info), info->extack,
diff --git a/net/ethtool/eee.c b/net/ethtool/eee.c
index 94aa19cff22f..901b7de941ab 100644
--- a/net/ethtool/eee.c
+++ b/net/ethtool/eee.c
@@ -19,16 +19,9 @@ struct eee_reply_data {
#define EEE_REPDATA(__reply_base) \
container_of(__reply_base, struct eee_reply_data, base)
-static const struct nla_policy
-eee_get_policy[ETHTOOL_A_EEE_MAX + 1] = {
- [ETHTOOL_A_EEE_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_EEE_MODES_OURS] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_MODES_PEER] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_ACTIVE] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_ENABLED] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_TX_LPI_ENABLED] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_TX_LPI_TIMER] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_eee_get_policy[] = {
+ [ETHTOOL_A_EEE_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int eee_prepare_data(const struct ethnl_req_info *req_base,
@@ -119,10 +112,8 @@ const struct ethnl_request_ops ethnl_eee_request_ops = {
.request_cmd = ETHTOOL_MSG_EEE_GET,
.reply_cmd = ETHTOOL_MSG_EEE_GET_REPLY,
.hdr_attr = ETHTOOL_A_EEE_HEADER,
- .max_attr = ETHTOOL_A_EEE_MAX,
.req_info_size = sizeof(struct eee_req_info),
.reply_data_size = sizeof(struct eee_reply_data),
- .request_policy = eee_get_policy,
.prepare_data = eee_prepare_data,
.reply_size = eee_reply_size,
@@ -131,13 +122,10 @@ const struct ethnl_request_ops ethnl_eee_request_ops = {
/* EEE_SET */
-static const struct nla_policy
-eee_set_policy[ETHTOOL_A_EEE_MAX + 1] = {
- [ETHTOOL_A_EEE_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_eee_set_policy[] = {
+ [ETHTOOL_A_EEE_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_EEE_MODES_OURS] = { .type = NLA_NESTED },
- [ETHTOOL_A_EEE_MODES_PEER] = { .type = NLA_REJECT },
- [ETHTOOL_A_EEE_ACTIVE] = { .type = NLA_REJECT },
[ETHTOOL_A_EEE_ENABLED] = { .type = NLA_U8 },
[ETHTOOL_A_EEE_TX_LPI_ENABLED] = { .type = NLA_U8 },
[ETHTOOL_A_EEE_TX_LPI_TIMER] = { .type = NLA_U32 },
@@ -145,18 +133,14 @@ eee_set_policy[ETHTOOL_A_EEE_MAX + 1] = {
int ethnl_set_eee(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_EEE_MAX + 1];
- struct ethtool_eee eee = {};
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
const struct ethtool_ops *ops;
+ struct ethtool_eee eee = {};
struct net_device *dev;
bool mod = false;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_EEE_MAX,
- eee_set_policy, info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_EEE_HEADER],
genl_info_net(info), info->extack,
diff --git a/net/ethtool/features.c b/net/ethtool/features.c
index 495635f152ba..8ee4cdbd6b82 100644
--- a/net/ethtool/features.c
+++ b/net/ethtool/features.c
@@ -20,14 +20,9 @@ struct features_reply_data {
#define FEATURES_REPDATA(__reply_base) \
container_of(__reply_base, struct features_reply_data, base)
-static const struct nla_policy
-features_get_policy[ETHTOOL_A_FEATURES_MAX + 1] = {
- [ETHTOOL_A_FEATURES_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_FEATURES_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_FEATURES_HW] = { .type = NLA_REJECT },
- [ETHTOOL_A_FEATURES_WANTED] = { .type = NLA_REJECT },
- [ETHTOOL_A_FEATURES_ACTIVE] = { .type = NLA_REJECT },
- [ETHTOOL_A_FEATURES_NOCHANGE] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_features_get_policy[] = {
+ [ETHTOOL_A_FEATURES_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static void ethnl_features_to_bitmap32(u32 *dest, netdev_features_t src)
@@ -120,10 +115,8 @@ const struct ethnl_request_ops ethnl_features_request_ops = {
.request_cmd = ETHTOOL_MSG_FEATURES_GET,
.reply_cmd = ETHTOOL_MSG_FEATURES_GET_REPLY,
.hdr_attr = ETHTOOL_A_FEATURES_HEADER,
- .max_attr = ETHTOOL_A_FEATURES_MAX,
.req_info_size = sizeof(struct features_req_info),
.reply_data_size = sizeof(struct features_reply_data),
- .request_policy = features_get_policy,
.prepare_data = features_prepare_data,
.reply_size = features_reply_size,
@@ -132,14 +125,10 @@ const struct ethnl_request_ops ethnl_features_request_ops = {
/* FEATURES_SET */
-static const struct nla_policy
-features_set_policy[ETHTOOL_A_FEATURES_MAX + 1] = {
- [ETHTOOL_A_FEATURES_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_FEATURES_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_FEATURES_HW] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_features_set_policy[] = {
+ [ETHTOOL_A_FEATURES_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_FEATURES_WANTED] = { .type = NLA_NESTED },
- [ETHTOOL_A_FEATURES_ACTIVE] = { .type = NLA_REJECT },
- [ETHTOOL_A_FEATURES_NOCHANGE] = { .type = NLA_REJECT },
};
static void ethnl_features_to_bitmap(unsigned long *dest, netdev_features_t val)
@@ -229,17 +218,12 @@ int ethnl_set_features(struct sk_buff *skb, struct genl_info *info)
DECLARE_BITMAP(new_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(req_wanted, NETDEV_FEATURE_COUNT);
DECLARE_BITMAP(req_mask, NETDEV_FEATURE_COUNT);
- struct nlattr *tb[ETHTOOL_A_FEATURES_MAX + 1];
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
struct net_device *dev;
bool mod;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_FEATURES_MAX, features_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
if (!tb[ETHTOOL_A_FEATURES_WANTED])
return -EINVAL;
ret = ethnl_parse_header_dev_get(&req_info,
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 441794e0034f..ec2cd7aab5ad 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -1706,7 +1706,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
min(channels.rx_count, channels.tx_count);
to_channel = curr.combined_count + max(curr.rx_count, curr.tx_count);
for (i = from_channel; i < to_channel; i++)
- if (xdp_get_umem_from_qid(dev, i))
+ if (xsk_get_pool_from_qid(dev, i))
return -EINVAL;
ret = dev->ethtool_ops->set_channels(dev, &channels);
@@ -1861,23 +1861,18 @@ static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
id.data ? (id.data * HZ) : MAX_SCHEDULE_TIMEOUT);
} else {
/* Driver expects to be called at twice the frequency in rc */
- int n = rc * 2, i, interval = HZ / n;
+ int n = rc * 2, interval = HZ / n;
+ u64 count = n * id.data, i = 0;
- /* Count down seconds */
do {
- /* Count down iterations per second */
- i = n;
- do {
- rtnl_lock();
- rc = ops->set_phys_id(dev,
- (i & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
- rtnl_unlock();
- if (rc)
- break;
- schedule_timeout_interruptible(interval);
- } while (!signal_pending(current) && --i != 0);
- } while (!signal_pending(current) &&
- (id.data == 0 || --id.data != 0));
+ rtnl_lock();
+ rc = ops->set_phys_id(dev,
+ (i++ & 1) ? ETHTOOL_ID_OFF : ETHTOOL_ID_ON);
+ rtnl_unlock();
+ if (rc)
+ break;
+ schedule_timeout_interruptible(interval);
+ } while (!signal_pending(current) && (!id.data || i < count));
}
rtnl_lock();
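A worked example of the flattened blink loop above, assuming HZ = 1000: id.data = 3 seconds with a driver returning rc = 2 gives n = 4 toggles per second, interval = 250 jiffies and count = 12, i.e. twelve alternating ETHTOOL_ID_ON/ETHTOOL_ID_OFF calls spaced 250 ms apart. id.data == 0 keeps the loop running until a signal arrives, and count being u64 keeps the toggle budget from overflowing for large id.data values.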
@@ -2464,14 +2459,15 @@ static int ethtool_phy_tunable_valid(const struct ethtool_tunable *tuna)
static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
{
- int ret;
- struct ethtool_tunable tuna;
struct phy_device *phydev = dev->phydev;
+ struct ethtool_tunable tuna;
+ bool phy_drv_tunable;
void *data;
+ int ret;
- if (!(phydev && phydev->drv && phydev->drv->get_tunable))
+ phy_drv_tunable = phydev && phydev->drv && phydev->drv->get_tunable;
+ if (!phy_drv_tunable && !dev->ethtool_ops->get_phy_tunable)
return -EOPNOTSUPP;
-
if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
return -EFAULT;
ret = ethtool_phy_tunable_valid(&tuna);
@@ -2480,9 +2476,13 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
data = kmalloc(tuna.len, GFP_USER);
if (!data)
return -ENOMEM;
- mutex_lock(&phydev->lock);
- ret = phydev->drv->get_tunable(phydev, &tuna, data);
- mutex_unlock(&phydev->lock);
+ if (phy_drv_tunable) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->get_tunable(phydev, &tuna, data);
+ mutex_unlock(&phydev->lock);
+ } else {
+ ret = dev->ethtool_ops->get_phy_tunable(dev, &tuna, data);
+ }
if (ret)
goto out;
useraddr += sizeof(tuna);
@@ -2498,12 +2498,14 @@ out:
static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
{
- int ret;
- struct ethtool_tunable tuna;
struct phy_device *phydev = dev->phydev;
+ struct ethtool_tunable tuna;
+ bool phy_drv_tunable;
void *data;
+ int ret;
- if (!(phydev && phydev->drv && phydev->drv->set_tunable))
+ phy_drv_tunable = phydev && phydev->drv && phydev->drv->set_tunable;
+ if (!phy_drv_tunable && !dev->ethtool_ops->set_phy_tunable)
return -EOPNOTSUPP;
if (copy_from_user(&tuna, useraddr, sizeof(tuna)))
return -EFAULT;
@@ -2514,9 +2516,13 @@ static int set_phy_tunable(struct net_device *dev, void __user *useraddr)
data = memdup_user(useraddr, tuna.len);
if (IS_ERR(data))
return PTR_ERR(data);
- mutex_lock(&phydev->lock);
- ret = phydev->drv->set_tunable(phydev, &tuna, data);
- mutex_unlock(&phydev->lock);
+ if (phy_drv_tunable) {
+ mutex_lock(&phydev->lock);
+ ret = phydev->drv->set_tunable(phydev, &tuna, data);
+ mutex_unlock(&phydev->lock);
+ } else {
+ ret = dev->ethtool_ops->set_phy_tunable(dev, &tuna, data);
+ }
kfree(data);
return ret;
@@ -3025,13 +3031,14 @@ ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input)
case TCP_V4_FLOW:
case TCP_V6_FLOW:
match->key.basic.ip_proto = IPPROTO_TCP;
+ match->mask.basic.ip_proto = 0xff;
break;
case UDP_V4_FLOW:
case UDP_V6_FLOW:
match->key.basic.ip_proto = IPPROTO_UDP;
+ match->mask.basic.ip_proto = 0xff;
break;
}
- match->mask.basic.ip_proto = 0xff;
match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_BASIC);
match->dissector.offset[FLOW_DISSECTOR_KEY_BASIC] =
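Moving the ip_proto mask into the TCP/UDP cases matters for every flow type that falls through the switch above: previously such a rule got mask.basic.ip_proto = 0xff while key.basic.ip_proto stayed 0, presumably narrowing "any protocol" to "protocol 0". A hypothetical before/after for a non-TCP/UDP rule:

/* e.g. flow_type == ETHER_FLOW (hypothetical):
 *   before: key.ip_proto = 0, mask.ip_proto = 0xff -> matches protocol 0 only
 *   after:  key.ip_proto = 0, mask.ip_proto = 0    -> protocol not matched on
 */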
diff --git a/net/ethtool/linkinfo.c b/net/ethtool/linkinfo.c
index 5eaf173eaaca..b91839870efc 100644
--- a/net/ethtool/linkinfo.c
+++ b/net/ethtool/linkinfo.c
@@ -16,15 +16,9 @@ struct linkinfo_reply_data {
#define LINKINFO_REPDATA(__reply_base) \
container_of(__reply_base, struct linkinfo_reply_data, base)
-static const struct nla_policy
-linkinfo_get_policy[ETHTOOL_A_LINKINFO_MAX + 1] = {
- [ETHTOOL_A_LINKINFO_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKINFO_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_LINKINFO_PORT] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKINFO_PHYADDR] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKINFO_TP_MDIX] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKINFO_TP_MDIX_CTRL] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKINFO_TRANSCEIVER] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_linkinfo_get_policy[] = {
+ [ETHTOOL_A_LINKINFO_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int linkinfo_prepare_data(const struct ethnl_req_info *req_base,
@@ -83,10 +77,8 @@ const struct ethnl_request_ops ethnl_linkinfo_request_ops = {
.request_cmd = ETHTOOL_MSG_LINKINFO_GET,
.reply_cmd = ETHTOOL_MSG_LINKINFO_GET_REPLY,
.hdr_attr = ETHTOOL_A_LINKINFO_HEADER,
- .max_attr = ETHTOOL_A_LINKINFO_MAX,
.req_info_size = sizeof(struct linkinfo_req_info),
.reply_data_size = sizeof(struct linkinfo_reply_data),
- .request_policy = linkinfo_get_policy,
.prepare_data = linkinfo_prepare_data,
.reply_size = linkinfo_reply_size,
@@ -95,32 +87,24 @@ const struct ethnl_request_ops ethnl_linkinfo_request_ops = {
/* LINKINFO_SET */
-static const struct nla_policy
-linkinfo_set_policy[ETHTOOL_A_LINKINFO_MAX + 1] = {
- [ETHTOOL_A_LINKINFO_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKINFO_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_linkinfo_set_policy[] = {
+ [ETHTOOL_A_LINKINFO_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_LINKINFO_PORT] = { .type = NLA_U8 },
[ETHTOOL_A_LINKINFO_PHYADDR] = { .type = NLA_U8 },
- [ETHTOOL_A_LINKINFO_TP_MDIX] = { .type = NLA_REJECT },
[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL] = { .type = NLA_U8 },
- [ETHTOOL_A_LINKINFO_TRANSCEIVER] = { .type = NLA_REJECT },
};
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_LINKINFO_MAX + 1];
struct ethtool_link_ksettings ksettings = {};
struct ethtool_link_settings *lsettings;
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
struct net_device *dev;
bool mod = false;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_LINKINFO_MAX, linkinfo_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_LINKINFO_HEADER],
genl_info_net(info), info->extack,
diff --git a/net/ethtool/linkmodes.c b/net/ethtool/linkmodes.c
index 7044a2853886..c5bcb9abc8b9 100644
--- a/net/ethtool/linkmodes.c
+++ b/net/ethtool/linkmodes.c
@@ -18,17 +18,9 @@ struct linkmodes_reply_data {
#define LINKMODES_REPDATA(__reply_base) \
container_of(__reply_base, struct linkmodes_reply_data, base)
-static const struct nla_policy
-linkmodes_get_policy[ETHTOOL_A_LINKMODES_MAX + 1] = {
- [ETHTOOL_A_LINKMODES_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_LINKMODES_AUTONEG] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_OURS] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_PEER] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_SPEED] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_DUPLEX] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_linkmodes_get_policy[] = {
+ [ETHTOOL_A_LINKMODES_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int linkmodes_prepare_data(const struct ethnl_req_info *req_base,
@@ -148,10 +140,8 @@ const struct ethnl_request_ops ethnl_linkmodes_request_ops = {
.request_cmd = ETHTOOL_MSG_LINKMODES_GET,
.reply_cmd = ETHTOOL_MSG_LINKMODES_GET_REPLY,
.hdr_attr = ETHTOOL_A_LINKMODES_HEADER,
- .max_attr = ETHTOOL_A_LINKMODES_MAX,
.req_info_size = sizeof(struct linkmodes_req_info),
.reply_data_size = sizeof(struct linkmodes_reply_data),
- .request_policy = linkmodes_get_policy,
.prepare_data = linkmodes_prepare_data,
.reply_size = linkmodes_reply_size,
@@ -272,19 +262,18 @@ static const struct link_mode_info link_mode_params[] = {
__DEFINE_LINK_MODE_PARAMS(400000, LR4_ER4_FR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, DR4, Full),
__DEFINE_LINK_MODE_PARAMS(400000, CR4, Full),
+ __DEFINE_LINK_MODE_PARAMS(100, FX, Half),
+ __DEFINE_LINK_MODE_PARAMS(100, FX, Full),
};
-static const struct nla_policy
-linkmodes_set_policy[ETHTOOL_A_LINKMODES_MAX + 1] = {
- [ETHTOOL_A_LINKMODES_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKMODES_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_linkmodes_set_policy[] = {
+ [ETHTOOL_A_LINKMODES_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_LINKMODES_AUTONEG] = { .type = NLA_U8 },
[ETHTOOL_A_LINKMODES_OURS] = { .type = NLA_NESTED },
- [ETHTOOL_A_LINKMODES_PEER] = { .type = NLA_REJECT },
[ETHTOOL_A_LINKMODES_SPEED] = { .type = NLA_U32 },
[ETHTOOL_A_LINKMODES_DUPLEX] = { .type = NLA_U8 },
[ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG] = { .type = NLA_U8 },
- [ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE] = { .type = NLA_REJECT },
};
/* Set advertised link modes to all supported modes matching requested speed
@@ -390,18 +379,13 @@ static int ethnl_update_linkmodes(struct genl_info *info, struct nlattr **tb,
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_LINKMODES_MAX + 1];
struct ethtool_link_ksettings ksettings = {};
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
struct net_device *dev;
bool mod = false;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_LINKMODES_MAX, linkmodes_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_LINKMODES_HEADER],
genl_info_net(info), info->extack,
diff --git a/net/ethtool/linkstate.c b/net/ethtool/linkstate.c
index 4834091ec24c..fb676f349455 100644
--- a/net/ethtool/linkstate.c
+++ b/net/ethtool/linkstate.c
@@ -20,15 +20,9 @@ struct linkstate_reply_data {
#define LINKSTATE_REPDATA(__reply_base) \
container_of(__reply_base, struct linkstate_reply_data, base)
-static const struct nla_policy
-linkstate_get_policy[ETHTOOL_A_LINKSTATE_MAX + 1] = {
- [ETHTOOL_A_LINKSTATE_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKSTATE_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_LINKSTATE_LINK] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKSTATE_SQI] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKSTATE_SQI_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKSTATE_EXT_STATE] = { .type = NLA_REJECT },
- [ETHTOOL_A_LINKSTATE_EXT_SUBSTATE] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_linkstate_get_policy[] = {
+ [ETHTOOL_A_LINKSTATE_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int linkstate_get_sqi(struct net_device *dev)
@@ -179,10 +173,8 @@ const struct ethnl_request_ops ethnl_linkstate_request_ops = {
.request_cmd = ETHTOOL_MSG_LINKSTATE_GET,
.reply_cmd = ETHTOOL_MSG_LINKSTATE_GET_REPLY,
.hdr_attr = ETHTOOL_A_LINKSTATE_HEADER,
- .max_attr = ETHTOOL_A_LINKSTATE_MAX,
.req_info_size = sizeof(struct linkstate_req_info),
.reply_data_size = sizeof(struct linkstate_reply_data),
- .request_policy = linkstate_get_policy,
.prepare_data = linkstate_prepare_data,
.reply_size = linkstate_reply_size,
diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c
index 0c3f54baec4e..50d3c8896f91 100644
--- a/net/ethtool/netlink.c
+++ b/net/ethtool/netlink.c
@@ -9,12 +9,24 @@ static struct genl_family ethtool_genl_family;
static bool ethnl_ok __read_mostly;
static u32 ethnl_bcast_seq;
-static const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_MAX + 1] = {
- [ETHTOOL_A_HEADER_UNSPEC] = { .type = NLA_REJECT },
+#define ETHTOOL_FLAGS_BASIC (ETHTOOL_FLAG_COMPACT_BITSETS | \
+ ETHTOOL_FLAG_OMIT_REPLY)
+#define ETHTOOL_FLAGS_STATS (ETHTOOL_FLAGS_BASIC | ETHTOOL_FLAG_STATS)
+
+const struct nla_policy ethnl_header_policy[] = {
[ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 },
[ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING,
.len = ALTIFNAMSIZ - 1 },
- [ETHTOOL_A_HEADER_FLAGS] = { .type = NLA_U32 },
+ [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ ETHTOOL_FLAGS_BASIC),
+};
+
+const struct nla_policy ethnl_header_policy_stats[] = {
+ [ETHTOOL_A_HEADER_DEV_INDEX] = { .type = NLA_U32 },
+ [ETHTOOL_A_HEADER_DEV_NAME] = { .type = NLA_NUL_STRING,
+ .len = ALTIFNAMSIZ - 1 },
+ [ETHTOOL_A_HEADER_FLAGS] = NLA_POLICY_MASK(NLA_U32,
+ ETHTOOL_FLAGS_STATS),
};
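NLA_POLICY_MASK(NLA_U32, mask) has the netlink core reject any flag bits outside the mask during validation, which is why the open-coded ETHTOOL_FLAG_ALL check disappears from ethnl_parse_header_dev_get() below. Two header policies exist because only statistics-capable commands should accept ETHTOOL_FLAG_STATS; a command opts in by nesting the stats variant, sketched here on an assumed stats-capable request:

const struct nla_policy ethnl_example_get_policy[] = {
	[ETHTOOL_A_PAUSE_HEADER] =
		NLA_POLICY_NESTED(ethnl_header_policy_stats),
};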
/**
@@ -37,7 +49,7 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
const struct nlattr *header, struct net *net,
struct netlink_ext_ack *extack, bool require_dev)
{
- struct nlattr *tb[ETHTOOL_A_HEADER_MAX + 1];
+ struct nlattr *tb[ARRAY_SIZE(ethnl_header_policy)];
const struct nlattr *devname_attr;
struct net_device *dev = NULL;
u32 flags = 0;
@@ -47,19 +59,15 @@ int ethnl_parse_header_dev_get(struct ethnl_req_info *req_info,
NL_SET_ERR_MSG(extack, "request header missing");
return -EINVAL;
}
- ret = nla_parse_nested(tb, ETHTOOL_A_HEADER_MAX, header,
- ethnl_header_policy, extack);
+ /* No validation here: the command policy is expected to have a nested
+ * policy set for the header, so validation has already been done.
+ */
+ ret = nla_parse_nested(tb, ARRAY_SIZE(ethnl_header_policy) - 1, header,
+ NULL, extack);
if (ret < 0)
return ret;
- if (tb[ETHTOOL_A_HEADER_FLAGS]) {
+ if (tb[ETHTOOL_A_HEADER_FLAGS])
flags = nla_get_u32(tb[ETHTOOL_A_HEADER_FLAGS]);
- if (flags & ~ETHTOOL_FLAG_ALL) {
- NL_SET_ERR_MSG_ATTR(extack, tb[ETHTOOL_A_HEADER_FLAGS],
- "unrecognized request flags");
- nl_set_extack_cookie_u32(extack, ETHTOOL_FLAG_ALL);
- return -EOPNOTSUPP;
- }
- }
devname_attr = tb[ETHTOOL_A_HEADER_DEV_NAME];
if (tb[ETHTOOL_A_HEADER_DEV_INDEX]) {
@@ -247,7 +255,7 @@ static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
/**
* ethnl_default_parse() - Parse request message
* @req_info: pointer to structure to put data into
- * @nlhdr: pointer to request message header
+ * @tb: parsed attributes
* @net: request netns
* @request_ops: struct request_ops for request type
* @extack: netlink extack for error reporting
@@ -259,37 +267,24 @@ static struct ethnl_dump_ctx *ethnl_dump_context(struct netlink_callback *cb)
* Return: 0 on success or negative error code
*/
static int ethnl_default_parse(struct ethnl_req_info *req_info,
- const struct nlmsghdr *nlhdr, struct net *net,
+ struct nlattr **tb, struct net *net,
const struct ethnl_request_ops *request_ops,
struct netlink_ext_ack *extack, bool require_dev)
{
- struct nlattr **tb;
int ret;
- tb = kmalloc_array(request_ops->max_attr + 1, sizeof(tb[0]),
- GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
-
- ret = nlmsg_parse(nlhdr, GENL_HDRLEN, tb, request_ops->max_attr,
- request_ops->request_policy, extack);
- if (ret < 0)
- goto out;
ret = ethnl_parse_header_dev_get(req_info, tb[request_ops->hdr_attr],
net, extack, require_dev);
if (ret < 0)
- goto out;
+ return ret;
if (request_ops->parse_request) {
ret = request_ops->parse_request(req_info, tb, extack);
if (ret < 0)
- goto out;
+ return ret;
}
- ret = 0;
-out:
- kfree(tb);
- return ret;
+ return 0;
}
/**
@@ -334,8 +329,8 @@ static int ethnl_default_doit(struct sk_buff *skb, struct genl_info *info)
return -ENOMEM;
}
- ret = ethnl_default_parse(req_info, info->nlhdr, genl_info_net(info), ops,
- info->extack, !ops->allow_nodev_do);
+ ret = ethnl_default_parse(req_info, info->attrs, genl_info_net(info),
+ ops, info->extack, !ops->allow_nodev_do);
if (ret < 0)
goto err_dev;
ethnl_init_reply_data(reply_data, ops, req_info->dev);
@@ -480,6 +475,7 @@ out:
/* generic ->start() handler for GET requests */
static int ethnl_default_start(struct netlink_callback *cb)
{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct ethnl_dump_ctx *ctx = ethnl_dump_context(cb);
struct ethnl_reply_data *reply_data;
const struct ethnl_request_ops *ops;
@@ -502,8 +498,8 @@ static int ethnl_default_start(struct netlink_callback *cb)
goto free_req_info;
}
- ret = ethnl_default_parse(req_info, cb->nlh, sock_net(cb->skb->sk), ops,
- cb->extack, false);
+ ret = ethnl_default_parse(req_info, info->attrs, sock_net(cb->skb->sk),
+ ops, cb->extack, false);
if (req_info->dev) {
/* We ignore device specification in dump requests but as the
* same parser as for non-dump (doit) requests is used, it
@@ -696,6 +692,8 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_strset_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_strset_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_LINKINFO_GET,
@@ -703,11 +701,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_linkinfo_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_linkinfo_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_LINKINFO_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_linkinfo,
+ .policy = ethnl_linkinfo_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_linkinfo_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_LINKMODES_GET,
@@ -715,11 +717,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_linkmodes_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_linkmodes_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_LINKMODES_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_linkmodes,
+ .policy = ethnl_linkmodes_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_linkmodes_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_LINKSTATE_GET,
@@ -727,6 +733,8 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_linkstate_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_linkstate_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_DEBUG_GET,
@@ -734,11 +742,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_debug_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_debug_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_DEBUG_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_debug,
+ .policy = ethnl_debug_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_debug_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_WOL_GET,
@@ -747,11 +759,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_wol_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_wol_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_WOL_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_wol,
+ .policy = ethnl_wol_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_wol_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_FEATURES_GET,
@@ -759,11 +775,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_features_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_features_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_FEATURES_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_features,
+ .policy = ethnl_features_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_features_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
@@ -771,11 +791,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_privflags_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_privflags_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_PRIVFLAGS_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_privflags,
+ .policy = ethnl_privflags_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_privflags_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_RINGS_GET,
@@ -783,11 +807,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_rings_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_rings_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_RINGS_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_rings,
+ .policy = ethnl_rings_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_rings_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_CHANNELS_GET,
@@ -795,11 +823,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_channels_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_channels_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_CHANNELS_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_channels,
+ .policy = ethnl_channels_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_channels_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_COALESCE_GET,
@@ -807,11 +839,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_coalesce_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_coalesce_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_COALESCE_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_coalesce,
+ .policy = ethnl_coalesce_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_coalesce_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_PAUSE_GET,
@@ -819,11 +855,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_pause_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_pause_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_PAUSE_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_pause,
+ .policy = ethnl_pause_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_pause_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_EEE_GET,
@@ -831,11 +871,15 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_eee_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_eee_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_EEE_SET,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_set_eee,
+ .policy = ethnl_eee_set_policy,
+ .maxattr = ARRAY_SIZE(ethnl_eee_set_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_TSINFO_GET,
@@ -843,22 +887,30 @@ static const struct genl_ops ethtool_genl_ops[] = {
.start = ethnl_default_start,
.dumpit = ethnl_default_dumpit,
.done = ethnl_default_done,
+ .policy = ethnl_tsinfo_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_tsinfo_get_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_CABLE_TEST_ACT,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_act_cable_test,
+ .policy = ethnl_cable_test_act_policy,
+ .maxattr = ARRAY_SIZE(ethnl_cable_test_act_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_CABLE_TEST_TDR_ACT,
.flags = GENL_UNS_ADMIN_PERM,
.doit = ethnl_act_cable_test_tdr,
+ .policy = ethnl_cable_test_tdr_act_policy,
+ .maxattr = ARRAY_SIZE(ethnl_cable_test_tdr_act_policy) - 1,
},
{
.cmd = ETHTOOL_MSG_TUNNEL_INFO_GET,
.doit = ethnl_tunnel_info_doit,
.start = ethnl_tunnel_info_start,
.dumpit = ethnl_tunnel_info_dumpit,
+ .policy = ethnl_tunnel_info_get_policy,
+ .maxattr = ARRAY_SIZE(ethnl_tunnel_info_get_policy) - 1,
},
};
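
The ethtool hunks above move request validation out of the individual handlers and
into the generic netlink core: each op now carries its own .policy and a .maxattr
derived from the policy array, so attributes are validated and parsed into
info->attrs before .doit or .start ever runs. A minimal sketch of the registration
pattern, with a hypothetical "foo" family and attribute set:

    #include <net/genetlink.h>

    /* everything "foo_*" here is hypothetical */
    enum {
            FOO_A_UNSPEC,
            FOO_A_VALUE,
    };

    static const struct nla_policy foo_get_policy[] = {
            [FOO_A_VALUE] = { .type = NLA_U32 },
    };

    static int foo_get_doit(struct sk_buff *skb, struct genl_info *info)
    {
            /* attributes were already validated and parsed by the core */
            if (!info->attrs[FOO_A_VALUE])
                    return -EINVAL;
            return 0;
    }

    static const struct genl_ops foo_genl_ops[] = {
            {
                    .cmd     = 1,   /* hypothetical command id */
                    .doit    = foo_get_doit,
                    .policy  = foo_get_policy,
                    .maxattr = ARRAY_SIZE(foo_get_policy) - 1,
            },
    };

Deriving .maxattr from ARRAY_SIZE(policy) - 1 keeps the two from drifting apart
as attributes are added to the policy.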
diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h
index e2085005caac..d8efec516d86 100644
--- a/net/ethtool/netlink.h
+++ b/net/ethtool/netlink.h
@@ -266,10 +266,8 @@ static inline void ethnl_ops_complete(struct net_device *dev)
* @request_cmd: command id for request (GET)
* @reply_cmd: command id for reply (GET_REPLY)
* @hdr_attr: attribute type for request header
- * @max_attr: maximum (top level) attribute type
* @req_info_size: size of request info
* @reply_data_size: size of reply data
- * @request_policy: netlink policy for message contents
* @allow_nodev_do: allow non-dump request with no device identification
* @parse_request:
* Parse request except common header (struct ethnl_req_info). Common
@@ -312,10 +310,8 @@ struct ethnl_request_ops {
u8 request_cmd;
u8 reply_cmd;
u16 hdr_attr;
- unsigned int max_attr;
unsigned int req_info_size;
unsigned int reply_data_size;
- const struct nla_policy *request_policy;
bool allow_nodev_do;
int (*parse_request)(struct ethnl_req_info *req_info,
@@ -349,6 +345,37 @@ extern const struct ethnl_request_ops ethnl_pause_request_ops;
extern const struct ethnl_request_ops ethnl_eee_request_ops;
extern const struct ethnl_request_ops ethnl_tsinfo_request_ops;
+extern const struct nla_policy ethnl_header_policy[ETHTOOL_A_HEADER_FLAGS + 1];
+extern const struct nla_policy ethnl_header_policy_stats[ETHTOOL_A_HEADER_FLAGS + 1];
+extern const struct nla_policy ethnl_strset_get_policy[ETHTOOL_A_STRSET_COUNTS_ONLY + 1];
+extern const struct nla_policy ethnl_linkinfo_get_policy[ETHTOOL_A_LINKINFO_HEADER + 1];
+extern const struct nla_policy ethnl_linkinfo_set_policy[ETHTOOL_A_LINKINFO_TP_MDIX_CTRL + 1];
+extern const struct nla_policy ethnl_linkmodes_get_policy[ETHTOOL_A_LINKMODES_HEADER + 1];
+extern const struct nla_policy ethnl_linkmodes_set_policy[ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG + 1];
+extern const struct nla_policy ethnl_linkstate_get_policy[ETHTOOL_A_LINKSTATE_HEADER + 1];
+extern const struct nla_policy ethnl_debug_get_policy[ETHTOOL_A_DEBUG_HEADER + 1];
+extern const struct nla_policy ethnl_debug_set_policy[ETHTOOL_A_DEBUG_MSGMASK + 1];
+extern const struct nla_policy ethnl_wol_get_policy[ETHTOOL_A_WOL_HEADER + 1];
+extern const struct nla_policy ethnl_wol_set_policy[ETHTOOL_A_WOL_SOPASS + 1];
+extern const struct nla_policy ethnl_features_get_policy[ETHTOOL_A_FEATURES_HEADER + 1];
+extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANTED + 1];
+extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
+extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
+extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX + 1];
+extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
+extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
+extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
+extern const struct nla_policy ethnl_coalesce_set_policy[ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL + 1];
+extern const struct nla_policy ethnl_pause_get_policy[ETHTOOL_A_PAUSE_HEADER + 1];
+extern const struct nla_policy ethnl_pause_set_policy[ETHTOOL_A_PAUSE_TX + 1];
+extern const struct nla_policy ethnl_eee_get_policy[ETHTOOL_A_EEE_HEADER + 1];
+extern const struct nla_policy ethnl_eee_set_policy[ETHTOOL_A_EEE_TX_LPI_TIMER + 1];
+extern const struct nla_policy ethnl_tsinfo_get_policy[ETHTOOL_A_TSINFO_HEADER + 1];
+extern const struct nla_policy ethnl_cable_test_act_policy[ETHTOOL_A_CABLE_TEST_HEADER + 1];
+extern const struct nla_policy ethnl_cable_test_tdr_act_policy[ETHTOOL_A_CABLE_TEST_TDR_CFG + 1];
+extern const struct nla_policy ethnl_tunnel_info_get_policy[ETHTOOL_A_TUNNEL_INFO_HEADER + 1];
+
int ethnl_set_linkinfo(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_linkmodes(struct sk_buff *skb, struct genl_info *info);
int ethnl_set_debug(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/ethtool/pause.c b/net/ethtool/pause.c
index 7aea35d1e8a5..09998dc5c185 100644
--- a/net/ethtool/pause.c
+++ b/net/ethtool/pause.c
@@ -10,20 +10,23 @@ struct pause_req_info {
struct pause_reply_data {
struct ethnl_reply_data base;
struct ethtool_pauseparam pauseparam;
+ struct ethtool_pause_stats pausestat;
};
#define PAUSE_REPDATA(__reply_base) \
container_of(__reply_base, struct pause_reply_data, base)
-static const struct nla_policy
-pause_get_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
- [ETHTOOL_A_PAUSE_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_PAUSE_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_REJECT },
- [ETHTOOL_A_PAUSE_RX] = { .type = NLA_REJECT },
- [ETHTOOL_A_PAUSE_TX] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_pause_get_policy[] = {
+ [ETHTOOL_A_PAUSE_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy_stats),
};
+static void ethtool_stats_init(u64 *stats, unsigned int n)
+{
+ while (n--)
+ stats[n] = ETHTOOL_STAT_NOT_SET;
+}
+
static int pause_prepare_data(const struct ethnl_req_info *req_base,
struct ethnl_reply_data *reply_base,
struct genl_info *info)
@@ -34,10 +37,17 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
if (!dev->ethtool_ops->get_pauseparam)
return -EOPNOTSUPP;
+
ret = ethnl_ops_begin(dev);
if (ret < 0)
return ret;
dev->ethtool_ops->get_pauseparam(dev, &data->pauseparam);
+ if (req_base->flags & ETHTOOL_FLAG_STATS &&
+ dev->ethtool_ops->get_pause_stats) {
+ ethtool_stats_init((u64 *)&data->pausestat,
+ sizeof(data->pausestat) / 8);
+ dev->ethtool_ops->get_pause_stats(dev, &data->pausestat);
+ }
ethnl_ops_complete(dev);
return 0;
@@ -46,9 +56,50 @@ static int pause_prepare_data(const struct ethnl_req_info *req_base,
static int pause_reply_size(const struct ethnl_req_info *req_base,
const struct ethnl_reply_data *reply_base)
{
- return nla_total_size(sizeof(u8)) + /* _PAUSE_AUTONEG */
+ int n = nla_total_size(sizeof(u8)) + /* _PAUSE_AUTONEG */
nla_total_size(sizeof(u8)) + /* _PAUSE_RX */
nla_total_size(sizeof(u8)); /* _PAUSE_TX */
+
+ if (req_base->flags & ETHTOOL_FLAG_STATS)
+ n += nla_total_size(0) + /* _PAUSE_STATS */
+ nla_total_size_64bit(sizeof(u64)) *
+ (ETHTOOL_A_PAUSE_STAT_MAX - 2);
+ return n;
+}
+
+static int ethtool_put_stat(struct sk_buff *skb, u64 val, u16 attrtype,
+ u16 padtype)
+{
+ if (val == ETHTOOL_STAT_NOT_SET)
+ return 0;
+ if (nla_put_u64_64bit(skb, attrtype, val, padtype))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int pause_put_stats(struct sk_buff *skb,
+ const struct ethtool_pause_stats *pause_stats)
+{
+ const u16 pad = ETHTOOL_A_PAUSE_STAT_PAD;
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, ETHTOOL_A_PAUSE_STATS);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (ethtool_put_stat(skb, pause_stats->tx_pause_frames,
+ ETHTOOL_A_PAUSE_STAT_TX_FRAMES, pad) ||
+ ethtool_put_stat(skb, pause_stats->rx_pause_frames,
+ ETHTOOL_A_PAUSE_STAT_RX_FRAMES, pad))
+ goto err_cancel;
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
}
static int pause_fill_reply(struct sk_buff *skb,
@@ -63,6 +114,10 @@ static int pause_fill_reply(struct sk_buff *skb,
nla_put_u8(skb, ETHTOOL_A_PAUSE_TX, !!pauseparam->tx_pause))
return -EMSGSIZE;
+ if (req_base->flags & ETHTOOL_FLAG_STATS &&
+ pause_put_stats(skb, &data->pausestat))
+ return -EMSGSIZE;
+
return 0;
}
@@ -70,10 +125,8 @@ const struct ethnl_request_ops ethnl_pause_request_ops = {
.request_cmd = ETHTOOL_MSG_PAUSE_GET,
.reply_cmd = ETHTOOL_MSG_PAUSE_GET_REPLY,
.hdr_attr = ETHTOOL_A_PAUSE_HEADER,
- .max_attr = ETHTOOL_A_PAUSE_MAX,
.req_info_size = sizeof(struct pause_req_info),
.reply_data_size = sizeof(struct pause_reply_data),
- .request_policy = pause_get_policy,
.prepare_data = pause_prepare_data,
.reply_size = pause_reply_size,
@@ -82,10 +135,9 @@ const struct ethnl_request_ops ethnl_pause_request_ops = {
/* PAUSE_SET */
-static const struct nla_policy
-pause_set_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
- [ETHTOOL_A_PAUSE_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_PAUSE_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_pause_set_policy[] = {
+ [ETHTOOL_A_PAUSE_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_PAUSE_AUTONEG] = { .type = NLA_U8 },
[ETHTOOL_A_PAUSE_RX] = { .type = NLA_U8 },
[ETHTOOL_A_PAUSE_TX] = { .type = NLA_U8 },
@@ -93,18 +145,14 @@ pause_set_policy[ETHTOOL_A_PAUSE_MAX + 1] = {
int ethnl_set_pause(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_PAUSE_MAX + 1];
struct ethtool_pauseparam params = {};
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
const struct ethtool_ops *ops;
struct net_device *dev;
bool mod = false;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_PAUSE_MAX,
- pause_set_policy, info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_PAUSE_HEADER],
genl_info_net(info), info->extack,
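
The pause hunks also introduce a stats pattern worth noting: the core pre-fills
every counter with ETHTOOL_STAT_NOT_SET, the driver overwrites only what its
hardware provides, and ethtool_put_stat() silently skips anything left unset. A
hypothetical driver callback under those assumptions (foo_hw_read() and
FOO_TX_PAUSE_CNT are made up):

    /* Sketch only: counters the hardware cannot report are simply left
     * at ETHTOOL_STAT_NOT_SET (pre-filled by ethtool_stats_init() above)
     * and are never emitted to userspace.
     */
    static void foo_get_pause_stats(struct net_device *dev,
                                    struct ethtool_pause_stats *stats)
    {
            stats->tx_pause_frames = foo_hw_read(dev, FOO_TX_PAUSE_CNT);
            /* no RX pause counter on this hardware: leave rx_pause_frames */
    }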
diff --git a/net/ethtool/privflags.c b/net/ethtool/privflags.c
index 77447dceb109..fc9f3be23a19 100644
--- a/net/ethtool/privflags.c
+++ b/net/ethtool/privflags.c
@@ -18,11 +18,9 @@ struct privflags_reply_data {
#define PRIVFLAGS_REPDATA(__reply_base) \
container_of(__reply_base, struct privflags_reply_data, base)
-static const struct nla_policy
-privflags_get_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = {
- [ETHTOOL_A_PRIVFLAGS_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_PRIVFLAGS_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_PRIVFLAGS_FLAGS] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_privflags_get_policy[] = {
+ [ETHTOOL_A_PRIVFLAGS_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int ethnl_get_priv_flags_info(struct net_device *dev,
@@ -124,10 +122,8 @@ const struct ethnl_request_ops ethnl_privflags_request_ops = {
.request_cmd = ETHTOOL_MSG_PRIVFLAGS_GET,
.reply_cmd = ETHTOOL_MSG_PRIVFLAGS_GET_REPLY,
.hdr_attr = ETHTOOL_A_PRIVFLAGS_HEADER,
- .max_attr = ETHTOOL_A_PRIVFLAGS_MAX,
.req_info_size = sizeof(struct privflags_req_info),
.reply_data_size = sizeof(struct privflags_reply_data),
- .request_policy = privflags_get_policy,
.prepare_data = privflags_prepare_data,
.reply_size = privflags_reply_size,
@@ -137,18 +133,17 @@ const struct ethnl_request_ops ethnl_privflags_request_ops = {
/* PRIVFLAGS_SET */
-static const struct nla_policy
-privflags_set_policy[ETHTOOL_A_PRIVFLAGS_MAX + 1] = {
- [ETHTOOL_A_PRIVFLAGS_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_PRIVFLAGS_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_privflags_set_policy[] = {
+ [ETHTOOL_A_PRIVFLAGS_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_PRIVFLAGS_FLAGS] = { .type = NLA_NESTED },
};
int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_PRIVFLAGS_MAX + 1];
const char (*names)[ETH_GSTRING_LEN] = NULL;
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
const struct ethtool_ops *ops;
struct net_device *dev;
unsigned int nflags;
@@ -157,11 +152,6 @@ int ethnl_set_privflags(struct sk_buff *skb, struct genl_info *info)
u32 flags;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_PRIVFLAGS_MAX, privflags_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
if (!tb[ETHTOOL_A_PRIVFLAGS_FLAGS])
return -EINVAL;
ret = ethnl_bitset_is_compact(tb[ETHTOOL_A_PRIVFLAGS_FLAGS], &compact);
diff --git a/net/ethtool/rings.c b/net/ethtool/rings.c
index 5422526f4eef..4e097812a967 100644
--- a/net/ethtool/rings.c
+++ b/net/ethtool/rings.c
@@ -15,18 +15,9 @@ struct rings_reply_data {
#define RINGS_REPDATA(__reply_base) \
container_of(__reply_base, struct rings_reply_data, base)
-static const struct nla_policy
-rings_get_policy[ETHTOOL_A_RINGS_MAX + 1] = {
- [ETHTOOL_A_RINGS_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_RINGS_RX_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_TX_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_RX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_TX] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_rings_get_policy[] = {
+ [ETHTOOL_A_RINGS_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int rings_prepare_data(const struct ethnl_req_info *req_base,
@@ -97,10 +88,8 @@ const struct ethnl_request_ops ethnl_rings_request_ops = {
.request_cmd = ETHTOOL_MSG_RINGS_GET,
.reply_cmd = ETHTOOL_MSG_RINGS_GET_REPLY,
.hdr_attr = ETHTOOL_A_RINGS_HEADER,
- .max_attr = ETHTOOL_A_RINGS_MAX,
.req_info_size = sizeof(struct rings_req_info),
.reply_data_size = sizeof(struct rings_reply_data),
- .request_policy = rings_get_policy,
.prepare_data = rings_prepare_data,
.reply_size = rings_reply_size,
@@ -109,14 +98,9 @@ const struct ethnl_request_ops ethnl_rings_request_ops = {
/* RINGS_SET */
-static const struct nla_policy
-rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
- [ETHTOOL_A_RINGS_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_RINGS_RX_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_RX_MINI_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_RX_JUMBO_MAX] = { .type = NLA_REJECT },
- [ETHTOOL_A_RINGS_TX_MAX] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_rings_set_policy[] = {
+ [ETHTOOL_A_RINGS_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_RINGS_RX] = { .type = NLA_U32 },
[ETHTOOL_A_RINGS_RX_MINI] = { .type = NLA_U32 },
[ETHTOOL_A_RINGS_RX_JUMBO] = { .type = NLA_U32 },
@@ -125,20 +109,15 @@ rings_set_policy[ETHTOOL_A_RINGS_MAX + 1] = {
int ethnl_set_rings(struct sk_buff *skb, struct genl_info *info)
{
- struct nlattr *tb[ETHTOOL_A_RINGS_MAX + 1];
struct ethtool_ringparam ringparam = {};
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
const struct nlattr *err_attr;
const struct ethtool_ops *ops;
struct net_device *dev;
bool mod = false;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb,
- ETHTOOL_A_RINGS_MAX, rings_set_policy,
- info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info,
tb[ETHTOOL_A_RINGS_HEADER],
genl_info_net(info), info->extack,
diff --git a/net/ethtool/strset.c b/net/ethtool/strset.c
index 82707b662fe4..0baad0ce1832 100644
--- a/net/ethtool/strset.c
+++ b/net/ethtool/strset.c
@@ -99,18 +99,15 @@ struct strset_reply_data {
#define STRSET_REPDATA(__reply_base) \
container_of(__reply_base, struct strset_reply_data, base)
-static const struct nla_policy strset_get_policy[ETHTOOL_A_STRSET_MAX + 1] = {
- [ETHTOOL_A_STRSET_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_STRSET_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_strset_get_policy[] = {
+ [ETHTOOL_A_STRSET_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_STRSET_STRINGSETS] = { .type = NLA_NESTED },
+ [ETHTOOL_A_STRSET_COUNTS_ONLY] = { .type = NLA_FLAG },
};
-static const struct nla_policy
-get_stringset_policy[ETHTOOL_A_STRINGSET_MAX + 1] = {
- [ETHTOOL_A_STRINGSET_UNSPEC] = { .type = NLA_REJECT },
+static const struct nla_policy get_stringset_policy[] = {
[ETHTOOL_A_STRINGSET_ID] = { .type = NLA_U32 },
- [ETHTOOL_A_STRINGSET_COUNT] = { .type = NLA_REJECT },
- [ETHTOOL_A_STRINGSET_STRINGS] = { .type = NLA_REJECT },
};
/**
@@ -138,10 +135,10 @@ static bool strset_include(const struct strset_req_info *info,
static int strset_get_id(const struct nlattr *nest, u32 *val,
struct netlink_ext_ack *extack)
{
- struct nlattr *tb[ETHTOOL_A_STRINGSET_MAX + 1];
+ struct nlattr *tb[ARRAY_SIZE(get_stringset_policy)];
int ret;
- ret = nla_parse_nested(tb, ETHTOOL_A_STRINGSET_MAX, nest,
+ ret = nla_parse_nested(tb, ARRAY_SIZE(get_stringset_policy) - 1, nest,
get_stringset_policy, extack);
if (ret < 0)
return ret;
@@ -152,9 +149,7 @@ static int strset_get_id(const struct nlattr *nest, u32 *val,
return 0;
}
-static const struct nla_policy
-strset_stringsets_policy[ETHTOOL_A_STRINGSETS_MAX + 1] = {
- [ETHTOOL_A_STRINGSETS_UNSPEC] = { .type = NLA_REJECT },
+static const struct nla_policy strset_stringsets_policy[] = {
[ETHTOOL_A_STRINGSETS_STRINGSET] = { .type = NLA_NESTED },
};
@@ -169,7 +164,8 @@ static int strset_parse_request(struct ethnl_req_info *req_base,
if (!nest)
return 0;
- ret = nla_validate_nested(nest, ETHTOOL_A_STRINGSETS_MAX,
+ ret = nla_validate_nested(nest,
+ ARRAY_SIZE(strset_stringsets_policy) - 1,
strset_stringsets_policy, extack);
if (ret < 0)
return ret;
@@ -445,10 +441,8 @@ const struct ethnl_request_ops ethnl_strset_request_ops = {
.request_cmd = ETHTOOL_MSG_STRSET_GET,
.reply_cmd = ETHTOOL_MSG_STRSET_GET_REPLY,
.hdr_attr = ETHTOOL_A_STRSET_HEADER,
- .max_attr = ETHTOOL_A_STRSET_MAX,
.req_info_size = sizeof(struct strset_req_info),
.reply_data_size = sizeof(struct strset_reply_data),
- .request_policy = strset_get_policy,
.allow_nodev_do = true,
.parse_request = strset_parse_request,
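
The strset hunk shows one more trick from this series: the parse table tb[] is
dimensioned by ARRAY_SIZE() of the policy itself, so growing the policy
automatically grows the table. A minimal sketch with a hypothetical nested
policy:

    /* "bar" is hypothetical; the shape mirrors get_stringset_policy above */
    static const struct nla_policy bar_policy[] = {
            [1] = { .type = NLA_U32 },
    };

    static int bar_parse(const struct nlattr *nest,
                         struct netlink_ext_ack *extack)
    {
            struct nlattr *tb[ARRAY_SIZE(bar_policy)];

            return nla_parse_nested(tb, ARRAY_SIZE(bar_policy) - 1, nest,
                                    bar_policy, extack);
    }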
diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c
index 7cb5b512b77c..63b5814bd460 100644
--- a/net/ethtool/tsinfo.c
+++ b/net/ethtool/tsinfo.c
@@ -18,14 +18,9 @@ struct tsinfo_reply_data {
#define TSINFO_REPDATA(__reply_base) \
container_of(__reply_base, struct tsinfo_reply_data, base)
-static const struct nla_policy
-tsinfo_get_policy[ETHTOOL_A_TSINFO_MAX + 1] = {
- [ETHTOOL_A_TSINFO_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_TSINFO_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_TSINFO_TIMESTAMPING] = { .type = NLA_REJECT },
- [ETHTOOL_A_TSINFO_TX_TYPES] = { .type = NLA_REJECT },
- [ETHTOOL_A_TSINFO_RX_FILTERS] = { .type = NLA_REJECT },
- [ETHTOOL_A_TSINFO_PHC_INDEX] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_tsinfo_get_policy[] = {
+ [ETHTOOL_A_TSINFO_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int tsinfo_prepare_data(const struct ethnl_req_info *req_base,
@@ -132,10 +127,8 @@ const struct ethnl_request_ops ethnl_tsinfo_request_ops = {
.request_cmd = ETHTOOL_MSG_TSINFO_GET,
.reply_cmd = ETHTOOL_MSG_TSINFO_GET_REPLY,
.hdr_attr = ETHTOOL_A_TSINFO_HEADER,
- .max_attr = ETHTOOL_A_TSINFO_MAX,
.req_info_size = sizeof(struct tsinfo_req_info),
.reply_data_size = sizeof(struct tsinfo_reply_data),
- .request_policy = tsinfo_get_policy,
.prepare_data = tsinfo_prepare_data,
.reply_size = tsinfo_reply_size,
diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c
index d93bf2da0f34..e7f2ee0d2471 100644
--- a/net/ethtool/tunnels.c
+++ b/net/ethtool/tunnels.c
@@ -8,10 +8,9 @@
#include "common.h"
#include "netlink.h"
-static const struct nla_policy
-ethtool_tunnel_info_policy[ETHTOOL_A_TUNNEL_INFO_MAX + 1] = {
- [ETHTOOL_A_TUNNEL_INFO_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_TUNNEL_INFO_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_tunnel_info_get_policy[] = {
+ [ETHTOOL_A_TUNNEL_INFO_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
@@ -161,35 +160,19 @@ err_cancel_ports:
return -EMSGSIZE;
}
-static int
-ethnl_tunnel_info_req_parse(struct ethnl_req_info *req_info,
- const struct nlmsghdr *nlhdr, struct net *net,
- struct netlink_ext_ack *extack, bool require_dev)
-{
- struct nlattr *tb[ETHTOOL_A_TUNNEL_INFO_MAX + 1];
- int ret;
-
- ret = nlmsg_parse(nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_TUNNEL_INFO_MAX,
- ethtool_tunnel_info_policy, extack);
- if (ret < 0)
- return ret;
-
- return ethnl_parse_header_dev_get(req_info,
- tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
- net, extack, require_dev);
-}
-
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
struct sk_buff *rskb;
void *reply_payload;
int reply_len;
int ret;
- ret = ethnl_tunnel_info_req_parse(&req_info, info->nlhdr,
- genl_info_net(info), info->extack,
- true);
+ ret = ethnl_parse_header_dev_get(&req_info,
+ tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
+ genl_info_net(info), info->extack,
+ true);
if (ret < 0)
return ret;
@@ -233,16 +216,19 @@ struct ethnl_tunnel_info_dump_ctx {
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
+ struct nlattr **tb = info->attrs;
int ret;
BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
memset(ctx, 0, sizeof(*ctx));
- ret = ethnl_tunnel_info_req_parse(&ctx->req_info, cb->nlh,
- sock_net(cb->skb->sk), cb->extack,
- false);
+ ret = ethnl_parse_header_dev_get(&ctx->req_info,
+ tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
+ sock_net(cb->skb->sk), cb->extack,
+ false);
if (ctx->req_info.dev) {
dev_put(ctx->req_info.dev);
ctx->req_info.dev = NULL;
diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c
index 1798421e9f1c..ada7df2331d2 100644
--- a/net/ethtool/wol.c
+++ b/net/ethtool/wol.c
@@ -17,12 +17,9 @@ struct wol_reply_data {
#define WOL_REPDATA(__reply_base) \
container_of(__reply_base, struct wol_reply_data, base)
-static const struct nla_policy
-wol_get_policy[ETHTOOL_A_WOL_MAX + 1] = {
- [ETHTOOL_A_WOL_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_WOL_HEADER] = { .type = NLA_NESTED },
- [ETHTOOL_A_WOL_MODES] = { .type = NLA_REJECT },
- [ETHTOOL_A_WOL_SOPASS] = { .type = NLA_REJECT },
+const struct nla_policy ethnl_wol_get_policy[] = {
+ [ETHTOOL_A_WOL_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
};
static int wol_prepare_data(const struct ethnl_req_info *req_base,
@@ -89,10 +86,8 @@ const struct ethnl_request_ops ethnl_wol_request_ops = {
.request_cmd = ETHTOOL_MSG_WOL_GET,
.reply_cmd = ETHTOOL_MSG_WOL_GET_REPLY,
.hdr_attr = ETHTOOL_A_WOL_HEADER,
- .max_attr = ETHTOOL_A_WOL_MAX,
.req_info_size = sizeof(struct wol_req_info),
.reply_data_size = sizeof(struct wol_reply_data),
- .request_policy = wol_get_policy,
.prepare_data = wol_prepare_data,
.reply_size = wol_reply_size,
@@ -101,10 +96,9 @@ const struct ethnl_request_ops ethnl_wol_request_ops = {
/* WOL_SET */
-static const struct nla_policy
-wol_set_policy[ETHTOOL_A_WOL_MAX + 1] = {
- [ETHTOOL_A_WOL_UNSPEC] = { .type = NLA_REJECT },
- [ETHTOOL_A_WOL_HEADER] = { .type = NLA_NESTED },
+const struct nla_policy ethnl_wol_set_policy[] = {
+ [ETHTOOL_A_WOL_HEADER] =
+ NLA_POLICY_NESTED(ethnl_header_policy),
[ETHTOOL_A_WOL_MODES] = { .type = NLA_NESTED },
[ETHTOOL_A_WOL_SOPASS] = { .type = NLA_BINARY,
.len = SOPASS_MAX },
@@ -113,16 +107,12 @@ wol_set_policy[ETHTOOL_A_WOL_MAX + 1] = {
int ethnl_set_wol(struct sk_buff *skb, struct genl_info *info)
{
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
- struct nlattr *tb[ETHTOOL_A_WOL_MAX + 1];
struct ethnl_req_info req_info = {};
+ struct nlattr **tb = info->attrs;
struct net_device *dev;
bool mod = false;
int ret;
- ret = nlmsg_parse(info->nlhdr, GENL_HDRLEN, tb, ETHTOOL_A_WOL_MAX,
- wol_set_policy, info->extack);
- if (ret < 0)
- return ret;
ret = ethnl_parse_header_dev_get(&req_info, tb[ETHTOOL_A_WOL_HEADER],
genl_info_net(info), info->extack,
true);
diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
index 7e11a6c35bc3..4cfd9e829c7b 100644
--- a/net/hsr/hsr_debugfs.c
+++ b/net/hsr/hsr_debugfs.c
@@ -60,17 +60,7 @@ hsr_node_table_show(struct seq_file *sfp, void *data)
return 0;
}
-/* hsr_node_table_open - Open the node_table file
- *
- * Description:
- * This routine opens a debugfs file node_table of specific hsr
- * or prp device
- */
-static int
-hsr_node_table_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, hsr_node_table_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(hsr_node_table);
void hsr_debugfs_rename(struct net_device *dev)
{
@@ -85,13 +75,6 @@ void hsr_debugfs_rename(struct net_device *dev)
priv->node_tbl_root = d;
}
-static const struct file_operations hsr_fops = {
- .open = hsr_node_table_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
/* hsr_debugfs_init - create hsr node_table file for dumping
* the node table
*
@@ -113,7 +96,7 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
de = debugfs_create_file("node_table", S_IFREG | 0444,
priv->node_tbl_root, priv,
- &hsr_fops);
+ &hsr_node_table_fops);
if (IS_ERR(de)) {
pr_err("Cannot create hsr node_table file\n");
debugfs_remove(priv->node_tbl_root);
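
For reference, DEFINE_SHOW_ATTRIBUTE(hsr_node_table) generates roughly the
boilerplate this hunk deletes, deriving both the open helper and the
file_operations from hsr_node_table_show():

    static int hsr_node_table_open(struct inode *inode, struct file *file)
    {
            return single_open(file, hsr_node_table_show, inode->i_private);
    }

    static const struct file_operations hsr_node_table_fops = {
            .owner   = THIS_MODULE,
            .open    = hsr_node_table_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };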
diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
index 0e4681cf71db..f3c8f91dbe2c 100644
--- a/net/hsr/hsr_netlink.c
+++ b/net/hsr/hsr_netlink.c
@@ -493,7 +493,7 @@ fail:
return res;
}
-static const struct genl_ops hsr_ops[] = {
+static const struct genl_small_ops hsr_ops[] = {
{
.cmd = HSR_C_GET_NODE_STATUS,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -518,8 +518,8 @@ static struct genl_family hsr_genl_family __ro_after_init = {
.policy = hsr_genl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = hsr_ops,
- .n_ops = ARRAY_SIZE(hsr_ops),
+ .small_ops = hsr_ops,
+ .n_small_ops = ARRAY_SIZE(hsr_ops),
.mcgrps = hsr_mcgrps,
.n_mcgrps = ARRAY_SIZE(hsr_mcgrps),
};
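
genl_small_ops is the slimmed-down ops table for families that need neither
per-op policies nor start/done callbacks; validation falls back to the
family-wide .policy/.maxattr. A minimal sketch with a hypothetical "baz"
family:

    /* "baz" family, policy and handler are hypothetical */
    static const struct nla_policy baz_policy[2] = {
            [1] = { .type = NLA_U32 },
    };

    static int baz_doit(struct sk_buff *skb, struct genl_info *info)
    {
            return 0;
    }

    static const struct genl_small_ops baz_ops[] = {
            {
                    .cmd  = 1,
                    .doit = baz_doit,
            },
    };

    static struct genl_family baz_family __ro_after_init = {
            .name        = "baz",
            .maxattr     = 1,
            .policy      = baz_policy,
            .module      = THIS_MODULE,
            .small_ops   = baz_ops,
            .n_small_ops = ARRAY_SIZE(baz_ops),
    };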
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 7fe3b6b6c495..b07abc38b4b3 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -81,7 +81,7 @@ int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info)
return genlmsg_reply(msg, info);
}
-static const struct genl_ops ieee802154_ops[] = {
+static const struct genl_small_ops ieee802154_ops[] = {
/* see nl-phy.c */
IEEE802154_DUMP(IEEE802154_LIST_PHY, ieee802154_list_phy,
ieee802154_dump_phy),
@@ -130,8 +130,8 @@ struct genl_family nl802154_family __ro_after_init = {
.maxattr = IEEE802154_ATTR_MAX,
.policy = ieee802154_policy,
.module = THIS_MODULE,
- .ops = ieee802154_ops,
- .n_ops = ARRAY_SIZE(ieee802154_ops),
+ .small_ops = ieee802154_ops,
+ .n_small_ops = ARRAY_SIZE(ieee802154_ops),
.mcgrps = ieee802154_mcgrps,
.n_mcgrps = ARRAY_SIZE(ieee802154_mcgrps),
};
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 4307503a6f0b..b7260c8cef2e 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1017,6 +1017,7 @@ static int inet_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned lon
const struct proto_ops inet_stream_ops = {
.family = PF_INET,
+ .flags = PROTO_CMSG_DATA_ONLY,
.owner = THIS_MODULE,
.release = inet_release,
.bind = inet_bind,
diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c
index e3939f76b024..618954f82764 100644
--- a/net/ipv4/bpf_tcp_ca.c
+++ b/net/ipv4/bpf_tcp_ca.c
@@ -28,27 +28,6 @@ static u32 unsupported_ops[] = {
static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
-static int btf_sk_storage_get_ids[5];
-static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;
-
-static int btf_sk_storage_delete_ids[5];
-static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;
-
-static void convert_sk_func_proto(struct bpf_func_proto *to, int *to_btf_ids,
- const struct bpf_func_proto *from)
-{
- int i;
-
- *to = *from;
- to->btf_id = to_btf_ids;
- for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
- if (to->arg_type[i] == ARG_PTR_TO_SOCKET) {
- to->arg_type[i] = ARG_PTR_TO_BTF_ID;
- to->btf_id[i] = tcp_sock_id;
- }
- }
-}
-
static int bpf_tcp_ca_init(struct btf *btf)
{
s32 type_id;
@@ -64,13 +43,6 @@ static int bpf_tcp_ca_init(struct btf *btf)
tcp_sock_id = type_id;
tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);
- convert_sk_func_proto(&btf_sk_storage_get_proto,
- btf_sk_storage_get_ids,
- &bpf_sk_storage_get_proto);
- convert_sk_func_proto(&btf_sk_storage_delete_proto,
- btf_sk_storage_delete_ids,
- &bpf_sk_storage_delete_proto);
-
return 0;
}
@@ -185,8 +157,8 @@ static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
/* In case we want to report error later */
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
+ .arg1_btf_id = &tcp_sock_id,
.arg2_type = ARG_ANYTHING,
- .btf_id = &tcp_sock_id,
};
static const struct bpf_func_proto *
@@ -197,9 +169,9 @@ bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
case BPF_FUNC_tcp_send_ack:
return &bpf_tcp_send_ack_proto;
case BPF_FUNC_sk_storage_get:
- return &btf_sk_storage_get_proto;
+ return &bpf_sk_storage_get_proto;
case BPF_FUNC_sk_storage_delete:
- return &btf_sk_storage_delete_proto;
+ return &bpf_sk_storage_delete_proto;
default:
return bpf_base_func_proto(func_id);
}
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2eb71579f4d2..471d33a0d095 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -498,7 +498,7 @@ static void cipso_v4_doi_free_rcu(struct rcu_head *entry)
/**
* cipso_v4_doi_remove - Remove an existing DOI from the CIPSO protocol engine
* @doi: the DOI value
- * @audit_secid: the LSM secid to use in the audit message
+ * @audit_info: NetLabel audit information
*
* Description:
* Removes a DOI definition from the CIPSO engine. The NetLabel routines will
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index abd083415f89..e5f69b0bf3df 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -237,7 +237,7 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
/* We can clear the encap_mark for FOU as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
- * header to the outer L3 tunnel header, or we are are simply
+ * header to the outer L3 tunnel header, or we are simply
* treating the GRE tunnel header as though it is a UDP protocol
* specific header such as VXLAN or GENEVE.
*/
@@ -429,7 +429,7 @@ next_proto:
/* We can clear the encap_mark for GUE as we are essentially doing
* one of two possible things. We are either adding an L4 tunnel
- * header to the outer L3 tunnel header, or we are are simply
+ * header to the outer L3 tunnel header, or we are simply
* treating the GRE tunnel header as though it is a UDP protocol
* specific header such as VXLAN or GENEVE.
*/
@@ -911,7 +911,7 @@ static int fou_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-static const struct genl_ops fou_nl_ops[] = {
+static const struct genl_small_ops fou_nl_ops[] = {
{
.cmd = FOU_CMD_ADD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -940,8 +940,8 @@ static struct genl_family fou_nl_family __ro_after_init = {
.policy = fou_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = fou_nl_ops,
- .n_ops = ARRAY_SIZE(fou_nl_ops),
+ .small_ops = fou_nl_ops,
+ .n_small_ops = ARRAY_SIZE(fou_nl_ops),
};
size_t fou_encap_hlen(struct ip_tunnel_encap *e)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index bdaaee52c41b..07f67ced962a 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -457,6 +457,23 @@ out_bh_enable:
local_bh_enable();
}
+/*
+ * The device used for looking up which routing table to use for sending an
+ * ICMP error is preferably the source device, whenever one is set: this
+ * should ensure that the ICMP error can be sent back to the source host.
+ * Otherwise, look the route up via the routing table of the destination
+ * device, and failing that, fall back to the main routing table (index 0).
+ */
+static struct net_device *icmp_get_route_lookup_dev(struct sk_buff *skb)
+{
+ struct net_device *route_lookup_dev = NULL;
+
+ if (skb->dev)
+ route_lookup_dev = skb->dev;
+ else if (skb_dst(skb))
+ route_lookup_dev = skb_dst(skb)->dev;
+ return route_lookup_dev;
+}
+
static struct rtable *icmp_route_lookup(struct net *net,
struct flowi4 *fl4,
struct sk_buff *skb_in,
@@ -465,6 +482,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
int type, int code,
struct icmp_bxm *param)
{
+ struct net_device *route_lookup_dev;
struct rtable *rt, *rt2;
struct flowi4 fl4_dec;
int err;
@@ -479,7 +497,8 @@ static struct rtable *icmp_route_lookup(struct net *net,
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
- fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
+ route_lookup_dev = icmp_get_route_lookup_dev(skb_in);
+ fl4->flowi4_oif = l3mdev_master_ifindex(route_lookup_dev);
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
rt = ip_route_output_key_hash(net, fl4, skb_in);
@@ -503,7 +522,7 @@ static struct rtable *icmp_route_lookup(struct net *net,
if (err)
goto relookup_failed;
- if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
+ if (inet_addr_type_dev_table(net, route_lookup_dev,
fl4_dec.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4_dec);
if (IS_ERR(rt2))
@@ -690,9 +709,9 @@ void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
rcu_read_unlock();
}
- tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
+ tos = icmp_pointers[type].error ? (RT_TOS(iph->tos) |
IPTOS_PREC_INTERNETCONTROL) :
- iph->tos;
+ iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
@@ -784,7 +803,7 @@ EXPORT_SYMBOL(icmp_ndo_send);
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
- const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
const struct net_protocol *ipprot;
int protocol = iph->protocol;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index b457dd2d6c75..4148f5f78f31 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -564,7 +564,7 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;
+ icsk->icsk_pending = icsk->icsk_ack.pending = 0;
sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index f1bd95f243b3..366a4507b5a3 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -125,6 +125,7 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
bool net_admin)
{
const struct inet_sock *inet = inet_sk(sk);
+ struct inet_diag_sockopt inet_sockopt;
if (nla_put_u8(skb, INET_DIAG_SHUTDOWN, sk->sk_shutdown))
goto errout;
@@ -180,6 +181,22 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
r->idiag_inode = sock_i_ino(sk);
+ memset(&inet_sockopt, 0, sizeof(inet_sockopt));
+ inet_sockopt.recverr = inet->recverr;
+ inet_sockopt.is_icsk = inet->is_icsk;
+ inet_sockopt.freebind = inet->freebind;
+ inet_sockopt.hdrincl = inet->hdrincl;
+ inet_sockopt.mc_loop = inet->mc_loop;
+ inet_sockopt.transparent = inet->transparent;
+ inet_sockopt.mc_all = inet->mc_all;
+ inet_sockopt.nodefrag = inet->nodefrag;
+ inet_sockopt.bind_address_no_port = inet->bind_address_no_port;
+ inet_sockopt.recverr_rfc4884 = inet->recverr_rfc4884;
+ inet_sockopt.defer_connect = inet->defer_connect;
+ if (nla_put(skb, INET_DIAG_SOCKOPT, sizeof(inet_sockopt),
+ &inet_sockopt))
+ goto errout;
+
return 0;
errout:
return 1;
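
On the receiving side, a sockdiag consumer can pull the new INET_DIAG_SOCKOPT
attribute straight into the uapi struct. A userspace sketch, assuming attr
already points at the located attribute (error handling elided):

    #include <linux/inet_diag.h>
    #include <linux/rtnetlink.h>
    #include <stdio.h>

    static void show_sockopt(const struct rtattr *attr)
    {
            const struct inet_diag_sockopt *opts = RTA_DATA(attr);

            printf("transparent=%u freebind=%u defer_connect=%u\n",
                   opts->transparent, opts->freebind, opts->defer_connect);
    }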
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 239e54474b65..8cbe74313f38 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -228,7 +228,7 @@ static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum, const __be32 daddr,
- const int dif, const int sdif, bool exact_dif)
+ const int dif, const int sdif)
{
int score = -1;
@@ -277,15 +277,13 @@ static struct sock *inet_lhash2_lookup(struct net *net,
const __be32 daddr, const unsigned short hnum,
const int dif, const int sdif)
{
- bool exact_dif = inet_exact_dif_match(net, skb);
struct inet_connection_sock *icsk;
struct sock *sk, *result = NULL;
int score, hiscore = 0;
inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
sk = (struct sock *)icsk;
- score = compute_score(sk, net, hnum, daddr,
- dif, sdif, exact_dif);
+ score = compute_score(sk, net, hnum, daddr, dif, sdif);
if (score > hiscore) {
result = lookup_reuseport(net, sk, skb, doff,
saddr, sport, daddr, hnum);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 4e31f23e4117..e70291748889 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -625,9 +625,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
}
if (dev->header_ops) {
- /* Need space for new headers */
- if (skb_cow_head(skb, dev->needed_headroom -
- (tunnel->hlen + sizeof(struct iphdr))))
+ if (skb_cow_head(skb, 0))
goto free_skb;
tnl_params = (const struct iphdr *)skb->data;
@@ -748,7 +746,11 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
len = tunnel->tun_hlen - len;
tunnel->hlen = tunnel->hlen + len;
- dev->needed_headroom = dev->needed_headroom + len;
+ if (dev->header_ops)
+ dev->hard_header_len += len;
+ else
+ dev->needed_headroom += len;
+
if (set_mtu)
dev->mtu = max_t(int, dev->mtu - len, 68);
@@ -944,6 +946,7 @@ static void __gre_tunnel_init(struct net_device *dev)
tunnel->parms.iph.protocol = IPPROTO_GRE;
tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
+ dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);
dev->features |= GRE_FEATURES;
dev->hw_features |= GRE_FEATURES;
@@ -987,10 +990,14 @@ static int ipgre_tunnel_init(struct net_device *dev)
return -EINVAL;
dev->flags = IFF_BROADCAST;
dev->header_ops = &ipgre_header_ops;
+ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
+ dev->needed_headroom = 0;
}
#endif
} else if (!tunnel->collect_md) {
dev->header_ops = &ipgre_header_ops;
+ dev->hard_header_len = tunnel->hlen + sizeof(*iph);
+ dev->needed_headroom = 0;
}
return ip_tunnel_init(dev);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 948747aac4e2..da1b5038bdfd 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -47,32 +47,32 @@ void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
unsigned char *iph = skb_network_header(skb);
memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options));
- memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen);
+ memcpy(iph + sizeof(struct iphdr), opt->__data, opt->optlen);
opt = &(IPCB(skb)->opt);
if (opt->srr)
- memcpy(iph+opt->srr+iph[opt->srr+1]-4, &daddr, 4);
+ memcpy(iph + opt->srr + iph[opt->srr + 1] - 4, &daddr, 4);
if (!is_frag) {
if (opt->rr_needaddr)
- ip_rt_get_source(iph+opt->rr+iph[opt->rr+2]-5, skb, rt);
+ ip_rt_get_source(iph + opt->rr + iph[opt->rr + 2] - 5, skb, rt);
if (opt->ts_needaddr)
- ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, skb, rt);
+ ip_rt_get_source(iph + opt->ts + iph[opt->ts + 2] - 9, skb, rt);
if (opt->ts_needtime) {
__be32 midtime;
midtime = inet_current_timestamp();
- memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
+ memcpy(iph + opt->ts + iph[opt->ts + 2] - 5, &midtime, 4);
}
return;
}
if (opt->rr) {
- memset(iph+opt->rr, IPOPT_NOP, iph[opt->rr+1]);
+ memset(iph + opt->rr, IPOPT_NOP, iph[opt->rr + 1]);
opt->rr = 0;
opt->rr_needaddr = 0;
}
if (opt->ts) {
- memset(iph+opt->ts, IPOPT_NOP, iph[opt->ts+1]);
+ memset(iph + opt->ts, IPOPT_NOP, iph[opt->ts + 1]);
opt->ts = 0;
opt->ts_needaddr = opt->ts_needtime = 0;
}
@@ -495,26 +495,29 @@ EXPORT_SYMBOL(ip_options_compile);
void ip_options_undo(struct ip_options *opt)
{
if (opt->srr) {
- unsigned char *optptr = opt->__data+opt->srr-sizeof(struct iphdr);
- memmove(optptr+7, optptr+3, optptr[1]-7);
- memcpy(optptr+3, &opt->faddr, 4);
+ unsigned char *optptr = opt->__data + opt->srr - sizeof(struct iphdr);
+
+ memmove(optptr + 7, optptr + 3, optptr[1] - 7);
+ memcpy(optptr + 3, &opt->faddr, 4);
}
if (opt->rr_needaddr) {
- unsigned char *optptr = opt->__data+opt->rr-sizeof(struct iphdr);
+ unsigned char *optptr = opt->__data + opt->rr - sizeof(struct iphdr);
+
optptr[2] -= 4;
- memset(&optptr[optptr[2]-1], 0, 4);
+ memset(&optptr[optptr[2] - 1], 0, 4);
}
if (opt->ts) {
- unsigned char *optptr = opt->__data+opt->ts-sizeof(struct iphdr);
+ unsigned char *optptr = opt->__data + opt->ts - sizeof(struct iphdr);
+
if (opt->ts_needtime) {
optptr[2] -= 4;
- memset(&optptr[optptr[2]-1], 0, 4);
- if ((optptr[3]&0xF) == IPOPT_TS_PRESPEC)
+ memset(&optptr[optptr[2] - 1], 0, 4);
+ if ((optptr[3] & 0xF) == IPOPT_TS_PRESPEC)
optptr[2] -= 4;
}
if (opt->ts_needaddr) {
optptr[2] -= 4;
- memset(&optptr[optptr[2]-1], 0, 4);
+ memset(&optptr[optptr[2] - 1], 0, 4);
}
}
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 5131cf70672a..879b76ae4435 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -143,7 +143,8 @@ static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
*
*/
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
- __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
+ __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
+ u8 tos)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = skb_rtable(skb);
@@ -156,7 +157,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
- iph->tos = inet->tos;
+ iph->tos = tos;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
@@ -997,7 +998,7 @@ static int __ip_append_data(struct sock *sk,
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
- maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
+ maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;
if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1352,7 +1353,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
- if (!(rt->dst.dev->features&NETIF_F_SG))
+ if (!(rt->dst.dev->features & NETIF_F_SG))
return -EOPNOTSUPP;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1537,7 +1538,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
ip_select_ident(net, skb, sk);
if (opt) {
- iph->ihl += opt->optlen>>2;
+ iph->ihl += opt->optlen >> 2;
ip_options_build(skb, opt, cork->addr, rt, 0);
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index d2c223554ff7..ec6036713e2c 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1124,8 +1124,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
dev_put(dev);
err = -EINVAL;
- if (sk->sk_bound_dev_if &&
- (!midx || midx != sk->sk_bound_dev_if))
+ if (sk->sk_bound_dev_if && midx != sk->sk_bound_dev_if)
break;
inet->uc_index = ifindex;
@@ -1189,7 +1188,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
err = -EINVAL;
if (sk->sk_bound_dev_if &&
mreq.imr_ifindex != sk->sk_bound_dev_if &&
- (!midx || midx != sk->sk_bound_dev_if))
+ midx != sk->sk_bound_dev_if)
break;
inet->mc_index = mreq.imr_ifindex;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 0c1f36404471..8b04d1dcfec4 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -360,7 +360,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error)
{
- struct pcpu_sw_netstats *tstats;
const struct iphdr *iph = ip_hdr(skb);
int err;
@@ -402,12 +401,7 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
}
}
- tstats = this_cpu_ptr(tunnel->dev->tstats);
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += skb->len;
- u64_stats_update_end(&tstats->syncp);
-
+ dev_sw_netstats_rx_add(tunnel->dev, skb->len);
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
if (tunnel->dev->type == ARPHRD_ETHER) {
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index b2ea1a8c5fd6..25f1caf5abf9 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -433,29 +433,8 @@ EXPORT_SYMBOL(skb_tunnel_check_pmtu);
void ip_tunnel_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *tot)
{
- int i;
-
netdev_stats_to_stats64(tot, &dev->stats);
-
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *tstats =
- per_cpu_ptr(dev->tstats, i);
- u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_irq(&tstats->syncp);
- rx_packets = tstats->rx_packets;
- tx_packets = tstats->tx_packets;
- rx_bytes = tstats->rx_bytes;
- tx_bytes = tstats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
-
- tot->rx_packets += rx_packets;
- tot->tx_packets += tx_packets;
- tot->rx_bytes += rx_bytes;
- tot->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(tot, dev->tstats);
}
EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
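
dev_sw_netstats_rx_add() and dev_fetch_sw_netstats() wrap exactly the per-CPU
u64_stats sequences deleted above, so a tunnel or virtual-device rx path
reduces to a one-liner. A minimal sketch (foo_tunnel_rcv() is hypothetical;
dev->tstats must be per-cpu pcpu_sw_netstats):

    #include <linux/netdevice.h>

    static void foo_tunnel_rcv(struct net_device *dev, struct sk_buff *skb)
    {
            /* rx_packets++ and rx_bytes += len under the u64_stats seqcount,
             * exactly as the removed open-coded version did
             */
            dev_sw_netstats_rx_add(dev, skb->len);
            skb->dev = dev;
            netif_rx(skb);
    }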
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index f687abb069fa..b957cbee2cf7 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -95,7 +95,6 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
{
unsigned short family;
struct net_device *dev;
- struct pcpu_sw_netstats *tstats;
struct xfrm_state *x;
const struct xfrm_mode *inner_mode;
struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
@@ -138,13 +137,7 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
skb->dev = dev;
-
- tstats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += skb->len;
- u64_stats_update_end(&tstats->syncp);
+ dev_sw_netstats_rx_add(dev, skb->len);
return 0;
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 876fd6ff1ff9..939792a38814 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1038,10 +1038,13 @@ static int ipmr_cache_report(struct mr_table *mrt,
memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
msg->im_msgtype = assert;
msg->im_mbz = 0;
- if (assert == IGMPMSG_WRVIFWHOLE)
+ if (assert == IGMPMSG_WRVIFWHOLE) {
msg->im_vif = vifi;
- else
+ msg->im_vif_hi = vifi >> 8;
+ } else {
msg->im_vif = mrt->mroute_reg_vif_num;
+ msg->im_vif_hi = mrt->mroute_reg_vif_num >> 8;
+ }
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
sizeof(struct iphdr));
@@ -1054,6 +1057,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
ip_hdr(skb)->protocol = 0;
msg = (struct igmpmsg *)skb_network_header(skb);
msg->im_vif = vifi;
+ msg->im_vif_hi = vifi >> 8;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
/* Add our header */
igmp = skb_put(skb, sizeof(struct igmphdr));
@@ -2396,6 +2400,7 @@ static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
+ nla_total_size(4) /* IPMRA_CREPORT_VIF_ID */
+ nla_total_size(4) /* IPMRA_CREPORT_SRC_ADDR */
+ nla_total_size(4) /* IPMRA_CREPORT_DST_ADDR */
+ + nla_total_size(4) /* IPMRA_CREPORT_TABLE */
/* IPMRA_CREPORT_PKT */
+ nla_total_size(payloadlen)
;
@@ -2427,11 +2432,12 @@ static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt)
rtgenm = nlmsg_data(nlh);
rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
- nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif) ||
+ nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif | (msg->im_vif_hi << 8)) ||
nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
msg->im_src.s_addr) ||
nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
- msg->im_dst.s_addr))
+ msg->im_dst.s_addr) ||
+ nla_put_u32(skb, IPMRA_CREPORT_TABLE, mrt->id))
goto nla_put_failure;
nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
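
Since igmpmsg keeps the VIF id in two u8 fields for binary compatibility,
consumers reassemble it the same way the IPMRA_CREPORT_VIF_ID fill above does.
A one-line helper as a sketch:

    static u16 igmpmsg_vif(const struct igmpmsg *msg)
    {
            return msg->im_vif | (msg->im_vif_hi << 8);
    }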
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
index 7a83f881efa9..136030ad2e54 100644
--- a/net/ipv4/netfilter/nf_log_arp.c
+++ b/net/ipv4/netfilter/nf_log_arp.c
@@ -43,16 +43,31 @@ static void dump_arp_packet(struct nf_log_buf *m,
const struct nf_loginfo *info,
const struct sk_buff *skb, unsigned int nhoff)
{
- const struct arphdr *ah;
- struct arphdr _arph;
const struct arppayload *ap;
struct arppayload _arpp;
+ const struct arphdr *ah;
+ unsigned int logflags;
+ struct arphdr _arph;
ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
if (ah == NULL) {
nf_log_buf_add(m, "TRUNCATED");
return;
}
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+ else
+ logflags = NF_LOG_DEFAULT_MASK;
+
+ if (logflags & NF_LOG_MACDECODE) {
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
+ nf_log_dump_vlan(m, skb);
+ nf_log_buf_add(m, "MACPROTO=%04x ",
+ ntohs(eth_hdr(skb)->h_proto));
+ }
+
nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
index 0c72156130b6..d07583fac8f8 100644
--- a/net/ipv4/netfilter/nf_log_ipv4.c
+++ b/net/ipv4/netfilter/nf_log_ipv4.c
@@ -284,8 +284,10 @@ static void dump_ipv4_mac_header(struct nf_log_buf *m,
switch (dev->type) {
case ARPHRD_ETHER:
- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
+ nf_log_dump_vlan(m, skb);
+ nf_log_buf_add(m, "MACPROTO=%04x ",
ntohs(eth_hdr(skb)->h_proto));
return;
default:
diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
index 134e92382275..8c0f17c6863c 100644
--- a/net/ipv4/nexthop.c
+++ b/net/ipv4/nexthop.c
@@ -42,8 +42,8 @@ static int call_nexthop_notifiers(struct net *net,
{
int err;
- err = atomic_notifier_call_chain(&net->nexthop.notifier_chain,
- event_type, nh);
+ err = blocking_notifier_call_chain(&net->nexthop.notifier_chain,
+ event_type, nh);
return notifier_to_errno(err);
}
@@ -133,12 +133,9 @@ static struct nexthop *nexthop_alloc(void)
static struct nh_group *nexthop_grp_alloc(u16 num_nh)
{
- size_t sz = offsetof(struct nexthop, nh_grp)
- + sizeof(struct nh_group)
- + sizeof(struct nh_grp_entry) * num_nh;
struct nh_group *nhg;
- nhg = kzalloc(sz, GFP_KERNEL);
+ nhg = kzalloc(struct_size(nhg, nh_entries, num_nh), GFP_KERNEL);
if (nhg)
nhg->num_nh = num_nh;
@@ -279,7 +276,7 @@ static int nh_fill_node(struct sk_buff *skb, struct nexthop *nh,
case AF_INET:
fib_nh = &nhi->fib_nh;
if (fib_nh->fib_nh_gw_family &&
- nla_put_u32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
+ nla_put_be32(skb, NHA_GATEWAY, fib_nh->fib_nh_gw4))
goto nla_put_failure;
break;
@@ -800,7 +797,7 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
return;
}
- newg->has_v4 = nhg->has_v4;
+ newg->has_v4 = false;
newg->mpath = nhg->mpath;
newg->fdb_nh = nhg->fdb_nh;
newg->num_nh = nhg->num_nh;
@@ -809,12 +806,18 @@ static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge,
nhges = nhg->nh_entries;
new_nhges = newg->nh_entries;
for (i = 0, j = 0; i < nhg->num_nh; ++i) {
+ struct nh_info *nhi;
+
/* current nexthop getting removed */
if (nhg->nh_entries[i].nh == nh) {
newg->num_nh--;
continue;
}
+ nhi = rtnl_dereference(nhges[i].nh->nh_info);
+ if (nhi->family == AF_INET)
+ newg->has_v4 = true;
+
list_del(&nhges[i].nh_list);
new_nhges[j].nh_parent = nhges[i].nh_parent;
new_nhges[j].nh = nhges[i].nh;
@@ -867,8 +870,6 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh)
bool do_flush = false;
struct fib_info *fi;
- call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh);
-
list_for_each_entry(fi, &nh->fi_list, nh_list) {
fi->fib_flags |= RTNH_F_DEAD;
do_flush = true;
@@ -906,6 +907,8 @@ static void __remove_nexthop(struct net *net, struct nexthop *nh,
static void remove_nexthop(struct net *net, struct nexthop *nh,
struct nl_info *nlinfo)
{
+ call_nexthop_notifiers(net, NEXTHOP_EVENT_DEL, nh);
+
/* remove from the tree */
rb_erase(&nh->rb_node, &net->nexthop.rb_root);
@@ -961,6 +964,23 @@ static int replace_nexthop_grp(struct net *net, struct nexthop *old,
return 0;
}
+static void nh_group_v4_update(struct nh_group *nhg)
+{
+ struct nh_grp_entry *nhges;
+ bool has_v4 = false;
+ int i;
+
+ nhges = nhg->nh_entries;
+ for (i = 0; i < nhg->num_nh; i++) {
+ struct nh_info *nhi;
+
+ nhi = rtnl_dereference(nhges[i].nh->nh_info);
+ if (nhi->family == AF_INET)
+ has_v4 = true;
+ }
+ nhg->has_v4 = has_v4;
+}
+
static int replace_nexthop_single(struct net *net, struct nexthop *old,
struct nexthop *new,
struct netlink_ext_ack *extack)
@@ -984,6 +1004,21 @@ static int replace_nexthop_single(struct net *net, struct nexthop *old,
rcu_assign_pointer(old->nh_info, newi);
rcu_assign_pointer(new->nh_info, oldi);
+	/* When replacing an IPv4 nexthop with an IPv6 nexthop, the has_v4
+	 * indication in all the groups using the nexthop may need updating.
+	 */
+ if (oldi->family == AF_INET && newi->family == AF_INET6) {
+ struct nh_grp_entry *nhge;
+
+ list_for_each_entry(nhge, &old->grp_list, nh_list) {
+ struct nexthop *nhp = nhge->nh_parent;
+ struct nh_group *nhg;
+
+ nhg = rtnl_dereference(nhp->nh_grp);
+ nh_group_v4_update(nhg);
+ }
+ }
+
return 0;
}
@@ -1101,7 +1136,7 @@ static int insert_nexthop(struct net *net, struct nexthop *new_nh,
while (1) {
struct nexthop *nh;
- next = rtnl_dereference(*pp);
+ next = *pp;
if (!next)
break;
@@ -1924,14 +1959,15 @@ static struct notifier_block nh_netdev_notifier = {
int register_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
- return atomic_notifier_chain_register(&net->nexthop.notifier_chain, nb);
+ return blocking_notifier_chain_register(&net->nexthop.notifier_chain,
+ nb);
}
EXPORT_SYMBOL(register_nexthop_notifier);
int unregister_nexthop_notifier(struct net *net, struct notifier_block *nb)
{
- return atomic_notifier_chain_unregister(&net->nexthop.notifier_chain,
- nb);
+ return blocking_notifier_chain_unregister(&net->nexthop.notifier_chain,
+ nb);
}
EXPORT_SYMBOL(unregister_nexthop_notifier);
@@ -1951,7 +1987,7 @@ static int __net_init nexthop_net_init(struct net *net)
net->nexthop.devhash = kzalloc(sz, GFP_KERNEL);
if (!net->nexthop.devhash)
return -ENOMEM;
- ATOMIC_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
+ BLOCKING_INIT_NOTIFIER_HEAD(&net->nexthop.notifier_chain);
return 0;
}
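
The nexthop_grp_alloc() hunk swaps hand-rolled size arithmetic for
struct_size(), which computes the header plus flexible-array size with
overflow checking. A minimal sketch of the idiom with a hypothetical struct:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct foo_entry { int v; };        /* hypothetical */

    struct foo_group {
            u16 num;
            struct foo_entry entries[];
    };

    static struct foo_group *foo_group_alloc(u16 num)
    {
            struct foo_group *g;

            /* struct_size(g, entries, num) ==
             * sizeof(*g) + num * sizeof(g->entries[0]), saturating on overflow
             */
            g = kzalloc(struct_size(g, entries, num), GFP_KERNEL);
            if (g)
                    g->num = num;
            return g;
    }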
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index df6fbefe44d4..248856b301c4 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -293,7 +293,8 @@ EXPORT_SYMBOL_GPL(ping_close);
/* Checks the bind address and possibly modifies sk->sk_bound_dev_if. */
static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
- struct sockaddr *uaddr, int addr_len) {
+ struct sockaddr *uaddr, int addr_len)
+{
struct net *net = sock_net(sk);
if (sk->sk_family == AF_INET) {
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
@@ -310,10 +311,10 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
- chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
-
if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
chk_addr_ret = RTN_LOCAL;
+ else
+ chk_addr_ret = inet_addr_type(net, addr->sin_addr.s_addr);
if ((!inet_can_nonlocal_bind(net, isk) &&
chk_addr_ret != RTN_LOCAL) ||
@@ -383,20 +384,6 @@ static void ping_set_saddr(struct sock *sk, struct sockaddr *saddr)
}
}
-static void ping_clear_saddr(struct sock *sk, int dif)
-{
- sk->sk_bound_dev_if = dif;
- if (sk->sk_family == AF_INET) {
- struct inet_sock *isk = inet_sk(sk);
- isk->inet_rcv_saddr = isk->inet_saddr = 0;
-#if IS_ENABLED(CONFIG_IPV6)
- } else if (sk->sk_family == AF_INET6) {
- struct ipv6_pinfo *np = inet6_sk(sk);
- memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
- memset(&np->saddr, 0, sizeof(np->saddr));
-#endif
- }
-}
/*
* We need our own bind because there are no privileged id's == local ports.
* Moreover, we don't allow binding to multi- and broadcast addresses.
@@ -420,12 +407,13 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
goto out;
err = -EADDRINUSE;
- ping_set_saddr(sk, uaddr);
snum = ntohs(((struct sockaddr_in *)uaddr)->sin_port);
if (ping_get_port(sk, snum) != 0) {
- ping_clear_saddr(sk, dif);
+		/* Restore sk->sk_bound_dev_if, possibly modified by ping_check_bind_addr(). */
+ sk->sk_bound_dev_if = dif;
goto out;
}
+ ping_set_saddr(sk, uaddr);
pr_debug("after bind(): num = %hu, dif = %d\n",
isk->inet_num,
@@ -647,7 +635,8 @@ static int ping_v4_push_pending_frames(struct sock *sk, struct pingfakehdr *pfh,
}
int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
- void *user_icmph, size_t icmph_len) {
+ void *user_icmph, size_t icmph_len)
+{
u8 type, code;
if (len > 0xFFFF)
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 355f3ca868af..7d26e0f8bdae 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -260,11 +260,12 @@ static void raw_err(struct sock *sk, struct sk_buff *skb, u32 info)
err = EHOSTUNREACH;
if (code > NR_ICMP_UNREACH)
break;
- err = icmp_err_convert[code].errno;
- harderr = icmp_err_convert[code].fatal;
if (code == ICMP_FRAG_NEEDED) {
harderr = inet->pmtudisc != IP_PMTUDISC_DONT;
err = EMSGSIZE;
+ } else {
+ err = icmp_err_convert[code].errno;
+ harderr = icmp_err_convert[code].fatal;
}
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 58642b29a499..dc2a399cd9f4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -623,7 +623,7 @@ static inline u32 fnhe_hashfun(__be32 daddr)
u32 hval;
net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
- hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
+ hval = jhash_1word((__force u32)daddr, fnhe_hashrnd);
return hash_32(hval, FNHE_HASH_SHIFT);
}
@@ -1016,13 +1016,14 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
struct dst_entry *dst = &rt->dst;
struct net *net = dev_net(dst->dev);
- u32 old_mtu = ipv4_mtu(dst);
struct fib_result res;
bool lock = false;
+ u32 old_mtu;
if (ip_mtu_locked(dst))
return;
+ old_mtu = ipv4_mtu(dst);
if (old_mtu < mtu)
return;
@@ -1066,7 +1067,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
int oif, u8 protocol)
{
- const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
u32 mark = IP4_REPLY_MARK(net, skb->mark);
@@ -1083,7 +1084,7 @@ EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
- const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
@@ -1101,7 +1102,7 @@ static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
- const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
struct dst_entry *odst = NULL;
@@ -1131,7 +1132,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
new = true;
}
- __ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);
+ __ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);
if (!dst_check(&rt->dst, 0)) {
if (new)
@@ -1156,7 +1157,7 @@ EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
int oif, u8 protocol)
{
- const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
@@ -1172,7 +1173,7 @@ EXPORT_SYMBOL_GPL(ipv4_redirect);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
- const struct iphdr *iph = (const struct iphdr *) skb->data;
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
struct flowi4 fl4;
struct rtable *rt;
struct net *net = sock_net(sk);
@@ -1312,7 +1313,7 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
- const struct rtable *rt = (const struct rtable *) dst;
+ const struct rtable *rt = (const struct rtable *)dst;
unsigned int mtu = rt->rt_pmtu;
if (!mtu || time_after_eq(jiffies, rt->dst.expires))
@@ -2769,10 +2770,12 @@ struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
if (IS_ERR(rt))
return rt;
- if (flp4->flowi4_proto)
+ if (flp4->flowi4_proto) {
+ flp4->flowi4_oif = rt->dst.dev->ifindex;
rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
flowi4_to_flowi(flp4),
sk, 0);
+ }
return rt;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index e03756631541..6ac473b47f30 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -286,11 +286,10 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
struct sock *sk,
struct sk_buff *skb)
{
+ struct tcp_request_sock *treq;
struct request_sock *req;
#ifdef CONFIG_MPTCP
- struct tcp_request_sock *treq;
-
if (sk_is_mptcp(sk))
ops = &mptcp_subflow_request_sock_ops;
#endif
@@ -299,8 +298,9 @@ struct request_sock *cookie_tcp_reqsk_alloc(const struct request_sock_ops *ops,
if (!req)
return NULL;
-#if IS_ENABLED(CONFIG_MPTCP)
treq = tcp_rsk(req);
+ treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
+#if IS_ENABLED(CONFIG_MPTCP)
treq->is_mptcp = sk_is_mptcp(sk);
if (treq->is_mptcp) {
int err = mptcp_subflow_init_cookie_req(req, sk, skb);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 54023a46db04..3e5f4f2e705e 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -1330,6 +1330,15 @@ static struct ctl_table ipv4_net_table[] = {
.extra2 = &comp_sack_nr_max,
},
{
+ .procname = "tcp_reflect_tos",
+ .data = &init_net.ipv4.sysctl_tcp_reflect_tos,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
.procname = "udp_rmem_min",
.data = &init_net.ipv4.sysctl_udp_rmem_min,
.maxlen = sizeof(init_net.ipv4.sysctl_udp_rmem_min),
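
The new tcp_reflect_tos entry is a per-namespace boolean (extra1/extra2 clamp it to 0..1), consumed on the server side when sending SYN-ACKs and creating child sockets (see the tcp_ipv4.c hunks further down). A trivial sketch of flipping it via procfs — equivalent to sysctl -w net.ipv4.tcp_reflect_tos=1:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_reflect_tos", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* 1: listener-derived sockets reflect the SYN's TOS/DSCP */
            fputs("1", f);
            fclose(f);
            return 0;
    }
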
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 2135ee7c806d..bae4284bf542 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -418,6 +418,8 @@ void tcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&tp->tsorted_sent_queue);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
+ icsk->icsk_rto_min = TCP_RTO_MIN;
+ icsk->icsk_delack_max = TCP_DELACK_MAX;
tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
@@ -562,7 +564,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
mask |= EPOLLIN | EPOLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_is_writeable(sk)) {
+ if (__sk_stream_is_writeable(sk, 1)) {
mask |= EPOLLOUT | EPOLLWRNORM;
} else { /* send SIGIO later */
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -574,7 +576,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* pairs with the input side.
*/
smp_mb__after_atomic();
- if (sk_stream_is_writeable(sk))
+ if (__sk_stream_is_writeable(sk, 1))
mask |= EPOLLOUT | EPOLLWRNORM;
}
} else
@@ -1003,12 +1005,12 @@ ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
!tcp_skb_can_collapse_to(skb)) {
new_segment:
if (!sk_stream_memory_free(sk))
- goto wait_for_sndbuf;
+ goto wait_for_space;
skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
tcp_rtx_and_write_queues_empty(sk));
if (!skb)
- goto wait_for_memory;
+ goto wait_for_space;
#ifdef CONFIG_TLS_DEVICE
skb->decrypted = !!(flags & MSG_SENDPAGE_DECRYPTED);
@@ -1027,7 +1029,7 @@ new_segment:
goto new_segment;
}
if (!sk_wmem_schedule(sk, copy))
- goto wait_for_memory;
+ goto wait_for_space;
if (can_coalesce) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
@@ -1068,9 +1070,8 @@ new_segment:
tcp_push_one(sk, mss_now);
continue;
-wait_for_sndbuf:
+wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
@@ -1281,7 +1282,7 @@ restart:
new_segment:
if (!sk_stream_memory_free(sk))
- goto wait_for_sndbuf;
+ goto wait_for_space;
if (unlikely(process_backlog >= 16)) {
process_backlog = 0;
@@ -1292,7 +1293,7 @@ new_segment:
skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation,
first_skb);
if (!skb)
- goto wait_for_memory;
+ goto wait_for_space;
process_backlog++;
skb->ip_summed = CHECKSUM_PARTIAL;
@@ -1325,7 +1326,7 @@ new_segment:
struct page_frag *pfrag = sk_page_frag(sk);
if (!sk_page_frag_refill(sk, pfrag))
- goto wait_for_memory;
+ goto wait_for_space;
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
@@ -1339,7 +1340,7 @@ new_segment:
copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (!sk_wmem_schedule(sk, copy))
- goto wait_for_memory;
+ goto wait_for_space;
err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
pfrag->page,
@@ -1392,9 +1393,8 @@ new_segment:
tcp_push_one(sk, mss_now);
continue;
-wait_for_sndbuf:
+wait_for_space:
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-wait_for_memory:
if (copied)
tcp_push(sk, flags & ~MSG_MORE, mss_now,
TCP_NAGLE_PUSH, size_goal);
@@ -1526,7 +1526,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
* calculation of whether or not we must ACK for the sake of
* a window update.
*/
-static void tcp_cleanup_rbuf(struct sock *sk, int copied)
+void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
struct tcp_sock *tp = tcp_sk(sk);
bool time_to_ack = false;
@@ -1539,10 +1539,8 @@ static void tcp_cleanup_rbuf(struct sock *sk, int copied)
if (inet_csk_ack_scheduled(sk)) {
const struct inet_connection_sock *icsk = inet_csk(sk);
- /* Delayed ACKs frequently hit locked sockets during bulk
- * receive. */
- if (icsk->icsk_ack.blocked ||
- /* Once-per-two-segments ACK was not sent by tcp_input.c */
+
+ if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
/*
* If this read emptied read buffer, we send ACK, if
@@ -2686,6 +2684,8 @@ int tcp_disconnect(struct sock *sk, int flags)
icsk->icsk_backoff = 0;
icsk->icsk_probes_out = 0;
icsk->icsk_rto = TCP_TIMEOUT_INIT;
+ icsk->icsk_rto_min = TCP_RTO_MIN;
+ icsk->icsk_delack_max = TCP_DELACK_MAX;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tp->snd_cwnd = TCP_INIT_CWND;
tp->snd_cwnd_cnt = 0;
@@ -2695,6 +2695,7 @@ int tcp_disconnect(struct sock *sk, int flags)
if (icsk->icsk_ca_ops->release)
icsk->icsk_ca_ops->release(sk);
memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
+ icsk->icsk_ca_initialized = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tp->is_sack_reneg = 0;
tcp_clear_retrans(tp);
@@ -3046,7 +3047,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
name[val] = 0;
lock_sock(sk);
- err = tcp_set_congestion_control(sk, name, true, true,
+ err = tcp_set_congestion_control(sk, name, true,
ns_capable(sock_net(sk)->user_ns,
CAP_NET_ADMIN));
release_sock(sk);
@@ -3208,7 +3209,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
break;
case TCP_SAVE_SYN:
- if (val < 0 || val > 1)
+ /* 0: disable, 1: enable, 2: start from ether_header */
+ if (val < 0 || val > 2)
err = -EINVAL;
else
tp->save_syn = val;
@@ -3789,20 +3791,21 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
lock_sock(sk);
if (tp->saved_syn) {
- if (len < tp->saved_syn[0]) {
- if (put_user(tp->saved_syn[0], optlen)) {
+ if (len < tcp_saved_syn_len(tp->saved_syn)) {
+ if (put_user(tcp_saved_syn_len(tp->saved_syn),
+ optlen)) {
release_sock(sk);
return -EFAULT;
}
release_sock(sk);
return -EINVAL;
}
- len = tp->saved_syn[0];
+ len = tcp_saved_syn_len(tp->saved_syn);
if (put_user(len, optlen)) {
release_sock(sk);
return -EFAULT;
}
- if (copy_to_user(optval, tp->saved_syn + 1, len)) {
+ if (copy_to_user(optval, tp->saved_syn->data, len)) {
release_sock(sk);
return -EFAULT;
}
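
With the new value 2, TCP_SAVE_SYN keeps the headers starting at the ethernet header, and the getsockopt() side now sizes the copy with tcp_saved_syn_len(). A hedged userspace sketch of the round trip; error handling is trimmed, and the constants are assumed to come from <netinet/tcp.h> (or <linux/tcp.h>) on a kernel with this patch:

    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int main(void)
    {
            struct sockaddr_in a = { .sin_family = AF_INET,
                                     .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
                                     .sin_port = htons(5555) };
            unsigned char syn[512];
            socklen_t len = sizeof(syn);
            int val = 2;    /* 0: off, 1: from IP header, 2: from MAC header */
            int ls, cs;

            ls = socket(AF_INET, SOCK_STREAM, 0);
            setsockopt(ls, IPPROTO_TCP, TCP_SAVE_SYN, &val, sizeof(val));
            bind(ls, (struct sockaddr *)&a, sizeof(a));
            listen(ls, 1);
            cs = accept(ls, NULL, NULL); /* e.g. connect with: nc 127.0.0.1 5555 */
            if (getsockopt(cs, IPPROTO_TCP, TCP_SAVED_SYN, syn, &len) == 0)
                    printf("saved %u bytes of the SYN's headers\n",
                           (unsigned int)len);
            return 0;
    }
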
diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
index 7aa68f4aae6c..37f4cb2bba5c 100644
--- a/net/ipv4/tcp_bpf.c
+++ b/net/ipv4/tcp_bpf.c
@@ -567,10 +567,9 @@ static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
}
-static void tcp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops)
+static void tcp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
- if (sk->sk_family == AF_INET6 &&
- unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
+ if (unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
spin_lock_bh(&tcpv6_prot_lock);
if (likely(ops != tcpv6_prot_saved)) {
tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
@@ -603,13 +602,11 @@ struct proto *tcp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;
- if (!psock->sk_proto) {
- struct proto *ops = READ_ONCE(sk->sk_prot);
-
- if (tcp_bpf_assert_proto_ops(ops))
+ if (sk->sk_family == AF_INET6) {
+ if (tcp_bpf_assert_proto_ops(psock->sk_proto))
return ERR_PTR(-EINVAL);
- tcp_bpf_check_v6_needs_rebuild(sk, ops);
+ tcp_bpf_check_v6_needs_rebuild(psock->sk_proto);
}
return &tcp_bpf_prots[family][config];
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 62878cf26d9c..db47ac24d057 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -176,7 +176,7 @@ void tcp_assign_congestion_control(struct sock *sk)
void tcp_init_congestion_control(struct sock *sk)
{
- const struct inet_connection_sock *icsk = inet_csk(sk);
+ struct inet_connection_sock *icsk = inet_csk(sk);
tcp_sk(sk)->prior_ssthresh = 0;
if (icsk->icsk_ca_ops->init)
@@ -185,6 +185,7 @@ void tcp_init_congestion_control(struct sock *sk)
INET_ECN_xmit(sk);
else
INET_ECN_dontxmit(sk);
+ icsk->icsk_ca_initialized = 1;
}
static void tcp_reinit_congestion_control(struct sock *sk,
@@ -340,7 +341,7 @@ out:
* already initialized.
*/
int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
- bool reinit, bool cap_net_admin)
+ bool cap_net_admin)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_congestion_ops *ca;
@@ -361,28 +362,14 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
goto out;
}
- if (!ca) {
+ if (!ca)
err = -ENOENT;
- } else if (!load) {
- const struct tcp_congestion_ops *old_ca = icsk->icsk_ca_ops;
-
- if (bpf_try_module_get(ca, ca->owner)) {
- if (reinit) {
- tcp_reinit_congestion_control(sk, ca);
- } else {
- icsk->icsk_ca_ops = ca;
- bpf_module_put(old_ca, old_ca->owner);
- }
- } else {
- err = -EBUSY;
- }
- } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
+ else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin))
err = -EPERM;
- } else if (!bpf_try_module_get(ca, ca->owner)) {
+ else if (!bpf_try_module_get(ca, ca->owner))
err = -EBUSY;
- } else {
+ else
tcp_reinit_congestion_control(sk, ca);
- }
out:
rcu_read_unlock();
return err;
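
The dropped 'reinit' leg (and the earlier '!load' branch) means every caller that changes the algorithm now goes through tcp_reinit_congestion_control(), so the BPF setsockopt path and the regular one behave the same. The userspace entry point is unchanged; a minimal sketch:

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    int main(void)
    {
            char name[16] = "cubic";
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            /* Lands in do_tcp_setsockopt() -> tcp_set_congestion_control().
             * Non-restricted algorithms need no CAP_NET_ADMIN.
             */
            if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                           name, strlen(name)) < 0)
                    perror("setsockopt");
            return 0;
    }
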
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 09b62de04eea..af2814c9342a 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -295,7 +295,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
refcount_set(&req->rsk_refcnt, 2);
/* Now finish processing the fastopen child socket. */
- tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
+ tcp_init_transfer(child, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b1ce2054291d..67f10d3ec240 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -138,6 +138,69 @@ void clean_acked_data_flush(void)
EXPORT_SYMBOL_GPL(clean_acked_data_flush);
#endif
+#ifdef CONFIG_CGROUP_BPF
+static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
+{
+ bool unknown_opt = tcp_sk(sk)->rx_opt.saw_unknown &&
+ BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG);
+ bool parse_all_opt = BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
+ struct bpf_sock_ops_kern sock_ops;
+
+ if (likely(!unknown_opt && !parse_all_opt))
+ return;
+
+ /* The skb will be handled in the
+ * bpf_skops_established() or
+ * bpf_skops_write_hdr_opt().
+ */
+ switch (sk->sk_state) {
+ case TCP_SYN_RECV:
+ case TCP_SYN_SENT:
+ case TCP_LISTEN:
+ return;
+ }
+
+ sock_owned_by_me(sk);
+
+ memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+ sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
+ sock_ops.is_fullsock = 1;
+ sock_ops.sk = sk;
+ bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
+
+ BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
+}
+
+static void bpf_skops_established(struct sock *sk, int bpf_op,
+ struct sk_buff *skb)
+{
+ struct bpf_sock_ops_kern sock_ops;
+
+ sock_owned_by_me(sk);
+
+ memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+ sock_ops.op = bpf_op;
+ sock_ops.is_fullsock = 1;
+ sock_ops.sk = sk;
+ /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
+ if (skb)
+ bpf_skops_init_skb(&sock_ops, skb, tcp_hdrlen(skb));
+
+ BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
+}
+#else
+static void bpf_skops_parse_hdr(struct sock *sk, struct sk_buff *skb)
+{
+}
+
+static void bpf_skops_established(struct sock *sk, int bpf_op,
+ struct sk_buff *skb)
+{
+}
+#endif
+
static void tcp_gro_dev_warn(struct sock *sk, const struct sk_buff *skb,
unsigned int len)
{
@@ -956,7 +1019,11 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
ts ? LINUX_MIB_TCPTSREORDER : LINUX_MIB_TCPSACKREORDER);
}
-/* This must be called before lost_out is incremented */
+/* This must be called before lost_out or retrans_out are updated
+ * on a new loss, because we want to know if all skbs previously
+ * known to be lost have already been retransmitted, indicating
+ * that this newly lost skb is our next skb to retransmit.
+ */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) ||
@@ -966,41 +1033,36 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
tp->retransmit_skb_hint = skb;
}
-/* Sum the number of packets on the wire we have marked as lost.
- * There are two cases we care about here:
- * a) Packet hasn't been marked lost (nor retransmitted),
- * and this is the first loss.
- * b) Packet has been marked both lost and retransmitted,
- * and this means we think it was lost again.
+/* Sum the number of packets on the wire we have marked as lost, and
+ * notify the congestion control module that the given skb was marked lost.
*/
-static void tcp_sum_lost(struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb)
{
- __u8 sacked = TCP_SKB_CB(skb)->sacked;
-
- if (!(sacked & TCPCB_LOST) ||
- ((sacked & TCPCB_LOST) && (sacked & TCPCB_SACKED_RETRANS)))
- tp->lost += tcp_skb_pcount(skb);
+ tp->lost += tcp_skb_pcount(skb);
}
-static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
+void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
- if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
- tcp_verify_retransmit_hint(tp, skb);
+ __u8 sacked = TCP_SKB_CB(skb)->sacked;
+ struct tcp_sock *tp = tcp_sk(sk);
- tp->lost_out += tcp_skb_pcount(skb);
- tcp_sum_lost(tp, skb);
- TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
- }
-}
+ if (sacked & TCPCB_SACKED_ACKED)
+ return;
-void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb)
-{
tcp_verify_retransmit_hint(tp, skb);
-
- tcp_sum_lost(tp, skb);
- if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
+ if (sacked & TCPCB_LOST) {
+ if (sacked & TCPCB_SACKED_RETRANS) {
+ /* Account for retransmits that are lost again */
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+ tp->retrans_out -= tcp_skb_pcount(skb);
+ NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
+ tcp_skb_pcount(skb));
+ tcp_notify_skb_loss_event(tp, skb);
+ }
+ } else {
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
+ tcp_notify_skb_loss_event(tp, skb);
}
}
@@ -2263,7 +2325,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
if (cnt > packets)
break;
- tcp_skb_mark_lost(tp, skb);
+ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST))
+ tcp_mark_skb_lost(sk, skb);
if (mark_head)
break;
@@ -2629,14 +2692,8 @@ void tcp_simple_retransmit(struct sock *sk)
unsigned int mss = tcp_current_mss(sk);
skb_rbtree_walk(skb, &sk->tcp_rtx_queue) {
- if (tcp_skb_seglen(skb) > mss &&
- !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
- TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
- tp->retrans_out -= tcp_skb_pcount(skb);
- }
- tcp_skb_mark_lost_uncond_verify(tp, skb);
- }
+ if (tcp_skb_seglen(skb) > mss)
+ tcp_mark_skb_lost(sk, skb);
}
tcp_clear_retrans_hints_partial(tp);
@@ -3819,7 +3876,7 @@ static void tcp_parse_fastopen_option(int len, const unsigned char *cookie,
foc->exp = exp_opt;
}
-static void smc_parse_options(const struct tcphdr *th,
+static bool smc_parse_options(const struct tcphdr *th,
struct tcp_options_received *opt_rx,
const unsigned char *ptr,
int opsize)
@@ -3828,10 +3885,13 @@ static void smc_parse_options(const struct tcphdr *th,
if (static_branch_unlikely(&tcp_have_smc)) {
if (th->syn && !(opsize & 1) &&
opsize >= TCPOLEN_EXP_SMC_BASE &&
- get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC)
+ get_unaligned_be32(ptr) == TCPOPT_SMC_MAGIC) {
opt_rx->smc_ok = 1;
+ return true;
+ }
}
#endif
+ return false;
}
/* Try to parse the MSS option from the TCP header. Return 0 on failure, clamped
@@ -3892,6 +3952,7 @@ void tcp_parse_options(const struct net *net,
ptr = (const unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;
+ opt_rx->saw_unknown = 0;
while (length > 0) {
int opcode = *ptr++;
@@ -3982,15 +4043,21 @@ void tcp_parse_options(const struct net *net,
*/
if (opsize >= TCPOLEN_EXP_FASTOPEN_BASE &&
get_unaligned_be16(ptr) ==
- TCPOPT_FASTOPEN_MAGIC)
+ TCPOPT_FASTOPEN_MAGIC) {
tcp_parse_fastopen_option(opsize -
TCPOLEN_EXP_FASTOPEN_BASE,
ptr + 2, th->syn, foc, true);
- else
- smc_parse_options(th, opt_rx, ptr,
- opsize);
+ break;
+ }
+
+ if (smc_parse_options(th, opt_rx, ptr, opsize))
+ break;
+
+ opt_rx->saw_unknown = 1;
break;
+ default:
+ opt_rx->saw_unknown = 1;
}
ptr += opsize-2;
length -= opsize;
@@ -4363,7 +4430,8 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
sp[i] = sp[i + 1];
continue;
}
- this_sack++, swalk++;
+ this_sack++;
+ swalk++;
}
}
@@ -4853,7 +4921,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
int eaten;
if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb, &tp->rx_opt);
+ mptcp_incoming_options(sk, skb);
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
__kfree_skb(skb);
@@ -5277,12 +5345,6 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
return true;
}
-/* When incoming ACK allowed to free some skb from write_queue,
- * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket
- * on the exit from tcp input handler.
- *
- * PROBLEM: sndbuf expansion does not work well with largesend.
- */
static void tcp_new_space(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5297,16 +5359,13 @@ static void tcp_new_space(struct sock *sk)
static void tcp_check_space(struct sock *sk)
{
- if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
- sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
- /* pairs with tcp_poll() */
- smp_mb();
- if (sk->sk_socket &&
- test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
- tcp_new_space(sk);
- if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
- tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
- }
+ /* pairs with tcp_poll() */
+ smp_mb();
+ if (sk->sk_socket &&
+ test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+ tcp_new_space(sk);
+ if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+ tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
}
}
@@ -5608,6 +5667,8 @@ syn_challenge:
goto discard;
}
+ bpf_skops_parse_hdr(sk, skb);
+
return true;
discard:
@@ -5816,7 +5877,7 @@ discard:
}
EXPORT_SYMBOL(tcp_rcv_established);
-void tcp_init_transfer(struct sock *sk, int bpf_op)
+void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -5837,8 +5898,10 @@ void tcp_init_transfer(struct sock *sk, int bpf_op)
tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
tp->snd_cwnd_stamp = tcp_jiffies32;
- tcp_call_bpf(sk, bpf_op, 0, NULL);
- tcp_init_congestion_control(sk);
+ icsk->icsk_ca_initialized = 0;
+ bpf_skops_established(sk, bpf_op, skb);
+ if (!icsk->icsk_ca_initialized)
+ tcp_init_congestion_control(sk);
tcp_init_buffer_space(sk);
}
@@ -5856,7 +5919,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
sk_mark_napi_id(sk, skb);
}
- tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB);
+ tcp_init_transfer(sk, BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, skb);
/* Prevent spurious tcp_cwnd_restart() on first data
* packet.
@@ -6328,7 +6391,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
} else {
tcp_try_undo_spurious_syn(sk);
tp->retrans_stamp = 0;
- tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
+ tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB,
+ skb);
WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
}
smp_mb();
@@ -6438,7 +6502,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
case TCP_LAST_ACK:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
if (sk_is_mptcp(sk))
- mptcp_incoming_options(sk, skb, &tp->rx_opt);
+ mptcp_incoming_options(sk, skb);
break;
}
fallthrough;
@@ -6617,13 +6681,27 @@ static void tcp_reqsk_record_syn(const struct sock *sk,
{
if (tcp_sk(sk)->save_syn) {
u32 len = skb_network_header_len(skb) + tcp_hdrlen(skb);
- u32 *copy;
+ struct saved_syn *saved_syn;
+ u32 mac_hdrlen;
+ void *base;
+
+ if (tcp_sk(sk)->save_syn == 2) { /* Save full header. */
+ base = skb_mac_header(skb);
+ mac_hdrlen = skb_mac_header_len(skb);
+ len += mac_hdrlen;
+ } else {
+ base = skb_network_header(skb);
+ mac_hdrlen = 0;
+ }
- copy = kmalloc(len + sizeof(u32), GFP_ATOMIC);
- if (copy) {
- copy[0] = len;
- memcpy(&copy[1], skb_network_header(skb), len);
- req->saved_syn = copy;
+ saved_syn = kmalloc(struct_size(saved_syn, data, len),
+ GFP_ATOMIC);
+ if (saved_syn) {
+ saved_syn->mac_hdrlen = mac_hdrlen;
+ saved_syn->network_hdrlen = skb_network_header_len(skb);
+ saved_syn->tcp_hdrlen = tcp_hdrlen(skb);
+ memcpy(saved_syn->data, base, len);
+ req->saved_syn = saved_syn;
}
}
}
@@ -6762,6 +6840,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_rsk(req)->snt_isn = isn;
tcp_rsk(req)->txhash = net_tx_rndhash();
+ tcp_rsk(req)->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
tcp_openreq_init_rwin(req, sk, dst);
sk_rx_queue_set(req_to_sk(req), skb);
if (!want_cookie) {
@@ -6770,7 +6849,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
}
if (fastopen_sk) {
af_ops->send_synack(fastopen_sk, dst, &fl, req,
- &foc, TCP_SYNACK_FASTOPEN);
+ &foc, TCP_SYNACK_FASTOPEN, skb);
/* Add the child socket directly into the accept queue */
if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
reqsk_fastopen_remove(fastopen_sk, req, false);
@@ -6788,7 +6867,8 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
tcp_timeout_init((struct sock *)req));
af_ops->send_synack(sk, dst, &fl, req, &foc,
!want_cookie ? TCP_SYNACK_NORMAL :
- TCP_SYNACK_COOKIE);
+ TCP_SYNACK_COOKIE,
+ skb);
if (want_cookie) {
reqsk_free(req);
return 0;
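
tcp_reqsk_record_syn() now stores the SYN behind three explicit header-length fields and a flexible data array rather than a bare length-prefixed u32 buffer. A self-contained userspace analogue of the layout and of what struct_size(saved_syn, data, len) computes; the types shadow the kernel's struct saved_syn and the sample lengths are made up:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    struct saved_syn {
            uint32_t mac_hdrlen;
            uint32_t network_hdrlen;
            uint32_t tcp_hdrlen;
            uint8_t data[];         /* MAC (optional) + IP + TCP headers */
    };

    int main(void)
    {
            const uint8_t hdrs[54] = { 0 }; /* pretend: 14 + 20 + 20 bytes */
            size_t len = sizeof(hdrs);
            /* userspace stand-in for struct_size(saved_syn, data, len) */
            struct saved_syn *ss = malloc(sizeof(*ss) + len);

            if (!ss)
                    return 1;
            ss->mac_hdrlen = 14;
            ss->network_hdrlen = 20;
            ss->tcp_hdrlen = 20;
            memcpy(ss->data, hdrs, len);
            printf("total syn bytes: %u\n",
                   ss->mac_hdrlen + ss->network_hdrlen + ss->tcp_hdrlen);
            free(ss);
            return 0;
    }
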
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 592c73962723..7352c097ae48 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -575,7 +575,7 @@ int tcp_v4_err(struct sk_buff *skb, u32 info)
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
- * is already accepted it is treated as a connected one below.
+ * already accepted it is treated as a connected one below.
*/
if (fastopen && !fastopen->sk)
break;
@@ -965,18 +965,23 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type)
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
int err = -1;
struct sk_buff *skb;
+ u8 tos;
/* First, grab a route. */
if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
return -1;
- skb = tcp_make_synack(sk, dst, req, foc, synack_type);
+ skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
+
+ tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+ tcp_rsk(req)->syn_tos : inet_sk(sk)->tos;
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
@@ -984,7 +989,8 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- rcu_dereference(ireq->ireq_opt));
+ rcu_dereference(ireq->ireq_opt),
+ tos & ~INET_ECN_MASK);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -1529,6 +1535,10 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
newinet->inet_id = prandom_u32();
+ /* Set ToS of the new socket based upon the value of the incoming SYN. */
+ if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+ newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
+
if (!dst) {
dst = inet_csk_route_child_sock(sk, newsk, req);
if (!dst)
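
Note that both reflection points mask the TOS with ~INET_ECN_MASK, so only the DSCP part of the client's SYN is echoed and the ECN bits are never copied. INET_ECN_MASK is 3 (include/net/inet_ecn.h); a tiny sketch of the masking:

    #include <stdio.h>

    #define INET_ECN_MASK 3 /* low two bits of the TOS byte carry ECN */

    int main(void)
    {
            unsigned char syn_tos = 0xb9; /* e.g. DSCP EF (0xb8) + ECN CE */

            printf("reflected tos: 0x%02x\n", syn_tos & ~INET_ECN_MASK);
            return 0; /* prints 0xb8: DSCP kept, ECN cleared */
    }
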
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 279db8822439..6b27c481fe18 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -943,7 +943,7 @@ static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
return 0;
}
-static const struct genl_ops tcp_metrics_nl_ops[] = {
+static const struct genl_small_ops tcp_metrics_nl_ops[] = {
{
.cmd = TCP_METRICS_CMD_GET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -966,8 +966,8 @@ static struct genl_family tcp_metrics_nl_family __ro_after_init = {
.policy = tcp_metrics_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = tcp_metrics_nl_ops,
- .n_ops = ARRAY_SIZE(tcp_metrics_nl_ops),
+ .small_ops = tcp_metrics_nl_ops,
+ .n_small_ops = ARRAY_SIZE(tcp_metrics_nl_ops),
};
static unsigned int tcpmhash_entries;
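
struct genl_small_ops carries only cmd/flags/validate/doit/dumpit, which is all tcp_metrics ever used, so the ops table shrinks with no behavior change. A hedged sketch of the same conversion for a hypothetical family (handler left NULL for brevity):

    #include <net/genetlink.h>

    /* Hypothetical family: ops that need neither per-op policy nor
     * start/done callbacks fit the smaller struct and register via
     * .small_ops / .n_small_ops.
     */
    static const struct genl_small_ops demo_small_ops[] = {
            {
                    .cmd      = 1,
                    .validate = GENL_DONT_VALIDATE_STRICT |
                                GENL_DONT_VALIDATE_DUMP,
                    .doit     = NULL,       /* demo_cmd_doit */
            },
    };

    static struct genl_family demo_family __ro_after_init = {
            .name        = "demo",
            .version     = 1,
            .module      = THIS_MODULE,
            .small_ops   = demo_small_ops,
            .n_small_ops = ARRAY_SIZE(demo_small_ops),
    };
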
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 85ff417bda7f..bf48cd73e967 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -438,6 +438,7 @@ struct tcp_out_options {
u8 ws; /* window scale, 0 to disable */
u8 num_sack_blocks; /* number of SACK blocks to include */
u8 hash_size; /* bytes in hash_location */
+ u8 bpf_opt_len; /* length of BPF hdr option */
__u8 *hash_location; /* temporary pointer, overloaded */
__u32 tsval, tsecr; /* need to include OPTION_TS */
struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */
@@ -452,6 +453,145 @@ static void mptcp_options_write(__be32 *ptr, struct tcp_out_options *opts)
#endif
}
+#ifdef CONFIG_CGROUP_BPF
+static int bpf_skops_write_hdr_opt_arg0(struct sk_buff *skb,
+ enum tcp_synack_type synack_type)
+{
+ if (unlikely(!skb))
+ return BPF_WRITE_HDR_TCP_CURRENT_MSS;
+
+ if (unlikely(synack_type == TCP_SYNACK_COOKIE))
+ return BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
+
+ return 0;
+}
+
+/* req, syn_skb and synack_type are used when writing synack */
+static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct sk_buff *syn_skb,
+ enum tcp_synack_type synack_type,
+ struct tcp_out_options *opts,
+ unsigned int *remaining)
+{
+ struct bpf_sock_ops_kern sock_ops;
+ int err;
+
+ if (likely(!BPF_SOCK_OPS_TEST_FLAG(tcp_sk(sk),
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG)) ||
+ !*remaining)
+ return;
+
+ /* *remaining has already been aligned to 4 bytes, so *remaining >= 4 */
+
+ /* init sock_ops */
+ memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+
+ sock_ops.op = BPF_SOCK_OPS_HDR_OPT_LEN_CB;
+
+ if (req) {
+ /* The listen "sk" cannot be passed here because
+ * it is not locked. It would also not make much
+ * sense to do bpf_setsockopt(listen_sk) based
+ * on an individual connection request.
+ *
+ * Thus, "req" is passed here and the cgroup-bpf-progs
+ * of the listen "sk" will be run.
+ *
+ * "req" is also used here for fastopen even the "sk" here is
+ * a fullsock "child" sk. It is to keep the behavior
+ * consistent between fastopen and non-fastopen on
+ * the bpf programming side.
+ */
+ sock_ops.sk = (struct sock *)req;
+ sock_ops.syn_skb = syn_skb;
+ } else {
+ sock_owned_by_me(sk);
+
+ sock_ops.is_fullsock = 1;
+ sock_ops.sk = sk;
+ }
+
+ sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
+ sock_ops.remaining_opt_len = *remaining;
+ /* tcp_current_mss() does not pass a skb */
+ if (skb)
+ bpf_skops_init_skb(&sock_ops, skb, 0);
+
+ err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
+
+ if (err || sock_ops.remaining_opt_len == *remaining)
+ return;
+
+ opts->bpf_opt_len = *remaining - sock_ops.remaining_opt_len;
+ /* round up to 4 bytes */
+ opts->bpf_opt_len = (opts->bpf_opt_len + 3) & ~3;
+
+ *remaining -= opts->bpf_opt_len;
+}
+
+static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct sk_buff *syn_skb,
+ enum tcp_synack_type synack_type,
+ struct tcp_out_options *opts)
+{
+ u8 first_opt_off, nr_written, max_opt_len = opts->bpf_opt_len;
+ struct bpf_sock_ops_kern sock_ops;
+ int err;
+
+ if (likely(!max_opt_len))
+ return;
+
+ memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+
+ sock_ops.op = BPF_SOCK_OPS_WRITE_HDR_OPT_CB;
+
+ if (req) {
+ sock_ops.sk = (struct sock *)req;
+ sock_ops.syn_skb = syn_skb;
+ } else {
+ sock_owned_by_me(sk);
+
+ sock_ops.is_fullsock = 1;
+ sock_ops.sk = sk;
+ }
+
+ sock_ops.args[0] = bpf_skops_write_hdr_opt_arg0(skb, synack_type);
+ sock_ops.remaining_opt_len = max_opt_len;
+ first_opt_off = tcp_hdrlen(skb) - max_opt_len;
+ bpf_skops_init_skb(&sock_ops, skb, first_opt_off);
+
+ err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, sk);
+
+ if (err)
+ nr_written = 0;
+ else
+ nr_written = max_opt_len - sock_ops.remaining_opt_len;
+
+ if (nr_written < max_opt_len)
+ memset(skb->data + first_opt_off + nr_written, TCPOPT_NOP,
+ max_opt_len - nr_written);
+}
+#else
+static void bpf_skops_hdr_opt_len(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct sk_buff *syn_skb,
+ enum tcp_synack_type synack_type,
+ struct tcp_out_options *opts,
+ unsigned int *remaining)
+{
+}
+
+static void bpf_skops_write_hdr_opt(struct sock *sk, struct sk_buff *skb,
+ struct request_sock *req,
+ struct sk_buff *syn_skb,
+ enum tcp_synack_type synack_type,
+ struct tcp_out_options *opts)
+{
+}
+#endif
+
/* Write previously computed TCP options to the packet.
*
* Beware: Something in the Internet is very sensitive to the ordering of
@@ -691,6 +831,8 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
}
}
+ bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
+
return MAX_TCP_OPTION_SPACE - remaining;
}
@@ -701,7 +843,8 @@ static unsigned int tcp_synack_options(const struct sock *sk,
struct tcp_out_options *opts,
const struct tcp_md5sig_key *md5,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type)
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb)
{
struct inet_request_sock *ireq = inet_rsk(req);
unsigned int remaining = MAX_TCP_OPTION_SPACE;
@@ -758,6 +901,9 @@ static unsigned int tcp_synack_options(const struct sock *sk,
smc_set_option_cond(tcp_sk(sk), ireq, opts, &remaining);
+ bpf_skops_hdr_opt_len((struct sock *)sk, skb, req, syn_skb,
+ synack_type, opts, &remaining);
+
return MAX_TCP_OPTION_SPACE - remaining;
}
@@ -826,6 +972,15 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
opts->num_sack_blocks * TCPOLEN_SACK_PERBLOCK;
}
+ if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp,
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG))) {
+ unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
+
+ bpf_skops_hdr_opt_len(sk, skb, NULL, NULL, 0, opts, &remaining);
+
+ size = MAX_TCP_OPTION_SPACE - remaining;
+ }
+
return size;
}
@@ -1213,6 +1368,9 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
}
#endif
+ /* BPF prog is the last one writing header option */
+ bpf_skops_write_hdr_opt(sk, skb, NULL, NULL, 0, &opts);
+
INDIRECT_CALL_INET(icsk->icsk_af_ops->send_check,
tcp_v6_send_check, tcp_v4_send_check,
sk, skb);
@@ -1524,7 +1682,6 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
skb->truesize -= delta_truesize;
sk_wmem_queued_add(sk, -delta_truesize);
sk_mem_uncharge(sk, delta_truesize);
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
}
/* Any change of skb->len requires recalculation of tso factor. */
@@ -3336,20 +3493,20 @@ int tcp_send_synack(struct sock *sk)
}
/**
- * tcp_make_synack - Prepare a SYN-ACK.
- * sk: listener socket
- * dst: dst entry attached to the SYNACK
- * req: request_sock pointer
- * foc: cookie for tcp fast open
- * synack_type: Type of synback to prepare
- *
- * Allocate one skb and build a SYNACK packet.
- * @dst is consumed : Caller should not use it again.
+ * tcp_make_synack - Allocate one skb and build a SYNACK packet.
+ * @sk: listener socket
+ * @dst: dst entry attached to the SYNACK. It is consumed and caller
+ * should not use it again.
+ * @req: request_sock pointer
+ * @foc: cookie for tcp fast open
+ * @synack_type: Type of synack to prepare
+ * @syn_skb: SYN packet just received. It could be NULL for the rtx case.
*/
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type)
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb)
{
struct inet_request_sock *ireq = inet_rsk(req);
const struct tcp_sock *tp = tcp_sk(sk);
@@ -3408,8 +3565,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
md5 = tcp_rsk(req)->af_specific->req_md5_lookup(sk, req_to_sk(req));
#endif
skb_set_hash(skb, tcp_rsk(req)->txhash, PKT_HASH_TYPE_L4);
+ /* bpf program will be interested in the tcp_flags */
+ TCP_SKB_CB(skb)->tcp_flags = TCPHDR_SYN | TCPHDR_ACK;
tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, md5,
- foc, synack_type) + sizeof(*th);
+ foc, synack_type,
+ syn_skb) + sizeof(*th);
skb_push(skb, tcp_header_size);
skb_reset_transport_header(skb);
@@ -3441,6 +3601,9 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
rcu_read_unlock();
#endif
+ bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb,
+ synack_type, &opts);
+
skb->skb_mstamp_ns = now;
tcp_add_tx_delay(skb, tp);
@@ -3741,16 +3904,15 @@ void tcp_send_delayed_ack(struct sock *sk)
ato = min(ato, max_ato);
}
+ ato = min_t(u32, ato, inet_csk(sk)->icsk_delack_max);
+
/* Stay within the limit we were given */
timeout = jiffies + ato;
/* Use new timeout only if there wasn't a older one earlier. */
if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
- /* If delack timer was blocked or is about to expire,
- * send ACK now.
- */
- if (icsk->icsk_ack.blocked ||
- time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
+ /* If delack timer is about to expire, send ACK now. */
+ if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
tcp_send_ack(sk);
return;
}
@@ -3779,10 +3941,15 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
buff = alloc_skb(MAX_TCP_HEADER,
sk_gfp_mask(sk, GFP_ATOMIC | __GFP_NOWARN));
if (unlikely(!buff)) {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ unsigned long delay;
+
+ delay = TCP_DELACK_MAX << icsk->icsk_ack.retry;
+ if (delay < TCP_RTO_MAX)
+ icsk->icsk_ack.retry++;
inet_csk_schedule_ack(sk);
- inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
- TCP_DELACK_MAX, TCP_RTO_MAX);
+ icsk->icsk_ack.ato = TCP_ATO_MIN;
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, delay, TCP_RTO_MAX);
return;
}
@@ -3934,7 +4101,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
int res;
tcp_rsk(req)->txhash = net_tx_rndhash();
- res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
+ res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL,
+ NULL);
if (!res) {
__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
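
The pair of hooks added above implements a reserve-then-write protocol for cgroup BPF: bpf_skops_hdr_opt_len() sizes the option space while the program calls bpf_reserve_hdr_opt(), and bpf_skops_write_hdr_opt() runs after all kernel options so the program can emit its bytes with bpf_store_hdr_opt(). A minimal sockops sketch under those assumptions, built with libbpf, using the RFC 6994 experimental option kind 254 and a made-up 2-byte payload:

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct tcp_opt {
            __u8 kind;      /* 254: RFC 6994 experimental option */
            __u8 len;
            __u8 data[2];
    } __attribute__((packed));

    SEC("sockops")
    int demo_hdr_opt(struct bpf_sock_ops *skops)
    {
            struct tcp_opt opt = { .kind = 254, .len = 4,
                                   .data = { 0xeB, 0x9F } };

            switch (skops->op) {
            case BPF_SOCK_OPS_TCP_LISTEN_CB:
                    /* ask for the two new callbacks on this socket */
                    bpf_sock_ops_cb_flags_set(skops,
                            skops->bpf_sock_ops_cb_flags |
                            BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
                    break;
            case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
                    /* maps to bpf_skops_hdr_opt_len() above */
                    bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
                    break;
            case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
                    /* maps to bpf_skops_write_hdr_opt() above */
                    bpf_store_hdr_opt(skops, &opt, sizeof(opt), 0);
                    break;
            }
            return 1;
    }

    char _license[] SEC("license") = "GPL";
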
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index fdb715bdd2d1..f65a3ddd0d58 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -2,20 +2,6 @@
#include <linux/tcp.h>
#include <net/tcp.h>
-void tcp_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- tcp_skb_mark_lost_uncond_verify(tp, skb);
- if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
- /* Account for retransmits that are lost again */
- TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
- tp->retrans_out -= tcp_skb_pcount(skb);
- NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
- tcp_skb_pcount(skb));
- }
-}
-
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
return t1 > t2 || (t1 == t2 && after(seq1, seq2));
@@ -246,6 +232,6 @@ void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
mss, mss, GFP_ATOMIC);
- tcp_skb_mark_lost_uncond_verify(tp, skb);
+ tcp_mark_skb_lost(sk, skb);
}
}
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 6cebf412d590..5842081bc8a2 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -10,7 +10,7 @@
#include <net/tcp.h>
/* These factors derived from the recommended values in the aer:
- * .01 and and 7/8.
+ * .01 and 7/8.
*/
#define TCP_SCALABLE_AI_CNT 100U
#define TCP_SCALABLE_MD_SCALE 3
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 0c08c420fbc2..6c62b9ea1320 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -331,7 +331,6 @@ static void tcp_delack_timer(struct timer_list *t)
if (!sock_owned_by_user(sk)) {
tcp_delack_timer_handler(sk);
} else {
- icsk->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* delegate our work to tcp_release_cb() */
if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 3f51e781562a..c8003c8aad2c 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -293,10 +293,10 @@ size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
const struct vegas *ca = inet_csk_ca(sk);
if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
- info->vegas.tcpv_enabled = ca->doing_vegas_now,
- info->vegas.tcpv_rttcnt = ca->cntRTT,
- info->vegas.tcpv_rtt = ca->baseRTT,
- info->vegas.tcpv_minrtt = ca->minRTT,
+ info->vegas.tcpv_enabled = ca->doing_vegas_now;
+ info->vegas.tcpv_rttcnt = ca->cntRTT;
+ info->vegas.tcpv_rtt = ca->baseRTT;
+ info->vegas.tcpv_minrtt = ca->minRTT;
*attr = INET_DIAG_VEGASINFO;
return sizeof(struct tcpvegas_info);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e88efba07551..09f0a23d1a01 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1170,7 +1170,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.oif = inet->uc_index;
} else if (ipv4_is_lbcast(daddr) && inet->uc_index) {
/* oif is set, packet is to local broadcast and
- * and uc_index is set. oif is most likely set
+ * uc_index is set. oif is most likely set
* by sk_bound_dev_if. If uc_index != oif check if the
* oif is an L3 master and uc_index is an L3 slave.
* If so, we want to allow the send using the uc_index.
diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
index eddd973e6575..7a94791efc1a 100644
--- a/net/ipv4/udp_bpf.c
+++ b/net/ipv4/udp_bpf.c
@@ -22,10 +22,9 @@ static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
prot->close = sock_map_close;
}
-static void udp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops)
+static void udp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
- if (sk->sk_family == AF_INET6 &&
- unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) {
+ if (unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) {
spin_lock_bh(&udpv6_prot_lock);
if (likely(ops != udpv6_prot_saved)) {
udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV6], ops);
@@ -46,8 +45,8 @@ struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
{
int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6;
- if (!psock->sk_proto)
- udp_bpf_check_v6_needs_rebuild(sk, READ_ONCE(sk->sk_prot));
+ if (sk->sk_family == AF_INET6)
+ udp_bpf_check_v6_needs_rebuild(psock->sk_proto);
return &udp_bpf_prots[family];
}
diff --git a/net/ipv4/udp_tunnel_nic.c b/net/ipv4/udp_tunnel_nic.c
index 69962165c0e8..0d122edc368d 100644
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -19,8 +19,9 @@ enum udp_tunnel_nic_table_entry_flags {
struct udp_tunnel_nic_table_entry {
__be16 port;
u8 type;
- u8 use_cnt;
u8 flags;
+ u16 use_cnt;
+#define UDP_TUNNEL_NIC_USE_CNT_MAX U16_MAX
u8 hw_priv;
};
@@ -370,6 +371,8 @@ udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
bool dodgy = entry->flags & UDP_TUNNEL_NIC_ENTRY_OP_FAIL;
unsigned int from, to;
+ WARN_ON(entry->use_cnt + (u32)use_cnt_adj > U16_MAX);
+
/* If not going from used to unused or vice versa - all done.
* For dodgy entries make sure we try to sync again (queue the entry).
*/
@@ -675,6 +678,7 @@ static void
udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
{
const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+ struct udp_tunnel_nic_shared_node *node;
unsigned int i, j;
/* Freeze all the ports we are already tracking so that the replay
@@ -686,7 +690,12 @@ udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
utn->missed = 0;
utn->need_replay = 0;
- udp_tunnel_get_rx_info(dev);
+ if (!info->shared) {
+ udp_tunnel_get_rx_info(dev);
+ } else {
+ list_for_each_entry(node, &info->shared->devices, list)
+ udp_tunnel_get_rx_info(node->dev);
+ }
for (i = 0; i < utn->n_tables; i++)
for (j = 0; j < info->tables[i].n_entries; j++)
@@ -742,20 +751,39 @@ err_free_utn:
return NULL;
}
+static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
+{
+ unsigned int i;
+
+ for (i = 0; i < utn->n_tables; i++)
+ kfree(utn->entries[i]);
+ kfree(utn->entries);
+ kfree(utn);
+}
+
static int udp_tunnel_nic_register(struct net_device *dev)
{
const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+ struct udp_tunnel_nic_shared_node *node = NULL;
struct udp_tunnel_nic *utn;
unsigned int n_tables, i;
BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
UDP_TUNNEL_NIC_MAX_TABLES);
+ /* Expect use count of at most 2 (IPv4, IPv6) per device */
+ BUILD_BUG_ON(UDP_TUNNEL_NIC_USE_CNT_MAX <
+ UDP_TUNNEL_NIC_MAX_SHARING_DEVICES * 2);
+ /* Check that the driver info is sane */
if (WARN_ON(!info->set_port != !info->unset_port) ||
WARN_ON(!info->set_port == !info->sync_table) ||
WARN_ON(!info->tables[0].n_entries))
return -EINVAL;
+ if (WARN_ON(info->shared &&
+ info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY))
+ return -EINVAL;
+
n_tables = 1;
for (i = 1; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
if (!info->tables[i].n_entries)
@@ -766,9 +794,33 @@ static int udp_tunnel_nic_register(struct net_device *dev)
return -EINVAL;
}
- utn = udp_tunnel_nic_alloc(info, n_tables);
- if (!utn)
- return -ENOMEM;
+ /* Create UDP tunnel state structures */
+ if (info->shared) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->dev = dev;
+ }
+
+ if (info->shared && info->shared->udp_tunnel_nic_info) {
+ utn = info->shared->udp_tunnel_nic_info;
+ } else {
+ utn = udp_tunnel_nic_alloc(info, n_tables);
+ if (!utn) {
+ kfree(node);
+ return -ENOMEM;
+ }
+ }
+
+ if (info->shared) {
+ if (!info->shared->udp_tunnel_nic_info) {
+ INIT_LIST_HEAD(&info->shared->devices);
+ info->shared->udp_tunnel_nic_info = utn;
+ }
+
+ list_add_tail(&node->list, &info->shared->devices);
+ }
utn->dev = dev;
dev_hold(dev);
@@ -783,7 +835,33 @@ static int udp_tunnel_nic_register(struct net_device *dev)
static void
udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
{
- unsigned int i;
+ const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
+
+ /* For a shared table remove this dev from the list of sharing devices
+ * and if there are other devices just detach.
+ */
+ if (info->shared) {
+ struct udp_tunnel_nic_shared_node *node, *first;
+
+ list_for_each_entry(node, &info->shared->devices, list)
+ if (node->dev == dev)
+ break;
+ if (node->dev != dev)
+ return;
+
+ list_del(&node->list);
+ kfree(node);
+
+ first = list_first_entry_or_null(&info->shared->devices,
+ typeof(*first), list);
+ if (first) {
+ udp_tunnel_drop_rx_info(dev);
+ utn->dev = first->dev;
+ goto release_dev;
+ }
+
+ info->shared->udp_tunnel_nic_info = NULL;
+ }
/* Flush before we check work, so we don't waste time adding entries
* from the work which we will boot immediately.
@@ -796,10 +874,8 @@ udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
if (utn->work_pending)
return;
- for (i = 0; i < utn->n_tables; i++)
- kfree(utn->entries[i]);
- kfree(utn->entries);
- kfree(utn);
+ udp_tunnel_nic_free(utn);
+release_dev:
dev->udp_tunnel_nic = NULL;
dev_put(dev);
}
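
The shared-table support lets multiple netdevs, typically ports behind one ASIC, attach to a single udp_tunnel_nic instance via info->shared; the replay and unregister paths above walk the sharing list. A hedged driver-side sketch of opting in (callback bodies stubbed, table sizes made up):

    #include <net/udp_tunnel.h>

    static int demo_set_port(struct net_device *dev, unsigned int table,
                             unsigned int entry, struct udp_tunnel_info *ti)
    {
            return 0;       /* hypothetical: program the port into hw */
    }

    static int demo_unset_port(struct net_device *dev, unsigned int table,
                               unsigned int entry, struct udp_tunnel_info *ti)
    {
            return 0;       /* hypothetical: remove the port from hw */
    }

    /* One shared state for all ports of the hypothetical device */
    static struct udp_tunnel_nic_shared demo_shared;

    static const struct udp_tunnel_nic_info demo_udp_tunnels = {
            .set_port   = demo_set_port,
            .unset_port = demo_unset_port,
            .shared     = &demo_shared,
            /* note: must not be combined with UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
             * per the WARN_ON in udp_tunnel_nic_register() above
             */
            .tables     = {
                    { .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
            },
    };
    /* each port's probe then sets dev->udp_tunnel_nic_info = &demo_udp_tunnels; */
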
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 9ebf3fe0d2b1..c70c192bc91b 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -191,6 +191,13 @@ static int eafnosupport_ip6_del_rt(struct net *net, struct fib6_info *rt,
return -EAFNOSUPPORT;
}
+static int eafnosupport_ipv6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
+ int (*output)(struct net *, struct sock *, struct sk_buff *))
+{
+ kfree_skb(skb);
+ return -EAFNOSUPPORT;
+}
+
const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
.ipv6_dst_lookup_flow = eafnosupport_ipv6_dst_lookup_flow,
.ipv6_route_input = eafnosupport_ipv6_route_input,
@@ -201,6 +208,7 @@ const struct ipv6_stub *ipv6_stub __read_mostly = &(struct ipv6_stub) {
.ip6_mtu_from_fib6 = eafnosupport_ip6_mtu_from_fib6,
.fib6_nh_init = eafnosupport_fib6_nh_init,
.ip6_del_rt = eafnosupport_ip6_del_rt,
+ .ipv6_fragment = eafnosupport_ipv6_fragment,
};
EXPORT_SYMBOL_GPL(ipv6_stub);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 0306509ab063..e648fbebb167 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -661,6 +661,7 @@ int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
const struct proto_ops inet6_stream_ops = {
.family = PF_INET6,
+ .flags = PROTO_CMSG_DATA_ONLY,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
@@ -1026,6 +1027,7 @@ static const struct ipv6_stub ipv6_stub_impl = {
.xfrm6_rcv_encap = xfrm6_rcv_encap,
#endif
.nd_tbl = &nd_tbl,
+ .ipv6_fragment = ip6_fragment,
};
static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = {
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 8d3f66c310db..78f766019b7e 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -761,7 +761,7 @@ static int calipso_genopt(unsigned char *buf, u32 start, u32 buf_len,
calipso[1] = len - 2;
*(__be32 *)(calipso + 2) = htonl(doi_def->doi);
calipso[6] = (len - CALIPSO_HDR_LEN) / 4;
- calipso[7] = secattr->attr.mls.lvl,
+ calipso[7] = secattr->attr.mls.lvl;
crc = ~crc_ccitt(0xffff, calipso, len);
calipso[8] = crc & 0xff;
calipso[9] = (crc >> 8) & 0xff;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 83b251151b5c..ec448b71bf9a 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -501,8 +501,11 @@ void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
if (__ipv6_addr_needs_scope_id(addr_type)) {
iif = icmp6_iif(skb);
} else {
- dst = skb_dst(skb);
- iif = l3mdev_master_ifindex(dst ? dst->dev : skb->dev);
+ /*
+ * The source device is used for looking up which routing table
+ * to use for sending an ICMP error.
+ */
+ iif = l3mdev_master_ifindex(skb->dev);
}
/*
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 2d3add9e6116..55c290d55605 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -94,7 +94,7 @@ EXPORT_SYMBOL(__inet6_lookup_established);
static inline int compute_score(struct sock *sk, struct net *net,
const unsigned short hnum,
const struct in6_addr *daddr,
- const int dif, const int sdif, bool exact_dif)
+ const int dif, const int sdif)
{
int score = -1;
@@ -138,15 +138,13 @@ static struct sock *inet6_lhash2_lookup(struct net *net,
const __be16 sport, const struct in6_addr *daddr,
const unsigned short hnum, const int dif, const int sdif)
{
- bool exact_dif = inet6_exact_dif_match(net, skb);
struct inet_connection_sock *icsk;
struct sock *sk, *result = NULL;
int score, hiscore = 0;
inet_lhash2_for_each_icsk_rcu(icsk, &ilb2->head) {
sk = (struct sock *)icsk;
- score = compute_score(sk, net, hnum, daddr, dif, sdif,
- exact_dif);
+ score = compute_score(sk, net, hnum, daddr, dif, sdif);
if (score > hiscore) {
result = lookup_reuseport(net, sk, skb, doff,
saddr, sport, daddr, hnum);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 4a664ad4f4d4..605cdd38a919 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1812,10 +1812,14 @@ static struct fib6_node *fib6_repair_tree(struct net *net,
children = 0;
child = NULL;
- if (fn_r)
- child = fn_r, children |= 1;
- if (fn_l)
- child = fn_l, children |= 2;
+ if (fn_r) {
+ child = fn_r;
+ children |= 1;
+ }
+ if (fn_l) {
+ child = fn_l;
+ children |= 2;
+ }
if (children == 3 || FIB6_SUBTREE(fn)
#ifdef CONFIG_IPV6_SUBTREES
@@ -2618,8 +2622,10 @@ static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
iter->skip = *pos;
if (iter->tbl) {
+ loff_t p = 0;
+
ipv6_route_seq_setup_walk(iter, net);
- return ipv6_route_seq_next(seq, NULL, pos);
+ return ipv6_route_seq_next(seq, NULL, &p);
} else {
return NULL;
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 3a57fb9ce049..931b186d2e48 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -707,6 +707,17 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
return 0;
}
+static struct ip_tunnel_info *skb_tunnel_info_txcheck(struct sk_buff *skb)
+{
+ struct ip_tunnel_info *tun_info;
+
+ tun_info = skb_tunnel_info(skb);
+ if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX)))
+ return ERR_PTR(-EINVAL);
+
+ return tun_info;
+}
+
static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
struct net_device *dev, __u8 dsfield,
struct flowi6 *fl6, int encap_limit,
@@ -734,10 +745,9 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb,
const struct ip_tunnel_key *key;
__be16 flags;
- tun_info = skb_tunnel_info(skb);
- if (unlikely(!tun_info ||
- !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
- ip_tunnel_info_af(tun_info) != AF_INET6))
+ tun_info = skb_tunnel_info_txcheck(skb);
+ if (IS_ERR(tun_info) ||
+ unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
return -EINVAL;
key = &tun_info->key;
@@ -908,7 +918,8 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
tx_err:
- stats->tx_errors++;
+ if (!t->parms.collect_md || !IS_ERR(skb_tunnel_info_txcheck(skb)))
+ stats->tx_errors++;
stats->tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
@@ -917,6 +928,7 @@ tx_err:
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct ip_tunnel_info *tun_info = NULL;
struct ip6_tnl *t = netdev_priv(dev);
struct dst_entry *dst = skb_dst(skb);
struct net_device_stats *stats;
@@ -964,15 +976,13 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
* for native mode, call prepare_ip6gre_xmit_{ipv4,ipv6}.
*/
if (t->parms.collect_md) {
- struct ip_tunnel_info *tun_info;
const struct ip_tunnel_key *key;
struct erspan_metadata *md;
__be32 tun_id;
- tun_info = skb_tunnel_info(skb);
- if (unlikely(!tun_info ||
- !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
- ip_tunnel_info_af(tun_info) != AF_INET6))
+ tun_info = skb_tunnel_info_txcheck(skb);
+ if (IS_ERR(tun_info) ||
+ unlikely(ip_tunnel_info_af(tun_info) != AF_INET6))
goto tx_err;
key = &tun_info->key;
@@ -1065,7 +1075,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
tx_err:
stats = &t->dev->stats;
- stats->tx_errors++;
+ if (!IS_ERR(tun_info))
+ stats->tx_errors++;
stats->tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 2689498157d1..749ad72386b2 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -468,8 +468,6 @@ int ip6_forward(struct sk_buff *skb)
* check and decrement ttl
*/
if (hdr->hop_limit <= 1) {
- /* Force OUTPUT device used as source address */
- skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
@@ -1492,7 +1490,7 @@ emsgsize:
* Otherwise, we need to reserve fragment header and
* fragment alignment (= 8-15 octects, in total).
*
- * Note that we may need to "move" the data from the tail of
+ * Note that we may need to "move" the data from the tail
* of the buffer to the new fragment when we split
* the message.
*
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index fac01b80a104..5f9c4fdc120d 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -347,7 +347,6 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
{
unsigned short family;
struct net_device *dev;
- struct pcpu_sw_netstats *tstats;
struct xfrm_state *x;
const struct xfrm_mode *inner_mode;
struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
@@ -390,12 +389,7 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
skb->dev = dev;
-
- tstats = this_cpu_ptr(dev->tstats);
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += skb->len;
- u64_stats_update_end(&tstats->syncp);
+ dev_sw_netstats_rx_add(dev, skb->len);
return 0;
}
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index 9ee077bf4f49..787c74aa85e3 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -77,16 +77,43 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
return true;
}
+static struct ipv6hdr *icmpv6_bounced_ipv6hdr(struct sk_buff *skb,
+ struct ipv6hdr *_bounced_hdr)
+{
+ if (ipv6_hdr(skb)->nexthdr != IPPROTO_ICMPV6)
+ return NULL;
+
+ if (!icmpv6_is_err(icmp6_hdr(skb)->icmp6_type))
+ return NULL;
+
+ return skb_header_pointer(skb,
+ skb_transport_offset(skb) + sizeof(struct icmp6hdr),
+ sizeof(struct ipv6hdr),
+ _bounced_hdr);
+}
+
static unsigned int
ip6t_snpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_npt_tginfo *npt = par->targinfo;
+ struct ipv6hdr _bounced_hdr;
+ struct ipv6hdr *bounced_hdr;
+ struct in6_addr bounced_pfx;
if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->saddr)) {
icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, saddr));
return NF_DROP;
}
+
+ /* rewrite dst addr of bounced packet which was sent to dst range */
+ bounced_hdr = icmpv6_bounced_ipv6hdr(skb, &_bounced_hdr);
+ if (bounced_hdr) {
+ ipv6_addr_prefix(&bounced_pfx, &bounced_hdr->daddr, npt->src_pfx_len);
+ if (ipv6_addr_cmp(&bounced_pfx, &npt->src_pfx.in6) == 0)
+ ip6t_npt_map_pfx(npt, &bounced_hdr->daddr);
+ }
+
return XT_CONTINUE;
}
@@ -94,12 +121,24 @@ static unsigned int
ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
const struct ip6t_npt_tginfo *npt = par->targinfo;
+ struct ipv6hdr _bounced_hdr;
+ struct ipv6hdr *bounced_hdr;
+ struct in6_addr bounced_pfx;
if (!ip6t_npt_map_pfx(npt, &ipv6_hdr(skb)->daddr)) {
icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_HDR_FIELD,
offsetof(struct ipv6hdr, daddr));
return NF_DROP;
}
+
+ /* rewrite src addr of bounced packet which was sent from dst range */
+ bounced_hdr = icmpv6_bounced_ipv6hdr(skb, &_bounced_hdr);
+ if (bounced_hdr) {
+ ipv6_addr_prefix(&bounced_pfx, &bounced_hdr->saddr, npt->src_pfx_len);
+ if (ipv6_addr_cmp(&bounced_pfx, &npt->src_pfx.in6) == 0)
+ ip6t_npt_map_pfx(npt, &bounced_hdr->saddr);
+ }
+
return XT_CONTINUE;
}
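
Both NPT hunks above perform the same check: take the leading src_pfx_len bits of the bounced inner header's address and rewrite that address only when the prefix matches the configured translation prefix. A userspace sketch of the extraction-and-compare step, mirroring what ipv6_addr_prefix() does (POSIX headers assumed, illustrative only):

/* Copy the top plen bits of an IPv6 address into pfx, zeroing the rest,
 * then compare prefixes bytewise -- the same test the SNPT/DNPT targets
 * apply to the ICMPv6-bounced header.
 */
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

static void addr_prefix(struct in6_addr *pfx, const struct in6_addr *addr,
			unsigned int plen)
{
	unsigned int o = plen >> 3;	/* whole bytes */
	unsigned int b = plen & 7;	/* leftover bits */

	memset(pfx, 0, sizeof(*pfx));
	memcpy(pfx->s6_addr, addr->s6_addr, o);
	if (b)
		pfx->s6_addr[o] = addr->s6_addr[o] & (0xff << (8 - b));
}

int main(void)
{
	struct in6_addr a, p, ap, pp;

	inet_pton(AF_INET6, "2001:db8:1:2::1", &a);
	inet_pton(AF_INET6, "2001:db8:1::", &p);
	addr_prefix(&ap, &a, 48);
	addr_prefix(&pp, &p, 48);
	printf("same /48 prefix: %s\n",
	       memcmp(&ap, &pp, sizeof(ap)) ? "no" : "yes");
	return 0;
}
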
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index da64550a5707..8210ff34ed9b 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -297,9 +297,11 @@ static void dump_ipv6_mac_header(struct nf_log_buf *m,
switch (dev->type) {
case ARPHRD_ETHER:
- nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
- eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
- ntohs(eth_hdr(skb)->h_proto));
+ nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest);
+ nf_log_dump_vlan(m, skb);
+ nf_log_buf_add(m, "MACPROTO=%04x ",
+ ntohs(eth_hdr(skb)->h_proto));
return;
default:
break;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index fb075d9545b9..7e0ce7af8234 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2745,7 +2745,8 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
if (confirm_neigh)
dst_confirm_neigh(dst, daddr);
- mtu = max_t(u32, mtu, IPV6_MIN_MTU);
+ if (mtu < IPV6_MIN_MTU)
+ return;
if (mtu >= dst_mtu(dst))
return;
@@ -5284,9 +5285,10 @@ static int ip6_route_multipath_del(struct fib6_config *cfg,
{
struct fib6_config r_cfg;
struct rtnexthop *rtnh;
+ int last_err = 0;
int remaining;
int attrlen;
- int err = 1, last_err = 0;
+ int err;
remaining = cfg->fc_mp_len;
rtnh = (struct rtnexthop *)cfg->fc_mp;
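
The __ip6_rt_update_pmtu() hunk is a policy change, not a cleanup: a reported MTU below the IPv6 minimum used to be clamped up to IPV6_MIN_MTU and could still shrink the cached route MTU, whereas the new code discards such updates outright. A standalone model of the two behaviours, assuming a single cached MTU value:

/* Old vs. new handling of a sub-minimum PMTU signal (IPv6 minimum is
 * 1280 bytes). Toy model; the kernel operates on dst_entry metrics.
 */
#include <stdio.h>

#define IPV6_MIN_MTU 1280

static unsigned int apply_old(unsigned int cached, unsigned int mtu)
{
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;		/* clamp, then apply */
	return mtu < cached ? mtu : cached;
}

static unsigned int apply_new(unsigned int cached, unsigned int mtu)
{
	if (mtu < IPV6_MIN_MTU)
		return cached;			/* bogus signal: ignore */
	return mtu < cached ? mtu : cached;
}

int main(void)
{
	printf("old: %u\n", apply_old(1500, 600));	/* 1280 */
	printf("new: %u\n", apply_new(1500, 600));	/* 1500 */
	return 0;
}
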
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 305870a72352..8db59f4e5f13 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -458,7 +458,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket is
- * is already accepted it is treated as a connected one below.
+ * already accepted it is treated as a connected one below.
*/
if (fastopen && !fastopen->sk)
break;
@@ -501,7 +501,8 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
- enum tcp_synack_type synack_type)
+ enum tcp_synack_type synack_type,
+ struct sk_buff *syn_skb)
{
struct inet_request_sock *ireq = inet_rsk(req);
struct ipv6_pinfo *np = tcp_inet6_sk(sk);
@@ -509,13 +510,14 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi6 *fl6 = &fl->u.ip6;
struct sk_buff *skb;
int err = -ENOMEM;
+ u8 tclass;
/* First, grab a route. */
if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
IPPROTO_TCP)) == NULL)
goto done;
- skb = tcp_make_synack(sk, dst, req, foc, synack_type);
+ skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
if (skb) {
__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
@@ -527,9 +529,12 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
rcu_read_lock();
opt = ireq->ipv6_opt;
+ tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+ tcp_rsk(req)->syn_tos : np->tclass;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass,
+ err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
+ tclass & ~INET_ECN_MASK,
sk->sk_priority);
rcu_read_unlock();
err = net_xmit_eval(err);
@@ -958,8 +963,8 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,
- priority);
+ ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
+ tclass & ~INET_ECN_MASK, priority);
TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -1067,8 +1072,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
label = ip6_flowlabel(ipv6h);
}
- tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0,
- label, priority);
+ tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
+ ipv6_get_dsfield(ipv6h), label, priority);
#ifdef CONFIG_TCP_MD5SIG
out:
@@ -1121,7 +1126,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
req->ts_recent, sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
- 0, 0, sk->sk_priority);
+ ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
}
@@ -1309,6 +1314,10 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
if (np->repflow)
newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+ /* Set ToS of the new socket based upon the value of the incoming SYN. */
+ if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
+ newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
+
/* Clone native IPv6 options from listening socket (if any)
Yes, keeping reference count would be much more clever,
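
The tcp_ipv6.c hunks above thread a traffic-class value through the SYN-ACK, reset, and ACK paths, and in every case clear the ECN bits with ~INET_ECN_MASK before transmission, so a reflected DSCP can never forge ECN state. A standalone sketch of that masking, assuming the kernel's INET_ECN_MASK value of 0x3:

/* tclass selection as in tcp_v6_send_synack(): echo the SYN's value when
 * tcp_reflect_tos is enabled, else use the socket's, and always strip the
 * two ECN bits.
 */
#include <stdio.h>

#define INET_ECN_MASK 0x3	/* low two bits of the TOS/tclass byte */

static unsigned char pick_tclass(unsigned char syn_tos,
				 unsigned char sock_tclass, int reflect_tos)
{
	unsigned char tclass = reflect_tos ? syn_tos : sock_tclass;

	return tclass & ~INET_ECN_MASK;	/* keep DSCP, drop ECN */
}

int main(void)
{
	/* DSCP 0xa0 with ECT(0) set in the incoming SYN */
	printf("reflected: 0x%02x\n", pick_tclass(0xa2, 0x00, 1)); /* 0xa0 */
	printf("socket:    0x%02x\n", pick_tclass(0xa2, 0x10, 0)); /* 0x10 */
	return 0;
}
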
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index a95af62acb52..d80572074667 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -588,11 +588,11 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
int addr_len)
{
struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr;
+ char uid[sizeof(sa->siucv_user_id)];
struct sock *sk = sock->sk;
struct iucv_sock *iucv;
int err = 0;
struct net_device *dev;
- char uid[9];
/* Verify the input sockaddr */
if (addr_len < sizeof(struct sockaddr_iucv) ||
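
The iucv_sock_bind() hunk replaces a hard-coded 9-byte buffer with one sized from the sockaddr field itself, so the local copy can never drift out of sync with the field definition. A small standalone illustration of the sizeof-field idiom (hypothetical struct layout):

/* Size a local copy from the source field, not a magic number. */
#include <stdio.h>
#include <string.h>

struct sockaddr_demo {
	char user_id[8];	/* fixed-width, not NUL-terminated */
};

int main(void)
{
	struct sockaddr_demo sa = { .user_id = "LNXUSER1" };
	char uid[sizeof(sa.user_id)];	/* tracks the field automatically */

	memcpy(uid, sa.user_id, sizeof(uid));
	printf("uid=%.*s (len %zu)\n", (int)sizeof(uid), uid, sizeof(uid));
	return 0;
}
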
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
index cd2e468852e7..349c6ac3313f 100644
--- a/net/iucv/iucv.c
+++ b/net/iucv/iucv.c
@@ -1116,10 +1116,9 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (msg->flags & IUCV_IPRMDATA)
return iucv_message_receive_iprmdata(path, msg, flags,
buffer, size, residual);
- if (cpumask_empty(&iucv_buffer_cpumask)) {
- rc = -EIO;
- goto out;
- }
+ if (cpumask_empty(&iucv_buffer_cpumask))
+ return -EIO;
+
parm = iucv_param[smp_processor_id()];
memset(parm, 0, sizeof(union iucv_param));
parm->db.ipbfadr1 = (u32)(addr_t) buffer;
@@ -1135,7 +1134,6 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg,
if (residual)
*residual = parm->db.ipbfln1f;
}
-out:
return rc;
}
EXPORT_SYMBOL(__iucv_message_receive);
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
index 399a7e5db2f4..cf8f27071d3f 100644
--- a/net/l2tp/Makefile
+++ b/net/l2tp/Makefile
@@ -5,6 +5,8 @@
obj-$(CONFIG_L2TP) += l2tp_core.o
+CFLAGS_l2tp_core.o += -I$(src)
+
# Build l2tp as modules if L2TP is M
obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o
obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 701fc72ad9f4..7be5103ff2a8 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -61,6 +61,10 @@
#include <linux/atomic.h>
#include "l2tp_core.h"
+#include "trace.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define L2TP_DRV_VERSION "V2.0"
@@ -116,11 +120,6 @@ static bool l2tp_sk_is_v6(struct sock *sk)
}
#endif
-static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
-{
- return sk->sk_user_data;
-}
-
static inline struct l2tp_net *l2tp_pernet(const struct net *net)
{
return net_generic(net, l2tp_net_id);
@@ -151,23 +150,30 @@ l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
{
+ trace_free_tunnel(tunnel);
sock_put(tunnel->sock);
/* the tunnel is freed in the socket destructor */
}
static void l2tp_session_free(struct l2tp_session *session)
{
- struct l2tp_tunnel *tunnel = session->tunnel;
+ trace_free_session(session);
+ if (session->tunnel)
+ l2tp_tunnel_dec_refcount(session->tunnel);
+ kfree(session);
+}
- if (tunnel) {
+struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk)
+{
+ struct l2tp_tunnel *tunnel = sk->sk_user_data;
+
+ if (tunnel)
if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
- goto out;
- l2tp_tunnel_dec_refcount(tunnel);
- }
+ return NULL;
-out:
- kfree(session);
+ return tunnel;
}
+EXPORT_SYMBOL_GPL(l2tp_sk_to_tunnel);
void l2tp_tunnel_inc_refcount(struct l2tp_tunnel *tunnel)
{
@@ -381,6 +387,8 @@ int l2tp_session_register(struct l2tp_session *session,
hlist_add_head(&session->hlist, head);
write_unlock_bh(&tunnel->hlist_lock);
+ trace_register_session(session);
+
return 0;
err_tlock_pnlock:
@@ -409,10 +417,6 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
if (L2TP_SKB_CB(skbp)->ns > ns) {
__skb_queue_before(&session->reorder_q, skbp, skb);
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
- session->name, ns, L2TP_SKB_CB(skbp)->ns,
- skb_queue_len(&session->reorder_q));
atomic_long_inc(&session->stats.rx_oos_packets);
goto out;
}
@@ -445,9 +449,7 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
/* Bump our Nr */
session->nr++;
session->nr &= session->nr_max;
-
- l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated nr to %hu\n",
- session->name, session->nr);
+ trace_session_seqnum_update(session);
}
/* call private receive handler */
@@ -472,37 +474,27 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
start:
spin_lock_bh(&session->reorder_q.lock);
skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
- if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
+ struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
+
+ /* If the packet has been pending on the queue for too long, discard it */
+ if (time_after(jiffies, cb->expires)) {
atomic_long_inc(&session->stats.rx_seq_discards);
atomic_long_inc(&session->stats.rx_errors);
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ trace_session_pkt_expired(session, cb->ns);
session->reorder_skip = 1;
__skb_unlink(skb, &session->reorder_q);
kfree_skb(skb);
continue;
}
- if (L2TP_SKB_CB(skb)->has_seq) {
+ if (cb->has_seq) {
if (session->reorder_skip) {
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: advancing nr to next pkt: %u -> %u",
- session->name, session->nr,
- L2TP_SKB_CB(skb)->ns);
session->reorder_skip = 0;
- session->nr = L2TP_SKB_CB(skb)->ns;
+ session->nr = cb->ns;
+ trace_session_seqnum_reset(session);
}
- if (L2TP_SKB_CB(skb)->ns != session->nr) {
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: holding oos pkt %u len %d, waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ if (cb->ns != session->nr)
goto out;
- }
}
__skb_unlink(skb, &session->reorder_q);
@@ -535,14 +527,13 @@ static int l2tp_seq_check_rx_window(struct l2tp_session *session, u32 nr)
*/
static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
{
- if (!l2tp_seq_check_rx_window(session, L2TP_SKB_CB(skb)->ns)) {
+ struct l2tp_skb_cb *cb = L2TP_SKB_CB(skb);
+
+ if (!l2tp_seq_check_rx_window(session, cb->ns)) {
/* Packet sequence number is outside allowed window.
* Discard it.
*/
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: pkt %u len %d discarded, outside window, nr=%u\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr);
+ trace_session_pkt_outside_rx_window(session, cb->ns);
goto discard;
}
@@ -559,10 +550,10 @@ static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
* is seen. After nr_oos_count_max in-sequence packets, reset the
* sequence number to re-enable packet reception.
*/
- if (L2TP_SKB_CB(skb)->ns == session->nr) {
+ if (cb->ns == session->nr) {
skb_queue_tail(&session->reorder_q, skb);
} else {
- u32 nr_oos = L2TP_SKB_CB(skb)->ns;
+ u32 nr_oos = cb->ns;
u32 nr_next = (session->nr_oos + 1) & session->nr_max;
if (nr_oos == nr_next)
@@ -573,17 +564,10 @@ static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
session->nr_oos = nr_oos;
if (session->nr_oos_count > session->nr_oos_count_max) {
session->reorder_skip = 1;
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: %d oos packets received. Resetting sequence numbers\n",
- session->name, session->nr_oos_count);
}
if (!session->reorder_skip) {
atomic_long_inc(&session->stats.rx_seq_discards);
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",
- session->name, L2TP_SKB_CB(skb)->ns,
- L2TP_SKB_CB(skb)->length, session->nr,
- skb_queue_len(&session->reorder_q));
+ trace_session_pkt_oos(session, cb->ns);
goto discard;
}
skb_queue_tail(&session->reorder_q, skb);
@@ -660,16 +644,14 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
int length)
{
struct l2tp_tunnel *tunnel = session->tunnel;
- u32 ns = 0, nr = 0;
int offset;
/* Parse and check optional cookie */
if (session->peer_cookie_len > 0) {
if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
- l2tp_info(tunnel, L2TP_MSG_DATA,
- "%s: cookie mismatch (%u/%u). Discarding.\n",
- tunnel->name, tunnel->tunnel_id,
- session->session_id);
+ pr_warn_ratelimited("%s: cookie mismatch (%u/%u). Discarding.\n",
+ tunnel->name, tunnel->tunnel_id,
+ session->session_id);
atomic_long_inc(&session->stats.rx_cookie_discards);
goto discard;
}
@@ -686,32 +668,21 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
L2TP_SKB_CB(skb)->has_seq = 0;
if (tunnel->version == L2TP_HDR_VER_2) {
if (hdrflags & L2TP_HDRFLAG_S) {
- ns = ntohs(*(__be16 *)ptr);
- ptr += 2;
- nr = ntohs(*(__be16 *)ptr);
- ptr += 2;
-
/* Store L2TP info in the skb */
- L2TP_SKB_CB(skb)->ns = ns;
+ L2TP_SKB_CB(skb)->ns = ntohs(*(__be16 *)ptr);
L2TP_SKB_CB(skb)->has_seq = 1;
+ ptr += 2;
+ /* Skip past nr in the header */
+ ptr += 2;
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: recv data ns=%u, nr=%u, session nr=%u\n",
- session->name, ns, nr, session->nr);
}
} else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
u32 l2h = ntohl(*(__be32 *)ptr);
if (l2h & 0x40000000) {
- ns = l2h & 0x00ffffff;
-
/* Store L2TP info in the skb */
- L2TP_SKB_CB(skb)->ns = ns;
+ L2TP_SKB_CB(skb)->ns = l2h & 0x00ffffff;
L2TP_SKB_CB(skb)->has_seq = 1;
-
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: recv data ns=%u, session nr=%u\n",
- session->name, ns, session->nr);
}
ptr += 4;
}
@@ -722,9 +693,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* configure it so.
*/
if (!session->lns_mode && !session->send_seq) {
- l2tp_info(session, L2TP_MSG_SEQ,
- "%s: requested to enable seq numbers by LNS\n",
- session->name);
+ trace_session_seqnum_lns_enable(session);
session->send_seq = 1;
l2tp_session_set_header_len(session, tunnel->version);
}
@@ -733,9 +702,8 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* If user has configured mandatory sequence numbers, discard.
*/
if (session->recv_seq) {
- l2tp_warn(session, L2TP_MSG_SEQ,
- "%s: recv data has no seq numbers when required. Discarding.\n",
- session->name);
+ pr_warn_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
atomic_long_inc(&session->stats.rx_seq_discards);
goto discard;
}
@@ -746,15 +714,12 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
* LAC is broken. Discard the frame.
*/
if (!session->lns_mode && session->send_seq) {
- l2tp_info(session, L2TP_MSG_SEQ,
- "%s: requested to disable seq numbers by LNS\n",
- session->name);
+ trace_session_seqnum_lns_disable(session);
session->send_seq = 0;
l2tp_session_set_header_len(session, tunnel->version);
} else if (session->send_seq) {
- l2tp_warn(session, L2TP_MSG_SEQ,
- "%s: recv data has no seq numbers when required. Discarding.\n",
- session->name);
+ pr_warn_ratelimited("%s: recv data has no seq numbers when required. Discarding.\n",
+ session->name);
atomic_long_inc(&session->stats.rx_seq_discards);
goto discard;
}
@@ -816,9 +781,6 @@ static void l2tp_session_queue_purge(struct l2tp_session *session)
{
struct sk_buff *skb = NULL;
- if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
- return;
-
while ((skb = skb_dequeue(&session->reorder_q))) {
atomic_long_inc(&session->stats.rx_errors);
kfree_skb(skb);
@@ -847,22 +809,11 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
/* Short packet? */
if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
- l2tp_info(tunnel, L2TP_MSG_DATA,
- "%s: recv short packet (len=%d)\n",
- tunnel->name, skb->len);
+ pr_warn_ratelimited("%s: recv short packet (len=%d)\n",
+ tunnel->name, skb->len);
goto error;
}
- /* Trace packet contents, if enabled */
- if (tunnel->debug & L2TP_MSG_DATA) {
- length = min(32u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto error;
-
- pr_debug("%s: recv\n", tunnel->name);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
- }
-
/* Point to L2TP header */
optr = skb->data;
ptr = skb->data;
@@ -873,9 +824,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
/* Check protocol version */
version = hdrflags & L2TP_HDR_VER_MASK;
if (version != tunnel->version) {
- l2tp_info(tunnel, L2TP_MSG_DATA,
- "%s: recv protocol version mismatch: got %d expected %d\n",
- tunnel->name, version, tunnel->version);
+ pr_warn_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n",
+ tunnel->name, version, tunnel->version);
goto error;
}
@@ -883,12 +833,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
length = skb->len;
/* If type is control packet, it is handled by userspace. */
- if (hdrflags & L2TP_HDRFLAG_T) {
- l2tp_dbg(tunnel, L2TP_MSG_DATA,
- "%s: recv control packet, len=%d\n",
- tunnel->name, length);
+ if (hdrflags & L2TP_HDRFLAG_T)
goto error;
- }
/* Skip flags */
ptr += 2;
@@ -917,9 +863,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
l2tp_session_dec_refcount(session);
/* Not found? Pass to userspace to deal with */
- l2tp_info(tunnel, L2TP_MSG_DATA,
- "%s: no session found (%u/%u). Passing up.\n",
- tunnel->name, tunnel_id, session_id);
+ pr_warn_ratelimited("%s: no session found (%u/%u). Passing up.\n",
+ tunnel->name, tunnel_id, session_id);
goto error;
}
@@ -949,12 +894,17 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct l2tp_tunnel *tunnel;
+ /* Note that this is called from the encap_rcv hook inside an
+ * RCU-protected region, but without the socket being locked.
+ * Hence we use rcu_dereference_sk_user_data to access the
+ * tunnel data structure rather than the usual l2tp_sk_to_tunnel
+ * accessor function.
+ */
tunnel = rcu_dereference_sk_user_data(sk);
if (!tunnel)
goto pass_up;
-
- l2tp_dbg(tunnel, L2TP_MSG_DATA, "%s: received %d bytes\n",
- tunnel->name, skb->len);
+ if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC))
+ goto pass_up;
if (l2tp_udp_recv_core(tunnel, skb))
goto pass_up;
@@ -993,8 +943,7 @@ static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
*bufp++ = 0;
session->ns++;
session->ns &= 0xffff;
- l2tp_dbg(session, L2TP_MSG_SEQ, "%s: updated ns to %u\n",
- session->name, session->ns);
+ trace_session_seqnum_update(session);
}
return bufp - optr;
@@ -1030,9 +979,7 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
l2h = 0x40000000 | session->ns;
session->ns++;
session->ns &= 0xffffff;
- l2tp_dbg(session, L2TP_MSG_SEQ,
- "%s: updated ns to %u\n",
- session->name, session->ns);
+ trace_session_seqnum_update(session);
}
*((__be32 *)bufp) = htonl(l2h);
@@ -1042,74 +989,39 @@ static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
return bufp - optr;
}
-static void l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
- struct flowi *fl, size_t data_len)
+/* Queue the packet to IP for output: tunnel socket lock must be held */
+static int l2tp_xmit_queue(struct l2tp_tunnel *tunnel, struct sk_buff *skb, struct flowi *fl)
{
- struct l2tp_tunnel *tunnel = session->tunnel;
- unsigned int len = skb->len;
- int error;
-
- /* Debug */
- if (session->send_seq)
- l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes, ns=%u\n",
- session->name, data_len, session->ns - 1);
- else
- l2tp_dbg(session, L2TP_MSG_DATA, "%s: send %zd bytes\n",
- session->name, data_len);
-
- if (session->debug & L2TP_MSG_DATA) {
- int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
- unsigned char *datap = skb->data + uhlen;
-
- pr_debug("%s: xmit\n", session->name);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
- datap, min_t(size_t, 32, len - uhlen));
- }
+ int err;
- /* Queue the packet to IP for output */
skb->ignore_df = 1;
skb_dst_drop(skb);
#if IS_ENABLED(CONFIG_IPV6)
if (l2tp_sk_is_v6(tunnel->sock))
- error = inet6_csk_xmit(tunnel->sock, skb, NULL);
+ err = inet6_csk_xmit(tunnel->sock, skb, NULL);
else
#endif
- error = ip_queue_xmit(tunnel->sock, skb, fl);
+ err = ip_queue_xmit(tunnel->sock, skb, fl);
- /* Update stats */
- if (error >= 0) {
- atomic_long_inc(&tunnel->stats.tx_packets);
- atomic_long_add(len, &tunnel->stats.tx_bytes);
- atomic_long_inc(&session->stats.tx_packets);
- atomic_long_add(len, &session->stats.tx_bytes);
- } else {
- atomic_long_inc(&tunnel->stats.tx_errors);
- atomic_long_inc(&session->stats.tx_errors);
- }
+ return err >= 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}
-/* If caller requires the skb to have a ppp header, the header must be
- * inserted in the skb data before calling this function.
- */
-int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
+static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, unsigned int *len)
{
- int data_len = skb->len;
struct l2tp_tunnel *tunnel = session->tunnel;
+ unsigned int data_len = skb->len;
struct sock *sk = tunnel->sock;
- struct flowi *fl;
- struct udphdr *uh;
- struct inet_sock *inet;
- int headroom;
- int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
- int udp_len;
+ int headroom, uhlen, udp_len;
int ret = NET_XMIT_SUCCESS;
+ struct inet_sock *inet;
+ struct udphdr *uh;
/* Check that there's enough headroom in the skb to insert IP,
* UDP and L2TP headers. If not enough, expand it to
* make room. Adjust truesize.
*/
- headroom = NET_SKB_PAD + sizeof(struct iphdr) +
- uhlen + hdr_len;
+ uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(*uh) : 0;
+ headroom = NET_SKB_PAD + sizeof(struct iphdr) + uhlen + session->hdr_len;
if (skb_cow_head(skb, headroom)) {
kfree_skb(skb);
return NET_XMIT_DROP;
@@ -1117,14 +1029,13 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
/* Setup L2TP header */
if (tunnel->version == L2TP_HDR_VER_2)
- l2tp_build_l2tpv2_header(session, __skb_push(skb, hdr_len));
+ l2tp_build_l2tpv2_header(session, __skb_push(skb, session->hdr_len));
else
- l2tp_build_l2tpv3_header(session, __skb_push(skb, hdr_len));
+ l2tp_build_l2tpv3_header(session, __skb_push(skb, session->hdr_len));
/* Reset skb netfilter state */
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
- IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
- IPSKB_REROUTED);
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
nf_reset_ct(skb);
bh_lock_sock(sk);
@@ -1143,8 +1054,12 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
goto out_unlock;
}
+ /* Report transmitted length before we add encap header, which keeps
+ * statistics consistent for both UDP and IP encap tx/rx paths.
+ */
+ *len = skb->len;
+
inet = inet_sk(sk);
- fl = &inet->cork.fl;
switch (tunnel->encap) {
case L2TP_ENCAPTYPE_UDP:
/* Setup UDP header */
@@ -1153,7 +1068,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
uh = udp_hdr(skb);
uh->source = inet->inet_sport;
uh->dest = inet->inet_dport;
- udp_len = uhlen + hdr_len + data_len;
+ udp_len = uhlen + session->hdr_len + data_len;
uh->len = htons(udp_len);
/* Calculate UDP checksum if configured to do so */
@@ -1172,12 +1087,34 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
break;
}
- l2tp_xmit_core(session, skb, fl, data_len);
+ ret = l2tp_xmit_queue(tunnel, skb, &inet->cork.fl);
+
out_unlock:
bh_unlock_sock(sk);
return ret;
}
+
+/* If caller requires the skb to have a ppp header, the header must be
+ * inserted in the skb data before calling this function.
+ */
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb)
+{
+ unsigned int len = 0;
+ int ret;
+
+ ret = l2tp_xmit_core(session, skb, &len);
+ if (ret == NET_XMIT_SUCCESS) {
+ atomic_long_inc(&session->tunnel->stats.tx_packets);
+ atomic_long_add(len, &session->tunnel->stats.tx_bytes);
+ atomic_long_inc(&session->stats.tx_packets);
+ atomic_long_add(len, &session->stats.tx_bytes);
+ } else {
+ atomic_long_inc(&session->tunnel->stats.tx_errors);
+ atomic_long_inc(&session->stats.tx_errors);
+ }
+ return ret;
+}
EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
/*****************************************************************************
@@ -1190,13 +1127,11 @@ EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
*/
static void l2tp_tunnel_destruct(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
if (!tunnel)
goto end;
- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing...\n", tunnel->name);
-
/* Disable udp encapsulation */
switch (tunnel->encap) {
case L2TP_ENCAPTYPE_UDP:
@@ -1255,34 +1190,16 @@ static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
struct hlist_node *tmp;
struct l2tp_session *session;
- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: closing all sessions...\n",
- tunnel->name);
-
write_lock_bh(&tunnel->hlist_lock);
tunnel->acpt_newsess = false;
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
again:
hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
session = hlist_entry(walk, struct l2tp_session, hlist);
-
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: closing session\n", session->name);
-
hlist_del_init(&session->hlist);
- if (test_and_set_bit(0, &session->dead))
- goto again;
-
write_unlock_bh(&tunnel->hlist_lock);
-
- l2tp_session_unhash(session);
- l2tp_session_queue_purge(session);
-
- if (session->session_close)
- (*session->session_close)(session);
-
- l2tp_session_dec_refcount(session);
-
+ l2tp_session_delete(session);
write_lock_bh(&tunnel->hlist_lock);
/* Now restart from the beginning of this hash
@@ -1299,7 +1216,7 @@ again:
/* Tunnel socket destroy hook for UDP encapsulation */
static void l2tp_udp_encap_destroy(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = l2tp_tunnel(sk);
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
if (tunnel)
l2tp_tunnel_delete(tunnel);
@@ -1464,7 +1381,7 @@ out:
static struct lock_class_key l2tp_socket_class;
-int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
+int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
{
struct l2tp_tunnel *tunnel = NULL;
@@ -1483,16 +1400,12 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
tunnel->version = version;
tunnel->tunnel_id = tunnel_id;
tunnel->peer_tunnel_id = peer_tunnel_id;
- tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
tunnel->magic = L2TP_TUNNEL_MAGIC;
sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
rwlock_init(&tunnel->hlist_lock);
tunnel->acpt_newsess = true;
- if (cfg)
- tunnel->debug = cfg->debug;
-
tunnel->encap = encap;
refcount_set(&tunnel->ref_count, 1);
@@ -1597,6 +1510,8 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
"l2tp_sock");
sk->sk_allocation = GFP_ATOMIC;
+ trace_register_tunnel(tunnel);
+
if (tunnel->fd >= 0)
sockfd_put(sock);
@@ -1617,6 +1532,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
{
if (!test_and_set_bit(0, &tunnel->dead)) {
+ trace_delete_tunnel(tunnel);
l2tp_tunnel_inc_refcount(tunnel);
queue_work(l2tp_wq, &tunnel->del_work);
}
@@ -1628,6 +1544,7 @@ void l2tp_session_delete(struct l2tp_session *session)
if (test_and_set_bit(0, &session->dead))
return;
+ trace_delete_session(session);
l2tp_session_unhash(session);
l2tp_session_queue_purge(session);
if (session->session_close)
@@ -1686,12 +1603,8 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
INIT_HLIST_NODE(&session->hlist);
INIT_HLIST_NODE(&session->global_hlist);
- /* Inherit debug options from tunnel */
- session->debug = tunnel->debug;
-
if (cfg) {
session->pwtype = cfg->pw_type;
- session->debug = cfg->debug;
session->send_seq = cfg->send_seq;
session->recv_seq = cfg->recv_seq;
session->lns_mode = cfg->lns_mode;
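
Taken together, the l2tp_core.c transmit changes split the old l2tp_xmit_skb() into a core that returns a NET_XMIT_* verdict plus the pre-encap payload length, and a thin wrapper that owns every counter update; the header length now comes from the session itself instead of a caller-supplied hdr_len. A standalone model of that split, with dummy counters standing in for the tunnel/session stats:

/* Transmit-core/stats-wrapper split, modelled after the refactored
 * l2tp_xmit_core()/l2tp_xmit_skb(). Toy code: no skbs, no sockets.
 */
#include <stdio.h>

#define NET_XMIT_SUCCESS 0
#define NET_XMIT_DROP    1

static long tx_packets, tx_bytes, tx_errors;

static int xmit_core(unsigned int payload_len, unsigned int *len)
{
	/* Report length before (hypothetical) encap headers are added so
	 * tx byte counters stay comparable with the rx side.
	 */
	*len = payload_len;
	return payload_len ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
}

static int xmit_skb(unsigned int payload_len)
{
	unsigned int len = 0;
	int ret = xmit_core(payload_len, &len);

	if (ret == NET_XMIT_SUCCESS) {
		tx_packets++;
		tx_bytes += len;
	} else {
		tx_errors++;
	}
	return ret;
}

int main(void)
{
	xmit_skb(1400);
	xmit_skb(0);
	printf("pkts=%ld bytes=%ld errs=%ld\n",
	       tx_packets, tx_bytes, tx_errors);
	return 0;
}
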
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 3468d6b177a0..cb21d906343e 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -51,7 +51,6 @@ struct l2tp_session_cfg {
unsigned int lns_mode:1; /* behave as LNS?
* LAC enables sequence numbers under LNS control.
*/
- int debug; /* bitmask of debug message categories */
u16 l2specific_type; /* Layer 2 specific type */
u8 cookie[8]; /* optional cookie */
int cookie_len; /* 0, 4 or 8 bytes */
@@ -66,6 +65,7 @@ struct l2tp_session_cfg {
* Is linked into a per-tunnel session hashlist; and in the case of an L2TPv3 session into
* an additional per-net ("global") hashlist.
*/
+#define L2TP_SESSION_NAME_MAX 32
struct l2tp_session {
int magic; /* should be L2TP_SESSION_MAGIC */
long dead;
@@ -90,14 +90,13 @@ struct l2tp_session {
struct hlist_node hlist; /* hash list node */
refcount_t ref_count;
- char name[32]; /* for logging */
+ char name[L2TP_SESSION_NAME_MAX]; /* for logging */
char ifname[IFNAMSIZ];
unsigned int recv_seq:1; /* expect receive packets with sequence numbers? */
unsigned int send_seq:1; /* send packets with sequence numbers? */
unsigned int lns_mode:1; /* behave as LNS?
* LAC enables sequence numbers under LNS control.
*/
- int debug; /* bitmask of debug message categories */
int reorder_timeout; /* configured reorder timeout (in jiffies) */
int reorder_skip; /* set if skip to next nr */
enum l2tp_pwtype pwtype;
@@ -131,7 +130,6 @@ struct l2tp_session {
/* L2TP tunnel configuration */
struct l2tp_tunnel_cfg {
- int debug; /* bitmask of debug message categories */
enum l2tp_encap_type encap;
/* Used only for kernel-created sockets */
@@ -154,6 +152,7 @@ struct l2tp_tunnel_cfg {
* Maintains a hashlist of sessions belonging to the tunnel instance.
* Is linked into a per-net list of tunnels.
*/
+#define L2TP_TUNNEL_NAME_MAX 20
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
@@ -170,8 +169,7 @@ struct l2tp_tunnel {
u32 peer_tunnel_id;
int version; /* 2=>L2TPv2, 3=>L2TPv3 */
- char name[20]; /* for logging */
- int debug; /* bitmask of debug message categories */
+ char name[L2TP_TUNNEL_NAME_MAX]; /* for logging */
enum l2tp_encap_type encap;
struct l2tp_stats stats;
@@ -237,7 +235,7 @@ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
* Creation of a new instance is a two-step process: create, then register.
* Destruction is triggered using the *_delete functions, and completes asynchronously.
*/
-int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
+int l2tp_tunnel_create(int fd, int version, u32 tunnel_id,
u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
struct l2tp_tunnel **tunnelp);
int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
@@ -263,8 +261,7 @@ int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
/* Transmit path helpers for sending packets over the tunnel socket. */
void l2tp_session_set_header_len(struct l2tp_session *session, int version);
-int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
- int hdr_len);
+int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb);
/* Pseudowire management.
* Pseudowires should register with l2tp core on module init, and unregister
@@ -276,6 +273,11 @@ void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
/* IOCTL helper for IP encap modules. */
int l2tp_ioctl(struct sock *sk, int cmd, unsigned long arg);
+/* Extract the tunnel structure from a socket's sk_user_data pointer,
+ * validating the tunnel magic feather.
+ */
+struct l2tp_tunnel *l2tp_sk_to_tunnel(struct sock *sk);
+
static inline int l2tp_get_l2specific_len(struct l2tp_session *session)
{
switch (session->l2specific_type) {
@@ -337,19 +339,6 @@ static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, str
return 0;
}
-#define l2tp_printk(ptr, type, func, fmt, ...) \
-do { \
- if (((ptr)->debug) & (type)) \
- func(fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define l2tp_warn(ptr, type, fmt, ...) \
- l2tp_printk(ptr, type, pr_warn, fmt, ##__VA_ARGS__)
-#define l2tp_info(ptr, type, fmt, ...) \
- l2tp_printk(ptr, type, pr_info, fmt, ##__VA_ARGS__)
-#define l2tp_dbg(ptr, type, fmt, ...) \
- l2tp_printk(ptr, type, pr_debug, fmt, ##__VA_ARGS__)
-
#define MODULE_ALIAS_L2TP_PWTYPE(type) \
MODULE_ALIAS("net-l2tp-type-" __stringify(type))
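
l2tp_sk_to_tunnel(), declared just above, hands back the socket's sk_user_data pointer only after the magic value checks out, so every caller gets the type-confusion guard for free rather than open-coding it. A userspace sketch of the same validated-accessor pattern (hypothetical type names):

/* Opaque user-data pointer gated behind a magic-number check. */
#include <stdio.h>

#define TUNNEL_MAGIC 0x42114211u

struct tunnel {
	unsigned int magic;
	const char *name;
};

struct sock_demo {
	void *user_data;	/* may point at anything */
};

static struct tunnel *sk_to_tunnel(struct sock_demo *sk)
{
	struct tunnel *t = sk->user_data;

	if (t && t->magic != TUNNEL_MAGIC)
		return NULL;	/* wrong type: refuse to hand it back */
	return t;
}

int main(void)
{
	struct tunnel t = { .magic = TUNNEL_MAGIC, .name = "tunl 1" };
	struct sock_demo sk = { .user_data = &t };
	struct tunnel *res = sk_to_tunnel(&sk);

	printf("ok: %s\n", res ? res->name : "(null)");
	t.magic = 0;
	printf("bad magic: %p\n", (void *)sk_to_tunnel(&sk));
	return 0;
}
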
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 96cb9601c21b..bca75bef8282 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -167,7 +167,7 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
tunnel->sock ? refcount_read(&tunnel->sock->sk_refcnt) : 0,
refcount_read(&tunnel->ref_count));
seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
- tunnel->debug,
+ 0,
atomic_long_read(&tunnel->stats.tx_packets),
atomic_long_read(&tunnel->stats.tx_bytes),
atomic_long_read(&tunnel->stats.tx_errors),
@@ -192,7 +192,7 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
session->recv_seq ? 'R' : '-',
session->send_seq ? 'S' : '-',
session->lns_mode ? "LNS" : "LAC",
- session->debug,
+ 0,
jiffies_to_msecs(session->reorder_timeout));
seq_printf(m, " offset 0 l2specific %hu/%hu\n",
session->l2specific_type, l2tp_get_l2specific_len(session));
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 7ed2b4eced94..6cd97c75445c 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -76,7 +76,7 @@ static netdev_tx_t l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev
struct l2tp_eth *priv = netdev_priv(dev);
struct l2tp_session *session = priv->session;
unsigned int len = skb->len;
- int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
+ int ret = l2tp_xmit_skb(session, skb);
if (likely(ret == NET_XMIT_SUCCESS)) {
atomic_long_add(len, &priv->tx_bytes);
@@ -128,17 +128,6 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
struct net_device *dev;
struct l2tp_eth *priv;
- if (session->debug & L2TP_MSG_DATA) {
- unsigned int length;
-
- length = min(32u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto error;
-
- pr_debug("%s: eth recv\n", session->name);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
- }
-
if (!pskb_may_pull(skb, ETH_HLEN))
goto error;
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index df2a35b5714a..97ae1255fcb6 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -118,7 +118,6 @@ static int l2tp_ip_recv(struct sk_buff *skb)
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
struct iphdr *iph;
- int length;
if (!pskb_may_pull(skb, 4))
goto discard;
@@ -147,20 +146,6 @@ static int l2tp_ip_recv(struct sk_buff *skb)
if (!tunnel)
goto discard_sess;
- /* Trace packet contents, if enabled */
- if (tunnel->debug & L2TP_MSG_DATA) {
- length = min(32u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto discard_sess;
-
- /* Point to L2TP header */
- optr = skb->data;
- ptr = skb->data;
- ptr += 4;
- pr_debug("%s: ip recv\n", tunnel->name);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
- }
-
if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
goto discard_sess;
@@ -248,8 +233,8 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
static void l2tp_ip_destroy_sock(struct sock *sk)
{
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
struct sk_buff *skb;
- struct l2tp_tunnel *tunnel = sk->sk_user_data;
while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
kfree_skb(skb);
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index bc757bc7e264..e5e5036257b0 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -131,7 +131,6 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
struct l2tp_session *session;
struct l2tp_tunnel *tunnel = NULL;
struct ipv6hdr *iph;
- int length;
if (!pskb_may_pull(skb, 4))
goto discard;
@@ -160,20 +159,6 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
if (!tunnel)
goto discard_sess;
- /* Trace packet contents, if enabled */
- if (tunnel->debug & L2TP_MSG_DATA) {
- length = min(32u, skb->len);
- if (!pskb_may_pull(skb, length))
- goto discard_sess;
-
- /* Point to L2TP header */
- optr = skb->data;
- ptr = skb->data;
- ptr += 4;
- pr_debug("%s: ip recv\n", tunnel->name);
- print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
- }
-
if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
goto discard_sess;
@@ -262,7 +247,7 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)
static void l2tp_ip6_destroy_sock(struct sock *sk)
{
- struct l2tp_tunnel *tunnel = sk->sk_user_data;
+ struct l2tp_tunnel *tunnel = l2tp_sk_to_tunnel(sk);
lock_sock(sk);
ip6_flush_pending_frames(sk);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index def78eebca4c..83956c9ee1fc 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -229,14 +229,11 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
goto out;
}
- if (attrs[L2TP_ATTR_DEBUG])
- cfg.debug = nla_get_u32(attrs[L2TP_ATTR_DEBUG]);
-
ret = -EINVAL;
switch (cfg.encap) {
case L2TP_ENCAPTYPE_UDP:
case L2TP_ENCAPTYPE_IP:
- ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
+ ret = l2tp_tunnel_create(fd, proto_version, tunnel_id,
peer_tunnel_id, &cfg, &tunnel);
break;
}
@@ -307,9 +304,6 @@ static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info
goto out;
}
- if (info->attrs[L2TP_ATTR_DEBUG])
- tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
-
ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
tunnel, L2TP_CMD_TUNNEL_MODIFY);
@@ -400,7 +394,7 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
- nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
+ nla_put_u32(skb, L2TP_ATTR_DEBUG, 0) ||
nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
goto nla_put_failure;
@@ -426,6 +420,9 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla
nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&tunnel->stats.rx_seq_discards),
L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_COOKIE_DISCARDS,
+ atomic_long_read(&tunnel->stats.rx_cookie_discards),
+ L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&tunnel->stats.rx_oos_packets),
L2TP_ATTR_STATS_PAD) ||
@@ -605,9 +602,6 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
}
- if (info->attrs[L2TP_ATTR_DEBUG])
- cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
-
if (info->attrs[L2TP_ATTR_RECV_SEQ])
cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
@@ -689,9 +683,6 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
goto out;
}
- if (info->attrs[L2TP_ATTR_DEBUG])
- session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
-
if (info->attrs[L2TP_ATTR_RECV_SEQ])
session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
@@ -730,7 +721,7 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id) ||
- nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
+ nla_put_u32(skb, L2TP_ATTR_DEBUG, 0) ||
nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype))
goto nla_put_failure;
@@ -772,6 +763,9 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
nla_put_u64_64bit(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
atomic_long_read(&session->stats.rx_seq_discards),
L2TP_ATTR_STATS_PAD) ||
+ nla_put_u64_64bit(skb, L2TP_ATTR_RX_COOKIE_DISCARDS,
+ atomic_long_read(&session->stats.rx_cookie_discards),
+ L2TP_ATTR_STATS_PAD) ||
nla_put_u64_64bit(skb, L2TP_ATTR_RX_OOS_PACKETS,
atomic_long_read(&session->stats.rx_oos_packets),
L2TP_ATTR_STATS_PAD) ||
@@ -920,7 +914,7 @@ static const struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
},
};
-static const struct genl_ops l2tp_nl_ops[] = {
+static const struct genl_small_ops l2tp_nl_ops[] = {
{
.cmd = L2TP_CMD_NOOP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -987,8 +981,8 @@ static struct genl_family l2tp_nl_family __ro_after_init = {
.policy = l2tp_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = l2tp_nl_ops,
- .n_ops = ARRAY_SIZE(l2tp_nl_ops),
+ .small_ops = l2tp_nl_ops,
+ .n_small_ops = ARRAY_SIZE(l2tp_nl_ops),
.mcgrps = l2tp_multicast_group,
.n_mcgrps = ARRAY_SIZE(l2tp_multicast_group),
};
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 13c3153b40d6..aea85f91f059 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -237,17 +237,9 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
if (sk->sk_state & PPPOX_BOUND) {
struct pppox_sock *po;
- l2tp_dbg(session, L2TP_MSG_DATA,
- "%s: recv %d byte data frame, passing to ppp\n",
- session->name, data_len);
-
po = pppox_sk(sk);
ppp_input(&po->chan, skb);
} else {
- l2tp_dbg(session, L2TP_MSG_DATA,
- "%s: recv %d byte data frame, passing to L2TP socket\n",
- session->name, data_len);
-
if (sock_queue_rcv_skb(sk, skb) < 0) {
atomic_long_inc(&session->stats.rx_errors);
kfree_skb(skb);
@@ -259,7 +251,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
no_sock:
rcu_read_unlock();
- l2tp_info(session, L2TP_MSG_DATA, "%s: no socket\n", session->name);
+ pr_warn_ratelimited("%s: no socket in recv\n", session->name);
kfree_skb(skb);
}
@@ -324,7 +316,7 @@ static int pppol2tp_sendmsg(struct socket *sock, struct msghdr *m,
}
local_bh_disable();
- l2tp_xmit_skb(session, skb, session->hdr_len);
+ l2tp_xmit_skb(session, skb);
local_bh_enable();
sock_put(sk);
@@ -383,7 +375,7 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb->data[1] = PPP_UI;
local_bh_disable();
- l2tp_xmit_skb(session, skb, session->hdr_len);
+ l2tp_xmit_skb(session, skb);
local_bh_enable();
sock_put(sk);
@@ -710,7 +702,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
if (!tunnel) {
struct l2tp_tunnel_cfg tcfg = {
.encap = L2TP_ENCAPTYPE_UDP,
- .debug = 0,
};
/* Prevent l2tp_tunnel_register() from trying to set up
@@ -721,7 +712,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
goto end;
}
- error = l2tp_tunnel_create(sock_net(sk), info.fd,
+ error = l2tp_tunnel_create(info.fd,
info.version,
info.tunnel_id,
info.peer_tunnel_id, &tcfg,
@@ -840,8 +831,6 @@ out_no_ppp:
drop_refcnt = false;
sk->sk_state = PPPOX_CONNECTED;
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: created\n",
- session->name);
end:
if (error) {
@@ -1076,6 +1065,9 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
if (!session)
return -ENOTCONN;
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+ return -EBADF;
+
/* Not defined for tunnels */
if (!session->session_id && !session->peer_session_id)
return -ENOSYS;
@@ -1090,6 +1082,9 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
if (!session)
return -ENOTCONN;
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+ return -EBADF;
+
/* Not defined for tunnels */
if (!session->session_id && !session->peer_session_id)
return -ENOSYS;
@@ -1103,6 +1098,9 @@ static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
if (!session)
return -ENOTCONN;
+ if (WARN_ON(session->magic != L2TP_SESSION_MAGIC))
+ return -EBADF;
+
/* Session 0 represents the parent tunnel */
if (!session->session_id && !session->peer_session_id) {
u32 session_id;
@@ -1157,9 +1155,7 @@ static int pppol2tp_tunnel_setsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
- tunnel->debug = val;
- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
- tunnel->name, tunnel->debug);
+ /* Tunnel debug flags option is deprecated */
break;
default:
@@ -1185,9 +1181,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->recv_seq = !!val;
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: set recv_seq=%d\n",
- session->name, session->recv_seq);
break;
case PPPOL2TP_SO_SENDSEQ:
@@ -1203,9 +1196,6 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}
l2tp_session_set_header_len(session, session->tunnel->version);
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: set send_seq=%d\n",
- session->name, session->send_seq);
break;
case PPPOL2TP_SO_LNSMODE:
@@ -1214,22 +1204,14 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
break;
}
session->lns_mode = !!val;
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: set lns_mode=%d\n",
- session->name, session->lns_mode);
break;
case PPPOL2TP_SO_DEBUG:
- session->debug = val;
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: set debug=%x\n",
- session->name, session->debug);
+ /* Session debug flags option is deprecated */
break;
case PPPOL2TP_SO_REORDERTO:
session->reorder_timeout = msecs_to_jiffies(val);
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: set reorder_timeout=%d\n",
- session->name, session->reorder_timeout);
break;
default:
@@ -1297,9 +1279,8 @@ static int pppol2tp_tunnel_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_DEBUG:
- *val = tunnel->debug;
- l2tp_info(tunnel, L2TP_MSG_CONTROL, "%s: get debug=%x\n",
- tunnel->name, tunnel->debug);
+ /* Tunnel debug flags option is deprecated */
+ *val = 0;
break;
default:
@@ -1321,32 +1302,23 @@ static int pppol2tp_session_getsockopt(struct sock *sk,
switch (optname) {
case PPPOL2TP_SO_RECVSEQ:
*val = session->recv_seq;
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: get recv_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_SENDSEQ:
*val = session->send_seq;
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: get send_seq=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_LNSMODE:
*val = session->lns_mode;
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: get lns_mode=%d\n", session->name, *val);
break;
case PPPOL2TP_SO_DEBUG:
- *val = session->debug;
- l2tp_info(session, L2TP_MSG_CONTROL, "%s: get debug=%d\n",
- session->name, *val);
+ /* Session debug flags option is deprecated */
+ *val = 0;
break;
case PPPOL2TP_SO_REORDERTO:
*val = (int)jiffies_to_msecs(session->reorder_timeout);
- l2tp_info(session, L2TP_MSG_CONTROL,
- "%s: get reorder_timeout=%d\n", session->name, *val);
break;
default:
@@ -1534,7 +1506,7 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
(tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
refcount_read(&tunnel->ref_count) - 1);
seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",
- tunnel->debug,
+ 0,
atomic_long_read(&tunnel->stats.tx_packets),
atomic_long_read(&tunnel->stats.tx_bytes),
atomic_long_read(&tunnel->stats.tx_errors),
@@ -1580,7 +1552,7 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
session->recv_seq ? 'R' : '-',
session->send_seq ? 'S' : '-',
session->lns_mode ? "LNS" : "LAC",
- session->debug,
+ 0,
jiffies_to_msecs(session->reorder_timeout));
seq_printf(m, " %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",
session->nr, session->ns,
diff --git a/net/l2tp/trace.h b/net/l2tp/trace.h
new file mode 100644
index 000000000000..8596eaa12a2e
--- /dev/null
+++ b/net/l2tp/trace.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM l2tp
+
+#if !defined(_TRACE_L2TP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_L2TP_H
+
+#include <linux/tracepoint.h>
+#include <linux/l2tp.h>
+#include "l2tp_core.h"
+
+#define encap_type_name(e) { L2TP_ENCAPTYPE_##e, #e }
+#define show_encap_type_name(val) \
+ __print_symbolic(val, \
+ encap_type_name(UDP), \
+ encap_type_name(IP))
+
+#define pw_type_name(p) { L2TP_PWTYPE_##p, #p }
+#define show_pw_type_name(val) \
+ __print_symbolic(val, \
+ pw_type_name(ETH_VLAN), \
+ pw_type_name(ETH), \
+ pw_type_name(PPP), \
+ pw_type_name(PPP_AC), \
+ pw_type_name(IP))
+
+DECLARE_EVENT_CLASS(tunnel_only_evt,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_TUNNEL_NAME_MAX)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, tunnel->name, L2TP_TUNNEL_NAME_MAX);
+ ),
+ TP_printk("%s", __entry->name)
+);
+
+DECLARE_EVENT_CLASS(session_only_evt,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ ),
+ TP_printk("%s", __entry->name)
+);
+
+TRACE_EVENT(register_tunnel,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_TUNNEL_NAME_MAX)
+ __field(int, fd)
+ __field(u32, tid)
+ __field(u32, ptid)
+ __field(int, version)
+ __field(enum l2tp_encap_type, encap)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, tunnel->name, L2TP_TUNNEL_NAME_MAX);
+ __entry->fd = tunnel->fd;
+ __entry->tid = tunnel->tunnel_id;
+ __entry->ptid = tunnel->peer_tunnel_id;
+ __entry->version = tunnel->version;
+ __entry->encap = tunnel->encap;
+ ),
+ TP_printk("%s: type=%s encap=%s version=L2TPv%d tid=%u ptid=%u fd=%d",
+ __entry->name,
+ __entry->fd > 0 ? "managed" : "unmanaged",
+ show_encap_type_name(__entry->encap),
+ __entry->version,
+ __entry->tid,
+ __entry->ptid,
+ __entry->fd)
+);
+
+DEFINE_EVENT(tunnel_only_evt, delete_tunnel,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel)
+);
+
+DEFINE_EVENT(tunnel_only_evt, free_tunnel,
+ TP_PROTO(struct l2tp_tunnel *tunnel),
+ TP_ARGS(tunnel)
+);
+
+TRACE_EVENT(register_session,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ __field(u32, tid)
+ __field(u32, ptid)
+ __field(u32, sid)
+ __field(u32, psid)
+ __field(enum l2tp_pwtype, pwtype)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ __entry->tid = session->tunnel ? session->tunnel->tunnel_id : 0;
+ __entry->ptid = session->tunnel ? session->tunnel->peer_tunnel_id : 0;
+ __entry->sid = session->session_id;
+ __entry->psid = session->peer_session_id;
+ __entry->pwtype = session->pwtype;
+ ),
+ TP_printk("%s: pseudowire=%s sid=%u psid=%u tid=%u ptid=%u",
+ __entry->name,
+ show_pw_type_name(__entry->pwtype),
+ __entry->sid,
+ __entry->psid,
+ __entry->tid,
+ __entry->ptid)
+);
+
+DEFINE_EVENT(session_only_evt, delete_session,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_only_evt, free_session,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_only_evt, session_seqnum_lns_enable,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_only_evt, session_seqnum_lns_disable,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DECLARE_EVENT_CLASS(session_seqnum_evt,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ __field(u32, ns)
+ __field(u32, nr)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ __entry->ns = session->ns;
+ __entry->nr = session->nr;
+ ),
+ TP_printk("%s: ns=%u nr=%u",
+ __entry->name,
+ __entry->ns,
+ __entry->nr)
+);
+
+DEFINE_EVENT(session_seqnum_evt, session_seqnum_update,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DEFINE_EVENT(session_seqnum_evt, session_seqnum_reset,
+ TP_PROTO(struct l2tp_session *session),
+ TP_ARGS(session)
+);
+
+DECLARE_EVENT_CLASS(session_pkt_discard_evt,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns),
+ TP_STRUCT__entry(
+ __array(char, name, L2TP_SESSION_NAME_MAX)
+ __field(u32, pkt_ns)
+ __field(u32, my_nr)
+ __field(u32, reorder_q_len)
+ ),
+ TP_fast_assign(
+ memcpy(__entry->name, session->name, L2TP_SESSION_NAME_MAX);
+ __entry->pkt_ns = pkt_ns;
+ __entry->my_nr = session->nr;
+ __entry->reorder_q_len = skb_queue_len(&session->reorder_q);
+ ),
+ TP_printk("%s: pkt_ns=%u my_nr=%u reorder_q_len=%u",
+ __entry->name,
+ __entry->pkt_ns,
+ __entry->my_nr,
+ __entry->reorder_q_len)
+);
+
+DEFINE_EVENT(session_pkt_discard_evt, session_pkt_expired,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns)
+);
+
+DEFINE_EVENT(session_pkt_discard_evt, session_pkt_outside_rx_window,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns)
+);
+
+DEFINE_EVENT(session_pkt_discard_evt, session_pkt_oos,
+ TP_PROTO(struct l2tp_session *session, u32 pkt_ns),
+ TP_ARGS(session, pkt_ns)
+);
+
+#endif /* _TRACE_L2TP_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
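
trace.h guards itself with TRACE_HEADER_MULTI_READ because it is deliberately included twice: l2tp_core.c pulls it in once for declarations and once more under CREATE_TRACE_POINTS to emit the definitions, which is also why the Makefile hunk adds -I$(src) so define_trace.h can re-find the header via TRACE_INCLUDE_PATH. A runnable userspace model of the same expand-the-same-list-twice trick, using an X-macro list in place of the tracepoint machinery:

/* One event list, expanded twice with different EVENT() definitions --
 * the mechanism CREATE_TRACE_POINTS relies on, in miniature.
 */
#include <stdio.h>

#define EVENT_LIST \
	EVENT(register_tunnel) \
	EVENT(delete_tunnel)

/* Pass 1: declare an enum of event ids */
#define EVENT(name) EV_##name,
enum event_id { EVENT_LIST EV_MAX };
#undef EVENT

/* Pass 2 (the "create" pass): define the matching name table */
#define EVENT(name) [EV_##name] = #name,
static const char *event_names[EV_MAX] = { EVENT_LIST };
#undef EVENT

int main(void)
{
	for (int i = 0; i < EV_MAX; i++)
		printf("%d: %s\n", i, event_names[i]);
	return 0;
}
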
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 6cbb1286d6c0..ad04c361cba5 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -13,6 +13,7 @@ mac80211-y := \
ht.o agg-tx.o agg-rx.o \
vht.o \
he.o \
+ s1g.o \
ibss.o \
iface.o \
rate.o \
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 313ba97acae3..cd4cf84a7f99 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -350,7 +350,7 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
sta->sta.addr, tid);
/* We have no API to update the timeout value in the
* driver so reject the timeout update if the timeout
- * changed. If if did not change, i.e., no real update,
+ * changed. If it did not change, i.e., no real update,
* just reply with success.
*/
rcu_read_lock();
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 87fddd84c621..7276e66ae435 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -709,7 +709,8 @@ void sta_set_rate_info_tx(struct sta_info *sta,
u16 brate;
sband = ieee80211_get_sband(sta->sdata);
- if (sband) {
+ WARN_ON_ONCE(sband && !sband->bitrates);
+ if (sband && sband->bitrates) {
brate = sband->bitrates[rate->idx].bitrate;
rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
@@ -826,9 +827,9 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
memcpy(new->data, resp, resp_len);
if (csa)
- memcpy(new->csa_counter_offsets, csa->counter_offsets_presp,
+ memcpy(new->cntdwn_counter_offsets, csa->counter_offsets_presp,
csa->n_counter_offsets_presp *
- sizeof(new->csa_counter_offsets[0]));
+ sizeof(new->cntdwn_counter_offsets[0]));
rcu_assign_pointer(sdata->u.ap.probe_resp, new);
if (old)
@@ -837,6 +838,59 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
+static int ieee80211_set_fils_discovery(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_fils_discovery *params)
+{
+ struct fils_discovery_data *new, *old = NULL;
+ struct ieee80211_fils_discovery *fd;
+
+ if (!params->tmpl || !params->tmpl_len)
+ return -EINVAL;
+
+ fd = &sdata->vif.bss_conf.fils_discovery;
+ fd->min_interval = params->min_interval;
+ fd->max_interval = params->max_interval;
+
+ old = sdata_dereference(sdata->u.ap.fils_discovery, sdata);
+ new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->len = params->tmpl_len;
+ memcpy(new->data, params->tmpl, params->tmpl_len);
+ rcu_assign_pointer(sdata->u.ap.fils_discovery, new);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+ return 0;
+}
+
+static int
+ieee80211_set_unsol_bcast_probe_resp(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_unsol_bcast_probe_resp *params)
+{
+ struct unsol_bcast_probe_resp_data *new, *old = NULL;
+
+ if (!params->tmpl || !params->tmpl_len)
+ return -EINVAL;
+
+ old = sdata_dereference(sdata->u.ap.unsol_bcast_probe_resp, sdata);
+ new = kzalloc(sizeof(*new) + params->tmpl_len, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+ new->len = params->tmpl_len;
+ memcpy(new->data, params->tmpl, params->tmpl_len);
+ rcu_assign_pointer(sdata->u.ap.unsol_bcast_probe_resp, new);
+
+ if (old)
+ kfree_rcu(old, rcu_head);
+
+ sdata->vif.bss_conf.unsol_bcast_probe_resp_interval =
+ params->interval;
+
+ return 0;
+}
+
static int ieee80211_set_ftm_responder_params(
struct ieee80211_sub_if_data *sdata,
const u8 *lci, size_t lci_len,
@@ -926,10 +980,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
new->tail_len = new_tail_len;
if (csa) {
- new->csa_current_counter = csa->count;
- memcpy(new->csa_counter_offsets, csa->counter_offsets_beacon,
+ new->cntdwn_current_counter = csa->count;
+ memcpy(new->cntdwn_counter_offsets, csa->counter_offsets_beacon,
csa->n_counter_offsets_beacon *
- sizeof(new->csa_counter_offsets[0]));
+ sizeof(new->cntdwn_counter_offsets[0]));
}
/* copy in head */
@@ -1071,6 +1125,8 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sizeof(struct ieee80211_he_obss_pd));
memcpy(&sdata->vif.bss_conf.he_bss_color, &params->he_bss_color,
sizeof(struct ieee80211_he_bss_color));
+ sdata->vif.bss_conf.s1g = params->chandef.chan->band ==
+ NL80211_BAND_S1GHZ;
sdata->vif.bss_conf.ssid_len = params->ssid_len;
if (params->ssid_len)
@@ -1098,13 +1154,30 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
}
}
+ if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL))
+ sdata->vif.bss_conf.beacon_tx_rate = params->beacon_rate;
+
err = ieee80211_assign_beacon(sdata, &params->beacon, NULL);
- if (err < 0) {
- ieee80211_vif_release_channel(sdata);
- return err;
- }
+ if (err < 0)
+ goto error;
changed |= err;
+ if (params->fils_discovery.max_interval) {
+ err = ieee80211_set_fils_discovery(sdata,
+ &params->fils_discovery);
+ if (err < 0)
+ goto error;
+ changed |= BSS_CHANGED_FILS_DISCOVERY;
+ }
+
+ if (params->unsol_bcast_probe_resp.interval) {
+ err = ieee80211_set_unsol_bcast_probe_resp(sdata,
+ &params->unsol_bcast_probe_resp);
+ if (err < 0)
+ goto error;
+ changed |= BSS_CHANGED_UNSOL_BCAST_PROBE_RESP;
+ }
+
err = drv_start_ap(sdata->local, sdata);
if (err) {
old = sdata_dereference(sdata->u.ap.beacon, sdata);
@@ -1112,8 +1185,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
kfree_rcu(old, rcu_head);
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
- ieee80211_vif_release_channel(sdata);
- return err;
+ goto error;
}
ieee80211_recalc_dtim(local, sdata);
@@ -1124,6 +1196,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
netif_carrier_on(vlan->dev);
return 0;
+
+error:
+ ieee80211_vif_release_channel(sdata);
+ return err;
}
static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
@@ -1160,6 +1236,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
struct ieee80211_local *local = sdata->local;
struct beacon_data *old_beacon;
struct probe_resp *old_probe_resp;
+ struct fils_discovery_data *old_fils_discovery;
+ struct unsol_bcast_probe_resp_data *old_unsol_bcast_probe_resp;
struct cfg80211_chan_def chandef;
sdata_assert_lock(sdata);
@@ -1168,6 +1246,11 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
if (!old_beacon)
return -ENOENT;
old_probe_resp = sdata_dereference(sdata->u.ap.probe_resp, sdata);
+ old_fils_discovery = sdata_dereference(sdata->u.ap.fils_discovery,
+ sdata);
+ old_unsol_bcast_probe_resp =
+ sdata_dereference(sdata->u.ap.unsol_bcast_probe_resp,
+ sdata);
/* abort any running channel switch */
mutex_lock(&local->mtx);
@@ -1191,9 +1274,15 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
/* remove beacon and probe response */
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
RCU_INIT_POINTER(sdata->u.ap.probe_resp, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.fils_discovery, NULL);
+ RCU_INIT_POINTER(sdata->u.ap.unsol_bcast_probe_resp, NULL);
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
+ if (old_fils_discovery)
+ kfree_rcu(old_fils_discovery, rcu_head);
+ if (old_unsol_bcast_probe_resp)
+ kfree_rcu(old_unsol_bcast_probe_resp, rcu_head);
kfree(sdata->vif.bss_conf.ftmr_params);
sdata->vif.bss_conf.ftmr_params = NULL;
@@ -1696,6 +1785,7 @@ static int ieee80211_change_station(struct wiphy *wiphy,
rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
__ieee80211_check_fast_rx_iface(vlansdata);
+ drv_sta_set_4addr(local, sta->sdata, &sta->sta, true);
}
if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
@@ -3186,9 +3276,9 @@ static int ieee80211_set_csa_beacon(struct ieee80211_sub_if_data *sdata,
break;
if ((params->n_counter_offsets_beacon >
- IEEE80211_MAX_CSA_COUNTERS_NUM) ||
+ IEEE80211_MAX_CNTDWN_COUNTERS_NUM) ||
(params->n_counter_offsets_presp >
- IEEE80211_MAX_CSA_COUNTERS_NUM))
+ IEEE80211_MAX_CNTDWN_COUNTERS_NUM))
return -EINVAL;
csa.counter_offsets_beacon = params->counter_offsets_beacon;
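The two template setters added above (ieee80211_set_fils_discovery() and
ieee80211_set_unsol_bcast_probe_resp()) follow the usual RCU publish
pattern: allocate the new template, make it visible with
rcu_assign_pointer(), and defer freeing the old copy past a grace period
with kfree_rcu(). A condensed sketch of the pattern (generic names,
illustration only; the caller is assumed to hold the update-side lock):

	struct tmpl {
		struct rcu_head rcu_head;
		int len;
		u8 data[];	/* template bytes follow the header */
	};

	static int publish_tmpl(struct tmpl __rcu **slot,
				const u8 *buf, size_t len)
	{
		struct tmpl *new, *old;

		new = kzalloc(sizeof(*new) + len, GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->len = len;
		memcpy(new->data, buf, len);

		/* update side is serialized, so a plain deref is safe */
		old = rcu_dereference_protected(*slot, 1);
		rcu_assign_pointer(*slot, new);	/* readers see new from here */
		if (old)
			kfree_rcu(old, rcu_head); /* freed after grace period */
		return 0;
	}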
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index bdc0f29dc6cd..8f48aff74c7b 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -536,7 +536,14 @@ static void ieee80211_del_chanctx(struct ieee80211_local *local,
if (!local->use_chanctx) {
struct cfg80211_chan_def *chandef = &local->_oper_chandef;
- chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+ /* S1G doesn't have 20MHz, so get the correct width for the
+ * current channel.
+ */
+ if (chandef->chan->band == NL80211_BAND_S1GHZ)
+ chandef->width =
+ ieee80211_s1g_channel_width(chandef->chan);
+ else
+ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
chandef->center_freq1 = chandef->chan->center_freq;
chandef->freq1_offset = chandef->chan->freq_offset;
chandef->center_freq2 = 0;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index 54080290d6e2..90470392fdaa 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -408,6 +408,7 @@ static const char *hw_flag_names[] = {
FLAG(SUPPORTS_MULTI_BSSID),
FLAG(SUPPORTS_ONLY_HE_MULTI_BSSID),
FLAG(AMPDU_KEYBORDER_SUPPORT),
+ FLAG(SUPPORTS_TX_ENCAP_OFFLOAD),
#undef FLAG
};
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 41d495d73d3a..bcdfd19a596b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1384,4 +1384,33 @@ static inline int drv_reset_tid_config(struct ieee80211_local *local,
return ret;
}
+
+static inline void drv_update_vif_offload(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata)
+{
+ might_sleep();
+ check_sdata_in_driver(sdata);
+
+ if (!local->ops->update_vif_offload)
+ return;
+
+ trace_drv_update_vif_offload(local, sdata);
+ local->ops->update_vif_offload(&local->hw, &sdata->vif);
+ trace_drv_return_void(local);
+}
+
+static inline void drv_sta_set_4addr(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, bool enabled)
+{
+ sdata = get_bss_sdata(sdata);
+ if (!check_sdata_in_driver(sdata))
+ return;
+
+ trace_drv_sta_set_4addr(local, sdata, sta, enabled);
+ if (local->ops->sta_set_4addr)
+ local->ops->sta_set_4addr(&local->hw, &sdata->vif, sta, enabled);
+ trace_drv_return_void(local);
+}
+
#endif /* __MAC80211_DRIVER_OPS */
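The drv_update_vif_offload() and drv_sta_set_4addr() wrappers added above
follow the standard driver-ops pattern: trace the call, check that the
driver provides the callback, then invoke it. A driver opts in by filling
the corresponding struct ieee80211_ops members; a hypothetical sketch
(the mydrv_* names are illustrative, not a real driver):

	static void mydrv_update_vif_offload(struct ieee80211_hw *hw,
					     struct ieee80211_vif *vif)
	{
		/* reconfigure hardware according to vif->offload_flags,
		 * e.g. whether IEEE80211_OFFLOAD_ENCAP_ENABLED is set
		 */
	}

	static void mydrv_sta_set_4addr(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					struct ieee80211_sta *sta,
					bool enabled)
	{
		/* enable or disable 4-address frame handling for @sta */
	}

	static const struct ieee80211_ops mydrv_ops = {
		.update_vif_offload	= mydrv_update_vif_offload,
		.sta_set_4addr		= mydrv_sta_set_4addr,
		/* ... remaining ops ... */
	};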
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 53632c2f5217..1f552f374e97 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -145,9 +145,9 @@ ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
*pos++ = csa_settings->block_tx ? 1 : 0;
*pos++ = ieee80211_frequency_to_channel(
csa_settings->chandef.chan->center_freq);
- presp->csa_counter_offsets[0] = (pos - presp->head);
+ presp->cntdwn_counter_offsets[0] = (pos - presp->head);
*pos++ = csa_settings->count;
- presp->csa_current_counter = csa_settings->count;
+ presp->cntdwn_current_counter = csa_settings->count;
}
/* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
@@ -1037,7 +1037,8 @@ static void ieee80211_update_sta_info(struct ieee80211_sub_if_data *sdata,
}
if (sta && !sta->sta.wme &&
- elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS) {
+ (elems->wmm_info || elems->s1g_capab) &&
+ local->hw.queues >= IEEE80211_NUM_ACS) {
sta->sta.wme = true;
ieee80211_check_fast_xmit(sta);
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 0b1eaec6649f..2a21226fb518 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -259,15 +259,27 @@ struct beacon_data {
u8 *head, *tail;
int head_len, tail_len;
struct ieee80211_meshconf_ie *meshconf;
- u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM];
- u8 csa_current_counter;
+ u16 cntdwn_counter_offsets[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
+ u8 cntdwn_current_counter;
struct rcu_head rcu_head;
};
struct probe_resp {
struct rcu_head rcu_head;
int len;
- u16 csa_counter_offsets[IEEE80211_MAX_CSA_COUNTERS_NUM];
+ u16 cntdwn_counter_offsets[IEEE80211_MAX_CNTDWN_COUNTERS_NUM];
+ u8 data[];
+};
+
+struct fils_discovery_data {
+ struct rcu_head rcu_head;
+ int len;
+ u8 data[];
+};
+
+struct unsol_bcast_probe_resp_data {
+ struct rcu_head rcu_head;
+ int len;
u8 data[];
};
@@ -286,6 +298,8 @@ struct ps_data {
struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
struct probe_resp __rcu *probe_resp;
+ struct fils_discovery_data __rcu *fils_discovery;
+ struct unsol_bcast_probe_resp_data __rcu *unsol_bcast_probe_resp;
/* to be used after channel switch. */
struct cfg80211_beacon_data *next_beacon;
@@ -530,6 +544,8 @@ struct ieee80211_if_managed {
struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */
struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */
+ struct ieee80211_s1g_cap s1g_capa; /* configured S1G overrides */
+ struct ieee80211_s1g_cap s1g_capa_mask; /* valid s1g_capa bits */
/* TDLS support */
u8 tdls_peer[ETH_ALEN] __aligned(2);
@@ -989,8 +1005,6 @@ struct ieee80211_sub_if_data {
} debugfs;
#endif
- bool hw_80211_encap;
-
/* must be last, dynamically sized area in this! */
struct ieee80211_vif vif;
};
@@ -1364,7 +1378,6 @@ struct ieee80211_local {
*/
bool pspolling;
- bool offchannel_ps_enabled;
/*
* PS can only be enabled when we have exactly one managed
* interface (and monitors) in PS, this then points there.
@@ -1522,6 +1535,10 @@ struct ieee802_11_elems {
u8 dtim_count;
u8 dtim_period;
const struct ieee80211_addba_ext_ie *addba_ext_ie;
+ const struct ieee80211_s1g_cap *s1g_capab;
+ const struct ieee80211_s1g_oper_ie *s1g_oper;
+ const struct ieee80211_s1g_bcn_compat_ie *s1g_bcn_compat;
+ const struct ieee80211_aid_response_ie *aid_resp;
/* length of them, respectively */
u8 ext_capab_len;
@@ -1640,6 +1657,8 @@ int ieee80211_set_arp_filter(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
+void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata);
void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
@@ -1767,6 +1786,7 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata,
bool update_bss);
+void ieee80211_recalc_offload(struct ieee80211_local *local);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
@@ -1908,6 +1928,9 @@ void
ieee80211_he_op_ie_to_bss_conf(struct ieee80211_vif *vif,
const struct ieee80211_he_operation *he_op_ie_elem);
+/* S1G */
+void ieee80211_s1g_sta_rate_init(struct sta_info *sta);
+
/* Spectrum management */
void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
struct ieee80211_mgmt *mgmt,
@@ -2040,8 +2063,6 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t);
void ieee80211_send_nullfunc(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata,
bool powersave);
-void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_hdr *hdr);
void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr *hdr, bool ack, u16 tx_time);
@@ -2193,6 +2214,11 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic,
enum nl80211_band band);
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo);
+void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta_s1g_cap *caps,
+ struct sk_buff *skb);
+void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb);
/* channel management */
bool ieee80211_chandef_ht_oper(const struct ieee80211_ht_operation *ht_oper,
@@ -2204,6 +2230,8 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw, u32 vht_cap_info,
bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_he_operation *he_oper,
struct cfg80211_chan_def *chandef);
+bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
+ struct cfg80211_chan_def *chandef);
u32 ieee80211_chandef_downgrade(struct cfg80211_chan_def *c);
int __must_check
@@ -2282,6 +2310,9 @@ void ieee80211_tdls_chsw_work(struct work_struct *wk);
void ieee80211_tdls_handle_disconnect(struct ieee80211_sub_if_data *sdata,
const u8 *peer, u16 reason);
const char *ieee80211_get_reason_code_string(u16 reason_code);
+u16 ieee80211_encode_usf(int val);
+u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+ enum nl80211_iftype type);
extern const struct ethtool_ops ieee80211_ethtool_ops;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 9740ae8fa697..1be775979132 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -348,439 +348,6 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
- const int offset)
-{
- struct ieee80211_local *local = sdata->local;
- u32 flags = sdata->u.mntr.flags;
-
-#define ADJUST(_f, _s) do { \
- if (flags & MONITOR_FLAG_##_f) \
- local->fif_##_s += offset; \
- } while (0)
-
- ADJUST(FCSFAIL, fcsfail);
- ADJUST(PLCPFAIL, plcpfail);
- ADJUST(CONTROL, control);
- ADJUST(CONTROL, pspoll);
- ADJUST(OTHER_BSS, other_bss);
-
-#undef ADJUST
-}
-
-static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
-{
- struct ieee80211_local *local = sdata->local;
- int i;
-
- for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
- sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
- else if (local->hw.queues >= IEEE80211_NUM_ACS)
- sdata->vif.hw_queue[i] = i;
- else
- sdata->vif.hw_queue[i] = 0;
- }
- sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
-}
-
-int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
-{
- struct ieee80211_sub_if_data *sdata;
- int ret;
-
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return 0;
-
- ASSERT_RTNL();
-
- if (local->monitor_sdata)
- return 0;
-
- sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
- if (!sdata)
- return -ENOMEM;
-
- /* set up data */
- sdata->local = local;
- sdata->vif.type = NL80211_IFTYPE_MONITOR;
- snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
- wiphy_name(local->hw.wiphy));
- sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
-
- sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
-
- ieee80211_set_default_queues(sdata);
-
- ret = drv_add_interface(local, sdata);
- if (WARN_ON(ret)) {
- /* ok .. stupid driver, it asked for this! */
- kfree(sdata);
- return ret;
- }
-
- ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
- if (ret) {
- kfree(sdata);
- return ret;
- }
-
- mutex_lock(&local->iflist_mtx);
- rcu_assign_pointer(local->monitor_sdata, sdata);
- mutex_unlock(&local->iflist_mtx);
-
- mutex_lock(&local->mtx);
- ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
- IEEE80211_CHANCTX_EXCLUSIVE);
- mutex_unlock(&local->mtx);
- if (ret) {
- mutex_lock(&local->iflist_mtx);
- RCU_INIT_POINTER(local->monitor_sdata, NULL);
- mutex_unlock(&local->iflist_mtx);
- synchronize_net();
- drv_remove_interface(local, sdata);
- kfree(sdata);
- return ret;
- }
-
- skb_queue_head_init(&sdata->skb_queue);
- INIT_WORK(&sdata->work, ieee80211_iface_work);
-
- return 0;
-}
-
-void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
-{
- struct ieee80211_sub_if_data *sdata;
-
- if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
- return;
-
- ASSERT_RTNL();
-
- mutex_lock(&local->iflist_mtx);
-
- sdata = rcu_dereference_protected(local->monitor_sdata,
- lockdep_is_held(&local->iflist_mtx));
- if (!sdata) {
- mutex_unlock(&local->iflist_mtx);
- return;
- }
-
- RCU_INIT_POINTER(local->monitor_sdata, NULL);
- mutex_unlock(&local->iflist_mtx);
-
- synchronize_net();
-
- mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
- mutex_unlock(&local->mtx);
-
- drv_remove_interface(local, sdata);
-
- kfree(sdata);
-}
-
-/*
- * NOTE: Be very careful when changing this function, it must NOT return
- * an error on interface type changes that have been pre-checked, so most
- * checks should be in ieee80211_check_concurrent_iface.
- */
-int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
- struct net_device *dev = wdev->netdev;
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- u32 changed = 0;
- int res;
- u32 hw_reconf_flags = 0;
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_WDS:
- if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
- return -ENOLINK;
- break;
- case NL80211_IFTYPE_AP_VLAN: {
- struct ieee80211_sub_if_data *master;
-
- if (!sdata->bss)
- return -ENOLINK;
-
- mutex_lock(&local->mtx);
- list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
- mutex_unlock(&local->mtx);
-
- master = container_of(sdata->bss,
- struct ieee80211_sub_if_data, u.ap);
- sdata->control_port_protocol =
- master->control_port_protocol;
- sdata->control_port_no_encrypt =
- master->control_port_no_encrypt;
- sdata->control_port_over_nl80211 =
- master->control_port_over_nl80211;
- sdata->control_port_no_preauth =
- master->control_port_no_preauth;
- sdata->vif.cab_queue = master->vif.cab_queue;
- memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
- sizeof(sdata->vif.hw_queue));
- sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
-
- mutex_lock(&local->key_mtx);
- sdata->crypto_tx_tailroom_needed_cnt +=
- master->crypto_tx_tailroom_needed_cnt;
- mutex_unlock(&local->key_mtx);
-
- break;
- }
- case NL80211_IFTYPE_AP:
- sdata->bss = &sdata->u.ap;
- break;
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_MONITOR:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_P2P_DEVICE:
- case NL80211_IFTYPE_OCB:
- case NL80211_IFTYPE_NAN:
- /* no special treatment */
- break;
- case NL80211_IFTYPE_UNSPECIFIED:
- case NUM_NL80211_IFTYPES:
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- /* cannot happen */
- WARN_ON(1);
- break;
- }
-
- if (local->open_count == 0) {
- res = drv_start(local);
- if (res)
- goto err_del_bss;
- /* we're brought up, everything changes */
- hw_reconf_flags = ~0;
- ieee80211_led_radio(local, true);
- ieee80211_mod_tpt_led_trig(local,
- IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
- }
-
- /*
- * Copy the hopefully now-present MAC address to
- * this interface, if it has the special null one.
- */
- if (dev && is_zero_ether_addr(dev->dev_addr)) {
- memcpy(dev->dev_addr,
- local->hw.wiphy->perm_addr,
- ETH_ALEN);
- memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
-
- if (!is_valid_ether_addr(dev->dev_addr)) {
- res = -EADDRNOTAVAIL;
- goto err_stop;
- }
- }
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_AP_VLAN:
- /* no need to tell driver, but set carrier and chanctx */
- if (rtnl_dereference(sdata->bss->beacon)) {
- ieee80211_vif_vlan_copy_chanctx(sdata);
- netif_carrier_on(dev);
- } else {
- netif_carrier_off(dev);
- }
- break;
- case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
- local->cooked_mntrs++;
- break;
- }
-
- if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
- res = drv_add_interface(local, sdata);
- if (res)
- goto err_stop;
- } else if (local->monitors == 0 && local->open_count == 0) {
- res = ieee80211_add_virtual_monitor(local);
- if (res)
- goto err_stop;
- }
-
- /* must be before the call to ieee80211_configure_filter */
- local->monitors++;
- if (local->monitors == 1) {
- local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
- hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
- }
-
- ieee80211_adjust_monitor_flags(sdata, 1);
- ieee80211_configure_filter(local);
- mutex_lock(&local->mtx);
- ieee80211_recalc_idle(local);
- mutex_unlock(&local->mtx);
-
- netif_carrier_on(dev);
- break;
- default:
- if (coming_up) {
- ieee80211_del_virtual_monitor(local);
-
- res = drv_add_interface(local, sdata);
- if (res)
- goto err_stop;
- res = ieee80211_check_queues(sdata,
- ieee80211_vif_type_p2p(&sdata->vif));
- if (res)
- goto err_del_interface;
- }
-
- if (sdata->vif.type == NL80211_IFTYPE_AP) {
- local->fif_pspoll++;
- local->fif_probe_req++;
-
- ieee80211_configure_filter(local);
- } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
- local->fif_probe_req++;
- }
-
- if (sdata->vif.probe_req_reg)
- drv_config_iface_filter(local, sdata,
- FIF_PROBE_REQ,
- FIF_PROBE_REQ);
-
- if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
- sdata->vif.type != NL80211_IFTYPE_NAN)
- changed |= ieee80211_reset_erp_info(sdata);
- ieee80211_bss_info_change_notify(sdata, changed);
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_OCB:
- netif_carrier_off(dev);
- break;
- case NL80211_IFTYPE_WDS:
- case NL80211_IFTYPE_P2P_DEVICE:
- case NL80211_IFTYPE_NAN:
- break;
- default:
- /* not reached */
- WARN_ON(1);
- }
-
- /*
- * Set default queue parameters so drivers don't
- * need to initialise the hardware if the hardware
- * doesn't start up with sane defaults.
- * Enable QoS for anything but station interfaces.
- */
- ieee80211_set_wmm_default(sdata, true,
- sdata->vif.type != NL80211_IFTYPE_STATION);
- }
-
- set_bit(SDATA_STATE_RUNNING, &sdata->state);
-
- switch (sdata->vif.type) {
- case NL80211_IFTYPE_WDS:
- /* Create STA entry for the WDS peer */
- sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
- GFP_KERNEL);
- if (!sta) {
- res = -ENOMEM;
- goto err_del_interface;
- }
-
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
- sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
- sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
-
- res = sta_info_insert(sta);
- if (res) {
- /* STA has been freed */
- goto err_del_interface;
- }
-
- rate_control_rate_init(sta);
- netif_carrier_on(dev);
- break;
- case NL80211_IFTYPE_P2P_DEVICE:
- rcu_assign_pointer(local->p2p_sdata, sdata);
- break;
- case NL80211_IFTYPE_MONITOR:
- if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
- break;
- list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
- break;
- default:
- break;
- }
-
- /*
- * set_multicast_list will be invoked by the networking core
- * which will check whether any increments here were done in
- * error and sync them down to the hardware as filter flags.
- */
- if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
- atomic_inc(&local->iff_allmultis);
-
- if (coming_up)
- local->open_count++;
-
- if (hw_reconf_flags)
- ieee80211_hw_config(local, hw_reconf_flags);
-
- ieee80211_recalc_ps(local);
-
- if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
- sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
- local->ops->wake_tx_queue) {
- /* XXX: for AP_VLAN, actually track AP queues */
- if (dev)
- netif_tx_start_all_queues(dev);
- } else if (dev) {
- unsigned long flags;
- int n_acs = IEEE80211_NUM_ACS;
- int ac;
-
- if (local->hw.queues < IEEE80211_NUM_ACS)
- n_acs = 1;
-
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- if (sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE ||
- (local->queue_stop_reasons[sdata->vif.cab_queue] == 0 &&
- skb_queue_empty(&local->pending[sdata->vif.cab_queue]))) {
- for (ac = 0; ac < n_acs; ac++) {
- int ac_queue = sdata->vif.hw_queue[ac];
-
- if (local->queue_stop_reasons[ac_queue] == 0 &&
- skb_queue_empty(&local->pending[ac_queue]))
- netif_start_subqueue(dev, ac);
- }
- }
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-
- return 0;
- err_del_interface:
- drv_remove_interface(local, sdata);
- err_stop:
- if (!local->open_count)
- drv_stop(local);
- err_del_bss:
- sdata->bss = NULL;
- if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
- mutex_lock(&local->mtx);
- list_del(&sdata->u.vlan.list);
- mutex_unlock(&local->mtx);
- }
- /* might already be clear but that doesn't matter */
- clear_bit(SDATA_STATE_RUNNING, &sdata->state);
- return res;
-}
-
static int ieee80211_open(struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -1142,28 +709,7 @@ static u16 ieee80211_netdev_select_queue(struct net_device *dev,
static void
ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *tstats;
- u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
- unsigned int start;
-
- tstats = per_cpu_ptr(dev->tstats, i);
-
- do {
- start = u64_stats_fetch_begin_irq(&tstats->syncp);
- rx_packets = tstats->rx_packets;
- tx_packets = tstats->tx_packets;
- rx_bytes = tstats->rx_bytes;
- tx_bytes = tstats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
-
- stats->rx_packets += rx_packets;
- stats->tx_packets += tx_packets;
- stats->rx_bytes += rx_bytes;
- stats->tx_bytes += tx_bytes;
- }
+ dev_fetch_sw_netstats(stats, dev->tstats);
}
static const struct net_device_ops ieee80211_dataif_ops = {
@@ -1227,60 +773,547 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = {
.ndo_get_stats64 = ieee80211_get_stats64,
};
-static void __ieee80211_set_hw_80211_encap(struct ieee80211_sub_if_data *sdata,
- bool enable)
+static bool ieee80211_iftype_supports_encap_offload(enum nl80211_iftype iftype)
{
- sdata->dev->netdev_ops = enable ? &ieee80211_dataif_8023_ops :
- &ieee80211_dataif_ops;
- sdata->hw_80211_encap = enable;
+ switch (iftype) {
+ /* P2P GO and client are mapped to AP/STATION types */
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ default:
+ return false;
+ }
}
-bool ieee80211_set_hw_80211_encap(struct ieee80211_vif *vif, bool enable)
+static bool ieee80211_set_sdata_offload_flags(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct ieee80211_local *local = sdata->local;
- struct ieee80211_sub_if_data *iter;
- struct ieee80211_key *key;
+ u32 flags;
+
+ flags = sdata->vif.offload_flags;
+
+ if (ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) &&
+ ieee80211_iftype_supports_encap_offload(sdata->vif.type)) {
+ flags |= IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) &&
+ local->hw.wiphy->frag_threshold != (u32)-1)
+ flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+
+ if (local->monitors)
+ flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ } else {
+ flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ }
+
+ if (sdata->vif.offload_flags == flags)
+ return false;
+
+ sdata->vif.offload_flags = flags;
+ return true;
+}
+
+static void ieee80211_set_vif_encap_ops(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *bss = sdata;
+ bool enabled;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ if (!sdata->bss)
+ return;
+
+ bss = container_of(sdata->bss, struct ieee80211_sub_if_data, u.ap);
+ }
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD) ||
+ !ieee80211_iftype_supports_encap_offload(bss->vif.type))
+ return;
+
+ enabled = bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ if (sdata->wdev.use_4addr &&
+ !(bss->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_4ADDR))
+ enabled = false;
+
+ sdata->dev->netdev_ops = enabled ? &ieee80211_dataif_8023_ops :
+ &ieee80211_dataif_ops;
+}
+
+static void ieee80211_recalc_sdata_offload(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_sub_if_data *vsdata;
+
+ if (ieee80211_set_sdata_offload_flags(sdata)) {
+ drv_update_vif_offload(local, sdata);
+ ieee80211_set_vif_encap_ops(sdata);
+ }
+
+ list_for_each_entry(vsdata, &local->interfaces, list) {
+ if (vsdata->vif.type != NL80211_IFTYPE_AP_VLAN ||
+ vsdata->bss != &sdata->u.ap)
+ continue;
+
+ ieee80211_set_vif_encap_ops(vsdata);
+ }
+}
+
+void ieee80211_recalc_offload(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD))
+ return;
mutex_lock(&local->iflist_mtx);
- list_for_each_entry(iter, &local->interfaces, list) {
- struct ieee80211_sub_if_data *disable = NULL;
-
- if (vif->type == NL80211_IFTYPE_MONITOR) {
- disable = iter;
- __ieee80211_set_hw_80211_encap(iter, false);
- } else if (iter->vif.type == NL80211_IFTYPE_MONITOR) {
- disable = sdata;
- enable = false;
- }
- if (disable)
- sdata_dbg(disable,
- "disable hw 80211 encap due to mon co-exist\n");
+
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ if (!ieee80211_sdata_running(sdata))
+ continue;
+
+ ieee80211_recalc_sdata_offload(sdata);
}
+
mutex_unlock(&local->iflist_mtx);
+}
- if (enable == sdata->hw_80211_encap)
- return enable;
+void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
+ const int offset)
+{
+ struct ieee80211_local *local = sdata->local;
+ u32 flags = sdata->u.mntr.flags;
- if (!sdata->dev)
- return false;
+#define ADJUST(_f, _s) do { \
+ if (flags & MONITOR_FLAG_##_f) \
+ local->fif_##_s += offset; \
+ } while (0)
+
+ ADJUST(FCSFAIL, fcsfail);
+ ADJUST(PLCPFAIL, plcpfail);
+ ADJUST(CONTROL, control);
+ ADJUST(CONTROL, pspoll);
+ ADJUST(OTHER_BSS, other_bss);
+
+#undef ADJUST
+}
+
+static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ int i;
+
+ for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ if (ieee80211_hw_check(&local->hw, QUEUE_CONTROL))
+ sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
+ else if (local->hw.queues >= IEEE80211_NUM_ACS)
+ sdata->vif.hw_queue[i] = i;
+ else
+ sdata->vif.hw_queue[i] = 0;
+ }
+ sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
+}
+
+int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+ int ret;
+
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return 0;
+
+ ASSERT_RTNL();
+
+ if (local->monitor_sdata)
+ return 0;
+
+ sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
+ if (!sdata)
+ return -ENOMEM;
+
+ /* set up data */
+ sdata->local = local;
+ sdata->vif.type = NL80211_IFTYPE_MONITOR;
+ snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
+ wiphy_name(local->hw.wiphy));
+ sdata->wdev.iftype = NL80211_IFTYPE_MONITOR;
+
+ sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
+
+ ieee80211_set_default_queues(sdata);
+
+ ret = drv_add_interface(local, sdata);
+ if (WARN_ON(ret)) {
+ /* ok .. stupid driver, it asked for this! */
+ kfree(sdata);
+ return ret;
+ }
+
+ ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
+ if (ret) {
+ kfree(sdata);
+ return ret;
+ }
+
+ mutex_lock(&local->iflist_mtx);
+ rcu_assign_pointer(local->monitor_sdata, sdata);
+ mutex_unlock(&local->iflist_mtx);
+
+ mutex_lock(&local->mtx);
+ ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
+ IEEE80211_CHANCTX_EXCLUSIVE);
+ mutex_unlock(&local->mtx);
+ if (ret) {
+ mutex_lock(&local->iflist_mtx);
+ RCU_INIT_POINTER(local->monitor_sdata, NULL);
+ mutex_unlock(&local->iflist_mtx);
+ synchronize_net();
+ drv_remove_interface(local, sdata);
+ kfree(sdata);
+ return ret;
+ }
- if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) &&
- (local->hw.wiphy->frag_threshold != (u32)-1))
- enable = false;
+ skb_queue_head_init(&sdata->skb_queue);
+ INIT_WORK(&sdata->work, ieee80211_iface_work);
+
+ return 0;
+}
- mutex_lock(&sdata->local->key_mtx);
- list_for_each_entry(key, &sdata->key_list, list) {
- if (key->conf.cipher == WLAN_CIPHER_SUITE_TKIP)
- enable = false;
+void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
+{
+ struct ieee80211_sub_if_data *sdata;
+
+ if (!ieee80211_hw_check(&local->hw, WANT_MONITOR_VIF))
+ return;
+
+ ASSERT_RTNL();
+
+ mutex_lock(&local->iflist_mtx);
+
+ sdata = rcu_dereference_protected(local->monitor_sdata,
+ lockdep_is_held(&local->iflist_mtx));
+ if (!sdata) {
+ mutex_unlock(&local->iflist_mtx);
+ return;
}
- mutex_unlock(&sdata->local->key_mtx);
- __ieee80211_set_hw_80211_encap(sdata, enable);
+ RCU_INIT_POINTER(local->monitor_sdata, NULL);
+ mutex_unlock(&local->iflist_mtx);
+
+ synchronize_net();
- return enable;
+ mutex_lock(&local->mtx);
+ ieee80211_vif_release_channel(sdata);
+ mutex_unlock(&local->mtx);
+
+ drv_remove_interface(local, sdata);
+
+ kfree(sdata);
+}
+
+/*
+ * NOTE: Be very careful when changing this function, it must NOT return
+ * an error on interface type changes that have been pre-checked, so most
+ * checks should be in ieee80211_check_concurrent_iface.
+ */
+int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ struct net_device *dev = wdev->netdev;
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ u32 changed = 0;
+ int res;
+ u32 hw_reconf_flags = 0;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_WDS:
+ if (!is_valid_ether_addr(sdata->u.wds.remote_addr))
+ return -ENOLINK;
+ break;
+ case NL80211_IFTYPE_AP_VLAN: {
+ struct ieee80211_sub_if_data *master;
+
+ if (!sdata->bss)
+ return -ENOLINK;
+
+ mutex_lock(&local->mtx);
+ list_add(&sdata->u.vlan.list, &sdata->bss->vlans);
+ mutex_unlock(&local->mtx);
+
+ master = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ sdata->control_port_protocol =
+ master->control_port_protocol;
+ sdata->control_port_no_encrypt =
+ master->control_port_no_encrypt;
+ sdata->control_port_over_nl80211 =
+ master->control_port_over_nl80211;
+ sdata->control_port_no_preauth =
+ master->control_port_no_preauth;
+ sdata->vif.cab_queue = master->vif.cab_queue;
+ memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
+ sizeof(sdata->vif.hw_queue));
+ sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+ mutex_lock(&local->key_mtx);
+ sdata->crypto_tx_tailroom_needed_cnt +=
+ master->crypto_tx_tailroom_needed_cnt;
+ mutex_unlock(&local->key_mtx);
+
+ break;
+ }
+ case NL80211_IFTYPE_AP:
+ sdata->bss = &sdata->u.ap;
+ break;
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_MONITOR:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_OCB:
+ case NL80211_IFTYPE_NAN:
+ /* no special treatment */
+ break;
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NUM_NL80211_IFTYPES:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ /* cannot happen */
+ WARN_ON(1);
+ break;
+ }
+
+ if (local->open_count == 0) {
+ res = drv_start(local);
+ if (res)
+ goto err_del_bss;
+ /* we're brought up, everything changes */
+ hw_reconf_flags = ~0;
+ ieee80211_led_radio(local, true);
+ ieee80211_mod_tpt_led_trig(local,
+ IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
+ }
+
+ /*
+ * Copy the hopefully now-present MAC address to
+ * this interface, if it has the special null one.
+ */
+ if (dev && is_zero_ether_addr(dev->dev_addr)) {
+ memcpy(dev->dev_addr,
+ local->hw.wiphy->perm_addr,
+ ETH_ALEN);
+ memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
+
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ res = -EADDRNOTAVAIL;
+ goto err_stop;
+ }
+ }
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ /* no need to tell driver, but set carrier and chanctx */
+ if (rtnl_dereference(sdata->bss->beacon)) {
+ ieee80211_vif_vlan_copy_chanctx(sdata);
+ netif_carrier_on(dev);
+ ieee80211_set_vif_encap_ops(sdata);
+ } else {
+ netif_carrier_off(dev);
+ }
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) {
+ local->cooked_mntrs++;
+ break;
+ }
+
+ if (sdata->u.mntr.flags & MONITOR_FLAG_ACTIVE) {
+ res = drv_add_interface(local, sdata);
+ if (res)
+ goto err_stop;
+ } else if (local->monitors == 0 && local->open_count == 0) {
+ res = ieee80211_add_virtual_monitor(local);
+ if (res)
+ goto err_stop;
+ }
+
+ /* must be before the call to ieee80211_configure_filter */
+ local->monitors++;
+ if (local->monitors == 1) {
+ local->hw.conf.flags |= IEEE80211_CONF_MONITOR;
+ hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
+ }
+
+ ieee80211_adjust_monitor_flags(sdata, 1);
+ ieee80211_configure_filter(local);
+ ieee80211_recalc_offload(local);
+ mutex_lock(&local->mtx);
+ ieee80211_recalc_idle(local);
+ mutex_unlock(&local->mtx);
+
+ netif_carrier_on(dev);
+ break;
+ default:
+ if (coming_up) {
+ ieee80211_del_virtual_monitor(local);
+ ieee80211_set_sdata_offload_flags(sdata);
+
+ res = drv_add_interface(local, sdata);
+ if (res)
+ goto err_stop;
+
+ ieee80211_set_vif_encap_ops(sdata);
+ res = ieee80211_check_queues(sdata,
+ ieee80211_vif_type_p2p(&sdata->vif));
+ if (res)
+ goto err_del_interface;
+ }
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP) {
+ local->fif_pspoll++;
+ local->fif_probe_req++;
+
+ ieee80211_configure_filter(local);
+ } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) {
+ local->fif_probe_req++;
+ }
+
+ if (sdata->vif.probe_req_reg)
+ drv_config_iface_filter(local, sdata,
+ FIF_PROBE_REQ,
+ FIF_PROBE_REQ);
+
+ if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+ sdata->vif.type != NL80211_IFTYPE_NAN)
+ changed |= ieee80211_reset_erp_info(sdata);
+ ieee80211_bss_info_change_notify(sdata, changed);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ case NL80211_IFTYPE_OCB:
+ netif_carrier_off(dev);
+ break;
+ case NL80211_IFTYPE_WDS:
+ case NL80211_IFTYPE_P2P_DEVICE:
+ case NL80211_IFTYPE_NAN:
+ break;
+ default:
+ /* not reached */
+ WARN_ON(1);
+ }
+
+ /*
+ * Set default queue parameters so drivers don't
+ * need to initialise the hardware if the hardware
+ * doesn't start up with sane defaults.
+ * Enable QoS for anything but station interfaces.
+ */
+ ieee80211_set_wmm_default(sdata, true,
+ sdata->vif.type != NL80211_IFTYPE_STATION);
+ }
+
+ set_bit(SDATA_STATE_RUNNING, &sdata->state);
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_WDS:
+ /* Create STA entry for the WDS peer */
+ sta = sta_info_alloc(sdata, sdata->u.wds.remote_addr,
+ GFP_KERNEL);
+ if (!sta) {
+ res = -ENOMEM;
+ goto err_del_interface;
+ }
+
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
+ sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
+ sta_info_pre_move_state(sta, IEEE80211_STA_AUTHORIZED);
+
+ res = sta_info_insert(sta);
+ if (res) {
+ /* STA has been freed */
+ goto err_del_interface;
+ }
+
+ rate_control_rate_init(sta);
+ netif_carrier_on(dev);
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ rcu_assign_pointer(local->p2p_sdata, sdata);
+ break;
+ case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
+ break;
+ list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * set_multicast_list will be invoked by the networking core
+ * which will check whether any increments here were done in
+ * error and sync them down to the hardware as filter flags.
+ */
+ if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
+ atomic_inc(&local->iff_allmultis);
+
+ if (coming_up)
+ local->open_count++;
+
+ if (hw_reconf_flags)
+ ieee80211_hw_config(local, hw_reconf_flags);
+
+ ieee80211_recalc_ps(local);
+
+ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ local->ops->wake_tx_queue) {
+ /* XXX: for AP_VLAN, actually track AP queues */
+ if (dev)
+ netif_tx_start_all_queues(dev);
+ } else if (dev) {
+ unsigned long flags;
+ int n_acs = IEEE80211_NUM_ACS;
+ int ac;
+
+ if (local->hw.queues < IEEE80211_NUM_ACS)
+ n_acs = 1;
+
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ if (sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE ||
+ (local->queue_stop_reasons[sdata->vif.cab_queue] == 0 &&
+ skb_queue_empty(&local->pending[sdata->vif.cab_queue]))) {
+ for (ac = 0; ac < n_acs; ac++) {
+ int ac_queue = sdata->vif.hw_queue[ac];
+
+ if (local->queue_stop_reasons[ac_queue] == 0 &&
+ skb_queue_empty(&local->pending[ac_queue]))
+ netif_start_subqueue(dev, ac);
+ }
+ }
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+
+ return 0;
+ err_del_interface:
+ drv_remove_interface(local, sdata);
+ err_stop:
+ if (!local->open_count)
+ drv_stop(local);
+ err_del_bss:
+ sdata->bss = NULL;
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ mutex_lock(&local->mtx);
+ list_del(&sdata->u.vlan.list);
+ mutex_unlock(&local->mtx);
+ }
+ /* might already be clear but that doesn't matter */
+ clear_bit(SDATA_STATE_RUNNING, &sdata->state);
+ return res;
}
-EXPORT_SYMBOL(ieee80211_set_hw_80211_encap);
static void ieee80211_if_free(struct net_device *dev)
{
@@ -1379,6 +1412,11 @@ static void ieee80211_iface_work(struct work_struct *work)
WARN_ON(1);
break;
}
+ } else if (ieee80211_is_ext(mgmt->frame_control)) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ ieee80211_sta_rx_queued_ext(sdata, skb);
+ else
+ WARN_ON(1);
} else if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *)mgmt;
/*
@@ -1484,7 +1522,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.txpower = INT_MIN; /* unset */
sdata->noack_map = 0;
- sdata->hw_80211_encap = false;
/* only monitor/p2p-device differ */
if (sdata->dev) {
@@ -1619,6 +1656,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ieee80211_teardown_sdata(sdata);
+ ieee80211_set_sdata_offload_flags(sdata);
ret = drv_change_interface(local, sdata, internal_type, p2p);
if (ret)
type = ieee80211_vif_type_p2p(&sdata->vif);
@@ -1631,6 +1669,7 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ieee80211_check_queues(sdata, type);
ieee80211_setup_sdata(sdata, type);
+ ieee80211_set_vif_encap_ops(sdata);
err = ieee80211_do_open(&sdata->wdev, false);
WARN(err, "type change: do_open returned %d", err);
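The offload rework above replaces the per-sdata hw_80211_encap flag with
offload_flags on the vif: ieee80211_set_sdata_offload_flags() computes
whether encap offload is possible, and ieee80211_set_vif_encap_ops()
switches the netdev between the 802.11 and 802.3 TX entry points
accordingly. A condensed restatement of the enable conditions
(illustration only, mirrors the code above):

	static bool encap_offload_possible(struct ieee80211_sub_if_data *sdata)
	{
		struct ieee80211_local *local = sdata->local;

		if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_ENCAP_OFFLOAD))
			return false;	/* hardware can't add 802.11 headers */
		if (!ieee80211_iftype_supports_encap_offload(sdata->vif.type))
			return false;	/* only AP and station (incl. P2P) */
		if (!ieee80211_hw_check(&local->hw, SUPPORTS_TX_FRAG) &&
		    local->hw.wiphy->frag_threshold != (u32)-1)
			return false;	/* software fragmentation needed */
		if (local->monitors)
			return false;	/* monitor interfaces need 802.11 TX */
		return true;
	}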
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 2df636c32432..8c5f829ff6d7 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -177,13 +177,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
}
}
- /* TKIP countermeasures don't work in encap offload mode */
- if (key->conf.cipher == WLAN_CIPHER_SUITE_TKIP &&
- sdata->hw_80211_encap) {
- sdata_dbg(sdata, "TKIP is not allowed in hw 80211 encap mode\n");
- return -EINVAL;
- }
-
ret = drv_set_key(key->local, SET_KEY, sdata,
sta ? &sta->sta : NULL, &key->conf);
@@ -219,14 +212,6 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
case WLAN_CIPHER_SUITE_CCMP_256:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- /* We cannot do software crypto of data frames with
- * encapsulation offload enabled. However for 802.11w to
- * function properly we need cmac/gmac keys.
- */
- if (sdata->hw_80211_encap)
- return -EINVAL;
- fallthrough;
-
case WLAN_CIPHER_SUITE_AES_CMAC:
case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b4a2efe8e83a..523380aed92e 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1168,7 +1168,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT;
}
- local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CSA_COUNTERS_NUM;
+ local->hw.wiphy->max_num_csa_counters = IEEE80211_MAX_CNTDWN_COUNTERS_NUM;
/*
* We use the number of queues for feature tests (QoS, HT) internally
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 7ecd801a943b..ce5825d6f1d1 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -672,7 +672,7 @@ void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
* @hdr: 802.11 frame header
* @fc: frame control field
* @meshda: destination address in the mesh
- * @meshsa: source address address in the mesh. Same as TA, as frame is
+ * @meshsa: source address in the mesh. Same as TA, as frame is
* locally originated.
*
* Return the length of the 802.11 (does not include a mesh control header)
@@ -864,8 +864,8 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
*pos++ = 0x0;
*pos++ = ieee80211_frequency_to_channel(
csa->settings.chandef.chan->center_freq);
- bcn->csa_current_counter = csa->settings.count;
- bcn->csa_counter_offsets[0] = hdr_len + 6;
+ bcn->cntdwn_current_counter = csa->settings.count;
+ bcn->cntdwn_counter_offsets[0] = hdr_len + 6;
*pos++ = csa->settings.count;
*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
*pos++ = 6;
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index bec23d2eee7a..313eee12410e 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -212,7 +212,7 @@ static void prepare_frame_for_deferred_tx(struct ieee80211_sub_if_data *sdata,
skb->priority = 7;
info->control.vif = &sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
ieee80211_mps_set_frame_flags(sdata, NULL, hdr);
}
@@ -1163,7 +1163,7 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
skb_to_free = skb_dequeue(&mpath->frame_queue);
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
ieee80211_set_qos_hdr(sdata, skb);
skb_queue_tail(&mpath->frame_queue, skb);
if (skb_to_free)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 15f2fc658f70..aca26df7587d 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -144,6 +144,7 @@ out:
/**
* mesh_set_ht_prot_mode - set correct HT protection mode
+ * @sdata: the (mesh) interface to handle
*
* Section 9.23.3.5 of IEEE 80211-2012 describes the protection rules for HT
* mesh STA in a MBSS. Three HT protection modes are supported for now, non-HT
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 031e905f684a..204830a55240 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -12,6 +12,7 @@
/**
* mps_qos_null_get - create pre-addressed QoS Null frame for mesh powersave
+ * @sta: the station to get the frame for
*/
static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
{
@@ -44,6 +45,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
/**
* mps_qos_null_tx - send a QoS Null to indicate link-specific power mode
+ * @sta: the station to send to
*/
static void mps_qos_null_tx(struct sta_info *sta)
{
@@ -400,6 +402,8 @@ static void mpsp_trigger_send(struct sta_info *sta, bool rspi, bool eosp)
/**
* mpsp_qos_null_append - append QoS Null frame to MPSP skb queue if needed
+ * @sta: the station to handle
+ * @frames: the frame list to append to
*
* To properly end a mesh MPSP the last transmitted frame has to set the EOSP
* flag in the QoS Control field. In case the current tailing frame is not a
@@ -432,7 +436,7 @@ static void mpsp_qos_null_append(struct sta_info *sta,
info = IEEE80211_SKB_CB(new_skb);
info->control.vif = &sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
__skb_queue_tail(frames, new_skb);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2e400b0ff696..f400240a556f 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -149,6 +149,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
const struct ieee80211_he_operation *he_oper,
+ const struct ieee80211_s1g_oper_ie *s1g_oper,
struct cfg80211_chan_def *chandef, bool tracking)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -171,6 +172,18 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ret = 0;
vht_chandef = *chandef;
goto out;
+ } else if (sband->band == NL80211_BAND_S1GHZ) {
+ if (!ieee80211_chandef_s1g_oper(s1g_oper, chandef)) {
+ sdata_info(sdata,
+ "Missing S1G Operation Element? Trying operating == primary\n");
+ chandef->width = ieee80211_s1g_channel_width(channel);
+ }
+
+ ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_40MHZ |
+ IEEE80211_STA_DISABLE_VHT |
+ IEEE80211_STA_DISABLE_80P80MHZ |
+ IEEE80211_STA_DISABLE_160MHZ;
+ goto out;
}
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
@@ -347,6 +360,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
const struct ieee80211_he_operation *he_oper,
+ const struct ieee80211_s1g_oper_ie *s1g_oper,
const u8 *bssid, u32 *changed)
{
struct ieee80211_local *local = sdata->local;
@@ -393,7 +407,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
/* calculate new channel (type) based on HT/VHT/HE operation IEs */
flags = ieee80211_determine_chantype(sdata, sband, chan, vht_cap_info,
ht_oper, vht_oper, he_oper,
- &chandef, true);
+ s1g_oper, &chandef, true);
/*
* Downgrade the new channel if we associated with restricted
@@ -696,6 +710,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
u32 rates = 0;
+ __le16 listen_int;
struct element *ext_capa = NULL;
/* we know it's writable, cast away the const */
@@ -784,13 +799,15 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
memcpy(mgmt->bssid, assoc_data->bss->bssid, ETH_ALEN);
+ listen_int = cpu_to_le16(sband->band == NL80211_BAND_S1GHZ ?
+ ieee80211_encode_usf(local->hw.conf.listen_interval) :
+ local->hw.conf.listen_interval);
if (!is_zero_ether_addr(assoc_data->prev_bssid)) {
skb_put(skb, 10);
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_REASSOC_REQ);
mgmt->u.reassoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.reassoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
+ mgmt->u.reassoc_req.listen_interval = listen_int;
memcpy(mgmt->u.reassoc_req.current_ap, assoc_data->prev_bssid,
ETH_ALEN);
} else {
@@ -798,8 +815,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_ASSOC_REQ);
mgmt->u.assoc_req.capab_info = cpu_to_le16(capab);
- mgmt->u.assoc_req.listen_interval =
- cpu_to_le16(local->hw.conf.listen_interval);
+ mgmt->u.assoc_req.listen_interval = listen_int;
}
/* SSID */
@@ -809,6 +825,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
*pos++ = assoc_data->ssid_len;
memcpy(pos, assoc_data->ssid, assoc_data->ssid_len);
+ if (sband->band == NL80211_BAND_S1GHZ)
+ goto skip_rates;
+
/* add all rates which were marked to be used above */
supp_rates_len = rates_len;
if (supp_rates_len > 8)
@@ -844,6 +863,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
}
}
+skip_rates:
if (capab & WLAN_CAPABILITY_SPECTRUM_MGMT ||
capab & WLAN_CAPABILITY_RADIO_MEASURE) {
pos = skb_put(skb, 4);
@@ -1018,6 +1038,11 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
pos = ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info);
}
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ ieee80211_add_aid_request_ie(sdata, skb);
+ ieee80211_add_s1g_capab_ie(sdata, &sband->s1g_cap, skb);
+ }
+
/* add any remaining custom (i.e. vendor specific here) IEs */
if (assoc_data->ie_len) {
noffset = assoc_data->ie_len;
@@ -1597,6 +1622,9 @@ static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata,
int new_ap_level;
__le16 capab = mgmt->u.probe_resp.capab_info;
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control))
+ return 0; /* TODO */
+
if (country_ie &&
(capab & cpu_to_le16(WLAN_CAPABILITY_SPECTRUM_MGMT) ||
capab & cpu_to_le16(WLAN_CAPABILITY_RADIO_MEASURE))) {
@@ -2432,23 +2460,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
sdata->encrypt_headroom = IEEE80211_ENCRYPT_HEADROOM;
}
-void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_hdr *hdr)
-{
- /*
- * We can postpone the mgd.timer whenever receiving unicast frames
- * from AP because we know that the connection is working both ways
- * at that time. But multicast frames (and hence also beacons) must
- * be ignored here, because we need to trigger the timer during
- * data idle periods for sending the periodic probe request to the
- * AP we're connected to.
- */
- if (is_multicast_ether_addr(hdr->addr1))
- return;
-
- ieee80211_sta_reset_conn_monitor(sdata);
-}
-
static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -2521,21 +2532,15 @@ void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
{
ieee80211_sta_tx_wmm_ac_notify(sdata, hdr, tx_time);
- if (!ieee80211_is_data(hdr->frame_control))
- return;
-
- if (ieee80211_is_any_nullfunc(hdr->frame_control) &&
- sdata->u.mgd.probe_send_count > 0) {
- if (ack)
- ieee80211_sta_reset_conn_monitor(sdata);
- else
- sdata->u.mgd.nullfunc_failed = true;
- ieee80211_queue_work(&sdata->local->hw, &sdata->work);
+ if (!ieee80211_is_any_nullfunc(hdr->frame_control) ||
+ !sdata->u.mgd.probe_send_count)
return;
- }
if (ack)
- ieee80211_sta_reset_conn_monitor(sdata);
+ sdata->u.mgd.probe_send_count = 0;
+ else
+ sdata->u.mgd.nullfunc_failed = true;
+ ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
static void ieee80211_mlme_send_probe_req(struct ieee80211_sub_if_data *sdata,
@@ -3267,14 +3272,26 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_bss_ies *bss_ies = NULL;
struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
+ bool is_s1g = cbss->channel->band == NL80211_BAND_S1GHZ;
u32 changed = 0;
+ u8 *pos;
int err;
bool ret;
/* AssocResp and ReassocResp have identical structure */
+ pos = mgmt->u.assoc_resp.variable;
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
+ if (is_s1g) {
+ pos = (u8 *) mgmt->u.s1g_assoc_resp.variable;
+ aid = 0; /* TODO */
+ }
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
+ ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, elems,
+ mgmt->bssid, assoc_data->bss->bssid);
+
+ if (elems->aid_resp)
+ aid = le16_to_cpu(elems->aid_resp->aid);
/*
* The 5 MSB of the AID field are reserved
@@ -3291,7 +3308,7 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
ifmgd->broken_ap = true;
}
- if (!elems->supp_rates) {
+ if (!is_s1g && !elems->supp_rates) {
sdata_info(sdata, "no SuppRates element in AssocResp\n");
return false;
}
@@ -3533,7 +3550,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
sta->sta.mfp = false;
}
- sta->sta.wme = elems->wmm_param && local->hw.queues >= IEEE80211_NUM_ACS;
+ sta->sta.wme = (elems->wmm_param || elems->s1g_capab) &&
+ local->hw.queues >= IEEE80211_NUM_ACS;
err = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
if (!err && !(ifmgd->flags & IEEE80211_STA_CONTROL_PORT))
@@ -3548,6 +3566,9 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
goto out;
}
+ if (sdata->wdev.use_4addr)
+ drv_sta_set_4addr(local, sdata, &sta->sta, true);
+
mutex_unlock(&sdata->local->sta_mtx);
/*
@@ -3605,8 +3626,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
* Start timer to probe the connection to the AP now.
* Also start the timer that will detect beacon loss.
*/
- ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
ieee80211_sta_reset_beacon_monitor(sdata);
+ ieee80211_sta_reset_conn_monitor(sdata);
ret = true;
out:
@@ -3625,7 +3646,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
int ac, uapsd_queues = -1;
u8 *pos;
bool reassoc;
- struct cfg80211_bss *bss;
+ struct cfg80211_bss *cbss;
struct ieee80211_event event = {
.type = MLME_EVENT,
.u.mlme.data = ASSOC_EVENT,
@@ -3635,9 +3656,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
if (!assoc_data)
return;
+
if (!ether_addr_equal(assoc_data->bss->bssid, mgmt->bssid))
return;
+ cbss = assoc_data->bss;
+
/*
* AssocResp and ReassocResp have identical structure, so process both
* of them in this function.
@@ -3649,7 +3673,12 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
reassoc = ieee80211_is_reassoc_resp(mgmt->frame_control);
capab_info = le16_to_cpu(mgmt->u.assoc_resp.capab_info);
status_code = le16_to_cpu(mgmt->u.assoc_resp.status_code);
+ pos = mgmt->u.assoc_resp.variable;
aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
+ if (cbss->channel->band == NL80211_BAND_S1GHZ) {
+ pos = (u8 *) mgmt->u.s1g_assoc_resp.variable;
+ aid = 0; /* TODO */
+ }
sdata_info(sdata,
"RX %sssocResp from %pM (capab=0x%x status=%d aid=%d)\n",
@@ -3660,7 +3689,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
fils_decrypt_assoc_resp(sdata, (u8 *)mgmt, &len, assoc_data) < 0)
return;
- pos = mgmt->u.assoc_resp.variable;
ieee802_11_parse_elems(pos, len - (pos - (u8 *)mgmt), false, &elems,
mgmt->bssid, assoc_data->bss->bssid);
@@ -3680,8 +3708,6 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
return;
}
- bss = assoc_data->bss;
-
if (status_code != WLAN_STATUS_SUCCESS) {
sdata_info(sdata, "%pM denied association (code=%d)\n",
mgmt->sa, status_code);
@@ -3690,10 +3716,10 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
event.u.mlme.reason = status_code;
drv_event_callback(sdata->local, sdata, &event);
} else {
- if (!ieee80211_assoc_success(sdata, bss, mgmt, len, &elems)) {
+ if (!ieee80211_assoc_success(sdata, cbss, mgmt, len, &elems)) {
/* oops -- internal error -- send timeout for now */
ieee80211_destroy_assoc_data(sdata, false, false);
- cfg80211_assoc_timeout(sdata->dev, bss);
+ cfg80211_assoc_timeout(sdata->dev, cbss);
return;
}
event.u.mlme.status = MLME_SUCCESS;
@@ -3714,7 +3740,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
uapsd_queues |= ieee80211_ac_to_qos_mask[ac];
}
- cfg80211_rx_assoc_resp(sdata->dev, bss, (u8 *)mgmt, len, uapsd_queues,
+ cfg80211_rx_assoc_resp(sdata->dev, cbss, (u8 *)mgmt, len, uapsd_queues,
ifmgd->assoc_req_ies, ifmgd->assoc_req_ies_len);
}
@@ -3913,11 +3939,12 @@ static bool ieee80211_rx_our_beacon(const u8 *tx_bssid,
}
static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
- struct ieee80211_mgmt *mgmt, size_t len,
+ struct ieee80211_hdr *hdr, size_t len,
struct ieee80211_rx_status *rx_status)
{
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+ struct ieee80211_mgmt *mgmt = (void *) hdr;
size_t baselen;
struct ieee802_11_elems elems;
struct ieee80211_local *local = sdata->local;
@@ -3927,14 +3954,24 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
u32 changed = 0;
bool erp_valid;
u8 erp_value = 0;
- u32 ncrc;
- u8 *bssid;
+ u32 ncrc = 0;
+ u8 *bssid, *variable = mgmt->u.beacon.variable;
u8 deauth_buf[IEEE80211_DEAUTH_FRAME_LEN];
sdata_assert_lock(sdata);
/* Process beacon from the current BSS */
- baselen = (u8 *) mgmt->u.beacon.variable - (u8 *) mgmt;
+ bssid = ieee80211_get_bssid(hdr, len, sdata->vif.type);
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ struct ieee80211_ext *ext = (void *) mgmt;
+
+ if (ieee80211_is_s1g_short_beacon(ext->frame_control))
+ variable = ext->u.s1g_short_beacon.variable;
+ else
+ variable = ext->u.s1g_beacon.variable;
+ }
+
+ baselen = (u8 *) variable - (u8 *) mgmt;
if (baselen > len)
return;
@@ -3954,10 +3991,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
if (ifmgd->assoc_data && ifmgd->assoc_data->need_beacon &&
- ieee80211_rx_our_beacon(mgmt->bssid, ifmgd->assoc_data->bss)) {
- ieee802_11_parse_elems(mgmt->u.beacon.variable,
+ ieee80211_rx_our_beacon(bssid, ifmgd->assoc_data->bss)) {
+ ieee802_11_parse_elems(variable,
len - baselen, false, &elems,
- mgmt->bssid,
+ bssid,
ifmgd->assoc_data->bss->bssid);
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status);
@@ -3990,7 +4027,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
}
if (!ifmgd->associated ||
- !ieee80211_rx_our_beacon(mgmt->bssid, ifmgd->associated))
+ !ieee80211_rx_our_beacon(bssid, ifmgd->associated))
return;
bssid = ifmgd->associated->bssid;
@@ -4010,8 +4047,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
*/
ieee80211_sta_reset_beacon_monitor(sdata);
- ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
- ncrc = ieee802_11_parse_elems_crc(mgmt->u.beacon.variable,
+ /* TODO: CRC currently not calculated on S1G Beacon Compatibility
+ * element (which carries the beacon interval). Don't forget to add a
+ * bit to care_about_ies[] above if mac80211 is interested in a
+ * changing S1G element.
+ */
+ if (!ieee80211_is_s1g_beacon(hdr->frame_control))
+ ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4);
+ ncrc = ieee802_11_parse_elems_crc(variable,
len - baselen, false, &elems,
care_about_ies, ncrc,
mgmt->bssid, bssid);
@@ -4045,7 +4088,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_p2p_noa_attr noa = {};
int ret;
- ret = cfg80211_get_p2p_attr(mgmt->u.beacon.variable,
+ ret = cfg80211_get_p2p_attr(variable,
len - baselen,
IEEE80211_P2P_ATTR_ABSENCE_NOTICE,
(u8 *) &noa, sizeof(noa));
@@ -4081,7 +4124,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
* the driver will use them. The synchronized view is currently
* guaranteed only in certain callbacks.
*/
- if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY)) {
+ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) &&
+ !ieee80211_is_s1g_beacon(hdr->frame_control)) {
sdata->vif.bss_conf.sync_tsf =
le64_to_cpu(mgmt->u.beacon.timestamp);
sdata->vif.bss_conf.sync_device_ts =
@@ -4089,7 +4133,8 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.sync_dtim_count = elems.dtim_count;
}
- if (ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid)
+ if ((ncrc == ifmgd->beacon_crc && ifmgd->beacon_crc_valid) ||
+ ieee80211_is_s1g_short_beacon(mgmt->frame_control))
return;
ifmgd->beacon_crc = ncrc;
ifmgd->beacon_crc_valid = true;
@@ -4130,9 +4175,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
} else {
erp_valid = false;
}
- changed |= ieee80211_handle_bss_capability(sdata,
- le16_to_cpu(mgmt->u.beacon.capab_info),
- erp_valid, erp_value);
+
+ if (!ieee80211_is_s1g_beacon(hdr->frame_control))
+ changed |= ieee80211_handle_bss_capability(sdata,
+ le16_to_cpu(mgmt->u.beacon.capab_info),
+ erp_valid, erp_value);
mutex_lock(&local->sta_mtx);
sta = sta_info_get(sdata, bssid);
@@ -4142,7 +4189,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
if (ieee80211_config_bw(sdata, sta, elems.ht_cap_elem,
elems.vht_cap_elem, elems.ht_operation,
elems.vht_operation, elems.he_operation,
- bssid, &changed)) {
+ elems.s1g_oper, bssid, &changed)) {
mutex_unlock(&local->sta_mtx);
sdata_info(sdata,
"failed to follow AP %pM bandwidth change, disconnect\n",
@@ -4170,6 +4217,26 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_bss_info_change_notify(sdata, changed);
}
+void ieee80211_sta_rx_queued_ext(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct ieee80211_hdr *hdr;
+ u16 fc;
+
+ rx_status = (struct ieee80211_rx_status *) skb->cb;
+ hdr = (struct ieee80211_hdr *) skb->data;
+ fc = le16_to_cpu(hdr->frame_control);
+
+ sdata_lock(sdata);
+ switch (fc & IEEE80211_FCTL_STYPE) {
+ case IEEE80211_STYPE_S1G_BEACON:
+ ieee80211_rx_mgmt_beacon(sdata, hdr, skb->len, rx_status);
+ break;
+ }
+ sdata_unlock(sdata);
+}
+
void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb)
{
@@ -4187,7 +4254,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
switch (fc & IEEE80211_FCTL_STYPE) {
case IEEE80211_STYPE_BEACON:
- ieee80211_rx_mgmt_beacon(sdata, mgmt, skb->len, rx_status);
+ ieee80211_rx_mgmt_beacon(sdata, (void *)mgmt,
+ skb->len, rx_status);
break;
case IEEE80211_STYPE_PROBE_RESP:
ieee80211_rx_mgmt_probe_resp(sdata, skb);
@@ -4577,10 +4645,26 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t)
from_timer(sdata, t, u.mgd.conn_mon_timer);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ unsigned long timeout;
if (sdata->vif.csa_active && !ifmgd->csa_waiting_bcn)
return;
+ sta = sta_info_get(sdata, ifmgd->bssid);
+ if (!sta)
+ return;
+
+ timeout = sta->status_stats.last_ack;
+ if (time_before(sta->status_stats.last_ack, sta->rx_stats.last_rx))
+ timeout = sta->rx_stats.last_rx;
+ timeout += IEEE80211_CONNECTION_IDLE_TIME;
+
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&ifmgd->conn_mon_timer, round_jiffies_up(timeout));
+ return;
+ }
+
ieee80211_queue_work(&local->hw, &ifmgd->monitor_work);
}
@@ -4858,6 +4942,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
const struct ieee80211_ht_operation *ht_oper = NULL;
const struct ieee80211_vht_operation *vht_oper = NULL;
const struct ieee80211_he_operation *he_oper = NULL;
+ const struct ieee80211_s1g_oper_ie *s1g_oper = NULL;
struct ieee80211_supported_band *sband;
struct cfg80211_chan_def chandef;
bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
@@ -4961,10 +5046,23 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
if (!have_80mhz)
ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ const u8 *s1g_oper_ie;
+
+ s1g_oper_ie = ieee80211_bss_get_ie(cbss,
+ WLAN_EID_S1G_OPERATION);
+ if (s1g_oper_ie && s1g_oper_ie[1] >= sizeof(*s1g_oper))
+ s1g_oper = (void *)(s1g_oper_ie + 2);
+ else
+ sdata_info(sdata,
+ "AP missing S1G operation element?\n");
+ }
+
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
bss->vht_cap_info,
ht_oper, vht_oper, he_oper,
+ s1g_oper,
&chandef, false);
sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
@@ -5091,6 +5189,12 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_bss_ies *ies;
int shift = ieee80211_vif_get_shift(&sdata->vif);
+ /* TODO: S1G Basic Rate Set is expressed elsewhere */
+ if (cbss->channel->band == NL80211_BAND_S1GHZ) {
+ ieee80211_s1g_sta_rate_init(new_sta);
+ goto skip_rates;
+ }
+
ieee80211_get_rates(sband, bss->supp_rates,
bss->supp_rates_len,
&rates, &basic_rates,
@@ -5135,6 +5239,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
else
sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+skip_rates:
memcpy(ifmgd->bssid, cbss->bssid, ETH_ALEN);
/* set timing information */
@@ -5462,6 +5567,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
sizeof(ifmgd->vht_capa_mask));
+ memcpy(&ifmgd->s1g_capa, &req->s1g_capa, sizeof(ifmgd->s1g_capa));
+ memcpy(&ifmgd->s1g_capa_mask, &req->s1g_capa_mask,
+ sizeof(ifmgd->s1g_capa_mask));
+
if (req->ie && req->ie_len) {
memcpy(assoc_data->ie, req->ie, req->ie_len);
assoc_data->ie_len = req->ie_len;
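
The reworked ieee80211_sta_conn_mon_timer() above stops probing the AP on every
timer expiry: it computes a deadline from the most recent activity (the newer of
the last ACKed TX and the last RX) plus IEEE80211_CONNECTION_IDLE_TIME, and only
queues the monitor work once that deadline has passed. A minimal standalone
sketch of the arithmetic, assuming wrap-safe jiffies comparison as in the
kernel's time_before(); the function and parameter names here are illustrative,
not kernel API:

    #include <stdbool.h>

    /* wrap-safe "a is earlier than b", like the kernel's time_before() */
    static bool time_before_wrap(unsigned long a, unsigned long b)
    {
            return (long)(a - b) < 0;
    }

    /*
     * Return true if probing should be deferred; *deadline then holds the
     * time to rearm the timer with. last_ack/last_rx stand in for
     * sta->status_stats.last_ack and sta->rx_stats.last_rx.
     */
    static bool conn_mon_defer(unsigned long now, unsigned long last_ack,
                               unsigned long last_rx, unsigned long idle_time,
                               unsigned long *deadline)
    {
            unsigned long latest = last_ack;

            if (time_before_wrap(last_ack, last_rx))
                    latest = last_rx;               /* RX was more recent */

            *deadline = latest + idle_time;
            return time_before_wrap(now, *deadline); /* still inside idle window */
    }
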
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f470d1a7ce9b..853c9a369d72 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -26,8 +26,7 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-
- local->offchannel_ps_enabled = false;
+ bool offchannel_ps_enabled = false;
/* FIXME: what to do when local->pspolling is true? */
@@ -38,12 +37,12 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
cancel_work_sync(&local->dynamic_ps_enable_work);
if (local->hw.conf.flags & IEEE80211_CONF_PS) {
- local->offchannel_ps_enabled = true;
+ offchannel_ps_enabled = true;
local->hw.conf.flags &= ~IEEE80211_CONF_PS;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
}
- if (!local->offchannel_ps_enabled ||
+ if (!offchannel_ps_enabled ||
!ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK))
/*
* If power save was enabled, no need to send a nullfunc
@@ -58,38 +57,19 @@ static void ieee80211_offchannel_ps_enable(struct ieee80211_sub_if_data *sdata)
ieee80211_send_nullfunc(local, sdata, true);
}
-/* inform AP that we are awake again, unless power save is enabled */
+/* inform AP that we are awake again */
static void ieee80211_offchannel_ps_disable(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
if (!local->ps_sdata)
ieee80211_send_nullfunc(local, sdata, false);
- else if (local->offchannel_ps_enabled) {
- /*
- * In !IEEE80211_HW_PS_NULLFUNC_STACK case the hardware
- * will send a nullfunc frame with the powersave bit set
- * even though the AP already knows that we are sleeping.
- * This could be avoided by sending a null frame with power
- * save bit disabled before enabling the power save, but
- * this doesn't gain anything.
- *
- * When IEEE80211_HW_PS_NULLFUNC_STACK is enabled, no need
- * to send a nullfunc frame because AP already knows that
- * we are sleeping, let's just enable power save mode in
- * hardware.
- */
- /* TODO: Only set hardware if CONF_PS changed?
- * TODO: Should we set offchannel_ps_enabled to false?
- */
- local->hw.conf.flags |= IEEE80211_CONF_PS;
- ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_PS);
- } else if (local->hw.conf.dynamic_ps_timeout > 0) {
+ else if (local->hw.conf.dynamic_ps_timeout > 0) {
/*
- * If IEEE80211_CONF_PS was not set and the dynamic_ps_timer
- * had been running before leaving the operating channel,
- * restart the timer now and send a nullfunc frame to inform
- * the AP that we are awake.
+ * The dynamic_ps_timer had been running before we left the
+ * operating channel, so restart it now and send a nullfunc frame
+ * to inform the AP that we are awake and it can deliver any
+ * buffered packets.
*/
ieee80211_send_nullfunc(local, sdata, false);
mod_timer(&local->dynamic_ps_timer, jiffies +
@@ -916,7 +896,7 @@ int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (beacon)
for (i = 0; i < params->n_csa_offsets; i++)
data[params->csa_offsets[i]] =
- beacon->csa_current_counter;
+ beacon->cntdwn_current_counter;
rcu_read_unlock();
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index b051f125d3af..45927202c71c 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -51,6 +51,13 @@ void rate_control_rate_init(struct sta_info *sta)
sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
+ /* TODO: check for minstrel_s1g ? */
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ ieee80211_s1g_sta_rate_init(sta);
+ rcu_read_unlock();
+ return;
+ }
+
spin_lock_bh(&sta->rate_ctrl_lock);
ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
priv_sta);
@@ -266,10 +273,15 @@ void ieee80211_check_rate_mask(struct ieee80211_sub_if_data *sdata)
if (WARN_ON(!sdata->vif.bss_conf.chandef.chan))
return;
+ band = sdata->vif.bss_conf.chandef.chan->band;
+ if (band == NL80211_BAND_S1GHZ) {
+ /* TODO */
+ return;
+ }
+
if (WARN_ON_ONCE(!basic_rates))
return;
- band = sdata->vif.bss_conf.chandef.chan->band;
user_mask = sdata->rc_rateidx_mask[band];
sband = local->hw.wiphy->bands[band];
@@ -296,21 +308,29 @@ static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
!ieee80211_is_data(fc);
}
-static void rc_send_low_basicrate(s8 *idx, u32 basic_rates,
+static void rc_send_low_basicrate(struct ieee80211_tx_rate *rate,
+ u32 basic_rates,
struct ieee80211_supported_band *sband)
{
u8 i;
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ /* TODO */
+ rate->flags |= IEEE80211_TX_RC_S1G_MCS;
+ rate->idx = 0;
+ return;
+ }
+
if (basic_rates == 0)
return; /* assume basic rates unknown and accept rate */
- if (*idx < 0)
+ if (rate->idx < 0)
return;
- if (basic_rates & (1 << *idx))
+ if (basic_rates & (1 << rate->idx))
return; /* selected rate is a basic rate */
- for (i = *idx + 1; i <= sband->n_bitrates; i++) {
+ for (i = rate->idx + 1; i <= sband->n_bitrates; i++) {
if (basic_rates & (1 << i)) {
- *idx = i;
+ rate->idx = i;
return;
}
}
@@ -328,6 +348,12 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
u32 rate_flags =
ieee80211_chandef_rate_flags(&hw->conf.chandef);
+ if (sband->band == NL80211_BAND_S1GHZ) {
+ info->control.rates[0].flags |= IEEE80211_TX_RC_S1G_MCS;
+ info->control.rates[0].idx = 0;
+ return;
+ }
+
if ((sband->band == NL80211_BAND_2GHZ) &&
(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
rate_flags |= IEEE80211_RATE_ERP_G;
@@ -388,7 +414,7 @@ static bool rate_control_send_low(struct ieee80211_sta *pubsta,
}
if (use_basicrate)
- rc_send_low_basicrate(&info->control.rates[0].idx,
+ rc_send_low_basicrate(&info->control.rates[0],
txrc->bss_conf->basic_rates,
sband);
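
rc_send_low_basicrate() now takes the whole struct ieee80211_tx_rate so the S1G
path can set IEEE80211_TX_RC_S1G_MCS on the flags as well as the index. For the
non-S1G path the behaviour is unchanged: keep the chosen rate if it is a basic
rate, otherwise bump up to the next higher basic rate. A standalone restatement
of that selection, with n_rates standing in for sband->n_bitrates (illustrative
only, not kernel API):

    #include <stdint.h>

    /* returns the rate index to use for a "low rate" transmission */
    static int next_basic_rate(int idx, uint32_t basic_rates, int n_rates)
    {
            int i;

            if (!basic_rates || idx < 0)
                    return idx;          /* basic rates unknown: accept as-is */
            if (basic_rates & (1u << idx))
                    return idx;          /* already a basic rate */
            for (i = idx + 1; i < n_rates; i++)
                    if (basic_rates & (1u << i))
                            return i;    /* next higher basic rate */
            return idx;                  /* none found, keep the original */
    }
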
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index a959ebf56852..1e2e5a406d58 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -42,51 +42,6 @@ static inline void ieee80211_rx_stats(struct net_device *dev, u32 len)
u64_stats_update_end(&tstats->syncp);
}
-static u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
- enum nl80211_iftype type)
-{
- __le16 fc = hdr->frame_control;
-
- if (ieee80211_is_data(fc)) {
- if (len < 24) /* drop incorrect hdr len (data) */
- return NULL;
-
- if (ieee80211_has_a4(fc))
- return NULL;
- if (ieee80211_has_tods(fc))
- return hdr->addr1;
- if (ieee80211_has_fromds(fc))
- return hdr->addr2;
-
- return hdr->addr3;
- }
-
- if (ieee80211_is_mgmt(fc)) {
- if (len < 24) /* drop incorrect hdr len (mgmt) */
- return NULL;
- return hdr->addr3;
- }
-
- if (ieee80211_is_ctl(fc)) {
- if (ieee80211_is_pspoll(fc))
- return hdr->addr1;
-
- if (ieee80211_is_back_req(fc)) {
- switch (type) {
- case NL80211_IFTYPE_STATION:
- return hdr->addr2;
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_AP_VLAN:
- return hdr->addr1;
- default:
- break; /* fall through to the return */
- }
- }
- }
-
- return NULL;
-}
-
/*
* monitor mode reception
*
@@ -1802,7 +1757,8 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
}
} else if (rx->sdata->vif.type == NL80211_IFTYPE_OCB) {
sta->rx_stats.last_rx = jiffies;
- } else if (!is_multicast_ether_addr(hdr->addr1)) {
+ } else if (!ieee80211_is_s1g_beacon(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
/*
* Mesh beacons will update last_rx if they are found to
* match the current local configuration when processed.
@@ -1812,9 +1768,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
sta->rx_stats.last_rate = sta_stats_encode_rate(status);
}
- if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
- ieee80211_sta_rx_notify(rx->sdata, hdr);
-
sta->rx_stats.fragments++;
u64_stats_update_begin(&rx->sta->rx_stats.syncp);
@@ -1840,6 +1793,9 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
}
}
+ if (ieee80211_is_s1g_beacon(hdr->frame_control))
+ return RX_CONTINUE;
+
/*
* Change STA power saving mode only at the end of a frame
* exchange sequence, and only for a data or management
@@ -1950,6 +1906,9 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
__le16 fc;
const struct ieee80211_cipher_scheme *cs = NULL;
+ if (ieee80211_is_ext(hdr->frame_control))
+ return RX_CONTINUE;
+
/*
* Key selection 101
*
@@ -2258,7 +2217,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
- if (ieee80211_is_ctl(fc))
+ if (ieee80211_is_ctl(fc) || ieee80211_is_ext(fc))
return RX_CONTINUE;
sc = le16_to_cpu(hdr->seq_ctrl);
@@ -2900,7 +2859,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
info = IEEE80211_SKB_CB(fwd_skb);
memset(info, 0, sizeof(*info));
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->control.vif = &rx->sdata->vif;
info->control.jiffies = jiffies;
if (is_multicast_ether_addr(fwd_hdr->addr1)) {
@@ -3132,6 +3091,9 @@ ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control))
+ return RX_CONTINUE;
+
/*
* From here on, look only at management frames.
* Data and control frames are already handled,
@@ -3599,6 +3561,27 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
}
static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_ext(struct ieee80211_rx_data *rx)
+{
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
+
+ if (!ieee80211_is_ext(hdr->frame_control))
+ return RX_CONTINUE;
+
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
+ return RX_DROP_MONITOR;
+
+ /* for now only beacons are ext, so queue them */
+ skb_queue_tail(&sdata->skb_queue, rx->skb);
+ ieee80211_queue_work(&rx->local->hw, &sdata->work);
+ if (rx->sta)
+ rx->sta->rx_stats.packets++;
+
+ return RX_QUEUED;
+}
+
+static ieee80211_rx_result debug_noinline
ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
{
struct ieee80211_sub_if_data *sdata = rx->sdata;
@@ -3817,6 +3800,7 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
CALL_RXH(ieee80211_rx_h_userspace_mgmt);
CALL_RXH(ieee80211_rx_h_action_post_userspace);
CALL_RXH(ieee80211_rx_h_action_return);
+ CALL_RXH(ieee80211_rx_h_ext);
CALL_RXH(ieee80211_rx_h_mgmt);
rxh_next:
@@ -3983,7 +3967,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx)
struct ieee80211_hdr *hdr = (void *)skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
- bool multicast = is_multicast_ether_addr(hdr->addr1);
+ bool multicast = is_multicast_ether_addr(hdr->addr1) ||
+ ieee80211_is_s1g_beacon(hdr->frame_control);
switch (sdata->vif.type) {
case NL80211_IFTYPE_STATION:
@@ -4149,7 +4134,6 @@ void ieee80211_check_fast_rx(struct sta_info *sta)
fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr2);
fastrx.expected_ds_bits = 0;
} else {
- fastrx.sta_notify = sdata->u.mgd.probe_send_count > 0;
fastrx.da_offs = offsetof(struct ieee80211_hdr, addr1);
fastrx.sa_offs = offsetof(struct ieee80211_hdr, addr3);
fastrx.expected_ds_bits =
@@ -4379,11 +4363,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
pskb_trim(skb, skb->len - fast_rx->icv_len))
goto drop;
- if (unlikely(fast_rx->sta_notify)) {
- ieee80211_sta_rx_notify(rx->sdata, hdr);
- fast_rx->sta_notify = false;
- }
-
/* statistics part of ieee80211_rx_h_sta_process() */
if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
stats->last_signal = status->signal;
@@ -4587,7 +4566,8 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
ieee80211_verify_alignment(&rx);
if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
- ieee80211_is_beacon(hdr->frame_control)))
+ ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_s1g_beacon(hdr->frame_control)))
ieee80211_scan_rx(local, skb);
if (ieee80211_is_data(fc)) {
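
S1G beacons are carried in 802.11 "extension" frames rather than management
frames, which is why the RX path above gains early ieee80211_is_ext() /
ieee80211_is_s1g_beacon() bail-outs and a dedicated ieee80211_rx_h_ext()
handler. Classification is purely frame-control bit matching; a standalone
sketch, with the mask/value constants copied on the assumption that they match
include/linux/ieee80211.h in this series (fc is the frame control field after
le16_to_cpu()):

    #include <stdbool.h>
    #include <stdint.h>

    #define FCTL_FTYPE        0x000c  /* assumed == IEEE80211_FCTL_FTYPE */
    #define FCTL_STYPE        0x00f0  /* assumed == IEEE80211_FCTL_STYPE */
    #define FTYPE_EXT         0x000c  /* assumed == IEEE80211_FTYPE_EXT */
    #define STYPE_S1G_BEACON  0x0010  /* assumed == IEEE80211_STYPE_S1G_BEACON */

    static bool is_ext_frame(uint16_t fc)
    {
            return (fc & FCTL_FTYPE) == FTYPE_EXT;
    }

    static bool is_s1g_beacon(uint16_t fc)
    {
            return (fc & (FCTL_FTYPE | FCTL_STYPE)) ==
                   (FTYPE_EXT | STYPE_S1G_BEACON);
    }
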
diff --git a/net/mac80211/s1g.c b/net/mac80211/s1g.c
new file mode 100644
index 000000000000..c33f332b049a
--- /dev/null
+++ b/net/mac80211/s1g.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * S1G handling
+ * Copyright(c) 2020 Adapt-IP
+ */
+#include <linux/ieee80211.h>
+#include <net/mac80211.h>
+#include "ieee80211_i.h"
+
+void ieee80211_s1g_sta_rate_init(struct sta_info *sta)
+{
+ /* avoid indicating legacy bitrates for S1G STAs */
+ sta->tx_stats.last_rate.flags |= IEEE80211_TX_RC_S1G_MCS;
+ sta->rx_stats.last_rate =
+ STA_STATS_FIELD(TYPE, STA_STATS_RATE_TYPE_S1G);
+}
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5ac2785cdc7b..d4cc9ac2d703 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -9,7 +9,7 @@
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
* Copyright 2013-2015 Intel Mobile Communications GmbH
* Copyright 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/if_arp.h>
@@ -146,7 +146,8 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct ieee80211_mgmt *mgmt, size_t len,
struct ieee80211_channel *channel)
{
- bool beacon = ieee80211_is_beacon(mgmt->frame_control);
+ bool beacon = ieee80211_is_beacon(mgmt->frame_control) ||
+ ieee80211_is_s1g_beacon(mgmt->frame_control);
struct cfg80211_bss *cbss, *non_tx_cbss;
struct ieee80211_bss *bss, *non_tx_bss;
struct cfg80211_inform_bss bss_meta = {
@@ -195,6 +196,11 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
elements = mgmt->u.probe_resp.variable;
baselen = offsetof(struct ieee80211_mgmt,
u.probe_resp.variable);
+ } else if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ struct ieee80211_ext *ext = (void *) mgmt;
+
+ baselen = offsetof(struct ieee80211_ext, u.s1g_beacon.variable);
+ elements = ext->u.s1g_beacon.variable;
} else {
baselen = offsetof(struct ieee80211_mgmt, u.beacon.variable);
elements = mgmt->u.beacon.variable;
@@ -246,9 +252,12 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
struct ieee80211_bss *bss;
struct ieee80211_channel *channel;
- if (skb->len < 24 ||
- (!ieee80211_is_probe_resp(mgmt->frame_control) &&
- !ieee80211_is_beacon(mgmt->frame_control)))
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ if (skb->len < 15)
+ return;
+ } else if (skb->len < 24 ||
+ (!ieee80211_is_probe_resp(mgmt->frame_control) &&
+ !ieee80211_is_beacon(mgmt->frame_control)))
return;
sdata1 = rcu_dereference(local->scan_sdata);
@@ -712,6 +721,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
req->duration_mandatory;
local->hw_scan_band = 0;
+ local->hw_scan_req->req.n_6ghz_params = req->n_6ghz_params;
+ local->hw_scan_req->req.scan_6ghz_params =
+ req->scan_6ghz_params;
+ local->hw_scan_req->req.scan_6ghz = req->scan_6ghz;
/*
* After allocating local->hw_scan_req, we must
@@ -905,6 +918,17 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
local->scan_chandef.center_freq1 = chan->center_freq;
local->scan_chandef.freq1_offset = chan->freq_offset;
local->scan_chandef.center_freq2 = 0;
+
+ /* For scanning on the S1G band, ignore scan_width (which is constant
+ * across all channels) for now, since channel width is specific to each
+ * channel. Detect the required channel width here; this will likely be
+ * revisited later. Maybe scan_width could be used to build the scan list?
+ */
+ if (chan->band == NL80211_BAND_S1GHZ) {
+ local->scan_chandef.width = ieee80211_s1g_channel_width(chan);
+ goto set_channel;
+ }
+
switch (scan_req->scan_width) {
case NL80211_BSS_CHAN_WIDTH_5:
local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
@@ -925,8 +949,14 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
else
local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
break;
+ case NL80211_BSS_CHAN_WIDTH_1:
+ case NL80211_BSS_CHAN_WIDTH_2:
+ /* shouldn't get here, S1G handled above */
+ WARN_ON(1);
+ break;
}
+set_channel:
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
@@ -1124,7 +1154,8 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
int max_n;
for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!local->hw.wiphy->bands[band])
+ if (!local->hw.wiphy->bands[band] ||
+ band == NL80211_BAND_6GHZ)
continue;
max_n = local->hw.wiphy->bands[band]->n_channels;
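
On the S1G band the scan code above ignores the request's scan_width and
derives the channel width per channel, since each S1G channel has exactly one
allowed width. A hypothetical mapping from a channel's width in MHz to a
chandef width; the real ieee80211_s1g_channel_width() reads per-channel flags
instead of a width argument:

    enum chan_width { W_1MHZ, W_2MHZ, W_4MHZ, W_8MHZ, W_16MHZ, W_20_NOHT };

    static enum chan_width s1g_chan_width(int mhz)
    {
            switch (mhz) {
            case 1:  return W_1MHZ;
            case 2:  return W_2MHZ;
            case 4:  return W_4MHZ;
            case 8:  return W_8MHZ;
            case 16: return W_16MHZ;
            default: return W_20_NOHT;      /* not a valid S1G width */
            }
    }
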
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f2840d1d95cf..fb4f2b9b294f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2122,6 +2122,10 @@ static void sta_stats_decode_rate(struct ieee80211_local *local, u32 rate,
int rate_idx = STA_STATS_GET(LEGACY_IDX, rate);
sband = local->hw.wiphy->bands[band];
+
+ if (WARN_ON_ONCE(!sband->bitrates))
+ break;
+
brate = sband->bitrates[rate_idx].bitrate;
if (rinfo->bw == RATE_INFO_BW_5)
shift = 2;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d5010116cf4d..00ae81e9e1a1 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -336,7 +336,6 @@ struct ieee80211_fast_tx {
* @expected_ds_bits: from/to DS bits expected
* @icv_len: length of the MIC if present
* @key: bool indicating encryption is expected (key is set)
- * @sta_notify: notify the MLME code (once)
* @internal_forward: forward frames internally on AP/VLAN type interfaces
* @uses_rss: copy of USES_RSS hw flag
* @da_offs: offset of the DA in the header (for header conversion)
@@ -352,7 +351,6 @@ struct ieee80211_fast_rx {
__le16 expected_ds_bits;
u8 icv_len;
u8 key:1,
- sta_notify:1,
internal_forward:1,
uses_rss:1;
u8 da_offs, sa_offs;
@@ -825,6 +823,7 @@ enum sta_stats_type {
STA_STATS_RATE_TYPE_HT,
STA_STATS_RATE_TYPE_VHT,
STA_STATS_RATE_TYPE_HE,
+ STA_STATS_RATE_TYPE_S1G,
};
#define STA_STATS_FIELD_HT_MCS GENMASK( 7, 0)
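
sta->rx_stats.last_rate is a packed u32: STA_STATS_FIELD() packs a value into
its mask, STA_STATS_GET() extracts it, and the new STA_STATS_RATE_TYPE_S1G tags
rates as S1G MCS so decoding (see sta_stats_decode_rate() in sta_info.c above)
can take the right branch. A standalone sketch of the pack/unpack idea, with
illustrative masks only (the kernel's STA_STATS_FIELD_* layout differs):

    #include <stdint.h>

    #define F_TYPE_MASK   0x000f0000u       /* rate type, illustrative */
    #define F_TYPE_SHIFT  16
    #define F_IDX_MASK    0x000000ffu       /* MCS / legacy index, illustrative */

    static uint32_t stats_encode(uint32_t type, uint32_t idx)
    {
            return ((type << F_TYPE_SHIFT) & F_TYPE_MASK) | (idx & F_IDX_MASK);
    }

    static uint32_t stats_get_type(uint32_t rate)
    {
            return (rate & F_TYPE_MASK) >> F_TYPE_SHIFT;
    }
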
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 0794396a7988..6feb45135020 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -66,8 +66,8 @@ static void ieee80211_handle_filtered_frame(struct ieee80211_local *local,
info->control.jiffies = jiffies;
info->control.vif = &sta->sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING |
- IEEE80211_TX_INTFL_RETRANSMISSION;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
+ info->flags |= IEEE80211_TX_INTFL_RETRANSMISSION;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
sta->status_stats.filtered++;
@@ -184,18 +184,6 @@ static void ieee80211_frame_acked(struct sta_info *sta, struct sk_buff *skb)
struct ieee80211_mgmt *mgmt = (void *) skb->data;
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
-
- if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
- sta->status_stats.last_ack = jiffies;
- if (txinfo->status.is_valid_ack_signal) {
- sta->status_stats.last_ack_signal =
- (s8)txinfo->status.ack_signal;
- sta->status_stats.ack_signal_filled = true;
- ewma_avg_signal_add(&sta->status_stats.avg_ack_signal,
- -txinfo->status.ack_signal);
- }
- }
if (ieee80211_is_data_qos(mgmt->frame_control)) {
struct ieee80211_hdr *hdr = (void *) skb->data;
@@ -890,7 +878,8 @@ void ieee80211_tx_monitor(struct ieee80211_local *local, struct sk_buff *skb,
}
static void __ieee80211_tx_status(struct ieee80211_hw *hw,
- struct ieee80211_tx_status *status)
+ struct ieee80211_tx_status *status,
+ int rates_idx, int retry_count)
{
struct sk_buff *skb = status->skb;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -899,17 +888,12 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
struct sta_info *sta;
__le16 fc;
struct ieee80211_supported_band *sband;
- int retry_count;
- int rates_idx;
bool send_to_cooked;
bool acked;
bool noack_success;
struct ieee80211_bar *bar;
int shift = 0;
int tid = IEEE80211_NUM_TIDS;
- u16 tx_time_est;
-
- rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
sband = local->hw.wiphy->bands[info->band];
fc = hdr->frame_control;
@@ -987,62 +971,17 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
if (info->flags & IEEE80211_TX_STAT_TX_FILTERED) {
ieee80211_handle_filtered_frame(local, sta, skb);
return;
- } else {
+ } else if (ieee80211_is_data_present(fc)) {
if (!acked && !noack_success)
- sta->status_stats.retry_failed++;
- sta->status_stats.retry_count += retry_count;
+ sta->status_stats.msdu_failed[tid]++;
- if (ieee80211_is_data_present(fc)) {
- if (!acked && !noack_success)
- sta->status_stats.msdu_failed[tid]++;
-
- sta->status_stats.msdu_retries[tid] +=
- retry_count;
- }
+ sta->status_stats.msdu_retries[tid] +=
+ retry_count;
}
- rate_control_tx_status(local, sband, status);
- if (ieee80211_vif_is_mesh(&sta->sdata->vif))
- ieee80211s_update_metric(local, sta, status);
-
if (!(info->flags & IEEE80211_TX_CTL_INJECTED) && acked)
ieee80211_frame_acked(sta, skb);
- if ((sta->sdata->vif.type == NL80211_IFTYPE_STATION) &&
- ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS))
- ieee80211_sta_tx_notify(sta->sdata, (void *) skb->data,
- acked, info->status.tx_time);
-
- if (info->status.tx_time &&
- wiphy_ext_feature_isset(local->hw.wiphy,
- NL80211_EXT_FEATURE_AIRTIME_FAIRNESS))
- ieee80211_sta_register_airtime(&sta->sta, tid,
- info->status.tx_time, 0);
-
- if ((tx_time_est = ieee80211_info_get_tx_time_est(info)) > 0) {
- /* Do this here to avoid the expensive lookup of the sta
- * in ieee80211_report_used_skb().
- */
- ieee80211_sta_update_pending_airtime(local, sta,
- skb_get_queue_mapping(skb),
- tx_time_est,
- true);
- ieee80211_info_set_tx_time_est(info, 0);
- }
-
- if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
- if (acked) {
- if (sta->status_stats.lost_packets)
- sta->status_stats.lost_packets = 0;
-
- /* Track when last TDLS packet was ACKed */
- sta->status_stats.last_pkt_time = jiffies;
- } else if (noack_success) {
- /* nothing to do here, do not account as lost */
- } else {
- ieee80211_lost_packet(sta, info);
- }
- }
}
/* SNMP counters
@@ -1101,7 +1040,10 @@ static void __ieee80211_tx_status(struct ieee80211_hw *hw,
* with this test...
*/
if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
- dev_kfree_skb(skb);
+ if (status->free_list)
+ list_add_tail(&skb->list, status->free_list);
+ else
+ dev_kfree_skb(skb);
return;
}
@@ -1126,7 +1068,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (sta)
status.sta = &sta->sta;
- __ieee80211_tx_status(hw, &status);
+ ieee80211_tx_status_ext(hw, &status);
rcu_read_unlock();
}
EXPORT_SYMBOL(ieee80211_tx_status);
@@ -1137,10 +1079,12 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_tx_info *info = status->info;
struct ieee80211_sta *pubsta = status->sta;
+ struct sk_buff *skb = status->skb;
struct ieee80211_supported_band *sband;
- struct sta_info *sta;
- int retry_count;
+ struct sta_info *sta = NULL;
+ int rates_idx, retry_count;
bool acked, noack_success;
+ u16 tx_time_est;
if (pubsta) {
sta = container_of(pubsta, struct sta_info, sta);
@@ -1149,13 +1093,22 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
sta->tx_stats.last_rate_info = *status->rate;
}
- if (status->skb)
- return __ieee80211_tx_status(hw, status);
+ if (skb && (tx_time_est =
+ ieee80211_info_get_tx_time_est(IEEE80211_SKB_CB(skb))) > 0) {
+ /* Do this here to avoid the expensive lookup of the sta
+ * in ieee80211_report_used_skb().
+ */
+ ieee80211_sta_update_pending_airtime(local, sta,
+ skb_get_queue_mapping(skb),
+ tx_time_est,
+ true);
+ ieee80211_info_set_tx_time_est(IEEE80211_SKB_CB(skb), 0);
+ }
- if (!status->sta)
- return;
+ if (!status->info)
+ goto free;
- ieee80211_tx_get_rates(hw, info, &retry_count);
+ rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
sband = hw->wiphy->bands[info->band];
@@ -1163,24 +1116,46 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
if (pubsta) {
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+
if (!acked && !noack_success)
sta->status_stats.retry_failed++;
sta->status_stats.retry_count += retry_count;
- if (acked) {
- sta->status_stats.last_ack = jiffies;
+ if (ieee80211_hw_check(&local->hw, REPORTS_TX_ACK_STATUS)) {
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ skb && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ ieee80211_sta_tx_notify(sdata, (void *) skb->data,
+ acked, info->status.tx_time);
- if (sta->status_stats.lost_packets)
- sta->status_stats.lost_packets = 0;
+ if (acked) {
+ sta->status_stats.last_ack = jiffies;
- /* Track when last packet was ACKed */
- sta->status_stats.last_pkt_time = jiffies;
- } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
- return;
- } else if (noack_success) {
- /* nothing to do here, do not account as lost */
- } else {
- ieee80211_lost_packet(sta, info);
+ if (sta->status_stats.lost_packets)
+ sta->status_stats.lost_packets = 0;
+
+ /* Track when last packet was ACKed */
+ sta->status_stats.last_pkt_time = jiffies;
+
+ /* Reset connection monitor */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ unlikely(sdata->u.mgd.probe_send_count > 0))
+ sdata->u.mgd.probe_send_count = 0;
+
+ if (info->status.is_valid_ack_signal) {
+ sta->status_stats.last_ack_signal =
+ (s8)info->status.ack_signal;
+ sta->status_stats.ack_signal_filled = true;
+ ewma_avg_signal_add(&sta->status_stats.avg_ack_signal,
+ -info->status.ack_signal);
+ }
+ } else if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+ return;
+ } else if (noack_success) {
+ /* nothing to do here, do not account as lost */
+ } else {
+ ieee80211_lost_packet(sta, info);
+ }
}
rate_control_tx_status(local, sband, status);
@@ -1188,6 +1163,10 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
ieee80211s_update_metric(local, sta, status);
}
+ if (skb && !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+ return __ieee80211_tx_status(hw, status, rates_idx,
+ retry_count);
+
if (acked || noack_success) {
I802_DEBUG_INC(local->dot11TransmittedFrameCount);
if (!pubsta)
@@ -1199,6 +1178,16 @@ void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
} else {
I802_DEBUG_INC(local->dot11FailedCount);
}
+
+free:
+ if (!skb)
+ return;
+
+ ieee80211_report_used_skb(local, skb, false);
+ if (status->free_list)
+ list_add_tail(&skb->list, status->free_list);
+ else
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status_ext);
@@ -1225,69 +1214,23 @@ void ieee80211_tx_status_8023(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct sk_buff *skb)
{
- struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_sub_if_data *sdata;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_tx_status status = {
+ .skb = skb,
+ .info = IEEE80211_SKB_CB(skb),
+ };
struct sta_info *sta;
- int retry_count;
- int rates_idx;
- bool acked;
sdata = vif_to_sdata(vif);
- acked = info->flags & IEEE80211_TX_STAT_ACK;
- rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
-
rcu_read_lock();
- if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
- goto counters_update;
-
- if (IS_ERR(sta))
- goto counters_update;
-
- if (!acked)
- sta->status_stats.retry_failed++;
-
- if (rates_idx != -1)
- sta->tx_stats.last_rate = info->status.rates[rates_idx];
-
- sta->status_stats.retry_count += retry_count;
-
- if (ieee80211_hw_check(hw, REPORTS_TX_ACK_STATUS)) {
- if (acked && vif->type == NL80211_IFTYPE_STATION)
- ieee80211_sta_reset_conn_monitor(sdata);
-
- sta->status_stats.last_ack = jiffies;
- if (info->flags & IEEE80211_TX_STAT_ACK) {
- if (sta->status_stats.lost_packets)
- sta->status_stats.lost_packets = 0;
+ if (!ieee80211_lookup_ra_sta(sdata, skb, &sta) && !IS_ERR(sta))
+ status.sta = &sta->sta;
- sta->status_stats.last_pkt_time = jiffies;
- } else {
- ieee80211_lost_packet(sta, info);
- }
- }
+ ieee80211_tx_status_ext(hw, &status);
-counters_update:
rcu_read_unlock();
- ieee80211_led_tx(local);
-
- if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
- !(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED))
- goto skip_stats_update;
-
- I802_DEBUG_INC(local->dot11TransmittedFrameCount);
- if (is_multicast_ether_addr(skb->data))
- I802_DEBUG_INC(local->dot11MulticastTransmittedFrameCount);
- if (retry_count > 0)
- I802_DEBUG_INC(local->dot11RetryCount);
- if (retry_count > 1)
- I802_DEBUG_INC(local->dot11MultipleRetryCount);
-
-skip_stats_update:
- ieee80211_report_used_skb(local, skb, false);
- dev_kfree_skb(skb);
}
EXPORT_SYMBOL(ieee80211_tx_status_8023);
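
Besides routing ieee80211_tx_status() and ieee80211_tx_status_8023() through
ieee80211_tx_status_ext(), the status path gains an optional free_list: instead
of freeing each completed skb immediately, mac80211 appends it to a
caller-supplied list so drivers completing many frames at once can batch the
frees. A hedged driver-side sketch; free_list is the new struct
ieee80211_tx_status member used above, while mydrv_complete_batch() and its
calling context are hypothetical:

    static void mydrv_complete_batch(struct ieee80211_hw *hw,
                                     struct sk_buff_head *done)
    {
            LIST_HEAD(free_list);
            struct sk_buff *skb, *tmp;

            while ((skb = __skb_dequeue(done)) != NULL) {
                    struct ieee80211_tx_status status = {
                            .skb = skb,
                            .info = IEEE80211_SKB_CB(skb),
                            .free_list = &free_list,
                    };

                    ieee80211_tx_status_ext(hw, &status);
            }

            /* free everything mac80211 handed back, outside the hot loop */
            list_for_each_entry_safe(skb, tmp, &free_list, list) {
                    skb_list_del_init(skb);
                    dev_kfree_skb(skb);
            }
    }
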
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 50ab5b9d8eab..89723907a094 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -2734,6 +2734,39 @@ TRACE_EVENT(drv_get_ftm_responder_stats,
)
);
+DEFINE_EVENT(local_sdata_addr_evt, drv_update_vif_offload,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata),
+ TP_ARGS(local, sdata)
+);
+
+TRACE_EVENT(drv_sta_set_4addr,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta *sta, bool enabled),
+
+ TP_ARGS(local, sdata, sta, enabled),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ STA_ENTRY
+ __field(bool, enabled)
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ STA_ASSIGN;
+ __entry->enabled = enabled;
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " enabled:%d",
+ LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->enabled
+ )
+);
+
#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
#undef TRACE_INCLUDE_PATH
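
The drv_sta_set_4addr tracepoint mirrors a new driver callback: when a managed
interface uses a 4-address (WDS) station, mlme calls drv_sta_set_4addr() after
association (see the mlme.c hunk above) so the driver can reconfigure the
hardware. A hedged sketch of the driver side; the ->sta_set_4addr op name comes
from this series, everything mydrv_* is hypothetical:

    static void mydrv_sta_set_4addr(struct ieee80211_hw *hw,
                                    struct ieee80211_vif *vif,
                                    struct ieee80211_sta *sta, bool enabled)
    {
            /* e.g. tell firmware to parse/build 4-address headers for
             * frames to/from this station from now on */
    }

    static const struct ieee80211_ops mydrv_ops = {
            /* ... other mandatory ops ... */
            .sta_set_4addr = mydrv_sta_set_4addr,
    };
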
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index dca01d7e6e3e..8ba10a48ded4 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -82,6 +82,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
erp = txrate->flags & IEEE80211_RATE_ERP_G;
+ /* device is expected to do this */
+ if (sband->band == NL80211_BAND_S1GHZ)
+ return 0;
+
/*
* data and mgmt (except PS Poll):
* - during CFP: 32768
@@ -531,7 +535,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
info->control.jiffies = jiffies;
info->control.vif = &tx->sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
spin_unlock(&sta->ps_lock);
@@ -1134,7 +1138,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
tx->sta->sta.addr, tx->sta->sta.aid);
}
info->control.vif = &tx->sdata->vif;
- info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags |= IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
__skb_queue_tail(&tid_tx->pending, skb);
if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
@@ -1179,7 +1183,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
* we are doing the needed processing, so remove the flag
* now.
*/
- info->flags &= ~IEEE80211_TX_INTFL_NEED_TXPROCESSING;
+ info->control.flags &= ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
hdr = (struct ieee80211_hdr *) skb->data;
@@ -1258,7 +1262,7 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
(info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
return NULL;
- if (!(info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) &&
+ if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
if ((!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(hdr->frame_control) ||
@@ -2473,7 +2477,9 @@ static u16 ieee80211_store_ack_skb(struct ieee80211_local *local,
* @sdata: virtual interface to build the header for
* @skb: the skb to build the header in
* @info_flags: skb flags to set
+ * @sta: the station pointer
* @ctrl_flags: info control flags to set
+ * @cookie: cookie pointer to fill (if not %NULL)
*
* This function takes the skb with 802.3 header and reformats the header to
* the appropriate IEEE 802.11 header based on which interface the packet is
@@ -3649,7 +3655,7 @@ begin:
else
info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP)
+ if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
goto encap_out;
if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
@@ -4190,38 +4196,18 @@ static bool ieee80211_tx_8023(struct ieee80211_sub_if_data *sdata,
static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
struct net_device *dev, struct sta_info *sta,
- struct sk_buff *skb)
+ struct ieee80211_key *key, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ethhdr *ehdr = (struct ethhdr *)skb->data;
struct ieee80211_local *local = sdata->local;
- bool authorized = false;
- bool multicast;
- unsigned char *ra = ehdr->h_dest;
-
- if (IS_ERR(sta) || (sta && !sta->uploaded))
- sta = NULL;
-
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
- (!sta || !test_sta_flag(sta, WLAN_STA_TDLS_PEER)))
- ra = sdata->u.mgd.bssid;
-
- if (is_zero_ether_addr(ra))
- goto out_free;
-
- multicast = is_multicast_ether_addr(ra);
+ struct tid_ampdu_tx *tid_tx;
+ u8 tid;
- if (sta)
- authorized = test_sta_flag(sta, WLAN_STA_AUTHORIZED);
-
- if (!multicast && !authorized &&
- (ehdr->h_proto != sdata->control_port_protocol ||
- !ether_addr_equal(sdata->vif.addr, ehdr->h_source)))
- goto out_free;
-
- if (multicast && sdata->vif.type == NL80211_IFTYPE_AP &&
- !atomic_read(&sdata->u.ap.num_mcast_sta))
- goto out_free;
+ if (local->ops->wake_tx_queue) {
+ u16 queue = __ieee80211_select_queue(sdata, sta, skb);
+ skb_set_queue_mapping(skb, queue);
+ skb_get_hash(skb);
+ }
if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
@@ -4229,36 +4215,42 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
memset(info, 0, sizeof(*info));
- if (unlikely(!multicast && skb->sk &&
- skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
- info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
- &info->flags, NULL);
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
+ if (tid_tx) {
+ if (!test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
+ /* fall back to non-offload slow path */
+ __ieee80211_subif_start_xmit(skb, dev, 0, 0, NULL);
+ return;
+ }
- if (unlikely(sdata->control_port_protocol == ehdr->h_proto)) {
- if (sdata->control_port_no_encrypt)
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
- info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ if (tid_tx->timeout)
+ tid_tx->last_tx = jiffies;
}
- if (multicast)
- info->flags |= IEEE80211_TX_CTL_NO_ACK;
+ if (unlikely(skb->sk &&
+ skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS))
+ info->ack_frame_id = ieee80211_store_ack_skb(local, skb,
+ &info->flags, NULL);
info->hw_queue = sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
ieee80211_tx_stats(dev, skb->len);
- if (sta) {
- sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
- sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
- }
+ sta->tx_stats.bytes[skb_get_queue_mapping(skb)] += skb->len;
+ sta->tx_stats.packets[skb_get_queue_mapping(skb)]++;
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
- info->control.flags |= IEEE80211_TX_CTRL_HW_80211_ENCAP;
+ info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
info->control.vif = &sdata->vif;
+ if (key)
+ info->control.hw_key = &key->conf;
+
ieee80211_tx_8023(sdata, skb, skb->len, sta, false);
return;
@@ -4271,12 +4263,10 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
struct net_device *dev)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+ struct ieee80211_key *key;
struct sta_info *sta;
-
- if (WARN_ON(!sdata->hw_80211_encap)) {
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
+ bool offload = true;
if (unlikely(skb->len < ETH_HLEN)) {
kfree_skb(skb);
@@ -4285,11 +4275,26 @@ netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
rcu_read_lock();
- if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
kfree_skb(skb);
+ goto out;
+ }
+
+ if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
+ !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+ sdata->control_port_protocol == ehdr->h_proto))
+ offload = false;
+ else if ((key = rcu_dereference(sta->ptk[sta->ptk_idx])) &&
+ (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
+ key->conf.cipher == WLAN_CIPHER_SUITE_TKIP))
+ offload = false;
+
+ if (offload)
+ ieee80211_8023_xmit(sdata, dev, sta, key, skb);
else
- ieee80211_8023_xmit(sdata, dev, sta, skb);
+ ieee80211_subif_start_xmit(skb, dev);
+out:
rcu_read_unlock();
return NETDEV_TX_OK;
@@ -4365,7 +4370,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
sdata = vif_to_sdata(info->control.vif);
- if (info->flags & IEEE80211_TX_INTFL_NEED_TXPROCESSING) {
+ if (info->control.flags & IEEE80211_TX_INTCFL_NEED_TXPROCESSING) {
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (unlikely(!chanctx_conf)) {
dev_kfree_skb(skb);
@@ -4373,7 +4378,7 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
}
info->band = chanctx_conf->def.chan->band;
result = ieee80211_tx(sdata, NULL, skb, true);
- } else if (info->control.flags & IEEE80211_TX_CTRL_HW_80211_ENCAP) {
+ } else if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
dev_kfree_skb(skb);
return true;
@@ -4538,14 +4543,14 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
- struct beacon_data *beacon)
+static void ieee80211_set_beacon_cntdwn(struct ieee80211_sub_if_data *sdata,
+ struct beacon_data *beacon)
{
struct probe_resp *resp;
u8 *beacon_data;
size_t beacon_data_len;
int i;
- u8 count = beacon->csa_current_counter;
+ u8 count = beacon->cntdwn_current_counter;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
@@ -4565,36 +4570,36 @@ static void ieee80211_set_csa(struct ieee80211_sub_if_data *sdata,
}
rcu_read_lock();
- for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; ++i) {
+ for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; ++i) {
resp = rcu_dereference(sdata->u.ap.probe_resp);
- if (beacon->csa_counter_offsets[i]) {
- if (WARN_ON_ONCE(beacon->csa_counter_offsets[i] >=
+ if (beacon->cntdwn_counter_offsets[i]) {
+ if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[i] >=
beacon_data_len)) {
rcu_read_unlock();
return;
}
- beacon_data[beacon->csa_counter_offsets[i]] = count;
+ beacon_data[beacon->cntdwn_counter_offsets[i]] = count;
}
if (sdata->vif.type == NL80211_IFTYPE_AP && resp)
- resp->data[resp->csa_counter_offsets[i]] = count;
+ resp->data[resp->cntdwn_counter_offsets[i]] = count;
}
rcu_read_unlock();
}
-static u8 __ieee80211_csa_update_counter(struct beacon_data *beacon)
+static u8 __ieee80211_beacon_update_cntdwn(struct beacon_data *beacon)
{
- beacon->csa_current_counter--;
+ beacon->cntdwn_current_counter--;
/* the counter should never reach 0 */
- WARN_ON_ONCE(!beacon->csa_current_counter);
+ WARN_ON_ONCE(!beacon->cntdwn_current_counter);
- return beacon->csa_current_counter;
+ return beacon->cntdwn_current_counter;
}
-u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
+u8 ieee80211_beacon_update_cntdwn(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct beacon_data *beacon = NULL;
@@ -4612,15 +4617,15 @@ u8 ieee80211_csa_update_counter(struct ieee80211_vif *vif)
if (!beacon)
goto unlock;
- count = __ieee80211_csa_update_counter(beacon);
+ count = __ieee80211_beacon_update_cntdwn(beacon);
unlock:
rcu_read_unlock();
return count;
}
-EXPORT_SYMBOL(ieee80211_csa_update_counter);
+EXPORT_SYMBOL(ieee80211_beacon_update_cntdwn);
-void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter)
+void ieee80211_beacon_set_cntdwn(struct ieee80211_vif *vif, u8 counter)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct beacon_data *beacon = NULL;
@@ -4637,15 +4642,15 @@ void ieee80211_csa_set_counter(struct ieee80211_vif *vif, u8 counter)
if (!beacon)
goto unlock;
- if (counter < beacon->csa_current_counter)
- beacon->csa_current_counter = counter;
+ if (counter < beacon->cntdwn_current_counter)
+ beacon->cntdwn_current_counter = counter;
unlock:
rcu_read_unlock();
}
-EXPORT_SYMBOL(ieee80211_csa_set_counter);
+EXPORT_SYMBOL(ieee80211_beacon_set_cntdwn);
-bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
+bool ieee80211_beacon_cntdwn_is_complete(struct ieee80211_vif *vif)
{
struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
struct beacon_data *beacon = NULL;
@@ -4688,20 +4693,21 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
goto out;
}
- if (!beacon->csa_counter_offsets[0])
+ if (!beacon->cntdwn_counter_offsets[0])
goto out;
- if (WARN_ON_ONCE(beacon->csa_counter_offsets[0] > beacon_data_len))
+ if (WARN_ON_ONCE(beacon->cntdwn_counter_offsets[0] > beacon_data_len))
goto out;
- if (beacon_data[beacon->csa_counter_offsets[0]] == 1)
+ if (beacon_data[beacon->cntdwn_counter_offsets[0]] == 1)
ret = true;
+
out:
rcu_read_unlock();
return ret;
}
-EXPORT_SYMBOL(ieee80211_csa_is_complete);
+EXPORT_SYMBOL(ieee80211_beacon_cntdwn_is_complete);
static int ieee80211_beacon_protect(struct sk_buff *skb,
struct ieee80211_local *local,
@@ -4761,11 +4767,11 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
beacon = rcu_dereference(ap->beacon);
if (beacon) {
- if (beacon->csa_counter_offsets[0]) {
+ if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
- __ieee80211_csa_update_counter(beacon);
+ ieee80211_beacon_update_cntdwn(vif);
- ieee80211_set_csa(sdata, beacon);
+ ieee80211_set_beacon_cntdwn(sdata, beacon);
}
/*
@@ -4809,11 +4815,11 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (!beacon)
goto out;
- if (beacon->csa_counter_offsets[0]) {
+ if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
- __ieee80211_csa_update_counter(beacon);
+ __ieee80211_beacon_update_cntdwn(beacon);
- ieee80211_set_csa(sdata, beacon);
+ ieee80211_set_beacon_cntdwn(sdata, beacon);
}
skb = dev_alloc_skb(local->tx_headroom + beacon->head_len +
@@ -4833,16 +4839,16 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (!beacon)
goto out;
- if (beacon->csa_counter_offsets[0]) {
+ if (beacon->cntdwn_counter_offsets[0]) {
if (!is_template)
/* TODO: For mesh, csa_counter is in TUs, so
* decrementing it by one isn't correct, but
* for now we leave it consistent with overall
* mac80211's behavior.
*/
- __ieee80211_csa_update_counter(beacon);
+ __ieee80211_beacon_update_cntdwn(beacon);
- ieee80211_set_csa(sdata, beacon);
+ ieee80211_set_beacon_cntdwn(sdata, beacon);
}
if (ifmsh->sync_ops)
@@ -4874,13 +4880,13 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
if (offs && beacon) {
int i;
- for (i = 0; i < IEEE80211_MAX_CSA_COUNTERS_NUM; i++) {
- u16 csa_off = beacon->csa_counter_offsets[i];
+ for (i = 0; i < IEEE80211_MAX_CNTDWN_COUNTERS_NUM; i++) {
+ u16 csa_off = beacon->cntdwn_counter_offsets[i];
if (!csa_off)
continue;
- offs->csa_counter_offs[i] = csa_off_base + csa_off;
+ offs->cntdwn_counter_offs[i] = csa_off_base + csa_off;
}
}
@@ -4999,6 +5005,63 @@ out:
}
EXPORT_SYMBOL(ieee80211_proberesp_get);
+struct sk_buff *ieee80211_get_fils_discovery_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = NULL;
+ struct fils_discovery_data *tmpl = NULL;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return NULL;
+
+ rcu_read_lock();
+ tmpl = rcu_dereference(sdata->u.ap.fils_discovery);
+ if (!tmpl) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
+ if (skb) {
+ skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
+ skb_put_data(skb, tmpl->data, tmpl->len);
+ }
+
+ rcu_read_unlock();
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_get_fils_discovery_tmpl);
+
+struct sk_buff *
+ieee80211_get_unsol_bcast_probe_resp_tmpl(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = NULL;
+ struct unsol_bcast_probe_resp_data *tmpl = NULL;
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return NULL;
+
+ rcu_read_lock();
+ tmpl = rcu_dereference(sdata->u.ap.unsol_bcast_probe_resp);
+ if (!tmpl) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
+ skb = dev_alloc_skb(sdata->local->hw.extra_tx_headroom + tmpl->len);
+ if (skb) {
+ skb_reserve(skb, sdata->local->hw.extra_tx_headroom);
+ skb_put_data(skb, tmpl->data, tmpl->len);
+ }
+
+ rcu_read_unlock();
+ return skb;
+}
+EXPORT_SYMBOL(ieee80211_get_unsol_bcast_probe_resp_tmpl);
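/* Illustration, not part of the patch: a driver that offloads FILS
 * discovery would typically fetch the template added above from its
 * start_ap()/BSS-change path and program it into the hardware.  A
 * minimal sketch; my_hw_set_bcast_tmpl() is a made-up driver hook.
 */
static int my_drv_set_fils_discovery(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct sk_buff *skb = ieee80211_get_fils_discovery_tmpl(hw, vif);

	if (!skb)
		return -ENOENT;		/* no template configured */

	my_hw_set_bcast_tmpl(hw, skb);	/* hypothetical; consumes skb */
	return 0;
}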
+
struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 8d3bfc0fe176..49342060490f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -45,6 +45,58 @@ struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
}
EXPORT_SYMBOL(wiphy_to_ieee80211_hw);
+u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
+ enum nl80211_iftype type)
+{
+ __le16 fc = hdr->frame_control;
+
+ if (ieee80211_is_data(fc)) {
+ if (len < 24) /* drop incorrect hdr len (data) */
+ return NULL;
+
+ if (ieee80211_has_a4(fc))
+ return NULL;
+ if (ieee80211_has_tods(fc))
+ return hdr->addr1;
+ if (ieee80211_has_fromds(fc))
+ return hdr->addr2;
+
+ return hdr->addr3;
+ }
+
+ if (ieee80211_is_s1g_beacon(fc)) {
+ struct ieee80211_ext *ext = (void *) hdr;
+
+ return ext->u.s1g_beacon.sa;
+ }
+
+ if (ieee80211_is_mgmt(fc)) {
+ if (len < 24) /* drop incorrect hdr len (mgmt) */
+ return NULL;
+ return hdr->addr3;
+ }
+
+ if (ieee80211_is_ctl(fc)) {
+ if (ieee80211_is_pspoll(fc))
+ return hdr->addr1;
+
+ if (ieee80211_is_back_req(fc)) {
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ return hdr->addr2;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_AP_VLAN:
+ return hdr->addr1;
+ default:
+ break; /* fall through to the return */
+ }
+ }
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(ieee80211_get_bssid);
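/* Illustration, not part of the patch: the data-frame branch above
 * follows the usual 802.11 ToDS/FromDS rules:
 *
 *	ToDS  FromDS  BSSID
 *	 0      0     addr3  (IBSS/ad-hoc)
 *	 1      0     addr1  (frame towards the AP)
 *	 0      1     addr2  (frame from the AP)
 *	 1      1     none   (4-address/WDS, no single BSSID)
 */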
+
void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
{
struct sk_buff *skb;
@@ -733,6 +785,9 @@ static void __iterate_interfaces(struct ieee80211_local *local,
if (!(iter_flags & IEEE80211_IFACE_ITER_RESUME_ALL) &&
active_only && !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
continue;
+ if ((iter_flags & IEEE80211_IFACE_SKIP_SDATA_NOT_IN_DRIVER) &&
+ !(sdata->flags & IEEE80211_SDATA_IN_DRIVER))
+ continue;
if (ieee80211_sdata_running(sdata) || !active_only)
iterator(data, sdata->vif.addr,
&sdata->vif);
@@ -1003,6 +1058,11 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
case WLAN_EID_LINK_ID:
case WLAN_EID_BSS_MAX_IDLE_PERIOD:
case WLAN_EID_RSNX:
+ case WLAN_EID_S1G_BCN_COMPAT:
+ case WLAN_EID_S1G_CAPABILITIES:
+ case WLAN_EID_S1G_OPERATION:
+ case WLAN_EID_AID_RESPONSE:
+ case WLAN_EID_S1G_SHORT_BCN_INTERVAL:
/*
* not listing WLAN_EID_CHANNEL_SWITCH_WRAPPER -- it seems possible
* that if the content gets bigger it might be needed more than once
@@ -1288,6 +1348,30 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
&crc : NULL,
elem, elems);
break;
+ case WLAN_EID_S1G_CAPABILITIES:
+ if (elen == sizeof(*elems->s1g_capab))
+ elems->s1g_capab = (void *)pos;
+ else
+ elem_parse_failed = true;
+ break;
+ case WLAN_EID_S1G_OPERATION:
+ if (elen == sizeof(*elems->s1g_oper))
+ elems->s1g_oper = (void *)pos;
+ else
+ elem_parse_failed = true;
+ break;
+ case WLAN_EID_S1G_BCN_COMPAT:
+ if (elen == sizeof(*elems->s1g_bcn_compat))
+ elems->s1g_bcn_compat = (void *)pos;
+ else
+ elem_parse_failed = true;
+ break;
+ case WLAN_EID_AID_RESPONSE:
+ if (elen == sizeof(struct ieee80211_aid_response_ie))
+ elems->aid_resp = (void *)pos;
+ else
+ elem_parse_failed = true;
+ break;
default:
break;
}
@@ -3371,6 +3455,42 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata,
*chandef = he_chandef;
+ return false;
+}
+
+bool ieee80211_chandef_s1g_oper(const struct ieee80211_s1g_oper_ie *oper,
+ struct cfg80211_chan_def *chandef)
+{
+ u32 oper_freq;
+
+ if (!oper)
+ return false;
+
+ switch (FIELD_GET(S1G_OPER_CH_WIDTH_OPER, oper->ch_width)) {
+ case IEEE80211_S1G_CHANWIDTH_1MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_1;
+ break;
+ case IEEE80211_S1G_CHANWIDTH_2MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_2;
+ break;
+ case IEEE80211_S1G_CHANWIDTH_4MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_4;
+ break;
+ case IEEE80211_S1G_CHANWIDTH_8MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_8;
+ break;
+ case IEEE80211_S1G_CHANWIDTH_16MHZ:
+ chandef->width = NL80211_CHAN_WIDTH_16;
+ break;
+ default:
+ return false;
+ }
+
+ oper_freq = ieee80211_channel_to_freq_khz(oper->oper_ch,
+ NL80211_BAND_S1GHZ);
+ chandef->center_freq1 = KHZ_TO_MHZ(oper_freq);
+ chandef->freq1_offset = oper_freq % 1000;
+
return true;
}
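/* Illustration, not part of the patch: assuming the S1G mapping of
 * 902000 kHz plus 500 kHz per channel number used by
 * ieee80211_channel_to_freq_khz(), operating channel 1 yields
 * 902500 kHz, which the code above splits as:
 *
 *	chandef->center_freq1 = 902500 / 1000 = 902 (MHz)
 *	chandef->freq1_offset = 902500 % 1000 = 500 (kHz)
 */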
@@ -4277,6 +4397,58 @@ int ieee80211_max_num_channels(struct ieee80211_local *local)
return max_num_different_channels;
}
+void ieee80211_add_s1g_capab_ie(struct ieee80211_sub_if_data *sdata,
+ struct ieee80211_sta_s1g_cap *caps,
+ struct sk_buff *skb)
+{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+ struct ieee80211_s1g_cap s1g_capab;
+ u8 *pos;
+ int i;
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
+ return;
+
+ if (!caps->s1g)
+ return;
+
+ memcpy(s1g_capab.capab_info, caps->cap, sizeof(caps->cap));
+ memcpy(s1g_capab.supp_mcs_nss, caps->nss_mcs, sizeof(caps->nss_mcs));
+
+ /* override the capability info */
+ for (i = 0; i < sizeof(ifmgd->s1g_capa.capab_info); i++) {
+ u8 mask = ifmgd->s1g_capa_mask.capab_info[i];
+
+ s1g_capab.capab_info[i] &= ~mask;
+ s1g_capab.capab_info[i] |= ifmgd->s1g_capa.capab_info[i] & mask;
+ }
+
+ /* then MCS and NSS set */
+ for (i = 0; i < sizeof(ifmgd->s1g_capa.supp_mcs_nss); i++) {
+ u8 mask = ifmgd->s1g_capa_mask.supp_mcs_nss[i];
+
+ s1g_capab.supp_mcs_nss[i] &= ~mask;
+ s1g_capab.supp_mcs_nss[i] |=
+ ifmgd->s1g_capa.supp_mcs_nss[i] & mask;
+ }
+
+ pos = skb_put(skb, 2 + sizeof(s1g_capab));
+ *pos++ = WLAN_EID_S1G_CAPABILITIES;
+ *pos++ = sizeof(s1g_capab);
+
+ memcpy(pos, &s1g_capab, sizeof(s1g_capab));
+}
+
+void ieee80211_add_aid_request_ie(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb)
+{
+ u8 *pos = skb_put(skb, 3);
+
+ *pos++ = WLAN_EID_AID_REQUEST;
+ *pos++ = 1;
+ *pos++ = 0;
+}
+
u8 *ieee80211_add_wmm_info_ie(u8 *buf, u8 qosinfo)
{
*buf++ = WLAN_EID_VENDOR_SPECIFIC;
@@ -4319,3 +4491,24 @@ const u8 ieee80211_ac_to_qos_mask[IEEE80211_NUM_ACS] = {
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE,
IEEE80211_WMM_IE_STA_QOSINFO_AC_BK
};
+
+u16 ieee80211_encode_usf(int listen_interval)
+{
+ static const int listen_int_usf[] = { 1, 10, 1000, 10000 };
+ u16 ui, usf = 0;
+
+ /* find greatest USF */
+ while (usf < IEEE80211_MAX_USF) {
+ if (listen_interval % listen_int_usf[usf + 1])
+ break;
+ usf += 1;
+ }
+ ui = listen_interval / listen_int_usf[usf];
+
+ /* error if there is a remainder; should have been checked by the user */
+ WARN_ON_ONCE(ui > IEEE80211_MAX_UI);
+ listen_interval = FIELD_PREP(LISTEN_INT_USF, usf) |
+ FIELD_PREP(LISTEN_INT_UI, ui);
+
+ return (u16) listen_interval;
+}
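/* Illustration, not part of the patch: worked example for the
 * encoding above.  A listen interval of 3000 divides evenly by 10
 * and by 1000 but not by 10000, so the loop stops at usf = 2 and
 * ui = 3000 / 1000 = 3; the encoded field is then
 * FIELD_PREP(LISTEN_INT_USF, 2) | FIELD_PREP(LISTEN_INT_UI, 3).
 */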
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index d1b64d0751f2..fb0e3a657d2d 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -315,10 +315,6 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
sta->sta.bandwidth = ieee80211_sta_cur_vht_bw(sta);
- /* If HT IE reported 3839 bytes only, stay with that size. */
- if (sta->sta.max_amsdu_len == IEEE80211_MAX_MPDU_LEN_HT_3839)
- return;
-
switch (vht_cap->cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK) {
case IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454:
sta->sta.max_amsdu_len = IEEE80211_MAX_MPDU_LEN_VHT_11454;
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 0a6a15f3456d..84d119436b22 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -22,6 +22,15 @@ static const struct snmp_mib mptcp_snmp_list[] = {
SNMP_MIB_ITEM("MPJoinAckHMacFailure", MPTCP_MIB_JOINACKMAC),
SNMP_MIB_ITEM("DSSNotMatching", MPTCP_MIB_DSSNOMATCH),
SNMP_MIB_ITEM("InfiniteMapRx", MPTCP_MIB_INFINITEMAPRX),
+ SNMP_MIB_ITEM("OFOQueueTail", MPTCP_MIB_OFOQUEUETAIL),
+ SNMP_MIB_ITEM("OFOQueue", MPTCP_MIB_OFOQUEUE),
+ SNMP_MIB_ITEM("OFOMerge", MPTCP_MIB_OFOMERGE),
+ SNMP_MIB_ITEM("NoDSSInWindow", MPTCP_MIB_NODSSWINDOW),
+ SNMP_MIB_ITEM("DuplicateData", MPTCP_MIB_DUPDATA),
+ SNMP_MIB_ITEM("AddAddr", MPTCP_MIB_ADDADDR),
+ SNMP_MIB_ITEM("EchoAdd", MPTCP_MIB_ECHOADD),
+ SNMP_MIB_ITEM("RmAddr", MPTCP_MIB_RMADDR),
+ SNMP_MIB_ITEM("RmSubflow", MPTCP_MIB_RMSUBFLOW),
SNMP_MIB_SENTINEL
};
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index d7de340fc997..47bcecce1106 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -15,6 +15,15 @@ enum linux_mptcp_mib_field {
MPTCP_MIB_JOINACKMAC, /* HMAC was wrong on ACK + MP_JOIN */
MPTCP_MIB_DSSNOMATCH, /* Received a new mapping that did not match the previous one */
MPTCP_MIB_INFINITEMAPRX, /* Received an infinite mapping */
+ MPTCP_MIB_OFOQUEUETAIL, /* Segments inserted into OoO queue tail */
+ MPTCP_MIB_OFOQUEUE, /* Segments inserted into OoO queue */
+ MPTCP_MIB_OFOMERGE, /* Segments merged in OoO queue */
+ MPTCP_MIB_NODSSWINDOW, /* Segments not in MPTCP windows */
+ MPTCP_MIB_DUPDATA, /* Segments discarded due to duplicate DSS */
+ MPTCP_MIB_ADDADDR, /* Received ADD_ADDR with echo-flag=0 */
+ MPTCP_MIB_ECHOADD, /* Received ADD_ADDR with echo-flag=1 */
+ MPTCP_MIB_RMADDR, /* Received RM_ADDR */
+ MPTCP_MIB_RMSUBFLOW, /* Remove a subflow */
__MPTCP_MIB_MAX
};
diff --git a/net/mptcp/options.c b/net/mptcp/options.c
index 888bbbbb3e8a..092a2d48bfd3 100644
--- a/net/mptcp/options.c
+++ b/net/mptcp/options.c
@@ -11,6 +11,7 @@
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"
+#include "mib.h"
static bool mptcp_cap_flag_sha256(u8 flags)
{
@@ -242,7 +243,7 @@ static void mptcp_parse_option(const struct sk_buff *skb,
mp_opt->add_addr = 1;
mp_opt->port = 0;
mp_opt->addr_id = *ptr++;
- pr_debug("ADD_ADDR: id=%d", mp_opt->addr_id);
+ pr_debug("ADD_ADDR: id=%d, echo=%d", mp_opt->addr_id, mp_opt->echo);
if (mp_opt->family == MPTCP_ADDR_IPVERSION_4) {
memcpy((u8 *)&mp_opt->addr.s_addr, (u8 *)ptr, 4);
ptr += 4;
@@ -516,7 +517,7 @@ static bool mptcp_established_options_dss(struct sock *sk, struct sk_buff *skb,
return ret;
}
- if (subflow->use_64bit_ack) {
+ if (READ_ONCE(msk->use_64bit_ack)) {
ack_size = TCPOLEN_MPTCP_DSS_ACK64;
opts->ext_copy.data_ack = READ_ONCE(msk->ack_seq);
opts->ext_copy.ack64 = 1;
@@ -571,21 +572,22 @@ static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id,
}
#endif
-static bool mptcp_established_options_addr(struct sock *sk,
- unsigned int *size,
- unsigned int remaining,
- struct mptcp_out_options *opts)
+static bool mptcp_established_options_add_addr(struct sock *sk,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
struct mptcp_addr_info saddr;
+ bool echo;
int len;
- if (!mptcp_pm_should_signal(msk) ||
- !(mptcp_pm_addr_signal(msk, remaining, &saddr)))
+ if (!mptcp_pm_should_add_signal(msk) ||
+ !(mptcp_pm_add_addr_signal(msk, remaining, &saddr, &echo)))
return false;
- len = mptcp_add_addr_len(saddr.family);
+ len = mptcp_add_addr_len(saddr.family, echo);
if (remaining < len)
return false;
@@ -594,22 +596,51 @@ static bool mptcp_established_options_addr(struct sock *sk,
if (saddr.family == AF_INET) {
opts->suboptions |= OPTION_MPTCP_ADD_ADDR;
opts->addr = saddr.addr;
- opts->ahmac = add_addr_generate_hmac(msk->local_key,
- msk->remote_key,
- opts->addr_id,
- &opts->addr);
+ if (!echo) {
+ opts->ahmac = add_addr_generate_hmac(msk->local_key,
+ msk->remote_key,
+ opts->addr_id,
+ &opts->addr);
+ }
}
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
else if (saddr.family == AF_INET6) {
opts->suboptions |= OPTION_MPTCP_ADD_ADDR6;
opts->addr6 = saddr.addr6;
- opts->ahmac = add_addr6_generate_hmac(msk->local_key,
- msk->remote_key,
- opts->addr_id,
- &opts->addr6);
+ if (!echo) {
+ opts->ahmac = add_addr6_generate_hmac(msk->local_key,
+ msk->remote_key,
+ opts->addr_id,
+ &opts->addr6);
+ }
}
#endif
- pr_debug("addr_id=%d, ahmac=%llu", opts->addr_id, opts->ahmac);
+ pr_debug("addr_id=%d, ahmac=%llu, echo=%d", opts->addr_id, opts->ahmac, echo);
+
+ return true;
+}
+
+static bool mptcp_established_options_rm_addr(struct sock *sk,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+ struct mptcp_sock *msk = mptcp_sk(subflow->conn);
+ u8 rm_id;
+
+ if (!mptcp_pm_should_rm_signal(msk) ||
+ !(mptcp_pm_rm_addr_signal(msk, remaining, &rm_id)))
+ return false;
+
+ if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
+ return false;
+
+ *size = TCPOLEN_MPTCP_RM_ADDR_BASE;
+ opts->suboptions |= OPTION_MPTCP_RM_ADDR;
+ opts->rm_id = rm_id;
+
+ pr_debug("rm_id=%d", opts->rm_id);
return true;
}
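/* Illustration, not part of the patch: the 4 bytes reserved above
 * (TCPOLEN_MPTCP_RM_ADDR_BASE) cover the minimal RM_ADDR option of
 * RFC 8684 section 3.4.2 -- Kind, Length, Subtype/reserved and a
 * single Address ID:
 *
 *	+--------+--------+-----------+---------+
 *	|  Kind  | Length |Subt.| rsvd| AddrID  |
 *	+--------+--------+-----------+---------+
 */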
@@ -626,6 +657,12 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
if (unlikely(mptcp_check_fallback(sk)))
return false;
+ /* prevent adding of any MPTCP related options on reset packet
+ * until we support MP_TCPRST/MP_FASTCLOSE
+ */
+ if (unlikely(skb && TCP_SKB_CB(skb)->tcp_flags & TCPHDR_RST))
+ return false;
+
if (mptcp_established_options_mp(sk, skb, &opt_size, remaining, opts))
ret = true;
else if (mptcp_established_options_dss(sk, skb, &opt_size, remaining,
@@ -640,7 +677,11 @@ bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
*size += opt_size;
remaining -= opt_size;
- if (mptcp_established_options_addr(sk, &opt_size, remaining, opts)) {
+ if (mptcp_established_options_add_addr(sk, &opt_size, remaining, opts)) {
+ *size += opt_size;
+ remaining -= opt_size;
+ ret = true;
+ } else if (mptcp_established_options_rm_addr(sk, &opt_size, remaining, opts)) {
*size += opt_size;
remaining -= opt_size;
ret = true;
@@ -676,7 +717,7 @@ bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
return false;
}
-static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
+static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
struct mptcp_subflow_context *subflow,
struct sk_buff *skb,
struct mptcp_options_received *mp_opt)
@@ -693,15 +734,20 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq &&
subflow->mp_join && mp_opt->mp_join &&
READ_ONCE(msk->pm.server_side))
- tcp_send_ack(sk);
+ tcp_send_ack(ssk);
goto fully_established;
}
- /* we should process OoO packets before the first subflow is fully
- * established, but not expected for MP_JOIN subflows
+ /* we must process OoO packets before the first subflow is fully
+ * established. OoO packets are instead a protocol violation
+ * for MP_JOIN subflows as the peer must not send any data
+ * before receiving the fourth ack - cf. RFC 8684 section 3.2.
*/
- if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1)
+ if (TCP_SKB_CB(skb)->seq != subflow->ssn_offset + 1) {
+ if (subflow->mp_join)
+ goto reset;
return subflow->mp_capable;
+ }
if (mp_opt->dss && mp_opt->use_ack) {
/* subflows are fully established as soon as we get any
@@ -713,9 +759,12 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *sk,
}
/* If the first established packet does not contain MP_CAPABLE + data
- * then fallback to TCP
+ * then fallback to TCP. Fallback scenarios require a reset for
+ * MP_JOIN subflows.
*/
if (!mp_opt->mp_capable) {
+ if (subflow->mp_join)
+ goto reset;
subflow->mp_capable = 0;
pr_fallback(msk);
__mptcp_do_fallback(msk);
@@ -732,12 +781,16 @@ fully_established:
subflow->pm_notified = 1;
if (subflow->mp_join) {
- clear_3rdack_retransmission(sk);
+ clear_3rdack_retransmission(ssk);
mptcp_pm_subflow_established(msk, subflow);
} else {
mptcp_pm_fully_established(msk);
}
return true;
+
+reset:
+ mptcp_subflow_reset(ssk);
+ return false;
}
static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit)
@@ -825,8 +878,7 @@ static bool add_addr_hmac_valid(struct mptcp_sock *msk,
return hmac == mp_opt->ahmac;
}
-void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
- struct tcp_options_received *opt_rx)
+void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
@@ -855,11 +907,21 @@ void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
addr.addr6 = mp_opt.addr6;
}
#endif
- if (!mp_opt.echo)
+ if (!mp_opt.echo) {
mptcp_pm_add_addr_received(msk, &addr);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ADDADDR);
+ } else {
+ mptcp_pm_del_add_timer(msk, &addr);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_ECHOADD);
+ }
mp_opt.add_addr = 0;
}
+ if (mp_opt.rm_addr) {
+ mptcp_pm_rm_addr_received(msk, mp_opt.rm_id);
+ mp_opt.rm_addr = 0;
+ }
+
if (!mp_opt.dss)
return;
diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c
index a8ad20559aaa..e19e1525ecbb 100644
--- a/net/mptcp/pm.c
+++ b/net/mptcp/pm.c
@@ -13,23 +13,34 @@
/* path manager command handlers */
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
- const struct mptcp_addr_info *addr)
+ const struct mptcp_addr_info *addr,
+ bool echo)
{
pr_debug("msk=%p, local_id=%d", msk, addr->id);
msk->pm.local = *addr;
- WRITE_ONCE(msk->pm.addr_signal, true);
+ WRITE_ONCE(msk->pm.add_addr_echo, echo);
+ WRITE_ONCE(msk->pm.add_addr_signal, true);
return 0;
}
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
- return -ENOTSUPP;
+ pr_debug("msk=%p, local_id=%d", msk, local_id);
+
+ msk->pm.rm_id = local_id;
+ WRITE_ONCE(msk->pm.rm_addr_signal, true);
+ return 0;
}
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
+int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id)
{
- return -ENOTSUPP;
+ pr_debug("msk=%p, local_id=%d", msk, local_id);
+
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_nl_rm_subflow_received(msk, local_id);
+ spin_unlock_bh(&msk->pm.lock);
+ return 0;
}
/* path manager event handlers */
@@ -46,7 +57,7 @@ void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
struct mptcp_pm_data *pm = &msk->pm;
- int ret;
+ int ret = 0;
pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
pm->subflows_max, READ_ONCE(pm->accept_subflow));
@@ -56,9 +67,11 @@ bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
return false;
spin_lock_bh(&pm->lock);
- ret = pm->subflows < pm->subflows_max;
- if (ret && ++pm->subflows == pm->subflows_max)
- WRITE_ONCE(pm->accept_subflow, false);
+ if (READ_ONCE(pm->accept_subflow)) {
+ ret = pm->subflows < pm->subflows_max;
+ if (ret && ++pm->subflows == pm->subflows_max)
+ WRITE_ONCE(pm->accept_subflow, false);
+ }
spin_unlock_bh(&pm->lock);
return ret;
@@ -135,38 +148,71 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
READ_ONCE(pm->accept_addr));
- /* avoid acquiring the lock if there is no room for fouther addresses */
- if (!READ_ONCE(pm->accept_addr))
- return;
-
spin_lock_bh(&pm->lock);
- /* be sure there is something to signal re-checking under PM lock */
- if (READ_ONCE(pm->accept_addr) &&
- mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
+ if (!READ_ONCE(pm->accept_addr))
+ mptcp_pm_announce_addr(msk, addr, true);
+ else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
pm->remote = *addr;
spin_unlock_bh(&pm->lock);
}
+void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id)
+{
+ struct mptcp_pm_data *pm = &msk->pm;
+
+ pr_debug("msk=%p remote_id=%d", msk, rm_id);
+
+ spin_lock_bh(&pm->lock);
+ mptcp_pm_schedule_work(msk, MPTCP_PM_RM_ADDR_RECEIVED);
+ pm->rm_id = rm_id;
+ spin_unlock_bh(&pm->lock);
+}
+
/* path manager helpers */
-bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
- struct mptcp_addr_info *saddr)
+bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ struct mptcp_addr_info *saddr, bool *echo)
{
int ret = false;
spin_lock_bh(&msk->pm.lock);
/* double check after the lock is acquired */
- if (!mptcp_pm_should_signal(msk))
+ if (!mptcp_pm_should_add_signal(msk))
goto out_unlock;
- if (remaining < mptcp_add_addr_len(msk->pm.local.family))
+ *echo = READ_ONCE(msk->pm.add_addr_echo);
+
+ if (remaining < mptcp_add_addr_len(msk->pm.local.family, *echo))
goto out_unlock;
*saddr = msk->pm.local;
- WRITE_ONCE(msk->pm.addr_signal, false);
+ WRITE_ONCE(msk->pm.add_addr_signal, false);
+ ret = true;
+
+out_unlock:
+ spin_unlock_bh(&msk->pm.lock);
+ return ret;
+}
+
+bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ u8 *rm_id)
+{
+ int ret = false;
+
+ spin_lock_bh(&msk->pm.lock);
+
+ /* double check after the lock is acquired */
+ if (!mptcp_pm_should_rm_signal(msk))
+ goto out_unlock;
+
+ if (remaining < TCPOLEN_MPTCP_RM_ADDR_BASE)
+ goto out_unlock;
+
+ *rm_id = msk->pm.rm_id;
+ WRITE_ONCE(msk->pm.rm_addr_signal, false);
ret = true;
out_unlock:
@@ -185,13 +231,17 @@ void mptcp_pm_data_init(struct mptcp_sock *msk)
msk->pm.add_addr_accepted = 0;
msk->pm.local_addr_used = 0;
msk->pm.subflows = 0;
+ msk->pm.rm_id = 0;
WRITE_ONCE(msk->pm.work_pending, false);
- WRITE_ONCE(msk->pm.addr_signal, false);
+ WRITE_ONCE(msk->pm.add_addr_signal, false);
+ WRITE_ONCE(msk->pm.rm_addr_signal, false);
WRITE_ONCE(msk->pm.accept_addr, false);
WRITE_ONCE(msk->pm.accept_subflow, false);
+ WRITE_ONCE(msk->pm.add_addr_echo, false);
msk->pm.status = 0;
spin_lock_init(&msk->pm.lock);
+ INIT_LIST_HEAD(&msk->pm.anno_list);
mptcp_pm_nl_data_init(msk);
}
diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c
index 770da3627848..0d6f3d912891 100644
--- a/net/mptcp/pm_netlink.c
+++ b/net/mptcp/pm_netlink.c
@@ -15,6 +15,7 @@
#include <uapi/linux/mptcp.h>
#include "protocol.h"
+#include "mib.h"
/* forward declaration */
static struct genl_family mptcp_genl_family;
@@ -23,12 +24,18 @@ static int pm_nl_pernet_id;
struct mptcp_pm_addr_entry {
struct list_head list;
- unsigned int flags;
- int ifindex;
struct mptcp_addr_info addr;
struct rcu_head rcu;
};
+struct mptcp_pm_add_entry {
+ struct list_head list;
+ struct mptcp_addr_info addr;
+ struct timer_list add_timer;
+ struct mptcp_sock *sock;
+ u8 retrans_times;
+};
+
struct pm_nl_pernet {
/* protects pernet updates */
spinlock_t lock;
@@ -42,6 +49,7 @@ struct pm_nl_pernet {
};
#define MPTCP_PM_ADDR_MAX 8
+#define ADD_ADDR_RETRANS_MAX 3
static bool addresses_equal(const struct mptcp_addr_info *a,
struct mptcp_addr_info *b, bool use_port)
@@ -129,7 +137,7 @@ select_local_address(const struct pm_nl_pernet *pernet,
rcu_read_lock();
spin_lock_bh(&msk->join_list_lock);
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
- if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
+ if (!(entry->addr.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW))
continue;
/* avoid any address already in use by subflows and
@@ -160,7 +168,7 @@ select_signal_address(struct pm_nl_pernet *pernet, unsigned int pos)
* can lead to additional addresses not being announced.
*/
list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) {
- if (!(entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
+ if (!(entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL))
continue;
if (i++ == pos) {
ret = entry;
@@ -179,6 +187,121 @@ static void check_work_pending(struct mptcp_sock *msk)
WRITE_ONCE(msk->pm.work_pending, false);
}
+static struct mptcp_pm_add_entry *
+lookup_anno_list_by_saddr(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_add_entry *entry;
+
+ list_for_each_entry(entry, &msk->pm.anno_list, list) {
+ if (addresses_equal(&entry->addr, addr, false))
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void mptcp_pm_add_timer(struct timer_list *timer)
+{
+ struct mptcp_pm_add_entry *entry = from_timer(entry, timer, add_timer);
+ struct mptcp_sock *msk = entry->sock;
+ struct sock *sk = (struct sock *)msk;
+
+ pr_debug("msk=%p", msk);
+
+ if (!msk)
+ return;
+
+ if (inet_sk_state_load(sk) == TCP_CLOSE)
+ return;
+
+ if (!entry->addr.id)
+ return;
+
+ if (mptcp_pm_should_add_signal(msk)) {
+ sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX / 8);
+ goto out;
+ }
+
+ spin_lock_bh(&msk->pm.lock);
+
+ if (!mptcp_pm_should_add_signal(msk)) {
+ pr_debug("retransmit ADD_ADDR id=%d", entry->addr.id);
+ mptcp_pm_announce_addr(msk, &entry->addr, false);
+ entry->retrans_times++;
+ }
+
+ if (entry->retrans_times < ADD_ADDR_RETRANS_MAX)
+ sk_reset_timer(sk, timer, jiffies + TCP_RTO_MAX);
+
+ spin_unlock_bh(&msk->pm.lock);
+
+out:
+ __sock_put(sk);
+}
+
+struct mptcp_pm_add_entry *
+mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_add_entry *entry;
+ struct sock *sk = (struct sock *)msk;
+
+ spin_lock_bh(&msk->pm.lock);
+ entry = lookup_anno_list_by_saddr(msk, addr);
+ if (entry)
+ entry->retrans_times = ADD_ADDR_RETRANS_MAX;
+ spin_unlock_bh(&msk->pm.lock);
+
+ if (entry)
+ sk_stop_timer_sync(sk, &entry->add_timer);
+
+ return entry;
+}
+
+static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk,
+ struct mptcp_pm_addr_entry *entry)
+{
+ struct mptcp_pm_add_entry *add_entry = NULL;
+ struct sock *sk = (struct sock *)msk;
+
+ if (lookup_anno_list_by_saddr(msk, &entry->addr))
+ return false;
+
+ add_entry = kmalloc(sizeof(*add_entry), GFP_ATOMIC);
+ if (!add_entry)
+ return false;
+
+ list_add(&add_entry->list, &msk->pm.anno_list);
+
+ add_entry->addr = entry->addr;
+ add_entry->sock = msk;
+ add_entry->retrans_times = 0;
+
+ timer_setup(&add_entry->add_timer, mptcp_pm_add_timer, 0);
+ sk_reset_timer(sk, &add_entry->add_timer, jiffies + TCP_RTO_MAX);
+
+ return true;
+}
+
+void mptcp_pm_free_anno_list(struct mptcp_sock *msk)
+{
+ struct mptcp_pm_add_entry *entry, *tmp;
+ struct sock *sk = (struct sock *)msk;
+ LIST_HEAD(free_list);
+
+ pr_debug("msk=%p", msk);
+
+ spin_lock_bh(&msk->pm.lock);
+ list_splice_init(&msk->pm.anno_list, &free_list);
+ spin_unlock_bh(&msk->pm.lock);
+
+ list_for_each_entry_safe(entry, tmp, &free_list, list) {
+ sk_stop_timer_sync(sk, &entry->add_timer);
+ kfree(entry);
+ }
+}
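/* Illustration, not part of the patch: resulting ADD_ADDR lifecycle.
 * mptcp_pm_alloc_anno_list() arms the timer when the address is first
 * announced; mptcp_pm_add_timer() then re-announces at roughly
 * TCP_RTO_MAX intervals until either ADD_ADDR_RETRANS_MAX (3)
 * timer-driven retransmissions have been made or the peer's echo
 * arrives and mptcp_pm_del_add_timer() stops the timer.
 */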
+
static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
{
struct mptcp_addr_info remote = { 0 };
@@ -199,8 +322,10 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
msk->pm.add_addr_signaled);
if (local) {
- msk->pm.add_addr_signaled++;
- mptcp_pm_announce_addr(msk, &local->addr);
+ if (mptcp_pm_alloc_anno_list(msk, local)) {
+ msk->pm.add_addr_signaled++;
+ mptcp_pm_announce_addr(msk, &local->addr, false);
+ }
} else {
/* pick failed, avoid further attempts later */
msk->pm.local_addr_used = msk->pm.add_addr_signal_max;
@@ -220,8 +345,7 @@ static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk)
msk->pm.subflows++;
check_work_pending(msk);
spin_unlock_bh(&msk->pm.lock);
- __mptcp_subflow_connect(sk, local->ifindex,
- &local->addr, &remote);
+ __mptcp_subflow_connect(sk, &local->addr, &remote);
spin_lock_bh(&msk->pm.lock);
return;
}
@@ -267,13 +391,86 @@ void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk)
local.family = remote.family;
spin_unlock_bh(&msk->pm.lock);
- __mptcp_subflow_connect((struct sock *)msk, 0, &local, &remote);
+ __mptcp_subflow_connect((struct sock *)msk, &local, &remote);
spin_lock_bh(&msk->pm.lock);
+
+ mptcp_pm_announce_addr(msk, &remote, true);
+}
+
+void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct sock *sk = (struct sock *)msk;
+
+ pr_debug("address rm_id %d", msk->pm.rm_id);
+
+ if (!msk->pm.rm_id)
+ return;
+
+ if (list_empty(&msk->conn_list))
+ return;
+
+ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+ long timeout = 0;
+
+ if (msk->pm.rm_id != subflow->remote_id)
+ continue;
+
+ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, how);
+ __mptcp_close_ssk(sk, ssk, subflow, timeout);
+ spin_lock_bh(&msk->pm.lock);
+
+ msk->pm.add_addr_accepted--;
+ msk->pm.subflows--;
+ WRITE_ONCE(msk->pm.accept_addr, true);
+
+ __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMADDR);
+
+ break;
+ }
+}
+
+void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+ struct sock *sk = (struct sock *)msk;
+
+ pr_debug("subflow rm_id %d", rm_id);
+
+ if (!rm_id)
+ return;
+
+ if (list_empty(&msk->conn_list))
+ return;
+
+ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ int how = RCV_SHUTDOWN | SEND_SHUTDOWN;
+ long timeout = 0;
+
+ if (rm_id != subflow->local_id)
+ continue;
+
+ spin_unlock_bh(&msk->pm.lock);
+ mptcp_subflow_shutdown(sk, ssk, how);
+ __mptcp_close_ssk(sk, ssk, subflow, timeout);
+ spin_lock_bh(&msk->pm.lock);
+
+ msk->pm.local_addr_used--;
+ msk->pm.subflows--;
+
+ __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RMSUBFLOW);
+
+ break;
+ }
}
static bool address_use_port(struct mptcp_pm_addr_entry *entry)
{
- return (entry->flags &
+ return (entry->addr.flags &
(MPTCP_PM_ADDR_FLAG_SIGNAL | MPTCP_PM_ADDR_FLAG_SUBFLOW)) ==
MPTCP_PM_ADDR_FLAG_SIGNAL;
}
@@ -303,9 +500,9 @@ static int mptcp_pm_nl_append_new_local_addr(struct pm_nl_pernet *pernet,
goto out;
}
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
pernet->add_addr_signal_max++;
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
pernet->local_addr_max++;
entry->addr.id = pernet->next_id++;
@@ -358,8 +555,9 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
if (!entry)
return -ENOMEM;
- entry->flags = 0;
entry->addr = skc_local;
+ entry->addr.ifindex = 0;
+ entry->addr.flags = 0;
ret = mptcp_pm_nl_append_new_local_addr(pernet, entry);
if (ret < 0)
kfree(entry);
@@ -397,8 +595,8 @@ mptcp_pm_addr_policy[MPTCP_PM_ADDR_ATTR_MAX + 1] = {
[MPTCP_PM_ADDR_ATTR_FAMILY] = { .type = NLA_U16, },
[MPTCP_PM_ADDR_ATTR_ID] = { .type = NLA_U8, },
[MPTCP_PM_ADDR_ATTR_ADDR4] = { .type = NLA_U32, },
- [MPTCP_PM_ADDR_ATTR_ADDR6] = { .type = NLA_EXACT_LEN,
- .len = sizeof(struct in6_addr), },
+ [MPTCP_PM_ADDR_ATTR_ADDR6] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
[MPTCP_PM_ADDR_ATTR_PORT] = { .type = NLA_U16 },
[MPTCP_PM_ADDR_ATTR_FLAGS] = { .type = NLA_U32 },
[MPTCP_PM_ADDR_ATTR_IF_IDX] = { .type = NLA_S32 },
@@ -473,14 +671,17 @@ static int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
entry->addr.addr.s_addr = nla_get_in_addr(tb[addr_addr]);
skip_family:
- if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX])
- entry->ifindex = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
+ if (tb[MPTCP_PM_ADDR_ATTR_IF_IDX]) {
+ u32 val = nla_get_s32(tb[MPTCP_PM_ADDR_ATTR_IF_IDX]);
+
+ entry->addr.ifindex = val;
+ }
if (tb[MPTCP_PM_ADDR_ATTR_ID])
entry->addr.id = nla_get_u8(tb[MPTCP_PM_ADDR_ATTR_ID]);
if (tb[MPTCP_PM_ADDR_ATTR_FLAGS])
- entry->flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
+ entry->addr.flags = nla_get_u32(tb[MPTCP_PM_ADDR_ATTR_FLAGS]);
return 0;
}
@@ -530,6 +731,68 @@ __lookup_addr_by_id(struct pm_nl_pernet *pernet, unsigned int id)
return NULL;
}
+static bool remove_anno_list_by_saddr(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_pm_add_entry *entry;
+
+ entry = mptcp_pm_del_add_timer(msk, addr);
+ if (entry) {
+ list_del(&entry->list);
+ kfree(entry);
+ return true;
+ }
+
+ return false;
+}
+
+static bool mptcp_pm_remove_anno_addr(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr,
+ bool force)
+{
+ bool ret;
+
+ ret = remove_anno_list_by_saddr(msk, addr);
+ if (ret || force) {
+ spin_lock_bh(&msk->pm.lock);
+ mptcp_pm_remove_addr(msk, addr->id);
+ spin_unlock_bh(&msk->pm.lock);
+ }
+ return ret;
+}
+
+static int mptcp_nl_remove_subflow_and_signal_addr(struct net *net,
+ struct mptcp_addr_info *addr)
+{
+ struct mptcp_sock *msk;
+ long s_slot = 0, s_num = 0;
+
+ pr_debug("remove_id=%d", addr->id);
+
+ while ((msk = mptcp_token_iter_next(net, &s_slot, &s_num)) != NULL) {
+ struct sock *sk = (struct sock *)msk;
+ bool remove_subflow;
+
+ if (list_empty(&msk->conn_list)) {
+ mptcp_pm_remove_anno_addr(msk, addr, false);
+ goto next;
+ }
+
+ lock_sock(sk);
+ remove_subflow = lookup_subflow_by_saddr(&msk->conn_list, addr);
+ mptcp_pm_remove_anno_addr(msk, addr, remove_subflow);
+ if (remove_subflow)
+ mptcp_pm_remove_subflow(msk, addr->id);
+ release_sock(sk);
+
+next:
+ sock_put(sk);
+ cond_resched();
+ }
+
+ return 0;
+}
+
static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attr = info->attrs[MPTCP_PM_ATTR_ADDR];
@@ -545,19 +808,21 @@ static int mptcp_nl_cmd_del_addr(struct sk_buff *skb, struct genl_info *info)
entry = __lookup_addr_by_id(pernet, addr.addr.id);
if (!entry) {
GENL_SET_ERR_MSG(info, "address not found");
- ret = -EINVAL;
- goto out;
+ spin_unlock_bh(&pernet->lock);
+ return -EINVAL;
}
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SIGNAL)
pernet->add_addr_signal_max--;
- if (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
+ if (entry->addr.flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)
pernet->local_addr_max--;
pernet->addrs--;
list_del_rcu(&entry->list);
- kfree_rcu(entry, rcu);
-out:
spin_unlock_bh(&pernet->lock);
+
+ mptcp_nl_remove_subflow_and_signal_addr(sock_net(skb->sk), &entry->addr);
+ kfree_rcu(entry, rcu);
+
return ret;
}
@@ -606,10 +871,10 @@ static int mptcp_nl_fill_addr(struct sk_buff *skb,
goto nla_put_failure;
if (nla_put_u8(skb, MPTCP_PM_ADDR_ATTR_ID, addr->id))
goto nla_put_failure;
- if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->flags))
+ if (nla_put_u32(skb, MPTCP_PM_ADDR_ATTR_FLAGS, entry->addr.flags))
goto nla_put_failure;
- if (entry->ifindex &&
- nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->ifindex))
+ if (entry->addr.ifindex &&
+ nla_put_s32(skb, MPTCP_PM_ADDR_ATTR_IF_IDX, entry->addr.ifindex))
goto nla_put_failure;
if (addr->family == AF_INET &&
@@ -789,7 +1054,7 @@ fail:
return -EMSGSIZE;
}
-static struct genl_ops mptcp_pm_ops[] = {
+static const struct genl_small_ops mptcp_pm_ops[] = {
{
.cmd = MPTCP_PM_CMD_ADD_ADDR,
.doit = mptcp_nl_cmd_add_addr,
@@ -828,8 +1093,8 @@ static struct genl_family mptcp_genl_family __ro_after_init = {
.policy = mptcp_pm_policy,
.netnsok = true,
.module = THIS_MODULE,
- .ops = mptcp_pm_ops,
- .n_ops = ARRAY_SIZE(mptcp_pm_ops),
+ .small_ops = mptcp_pm_ops,
+ .n_small_ops = ARRAY_SIZE(mptcp_pm_ops),
.mcgrps = mptcp_pm_mcgrps,
.n_mcgrps = ARRAY_SIZE(mptcp_pm_mcgrps),
};
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 5d747c6a610e..185dacb39781 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -24,8 +24,6 @@
#include "protocol.h"
#include "mib.h"
-#define MPTCP_SAME_STATE TCP_MAX_STATES
-
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
struct mptcp_sock msk;
@@ -34,6 +32,8 @@ struct mptcp6_sock {
#endif
struct mptcp_skb_cb {
+ u64 map_seq;
+ u64 end_seq;
u32 offset;
};
@@ -112,64 +112,205 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
return 0;
}
-static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
- struct sk_buff *skb,
- unsigned int offset, size_t copy_len)
+static void mptcp_drop(struct sock *sk, struct sk_buff *skb)
+{
+ sk_drops_add(sk, skb);
+ __kfree_skb(skb);
+}
+
+static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to,
+ struct sk_buff *from)
+{
+ bool fragstolen;
+ int delta;
+
+ if (MPTCP_SKB_CB(from)->offset ||
+ !skb_try_coalesce(to, from, &fragstolen, &delta))
+ return false;
+
+ pr_debug("colesced seq %llx into %llx new len %d new end seq %llx",
+ MPTCP_SKB_CB(from)->map_seq, MPTCP_SKB_CB(to)->map_seq,
+ to->len, MPTCP_SKB_CB(from)->end_seq);
+ MPTCP_SKB_CB(to)->end_seq = MPTCP_SKB_CB(from)->end_seq;
+ kfree_skb_partial(from, fragstolen);
+ atomic_add(delta, &sk->sk_rmem_alloc);
+ sk_mem_charge(sk, delta);
+ return true;
+}
+
+static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
+ struct sk_buff *from)
+{
+ if (MPTCP_SKB_CB(from)->map_seq != MPTCP_SKB_CB(to)->end_seq)
+ return false;
+
+ return mptcp_try_coalesce((struct sock *)msk, to, from);
+}
+
+/* "inspired" by tcp_data_queue_ofo(), main differences:
+ * - use mptcp seqs
+ * - don't cope with sacks
+ */
+static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
{
struct sock *sk = (struct sock *)msk;
- struct sk_buff *tail;
+ struct rb_node **p, *parent;
+ u64 seq, end_seq, max_seq;
+ struct sk_buff *skb1;
+ int space;
+
+ seq = MPTCP_SKB_CB(skb)->map_seq;
+ end_seq = MPTCP_SKB_CB(skb)->end_seq;
+ space = tcp_space(sk);
+ max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq;
+
+ pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
+ RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ if (after64(seq, max_seq)) {
+ /* out of window */
+ mptcp_drop(sk, skb);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW);
+ return;
+ }
- __skb_unlink(skb, &ssk->sk_receive_queue);
+ p = &msk->out_of_order_queue.rb_node;
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE);
+ if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
+ rb_link_node(&skb->rbnode, NULL, p);
+ rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
+ msk->ooo_last_skb = skb;
+ goto end;
+ }
- skb_ext_reset(skb);
- skb_orphan(skb);
- WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
+ /* with 2 subflows, adding at the end of the ooo queue is quite
+ * likely; using ooo_last_skb avoids the O(log n) rbtree lookup.
+ */
+ if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
+ return;
+ }
- tail = skb_peek_tail(&sk->sk_receive_queue);
- if (offset == 0 && tail) {
- bool fragstolen;
- int delta;
+ /* Can avoid an rbtree lookup if we are adding skb after ooo_last_skb */
+ if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL);
+ parent = &msk->ooo_last_skb->rbnode;
+ p = &parent->rb_right;
+ goto insert;
+ }
- if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
- kfree_skb_partial(skb, fragstolen);
- atomic_add(delta, &sk->sk_rmem_alloc);
- sk_mem_charge(sk, delta);
+ /* Find place to insert this segment. Handle overlaps on the way. */
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ skb1 = rb_to_skb(parent);
+ if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
+ p = &parent->rb_left;
+ continue;
+ }
+ if (before64(seq, MPTCP_SKB_CB(skb1)->end_seq)) {
+ if (!after64(end_seq, MPTCP_SKB_CB(skb1)->end_seq)) {
+ /* All the bits are present. Drop. */
+ mptcp_drop(sk, skb);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ return;
+ }
+ if (after64(seq, MPTCP_SKB_CB(skb1)->map_seq)) {
+ /* partial overlap:
+ * | skb |
+ * | skb1 |
+ * continue traversing
+ */
+ } else {
+ /* skb's seq == skb1's seq and skb covers skb1.
+ * Replace skb1 with skb.
+ */
+ rb_replace_node(&skb1->rbnode, &skb->rbnode,
+ &msk->out_of_order_queue);
+ mptcp_drop(sk, skb1);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ goto merge_right;
+ }
+ } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE);
return;
}
+ p = &parent->rb_right;
}
- skb_set_owner_r(skb, sk);
- __skb_queue_tail(&sk->sk_receive_queue, skb);
- MPTCP_SKB_CB(skb)->offset = offset;
-}
+insert:
+ /* Insert segment into RB tree. */
+ rb_link_node(&skb->rbnode, parent, p);
+ rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
-static void mptcp_stop_timer(struct sock *sk)
-{
- struct inet_connection_sock *icsk = inet_csk(sk);
+merge_right:
+ /* Remove other segments covered by skb. */
+ while ((skb1 = skb_rb_next(skb)) != NULL) {
+ if (before64(end_seq, MPTCP_SKB_CB(skb1)->end_seq))
+ break;
+ rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
+ mptcp_drop(sk, skb1);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ }
+ /* If there is no skb after us, we are the new ooo_last_skb. */
+ if (!skb1)
+ msk->ooo_last_skb = skb;
- sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
- mptcp_sk(sk)->timer_ival = 0;
+end:
+ skb_condense(skb);
+ skb_set_owner_r(skb, sk);
}
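/* Illustration, not part of the patch: the before64()/after64()
 * helpers used above follow the usual wraparound-safe pattern of a
 * signed test on the 64-bit difference (a sketch, hence the _sketch
 * suffix):
 */
static inline bool before64_sketch(__u64 seq1, __u64 seq2)
{
	return (__s64)(seq1 - seq2) < 0;	/* true if seq1 < seq2 */
}
#define after64_sketch(seq2, seq1)	before64_sketch(seq1, seq2)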
-/* both sockets must be locked */
-static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
- struct sock *ssk)
+static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
+ struct sk_buff *skb, unsigned int offset,
+ size_t copy_len)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
- u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);
+ struct sock *sk = (struct sock *)msk;
+ struct sk_buff *tail;
- /* revalidate data sequence number.
- *
- * mptcp_subflow_data_available() is usually called
- * without msk lock. Its unlikely (but possible)
- * that msk->ack_seq has been advanced since the last
- * call found in-sequence data.
+ __skb_unlink(skb, &ssk->sk_receive_queue);
+
+ skb_ext_reset(skb);
+ skb_orphan(skb);
+
+ /* the skb map_seq accounts for the skb offset:
+ * mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
+ * value
*/
- if (likely(dsn == msk->ack_seq))
+ MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
+ MPTCP_SKB_CB(skb)->end_seq = MPTCP_SKB_CB(skb)->map_seq + copy_len;
+ MPTCP_SKB_CB(skb)->offset = offset;
+
+ if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
+ /* in sequence */
+ WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
+ tail = skb_peek_tail(&sk->sk_receive_queue);
+ if (tail && mptcp_try_coalesce(sk, tail, skb))
+ return true;
+
+ skb_set_owner_r(skb, sk);
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
return true;
+ } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
+ mptcp_data_queue_ofo(msk, skb);
+ return false;
+ }
- subflow->data_avail = 0;
- return mptcp_subflow_data_available(ssk);
+ /* old data, keep it simple and drop the whole pkt; the sender
+ * will retransmit it as needed.
+ */
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ mptcp_drop(sk, skb);
+ return false;
+}
+
+static void mptcp_stop_timer(struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
+ mptcp_sk(sk)->timer_ival = 0;
}
static void mptcp_check_data_fin_ack(struct sock *sk)
@@ -313,14 +454,12 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
unsigned int moved = 0;
bool more_data_avail;
struct tcp_sock *tp;
+ u32 old_copied_seq;
bool done = false;
- if (!mptcp_subflow_dsn_valid(msk, ssk)) {
- *bytes = 0;
- return false;
- }
-
+ pr_debug("msk=%p ssk=%p", msk, ssk);
tp = tcp_sk(ssk);
+ old_copied_seq = tp->copied_seq;
do {
u32 map_remaining, offset;
u32 seq = tp->copied_seq;
@@ -332,8 +471,15 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
mptcp_subflow_get_map_offset(subflow);
skb = skb_peek(&ssk->sk_receive_queue);
- if (!skb)
+ if (!skb) {
+ /* if no data is found, a racing workqueue/recvmsg
+ * already processed the new data; stop here or we
+ * could enter an infinite loop
+ */
+ if (!moved)
+ done = true;
break;
+ }
if (__mptcp_check_fallback(msk)) {
/* if we are running under the workqueue, TCP could have
@@ -357,9 +503,9 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
if (tp->urg_data)
done = true;
- __mptcp_move_skb(msk, ssk, skb, offset, len);
+ if (__mptcp_move_skb(msk, ssk, skb, offset, len))
+ moved += len;
seq += len;
- moved += len;
if (WARN_ON_ONCE(map_remaining < len))
break;
@@ -378,20 +524,56 @@ static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
}
} while (more_data_avail);
- *bytes = moved;
-
- /* If the moves have caught up with the DATA_FIN sequence number
- * it's time to ack the DATA_FIN and change socket state, but
- * this is not a good place to change state. Let the workqueue
- * do it.
- */
- if (mptcp_pending_data_fin(sk, NULL) &&
- schedule_work(&msk->work))
- sock_hold(sk);
+ *bytes += moved;
+ if (tp->copied_seq != old_copied_seq)
+ tcp_cleanup_rbuf(ssk, 1);
return done;
}
+static bool mptcp_ofo_queue(struct mptcp_sock *msk)
+{
+ struct sock *sk = (struct sock *)msk;
+ struct sk_buff *skb, *tail;
+ bool moved = false;
+ struct rb_node *p;
+ u64 end_seq;
+
+ p = rb_first(&msk->out_of_order_queue);
+ pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
+ while (p) {
+ skb = rb_to_skb(p);
+ if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
+ break;
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, &msk->out_of_order_queue);
+
+ if (unlikely(!after64(MPTCP_SKB_CB(skb)->end_seq,
+ msk->ack_seq))) {
+ mptcp_drop(sk, skb);
+ MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+ continue;
+ }
+
+ end_seq = MPTCP_SKB_CB(skb)->end_seq;
+ tail = skb_peek_tail(&sk->sk_receive_queue);
+ if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
+ int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
+
+ /* skip overlapping data, if any */
+ pr_debug("uncoalesced seq=%llx ack seq=%llx delta=%d",
+ MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
+ delta);
+ MPTCP_SKB_CB(skb)->offset += delta;
+ __skb_queue_tail(&sk->sk_receive_queue, skb);
+ }
+ msk->ack_seq = end_seq;
+ moved = true;
+ }
+ return moved;
+}
+
/* In most cases we will be able to lock the mptcp socket. If it's already
* owned, we need to defer to the work queue to avoid ABBA deadlock.
*/
@@ -407,8 +589,19 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
return false;
/* must re-check after taking the lock */
- if (!READ_ONCE(sk->sk_lock.owned))
+ if (!READ_ONCE(sk->sk_lock.owned)) {
__mptcp_move_skbs_from_subflow(msk, ssk, &moved);
+ mptcp_ofo_queue(msk);
+
+ /* If the moves have caught up with the DATA_FIN sequence number
+ * it's time to ack the DATA_FIN and change socket state, but
+ * this is not a good place to change state. Let the workqueue
+ * do it.
+ */
+ if (mptcp_pending_data_fin(sk, NULL) &&
+ schedule_work(&msk->work))
+ sock_hold(sk);
+ }
spin_unlock_bh(&sk->sk_lock.slock);
@@ -417,9 +610,17 @@ static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
struct mptcp_sock *msk = mptcp_sk(sk);
+ bool wake;
- set_bit(MPTCP_DATA_READY, &msk->flags);
+ /* move_skbs_to_msk below can legitimately clear the data_avail flag;
+ * since we will still need it later to properly wake the reader,
+ * cache its value
+ */
+ wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
+ if (wake)
+ set_bit(MPTCP_DATA_READY, &msk->flags);
if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
move_skbs_to_msk(msk, ssk))
@@ -440,7 +641,8 @@ void mptcp_data_ready(struct sock *sk, struct sock *ssk)
move_skbs_to_msk(msk, ssk);
}
wake:
- sk->sk_data_ready(sk);
+ if (wake)
+ sk->sk_data_ready(sk);
}
static void __mptcp_flush_join_list(struct mptcp_sock *msk)
@@ -474,7 +676,7 @@ void mptcp_data_acked(struct sock *sk)
{
mptcp_reset_timer(sk);
- if ((!sk_stream_is_writeable(sk) ||
+ if ((!test_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags) ||
(inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
schedule_work(&mptcp_sk(sk)->work))
sock_hold(sk);
@@ -569,6 +771,20 @@ static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
put_page(dfrag->page);
}
+static bool mptcp_is_writeable(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+
+ if (!sk_stream_is_writeable((struct sock *)msk))
+ return false;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ if (sk_stream_is_writeable(subflow->tcp_sock))
+ return true;
+ }
+ return false;
+}
+
static void mptcp_clean_una(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -611,8 +827,15 @@ out:
sk_mem_reclaim_partial(sk);
/* Only wake up writers if a subflow is ready */
- if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
+ if (mptcp_is_writeable(msk)) {
+ set_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags);
+ smp_mb__after_atomic();
+
+ /* set SEND_SPACE before sk_stream_write_space clears
+ * NOSPACE
+ */
sk_stream_write_space(sk);
+ }
}
}
@@ -803,60 +1026,128 @@ out:
return ret;
}
-static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
+static void mptcp_nospace(struct mptcp_sock *msk)
{
+ struct mptcp_subflow_context *subflow;
+
clear_bit(MPTCP_SEND_SPACE, &msk->flags);
smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
- /* enables sk->write_space() callbacks */
- set_bit(SOCK_NOSPACE, &sock->flags);
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ struct socket *sock = READ_ONCE(ssk->sk_socket);
+
+ /* enables ssk->write_space() callbacks */
+ if (sock)
+ set_bit(SOCK_NOSPACE, &sock->flags);
+ }
+}
+
+static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
+{
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ /* can't send if JOIN hasn't completed yet (i.e. subflow not yet usable for mptcp) */
+ if (subflow->request_join && !subflow->fully_established)
+ return false;
+
+ /* only send if our side has not closed yet */
+ return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
}
-static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+#define MPTCP_SEND_BURST_SIZE ((1 << 16) - \
+ sizeof(struct tcphdr) - \
+ MAX_TCP_OPTION_SPACE - \
+ sizeof(struct ipv6hdr) - \
+ sizeof(struct frag_hdr))
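/* Illustration, not part of the patch: with the common header sizes
 * (tcphdr 20, MAX_TCP_OPTION_SPACE 40, ipv6hdr 40, frag_hdr 8) the
 * burst works out to 65536 - 20 - 40 - 40 - 8 = 65428 bytes, i.e.
 * roughly one 64 KB GSO packet minus worst-case IPv6 framing.
 */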
+
+struct subflow_send_info {
+ struct sock *ssk;
+ u64 ratio;
+};
+
+static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
+ u32 *sndbuf)
{
+ struct subflow_send_info send_info[2];
struct mptcp_subflow_context *subflow;
- struct sock *backup = NULL;
+ int i, nr_active = 0;
+ struct sock *ssk;
+ u64 ratio;
+ u32 pace;
- sock_owned_by_me((const struct sock *)msk);
+ sock_owned_by_me((struct sock *)msk);
+ *sndbuf = 0;
if (!mptcp_ext_cache_refill(msk))
return NULL;
+ if (__mptcp_check_fallback(msk)) {
+ if (!msk->first)
+ return NULL;
+ *sndbuf = msk->first->sk_sndbuf;
+ return sk_stream_memory_free(msk->first) ? msk->first : NULL;
+ }
+
+ /* re-use last subflow, if the burst allows that */
+ if (msk->last_snd && msk->snd_burst > 0 &&
+ sk_stream_memory_free(msk->last_snd) &&
+ mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
+ mptcp_for_each_subflow(msk, subflow) {
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
+ }
+ return msk->last_snd;
+ }
+
+ /* pick the subflow with the lower wmem/wspace ratio */
+ for (i = 0; i < 2; ++i) {
+ send_info[i].ssk = NULL;
+ send_info[i].ratio = -1;
+ }
mptcp_for_each_subflow(msk, subflow) {
- struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ ssk = mptcp_subflow_tcp_sock(subflow);
+ if (!mptcp_subflow_active(subflow))
+ continue;
- if (!sk_stream_memory_free(ssk)) {
- struct socket *sock = ssk->sk_socket;
+ nr_active += !subflow->backup;
+ *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
+ if (!sk_stream_memory_free(subflow->tcp_sock))
+ continue;
- if (sock)
- mptcp_nospace(msk, sock);
+ pace = READ_ONCE(ssk->sk_pacing_rate);
+ if (!pace)
+ continue;
- return NULL;
+ ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
+ pace);
+ if (ratio < send_info[subflow->backup].ratio) {
+ send_info[subflow->backup].ssk = ssk;
+ send_info[subflow->backup].ratio = ratio;
}
+ }
- if (subflow->backup) {
- if (!backup)
- backup = ssk;
+ pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
+ msk, nr_active, send_info[0].ssk, send_info[0].ratio,
+ send_info[1].ssk, send_info[1].ratio);
- continue;
- }
+ /* pick the best backup if no other subflow is active */
+ if (!nr_active)
+ send_info[0].ssk = send_info[1].ssk;
- return ssk;
+ if (send_info[0].ssk) {
+ msk->last_snd = send_info[0].ssk;
+ msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
+ sk_stream_wspace(msk->last_snd));
+ return msk->last_snd;
}
-
- return backup;
+ return NULL;
}
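/* Illustration, not part of the patch: the ratio above is
 * (bytes queued << 32) / pacing rate, i.e. a fixed-point estimate of
 * how long each subflow needs to drain its queue; the scheduler picks
 * the smallest.  With made-up numbers:
 *
 *	((u64)65536 << 32) / 10000000	(64 KiB queued, 10 MB/s)
 *	((u64)16384 << 32) /  1000000	(16 KiB queued,  1 MB/s)
 *
 * the first subflow wins despite having more data queued, because it
 * drains that data faster.
 */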
-static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
+static void ssk_check_wmem(struct mptcp_sock *msk)
{
- struct socket *sock;
-
- if (likely(sk_stream_is_writeable(ssk)))
- return;
-
- sock = READ_ONCE(ssk->sk_socket);
- if (sock)
- mptcp_nospace(msk, sock);
+ if (unlikely(!mptcp_is_writeable(msk)))
+ mptcp_nospace(msk);
}
static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
@@ -866,6 +1157,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
struct page_frag *pfrag;
size_t copied = 0;
struct sock *ssk;
+ u32 sndbuf;
bool tx_ok;
long timeo;
@@ -892,7 +1184,7 @@ restart:
}
__mptcp_flush_join_list(msk);
- ssk = mptcp_subflow_get_send(msk);
+ ssk = mptcp_subflow_get_send(msk, &sndbuf);
while (!sk_stream_memory_free(sk) ||
!ssk ||
!mptcp_page_frag_refill(ssk, pfrag)) {
@@ -909,19 +1201,25 @@ restart:
mptcp_reset_timer(sk);
}
+ mptcp_nospace(msk);
ret = sk_stream_wait_memory(sk, &timeo);
if (ret)
goto out;
mptcp_clean_una(sk);
- ssk = mptcp_subflow_get_send(msk);
+ ssk = mptcp_subflow_get_send(msk, &sndbuf);
if (list_empty(&msk->conn_list)) {
ret = -ENOTCONN;
goto out;
}
}
+ /* do auto tuning */
+ if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
+ sndbuf > READ_ONCE(sk->sk_sndbuf))
+ WRITE_ONCE(sk->sk_sndbuf, sndbuf);
+
pr_debug("conn_list->subflow=%p", ssk);
lock_sock(ssk);
@@ -938,6 +1236,10 @@ restart:
break;
}
+ /* burst can be negative, we will try to move to the next subflow
+ * at selection time, if possible.
+ */
+ msk->snd_burst -= ret;
copied += ret;
tx_ok = msg_data_left(msg);
@@ -947,7 +1249,6 @@ restart:
if (!sk_stream_memory_free(ssk) ||
!mptcp_page_frag_refill(ssk, pfrag) ||
!mptcp_ext_cache_refill(msk)) {
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
tcp_push(ssk, msg->msg_flags, mss_now,
tcp_sk(ssk)->nonagle, size_goal);
mptcp_set_timeout(sk, ssk);
@@ -995,9 +1296,9 @@ restart:
mptcp_reset_timer(sk);
}
- ssk_check_wmem(msk, ssk);
release_sock(ssk);
out:
+ ssk_check_wmem(msk);
release_sock(sk);
return copied ? : ret;
}
@@ -1135,10 +1436,14 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
*/
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk;
+ bool slow;
ssk = mptcp_subflow_tcp_sock(subflow);
+ slow = lock_sock_fast(ssk);
WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
tcp_sk(ssk)->window_clamp = window_clamp;
+ tcp_cleanup_rbuf(ssk, 1);
+ unlock_sock_fast(ssk, slow);
}
}
}
@@ -1154,6 +1459,11 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
unsigned int moved = 0;
bool done;
+ /* avoid looping forever below on racing close */
+ if (((struct sock *)msk)->sk_state == TCP_CLOSE)
+ return false;
+
+ __mptcp_flush_join_list(msk);
do {
struct sock *ssk = mptcp_subflow_recv_lookup(msk);
@@ -1165,7 +1475,11 @@ static bool __mptcp_move_skbs(struct mptcp_sock *msk)
release_sock(ssk);
} while (!done);
- return moved > 0;
+ if (mptcp_ofo_queue(msk) || moved > 0) {
+ mptcp_check_data_fin((struct sock *)msk);
+ return true;
+ }
+ return false;
}
static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
@@ -1261,6 +1575,9 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
set_bit(MPTCP_DATA_READY, &msk->flags);
}
out_err:
+ pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
+ msk, test_bit(MPTCP_DATA_READY, &msk->flags),
+ skb_queue_empty(&sk->sk_receive_queue), copied);
mptcp_rcv_space_adjust(msk, copied);
release_sock(sk);
@@ -1311,9 +1628,15 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
sock_owned_by_me((const struct sock *)msk);
+ if (__mptcp_check_fallback(msk))
+ return msk->first;
+
mptcp_for_each_subflow(msk, subflow) {
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ if (!mptcp_subflow_active(subflow))
+ continue;
+
/* still data outstanding at TCP level? Don't retransmit. */
if (!tcp_write_queue_empty(ssk))
return NULL;
@@ -1338,9 +1661,9 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
* so we need to use tcp_close() after detaching them from the mptcp
* parent socket.
*/
-static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
- struct mptcp_subflow_context *subflow,
- long timeout)
+void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ long timeout)
{
struct socket *sock = READ_ONCE(ssk->sk_socket);
@@ -1371,6 +1694,10 @@ static void pm_work(struct mptcp_sock *msk)
pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
mptcp_pm_nl_add_addr_received(msk);
}
+ if (pm->status & BIT(MPTCP_PM_RM_ADDR_RECEIVED)) {
+ pm->status &= ~BIT(MPTCP_PM_RM_ADDR_RECEIVED);
+ mptcp_pm_nl_rm_addr_received(msk);
+ }
if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
mptcp_pm_nl_fully_established(msk);
@@ -1383,6 +1710,20 @@ static void pm_work(struct mptcp_sock *msk)
spin_unlock_bh(&msk->pm.lock);
}
+static void __mptcp_close_subflow(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow, *tmp;
+
+ list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (inet_sk_state_load(ssk) != TCP_CLOSE)
+ continue;
+
+ __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
+ }
+}
+
static void mptcp_worker(struct work_struct *work)
{
struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
@@ -1400,6 +1741,9 @@ static void mptcp_worker(struct work_struct *work)
mptcp_clean_una(sk);
mptcp_check_data_fin_ack(sk);
__mptcp_flush_join_list(msk);
+ if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+ __mptcp_close_subflow(msk);
+
__mptcp_move_skbs(msk);
if (msk->pm.status)
@@ -1474,6 +1818,7 @@ static int __mptcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&msk->rtx_queue);
__set_bit(MPTCP_SEND_SPACE, &msk->flags);
INIT_WORK(&msk->work, mptcp_worker);
+ msk->out_of_order_queue = RB_ROOT;
msk->first = NULL;
inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
@@ -1491,23 +1836,23 @@ static int mptcp_init_sock(struct sock *sk)
struct net *net = sock_net(sk);
int ret;
+ ret = __mptcp_init_sock(sk);
+ if (ret)
+ return ret;
+
if (!mptcp_is_enabled(net))
return -ENOPROTOOPT;
if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
return -ENOMEM;
- ret = __mptcp_init_sock(sk);
- if (ret)
- return ret;
-
ret = __mptcp_socket_create(mptcp_sk(sk));
if (ret)
return ret;
sk_sockets_allocated_inc(sk);
sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
- sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];
+ sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
return 0;
}
@@ -1531,7 +1876,7 @@ static void mptcp_cancel_work(struct sock *sk)
sock_put(sk);
}
-static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
+void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
{
lock_sock(ssk);
@@ -1809,14 +2154,21 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err,
return newsk;
}
+void mptcp_destroy_common(struct mptcp_sock *msk)
+{
+ skb_rbtree_purge(&msk->out_of_order_queue);
+ mptcp_token_destroy(msk);
+ mptcp_pm_free_anno_list(msk);
+}
+
static void mptcp_destroy(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- mptcp_token_destroy(msk);
if (msk->cached_ext)
__skb_ext_put(msk->cached_ext);
+ mptcp_destroy_common(msk);
sk_sockets_allocated_dec(sk);
}
@@ -2288,13 +2640,13 @@ static __poll_t mptcp_poll(struct file *file, struct socket *sock,
sock_poll_wait(file, sock, wait);
state = inet_sk_state_load(sk);
+ pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
if (state == TCP_LISTEN)
return mptcp_check_readable(msk);
if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) {
mask |= mptcp_check_readable(msk);
- if (sk_stream_is_writeable(sk) &&
- test_bit(MPTCP_SEND_SPACE, &msk->flags))
+ if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
mask |= EPOLLOUT | EPOLLWRNORM;
}
if (sk->sk_shutdown & RCV_SHUTDOWN)
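
The sendmsg() changes above grow sk_sndbuf to track the subflows' tuned buffers, but only when user space has not pinned the size: setsockopt(SO_SNDBUF) sets SOCK_SNDBUF_LOCK, which the new sk->sk_userlocks check honours. A minimal user-space sketch of pinning the buffer (not part of this series; plain TCP socket and values are illustrative):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val = 256 * 1024;
	socklen_t len = sizeof(val);

	/* SO_SNDBUF sets SOCK_SNDBUF_LOCK, so the auto-tuning added in
	 * mptcp_sendmsg() will leave sk_sndbuf alone afterwards.
	 */
	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val));
	getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &val, &len);
	printf("effective sndbuf: %d\n", val); /* kernel stores 2x request */
	return 0;
}
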
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 20f04ac85409..13ab89dc1914 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -90,6 +90,7 @@
#define MPTCP_WORK_RTX 2
#define MPTCP_WORK_EOF 3
#define MPTCP_FALLBACK_DONE 4
+#define MPTCP_WORK_CLOSE_SUBFLOW 5
struct mptcp_options_received {
u64 sndr_key;
@@ -140,6 +141,8 @@ struct mptcp_addr_info {
sa_family_t family;
__be16 port;
u8 id;
+ u8 flags;
+ int ifindex;
union {
struct in_addr addr;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
@@ -150,6 +153,7 @@ struct mptcp_addr_info {
enum mptcp_pm_status {
MPTCP_PM_ADD_ADDR_RECEIVED,
+ MPTCP_PM_RM_ADDR_RECEIVED,
MPTCP_PM_ESTABLISHED,
MPTCP_PM_SUBFLOW_ESTABLISHED,
};
@@ -157,14 +161,17 @@ enum mptcp_pm_status {
struct mptcp_pm_data {
struct mptcp_addr_info local;
struct mptcp_addr_info remote;
+ struct list_head anno_list;
spinlock_t lock; /* protects the whole PM data */
- bool addr_signal;
+ bool add_addr_signal;
+ bool rm_addr_signal;
bool server_side;
bool work_pending;
bool accept_addr;
bool accept_subflow;
+ bool add_addr_echo;
u8 add_addr_signaled;
u8 add_addr_accepted;
u8 local_addr_used;
@@ -174,6 +181,7 @@ struct mptcp_pm_data {
u8 local_addr_max;
u8 subflows_max;
u8 status;
+ u8 rm_id;
};
struct mptcp_data_frag {
@@ -194,6 +202,8 @@ struct mptcp_sock {
u64 write_seq;
u64 ack_seq;
u64 rcv_data_fin_seq;
+ struct sock *last_snd;
+ int snd_burst;
atomic64_t snd_una;
unsigned long timer_ival;
u32 token;
@@ -202,8 +212,11 @@ struct mptcp_sock {
bool fully_established;
bool rcv_data_fin;
bool snd_data_fin_enable;
+ bool use_64bit_ack; /* Set when we received a 64-bit DSN */
spinlock_t join_list_lock;
struct work_struct work;
+ struct sk_buff *ooo_last_skb;
+ struct rb_root out_of_order_queue;
struct list_head conn_list;
struct list_head rtx_queue;
struct list_head join_list;
@@ -268,6 +281,12 @@ mptcp_subflow_rsk(const struct request_sock *rsk)
return (struct mptcp_subflow_request_sock *)rsk;
}
+enum mptcp_data_avail {
+ MPTCP_SUBFLOW_NODATA,
+ MPTCP_SUBFLOW_DATA_AVAIL,
+ MPTCP_SUBFLOW_OOO_DATA
+};
+
/* MPTCP subflow context */
struct mptcp_subflow_context {
struct list_head node;/* conn_list of subflows */
@@ -292,10 +311,9 @@ struct mptcp_subflow_context {
map_valid : 1,
mpc_map : 1,
backup : 1,
- data_avail : 1,
rx_eof : 1,
- use_64bit_ack : 1, /* Set when we received a 64-bit DSN */
can_ack : 1; /* only after processing the remote key */
+ enum mptcp_data_avail data_avail;
u32 remote_nonce;
u64 thmac;
u32 local_nonce;
@@ -348,10 +366,14 @@ void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool mptcp_subflow_data_available(struct sock *sk);
void __init mptcp_subflow_init(void);
+void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how);
+void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
+ struct mptcp_subflow_context *subflow,
+ long timeout);
+void mptcp_subflow_reset(struct sock *ssk);
/* called with sk socket lock held */
-int __mptcp_subflow_connect(struct sock *sk, int ifindex,
- const struct mptcp_addr_info *loc,
+int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
const struct mptcp_addr_info *remote);
int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
@@ -388,6 +410,7 @@ bool mptcp_finish_join(struct sock *sk);
void mptcp_data_acked(struct sock *sk);
void mptcp_subflow_eof(struct sock *sk);
bool mptcp_update_rcv_data_fin(struct mptcp_sock *msk, u64 data_fin_seq, bool use_64bit);
+void mptcp_destroy_common(struct mptcp_sock *msk);
void __init mptcp_token_init(void);
static inline void mptcp_token_init_request(struct request_sock *req)
@@ -421,26 +444,40 @@ void mptcp_pm_subflow_established(struct mptcp_sock *msk,
void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id);
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
const struct mptcp_addr_info *addr);
+void mptcp_pm_rm_addr_received(struct mptcp_sock *msk, u8 rm_id);
+void mptcp_pm_free_anno_list(struct mptcp_sock *msk);
+struct mptcp_pm_add_entry *
+mptcp_pm_del_add_timer(struct mptcp_sock *msk,
+ struct mptcp_addr_info *addr);
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
- const struct mptcp_addr_info *addr);
+ const struct mptcp_addr_info *addr,
+ bool echo);
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id);
-int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id);
+int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 local_id);
+
+static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
+{
+ return READ_ONCE(msk->pm.add_addr_signal);
+}
-static inline bool mptcp_pm_should_signal(struct mptcp_sock *msk)
+static inline bool mptcp_pm_should_rm_signal(struct mptcp_sock *msk)
{
- return READ_ONCE(msk->pm.addr_signal);
+ return READ_ONCE(msk->pm.rm_addr_signal);
}
-static inline unsigned int mptcp_add_addr_len(int family)
+static inline unsigned int mptcp_add_addr_len(int family, bool echo)
{
if (family == AF_INET)
- return TCPOLEN_MPTCP_ADD_ADDR;
- return TCPOLEN_MPTCP_ADD_ADDR6;
+ return echo ? TCPOLEN_MPTCP_ADD_ADDR_BASE
+ : TCPOLEN_MPTCP_ADD_ADDR;
+ return echo ? TCPOLEN_MPTCP_ADD_ADDR6_BASE : TCPOLEN_MPTCP_ADD_ADDR6;
}
-bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
- struct mptcp_addr_info *saddr);
+bool mptcp_pm_add_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ struct mptcp_addr_info *saddr, bool *echo);
+bool mptcp_pm_rm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
+ u8 *rm_id);
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
void __init mptcp_pm_nl_init(void);
@@ -448,6 +485,8 @@ void mptcp_pm_nl_data_init(struct mptcp_sock *msk);
void mptcp_pm_nl_fully_established(struct mptcp_sock *msk);
void mptcp_pm_nl_subflow_established(struct mptcp_sock *msk);
void mptcp_pm_nl_add_addr_received(struct mptcp_sock *msk);
+void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk);
+void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id);
int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc);
static inline struct mptcp_ext *mptcp_get_ext(struct sk_buff *skb)
@@ -464,12 +503,12 @@ static inline bool before64(__u64 seq1, __u64 seq2)
void mptcp_diag_subflow_init(struct tcp_ulp_ops *ops);
-static inline bool __mptcp_check_fallback(struct mptcp_sock *msk)
+static inline bool __mptcp_check_fallback(const struct mptcp_sock *msk)
{
return test_bit(MPTCP_FALLBACK_DONE, &msk->flags);
}
-static inline bool mptcp_check_fallback(struct sock *sk)
+static inline bool mptcp_check_fallback(const struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct mptcp_sock *msk = mptcp_sk(subflow->conn);
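
The new out-of-order queue keys skbs by 64-bit data sequence numbers, compared with the wraparound-safe before64()/after64() helpers visible in the hunk above. A standalone sketch of that comparison, with made-up values:

#include <stdio.h>
#include <stdint.h>

/* Signed difference keeps ordering correct across the 2^64 wrap; this
 * mirrors before64() in protocol.h and the after64() check used when
 * classifying a mapping as MPTCP_SUBFLOW_OOO_DATA.
 */
static int before64(uint64_t seq1, uint64_t seq2)
{
	return (int64_t)(seq1 - seq2) < 0;
}
#define after64(seq2, seq1)	before64(seq1, seq2)

int main(void)
{
	uint64_t old_ack = UINT64_MAX - 5, ack_seq = 2; /* wrapped */

	/* prints 1: ack_seq is logically ahead, so the data is OOO */
	printf("%d\n", after64(ack_seq, old_ack));
	return 0;
}
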
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
index 6f035af1c9d2..ac4a1fe3550b 100644
--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -20,6 +20,7 @@
#include <net/ip6_route.h>
#endif
#include <net/mptcp.h>
+#include <uapi/linux/mptcp.h>
#include "protocol.h"
#include "mib.h"
@@ -270,6 +271,19 @@ static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow)
return thmac == subflow->thmac;
}
+void mptcp_subflow_reset(struct sock *ssk)
+{
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ struct sock *sk = subflow->conn;
+
+ tcp_set_state(ssk, TCP_CLOSE);
+ tcp_send_active_reset(ssk, GFP_ATOMIC);
+ tcp_done(ssk);
+ if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
+ schedule_work(&mptcp_sk(sk)->work))
+ sock_hold(sk);
+}
+
static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
@@ -342,8 +356,7 @@ fallback:
return;
do_reset:
- tcp_send_active_reset(sk, GFP_ATOMIC);
- tcp_done(sk);
+ mptcp_subflow_reset(sk);
}
struct request_sock_ops mptcp_subflow_request_sock_ops;
@@ -434,7 +447,7 @@ static void mptcp_sock_destruct(struct sock *sk)
sock_orphan(sk);
}
- mptcp_token_destroy(mptcp_sk(sk));
+ mptcp_destroy_common(mptcp_sk(sk));
inet_sock_destruct(sk);
}
@@ -769,12 +782,11 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
if (!mpext->dsn64) {
map_seq = expand_seq(subflow->map_seq, subflow->map_data_len,
mpext->data_seq);
- subflow->use_64bit_ack = 0;
pr_debug("expanded seq=%llu", subflow->map_seq);
} else {
map_seq = mpext->data_seq;
- subflow->use_64bit_ack = 1;
}
+ WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64);
if (subflow->map_valid) {
/* Allow replacing only with an identical map */
@@ -817,16 +829,25 @@ validate_seq:
return MAPPING_OK;
}
-static int subflow_read_actor(read_descriptor_t *desc,
- struct sk_buff *skb,
- unsigned int offset, size_t len)
+static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
+ u64 limit)
{
- size_t copy_len = min(desc->count, len);
-
- desc->count -= copy_len;
-
- pr_debug("flushed %zu bytes, %zu left", copy_len, desc->count);
- return copy_len;
+ struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
+ bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
+ u32 incr;
+
+ incr = limit >= skb->len ? skb->len + fin : limit;
+
+ pr_debug("discarding=%d len=%d seq=%d", incr, skb->len,
+ subflow->map_subflow_seq);
+ MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
+ tcp_sk(ssk)->copied_seq += incr;
+ if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
+ sk_eat_skb(ssk, skb);
+ if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
+ subflow->map_valid = 0;
+ if (incr)
+ tcp_cleanup_rbuf(ssk, incr);
}
static bool subflow_check_data_avail(struct sock *ssk)
@@ -838,13 +859,13 @@ static bool subflow_check_data_avail(struct sock *ssk)
pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
+ if (!skb_peek(&ssk->sk_receive_queue))
+ subflow->data_avail = 0;
if (subflow->data_avail)
return true;
msk = mptcp_sk(subflow->conn);
for (;;) {
- u32 map_remaining;
- size_t delta;
u64 ack_seq;
u64 old_ack;
@@ -862,6 +883,7 @@ static bool subflow_check_data_avail(struct sock *ssk)
subflow->map_data_len = skb->len;
subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
subflow->ssn_offset;
+ subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
return true;
}
@@ -889,42 +911,18 @@ static bool subflow_check_data_avail(struct sock *ssk)
ack_seq = mptcp_subflow_get_mapped_dsn(subflow);
pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
ack_seq);
- if (ack_seq == old_ack)
+ if (ack_seq == old_ack) {
+ subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL;
break;
+ } else if (after64(ack_seq, old_ack)) {
+ subflow->data_avail = MPTCP_SUBFLOW_OOO_DATA;
+ break;
+ }
/* only accept in-sequence mapping. Old values are spurious
- * retransmission; we can hit "future" values on active backup
- * subflow switch, we relay on retransmissions to get
- * in-sequence data.
- * Cuncurrent subflows support will require subflow data
- * reordering
+ * retransmissions
*/
- map_remaining = subflow->map_data_len -
- mptcp_subflow_get_map_offset(subflow);
- if (before64(ack_seq, old_ack))
- delta = min_t(size_t, old_ack - ack_seq, map_remaining);
- else
- delta = min_t(size_t, ack_seq - old_ack, map_remaining);
-
- /* discard mapped data */
- pr_debug("discarding %zu bytes, current map len=%d", delta,
- map_remaining);
- if (delta) {
- read_descriptor_t desc = {
- .count = delta,
- };
- int ret;
-
- ret = tcp_read_sock(ssk, &desc, subflow_read_actor);
- if (ret < 0) {
- ssk->sk_err = -ret;
- goto fatal;
- }
- if (ret < delta)
- return false;
- if (delta == map_remaining)
- subflow->map_valid = 0;
- }
+ mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
}
return true;
@@ -935,13 +933,13 @@ fatal:
ssk->sk_error_report(ssk);
tcp_set_state(ssk, TCP_CLOSE);
tcp_send_active_reset(ssk, GFP_ATOMIC);
+ subflow->data_avail = 0;
return false;
}
bool mptcp_subflow_data_available(struct sock *sk)
{
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
- struct sk_buff *skb;
/* check if current mapping is still valid */
if (subflow->map_valid &&
@@ -954,15 +952,7 @@ bool mptcp_subflow_data_available(struct sock *sk)
subflow->map_data_len);
}
- if (!subflow_check_data_avail(sk)) {
- subflow->data_avail = 0;
- return false;
- }
-
- skb = skb_peek(&sk->sk_receive_queue);
- subflow->data_avail = skb &&
- before(tcp_sk(sk)->copied_seq, TCP_SKB_CB(skb)->end_seq);
- return subflow->data_avail;
+ return subflow_check_data_avail(sk);
}
/* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
@@ -1009,8 +999,10 @@ static void subflow_write_space(struct sock *sk)
struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
struct sock *parent = subflow->conn;
- sk_stream_write_space(sk);
- if (sk_stream_is_writeable(sk)) {
+ if (!sk_stream_is_writeable(sk))
+ return;
+
+ if (sk_stream_is_writeable(parent)) {
set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
smp_mb__after_atomic();
/* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
@@ -1069,8 +1061,7 @@ static void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
#endif
}
-int __mptcp_subflow_connect(struct sock *sk, int ifindex,
- const struct mptcp_addr_info *loc,
+int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
const struct mptcp_addr_info *remote)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1115,7 +1106,7 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex,
if (loc->family == AF_INET6)
addrlen = sizeof(struct sockaddr_in6);
#endif
- ssk->sk_bound_dev_if = ifindex;
+ ssk->sk_bound_dev_if = loc->ifindex;
err = kernel_bind(sf, (struct sockaddr *)&addr, addrlen);
if (err)
goto failed;
@@ -1127,7 +1118,7 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex,
subflow->local_id = local_id;
subflow->remote_id = remote_id;
subflow->request_join = 1;
- subflow->request_bkup = 1;
+ subflow->request_bkup = !!(loc->flags & MPTCP_PM_ADDR_FLAG_BACKUP);
mptcp_info2sockaddr(remote, &addr);
err = kernel_connect(sf, (struct sockaddr *)&addr, addrlen, O_NONBLOCK);
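
mptcp_subflow_discard_data() above trims bytes that the MPTCP level has already acked, eating the whole skb (plus a virtual FIN byte) only when the stale range covers it. A toy model of that increment, with arbitrary values:

#include <stdio.h>
#include <stdint.h>

/* "limit" stands for old_ack - ack_seq, the stale byte count; this
 * mirrors the expression incr = limit >= skb->len ? skb->len + fin : limit.
 */
static uint32_t discard_incr(uint64_t limit, uint32_t skb_len, int fin)
{
	return limit >= skb_len ? skb_len + fin : (uint32_t)limit;
}

int main(void)
{
	printf("%u\n", discard_incr(100, 60, 0)); /* whole skb: 60 */
	printf("%u\n", discard_incr(40, 60, 0));  /* partial: 40 */
	printf("%u\n", discard_incr(60, 60, 1));  /* skb + FIN: 61 */
	return 0;
}
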
diff --git a/net/ncsi/ncsi-netlink.c b/net/ncsi/ncsi-netlink.c
index 8b386d766e7d..adddc7707aa4 100644
--- a/net/ncsi/ncsi-netlink.c
+++ b/net/ncsi/ncsi-netlink.c
@@ -716,7 +716,7 @@ static int ncsi_set_channel_mask_nl(struct sk_buff *msg,
return 0;
}
-static const struct genl_ops ncsi_ops[] = {
+static const struct genl_small_ops ncsi_ops[] = {
{
.cmd = NCSI_CMD_PKG_INFO,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -762,8 +762,8 @@ static struct genl_family ncsi_genl_family __ro_after_init = {
.maxattr = NCSI_ATTR_MAX,
.policy = ncsi_genl_policy,
.module = THIS_MODULE,
- .ops = ncsi_ops,
- .n_ops = ARRAY_SIZE(ncsi_ops),
+ .small_ops = ncsi_ops,
+ .n_small_ops = ARRAY_SIZE(ncsi_ops),
};
int ncsi_init_netlink(struct net_device *dev)
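
The ncsi conversion (and the ipvs one further down) switches families whose commands need no per-op policy to the smaller genl_small_ops table. A hedged sketch of the pattern for a hypothetical family; all demo_* names are invented:

#include <linux/module.h>
#include <net/genetlink.h>

static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;
}

static const struct genl_small_ops demo_ops[] = {
	{
		.cmd	  = 1, /* hypothetical command id */
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	  = demo_doit,
	},
};

static struct genl_family demo_family __ro_after_init = {
	.name	     = "demo",
	.version     = 1,
	.module	     = THIS_MODULE,
	.small_ops   = demo_ops,
	.n_small_ops = ARRAY_SIZE(demo_ops),
};
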
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 25313c29d799..52370211e46b 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -441,6 +441,7 @@ endif # NF_CONNTRACK
config NF_TABLES
select NETFILTER_NETLINK
+ select LIBCRC32C
tristate "Netfilter nf_tables support"
help
nftables is the new packet classification framework that intends to
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 3ac7c8c1548d..63d032191e62 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -282,6 +282,16 @@ nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
return NULL;
return net->nf.hooks_bridge + hooknum;
#endif
+#ifdef CONFIG_NETFILTER_INGRESS
+ case NFPROTO_INET:
+ if (WARN_ON_ONCE(hooknum != NF_INET_INGRESS))
+ return NULL;
+ if (!dev || dev_net(dev) != net) {
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
+ return &dev->nf_hooks_ingress;
+#endif
case NFPROTO_IPV4:
if (WARN_ON_ONCE(ARRAY_SIZE(net->nf.hooks_ipv4) <= hooknum))
return NULL;
@@ -311,20 +321,80 @@ nf_hook_entry_head(struct net *net, int pf, unsigned int hooknum,
return NULL;
}
+static int nf_ingress_check(struct net *net, const struct nf_hook_ops *reg,
+ int hooknum)
+{
+#ifndef CONFIG_NETFILTER_INGRESS
+ if (reg->hooknum == hooknum)
+ return -EOPNOTSUPP;
+#endif
+ if (reg->hooknum != hooknum ||
+ !reg->dev || dev_net(reg->dev) != net)
+ return -EINVAL;
+
+ return 0;
+}
+
+static inline bool nf_ingress_hook(const struct nf_hook_ops *reg, int pf)
+{
+ if ((pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) ||
+ (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS))
+ return true;
+
+ return false;
+}
+
+static void nf_static_key_inc(const struct nf_hook_ops *reg, int pf)
+{
+#ifdef CONFIG_JUMP_LABEL
+ int hooknum;
+
+ if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
+ pf = NFPROTO_NETDEV;
+ hooknum = NF_NETDEV_INGRESS;
+ } else {
+ hooknum = reg->hooknum;
+ }
+ static_key_slow_inc(&nf_hooks_needed[pf][hooknum]);
+#endif
+}
+
+static void nf_static_key_dec(const struct nf_hook_ops *reg, int pf)
+{
+#ifdef CONFIG_JUMP_LABEL
+ int hooknum;
+
+ if (pf == NFPROTO_INET && reg->hooknum == NF_INET_INGRESS) {
+ pf = NFPROTO_NETDEV;
+ hooknum = NF_NETDEV_INGRESS;
+ } else {
+ hooknum = reg->hooknum;
+ }
+ static_key_slow_dec(&nf_hooks_needed[pf][hooknum]);
+#endif
+}
+
static int __nf_register_net_hook(struct net *net, int pf,
const struct nf_hook_ops *reg)
{
struct nf_hook_entries *p, *new_hooks;
struct nf_hook_entries __rcu **pp;
+ int err;
- if (pf == NFPROTO_NETDEV) {
-#ifndef CONFIG_NETFILTER_INGRESS
- if (reg->hooknum == NF_NETDEV_INGRESS)
- return -EOPNOTSUPP;
-#endif
- if (reg->hooknum != NF_NETDEV_INGRESS ||
- !reg->dev || dev_net(reg->dev) != net)
- return -EINVAL;
+ switch (pf) {
+ case NFPROTO_NETDEV:
+ err = nf_ingress_check(net, reg, NF_NETDEV_INGRESS);
+ if (err < 0)
+ return err;
+ break;
+ case NFPROTO_INET:
+ if (reg->hooknum != NF_INET_INGRESS)
+ break;
+
+ err = nf_ingress_check(net, reg, NF_INET_INGRESS);
+ if (err < 0)
+ return err;
+ break;
}
pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
@@ -345,12 +415,11 @@ static int __nf_register_net_hook(struct net *net, int pf,
hooks_validate(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
- if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+ if (nf_ingress_hook(reg, pf))
net_inc_ingress_queue();
#endif
-#ifdef CONFIG_JUMP_LABEL
- static_key_slow_inc(&nf_hooks_needed[pf][reg->hooknum]);
-#endif
+ nf_static_key_inc(reg, pf);
+
BUG_ON(p == new_hooks);
nf_hook_entries_free(p);
return 0;
@@ -403,12 +472,10 @@ static void __nf_unregister_net_hook(struct net *net, int pf,
if (nf_remove_net_hook(p, reg)) {
#ifdef CONFIG_NETFILTER_INGRESS
- if (pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
+ if (nf_ingress_hook(reg, pf))
net_dec_ingress_queue();
#endif
-#ifdef CONFIG_JUMP_LABEL
- static_key_slow_dec(&nf_hooks_needed[pf][reg->hooknum]);
-#endif
+ nf_static_key_dec(reg, pf);
} else {
WARN_ONCE(1, "hook not found, pf %d num %d", pf, reg->hooknum);
}
@@ -425,8 +492,12 @@ static void __nf_unregister_net_hook(struct net *net, int pf,
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
if (reg->pf == NFPROTO_INET) {
- __nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
- __nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
+ if (reg->hooknum == NF_INET_INGRESS) {
+ __nf_unregister_net_hook(net, NFPROTO_INET, reg);
+ } else {
+ __nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+ __nf_unregister_net_hook(net, NFPROTO_IPV6, reg);
+ }
} else {
__nf_unregister_net_hook(net, reg->pf, reg);
}
@@ -451,14 +522,20 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
int err;
if (reg->pf == NFPROTO_INET) {
- err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
- if (err < 0)
- return err;
-
- err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
- if (err < 0) {
- __nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
- return err;
+ if (reg->hooknum == NF_INET_INGRESS) {
+ err = __nf_register_net_hook(net, NFPROTO_INET, reg);
+ if (err < 0)
+ return err;
+ } else {
+ err = __nf_register_net_hook(net, NFPROTO_IPV4, reg);
+ if (err < 0)
+ return err;
+
+ err = __nf_register_net_hook(net, NFPROTO_IPV6, reg);
+ if (err < 0) {
+ __nf_unregister_net_hook(net, NFPROTO_IPV4, reg);
+ return err;
+ }
}
} else {
err = __nf_register_net_hook(net, reg->pf, reg);
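
With the fanout above, a single hook can now attach at the device ingress point for both address families by registering pf = NFPROTO_INET with the new NF_INET_INGRESS hook number; reg->dev is mandatory, as nf_ingress_check() enforces. A registration sketch (demo_* names are placeholders):

#include <linux/netfilter.h>
#include <linux/skbuff.h>

static unsigned int demo_hookfn(void *priv, struct sk_buff *skb,
				const struct nf_hook_state *state)
{
	return NF_ACCEPT;
}

static struct nf_hook_ops demo_ops = {
	.hook	  = demo_hookfn,
	.pf	  = NFPROTO_INET,
	.hooknum  = NF_INET_INGRESS,
	.priority = 0,
};

/* at init time, with dev and net resolved:
 *	demo_ops.dev = dev;
 *	err = nf_register_net_hook(net, &demo_ops);
 */
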
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 920b7c4331f0..6f35832f0de3 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -250,22 +250,7 @@ EXPORT_SYMBOL_GPL(ip_set_type_unregister);
void *
ip_set_alloc(size_t size)
{
- void *members = NULL;
-
- if (size < KMALLOC_MAX_SIZE)
- members = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
-
- if (members) {
- pr_debug("%p: allocated with kmalloc\n", members);
- return members;
- }
-
- members = vzalloc(size);
- if (!members)
- return NULL;
- pr_debug("%p: allocated with vmalloc\n", members);
-
- return members;
+ return kvzalloc(size, GFP_KERNEL_ACCOUNT);
}
EXPORT_SYMBOL_GPL(ip_set_alloc);
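
ip_set_alloc() now leans on kvzalloc(), which attempts kmalloc first and transparently falls back to vmalloc for large sizes; kvfree() releases either backing store. A minimal sketch of the pairing (demo_* names are illustrative):

#include <linux/mm.h>
#include <linux/slab.h>

/* GFP_KERNEL_ACCOUNT additionally charges the allocation to the
 * caller's memcg, as the hunk above does.
 */
static void *demo_alloc(size_t size)
{
	return kvzalloc(size, GFP_KERNEL_ACCOUNT);
}

static void demo_free(void *p)
{
	kvfree(p);
}
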
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index 2c1593089ede..eb0e329f9b8d 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -29,7 +29,6 @@ if IP_VS
config IP_VS_IPV6
bool "IPv6 support for IPVS"
depends on IPV6 = y || IP_VS = IPV6
- select IP6_NF_IPTABLES
select NF_DEFRAG_IPV6
help
Add IPv6 support to IPVS.
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index a90b8eac16ac..c100c6b112c8 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -402,6 +402,8 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
{
unsigned int hash;
struct ip_vs_conn *cp, *ret=NULL;
+ const union nf_inet_addr *saddr;
+ __be16 sport;
/*
* Check for "full" addressed entries
@@ -411,10 +413,20 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
rcu_read_lock();
hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
- if (p->vport == cp->cport && p->cport == cp->dport &&
- cp->af == p->af &&
+ if (p->vport != cp->cport)
+ continue;
+
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
+ sport = cp->vport;
+ saddr = &cp->vaddr;
+ } else {
+ sport = cp->dport;
+ saddr = &cp->daddr;
+ }
+
+ if (p->cport == sport && cp->af == p->af &&
ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
- ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
+ ip_vs_addr_equal(p->af, p->caddr, saddr) &&
p->protocol == cp->protocol &&
cp->ipvs == p->ipvs) {
if (!__ip_vs_conn_get(cp))
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index e3668a6e54e4..cc3c275934f4 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -875,7 +875,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
unsigned int verdict = NF_DROP;
if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
- goto ignore_cp;
+ goto after_nat;
/* Ensure the checksum is correct */
if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -901,6 +901,7 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
if (ip_vs_route_me_harder(cp->ipvs, af, skb, hooknum))
goto out;
+after_nat:
/* do the statistics and put it back */
ip_vs_out_stats(cp, skb);
@@ -909,8 +910,6 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 0);
-
-ignore_cp:
verdict = NF_ACCEPT;
out:
@@ -1276,6 +1275,9 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
{
struct ip_vs_protocol *pp = pd->pp;
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ goto after_nat;
+
IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
if (skb_ensure_writable(skb, iph->len))
@@ -1316,6 +1318,7 @@ handle_response(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
+after_nat:
ip_vs_out_stats(cp, skb);
ip_vs_set_state(cp, IP_VS_DIR_OUTPUT, skb, pd);
skb->ipvs_property = 1;
@@ -1412,11 +1415,8 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
ipvs, af, skb, &iph);
- if (likely(cp)) {
- if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
- goto ignore_cp;
+ if (likely(cp))
return handle_response(af, skb, pd, cp, &iph, hooknum);
- }
/* Check for real-server-started requests */
if (atomic_read(&ipvs->conn_out_counter)) {
@@ -1475,14 +1475,9 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
}
}
-out:
IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
"ip_vs_out: packet continues traversal as normal");
return NF_ACCEPT;
-
-ignore_cp:
- __ip_vs_conn_put(cp);
- goto out;
}
/*
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 678c5b14841c..e279ded4e306 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2508,6 +2508,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
/* Set timeout values for (tcp tcpfin udp) */
ret = ip_vs_set_timeout(ipvs, (struct ip_vs_timeout_user *)arg);
goto out_unlock;
+ } else if (!len) {
+ /* No more commands with len == 0 below */
+ ret = -EINVAL;
+ goto out_unlock;
}
usvc_compat = (struct ip_vs_service_user *)arg;
@@ -2584,9 +2588,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, sockptr_t ptr, unsigned int len)
break;
case IP_VS_SO_SET_DELDEST:
ret = ip_vs_del_dest(svc, &udest);
- break;
- default:
- ret = -EINVAL;
}
out_unlock:
@@ -3892,7 +3893,7 @@ out:
}
-static const struct genl_ops ip_vs_genl_ops[] = {
+static const struct genl_small_ops ip_vs_genl_ops[] = {
{
.cmd = IPVS_CMD_NEW_SERVICE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -4000,8 +4001,8 @@ static struct genl_family ip_vs_genl_family __ro_after_init = {
.policy = ip_vs_cmd_policy,
.netnsok = true, /* Make ipvsadm to work on netns */
.module = THIS_MODULE,
- .ops = ip_vs_genl_ops,
- .n_ops = ARRAY_SIZE(ip_vs_genl_ops),
+ .small_ops = ip_vs_genl_ops,
+ .n_small_ops = ARRAY_SIZE(ip_vs_genl_ops),
};
static int __init ip_vs_genl_register(void)
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 2b8abbfe018c..16b48064f715 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -242,9 +242,6 @@ struct ip_vs_sync_thread_data {
| IPVS Sync Connection (1) |
*/
-#define SYNC_MESG_HEADER_LEN 4
-#define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */
-
/* Version 0 header */
struct ip_vs_sync_mesg_v0 {
__u8 nr_conns;
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index b00866d777fe..d2e5a8f644b8 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -609,6 +609,8 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
if (ret == NF_ACCEPT) {
nf_reset_ct(skb);
skb_forward_csum(skb);
+ if (skb->dev)
+ skb->tstamp = 0;
}
return ret;
}
@@ -649,6 +651,8 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
if (!local) {
skb_forward_csum(skb);
+ if (skb->dev)
+ skb->tstamp = 0;
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
@@ -669,6 +673,8 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
if (!local) {
ip_vs_drop_early_demux_sk(skb);
skb_forward_csum(skb);
+ if (skb->dev)
+ skb->tstamp = 0;
NF_HOOK(pf, NF_INET_LOCAL_OUT, cp->ipvs->net, NULL, skb,
NULL, skb_dst(skb)->dev, dst_output);
} else
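
The three transmit paths above zero skb->tstamp before handing packets to dst_output(); a leftover receive timestamp would otherwise be interpreted as an earliest-departure time by EDT-aware pacing qdiscs such as fq (that rationale is inferred here, not stated in the diff). The shared shape of the fix, as a sketch with an illustrative name:

static inline void demo_forward_prepare(struct sk_buff *skb)
{
	skb_forward_csum(skb);
	if (skb->dev)
		skb->tstamp = 0;
}
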
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 5b97d233f89b..234b7cab37c3 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -859,7 +859,6 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
out:
nf_conntrack_double_unlock(hash, reply_hash);
- NF_CT_STAT_INC(net, insert_failed);
local_bh_enable();
return -EEXIST;
}
@@ -909,6 +908,7 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
tstamp->start = ktime_get_real_ns();
}
+/* caller must hold locks to prevent concurrent changes */
static int __nf_ct_resolve_clash(struct sk_buff *skb,
struct nf_conntrack_tuple_hash *h)
{
@@ -922,23 +922,21 @@ static int __nf_ct_resolve_clash(struct sk_buff *skb,
if (nf_ct_is_dying(ct))
return NF_DROP;
- if (!atomic_inc_not_zero(&ct->ct_general.use))
- return NF_DROP;
-
if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
nf_ct_match(ct, loser_ct)) {
struct net *net = nf_ct_net(ct);
+ nf_conntrack_get(&ct->ct_general);
+
nf_ct_acct_merge(ct, ctinfo, loser_ct);
nf_ct_add_to_dying_list(loser_ct);
nf_conntrack_put(&loser_ct->ct_general);
nf_ct_set(skb, ct, ctinfo);
- NF_CT_STAT_INC(net, insert_failed);
+ NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
}
- nf_ct_put(ct);
return NF_DROP;
}
@@ -998,6 +996,8 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
&nf_conntrack_hash[repl_idx]);
+
+ NF_CT_STAT_INC(net, clash_resolve);
return NF_ACCEPT;
}
@@ -1027,10 +1027,10 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
*
* Failing that, the new, unconfirmed conntrack is still added to the table
* provided that the collision only occurs in the ORIGINAL direction.
- * The new entry will be added after the existing one in the hash list,
+ * The new entry will be added only in the non-clashing REPLY direction,
* so packets in the ORIGINAL direction will continue to match the existing
* entry. The new entry will also have a fixed timeout so it expires --
- * due to the collision, it will not see bidirectional traffic.
+ * due to the collision, it will only see reply traffic.
*
* Returns NF_DROP if the clash could not be resolved.
*/
@@ -1725,10 +1725,8 @@ nf_conntrack_handle_icmp(struct nf_conn *tmpl,
else
return NF_ACCEPT;
- if (ret <= 0) {
+ if (ret <= 0)
NF_CT_STAT_INC_ATOMIC(state->net, error);
- NF_CT_STAT_INC_ATOMIC(state->net, invalid);
- }
return ret;
}
@@ -1802,10 +1800,8 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
if (tmpl || ctinfo == IP_CT_UNTRACKED) {
/* Previously seen (loopback or untracked)? Ignore. */
if ((tmpl && !nf_ct_is_template(tmpl)) ||
- ctinfo == IP_CT_UNTRACKED) {
- NF_CT_STAT_INC_ATOMIC(state->net, ignore);
+ ctinfo == IP_CT_UNTRACKED)
return NF_ACCEPT;
- }
skb->_nfct = 0;
}
@@ -1813,7 +1809,6 @@ nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state)
dataoff = get_l4proto(skb, skb_network_offset(skb), state->pf, &protonum);
if (dataoff <= 0) {
pr_debug("not prepared to track yet or error occurred\n");
- NF_CT_STAT_INC_ATOMIC(state->net, error);
NF_CT_STAT_INC_ATOMIC(state->net, invalid);
ret = NF_ACCEPT;
goto out;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c3a4214dc958..3d0fd33be018 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2497,7 +2497,6 @@ ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
if (nla_put_be32(skb, CTA_STATS_FOUND, htonl(st->found)) ||
nla_put_be32(skb, CTA_STATS_INVALID, htonl(st->invalid)) ||
- nla_put_be32(skb, CTA_STATS_IGNORE, htonl(st->ignore)) ||
nla_put_be32(skb, CTA_STATS_INSERT, htonl(st->insert)) ||
nla_put_be32(skb, CTA_STATS_INSERT_FAILED,
htonl(st->insert_failed)) ||
@@ -2505,7 +2504,9 @@ ctnetlink_ct_stat_cpu_fill_info(struct sk_buff *skb, u32 portid, u32 seq,
nla_put_be32(skb, CTA_STATS_EARLY_DROP, htonl(st->early_drop)) ||
nla_put_be32(skb, CTA_STATS_ERROR, htonl(st->error)) ||
nla_put_be32(skb, CTA_STATS_SEARCH_RESTART,
- htonl(st->search_restart)))
+ htonl(st->search_restart)) ||
+ nla_put_be32(skb, CTA_STATS_CLASH_RESOLVE,
+ htonl(st->clash_resolve)))
goto nla_put_failure;
nlmsg_end(skb, nlh);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index a604f43e3e6b..46c5557c1fec 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -428,18 +428,18 @@ static int ct_cpu_seq_show(struct seq_file *seq, void *v)
const struct ip_conntrack_stat *st = v;
if (v == SEQ_START_TOKEN) {
- seq_puts(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
+ seq_puts(seq, "entries clashres found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
"%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
- 0,
+ st->clash_resolve,
st->found,
0,
st->invalid,
- st->ignore,
+ 0,
0,
0,
st->insert,
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 4f7a567c536e..513f78db3cb2 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -395,8 +395,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
{
struct tcphdr *tcph;
- if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
- skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
return -1;
tcph = (void *)(skb_network_header(skb) + thoff);
@@ -410,8 +409,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
{
struct udphdr *udph;
- if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
- skb_try_make_writable(skb, thoff + sizeof(*udph)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
return -1;
udph = (void *)(skb_network_header(skb) + thoff);
@@ -449,8 +447,7 @@ int nf_flow_snat_port(const struct flow_offload *flow,
struct flow_ports *hdr;
__be16 port, new_port;
- if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
- skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
return -1;
hdr = (void *)(skb_network_header(skb) + thoff);
@@ -481,8 +478,7 @@ int nf_flow_dnat_port(const struct flow_offload *flow,
struct flow_ports *hdr;
__be16 port, new_port;
- if (!pskb_may_pull(skb, thoff + sizeof(*hdr)) ||
- skb_try_make_writable(skb, thoff + sizeof(*hdr)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
return -1;
hdr = (void *)(skb_network_header(skb) + thoff);
diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
index a3bca758b849..a698dbe28ef5 100644
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -25,9 +25,6 @@ static int nf_flow_state_check(struct flow_offload *flow, int proto,
if (proto != IPPROTO_TCP)
return 0;
- if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
- return -1;
-
tcph = (void *)(skb_network_header(skb) + thoff);
if (unlikely(tcph->fin || tcph->rst)) {
flow_offload_teardown(flow);
@@ -42,8 +39,7 @@ static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
{
struct tcphdr *tcph;
- if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
- skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
return -1;
tcph = (void *)(skb_network_header(skb) + thoff);
@@ -57,8 +53,7 @@ static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
{
struct udphdr *udph;
- if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
- skb_try_make_writable(skb, thoff + sizeof(*udph)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
return -1;
udph = (void *)(skb_network_header(skb) + thoff);
@@ -167,8 +162,8 @@ static bool ip_has_options(unsigned int thoff)
static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
struct flow_offload_tuple *tuple)
{
+ unsigned int thoff, hdrsize;
struct flow_ports *ports;
- unsigned int thoff;
struct iphdr *iph;
if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -181,15 +176,22 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
unlikely(ip_has_options(thoff)))
return -1;
- if (iph->protocol != IPPROTO_TCP &&
- iph->protocol != IPPROTO_UDP)
+ switch (iph->protocol) {
+ case IPPROTO_TCP:
+ hdrsize = sizeof(struct tcphdr);
+ break;
+ case IPPROTO_UDP:
+ hdrsize = sizeof(struct udphdr);
+ break;
+ default:
return -1;
+ }
if (iph->ttl <= 1)
return -1;
thoff = iph->ihl * 4;
- if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+ if (!pskb_may_pull(skb, thoff + hdrsize))
return -1;
iph = ip_hdr(skb);
@@ -315,8 +317,7 @@ static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
{
struct tcphdr *tcph;
- if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
- skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
return -1;
tcph = (void *)(skb_network_header(skb) + thoff);
@@ -332,8 +333,7 @@ static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
{
struct udphdr *udph;
- if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
- skb_try_make_writable(skb, thoff + sizeof(*udph)))
+ if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
return -1;
udph = (void *)(skb_network_header(skb) + thoff);
@@ -439,24 +439,31 @@ static int nf_flow_nat_ipv6(const struct flow_offload *flow,
static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
struct flow_offload_tuple *tuple)
{
+ unsigned int thoff, hdrsize;
struct flow_ports *ports;
struct ipv6hdr *ip6h;
- unsigned int thoff;
if (!pskb_may_pull(skb, sizeof(*ip6h)))
return -1;
ip6h = ipv6_hdr(skb);
- if (ip6h->nexthdr != IPPROTO_TCP &&
- ip6h->nexthdr != IPPROTO_UDP)
+ switch (ip6h->nexthdr) {
+ case IPPROTO_TCP:
+ hdrsize = sizeof(struct tcphdr);
+ break;
+ case IPPROTO_UDP:
+ hdrsize = sizeof(struct udphdr);
+ break;
+ default:
return -1;
+ }
if (ip6h->hop_limit <= 1)
return -1;
thoff = sizeof(*ip6h);
- if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+ if (!pskb_may_pull(skb, thoff + hdrsize))
return -1;
ip6h = ipv6_hdr(skb);
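
After the tuple-extraction rework above, nf_flow_tuple_ip()/nf_flow_tuple_ipv6() pull the full transport header once (hdrsize), so the NAT helpers can drop their pskb_may_pull() calls and only request writability. A sketch of a helper relying on that invariant (checksum fixup omitted for brevity; demo name is illustrative):

static int demo_rewrite_dport(struct sk_buff *skb, unsigned int thoff,
			      __be16 new_port)
{
	struct tcphdr *tcph;

	/* no pskb_may_pull(): the tuple path already pulled
	 * thoff + sizeof(*tcph)
	 */
	if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
		return -1;

	tcph = (void *)(skb_network_header(skb) + thoff);
	tcph->dest = new_port;
	return 0;
}
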
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
index ae5628ddbe6d..fd7c5f0f5c25 100644
--- a/net/netfilter/nf_log_common.c
+++ b/net/netfilter/nf_log_common.c
@@ -171,6 +171,18 @@ nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
}
EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
+void nf_log_dump_vlan(struct nf_log_buf *m, const struct sk_buff *skb)
+{
+ u16 vid;
+
+ if (!skb_vlan_tag_present(skb))
+ return;
+
+ vid = skb_vlan_tag_get(skb);
+ nf_log_buf_add(m, "VPROTO=%04x VID=%u ", ntohs(skb->vlan_proto), vid);
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_vlan);
+
/* bridge and netdev logging families share this code. */
void nf_log_l2packet(struct net *net, u_int8_t pf,
__be16 protocol,
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 4603b667973a..9957e0ed8658 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -206,7 +206,7 @@ static int nf_tables_register_hook(struct net *net,
if (basechain->type->ops_register)
return basechain->type->ops_register(net, ops);
- if (table->family == NFPROTO_NETDEV)
+ if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
return nft_netdev_register_hooks(net, &basechain->hook_list);
return nf_register_net_hook(net, &basechain->ops);
@@ -228,7 +228,7 @@ static void nf_tables_unregister_hook(struct net *net,
if (basechain->type->ops_unregister)
return basechain->type->ops_unregister(net, ops);
- if (table->family == NFPROTO_NETDEV)
+ if (nft_base_chain_netdev(table->family, basechain->ops.hooknum))
nft_netdev_unregister_hooks(net, &basechain->hook_list);
else
nf_unregister_net_hook(net, &basechain->ops);
@@ -650,6 +650,8 @@ static const struct nla_policy nft_table_policy[NFTA_TABLE_MAX + 1] = {
.len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_TABLE_FLAGS] = { .type = NLA_U32 },
[NFTA_TABLE_HANDLE] = { .type = NLA_U64 },
+ [NFTA_TABLE_USERDATA] = { .type = NLA_BINARY,
+ .len = NFT_USERDATA_MAXLEN }
};
static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
@@ -676,6 +678,11 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net,
NFTA_TABLE_PAD))
goto nla_put_failure;
+ if (table->udata) {
+ if (nla_put(skb, NFTA_TABLE_USERDATA, table->udlen, table->udata))
+ goto nla_put_failure;
+ }
+
nlmsg_end(skb, nlh);
return 0;
@@ -988,8 +995,8 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
int family = nfmsg->nfgen_family;
const struct nlattr *attr;
struct nft_table *table;
- u32 flags = 0;
struct nft_ctx ctx;
+ u32 flags = 0;
int err;
lockdep_assert_held(&net->nft.commit_mutex);
@@ -1025,6 +1032,14 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
if (table->name == NULL)
goto err_strdup;
+ if (nla[NFTA_TABLE_USERDATA]) {
+ table->udata = nla_memdup(nla[NFTA_TABLE_USERDATA], GFP_KERNEL);
+ if (table->udata == NULL)
+ goto err_table_udata;
+
+ table->udlen = nla_len(nla[NFTA_TABLE_USERDATA]);
+ }
+
err = rhltable_init(&table->chains_ht, &nft_chain_ht_params);
if (err)
goto err_chain_ht;
@@ -1047,6 +1062,8 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
err_trans:
rhltable_destroy(&table->chains_ht);
err_chain_ht:
+ kfree(table->udata);
+err_table_udata:
kfree(table->name);
err_strdup:
kfree(table);
@@ -1202,6 +1219,7 @@ static void nf_tables_table_destroy(struct nft_ctx *ctx)
rhltable_destroy(&ctx->table->chains_ht);
kfree(ctx->table->name);
+ kfree(ctx->table->udata);
kfree(ctx->table);
}
@@ -1297,6 +1315,8 @@ static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
[NFTA_CHAIN_COUNTERS] = { .type = NLA_NESTED },
[NFTA_CHAIN_FLAGS] = { .type = NLA_U32 },
[NFTA_CHAIN_ID] = { .type = NLA_U32 },
+ [NFTA_CHAIN_USERDATA] = { .type = NLA_BINARY,
+ .len = NFT_USERDATA_MAXLEN },
};
static const struct nla_policy nft_hook_policy[NFTA_HOOK_MAX + 1] = {
@@ -1361,7 +1381,7 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family,
if (nla_put_be32(skb, NFTA_HOOK_PRIORITY, htonl(ops->priority)))
goto nla_put_failure;
- if (family == NFPROTO_NETDEV) {
+ if (nft_base_chain_netdev(family, ops->hooknum)) {
nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS);
list_for_each_entry(hook, &basechain->hook_list, list) {
if (!first)
@@ -1438,6 +1458,10 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
goto nla_put_failure;
+ if (chain->udata &&
+ nla_put(skb, NFTA_CHAIN_USERDATA, chain->udlen, chain->udata))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -1661,7 +1685,7 @@ void nf_tables_chain_destroy(struct nft_ctx *ctx)
if (nft_is_base_chain(chain)) {
struct nft_base_chain *basechain = nft_base_chain(chain);
- if (ctx->family == NFPROTO_NETDEV) {
+ if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) {
list_for_each_entry_safe(hook, next,
&basechain->hook_list, list) {
list_del_rcu(&hook->list);
@@ -1674,9 +1698,11 @@ void nf_tables_chain_destroy(struct nft_ctx *ctx)
free_percpu(rcu_dereference_raw(basechain->stats));
}
kfree(chain->name);
+ kfree(chain->udata);
kfree(basechain);
} else {
kfree(chain->name);
+ kfree(chain->udata);
kfree(chain);
}
}
@@ -1838,7 +1864,7 @@ static int nft_chain_parse_hook(struct net *net,
if (IS_ERR(type))
return PTR_ERR(type);
}
- if (hook->num > NF_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
+ if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num)))
return -EOPNOTSUPP;
if (type->type == NFT_CHAIN_T_NAT &&
@@ -1851,7 +1877,7 @@ static int nft_chain_parse_hook(struct net *net,
hook->type = type;
INIT_LIST_HEAD(&hook->list);
- if (family == NFPROTO_NETDEV) {
+ if (nft_base_chain_netdev(family, hook->num)) {
err = nft_chain_parse_netdev(net, ha, &hook->list);
if (err < 0) {
module_put(type->owner);
@@ -1918,7 +1944,7 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family,
INIT_LIST_HEAD(&basechain->hook_list);
chain = &basechain->chain;
- if (family == NFPROTO_NETDEV) {
+ if (nft_base_chain_netdev(family, hook->num)) {
list_splice_init(&hook->list, &basechain->hook_list);
list_for_each_entry(h, &basechain->hook_list, list)
nft_basechain_hook_init(&h->ops, family, hook, chain);
@@ -2030,7 +2056,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
} else {
if (!(flags & NFT_CHAIN_BINDING)) {
err = -EINVAL;
- goto err1;
+ goto err_destroy_chain;
}
snprintf(name, sizeof(name), "__chain%llu", ++chain_id);
@@ -2039,13 +2065,22 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
if (!chain->name) {
err = -ENOMEM;
- goto err1;
+ goto err_destroy_chain;
+ }
+
+ if (nla[NFTA_CHAIN_USERDATA]) {
+ chain->udata = nla_memdup(nla[NFTA_CHAIN_USERDATA], GFP_KERNEL);
+ if (chain->udata == NULL) {
+ err = -ENOMEM;
+ goto err_destroy_chain;
+ }
+ chain->udlen = nla_len(nla[NFTA_CHAIN_USERDATA]);
}
rules = nf_tables_chain_alloc_rules(chain, 0);
if (!rules) {
err = -ENOMEM;
- goto err1;
+ goto err_destroy_chain;
}
*rules = NULL;
@@ -2054,12 +2089,12 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
err = nf_tables_register_hook(net, table, chain);
if (err < 0)
- goto err1;
+ goto err_destroy_chain;
trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- goto err2;
+ goto err_unregister_hook;
}
nft_trans_chain_policy(trans) = NFT_CHAIN_POLICY_UNSET;
@@ -2069,15 +2104,15 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask,
err = nft_chain_add(table, chain);
if (err < 0) {
nft_trans_destroy(trans);
- goto err2;
+ goto err_unregister_hook;
}
table->use++;
return 0;
-err2:
+err_unregister_hook:
nf_tables_unregister_hook(net, table, chain);
-err1:
+err_destroy_chain:
nf_tables_chain_destroy(ctx);
return err;
@@ -2103,7 +2138,8 @@ static bool nft_hook_list_equal(struct list_head *hook_list1,
}
static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
- u32 flags)
+ u32 flags, const struct nlattr *attr,
+ struct netlink_ext_ack *extack)
{
const struct nlattr * const *nla = ctx->nla;
struct nft_table *table = ctx->table;
@@ -2119,9 +2155,10 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
return -EOPNOTSUPP;
if (nla[NFTA_CHAIN_HOOK]) {
- if (!nft_is_base_chain(chain))
+ if (!nft_is_base_chain(chain)) {
+ NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
-
+ }
err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family,
false);
if (err < 0)
@@ -2130,13 +2167,15 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
basechain = nft_base_chain(chain);
if (basechain->type != hook.type) {
nft_chain_release_hook(&hook);
+ NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
- if (ctx->family == NFPROTO_NETDEV) {
+ if (nft_base_chain_netdev(ctx->family, hook.num)) {
if (!nft_hook_list_equal(&basechain->hook_list,
&hook.list)) {
nft_chain_release_hook(&hook);
+ NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
} else {
@@ -2144,6 +2183,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
if (ops->hooknum != hook.num ||
ops->priority != hook.priority) {
nft_chain_release_hook(&hook);
+ NL_SET_BAD_ATTR(extack, attr);
return -EEXIST;
}
}
@@ -2156,8 +2196,10 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
chain2 = nft_chain_lookup(ctx->net, table,
nla[NFTA_CHAIN_NAME], genmask);
- if (!IS_ERR(chain2))
+ if (!IS_ERR(chain2)) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
return -EEXIST;
+ }
}
if (nla[NFTA_CHAIN_COUNTERS]) {
@@ -2200,6 +2242,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
nft_trans_chain_update(tmp) &&
nft_trans_chain_name(tmp) &&
strcmp(name, nft_trans_chain_name(tmp)) == 0) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
kfree(name);
goto err;
}
@@ -2322,7 +2365,8 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
return -EOPNOTSUPP;
flags |= chain->flags & NFT_CHAIN_BASE;
- return nf_tables_updchain(&ctx, genmask, policy, flags);
+ return nf_tables_updchain(&ctx, genmask, policy, flags, attr,
+ extack);
}
return nf_tables_addchain(&ctx, family, genmask, policy, flags);
@@ -5737,6 +5781,8 @@ static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
[NFTA_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_OBJ_DATA] = { .type = NLA_NESTED },
[NFTA_OBJ_HANDLE] = { .type = NLA_U64},
+ [NFTA_OBJ_USERDATA] = { .type = NLA_BINARY,
+ .len = NFT_USERDATA_MAXLEN },
};
static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
@@ -5928,7 +5974,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto err1;
+ goto err_init;
}
obj->key.table = table;
obj->handle = nf_tables_alloc_handle(table);
@@ -5936,32 +5982,42 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
obj->key.name = nla_strdup(nla[NFTA_OBJ_NAME], GFP_KERNEL);
if (!obj->key.name) {
err = -ENOMEM;
- goto err2;
+ goto err_strdup;
+ }
+
+ if (nla[NFTA_OBJ_USERDATA]) {
+ obj->udata = nla_memdup(nla[NFTA_OBJ_USERDATA], GFP_KERNEL);
+ if (obj->udata == NULL)
+ goto err_userdata;
+
+ obj->udlen = nla_len(nla[NFTA_OBJ_USERDATA]);
}
err = nft_trans_obj_add(&ctx, NFT_MSG_NEWOBJ, obj);
if (err < 0)
- goto err3;
+ goto err_trans;
err = rhltable_insert(&nft_objname_ht, &obj->rhlhead,
nft_objname_ht_params);
if (err < 0)
- goto err4;
+ goto err_obj_ht;
list_add_tail_rcu(&obj->list, &table->objects);
table->use++;
return 0;
-err4:
+err_obj_ht:
/* queued in transaction log */
INIT_LIST_HEAD(&obj->list);
return err;
-err3:
+err_trans:
kfree(obj->key.name);
-err2:
+err_userdata:
+ kfree(obj->udata);
+err_strdup:
if (obj->ops->destroy)
obj->ops->destroy(&ctx, obj);
kfree(obj);
-err1:
+err_init:
module_put(type->owner);
return err;
}
@@ -5993,6 +6049,10 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net,
NFTA_OBJ_PAD))
goto nla_put_failure;
+ if (obj->udata &&
+ nla_put(skb, NFTA_OBJ_USERDATA, obj->udlen, obj->udata))
+ goto nla_put_failure;
+
nlmsg_end(skb, nlh);
return 0;
@@ -6199,6 +6259,7 @@ static void nft_obj_destroy(const struct nft_ctx *ctx, struct nft_object *obj)
module_put(obj->ops->type->owner);
kfree(obj->key.name);
+ kfree(obj->udata);
kfree(obj);
}
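
The userdata support added above for tables, chains and objects follows one netlink idiom: an NLA_BINARY policy entry capped at NFT_USERDATA_MAXLEN, nla_memdup() on input, nla_put() on dump, kfree() on destroy. A condensed sketch of the input side (demo name is illustrative):

static int demo_parse_udata(const struct nlattr *attr, u8 **udata,
			    u16 *udlen)
{
	*udata = nla_memdup(attr, GFP_KERNEL);
	if (!*udata)
		return -ENOMEM;

	*udlen = nla_len(attr);
	return 0;
}
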
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 587897a2498b..dbc2e945c98e 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -47,13 +47,22 @@ static inline void nft_trace_packet(struct nft_traceinfo *info,
}
}
+static void nft_bitwise_fast_eval(const struct nft_expr *expr,
+ struct nft_regs *regs)
+{
+ const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
+ u32 *src = &regs->data[priv->sreg];
+ u32 *dst = &regs->data[priv->dreg];
+
+ *dst = (*src & priv->mask) ^ priv->xor;
+}
+
static void nft_cmp_fast_eval(const struct nft_expr *expr,
struct nft_regs *regs)
{
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
- u32 mask = nft_cmp_fast_mask(priv->len);
- if ((regs->data[priv->sreg] & mask) == priv->data)
+ if (((regs->data[priv->sreg] & priv->mask) == priv->data) ^ priv->inv)
return;
regs->verdict.code = NFT_BREAK;
}
@@ -176,6 +185,8 @@ next_rule:
nft_rule_for_each_expr(expr, last, rule) {
if (expr->ops == &nft_cmp_fast_ops)
nft_cmp_fast_eval(expr, &regs);
+ else if (expr->ops == &nft_bitwise_fast_ops)
+ nft_bitwise_fast_eval(expr, &regs);
else if (expr->ops != &nft_payload_fast_ops ||
!nft_payload_fast_eval(expr, &regs, pkt))
expr_call_ops_eval(expr, &regs, pkt);
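
nft_bitwise_fast_eval() reduces a 32-bit boolean bitwise expression to (src & mask) ^ xor, a form that can express AND, NOT and OR. A runnable demo with arbitrary values:

#include <stdio.h>
#include <stdint.h>

static uint32_t bw_fast(uint32_t src, uint32_t mask, uint32_t xor)
{
	return (src & mask) ^ xor;
}

int main(void)
{
	uint32_t src = 0x0000ffff;

	printf("AND 0xff00:    %08x\n", bw_fast(src, 0xff00, 0));
	printf("NOT:           %08x\n", bw_fast(src, ~0u, ~0u));
	/* OR with constant c: mask = ~c, xor = c */
	printf("OR 0xf0000000: %08x\n",
	       bw_fast(src, ~0xf0000000u, 0xf0000000u));
	return 0;
}
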
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 9ef37c1b7b3b..7c7e06624dc3 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -323,8 +323,6 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *basechain,
return nft_block_setup(basechain, &bo, cmd);
}
-#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
-
static int nft_chain_offload_cmd(struct nft_base_chain *basechain,
struct net_device *dev,
enum flow_block_command cmd)
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 3a2e64e13b22..2daa1f6ae344 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -46,6 +46,23 @@ static struct {
const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];
+static struct lock_class_key nfnl_lockdep_keys[NFNL_SUBSYS_COUNT];
+
+static const char *const nfnl_lockdep_names[NFNL_SUBSYS_COUNT] = {
+ [NFNL_SUBSYS_NONE] = "nfnl_subsys_none",
+ [NFNL_SUBSYS_CTNETLINK] = "nfnl_subsys_ctnetlink",
+ [NFNL_SUBSYS_CTNETLINK_EXP] = "nfnl_subsys_ctnetlink_exp",
+ [NFNL_SUBSYS_QUEUE] = "nfnl_subsys_queue",
+ [NFNL_SUBSYS_ULOG] = "nfnl_subsys_ulog",
+ [NFNL_SUBSYS_OSF] = "nfnl_subsys_osf",
+ [NFNL_SUBSYS_IPSET] = "nfnl_subsys_ipset",
+ [NFNL_SUBSYS_ACCT] = "nfnl_subsys_acct",
+ [NFNL_SUBSYS_CTNETLINK_TIMEOUT] = "nfnl_subsys_cttimeout",
+ [NFNL_SUBSYS_CTHELPER] = "nfnl_subsys_cthelper",
+ [NFNL_SUBSYS_NFTABLES] = "nfnl_subsys_nftables",
+ [NFNL_SUBSYS_NFT_COMPAT] = "nfnl_subsys_nftcompat",
+};
+
static const int nfnl_group2type[NFNLGRP_MAX+1] = {
[NFNLGRP_CONNTRACK_NEW] = NFNL_SUBSYS_CTNETLINK,
[NFNLGRP_CONNTRACK_UPDATE] = NFNL_SUBSYS_CTNETLINK,
@@ -632,7 +649,7 @@ static int __init nfnetlink_init(void)
BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);
for (i=0; i<NFNL_SUBSYS_COUNT; i++)
- mutex_init(&table[i].mutex);
+ __mutex_init(&table[i].mutex, nfnl_lockdep_names[i], &nfnl_lockdep_keys[i]);
return register_pernet_subsys(&nfnetlink_net_ops);
}
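
Giving each nfnetlink subsystem mutex its own lock_class_key stops lockdep from collapsing the whole table into one lock class and reporting false ordering cycles between unrelated subsystems. A sketch of the idiom (demo_* names are illustrative):

#include <linux/mutex.h>

#define DEMO_COUNT 2

static struct lock_class_key demo_keys[DEMO_COUNT];
static const char *const demo_names[DEMO_COUNT] = {
	"demo_subsys_a",
	"demo_subsys_b",
};
static struct mutex demo_locks[DEMO_COUNT];

static void demo_init_locks(void)
{
	int i;

	for (i = 0; i < DEMO_COUNT; i++)
		__mutex_init(&demo_locks[i], demo_names[i], &demo_keys[i]);
}
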
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index bc37d6c59db4..bbd773d74377 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -163,11 +163,6 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
u32 len;
int err;
- if (!tb[NFTA_BITWISE_SREG] ||
- !tb[NFTA_BITWISE_DREG] ||
- !tb[NFTA_BITWISE_LEN])
- return -EINVAL;
-
err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
if (err < 0)
return err;
@@ -292,9 +287,143 @@ static const struct nft_expr_ops nft_bitwise_ops = {
.offload = nft_bitwise_offload,
};
+static int
+nft_bitwise_extract_u32_data(const struct nlattr * const tb, u32 *out)
+{
+ struct nft_data_desc desc;
+ struct nft_data data;
+ int err = 0;
+
+ err = nft_data_init(NULL, &data, sizeof(data), &desc, tb);
+ if (err < 0)
+ return err;
+
+ if (desc.type != NFT_DATA_VALUE || desc.len != sizeof(u32)) {
+ err = -EINVAL;
+ goto err;
+ }
+ *out = data.data[0];
+err:
+ nft_data_release(&data, desc.type);
+ return err;
+}
+
+static int nft_bitwise_fast_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
+ int err;
+
+ priv->sreg = nft_parse_register(tb[NFTA_BITWISE_SREG]);
+ err = nft_validate_register_load(priv->sreg, sizeof(u32));
+ if (err < 0)
+ return err;
+
+ priv->dreg = nft_parse_register(tb[NFTA_BITWISE_DREG]);
+ err = nft_validate_register_store(ctx, priv->dreg, NULL,
+ NFT_DATA_VALUE, sizeof(u32));
+ if (err < 0)
+ return err;
+
+ if (tb[NFTA_BITWISE_DATA])
+ return -EINVAL;
+
+ if (!tb[NFTA_BITWISE_MASK] ||
+ !tb[NFTA_BITWISE_XOR])
+ return -EINVAL;
+
+ err = nft_bitwise_extract_u32_data(tb[NFTA_BITWISE_MASK], &priv->mask);
+ if (err < 0)
+ return err;
+
+ err = nft_bitwise_extract_u32_data(tb[NFTA_BITWISE_XOR], &priv->xor);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int
+nft_bitwise_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
+ struct nft_data data;
+
+ if (nft_dump_register(skb, NFTA_BITWISE_SREG, priv->sreg))
+ return -1;
+ if (nft_dump_register(skb, NFTA_BITWISE_DREG, priv->dreg))
+ return -1;
+ if (nla_put_be32(skb, NFTA_BITWISE_LEN, htonl(sizeof(u32))))
+ return -1;
+ if (nla_put_be32(skb, NFTA_BITWISE_OP, htonl(NFT_BITWISE_BOOL)))
+ return -1;
+
+ data.data[0] = priv->mask;
+ if (nft_data_dump(skb, NFTA_BITWISE_MASK, &data,
+ NFT_DATA_VALUE, sizeof(u32)) < 0)
+ return -1;
+
+ data.data[0] = priv->xor;
+ if (nft_data_dump(skb, NFTA_BITWISE_XOR, &data,
+ NFT_DATA_VALUE, sizeof(u32)) < 0)
+ return -1;
+
+ return 0;
+}
+
+static int nft_bitwise_fast_offload(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow,
+ const struct nft_expr *expr)
+{
+ const struct nft_bitwise_fast_expr *priv = nft_expr_priv(expr);
+ struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
+
+ if (priv->xor || priv->sreg != priv->dreg || reg->len != sizeof(u32))
+ return -EOPNOTSUPP;
+
+ reg->mask.data[0] = priv->mask;
+ return 0;
+}
+
+const struct nft_expr_ops nft_bitwise_fast_ops = {
+ .type = &nft_bitwise_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_bitwise_fast_expr)),
+ .eval = NULL, /* inlined */
+ .init = nft_bitwise_fast_init,
+ .dump = nft_bitwise_fast_dump,
+ .offload = nft_bitwise_fast_offload,
+};
+
+static const struct nft_expr_ops *
+nft_bitwise_select_ops(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+{
+ int err;
+ u32 len;
+
+ if (!tb[NFTA_BITWISE_LEN] ||
+ !tb[NFTA_BITWISE_SREG] ||
+ !tb[NFTA_BITWISE_DREG])
+ return ERR_PTR(-EINVAL);
+
+ err = nft_parse_u32_check(tb[NFTA_BITWISE_LEN], U8_MAX, &len);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ if (len != sizeof(u32))
+ return &nft_bitwise_ops;
+
+ if (tb[NFTA_BITWISE_OP] &&
+ ntohl(nla_get_be32(tb[NFTA_BITWISE_OP])) != NFT_BITWISE_BOOL)
+ return &nft_bitwise_ops;
+
+ return &nft_bitwise_fast_ops;
+}
+
struct nft_expr_type nft_bitwise_type __read_mostly = {
.name = "bitwise",
- .ops = &nft_bitwise_ops,
+ .select_ops = nft_bitwise_select_ops,
.policy = nft_bitwise_policy,
.maxattr = NFTA_BITWISE_MAX,
.owner = THIS_MODULE,
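Switching the expression type from a fixed ->ops to ->select_ops lets the core pick the implementation per rule at load time: operands wider than 32 bits or non-boolean ops (shifts) keep the generic path, everything else gets the fast one. nft_bitwise_fast_init() separately rejects NFTA_BITWISE_DATA, which boolean mask/xor expressions never carry. A simplified userspace model of the dispatch (hypothetical types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Model of select_ops: the type exposes a chooser instead of one fixed
 * ops table, and the core binds an implementation per expression based
 * on the netlink attributes it was created with. */
struct ops { const char *name; };

static const struct ops generic_ops = { "generic" };
static const struct ops fast_ops    = { "fast/inlined" };

static const struct ops *select_ops(uint32_t len, bool is_bool_op)
{
	if (len != sizeof(uint32_t))
		return &generic_ops;	/* wide operands: generic path */
	if (!is_bool_op)
		return &generic_ops;	/* shifts stay generic */
	return &fast_ops;		/* 32-bit AND/OR/XOR: fast path */
}

int main(void)
{
	printf("%s\n", select_ops(4, true)->name);	/* fast/inlined */
	printf("%s\n", select_ops(16, true)->name);	/* generic */
	return 0;
}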
diff --git a/net/netfilter/nft_chain_filter.c b/net/netfilter/nft_chain_filter.c
index c78d01bc02e9..ff8528ad3dc6 100644
--- a/net/netfilter/nft_chain_filter.c
+++ b/net/netfilter/nft_chain_filter.c
@@ -161,16 +161,49 @@ static unsigned int nft_do_chain_inet(void *priv, struct sk_buff *skb,
return nft_do_chain(&pkt, priv);
}
+static unsigned int nft_do_chain_inet_ingress(void *priv, struct sk_buff *skb,
+ const struct nf_hook_state *state)
+{
+ struct nf_hook_state ingress_state = *state;
+ struct nft_pktinfo pkt;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ /* Original hook is NFPROTO_NETDEV and NF_NETDEV_INGRESS. */
+ ingress_state.pf = NFPROTO_IPV4;
+ ingress_state.hook = NF_INET_INGRESS;
+ nft_set_pktinfo(&pkt, skb, &ingress_state);
+
+ if (nft_set_pktinfo_ipv4_ingress(&pkt, skb) < 0)
+ return NF_DROP;
+ break;
+ case htons(ETH_P_IPV6):
+ ingress_state.pf = NFPROTO_IPV6;
+ ingress_state.hook = NF_INET_INGRESS;
+ nft_set_pktinfo(&pkt, skb, &ingress_state);
+
+ if (nft_set_pktinfo_ipv6_ingress(&pkt, skb) < 0)
+ return NF_DROP;
+ break;
+ default:
+ return NF_ACCEPT;
+ }
+
+ return nft_do_chain(&pkt, priv);
+}
+
static const struct nft_chain_type nft_chain_filter_inet = {
.name = "filter",
.type = NFT_CHAIN_T_DEFAULT,
.family = NFPROTO_INET,
- .hook_mask = (1 << NF_INET_LOCAL_IN) |
+ .hook_mask = (1 << NF_INET_INGRESS) |
+ (1 << NF_INET_LOCAL_IN) |
(1 << NF_INET_LOCAL_OUT) |
(1 << NF_INET_FORWARD) |
(1 << NF_INET_PRE_ROUTING) |
(1 << NF_INET_POST_ROUTING),
.hooks = {
+ [NF_INET_INGRESS] = nft_do_chain_inet_ingress,
[NF_INET_LOCAL_IN] = nft_do_chain_inet,
[NF_INET_LOCAL_OUT] = nft_do_chain_inet,
[NF_INET_FORWARD] = nft_do_chain_inet,
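The inet ingress handler runs from the netdev ingress hook, so it sees raw ethertypes and must demux per packet: skb->protocol picks the IPv4 or IPv6 pktinfo setup (rewriting pf/hook in a copied nf_hook_state), malformed headers are dropped, and any non-IP ethertype is accepted untouched. A userspace sketch of that demux (illustrative names):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define ETH_P_ARP	0x0806

enum verdict { V_ACCEPT, V_DROP, V_RUN_CHAIN };

/* Model of nft_do_chain_inet_ingress(): only IPv4/IPv6 frames are fed
 * to the rule chain; everything else (ARP, LLDP, ...) passes through. */
static enum verdict ingress_demux(uint16_t protocol_be)
{
	switch (ntohs(protocol_be)) {
	case ETH_P_IP:
	case ETH_P_IPV6:
		return V_RUN_CHAIN;
	default:
		return V_ACCEPT;
	}
}

int main(void)
{
	printf("%d %d\n", ingress_demux(htons(ETH_P_IP)),
	       ingress_demux(htons(ETH_P_ARP)));	/* 2 0 */
	return 0;
}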
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 16f4d84599ac..bc079d68a536 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -167,7 +167,6 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
struct nft_data_desc desc;
struct nft_data data;
- u32 mask;
int err;
err = nft_data_init(NULL, &data, sizeof(data), &desc,
@@ -181,10 +180,11 @@ static int nft_cmp_fast_init(const struct nft_ctx *ctx,
return err;
desc.len *= BITS_PER_BYTE;
- mask = nft_cmp_fast_mask(desc.len);
- priv->data = data.data[0] & mask;
+ priv->mask = nft_cmp_fast_mask(desc.len);
+ priv->data = data.data[0] & priv->mask;
priv->len = desc.len;
+ priv->inv = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
return 0;
}
@@ -201,7 +201,7 @@ static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
},
.sreg = priv->sreg,
.len = priv->len / BITS_PER_BYTE,
- .op = NFT_CMP_EQ,
+ .op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
};
return __nft_cmp_offload(ctx, flow, &cmp);
@@ -210,11 +210,12 @@ static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
+ enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
struct nft_data data;
if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
goto nla_put_failure;
- if (nla_put_be32(skb, NFTA_CMP_OP, htonl(NFT_CMP_EQ)))
+ if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
goto nla_put_failure;
data.data[0] = priv->data;
@@ -272,7 +273,7 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
goto err1;
}
- if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
+ if (desc.len <= sizeof(u32) && (op == NFT_CMP_EQ || op == NFT_CMP_NEQ))
return &nft_cmp_fast_ops;
return &nft_cmp_ops;
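The fast cmp path now also serves NFT_CMP_NEQ by folding the operator into a precomputed inv bit: one masked compare, XOR-ed with inv, yields either semantics, and the dump/offload paths translate inv back into NFT_CMP_NEQ so userspace and hardware still see the original operator. A minimal sketch:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* ((reg & mask) == data) ^ inv: inv=false keeps EQ semantics,
 * inv=true turns the identical test into NEQ, matching the updated
 * nft_cmp_fast_eval(). */
static bool cmp_fast(uint32_t reg, uint32_t mask, uint32_t data, bool inv)
{
	return ((reg & mask) == data) ^ inv;
}

int main(void)
{
	assert(cmp_fast(0xabcd, 0xffff, 0xabcd, false));	/* EQ hits    */
	assert(!cmp_fast(0xabcd, 0xffff, 0xabcd, true));	/* NEQ misses */
	assert(cmp_fast(0x1234, 0xffff, 0xabcd, true));		/* NEQ hits   */
	return 0;
}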
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index 7a2e59638499..dcd3c7b8a367 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -22,6 +22,7 @@
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/sctp/checksum.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
struct vlan_ethhdr *veth)
@@ -484,6 +485,19 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
return 0;
}
+static int nft_payload_csum_sctp(struct sk_buff *skb, int offset)
+{
+ struct sctphdr *sh;
+
+ if (skb_ensure_writable(skb, offset + sizeof(*sh)))
+ return -1;
+
+ sh = (struct sctphdr *)(skb->data + offset);
+ sh->checksum = sctp_compute_cksum(skb, offset);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return 0;
+}
+
static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
struct sk_buff *skb,
__wsum fsum, __wsum tsum)
@@ -587,6 +601,13 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
skb_store_bits(skb, offset, src, priv->len) < 0)
goto err;
+ if (priv->csum_type == NFT_PAYLOAD_CSUM_SCTP &&
+ pkt->tprot == IPPROTO_SCTP &&
+ skb->ip_summed != CHECKSUM_PARTIAL) {
+ if (nft_payload_csum_sctp(skb, pkt->xt.thoff))
+ goto err;
+ }
+
return;
err:
regs->verdict.code = NFT_BREAK;
@@ -623,6 +644,13 @@ static int nft_payload_set_init(const struct nft_ctx *ctx,
case NFT_PAYLOAD_CSUM_NONE:
case NFT_PAYLOAD_CSUM_INET:
break;
+ case NFT_PAYLOAD_CSUM_SCTP:
+ if (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER)
+ return -EINVAL;
+
+ if (priv->csum_offset != offsetof(struct sctphdr, checksum))
+ return -EINVAL;
+ break;
default:
return -EOPNOTSUPP;
}
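SCTP uses CRC32c rather than the ones-complement internet checksum, so NFT_PAYLOAD_CSUM_SCTP gets its own fixup path: after a mangle it recomputes sctp_compute_cksum() over the packet, but init only accepts the one combination that can ever be correct, i.e. a transport-header base with the checksum offset at the fixed position inside the SCTP common header. A userspace sketch of that validation (the struct mirrors the kernel's sctphdr layout):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct sctphdr {
	uint16_t source;
	uint16_t dest;
	uint32_t vtag;
	uint32_t checksum;	/* CRC32c, 8 bytes into the header */
};

enum hdr_base { BASE_NETWORK, BASE_TRANSPORT };

/* Mirrors the nft_payload_set_init() checks for NFT_PAYLOAD_CSUM_SCTP. */
static int validate_sctp_csum(enum hdr_base base, unsigned int csum_offset)
{
	if (base != BASE_TRANSPORT)
		return -EINVAL;
	if (csum_offset != offsetof(struct sctphdr, checksum))
		return -EINVAL;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_sctp_csum(BASE_TRANSPORT, 8));	/* 0 */
	printf("%d\n", validate_sctp_csum(BASE_TRANSPORT, 4));	/* -EINVAL */
	return 0;
}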
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index 637ce3e8c575..a28aca5124ce 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -14,6 +14,25 @@ struct nft_socket {
};
};
+static void nft_socket_wildcard(const struct nft_pktinfo *pkt,
+ struct nft_regs *regs, struct sock *sk,
+ u32 *dest)
+{
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ nft_reg_store8(dest, inet_sk(sk)->inet_rcv_saddr == 0);
+ break;
+#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
+ case NFPROTO_IPV6:
+ nft_reg_store8(dest, ipv6_addr_any(&sk->sk_v6_rcv_saddr));
+ break;
+#endif
+ default:
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+}
+
static void nft_socket_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@@ -59,6 +78,13 @@ static void nft_socket_eval(const struct nft_expr *expr,
return;
}
break;
+ case NFT_SOCKET_WILDCARD:
+ if (!sk_fullsock(sk)) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+ nft_socket_wildcard(pkt, regs, sk, dest);
+ break;
default:
WARN_ON(1);
regs->verdict.code = NFT_BREAK;
@@ -97,6 +123,7 @@ static int nft_socket_init(const struct nft_ctx *ctx,
priv->key = ntohl(nla_get_u32(tb[NFTA_SOCKET_KEY]));
switch(priv->key) {
case NFT_SOCKET_TRANSPARENT:
+ case NFT_SOCKET_WILDCARD:
len = sizeof(u8);
break;
case NFT_SOCKET_MARK:
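NFT_SOCKET_WILDCARD stores a boolean u8: whether the matched listener is bound to the any-address (0.0.0.0 or ::). The eval path first insists on sk_fullsock(), since request/timewait minisockets carry no usable bound address. A userspace sketch of the per-family test:

#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Model of nft_socket_wildcard(): a socket counts as wildcard-bound
 * when its receive address is all-zero for its family. */
static bool is_wildcard_v4(struct in_addr a)
{
	return a.s_addr == 0;				/* INADDR_ANY */
}

static bool is_wildcard_v6(const struct in6_addr *a)
{
	static const struct in6_addr any = IN6ADDR_ANY_INIT;

	return memcmp(a, &any, sizeof(any)) == 0;	/* in6addr_any */
}

int main(void)
{
	struct in_addr v4 = { .s_addr = 0 };
	struct in6_addr v6 = IN6ADDR_ANY_INIT;

	printf("%d %d\n", is_wildcard_v4(v4), is_wildcard_v6(&v6));
	return 0;
}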
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 713fb38541df..8928ec56c388 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -276,7 +276,7 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
return 0;
/* follow-up fragments don't contain ports, skip all fragments */
- if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+ if (ip_is_fragment(ip))
return 0;
hmark_set_tuple_ports(skb, (ip->ihl * 4) + nhoff, t, info);
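ip_is_fragment() is the canonical form of the predicate the old open-coded test expressed: a packet is (part of) a fragment when the MF flag is set or the offset field is non-zero. A self-contained restatement:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>

#define IP_OFFSET	0x1FFF	/* fragment offset mask */
#define IP_MF		0x2000	/* "more fragments" flag */

/* Same predicate the kernel helper implements on iph->frag_off. */
static bool ip_is_fragment(uint16_t frag_off_be)
{
	return (frag_off_be & htons(IP_MF | IP_OFFSET)) != 0;
}

int main(void)
{
	return ip_is_fragment(htons(IP_MF)) && !ip_is_fragment(0) ? 0 : 1;
}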
diff --git a/net/netlabel/netlabel_calipso.c b/net/netlabel/netlabel_calipso.c
index 249da67d50a2..4e62f2ad3575 100644
--- a/net/netlabel/netlabel_calipso.c
+++ b/net/netlabel/netlabel_calipso.c
@@ -304,7 +304,7 @@ static int netlbl_calipso_remove(struct sk_buff *skb, struct genl_info *info)
/* NetLabel Generic NETLINK Command Definitions
*/
-static const struct genl_ops netlbl_calipso_ops[] = {
+static const struct genl_small_ops netlbl_calipso_ops[] = {
{
.cmd = NLBL_CALIPSO_C_ADD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -342,8 +342,8 @@ static struct genl_family netlbl_calipso_gnl_family __ro_after_init = {
.maxattr = NLBL_CALIPSO_A_MAX,
.policy = calipso_genl_policy,
.module = THIS_MODULE,
- .ops = netlbl_calipso_ops,
- .n_ops = ARRAY_SIZE(netlbl_calipso_ops),
+ .small_ops = netlbl_calipso_ops,
+ .n_small_ops = ARRAY_SIZE(netlbl_calipso_ops),
};
/* NetLabel Generic NETLINK Protocol Functions
@@ -426,7 +426,7 @@ void calipso_doi_free(struct calipso_doi *doi_def)
/**
* calipso_doi_remove - Remove an existing DOI from the CALIPSO protocol engine
* @doi: the DOI value
- * @audit_secid: the LSM secid to use in the audit message
+ * @audit_info: NetLabel audit information
*
* Description:
* Removes a DOI definition from the CALIPSO engine. The NetLabel routines will
@@ -595,7 +595,7 @@ int calipso_req_setattr(struct request_sock *req,
/**
* calipso_req_delattr - Delete the CALIPSO option from a request socket
- * @reg: the request socket
+ * @req: the request socket
*
* Description:
* Removes the CALIPSO option from a request socket, if present.
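This file is the first of several (the other NetLabel families and the OVS families below follow suit) converting command tables from struct genl_ops to struct genl_small_ops, a slimmer variant without the per-op policy/maxattr/start/done members; parsing falls back to the family-wide policy. A hedged in-kernel-style fragment of the registration shape (demo_* names are hypothetical; kernel-tree only):

#include <linux/module.h>
#include <net/genetlink.h>

static int demo_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* real handler elided in this sketch */
}

static const struct genl_small_ops demo_small_ops[] = {
	{
		.cmd		= 1,
		.validate	= GENL_DONT_VALIDATE_STRICT |
				  GENL_DONT_VALIDATE_DUMP,
		.doit		= demo_doit,
		.flags		= GENL_ADMIN_PERM,
	},
};

static struct genl_family demo_family __ro_after_init = {
	.name		= "demo",
	.version	= 1,
	.module		= THIS_MODULE,
	/* small_ops inherit .policy/.maxattr from the family */
	.small_ops	= demo_small_ops,
	.n_small_ops	= ARRAY_SIZE(demo_small_ops),
};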
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index 0f16080b87cb..726dda95934c 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -724,7 +724,7 @@ static int netlbl_cipsov4_remove(struct sk_buff *skb, struct genl_info *info)
* NetLabel Generic NETLINK Command Definitions
*/
-static const struct genl_ops netlbl_cipsov4_ops[] = {
+static const struct genl_small_ops netlbl_cipsov4_ops[] = {
{
.cmd = NLBL_CIPSOV4_C_ADD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -762,8 +762,8 @@ static struct genl_family netlbl_cipsov4_gnl_family __ro_after_init = {
.maxattr = NLBL_CIPSOV4_A_MAX,
.policy = netlbl_cipsov4_genl_policy,
.module = THIS_MODULE,
- .ops = netlbl_cipsov4_ops,
- .n_ops = ARRAY_SIZE(netlbl_cipsov4_ops),
+ .small_ops = netlbl_cipsov4_ops,
+ .n_small_ops = ARRAY_SIZE(netlbl_cipsov4_ops),
};
/*
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index f73a8382c275..dc8c39f51f7d 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -612,9 +612,8 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
audit_buf = netlbl_audit_start_common(AUDIT_MAC_MAP_DEL, audit_info);
if (audit_buf != NULL) {
audit_log_format(audit_buf,
- " nlbl_domain=%s res=%u",
- entry->domain ? entry->domain : "(default)",
- ret_val == 0 ? 1 : 0);
+ " nlbl_domain=%s res=1",
+ entry->domain ? entry->domain : "(default)");
audit_log_end(audit_buf);
}
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index e7a25fbfaf8b..eb1d66d20afb 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -757,7 +757,7 @@ version_failure:
* NetLabel Generic NETLINK Command Definitions
*/
-static const struct genl_ops netlbl_mgmt_genl_ops[] = {
+static const struct genl_small_ops netlbl_mgmt_genl_ops[] = {
{
.cmd = NLBL_MGMT_C_ADD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -823,8 +823,8 @@ static struct genl_family netlbl_mgmt_gnl_family __ro_after_init = {
.maxattr = NLBL_MGMT_A_MAX,
.policy = netlbl_mgmt_genl_policy,
.module = THIS_MODULE,
- .ops = netlbl_mgmt_genl_ops,
- .n_ops = ARRAY_SIZE(netlbl_mgmt_genl_ops),
+ .small_ops = netlbl_mgmt_genl_ops,
+ .n_small_ops = ARRAY_SIZE(netlbl_mgmt_genl_ops),
};
/*
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index 77bb1bb22c3b..2e8e3f7b2111 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1301,7 +1301,7 @@ unlabel_staticlistdef_return:
* NetLabel Generic NETLINK Command Definitions
*/
-static const struct genl_ops netlbl_unlabel_genl_ops[] = {
+static const struct genl_small_ops netlbl_unlabel_genl_ops[] = {
{
.cmd = NLBL_UNLABEL_C_STATICADD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -1367,8 +1367,8 @@ static struct genl_family netlbl_unlabel_gnl_family __ro_after_init = {
.maxattr = NLBL_UNLABEL_A_MAX,
.policy = netlbl_unlabel_genl_policy,
.module = THIS_MODULE,
- .ops = netlbl_unlabel_genl_ops,
- .n_ops = ARRAY_SIZE(netlbl_unlabel_genl_ops),
+ .small_ops = netlbl_unlabel_genl_ops,
+ .n_small_ops = ARRAY_SIZE(netlbl_unlabel_genl_ops),
};
/*
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index d2d1448274f5..daca50d6bb12 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -848,7 +848,7 @@ retry:
*
* Test to see if the opener of the socket we received the message
* from had when the netlink socket was created and the sender of the
- * message has has the capability @cap in the user namespace @user_ns.
+ * message has the capability @cap in the user namespace @user_ns.
*/
bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
struct user_namespace *user_ns, int cap)
@@ -867,7 +867,7 @@ EXPORT_SYMBOL(__netlink_ns_capable);
*
* Test to see if the opener of the socket we received the message
* from had when the netlink socket was created and the sender of the
- * message has has the capability @cap in the user namespace @user_ns.
+ * message has the capability @cap in the user namespace @user_ns.
*/
bool netlink_ns_capable(const struct sk_buff *skb,
struct user_namespace *user_ns, int cap)
@@ -883,7 +883,7 @@ EXPORT_SYMBOL(netlink_ns_capable);
*
* Test to see if the opener of the socket we received the message
* from had when the netlink socket was created and the sender of the
- * message has has the capability @cap in all user namespaces.
+ * message has the capability @cap in all user namespaces.
*/
bool netlink_capable(const struct sk_buff *skb, int cap)
{
@@ -898,7 +898,7 @@ EXPORT_SYMBOL(netlink_capable);
*
* Test to see if the opener of the socket we received the message
* from had when the netlink socket was created and the sender of the
- * message has has the capability @cap over the network namespace of
+ * message has the capability @cap over the network namespace of
* the socket we received the message from.
*/
bool netlink_net_capable(const struct sk_buff *skb, int cap)
@@ -1853,7 +1853,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
struct scm_cookie scm;
u32 netlink_skb_flags = 0;
- if (msg->msg_flags&MSG_OOB)
+ if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
err = scm_send(sock, msg, &scm, true);
@@ -1916,7 +1916,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
refcount_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
}
- err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
+ err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags & MSG_DONTWAIT);
out:
scm_destroy(&scm);
@@ -1929,12 +1929,12 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct netlink_sock *nlk = nlk_sk(sk);
- int noblock = flags&MSG_DONTWAIT;
+ int noblock = flags & MSG_DONTWAIT;
size_t copied;
struct sk_buff *skb, *data_skb;
int err, ret;
- if (flags&MSG_OOB)
+ if (flags & MSG_OOB)
return -EOPNOTSUPP;
copied = 0;
@@ -2186,13 +2186,35 @@ EXPORT_SYMBOL(__nlmsg_put);
* It would be better to create kernel thread.
*/
+static int netlink_dump_done(struct netlink_sock *nlk, struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct netlink_ext_ack *extack)
+{
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(nlk->dump_done_errno),
+ NLM_F_MULTI | cb->answer_flags);
+ if (WARN_ON(!nlh))
+ return -ENOBUFS;
+
+ nl_dump_check_consistent(cb, nlh);
+ memcpy(nlmsg_data(nlh), &nlk->dump_done_errno, sizeof(nlk->dump_done_errno));
+
+ if (extack->_msg && nlk->flags & NETLINK_F_EXT_ACK) {
+ nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
+ if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack->_msg))
+ nlmsg_end(skb, nlh);
+ }
+
+ return 0;
+}
+
static int netlink_dump(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
struct netlink_ext_ack extack = {};
struct netlink_callback *cb;
struct sk_buff *skb = NULL;
- struct nlmsghdr *nlh;
struct module *module;
int err = -ENOBUFS;
int alloc_min_size;
@@ -2258,22 +2280,19 @@ static int netlink_dump(struct sock *sk)
return 0;
}
- nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE,
- sizeof(nlk->dump_done_errno),
- NLM_F_MULTI | cb->answer_flags);
- if (WARN_ON(!nlh))
+ if (netlink_dump_done(nlk, skb, cb, &extack))
goto errout_skb;
- nl_dump_check_consistent(cb, nlh);
-
- memcpy(nlmsg_data(nlh), &nlk->dump_done_errno,
- sizeof(nlk->dump_done_errno));
-
- if (extack._msg && nlk->flags & NETLINK_F_EXT_ACK) {
- nlh->nlmsg_flags |= NLM_F_ACK_TLVS;
- if (!nla_put_string(skb, NLMSGERR_ATTR_MSG, extack._msg))
- nlmsg_end(skb, nlh);
+#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+ /* frag_list skb's data is used for compat tasks
+ * and the regular skb's data for normal (non-compat) tasks.
+ * See netlink_recvmsg().
+ */
+ if (unlikely(skb_shinfo(skb)->frag_list)) {
+ if (netlink_dump_done(nlk, skb_shinfo(skb)->frag_list, cb, &extack))
+ goto errout_skb;
}
+#endif
if (sk_filter(sk, skb))
kfree_skb(skb);
@@ -2401,6 +2420,8 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
tlvlen += nla_total_size(sizeof(u32));
if (nlk_has_extack && extack && extack->cookie_len)
tlvlen += nla_total_size(extack->cookie_len);
+ if (err && nlk_has_extack && extack && extack->policy)
+ tlvlen += netlink_policy_dump_attr_size_estimate(extack->policy);
if (tlvlen)
flags |= NLM_F_ACK_TLVS;
@@ -2433,6 +2454,9 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
if (extack->cookie_len)
WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
extack->cookie_len, extack->cookie));
+ if (extack->policy)
+ netlink_policy_dump_write_attr(skb, extack->policy,
+ NLMSGERR_ATTR_POLICY);
}
nlmsg_end(skb, rep);
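Two things change in af_netlink.c: the NLMSG_DONE trailer is now built by one helper, netlink_dump_done(), which under CONFIG_COMPAT_NETLINK_MESSAGES is also applied to the frag_list clone that compat tasks read, and error acks can carry the offending policy as NLMSGERR_ATTR_POLICY, sized up front via netlink_policy_dump_attr_size_estimate(). From userspace, the DONE record's payload is simply the dump's final errno; a minimal sketch of producing and consuming it:

#include <assert.h>
#include <linux/netlink.h>
#include <string.h>

/* What netlink_dump_done() emits: an NLMSG_DONE header whose payload
 * is a single int, the dump's final status (0 or a negative errno). */
static int parse_done(const struct nlmsghdr *nlh)
{
	int err;

	assert(nlh->nlmsg_type == NLMSG_DONE);
	memcpy(&err, NLMSG_DATA(nlh), sizeof(err));
	return err;
}

int main(void)
{
	union {
		struct nlmsghdr h;
		char buf[NLMSG_SPACE(sizeof(int))];
	} msg = { 0 };
	int err = 0;

	msg.h.nlmsg_len = NLMSG_LENGTH(sizeof(int));
	msg.h.nlmsg_type = NLMSG_DONE;
	msg.h.nlmsg_flags = NLM_F_MULTI;
	memcpy(NLMSG_DATA(&msg.h), &err, sizeof(err));

	return parse_done(&msg.h);
}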
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index c4b4d3376227..c992424e4d63 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -107,16 +107,83 @@ static const struct genl_family *genl_family_find_byname(char *name)
return NULL;
}
-static const struct genl_ops *genl_get_cmd(u8 cmd,
- const struct genl_family *family)
+static int genl_get_cmd_cnt(const struct genl_family *family)
+{
+ return family->n_ops + family->n_small_ops;
+}
+
+static void genl_op_from_full(const struct genl_family *family,
+ unsigned int i, struct genl_ops *op)
+{
+ *op = family->ops[i];
+
+ if (!op->maxattr)
+ op->maxattr = family->maxattr;
+ if (!op->policy)
+ op->policy = family->policy;
+}
+
+static int genl_get_cmd_full(u32 cmd, const struct genl_family *family,
+ struct genl_ops *op)
{
int i;
for (i = 0; i < family->n_ops; i++)
- if (family->ops[i].cmd == cmd)
- return &family->ops[i];
+ if (family->ops[i].cmd == cmd) {
+ genl_op_from_full(family, i, op);
+ return 0;
+ }
- return NULL;
+ return -ENOENT;
+}
+
+static void genl_op_from_small(const struct genl_family *family,
+ unsigned int i, struct genl_ops *op)
+{
+ memset(op, 0, sizeof(*op));
+ op->doit = family->small_ops[i].doit;
+ op->dumpit = family->small_ops[i].dumpit;
+ op->cmd = family->small_ops[i].cmd;
+ op->internal_flags = family->small_ops[i].internal_flags;
+ op->flags = family->small_ops[i].flags;
+ op->validate = family->small_ops[i].validate;
+
+ op->maxattr = family->maxattr;
+ op->policy = family->policy;
+}
+
+static int genl_get_cmd_small(u32 cmd, const struct genl_family *family,
+ struct genl_ops *op)
+{
+ int i;
+
+ for (i = 0; i < family->n_small_ops; i++)
+ if (family->small_ops[i].cmd == cmd) {
+ genl_op_from_small(family, i, op);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static int genl_get_cmd(u32 cmd, const struct genl_family *family,
+ struct genl_ops *op)
+{
+ if (!genl_get_cmd_full(cmd, family, op))
+ return 0;
+ return genl_get_cmd_small(cmd, family, op);
+}
+
+static void genl_get_cmd_by_index(unsigned int i,
+ const struct genl_family *family,
+ struct genl_ops *op)
+{
+ if (i < family->n_ops)
+ genl_op_from_full(family, i, op);
+ else if (i < family->n_ops + family->n_small_ops)
+ genl_op_from_small(family, i - family->n_ops, op);
+ else
+ WARN_ON_ONCE(1);
}
static int genl_allocate_reserve_groups(int n_groups, int *first_id)
@@ -222,7 +289,7 @@ static int genl_validate_assign_mc_groups(struct genl_family *family)
family->mcgrp_offset = first_id;
- /* if still initializing, can't and don't need to to realloc bitmaps */
+ /* if still initializing, can't and don't need to realloc bitmaps */
if (!init_net.genl_sock)
return 0;
@@ -286,22 +353,25 @@ static void genl_unregister_mc_groups(const struct genl_family *family)
static int genl_validate_ops(const struct genl_family *family)
{
- const struct genl_ops *ops = family->ops;
- unsigned int n_ops = family->n_ops;
int i, j;
- if (WARN_ON(n_ops && !ops))
+ if (WARN_ON(family->n_ops && !family->ops) ||
+ WARN_ON(family->n_small_ops && !family->small_ops))
return -EINVAL;
- if (!n_ops)
- return 0;
+ for (i = 0; i < genl_get_cmd_cnt(family); i++) {
+ struct genl_ops op;
- for (i = 0; i < n_ops; i++) {
- if (ops[i].dumpit == NULL && ops[i].doit == NULL)
+ genl_get_cmd_by_index(i, family, &op);
+ if (op.dumpit == NULL && op.doit == NULL)
return -EINVAL;
- for (j = i + 1; j < n_ops; j++)
- if (ops[i].cmd == ops[j].cmd)
+ for (j = i + 1; j < genl_get_cmd_cnt(family); j++) {
+ struct genl_ops op2;
+
+ genl_get_cmd_by_index(j, family, &op2);
+ if (op.cmd == op2.cmd)
return -EINVAL;
+ }
}
return 0;
@@ -467,16 +537,16 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
struct nlattr **attrbuf;
int err;
- if (!family->maxattr)
+ if (!ops->maxattr)
return NULL;
- attrbuf = kmalloc_array(family->maxattr + 1,
+ attrbuf = kmalloc_array(ops->maxattr + 1,
sizeof(struct nlattr *), GFP_KERNEL);
if (!attrbuf)
return ERR_PTR(-ENOMEM);
- err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
- family->policy, validate, extack);
+ err = __nlmsg_parse(nlh, hdrlen, attrbuf, ops->maxattr, ops->policy,
+ validate, extack);
if (err) {
kfree(attrbuf);
return ERR_PTR(err);
@@ -524,7 +594,7 @@ no_attrs:
return -ENOMEM;
}
info->family = ctx->family;
- info->ops = ops;
+ info->op = *ops;
info->attrs = attrs;
cb->data = info;
@@ -546,7 +616,7 @@ no_attrs:
static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
- const struct genl_ops *ops = genl_dumpit_info(cb)->ops;
+ const struct genl_ops *ops = &genl_dumpit_info(cb)->op;
int rc;
genl_lock();
@@ -558,7 +628,7 @@ static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
static int genl_lock_done(struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- const struct genl_ops *ops = info->ops;
+ const struct genl_ops *ops = &info->op;
int rc = 0;
if (ops->done) {
@@ -574,7 +644,7 @@ static int genl_lock_done(struct netlink_callback *cb)
static int genl_parallel_done(struct netlink_callback *cb)
{
const struct genl_dumpit_info *info = genl_dumpit_info(cb);
- const struct genl_ops *ops = info->ops;
+ const struct genl_ops *ops = &info->op;
int rc = 0;
if (ops->done)
@@ -682,9 +752,9 @@ static int genl_family_rcv_msg(const struct genl_family *family,
struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
- const struct genl_ops *ops;
struct net *net = sock_net(skb->sk);
struct genlmsghdr *hdr = nlmsg_data(nlh);
+ struct genl_ops op;
int hdrlen;
/* this family doesn't exist in this netns */
@@ -695,24 +765,23 @@ static int genl_family_rcv_msg(const struct genl_family *family,
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
- ops = genl_get_cmd(hdr->cmd, family);
- if (ops == NULL)
+ if (genl_get_cmd(hdr->cmd, family, &op))
return -EOPNOTSUPP;
- if ((ops->flags & GENL_ADMIN_PERM) &&
+ if ((op.flags & GENL_ADMIN_PERM) &&
!netlink_capable(skb, CAP_NET_ADMIN))
return -EPERM;
- if ((ops->flags & GENL_UNS_ADMIN_PERM) &&
+ if ((op.flags & GENL_UNS_ADMIN_PERM) &&
!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
return genl_family_rcv_msg_dumpit(family, skb, nlh, extack,
- ops, hdrlen, net);
+ &op, hdrlen, net);
else
return genl_family_rcv_msg_doit(family, skb, nlh, extack,
- ops, hdrlen, net);
+ &op, hdrlen, net);
}
static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -765,7 +834,7 @@ static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
goto nla_put_failure;
- if (family->n_ops) {
+ if (genl_get_cmd_cnt(family)) {
struct nlattr *nla_ops;
int i;
@@ -773,23 +842,25 @@ static int ctrl_fill_info(const struct genl_family *family, u32 portid, u32 seq,
if (nla_ops == NULL)
goto nla_put_failure;
- for (i = 0; i < family->n_ops; i++) {
+ for (i = 0; i < genl_get_cmd_cnt(family); i++) {
struct nlattr *nest;
- const struct genl_ops *ops = &family->ops[i];
- u32 op_flags = ops->flags;
+ struct genl_ops op;
+ u32 op_flags;
- if (ops->dumpit)
+ genl_get_cmd_by_index(i, family, &op);
+ op_flags = op.flags;
+ if (op.dumpit)
op_flags |= GENL_CMD_CAP_DUMP;
- if (ops->doit)
+ if (op.doit)
op_flags |= GENL_CMD_CAP_DO;
- if (family->policy)
+ if (op.policy)
op_flags |= GENL_CMD_CAP_HASPOL;
nest = nla_nest_start_noflag(skb, i + 1);
if (nest == NULL)
goto nla_put_failure;
- if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
+ if (nla_put_u32(skb, CTRL_ATTR_OP_ID, op.cmd) ||
nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, op_flags))
goto nla_put_failure;
@@ -945,7 +1016,7 @@ ctrl_build_mcgrp_msg(const struct genl_family *family,
return skb;
}
-static const struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] = {
+static const struct nla_policy ctrl_policy_family[] = {
[CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
[CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
.len = GENL_NAMSIZ - 1 },
@@ -1039,83 +1110,218 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
return 0;
}
-static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
-{
+struct ctrl_dump_policy_ctx {
+ struct netlink_policy_dump_state *state;
const struct genl_family *rt;
- unsigned int fam_id = cb->args[0];
- int err;
+ unsigned int opidx;
+ u32 op;
+ u16 fam_id;
+ u8 policies:1,
+ single_op:1;
+};
- if (!fam_id) {
- struct nlattr *tb[CTRL_ATTR_MAX + 1];
+static const struct nla_policy ctrl_policy_policy[] = {
+ [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
+ [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_NUL_STRING,
+ .len = GENL_NAMSIZ - 1 },
+ [CTRL_ATTR_OP] = { .type = NLA_U32 },
+};
- err = genlmsg_parse(cb->nlh, &genl_ctrl, tb,
- genl_ctrl.maxattr,
- genl_ctrl.policy, cb->extack);
- if (err)
- return err;
+static int ctrl_dumppolicy_start(struct netlink_callback *cb)
+{
+ const struct genl_dumpit_info *info = genl_dumpit_info(cb);
+ struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
+ struct nlattr **tb = info->attrs;
+ const struct genl_family *rt;
+ struct genl_ops op;
+ int err, i;
- if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
- return -EINVAL;
+ BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));
- if (tb[CTRL_ATTR_FAMILY_ID]) {
- fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
- } else {
- rt = genl_family_find_byname(
- nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
- if (!rt)
- return -ENOENT;
- fam_id = rt->id;
- }
+ if (!tb[CTRL_ATTR_FAMILY_ID] && !tb[CTRL_ATTR_FAMILY_NAME])
+ return -EINVAL;
+
+ if (tb[CTRL_ATTR_FAMILY_ID]) {
+ ctx->fam_id = nla_get_u16(tb[CTRL_ATTR_FAMILY_ID]);
+ } else {
+ rt = genl_family_find_byname(
+ nla_data(tb[CTRL_ATTR_FAMILY_NAME]));
+ if (!rt)
+ return -ENOENT;
+ ctx->fam_id = rt->id;
}
- rt = genl_family_find_byid(fam_id);
+ rt = genl_family_find_byid(ctx->fam_id);
if (!rt)
return -ENOENT;
- if (!rt->policy)
+ ctx->rt = rt;
+
+ if (tb[CTRL_ATTR_OP]) {
+ ctx->single_op = true;
+ ctx->op = nla_get_u32(tb[CTRL_ATTR_OP]);
+
+ err = genl_get_cmd(ctx->op, rt, &op);
+ if (err) {
+ NL_SET_BAD_ATTR(cb->extack, tb[CTRL_ATTR_OP]);
+ return err;
+ }
+
+ if (!op.policy)
+ return -ENODATA;
+
+ return netlink_policy_dump_add_policy(&ctx->state, op.policy,
+ op.maxattr);
+ }
+
+ for (i = 0; i < genl_get_cmd_cnt(rt); i++) {
+ genl_get_cmd_by_index(i, rt, &op);
+
+ if (op.policy) {
+ err = netlink_policy_dump_add_policy(&ctx->state,
+ op.policy,
+ op.maxattr);
+ if (err)
+ return err;
+ }
+ }
+
+ if (!ctx->state)
return -ENODATA;
+ return 0;
+}
- err = netlink_policy_dump_start(rt->policy, rt->maxattr, &cb->args[1]);
- if (err)
- return err;
+static void *ctrl_dumppolicy_prep(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
+ void *hdr;
+
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &genl_ctrl,
+ NLM_F_MULTI, CTRL_CMD_GETPOLICY);
+ if (!hdr)
+ return NULL;
+
+ if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, ctx->fam_id))
+ return NULL;
+
+ return hdr;
+}
+
+static int ctrl_dumppolicy_put_op(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct genl_ops *op)
+{
+ struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
+ struct nlattr *nest_pol, *nest_op;
+ void *hdr;
+ int idx;
+
+ /* skip if we have nothing to show */
+ if (!op->policy)
+ return 0;
+ if (!op->doit &&
+ (!op->dumpit || op->validate & GENL_DONT_VALIDATE_DUMP))
+ return 0;
+
+ hdr = ctrl_dumppolicy_prep(skb, cb);
+ if (!hdr)
+ return -ENOBUFS;
+
+ nest_pol = nla_nest_start(skb, CTRL_ATTR_OP_POLICY);
+ if (!nest_pol)
+ goto err;
+
+ nest_op = nla_nest_start(skb, op->cmd);
+ if (!nest_op)
+ goto err;
+
+ /* for now both do/dump are always the same */
+ idx = netlink_policy_dump_get_policy_idx(ctx->state,
+ op->policy,
+ op->maxattr);
+
+ if (op->doit && nla_put_u32(skb, CTRL_ATTR_POLICY_DO, idx))
+ goto err;
+
+ if (op->dumpit && !(op->validate & GENL_DONT_VALIDATE_DUMP) &&
+ nla_put_u32(skb, CTRL_ATTR_POLICY_DUMP, idx))
+ goto err;
+
+ nla_nest_end(skb, nest_op);
+ nla_nest_end(skb, nest_pol);
+ genlmsg_end(skb, hdr);
- while (netlink_policy_dump_loop(cb->args[1])) {
- void *hdr;
+ return 0;
+err:
+ genlmsg_cancel(skb, hdr);
+ return -ENOBUFS;
+}
+
+static int ctrl_dumppolicy(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
+ void *hdr;
+
+ if (!ctx->policies) {
+ while (ctx->opidx < genl_get_cmd_cnt(ctx->rt)) {
+ struct genl_ops op;
+
+ if (ctx->single_op) {
+ int err;
+
+ err = genl_get_cmd(ctx->op, ctx->rt, &op);
+ if (WARN_ON(err))
+ return skb->len;
+
+ /* break out of the loop after this one */
+ ctx->opidx = genl_get_cmd_cnt(ctx->rt);
+ } else {
+ genl_get_cmd_by_index(ctx->opidx, ctx->rt, &op);
+ }
+
+ if (ctrl_dumppolicy_put_op(skb, cb, &op))
+ return skb->len;
+
+ ctx->opidx++;
+ }
+
+ /* completed with the per-op policy index list */
+ ctx->policies = true;
+ }
+
+ while (netlink_policy_dump_loop(ctx->state)) {
struct nlattr *nest;
- hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, &genl_ctrl,
- NLM_F_MULTI, CTRL_CMD_GETPOLICY);
+ hdr = ctrl_dumppolicy_prep(skb, cb);
if (!hdr)
goto nla_put_failure;
- if (nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, rt->id))
- goto nla_put_failure;
-
nest = nla_nest_start(skb, CTRL_ATTR_POLICY);
if (!nest)
goto nla_put_failure;
- if (netlink_policy_dump_write(skb, cb->args[1]))
+ if (netlink_policy_dump_write(skb, ctx->state))
goto nla_put_failure;
nla_nest_end(skb, nest);
genlmsg_end(skb, hdr);
- continue;
-
-nla_put_failure:
- genlmsg_cancel(skb, hdr);
- break;
}
- cb->args[0] = fam_id;
+ return skb->len;
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
return skb->len;
}
static int ctrl_dumppolicy_done(struct netlink_callback *cb)
{
- netlink_policy_dump_free(cb->args[1]);
+ struct ctrl_dump_policy_ctx *ctx = (void *)cb->ctx;
+
+ netlink_policy_dump_free(ctx->state);
return 0;
}
@@ -1123,11 +1329,16 @@ static const struct genl_ops genl_ctrl_ops[] = {
{
.cmd = CTRL_CMD_GETFAMILY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .policy = ctrl_policy_family,
+ .maxattr = ARRAY_SIZE(ctrl_policy_family) - 1,
.doit = ctrl_getfamily,
.dumpit = ctrl_dumpfamily,
},
{
.cmd = CTRL_CMD_GETPOLICY,
+ .policy = ctrl_policy_policy,
+ .maxattr = ARRAY_SIZE(ctrl_policy_policy) - 1,
+ .start = ctrl_dumppolicy_start,
.dumpit = ctrl_dumppolicy,
.done = ctrl_dumppolicy_done,
},
@@ -1146,8 +1357,6 @@ static struct genl_family genl_ctrl __ro_after_init = {
.id = GENL_ID_CTRL,
.name = "nlctrl",
.version = 0x2,
- .maxattr = CTRL_ATTR_MAX,
- .policy = ctrl_policy,
.netnsok = true,
};
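genetlink ops gain per-op .policy/.maxattr: genl_family_rcv_msg_attrs_parse() now validates against the op's own policy, genl_op_from_full()/genl_op_from_small() fill in the family-wide values as a fallback, and nlctrl itself adopts the pattern by giving CTRL_CMD_GETFAMILY and CTRL_CMD_GETPOLICY separate, tighter policies instead of one family-level ctrl_policy. A hedged fragment of the per-op shape (demo_* names are hypothetical; kernel-tree only):

#include <net/genetlink.h>

enum { DEMO_ATTR_UNSPEC, DEMO_ATTR_ID, __DEMO_ATTR_MAX };

/* This command only understands one attribute, so its policy says so;
 * ops that leave .policy NULL keep using the family policy. */
static const struct nla_policy demo_get_policy[] = {
	[DEMO_ATTR_ID] = { .type = NLA_U32 },
};

static int demo_get_doit(struct sk_buff *skb, struct genl_info *info)
{
	return 0;	/* real handler elided in this sketch */
}

static const struct genl_ops demo_genl_ops[] = {
	{
		.cmd		= 1,
		.doit		= demo_get_doit,
		.policy		= demo_get_policy,
		.maxattr	= ARRAY_SIZE(demo_get_policy) - 1,
	},
};

This is also what CTRL_CMD_GETPOLICY exposes to userspace: ctrl_dumppolicy() first lists, per op, which policy index its do/dump paths use, then dumps the de-duplicated policies themselves.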
diff --git a/net/netlink/policy.c b/net/netlink/policy.c
index 0176b59ce530..8d7c900e27f4 100644
--- a/net/netlink/policy.c
+++ b/net/netlink/policy.c
@@ -14,7 +14,7 @@
#define INITIAL_POLICIES_ALLOC 10
-struct nl_policy_dump {
+struct netlink_policy_dump_state {
unsigned int policy_idx;
unsigned int attr_idx;
unsigned int n_alloc;
@@ -24,18 +24,19 @@ struct nl_policy_dump {
} policies[];
};
-static int add_policy(struct nl_policy_dump **statep,
+static int add_policy(struct netlink_policy_dump_state **statep,
const struct nla_policy *policy,
unsigned int maxtype)
{
- struct nl_policy_dump *state = *statep;
+ struct netlink_policy_dump_state *state = *statep;
unsigned int n_alloc, i;
if (!policy || !maxtype)
return 0;
for (i = 0; i < state->n_alloc; i++) {
- if (state->policies[i].policy == policy)
+ if (state->policies[i].policy == policy &&
+ state->policies[i].maxtype == maxtype)
return 0;
if (!state->policies[i].policy) {
@@ -62,42 +63,85 @@ static int add_policy(struct nl_policy_dump **statep,
return 0;
}
-static unsigned int get_policy_idx(struct nl_policy_dump *state,
- const struct nla_policy *policy)
+/**
+ * netlink_policy_dump_get_policy_idx - retrieve policy index
+ * @state: the policy dump state
+ * @policy: the policy to find
+ * @maxtype: the policy's maxattr
+ *
+ * Returns: the index of the given policy in the dump state
+ *
+ * Call this to find a policy index when you've added multiple and e.g.
+ * need to tell userspace which command has which policy (by index).
+ *
+ * Note: this will WARN and return 0 if the policy isn't found, which
+ * means it wasn't added in the first place, which would be an
+ * internal consistency bug.
+ */
+int netlink_policy_dump_get_policy_idx(struct netlink_policy_dump_state *state,
+ const struct nla_policy *policy,
+ unsigned int maxtype)
{
unsigned int i;
+ if (WARN_ON(!policy || !maxtype))
+ return 0;
+
for (i = 0; i < state->n_alloc; i++) {
- if (state->policies[i].policy == policy)
+ if (state->policies[i].policy == policy &&
+ state->policies[i].maxtype == maxtype)
return i;
}
- WARN_ON_ONCE(1);
- return -1;
+ WARN_ON(1);
+ return 0;
+}
+
+static struct netlink_policy_dump_state *alloc_state(void)
+{
+ struct netlink_policy_dump_state *state;
+
+ state = kzalloc(struct_size(state, policies, INITIAL_POLICIES_ALLOC),
+ GFP_KERNEL);
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+ state->n_alloc = INITIAL_POLICIES_ALLOC;
+
+ return state;
}
-int netlink_policy_dump_start(const struct nla_policy *policy,
- unsigned int maxtype,
- unsigned long *_state)
+/**
+ * netlink_policy_dump_add_policy - add a policy to the dump
+ * @pstate: state to add to, may be reallocated, must be %NULL the first time
+ * @policy: the new policy to add to the dump
+ * @maxtype: the new policy's max attr type
+ *
+ * Returns: 0 on success, a negative error code otherwise.
+ *
+ * Call this to allocate a policy dump state, and to add policies to it. This
+ * should be called from the dump start() callback.
+ *
+ * Note: on failures, any previously allocated state is freed.
+ */
+int netlink_policy_dump_add_policy(struct netlink_policy_dump_state **pstate,
+ const struct nla_policy *policy,
+ unsigned int maxtype)
{
- struct nl_policy_dump *state;
+ struct netlink_policy_dump_state *state = *pstate;
unsigned int policy_idx;
int err;
- if (*_state)
- return 0;
+ if (!state) {
+ state = alloc_state();
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+ }
/*
* walk the policies and nested ones first, and build
* a linear list of them.
*/
- state = kzalloc(struct_size(state, policies, INITIAL_POLICIES_ALLOC),
- GFP_KERNEL);
- if (!state)
- return -ENOMEM;
- state->n_alloc = INITIAL_POLICIES_ALLOC;
-
err = add_policy(&state, policy, maxtype);
if (err)
return err;
@@ -128,62 +172,103 @@ int netlink_policy_dump_start(const struct nla_policy *policy,
}
}
- *_state = (unsigned long)state;
-
+ *pstate = state;
return 0;
}
-static bool netlink_policy_dump_finished(struct nl_policy_dump *state)
+static bool
+netlink_policy_dump_finished(struct netlink_policy_dump_state *state)
{
return state->policy_idx >= state->n_alloc ||
!state->policies[state->policy_idx].policy;
}
-bool netlink_policy_dump_loop(unsigned long _state)
+/**
+ * netlink_policy_dump_loop - dumping loop indicator
+ * @state: the policy dump state
+ *
+ * Returns: %true if the dump continues, %false otherwise
+ *
+ * Note: this frees the dump state when finishing
+ */
+bool netlink_policy_dump_loop(struct netlink_policy_dump_state *state)
{
- struct nl_policy_dump *state = (void *)_state;
-
return !netlink_policy_dump_finished(state);
}
-int netlink_policy_dump_write(struct sk_buff *skb, unsigned long _state)
+int netlink_policy_dump_attr_size_estimate(const struct nla_policy *pt)
{
- struct nl_policy_dump *state = (void *)_state;
- const struct nla_policy *pt;
- struct nlattr *policy, *attr;
- enum netlink_attribute_type type;
- bool again;
+ /* nested + type */
+ int common = 2 * nla_attr_size(sizeof(u32));
-send_attribute:
- again = false;
+ switch (pt->type) {
+ case NLA_UNSPEC:
+ case NLA_REJECT:
+ /* these actually don't need any space */
+ return 0;
+ case NLA_NESTED:
+ case NLA_NESTED_ARRAY:
+ /* common, policy idx, policy maxattr */
+ return common + 2 * nla_attr_size(sizeof(u32));
+ case NLA_U8:
+ case NLA_U16:
+ case NLA_U32:
+ case NLA_U64:
+ case NLA_MSECS:
+ case NLA_S8:
+ case NLA_S16:
+ case NLA_S32:
+ case NLA_S64:
+ /* maximum is common, u64 min/max with padding */
+ return common +
+ 2 * (nla_attr_size(0) + nla_attr_size(sizeof(u64)));
+ case NLA_BITFIELD32:
+ return common + nla_attr_size(sizeof(u32));
+ case NLA_STRING:
+ case NLA_NUL_STRING:
+ case NLA_BINARY:
+ /* maximum is common, u32 min-length/max-length */
+ return common + 2 * nla_attr_size(sizeof(u32));
+ case NLA_FLAG:
+ return common;
+ }
- pt = &state->policies[state->policy_idx].policy[state->attr_idx];
+ /* this should then cause a warning later */
+ return 0;
+}
- policy = nla_nest_start(skb, state->policy_idx);
- if (!policy)
- return -ENOBUFS;
+static int
+__netlink_policy_dump_write_attr(struct netlink_policy_dump_state *state,
+ struct sk_buff *skb,
+ const struct nla_policy *pt,
+ int nestattr)
+{
+ int estimate = netlink_policy_dump_attr_size_estimate(pt);
+ enum netlink_attribute_type type;
+ struct nlattr *attr;
- attr = nla_nest_start(skb, state->attr_idx);
+ attr = nla_nest_start(skb, nestattr);
if (!attr)
- goto nla_put_failure;
+ return -ENOBUFS;
switch (pt->type) {
default:
case NLA_UNSPEC:
case NLA_REJECT:
/* skip - use NLA_MIN_LEN to advertise such */
- nla_nest_cancel(skb, policy);
- again = true;
- goto next;
+ nla_nest_cancel(skb, attr);
+ return -ENODATA;
case NLA_NESTED:
type = NL_ATTR_TYPE_NESTED;
fallthrough;
case NLA_NESTED_ARRAY:
if (pt->type == NLA_NESTED_ARRAY)
type = NL_ATTR_TYPE_NESTED_ARRAY;
- if (pt->nested_policy && pt->len &&
+ if (state && pt->nested_policy && pt->len &&
(nla_put_u32(skb, NL_POLICY_TYPE_ATTR_POLICY_IDX,
- get_policy_idx(state, pt->nested_policy)) ||
+ netlink_policy_dump_get_policy_idx(state,
+ pt->nested_policy,
+ pt->len)) ||
nla_put_u32(skb, NL_POLICY_TYPE_ATTR_POLICY_MAXTYPE,
pt->len)))
goto nla_put_failure;
@@ -204,6 +289,14 @@ send_attribute:
else
type = NL_ATTR_TYPE_U64;
+ if (pt->validation_type == NLA_VALIDATE_MASK) {
+ if (nla_put_u64_64bit(skb, NL_POLICY_TYPE_ATTR_MASK,
+ pt->mask,
+ NL_POLICY_TYPE_ATTR_PAD))
+ goto nla_put_failure;
+ break;
+ }
+
nla_get_range_unsigned(pt, &range);
if (nla_put_u64_64bit(skb, NL_POLICY_TYPE_ATTR_MIN_VALUE_U,
@@ -243,12 +336,6 @@ send_attribute:
pt->bitfield32_valid))
goto nla_put_failure;
break;
- case NLA_EXACT_LEN:
- type = NL_ATTR_TYPE_BINARY;
- if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len) ||
- nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH, pt->len))
- goto nla_put_failure;
- break;
case NLA_STRING:
case NLA_NUL_STRING:
case NLA_BINARY:
@@ -258,14 +345,27 @@ send_attribute:
type = NL_ATTR_TYPE_NUL_STRING;
else
type = NL_ATTR_TYPE_BINARY;
- if (pt->len && nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
- pt->len))
- goto nla_put_failure;
- break;
- case NLA_MIN_LEN:
- type = NL_ATTR_TYPE_BINARY;
- if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH, pt->len))
+
+ if (pt->validation_type == NLA_VALIDATE_RANGE ||
+ pt->validation_type == NLA_VALIDATE_RANGE_WARN_TOO_LONG) {
+ struct netlink_range_validation range;
+
+ nla_get_range_unsigned(pt, &range);
+
+ if (range.min &&
+ nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MIN_LENGTH,
+ range.min))
+ goto nla_put_failure;
+
+ if (range.max < U16_MAX &&
+ nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
+ range.max))
+ goto nla_put_failure;
+ } else if (pt->len &&
+ nla_put_u32(skb, NL_POLICY_TYPE_ATTR_MAX_LENGTH,
+ pt->len)) {
goto nla_put_failure;
+ }
break;
case NLA_FLAG:
type = NL_ATTR_TYPE_FLAG;
@@ -275,8 +375,66 @@ send_attribute:
if (nla_put_u32(skb, NL_POLICY_TYPE_ATTR_TYPE, type))
goto nla_put_failure;
- /* finish and move state to next attribute */
nla_nest_end(skb, attr);
+ WARN_ON(attr->nla_len > estimate);
+
+ return 0;
+nla_put_failure:
+ nla_nest_cancel(skb, attr);
+ return -ENOBUFS;
+}
+
+/**
+ * netlink_policy_dump_write_attr - write a given attribute policy
+ * @skb: the message skb to write to
+ * @pt: the attribute's policy
+ * @nestattr: the nested attribute ID to use
+ *
+ * Returns: 0 on success, an error code otherwise; -%ENODATA is
+ * special, indicating that there's no policy data and
+ * the attribute is generally rejected.
+ */
+int netlink_policy_dump_write_attr(struct sk_buff *skb,
+ const struct nla_policy *pt,
+ int nestattr)
+{
+ return __netlink_policy_dump_write_attr(NULL, skb, pt, nestattr);
+}
+
+/**
+ * netlink_policy_dump_write - write current policy dump attributes
+ * @skb: the message skb to write to
+ * @state: the policy dump state
+ *
+ * Returns: 0 on success, an error code otherwise
+ */
+int netlink_policy_dump_write(struct sk_buff *skb,
+ struct netlink_policy_dump_state *state)
+{
+ const struct nla_policy *pt;
+ struct nlattr *policy;
+ bool again;
+ int err;
+
+send_attribute:
+ again = false;
+
+ pt = &state->policies[state->policy_idx].policy[state->attr_idx];
+
+ policy = nla_nest_start(skb, state->policy_idx);
+ if (!policy)
+ return -ENOBUFS;
+
+ err = __netlink_policy_dump_write_attr(state, skb, pt, state->attr_idx);
+ if (err == -ENODATA) {
+ nla_nest_cancel(skb, policy);
+ again = true;
+ goto next;
+ } else if (err) {
+ goto nla_put_failure;
+ }
+
+ /* finish and move state to next attribute */
nla_nest_end(skb, policy);
next:
@@ -299,9 +457,13 @@ nla_put_failure:
return -ENOBUFS;
}
-void netlink_policy_dump_free(unsigned long _state)
+/**
+ * netlink_policy_dump_free - free policy dump state
+ * @state: the policy dump state to free
+ *
+ * Call this from the done() method to ensure dump state is freed.
+ */
+void netlink_policy_dump_free(struct netlink_policy_dump_state *state)
{
- struct nl_policy_dump *state = (void *)_state;
-
kfree(state);
}
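Besides typing the state (struct netlink_policy_dump_state replaces the opaque unsigned long), the dedup logic now keys entries on the (policy, maxtype) pair, since one nla_policy array can legitimately be registered with different maxattr values, and each truncation is a distinct policy from userspace's point of view. A small userspace model of that index assignment:

#include <stdio.h>

/* Model of add_policy()/netlink_policy_dump_get_policy_idx(): entries
 * are keyed by both the policy pointer and maxtype. */
struct entry { const void *policy; unsigned int maxtype; };

static int find_or_add(struct entry *tab, int *n, const void *p,
		       unsigned int maxtype)
{
	int i;

	for (i = 0; i < *n; i++)
		if (tab[i].policy == p && tab[i].maxtype == maxtype)
			return i;		/* already present */
	tab[*n] = (struct entry){ p, maxtype };
	return (*n)++;
}

int main(void)
{
	struct entry tab[8];
	int n = 0, pol[4];

	printf("%d\n", find_or_add(tab, &n, pol, 3));	/* 0: new      */
	printf("%d\n", find_or_add(tab, &n, pol, 3));	/* 0: dedup    */
	printf("%d\n", find_or_add(tab, &n, pol, 2));	/* 1: new pair */
	return 0;
}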
diff --git a/net/nfc/digital_dep.c b/net/nfc/digital_dep.c
index 304b1a9bb18a..5971fb6f51cc 100644
--- a/net/nfc/digital_dep.c
+++ b/net/nfc/digital_dep.c
@@ -38,9 +38,6 @@
#define DIGITAL_GB_BIT 0x02
-#define DIGITAL_NFC_DEP_REQ_RES_HEADROOM 2 /* SoD: [SB (NFC-A)] + LEN */
-#define DIGITAL_NFC_DEP_REQ_RES_TAILROOM 2 /* EoD: 2-byte CRC */
-
#define DIGITAL_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0)
#define DIGITAL_NFC_DEP_PFB_TIMEOUT_BIT 0x10
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2611657f40ca..b87bfc82f44f 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -9,7 +9,6 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
-#include <linux/netfilter_ipv6.h>
#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
@@ -278,9 +277,11 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
*/
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
- skb_pull_rcsum(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- skb_reset_mac_len(skb);
+ int err;
+
+ err = skb_eth_pop(skb);
+ if (err)
+ return err;
/* safe right before invalidate_flow_key */
key->mac_proto = MAC_PROTO_NONE;
@@ -291,22 +292,12 @@ static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
static int push_eth(struct sk_buff *skb, struct sw_flow_key *key,
const struct ovs_action_push_eth *ethh)
{
- struct ethhdr *hdr;
-
- /* Add the new Ethernet header */
- if (skb_cow_head(skb, ETH_HLEN) < 0)
- return -ENOMEM;
-
- skb_push(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- skb_reset_mac_len(skb);
-
- hdr = eth_hdr(skb);
- ether_addr_copy(hdr->h_source, ethh->addresses.eth_src);
- ether_addr_copy(hdr->h_dest, ethh->addresses.eth_dst);
- hdr->h_proto = skb->protocol;
+ int err;
- skb_postpush_rcsum(skb, hdr, ETH_HLEN);
+ err = skb_eth_push(skb, ethh->addresses.eth_dst,
+ ethh->addresses.eth_src);
+ if (err)
+ return err;
/* safe right before invalidate_flow_key */
key->mac_proto = MAC_PROTO_ETHERNET;
@@ -742,7 +733,8 @@ static int set_sctp(struct sk_buff *skb, struct sw_flow_key *flow_key,
return 0;
}
-static int ovs_vport_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int ovs_vport_output(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
{
struct ovs_frag_data *data = this_cpu_ptr(&ovs_frag_data_storage);
struct vport *vport = data->vport;
@@ -848,13 +840,9 @@ static void ovs_fragment(struct net *net, struct vport *vport,
ip_do_fragment(net, skb->sk, skb, ovs_vport_output);
refdst_drop(orig_dst);
} else if (key->eth.type == htons(ETH_P_IPV6)) {
- const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops();
unsigned long orig_dst;
struct rt6_info ovs_rt;
- if (!v6ops)
- goto err;
-
prepare_frag(vport, skb, orig_network_offset,
ovs_key_mac_proto(key));
memset(&ovs_rt, 0, sizeof(ovs_rt));
@@ -866,7 +854,7 @@ static void ovs_fragment(struct net *net, struct vport *vport,
skb_dst_set_noref(skb, &ovs_rt.dst);
IP6CB(skb)->frag_max_size = mru;
- v6ops->fragment(net, skb->sk, skb, ovs_vport_output);
+ ipv6_stub->ipv6_fragment(net, skb->sk, skb, ovs_vport_output);
refdst_drop(orig_dst);
} else {
WARN_ONCE(1, "Failed fragment ->%s: eth=%04x, MRU=%d, MTU=%d.",
@@ -925,7 +913,7 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
upcall.mru = OVS_CB(skb)->mru;
for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
- a = nla_next(a, &rem)) {
+ a = nla_next(a, &rem)) {
switch (nla_type(a)) {
case OVS_USERSPACE_ATTR_USERDATA:
upcall.userdata = a;
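pop_eth()/push_eth() shrink to thin wrappers around the shared skb_eth_pop()/skb_eth_push() helpers, which bundle the skb_cow_head(), header writes, mac-header bookkeeping and rcsum fixups that were previously open-coded here. A simplified userspace model of the push side only (the real helper also handles VLAN-tagged frames and checksum state; this sketch assumes the caller guarantees ETH_HLEN of headroom):

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6
#define ETH_HLEN 14

struct ethhdr {
	uint8_t  h_dest[ETH_ALEN];
	uint8_t  h_source[ETH_ALEN];
	uint16_t h_proto;	/* network byte order */
};

/* Prepend an Ethernet header in the headroom before payload. */
static uint8_t *eth_push(uint8_t *payload, const uint8_t *dst,
			 const uint8_t *src, uint16_t proto_be)
{
	struct ethhdr *eth = (struct ethhdr *)(payload - ETH_HLEN);

	memcpy(eth->h_dest, dst, ETH_ALEN);
	memcpy(eth->h_source, src, ETH_ALEN);
	eth->h_proto = proto_be;
	return (uint8_t *)eth;
}

int main(void)
{
	uint8_t buf[ETH_HLEN + 4] = { 0 };
	static const uint8_t d[ETH_ALEN] = { 1 }, s[ETH_ALEN] = { 2 };

	eth_push(buf + ETH_HLEN, d, s, htons(0x0800 /* ETH_P_IP */));
	return 0;
}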
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 12d42ab0193b..4beb96139d77 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1905,8 +1905,8 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
lockdep_ovsl_is_held())
kfree_rcu(ct_limit, rcu);
}
- kfree(ovs_net->ct_limit_info->limits);
- kfree(ovs_net->ct_limit_info);
+ kfree(info->limits);
+ kfree(info);
}
static struct sk_buff *
@@ -2235,7 +2235,7 @@ exit_err:
return err;
}
-static struct genl_ops ct_limit_genl_ops[] = {
+static const struct genl_small_ops ct_limit_genl_ops[] = {
{ .cmd = OVS_CT_LIMIT_CMD_SET,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN
@@ -2267,8 +2267,8 @@ struct genl_family dp_ct_limit_genl_family __ro_after_init = {
.policy = ct_limit_policy,
.netnsok = true,
.parallel_ops = true,
- .ops = ct_limit_genl_ops,
- .n_ops = ARRAY_SIZE(ct_limit_genl_ops),
+ .small_ops = ct_limit_genl_ops,
+ .n_small_ops = ARRAY_SIZE(ct_limit_genl_ops),
.mcgrps = &ovs_ct_limit_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 6e47ef7ef036..832f898edb6a 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -182,7 +182,7 @@ struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
head = vport_hash_bucket(dp, port_no);
hlist_for_each_entry_rcu(vport, head, dp_hash_node,
- lockdep_ovsl_is_held()) {
+ lockdep_ovsl_is_held()) {
if (vport->port_no == port_no)
return vport;
}
@@ -254,7 +254,7 @@ void ovs_dp_process_packet(struct sk_buff *skb, struct sw_flow_key *key)
error = ovs_execute_actions(dp, skb, sf_acts, key);
if (unlikely(error))
net_dbg_ratelimited("ovs: action execution error on datapath %s: %d\n",
- ovs_dp_name(dp), error);
+ ovs_dp_name(dp), error);
stats_counter = &stats->n_hit;
@@ -302,7 +302,7 @@ err:
static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
const struct sw_flow_key *key,
const struct dp_upcall_info *upcall_info,
- uint32_t cutlen)
+ uint32_t cutlen)
{
unsigned int gso_type = skb_shinfo(skb)->gso_type;
struct sw_flow_key later_key;
@@ -652,7 +652,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_HASH] = { .type = NLA_U64 },
};
-static const struct genl_ops dp_packet_genl_ops[] = {
+static const struct genl_small_ops dp_packet_genl_ops[] = {
{ .cmd = OVS_PACKET_CMD_EXECUTE,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -668,8 +668,8 @@ static struct genl_family dp_packet_genl_family __ro_after_init = {
.policy = packet_policy,
.netnsok = true,
.parallel_ops = true,
- .ops = dp_packet_genl_ops,
- .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
+ .small_ops = dp_packet_genl_ops,
+ .n_small_ops = ARRAY_SIZE(dp_packet_genl_ops),
.module = THIS_MODULE,
};
@@ -1080,11 +1080,12 @@ error:
}
/* Factor out action copy to avoid "Wframe-larger-than=1024" warning. */
-static noinline_for_stack struct sw_flow_actions *get_flow_actions(struct net *net,
- const struct nlattr *a,
- const struct sw_flow_key *key,
- const struct sw_flow_mask *mask,
- bool log)
+static noinline_for_stack
+struct sw_flow_actions *get_flow_actions(struct net *net,
+ const struct nlattr *a,
+ const struct sw_flow_key *key,
+ const struct sw_flow_mask *mask,
+ bool log)
{
struct sw_flow_actions *acts;
struct sw_flow_key masked_key;
@@ -1383,7 +1384,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
ovs_notify(&dp_flow_genl_family, reply, info);
} else {
- netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
+ netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0,
+ PTR_ERR(reply));
}
}
@@ -1451,7 +1453,7 @@ static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
[OVS_FLOW_ATTR_UFID_FLAGS] = { .type = NLA_U32 },
};
-static const struct genl_ops dp_flow_genl_ops[] = {
+static const struct genl_small_ops dp_flow_genl_ops[] = {
{ .cmd = OVS_FLOW_CMD_NEW,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -1483,8 +1485,8 @@ static struct genl_family dp_flow_genl_family __ro_after_init = {
.policy = flow_policy,
.netnsok = true,
.parallel_ops = true,
- .ops = dp_flow_genl_ops,
- .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
+ .small_ops = dp_flow_genl_ops,
+ .n_small_ops = ARRAY_SIZE(dp_flow_genl_ops),
.mcgrps = &ovs_dp_flow_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
@@ -1513,7 +1515,7 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
int err;
ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
- flags, cmd);
+ flags, cmd);
if (!ovs_header)
goto error;
@@ -1572,11 +1574,13 @@ static struct datapath *lookup_datapath(struct net *net,
return dp ? dp : ERR_PTR(-ENODEV);
}
-static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
+static void ovs_dp_reset_user_features(struct sk_buff *skb,
+ struct genl_info *info)
{
struct datapath *dp;
- dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
+ dp = lookup_datapath(sock_net(skb->sk), info->userhdr,
+ info->attrs);
if (IS_ERR(dp))
return;
@@ -1914,7 +1918,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
};
-static const struct genl_ops dp_datapath_genl_ops[] = {
+static const struct genl_small_ops dp_datapath_genl_ops[] = {
{ .cmd = OVS_DP_CMD_NEW,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -1946,8 +1950,8 @@ static struct genl_family dp_datapath_genl_family __ro_after_init = {
.policy = datapath_policy,
.netnsok = true,
.parallel_ops = true,
- .ops = dp_datapath_genl_ops,
- .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
+ .small_ops = dp_datapath_genl_ops,
+ .n_small_ops = ARRAY_SIZE(dp_datapath_genl_ops),
.mcgrps = &ovs_dp_datapath_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
@@ -2075,7 +2079,7 @@ static unsigned int ovs_get_max_headroom(struct datapath *dp)
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
- lockdep_ovsl_is_held()) {
+ lockdep_ovsl_is_held()) {
dev = vport->dev;
dev_headroom = netdev_get_fwd_headroom(dev);
if (dev_headroom > max_headroom)
@@ -2093,10 +2097,11 @@ static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
int i;
dp->max_headroom = new_headroom;
- for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
+ for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
- lockdep_ovsl_is_held())
+ lockdep_ovsl_is_held())
netdev_set_rx_headroom(vport->dev, new_headroom);
+ }
}
static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
@@ -2396,7 +2401,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
[OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
};
-static const struct genl_ops dp_vport_genl_ops[] = {
+static const struct genl_small_ops dp_vport_genl_ops[] = {
{ .cmd = OVS_VPORT_CMD_NEW,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = GENL_UNS_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
@@ -2428,8 +2433,8 @@ struct genl_family dp_vport_genl_family __ro_after_init = {
.policy = vport_policy,
.netnsok = true,
.parallel_ops = true,
- .ops = dp_vport_genl_ops,
- .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
+ .small_ops = dp_vport_genl_ops,
+ .n_small_ops = ARRAY_SIZE(dp_vport_genl_ops),
.mcgrps = &ovs_dp_vport_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
@@ -2476,13 +2481,19 @@ error:
static int __net_init ovs_init_net(struct net *net)
{
struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
+ int err;
INIT_LIST_HEAD(&ovs_net->dps);
INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
INIT_DELAYED_WORK(&ovs_net->masks_rebalance, ovs_dp_masks_rebalance);
+
+ err = ovs_ct_init(net);
+ if (err)
+ return err;
+
schedule_delayed_work(&ovs_net->masks_rebalance,
msecs_to_jiffies(DP_MASKS_REBALANCE_INTERVAL));
- return ovs_ct_init(net);
+ return 0;
}
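
The reordering in ovs_init_net() above moves the fallible ovs_ct_init() ahead of scheduling the rebalance work, so an error return cannot leave armed delayed work behind in a namespace that failed to initialize. A reduced sketch of the ordering rule, with all names invented for illustration:

#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work) { }
static DECLARE_DELAYED_WORK(example_work, example_work_fn);

static int example_fallible_init(void) { return 0; }   /* stand-in */

static int example_init(void)
{
        int err;

        err = example_fallible_init();  /* everything that can fail, first */
        if (err)
                return err;             /* nothing armed, nothing to unwind */

        /* Side effects needing teardown are only armed once failure is impossible. */
        schedule_delayed_work(&example_work, HZ);
        return 0;
}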
static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
@@ -2551,7 +2562,8 @@ static int __init dp_init(void)
{
int err;
- BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof_field(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(struct ovs_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
pr_info("Open vSwitch switching datapath\n");
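
The `.ops`/`.n_ops` to `.small_ops`/`.n_small_ops` conversions in this file (and in meter.c and psample.c below) follow the generic-netlink split between `struct genl_ops` and the more compact `struct genl_small_ops`, which drops the rarely used per-op members such as `start` and `done`. A minimal sketch of the resulting registration pattern; everything except the genetlink API names is invented for illustration:

#include <net/genetlink.h>

static int example_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        return 0;       /* handle the request here */
}

static const struct genl_small_ops example_genl_ops[] = {
        {
                .cmd      = 1,
                .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
                .doit     = example_cmd_get,
        },
};

static struct genl_family example_genl_family __ro_after_init = {
        .name         = "example",
        .version      = 1,
        .module       = THIS_MODULE,
        /* small_ops replaces ops; the count field is renamed to match. */
        .small_ops    = example_genl_ops,
        .n_small_ops  = ARRAY_SIZE(example_genl_ops),
};

/* Registered as before, e.g. genl_register_family(&example_genl_family); */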
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index e2235849a57e..87c286ad660e 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -111,12 +111,16 @@ static void flow_free(struct sw_flow *flow)
if (ovs_identifier_is_key(&flow->id))
kfree(flow->id.unmasked_key);
if (flow->sf_acts)
- ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
+ ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
+ flow->sf_acts);
/* We open code this to make sure cpu 0 is always considered */
- for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
+ for (cpu = 0; cpu < nr_cpu_ids;
+ cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
if (flow->stats[cpu])
kmem_cache_free(flow_stats_cache,
(struct sw_flow_stats __force *)flow->stats[cpu]);
+ }
+
kmem_cache_free(flow_cache, flow);
}
@@ -164,7 +168,6 @@ static struct table_instance *table_instance_alloc(int new_size)
ti->n_buckets = new_size;
ti->node_ver = 0;
- ti->keep_flows = false;
get_random_bytes(&ti->hash_seed, sizeof(u32));
return ti;
@@ -192,7 +195,7 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
* zero based counter we store the value at reset, and subtract it
* later when processing.
*/
- for (i = 0; i < ma->max; i++) {
+ for (i = 0; i < ma->max; i++) {
ma->masks_usage_zero_cntr[i] = 0;
for_each_possible_cpu(cpu) {
@@ -273,7 +276,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
if (ma_count >= ma->max) {
err = tbl_mask_array_realloc(tbl, ma->max +
- MASK_ARRAY_SIZE_MIN);
+ MASK_ARRAY_SIZE_MIN);
if (err)
return err;
@@ -288,7 +291,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
BUG_ON(ovsl_dereference(ma->masks[ma_count]));
rcu_assign_pointer(ma->masks[ma_count], new);
- WRITE_ONCE(ma->count, ma_count +1);
+ WRITE_ONCE(ma->count, ma_count + 1);
return 0;
}
@@ -309,10 +312,10 @@ static void tbl_mask_array_del_mask(struct flow_table *tbl,
return;
found:
- WRITE_ONCE(ma->count, ma_count -1);
+ WRITE_ONCE(ma->count, ma_count - 1);
- rcu_assign_pointer(ma->masks[i], ma->masks[ma_count -1]);
- RCU_INIT_POINTER(ma->masks[ma_count -1], NULL);
+ rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
+ RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
kfree_rcu(mask, rcu);
@@ -448,26 +451,23 @@ free_mask_cache:
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
- struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
+ struct table_instance *ti;
+ ti = container_of(rcu, struct table_instance, rcu);
__table_instance_destroy(ti);
}
static void table_instance_flow_free(struct flow_table *table,
- struct table_instance *ti,
- struct table_instance *ufid_ti,
- struct sw_flow *flow,
- bool count)
+ struct table_instance *ti,
+ struct table_instance *ufid_ti,
+ struct sw_flow *flow)
{
hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
- if (count)
- table->count--;
+ table->count--;
if (ovs_identifier_is_ufid(&flow->id)) {
hlist_del_rcu(&flow->ufid_table.node[ufid_ti->node_ver]);
-
- if (count)
- table->ufid_count--;
+ table->ufid_count--;
}
flow_mask_remove(table, flow->mask);
@@ -480,22 +480,25 @@ void table_instance_flow_flush(struct flow_table *table,
{
int i;
- if (ti->keep_flows)
- return;
-
for (i = 0; i < ti->n_buckets; i++) {
- struct sw_flow *flow;
struct hlist_head *head = &ti->buckets[i];
struct hlist_node *n;
+ struct sw_flow *flow;
hlist_for_each_entry_safe(flow, n, head,
flow_table.node[ti->node_ver]) {
table_instance_flow_free(table, ti, ufid_ti,
- flow, false);
+ flow);
ovs_flow_free(flow, true);
}
}
+
+ if (WARN_ON(table->count != 0 ||
+ table->ufid_count != 0)) {
+ table->count = 0;
+ table->ufid_count = 0;
+ }
}
static void table_instance_destroy(struct table_instance *ti,
@@ -596,8 +599,6 @@ static void flow_table_copy_flows(struct table_instance *old,
lockdep_ovsl_is_held())
table_instance_insert(new, flow);
}
-
- old->keep_flows = true;
}
static struct table_instance *table_instance_rehash(struct table_instance *ti,
@@ -632,8 +633,6 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
rcu_assign_pointer(flow_table->ti, new_ti);
rcu_assign_pointer(flow_table->ufid_ti, new_ufid_ti);
flow_table->last_rehash = jiffies;
- flow_table->count = 0;
- flow_table->ufid_count = 0;
table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
table_instance_destroy(old_ti, old_ufid_ti);
@@ -661,7 +660,7 @@ static int flow_key_start(const struct sw_flow_key *key)
return 0;
else
return rounddown(offsetof(struct sw_flow_key, phy),
- sizeof(long));
+ sizeof(long));
}
static bool cmp_key(const struct sw_flow_key *key1,
@@ -673,7 +672,7 @@ static bool cmp_key(const struct sw_flow_key *key1,
long diffs = 0;
int i;
- for (i = key_start; i < key_end; i += sizeof(long))
+ for (i = key_start; i < key_end; i += sizeof(long))
diffs |= *cp1++ ^ *cp2++;
return diffs == 0;
@@ -713,7 +712,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
(*n_mask_hit)++;
hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
- lockdep_ovsl_is_held()) {
+ lockdep_ovsl_is_held()) {
if (flow->mask == mask && flow->flow_table.hash == hash &&
flow_cmp_masked_key(flow, &masked_key, &mask->range))
return flow;
@@ -897,7 +896,8 @@ static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
}
-bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
+bool ovs_flow_cmp(const struct sw_flow *flow,
+ const struct sw_flow_match *match)
{
if (ovs_identifier_is_ufid(&flow->id))
return flow_cmp_masked_key(flow, match->key, &match->range);
@@ -916,7 +916,7 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
hash = ufid_hash(ufid);
head = find_bucket(ti, hash);
hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
- lockdep_ovsl_is_held()) {
+ lockdep_ovsl_is_held()) {
if (flow->ufid_table.hash == hash &&
ovs_flow_cmp_ufid(flow, ufid))
return flow;
@@ -950,7 +950,7 @@ void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
BUG_ON(table->count == 0);
- table_instance_flow_free(table, ti, ufid_ti, flow, true);
+ table_instance_flow_free(table, ti, ufid_ti, flow);
}
static struct sw_flow_mask *mask_alloc(void)
@@ -1107,7 +1107,7 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
if (!masks_and_count)
return;
- for (i = 0; i < ma->max; i++) {
+ for (i = 0; i < ma->max; i++) {
struct sw_flow_mask *mask;
unsigned int start;
int cpu;
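
The tbl_mask_array_del_mask() hunk above compacts an RCU-visible pointer array by moving the last entry into the vacated slot rather than shifting the whole tail; concurrent RCU readers tolerate briefly seeing a moved entry twice or not at all. A stand-alone sketch of the same idiom, assuming a small fixed array and a writer-side lock held by the caller (sparse __rcu handling simplified):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        struct rcu_head rcu;
        /* ... payload ... */
};

struct foo_array {
        int count;                      /* writer-side element count */
        struct foo __rcu *entries[16];
};

/* Called with the writer-side lock held; 'i' indexes a live entry. */
static void foo_array_del(struct foo_array *a, int i)
{
        struct foo *victim = rcu_dereference_protected(a->entries[i], 1);
        struct foo *last = rcu_dereference_protected(a->entries[a->count - 1], 1);

        /* Shrink first so readers never index beyond live slots. */
        WRITE_ONCE(a->count, a->count - 1);

        /* Plug the hole with the last entry instead of shifting the tail. */
        rcu_assign_pointer(a->entries[i], last);
        RCU_INIT_POINTER(a->entries[a->count], NULL);

        kfree_rcu(victim, rcu);         /* reclaim after a grace period */
}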
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 6e7d4ac59353..d8fb7a3a3dfd 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -53,7 +53,6 @@ struct table_instance {
struct rcu_head rcu;
int node_ver;
u32 hash_seed;
- bool keep_flows;
};
struct flow_table {
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 3d3d8e094546..8fbefd52af7f 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -672,7 +672,7 @@ bool ovs_meter_execute(struct datapath *dp, struct sk_buff *skb,
return false;
}
-static struct genl_ops dp_meter_genl_ops[] = {
+static const struct genl_small_ops dp_meter_genl_ops[] = {
{ .cmd = OVS_METER_CMD_FEATURES,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
.flags = 0, /* OK for unprivileged users. */
@@ -711,8 +711,8 @@ struct genl_family dp_meter_genl_family __ro_after_init = {
.policy = meter_policy,
.netnsok = true,
.parallel_ops = true,
- .ops = dp_meter_genl_ops,
- .n_ops = ARRAY_SIZE(dp_meter_genl_ops),
+ .small_ops = dp_meter_genl_ops,
+ .n_small_ops = ARRAY_SIZE(dp_meter_genl_ops),
.mcgrps = &ovs_meter_multicast_group,
.n_mcgrps = 1,
.module = THIS_MODULE,
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 58a7b8312c28..1e30d8df3ba5 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -86,31 +86,13 @@ static void internal_dev_destructor(struct net_device *dev)
static void
internal_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
- int i;
-
memset(stats, 0, sizeof(*stats));
stats->rx_errors = dev->stats.rx_errors;
stats->tx_errors = dev->stats.tx_errors;
stats->tx_dropped = dev->stats.tx_dropped;
stats->rx_dropped = dev->stats.rx_dropped;
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *percpu_stats;
- struct pcpu_sw_netstats local_stats;
- unsigned int start;
-
- percpu_stats = per_cpu_ptr(dev->tstats, i);
-
- do {
- start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
- local_stats = *percpu_stats;
- } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
-
- stats->rx_bytes += local_stats.rx_bytes;
- stats->rx_packets += local_stats.rx_packets;
- stats->tx_bytes += local_stats.tx_bytes;
- stats->tx_packets += local_stats.tx_packets;
- }
+ dev_fetch_sw_netstats(stats, dev->tstats);
}
static const struct net_device_ops internal_dev_netdev_ops = {
@@ -225,7 +207,6 @@ static void internal_dev_destroy(struct vport *vport)
static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
{
struct net_device *netdev = skb->dev;
- struct pcpu_sw_netstats *stats;
if (unlikely(!(netdev->flags & IFF_UP))) {
kfree_skb(skb);
@@ -240,12 +221,7 @@ static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
skb->pkt_type = PACKET_HOST;
skb->protocol = eth_type_trans(skb, netdev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
-
- stats = this_cpu_ptr(netdev->tstats);
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ dev_sw_netstats_rx_add(netdev, skb->len);
netif_rx(skb);
return NETDEV_TX_OK;
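
Both vport-internal_dev.c hunks replace open-coded per-CPU counter handling with core helpers: dev_fetch_sw_netstats() performs the u64_stats fetch/retry loop across every possible CPU, and dev_sw_netstats_rx_add() wraps the this_cpu_ptr() update on the receive path. A hedged sketch of how a driver would use them, assuming dev->tstats was allocated with netdev_alloc_pcpu_stats():

#include <linux/netdevice.h>

/* RX fast path: account one received packet on this CPU's counters. */
static void example_rx(struct net_device *dev, struct sk_buff *skb)
{
        dev_sw_netstats_rx_add(dev, skb->len);
        netif_rx(skb);
}

/* ndo_get_stats64: fold the per-CPU tstats into the aggregate view. */
static void example_get_stats64(struct net_device *dev,
                                struct rtnl_link_stats64 *stats)
{
        dev_fetch_sw_netstats(stats, dev->tstats);
        stats->rx_errors = dev->stats.rx_errors;  /* fields tstats doesn't cover */
}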
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 0d44c5c013fa..82d801f063b7 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -98,7 +98,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
struct vport *vport;
hlist_for_each_entry_rcu(vport, bucket, hash_node,
- lockdep_ovsl_is_held())
+ lockdep_ovsl_is_held())
if (!strcmp(name, ovs_vport_name(vport)) &&
net_eq(ovs_dp_get_net(vport->dp), net))
return vport;
@@ -118,7 +118,7 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
* vport_free().
*/
struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
- const struct vport_parms *parms)
+ const struct vport_parms *parms)
{
struct vport *vport;
size_t alloc_size;
@@ -397,7 +397,8 @@ int ovs_vport_get_upcall_portids(const struct vport *vport,
*
* Returns the portid of the target socket. Must be called with rcu_read_lock.
*/
-u32 ovs_vport_find_upcall_portid(const struct vport *vport, struct sk_buff *skb)
+u32 ovs_vport_find_upcall_portid(const struct vport *vport,
+ struct sk_buff *skb)
{
struct vport_portids *ids;
u32 ids_index;
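
The re-indented lockdep_ovsl_is_held() arguments in the vport.c and datapath.c hunks are the optional lockdep condition accepted by the RCU hlist iterators, letting one walker serve paths that hold either rcu_read_lock() or ovs_mutex without tripping the RCU-usage checker. A sketch of the pattern against a hypothetical mutex:

#include <linux/mutex.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(example_mutex);

struct item {
        struct hlist_node node;
        u32 key;
};

/* Safe under rcu_read_lock() *or* example_mutex; lockdep stays quiet. */
static struct item *item_lookup(struct hlist_head *bucket, u32 key)
{
        struct item *it;

        hlist_for_each_entry_rcu(it, bucket, node,
                                 lockdep_is_held(&example_mutex)) {
                if (it->key == key)
                        return it;
        }
        return NULL;
}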
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 2b33e977a905..cefbd50c1090 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -93,52 +93,56 @@
/*
Assumptions:
- - if device has no dev->hard_header routine, it adds and removes ll header
- inside itself. In this case ll header is invisible outside of device,
- but higher levels still should reserve dev->hard_header_len.
- Some devices are enough clever to reallocate skb, when header
- will not fit to reserved space (tunnel), another ones are silly
- (PPP).
+ - If the device has no dev->header_ops, there is no LL header visible
+ above the device. In this case, its hard_header_len should be 0.
+ The device may prepend its own header internally. In this case, its
+ needed_headroom should be set to the space needed for it to add its
+ internal header.
+ For example, a WiFi driver pretending to be an Ethernet driver should
+ set its hard_header_len to be the Ethernet header length, and set its
+ needed_headroom to be (the real WiFi header length - the fake Ethernet
+ header length).
- packet socket receives packets with pulled ll header,
so that SOCK_RAW should push it back.
On receive:
-----------
-Incoming, dev->hard_header!=NULL
+Incoming, dev->header_ops != NULL
mac_header -> ll header
data -> data
-Outgoing, dev->hard_header!=NULL
+Outgoing, dev->header_ops != NULL
mac_header -> ll header
data -> ll header
-Incoming, dev->hard_header==NULL
- mac_header -> UNKNOWN position. It is very likely, that it points to ll
- header. PPP makes it, that is wrong, because introduce
- assymetry between rx and tx paths.
+Incoming, dev->header_ops == NULL
+ mac_header -> data
+ However drivers often make it point to the ll header.
+ This is incorrect because the ll header should be invisible to us.
data -> data
-Outgoing, dev->hard_header==NULL
- mac_header -> data. ll header is still not built!
+Outgoing, dev->header_ops == NULL
+ mac_header -> data. ll header is invisible to us.
data -> data
Resume
- If dev->hard_header==NULL we are unlikely to restore sensible ll header.
+ If dev->header_ops == NULL we are unable to restore the ll header,
+ because it is invisible to us.
On transmit:
------------
-dev->hard_header != NULL
+dev->header_ops != NULL
mac_header -> ll header
data -> ll header
-dev->hard_header == NULL (ll header is added by device, we cannot control it)
+dev->header_ops == NULL (ll header is invisible to us)
mac_header -> data
data -> data
- We should set nh.raw on output to correct posistion,
+ We should set network_header on output to the correct position,
packet classifier depends on it.
*/
@@ -177,7 +181,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
#define BLOCK_LEN(x) ((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x) ((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x) ((x)->offset_to_priv)
-#define BLOCK_PRIV(x) ((void *)((char *)(x) + BLOCK_O2PRIV(x)))
struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
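
The rewritten af_packet.c comment block distinguishes hard_header_len (the link-layer header visible above the device) from needed_headroom (extra space the device must be able to prepend internally). For the WiFi-pretending-to-be-Ethernet case it describes, the setup would look roughly like this; the header size is an assumed placeholder:

#include <linux/etherdevice.h>

#define EXAMPLE_WIFI_HDR_LEN 30         /* assumed on-air header size */

static void example_wifi_setup(struct net_device *dev)
{
        ether_setup(dev);               /* sets dev->hard_header_len = ETH_HLEN */

        /*
         * Extra room the driver needs to strip the fake Ethernet header
         * and build the real link-layer header on transmit.
         */
        dev->needed_headroom = EXAMPLE_WIFI_HDR_LEN - ETH_HLEN;
}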
diff --git a/net/psample/psample.c b/net/psample/psample.c
index a042261a45c5..33e238c965bd 100644
--- a/net/psample/psample.c
+++ b/net/psample/psample.c
@@ -96,7 +96,7 @@ static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
return msg->len;
}
-static const struct genl_ops psample_nl_ops[] = {
+static const struct genl_small_ops psample_nl_ops[] = {
{
.cmd = PSAMPLE_CMD_GET_GROUP,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -112,8 +112,8 @@ static struct genl_family psample_nl_family __ro_after_init = {
.netnsok = true,
.module = THIS_MODULE,
.mcgrps = psample_nl_mcgrps,
- .ops = psample_nl_ops,
- .n_ops = ARRAY_SIZE(psample_nl_ops),
+ .small_ops = psample_nl_ops,
+ .n_small_ops = ARRAY_SIZE(psample_nl_ops),
.n_mcgrps = ARRAY_SIZE(psample_nl_mcgrps),
};
diff --git a/net/rds/cong.c b/net/rds/cong.c
index ccdff09a79c8..8b689ebbd5b5 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -236,7 +236,7 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
* tcp_setsockopt and/or tcp_sendmsg will deadlock
* when it tries to get the sock_lock())
* 2. Interrupts are masked so that we can mark the
- * the port congested from both send and recv paths.
+ * port congested from both send and recv paths.
* (See comment around declaration of rdc_cong_lock).
* An attempt to get the sock_lock() here will
* therefore trigger warnings.
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index c3319ff3ee11..06603dd1c8aa 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -711,7 +711,7 @@ static u32 rds_ib_protocol_compatible(struct rdma_cm_event *event, bool isv6)
* original size. The only way to tell the difference is by looking at
* the contents, which are initialized to zero.
* If the protocol version fields aren't set, this is a connection attempt
- * from an older version. This could could be 3.0 or 2.0 - we can't tell.
+ * from an older version. This could be 3.0 or 2.0 - we can't tell.
* We really should have changed this for OFED 1.3 :-(
*/
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 694d411dc72f..3cffcec5fb37 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -310,8 +310,8 @@ static int rds_ib_recv_refill_one(struct rds_connection *conn,
struct rds_ib_connection *ic = conn->c_transport_data;
struct ib_sge *sge;
int ret = -ENOMEM;
- gfp_t slab_mask = GFP_NOWAIT;
- gfp_t page_mask = GFP_NOWAIT;
+ gfp_t slab_mask = gfp;
+ gfp_t page_mask = gfp;
if (gfp & __GFP_DIRECT_RECLAIM) {
slab_mask = GFP_KERNEL;
@@ -1020,7 +1020,7 @@ void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic,
rds_ib_stats_inc(s_ib_rx_ring_empty);
if (rds_ib_ring_low(&ic->i_recv_ring)) {
- rds_ib_recv_refill(conn, 0, GFP_NOWAIT);
+ rds_ib_recv_refill(conn, 0, GFP_NOWAIT | __GFP_NOWARN);
rds_ib_stats_inc(s_ib_rx_refill_from_cq);
}
}
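
The refill changes above propagate the caller's gfp mask instead of hard-coding GFP_NOWAIT, and the opportunistic refill from the completion handler adds __GFP_NOWARN so an expected atomic-context allocation failure does not spam the log. The general shape, as a sketch with invented names:

#include <linux/kernel.h>
#include <linux/slab.h>

struct example_ring {
        void *slots[64];
        unsigned int head;
};

/* Best-effort refill from atomic context: no sleeping, no log noise. */
static void example_refill_atomic(struct example_ring *ring)
{
        while (ring->head < ARRAY_SIZE(ring->slots)) {
                void *buf = kmalloc(2048, GFP_NOWAIT | __GFP_NOWARN);

                if (!buf)
                        return; /* a later, sleepable refill catches up */
                ring->slots[ring->head++] = buf;
        }
}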
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index ccdd304eae0a..1d0afb1dd77b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -269,7 +269,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
goto out;
} else {
nents = ret;
- sg = kcalloc(nents, sizeof(*sg), GFP_KERNEL);
+ sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
if (!sg) {
ret = -ENOMEM;
goto out;
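
Switching from kcalloc() to kmalloc_array() keeps the overflow-checked sizing but drops the zeroing, which is redundant when the buffer is fully initialized immediately afterwards, as sg_init_table() does in __rds_rdma_map(). A self-contained sketch of the combination:

#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *example_alloc_sg(unsigned int nents)
{
        struct scatterlist *sg;

        /* Overflow-checked array sizing without a redundant memset. */
        sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
        if (sg)
                sg_init_table(sg, nents); /* writes every entry anyway */
        return sg;
}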
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 186c8a889b16..0a2f4817ec6c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -308,9 +308,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
key = NULL; /* a no-security key */
memset(&p, 0, sizeof(p));
- p.user_call_ID = user_call_ID;
- p.tx_total_len = tx_total_len;
- p.interruptibility = interruptibility;
+ p.user_call_ID = user_call_ID;
+ p.tx_total_len = tx_total_len;
+ p.interruptibility = interruptibility;
+ p.kernel = true;
memset(&cp, 0, sizeof(cp));
cp.local = rx->local;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 97aebb5d19db..dce48162f6c2 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -76,14 +76,12 @@ struct rxrpc_net {
struct work_struct service_conn_reaper;
struct timer_list service_conn_reap_timer;
- unsigned int nr_client_conns;
- unsigned int nr_active_client_conns;
- bool kill_all_client_conns;
bool live;
+
+ bool kill_all_client_conns;
+ atomic_t nr_client_conns;
spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
- struct list_head waiting_client_conns;
- struct list_head active_client_conns;
struct list_head idle_client_conns;
struct work_struct client_conn_reaper;
struct timer_list client_conn_reap_timer;
@@ -275,8 +273,8 @@ struct rxrpc_local {
struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */
struct sk_buff_head reject_queue; /* packets awaiting rejection */
struct sk_buff_head event_queue; /* endpoint event packets awaiting processing */
- struct rb_root client_conns; /* Client connections by socket params */
- spinlock_t client_conns_lock; /* Lock for client_conns */
+ struct rb_root client_bundles; /* Client connection bundles by socket params */
+ spinlock_t client_bundles_lock; /* Lock for client_bundles */
spinlock_t lock; /* access lock */
rwlock_t services_lock; /* lock for services list */
int debug_id; /* debug ID for printks */
@@ -353,10 +351,7 @@ struct rxrpc_conn_parameters {
enum rxrpc_conn_flag {
RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */
RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */
- RXRPC_CONN_IN_CLIENT_CONNS, /* Conn is in local->client_conns */
- RXRPC_CONN_EXPOSED, /* Conn has extra ref for exposure */
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
- RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */
RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */
@@ -377,19 +372,6 @@ enum rxrpc_conn_event {
};
/*
- * The connection cache state.
- */
-enum rxrpc_conn_cache_state {
- RXRPC_CONN_CLIENT_INACTIVE, /* Conn is not yet listed */
- RXRPC_CONN_CLIENT_WAITING, /* Conn is on wait list, waiting for capacity */
- RXRPC_CONN_CLIENT_ACTIVE, /* Conn is on active list, doing calls */
- RXRPC_CONN_CLIENT_UPGRADE, /* Conn is on active list, probing for upgrade */
- RXRPC_CONN_CLIENT_CULLED, /* Conn is culled and delisted, doing calls */
- RXRPC_CONN_CLIENT_IDLE, /* Conn is on idle list, doing mostly nothing */
- RXRPC_CONN__NR_CACHE_STATES
-};
-
-/*
* The connection protocol state.
*/
enum rxrpc_conn_proto_state {
@@ -405,6 +387,23 @@ enum rxrpc_conn_proto_state {
};
/*
+ * RxRPC client connection bundle.
+ */
+struct rxrpc_bundle {
+ struct rxrpc_conn_parameters params;
+ atomic_t usage;
+ unsigned int debug_id;
+ bool try_upgrade; /* True if the bundle is attempting upgrade */
+ bool alloc_conn; /* True if someone's getting a conn */
+ short alloc_error; /* Error from last conn allocation */
+ spinlock_t channel_lock;
+ struct rb_node local_node; /* Node in local->client_conns */
+ struct list_head waiting_calls; /* Calls waiting for channels */
+ unsigned long avail_chans; /* Mask of available channels */
+ struct rxrpc_connection *conns[4]; /* The connections in the bundle (max 4) */
+};
+
+/*
* RxRPC connection definition
* - matched by { local, peer, epoch, conn_id, direction }
* - each connection can only handle four simultaneous calls
@@ -417,10 +416,7 @@ struct rxrpc_connection {
struct rcu_head rcu;
struct list_head cache_link;
- spinlock_t channel_lock;
- unsigned char active_chans; /* Mask of active channels */
-#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
- struct list_head waiting_calls; /* Calls waiting for channels */
+ unsigned char act_chans; /* Mask of active channels */
struct rxrpc_channel {
unsigned long final_ack_at; /* Time at which to issue final ACK */
struct rxrpc_call __rcu *call; /* Active call */
@@ -437,10 +433,8 @@ struct rxrpc_connection {
struct timer_list timer; /* Conn event timer */
struct work_struct processor; /* connection event processor */
- union {
- struct rb_node client_node; /* Node in local->client_conns */
- struct rb_node service_node; /* Node in peer->service_conns */
- };
+ struct rxrpc_bundle *bundle; /* Client connection bundle */
+ struct rb_node service_node; /* Node in peer->service_conns */
struct list_head proc_link; /* link in procfs list */
struct list_head link; /* link in master connection list */
struct sk_buff_head rx_queue; /* received conn-level packets */
@@ -452,7 +446,6 @@ struct rxrpc_connection {
unsigned long events;
unsigned long idle_timestamp; /* Time at which last became idle */
spinlock_t state_lock; /* state-change lock */
- enum rxrpc_conn_cache_state cache_state;
enum rxrpc_conn_proto_state state; /* current state of connection */
u32 abort_code; /* Abort code of connection abort */
int debug_id; /* debug ID for printks */
@@ -464,6 +457,7 @@ struct rxrpc_connection {
u8 security_size; /* security header size */
u8 security_ix; /* security type */
u8 out_clientflag; /* RXRPC_CLIENT_INITIATED if we are client */
+ u8 bundle_shift; /* Index into bundle->avail_chans */
short error; /* Local error code */
};
@@ -493,6 +487,8 @@ enum rxrpc_call_flag {
RXRPC_CALL_RX_HEARD, /* The peer responded at least once to this call */
RXRPC_CALL_RX_UNDERRUN, /* Got data underrun */
RXRPC_CALL_DISCONNECTED, /* The call has been disconnected */
+ RXRPC_CALL_KERNEL, /* The call was made by the kernel */
+ RXRPC_CALL_UPGRADE, /* Service upgrade was requested for the call */
};
/*
@@ -576,7 +572,7 @@ struct rxrpc_call {
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
struct list_head link; /* link in master call list */
- struct list_head chan_wait_link; /* Link in conn->waiting_calls */
+ struct list_head chan_wait_link; /* Link in conn->bundle->waiting_calls */
struct hlist_node error_link; /* link in error distribution list */
struct list_head accept_link; /* Link in rx->acceptq */
struct list_head recvmsg_link; /* Link in rx->recvmsg_q */
@@ -726,6 +722,7 @@ struct rxrpc_call_params {
u32 normal; /* Max time since last call packet (msec) */
} timeouts;
u8 nr_timeouts; /* Number of timeouts specified */
+ bool kernel; /* T if kernel is making the call */
enum rxrpc_interruptibility interruptibility; /* How interruptible is the call? */
};
@@ -812,18 +809,19 @@ static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
/*
* conn_client.c
*/
-extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
extern unsigned long rxrpc_conn_idle_client_expiry;
extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
+struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *);
+void rxrpc_put_bundle(struct rxrpc_bundle *);
int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
gfp_t);
void rxrpc_expose_client_call(struct rxrpc_call *);
-void rxrpc_disconnect_client_call(struct rxrpc_call *);
+void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
void rxrpc_put_client_conn(struct rxrpc_connection *);
void rxrpc_discard_expired_client_conns(struct work_struct *);
void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
@@ -833,6 +831,7 @@ void rxrpc_clean_up_local_conns(struct rxrpc_local *);
* conn_event.c
*/
void rxrpc_process_connection(struct work_struct *);
+void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
/*
* conn_object.c
@@ -849,7 +848,7 @@ void rxrpc_disconnect_call(struct rxrpc_call *);
void rxrpc_kill_connection(struct rxrpc_connection *);
bool rxrpc_queue_conn(struct rxrpc_connection *);
void rxrpc_see_connection(struct rxrpc_connection *);
-void rxrpc_get_connection(struct rxrpc_connection *);
+struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *);
struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *);
void rxrpc_put_service_conn(struct rxrpc_connection *);
void rxrpc_service_connection_reaper(struct work_struct *);
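
The new struct rxrpc_bundle tracks the channels of up to four connections in a single avail_chans bitmask, each connection owning a four-bit window starting at bundle_shift = slot * RXRPC_MAXCALLS; grabbing the lowest free channel is then a find-first-set over the mask, as the conn_client.c rewrite below does. A reduced sketch of that allocation scheme, locking elided:

#include <linux/bitops.h>

#define EX_CHANS_PER_CONN       4       /* stands in for RXRPC_MAXCALLS */

/* Make all four channels of connection slot 'slot' available. */
static void ex_open_conn(unsigned long *avail, unsigned int slot)
{
        unsigned int shift = slot * EX_CHANS_PER_CONN, j;

        for (j = 0; j < EX_CHANS_PER_CONN; j++)
                set_bit(shift + j, avail);
}

/* Claim the lowest available channel; caller holds the bundle lock. */
static int ex_grab_channel(unsigned long *avail)
{
        unsigned int channel;

        if (!*avail)
                return -1;
        channel = __ffs(*avail);
        clear_bit(channel, avail);
        return channel;         /* owning conn slot = channel / 4 */
}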
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index ed49769b459d..c845594b663f 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -40,6 +40,11 @@ const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
struct kmem_cache *rxrpc_call_jar;
+static struct semaphore rxrpc_call_limiter =
+ __SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
+static struct semaphore rxrpc_kernel_call_limiter =
+ __SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);
+
static void rxrpc_call_timer_expired(struct timer_list *t)
{
struct rxrpc_call *call = from_timer(call, t, timer);
@@ -209,6 +214,34 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
}
/*
+ * Wait for a call slot to become available.
+ */
+static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
+{
+ struct semaphore *limiter = &rxrpc_call_limiter;
+
+ if (p->kernel)
+ limiter = &rxrpc_kernel_call_limiter;
+ if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
+ down(limiter);
+ return limiter;
+ }
+ return down_interruptible(limiter) < 0 ? NULL : limiter;
+}
+
+/*
+ * Release a call slot.
+ */
+static void rxrpc_put_call_slot(struct rxrpc_call *call)
+{
+ struct semaphore *limiter = &rxrpc_call_limiter;
+
+ if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
+ limiter = &rxrpc_kernel_call_limiter;
+ up(limiter);
+}
+
+/*
* Set up a call for the given parameters.
* - Called with the socket lock held, which it must release.
* - If it returns a call, the call's lock will need releasing by the caller.
@@ -224,15 +257,21 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
{
struct rxrpc_call *call, *xcall;
struct rxrpc_net *rxnet;
+ struct semaphore *limiter;
struct rb_node *parent, **pp;
const void *here = __builtin_return_address(0);
int ret;
_enter("%p,%lx", rx, p->user_call_ID);
+ limiter = rxrpc_get_call_slot(p, gfp);
+ if (!limiter)
+ return ERR_PTR(-ERESTARTSYS);
+
call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
if (IS_ERR(call)) {
release_sock(&rx->sk);
+ up(limiter);
_leave(" = %ld", PTR_ERR(call));
return call;
}
@@ -242,6 +281,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
atomic_read(&call->usage),
here, (const void *)p->user_call_ID);
+ if (p->kernel)
+ __set_bit(RXRPC_CALL_KERNEL, &call->flags);
/* We need to protect a partially set up call against the user as we
* will be acting outside the socket lock.
@@ -468,6 +509,8 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
BUG();
spin_unlock_bh(&call->lock);
+ rxrpc_put_call_slot(call);
+
del_timer_sync(&call->timer);
/* Make sure we don't get any more notifications */
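
The call limiters added above are plain counting semaphores initialized to the cap: down() (or down_interruptible() for interruptible calls) blocks new calls once 1000 are in flight, up() on release returns the slot, and kernel-originated calls (p->kernel, RXRPC_CALL_KERNEL) get a separate limiter so they cannot be starved by userspace. A reduced sketch of the pattern with invented names:

#include <linux/semaphore.h>

#define EXAMPLE_MAX_INFLIGHT 1000       /* cap chosen for illustration */

static struct semaphore example_limiter =
        __SEMAPHORE_INITIALIZER(example_limiter, EXAMPLE_MAX_INFLIGHT);

static int example_start_op(bool interruptible)
{
        if (!interruptible) {
                down(&example_limiter);         /* always gets a slot */
                return 0;
        }
        return down_interruptible(&example_limiter); /* -EINTR on signal */
}

static void example_end_op(void)
{
        up(&example_limiter);   /* hand the slot to the next waiter */
}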
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 159e3eda7914..7e574c75be8e 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -1,63 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
*
- * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* Client connections need to be cached for a little while after they've made a
* call so as to handle retransmitted DATA packets in case the server didn't
* receive the final ACK or terminating ABORT we sent it.
*
- * Client connections can be in one of a number of cache states:
- *
- * (1) INACTIVE - The connection is not held in any list and may not have been
- * exposed to the world. If it has been previously exposed, it was
- * discarded from the idle list after expiring.
- *
- * (2) WAITING - The connection is waiting for the number of client conns to
- * drop below the maximum capacity. Calls may be in progress upon it from
- * when it was active and got culled.
- *
- * The connection is on the rxrpc_waiting_client_conns list which is kept
- * in to-be-granted order. Culled conns with waiters go to the back of
- * the queue just like new conns.
- *
- * (3) ACTIVE - The connection has at least one call in progress upon it, it
- * may freely grant available channels to new calls and calls may be
- * waiting on it for channels to become available.
- *
- * The connection is on the rxnet->active_client_conns list which is kept
- * in activation order for culling purposes.
- *
- * rxrpc_nr_active_client_conns is held incremented also.
- *
- * (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
- * being used to probe for service upgrade.
- *
- * (5) CULLED - The connection got summarily culled to try and free up
- * capacity. Calls currently in progress on the connection are allowed to
- * continue, but new calls will have to wait. There can be no waiters in
- * this state - the conn would have to go to the WAITING state instead.
- *
- * (6) IDLE - The connection has no calls in progress upon it and must have
- * been exposed to the world (ie. the EXPOSED flag must be set). When it
- * expires, the EXPOSED flag is cleared and the connection transitions to
- * the INACTIVE state.
- *
- * The connection is on the rxnet->idle_client_conns list which is kept in
- * order of how soon they'll expire.
- *
* There are flags of relevance to the cache:
*
- * (1) EXPOSED - The connection ID got exposed to the world. If this flag is
- * set, an extra ref is added to the connection preventing it from being
- * reaped when it has no calls outstanding. This flag is cleared and the
- * ref dropped when a conn is discarded from the idle list.
- *
- * This allows us to move terminal call state retransmission to the
- * connection and to discard the call immediately we think it is done
- * with. It also give us a chance to reuse the connection.
- *
* (2) DONT_REUSE - The connection should be discarded as soon as possible and
* should not be reused. This is set when an exclusive connection is used
* or a call ID counter overflows.
@@ -78,7 +30,6 @@
#include "ar-internal.h"
-__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
@@ -89,8 +40,6 @@ __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
-static void rxrpc_cull_active_client_conns(struct rxrpc_net *);
-
/*
* Get a connection ID and epoch for a client connection from the global pool.
* The connection struct pointer is then recorded in the idr radix tree. The
@@ -162,13 +111,50 @@ void rxrpc_destroy_client_conn_ids(void)
}
/*
+ * Allocate a connection bundle.
+ */
+static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
+ gfp_t gfp)
+{
+ struct rxrpc_bundle *bundle;
+
+ bundle = kzalloc(sizeof(*bundle), gfp);
+ if (bundle) {
+ bundle->params = *cp;
+ rxrpc_get_peer(bundle->params.peer);
+ atomic_set(&bundle->usage, 1);
+ spin_lock_init(&bundle->channel_lock);
+ INIT_LIST_HEAD(&bundle->waiting_calls);
+ }
+ return bundle;
+}
+
+struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle)
+{
+ atomic_inc(&bundle->usage);
+ return bundle;
+}
+
+void rxrpc_put_bundle(struct rxrpc_bundle *bundle)
+{
+ unsigned int d = bundle->debug_id;
+ unsigned int u = atomic_dec_return(&bundle->usage);
+
+ _debug("PUT B=%x %u", d, u);
+ if (u == 0) {
+ rxrpc_put_peer(bundle->params.peer);
+ kfree(bundle);
+ }
+}
+
+/*
* Allocate a client connection.
*/
static struct rxrpc_connection *
-rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
+rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
{
struct rxrpc_connection *conn;
- struct rxrpc_net *rxnet = cp->local->rxnet;
+ struct rxrpc_net *rxnet = bundle->params.local->rxnet;
int ret;
_enter("");
@@ -180,15 +166,11 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
}
atomic_set(&conn->usage, 1);
- if (cp->exclusive)
- __set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
- if (cp->upgrade)
- __set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
-
- conn->params = *cp;
+ conn->bundle = bundle;
+ conn->params = bundle->params;
conn->out_clientflag = RXRPC_CLIENT_INITIATED;
conn->state = RXRPC_CONN_CLIENT;
- conn->service_id = cp->service_id;
+ conn->service_id = conn->params.service_id;
ret = rxrpc_get_client_connection_id(conn, gfp);
if (ret < 0)
@@ -207,14 +189,16 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
write_unlock(&rxnet->conn_lock);
- /* We steal the caller's peer ref. */
- cp->peer = NULL;
+ rxrpc_get_bundle(bundle);
+ rxrpc_get_peer(conn->params.peer);
rxrpc_get_local(conn->params.local);
key_get(conn->params.key);
trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
atomic_read(&conn->usage),
__builtin_return_address(0));
+
+ atomic_inc(&rxnet->nr_client_conns);
trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
_leave(" = %p", conn);
return conn;
@@ -234,13 +218,18 @@ error_0:
*/
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
- struct rxrpc_net *rxnet = conn->params.local->rxnet;
+ struct rxrpc_net *rxnet;
int id_cursor, id, distance, limit;
+ if (!conn)
+ goto dont_reuse;
+
+ rxnet = conn->params.local->rxnet;
if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
goto dont_reuse;
- if (conn->proto.epoch != rxnet->epoch)
+ if (conn->state != RXRPC_CONN_CLIENT ||
+ conn->proto.epoch != rxnet->epoch)
goto mark_dont_reuse;
/* The IDR tree gets very expensive on memory if the connection IDs are
@@ -254,7 +243,7 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
distance = id - id_cursor;
if (distance < 0)
distance = -distance;
- limit = max(rxrpc_max_client_connections * 4, 1024U);
+ limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
if (distance > limit)
goto mark_dont_reuse;
@@ -267,277 +256,247 @@ dont_reuse:
}
/*
- * Create or find a client connection to use for a call.
- *
- * If we return with a connection, the call will be on its waiting list. It's
- * left to the caller to assign a channel and wake up the call.
+ * Look up the conn bundle that matches the connection parameters, adding it if
+ * it doesn't yet exist.
*/
-static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
- struct rxrpc_call *call,
- struct rxrpc_conn_parameters *cp,
- struct sockaddr_rxrpc *srx,
- gfp_t gfp)
+static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
+ gfp_t gfp)
{
- struct rxrpc_connection *conn, *candidate = NULL;
+ static atomic_t rxrpc_bundle_id;
+ struct rxrpc_bundle *bundle, *candidate;
struct rxrpc_local *local = cp->local;
struct rb_node *p, **pp, *parent;
long diff;
- int ret = -ENOMEM;
- _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
+ _enter("{%px,%x,%u,%u}",
+ cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);
- cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
- if (!cp->peer)
- goto error;
+ if (cp->exclusive)
+ return rxrpc_alloc_bundle(cp, gfp);
- call->cong_cwnd = cp->peer->cong_cwnd;
- if (call->cong_cwnd >= call->cong_ssthresh)
- call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
- else
- call->cong_mode = RXRPC_CALL_SLOW_START;
+ /* First, see if the bundle is already there. */
+ _debug("search 1");
+ spin_lock(&local->client_bundles_lock);
+ p = local->client_bundles.rb_node;
+ while (p) {
+ bundle = rb_entry(p, struct rxrpc_bundle, local_node);
- /* If the connection is not meant to be exclusive, search the available
- * connections to see if the connection we want to use already exists.
- */
- if (!cp->exclusive) {
- _debug("search 1");
- spin_lock(&local->client_conns_lock);
- p = local->client_conns.rb_node;
- while (p) {
- conn = rb_entry(p, struct rxrpc_connection, client_node);
-
-#define cmp(X) ((long)conn->params.X - (long)cp->X)
- diff = (cmp(peer) ?:
- cmp(key) ?:
- cmp(security_level) ?:
- cmp(upgrade));
+#define cmp(X) ((long)bundle->params.X - (long)cp->X)
+ diff = (cmp(peer) ?:
+ cmp(key) ?:
+ cmp(security_level) ?:
+ cmp(upgrade));
#undef cmp
- if (diff < 0) {
- p = p->rb_left;
- } else if (diff > 0) {
- p = p->rb_right;
- } else {
- if (rxrpc_may_reuse_conn(conn) &&
- rxrpc_get_connection_maybe(conn))
- goto found_extant_conn;
- /* The connection needs replacing. It's better
- * to effect that when we have something to
- * replace it with so that we don't have to
- * rebalance the tree twice.
- */
- break;
- }
- }
- spin_unlock(&local->client_conns_lock);
- }
-
- /* There wasn't a connection yet or we need an exclusive connection.
- * We need to create a candidate and then potentially redo the search
- * in case we're racing with another thread also trying to connect on a
- * shareable connection.
- */
- _debug("new conn");
- candidate = rxrpc_alloc_client_connection(cp, gfp);
- if (IS_ERR(candidate)) {
- ret = PTR_ERR(candidate);
- goto error_peer;
+ if (diff < 0)
+ p = p->rb_left;
+ else if (diff > 0)
+ p = p->rb_right;
+ else
+ goto found_bundle;
}
+ spin_unlock(&local->client_bundles_lock);
+ _debug("not found");
- /* Add the call to the new connection's waiting list in case we're
- * going to have to wait for the connection to come live. It's our
- * connection, so we want first dibs on the channel slots. We would
- * normally have to take channel_lock but we do this before anyone else
- * can see the connection.
- */
- list_add(&call->chan_wait_link, &candidate->waiting_calls);
-
- if (cp->exclusive) {
- call->conn = candidate;
- call->security = candidate->security;
- call->security_ix = candidate->security_ix;
- call->service_id = candidate->service_id;
- _leave(" = 0 [exclusive %d]", candidate->debug_id);
- return 0;
- }
+ /* It wasn't. We need to add one. */
+ candidate = rxrpc_alloc_bundle(cp, gfp);
+ if (!candidate)
+ return NULL;
- /* Publish the new connection for userspace to find. We need to redo
- * the search before doing this lest we race with someone else adding a
- * conflicting instance.
- */
_debug("search 2");
- spin_lock(&local->client_conns_lock);
-
- pp = &local->client_conns.rb_node;
+ spin_lock(&local->client_bundles_lock);
+ pp = &local->client_bundles.rb_node;
parent = NULL;
while (*pp) {
parent = *pp;
- conn = rb_entry(parent, struct rxrpc_connection, client_node);
+ bundle = rb_entry(parent, struct rxrpc_bundle, local_node);
-#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
+#define cmp(X) ((long)bundle->params.X - (long)cp->X)
diff = (cmp(peer) ?:
cmp(key) ?:
cmp(security_level) ?:
cmp(upgrade));
#undef cmp
- if (diff < 0) {
+ if (diff < 0)
pp = &(*pp)->rb_left;
- } else if (diff > 0) {
+ else if (diff > 0)
pp = &(*pp)->rb_right;
- } else {
- if (rxrpc_may_reuse_conn(conn) &&
- rxrpc_get_connection_maybe(conn))
- goto found_extant_conn;
- /* The old connection is from an outdated epoch. */
- _debug("replace conn");
- clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
- rb_replace_node(&conn->client_node,
- &candidate->client_node,
- &local->client_conns);
- trace_rxrpc_client(conn, -1, rxrpc_client_replace);
- goto candidate_published;
- }
+ else
+ goto found_bundle_free;
}
- _debug("new conn");
- rb_link_node(&candidate->client_node, parent, pp);
- rb_insert_color(&candidate->client_node, &local->client_conns);
-
-candidate_published:
- set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
- call->conn = candidate;
- call->security = candidate->security;
- call->security_ix = candidate->security_ix;
- call->service_id = candidate->service_id;
- spin_unlock(&local->client_conns_lock);
- _leave(" = 0 [new %d]", candidate->debug_id);
- return 0;
+ _debug("new bundle");
+ candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
+ rb_link_node(&candidate->local_node, parent, pp);
+ rb_insert_color(&candidate->local_node, &local->client_bundles);
+ rxrpc_get_bundle(candidate);
+ spin_unlock(&local->client_bundles_lock);
+ _leave(" = %u [new]", candidate->debug_id);
+ return candidate;
+
+found_bundle_free:
+ kfree(candidate);
+found_bundle:
+ rxrpc_get_bundle(bundle);
+ spin_unlock(&local->client_bundles_lock);
+ _leave(" = %u [found]", bundle->debug_id);
+ return bundle;
+}
- /* We come here if we found a suitable connection already in existence.
- * Discard any candidate we may have allocated, and try to get a
- * channel on this one.
- */
-found_extant_conn:
- _debug("found conn");
- spin_unlock(&local->client_conns_lock);
+/*
+ * Create or find a client bundle to use for a call.
+ *
+ * If we return with a connection, the call will be on its waiting list. It's
+ * left to the caller to assign a channel and wake up the call.
+ */
+static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
+ struct rxrpc_call *call,
+ struct rxrpc_conn_parameters *cp,
+ struct sockaddr_rxrpc *srx,
+ gfp_t gfp)
+{
+ struct rxrpc_bundle *bundle;
- if (candidate) {
- trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
- rxrpc_put_connection(candidate);
- candidate = NULL;
- }
+ _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- spin_lock(&conn->channel_lock);
- call->conn = conn;
- call->security = conn->security;
- call->security_ix = conn->security_ix;
- call->service_id = conn->service_id;
- list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
- spin_unlock(&conn->channel_lock);
- _leave(" = 0 [extant %d]", conn->debug_id);
- return 0;
+ cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
+ if (!cp->peer)
+ goto error;
+
+ call->cong_cwnd = cp->peer->cong_cwnd;
+ if (call->cong_cwnd >= call->cong_ssthresh)
+ call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+ else
+ call->cong_mode = RXRPC_CALL_SLOW_START;
+ if (cp->upgrade)
+ __set_bit(RXRPC_CALL_UPGRADE, &call->flags);
+
+ /* Find the client connection bundle. */
+ bundle = rxrpc_look_up_bundle(cp, gfp);
+ if (!bundle)
+ goto error;
+
+ /* Get this call queued. Someone else may activate it whilst we're
+ * lining up a new connection, but that's fine.
+ */
+ spin_lock(&bundle->channel_lock);
+ list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
+ spin_unlock(&bundle->channel_lock);
+
+ _leave(" = [B=%x]", bundle->debug_id);
+ return bundle;
-error_peer:
- rxrpc_put_peer(cp->peer);
- cp->peer = NULL;
error:
- _leave(" = %d", ret);
- return ret;
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
}
/*
- * Activate a connection.
+ * Allocate a new connection and add it into a bundle.
*/
-static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
- struct rxrpc_connection *conn)
+static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
+ __releases(bundle->channel_lock)
{
- if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
- trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
- conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
- } else {
- trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
- conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
- }
- rxnet->nr_active_client_conns++;
- list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
-}
+ struct rxrpc_connection *candidate = NULL, *old = NULL;
+ bool conflict;
+ int i;
-/*
- * Attempt to animate a connection for a new call.
- *
- * If it's not exclusive, the connection is in the endpoint tree, and we're in
- * the conn's list of those waiting to grab a channel. There is, however, a
- * limit on the number of live connections allowed at any one time, so we may
- * have to wait for capacity to become available.
- *
- * Note that a connection on the waiting queue might *also* have active
- * channels if it has been culled to make space and then re-requested by a new
- * call.
- */
-static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
- struct rxrpc_connection *conn)
-{
- unsigned int nr_conns;
+ _enter("");
- _enter("%d,%d", conn->debug_id, conn->cache_state);
+ conflict = bundle->alloc_conn;
+ if (!conflict)
+ bundle->alloc_conn = true;
+ spin_unlock(&bundle->channel_lock);
+ if (conflict) {
+ _leave(" [conf]");
+ return;
+ }
- if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
- conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
- goto out;
+ candidate = rxrpc_alloc_client_connection(bundle, gfp);
- spin_lock(&rxnet->client_conn_cache_lock);
+ spin_lock(&bundle->channel_lock);
+ bundle->alloc_conn = false;
- nr_conns = rxnet->nr_client_conns;
- if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
- trace_rxrpc_client(conn, -1, rxrpc_client_count);
- rxnet->nr_client_conns = nr_conns + 1;
+ if (IS_ERR(candidate)) {
+ bundle->alloc_error = PTR_ERR(candidate);
+ spin_unlock(&bundle->channel_lock);
+ _leave(" [err %ld]", PTR_ERR(candidate));
+ return;
}
- switch (conn->cache_state) {
- case RXRPC_CONN_CLIENT_ACTIVE:
- case RXRPC_CONN_CLIENT_UPGRADE:
- case RXRPC_CONN_CLIENT_WAITING:
- break;
-
- case RXRPC_CONN_CLIENT_INACTIVE:
- case RXRPC_CONN_CLIENT_CULLED:
- case RXRPC_CONN_CLIENT_IDLE:
- if (nr_conns >= rxrpc_max_client_connections)
- goto wait_for_capacity;
- goto activate_conn;
+ bundle->alloc_error = 0;
+
+ for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
+ unsigned int shift = i * RXRPC_MAXCALLS;
+ int j;
+
+ old = bundle->conns[i];
+ if (!rxrpc_may_reuse_conn(old)) {
+ if (old)
+ trace_rxrpc_client(old, -1, rxrpc_client_replace);
+ candidate->bundle_shift = shift;
+ bundle->conns[i] = candidate;
+ for (j = 0; j < RXRPC_MAXCALLS; j++)
+ set_bit(shift + j, &bundle->avail_chans);
+ candidate = NULL;
+ break;
+ }
- default:
- BUG();
+ old = NULL;
}
-out_unlock:
- spin_unlock(&rxnet->client_conn_cache_lock);
-out:
- _leave(" [%d]", conn->cache_state);
- return;
+ spin_unlock(&bundle->channel_lock);
-activate_conn:
- _debug("activate");
- rxrpc_activate_conn(rxnet, conn);
- goto out_unlock;
-
-wait_for_capacity:
- _debug("wait");
- trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
- conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
- list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
- goto out_unlock;
+ if (candidate) {
+ _debug("discard C=%x", candidate->debug_id);
+ trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
+ rxrpc_put_connection(candidate);
+ }
+
+ rxrpc_put_connection(old);
+ _leave("");
}
/*
- * Deactivate a channel.
+ * Add a connection to a bundle if there are no usable connections or we have
+ * connections waiting for extra capacity.
*/
-static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
- unsigned int channel)
+static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
{
- struct rxrpc_channel *chan = &conn->channels[channel];
+ struct rxrpc_call *call;
+ int i, usable;
- rcu_assign_pointer(chan->call, NULL);
- conn->active_chans &= ~(1 << channel);
+ _enter("");
+
+ spin_lock(&bundle->channel_lock);
+
+ /* See if there are any usable connections. */
+ usable = 0;
+ for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
+ if (rxrpc_may_reuse_conn(bundle->conns[i]))
+ usable++;
+
+ if (!usable && !list_empty(&bundle->waiting_calls)) {
+ call = list_first_entry(&bundle->waiting_calls,
+ struct rxrpc_call, chan_wait_link);
+ if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
+ bundle->try_upgrade = true;
+ }
+
+ if (!usable)
+ goto alloc_conn;
+
+ if (!bundle->avail_chans &&
+ !bundle->try_upgrade &&
+ !list_empty(&bundle->waiting_calls) &&
+ usable < ARRAY_SIZE(bundle->conns))
+ goto alloc_conn;
+
+ spin_unlock(&bundle->channel_lock);
+ _leave("");
+ return;
+
+alloc_conn:
+ return rxrpc_add_conn_to_bundle(bundle, gfp);
}
/*
@@ -549,35 +508,42 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
unsigned int channel)
{
struct rxrpc_channel *chan = &conn->channels[channel];
- struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
+ struct rxrpc_bundle *bundle = conn->bundle;
+ struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
struct rxrpc_call, chan_wait_link);
u32 call_id = chan->call_counter + 1;
+ _enter("C=%x,%u", conn->debug_id, channel);
+
trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
/* Cancel the final ACK on the previous call if it hasn't been sent yet
* as the DATA packet will implicitly ACK it.
*/
clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
-
- write_lock_bh(&call->state_lock);
- call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
- write_unlock_bh(&call->state_lock);
+ clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);
rxrpc_see_call(call);
list_del_init(&call->chan_wait_link);
- conn->active_chans |= 1 << channel;
call->peer = rxrpc_get_peer(conn->params.peer);
+ call->conn = rxrpc_get_connection(conn);
call->cid = conn->proto.cid | channel;
call->call_id = call_id;
+ call->security = conn->security;
+ call->security_ix = conn->security_ix;
+ call->service_id = conn->service_id;
trace_rxrpc_connect_call(call);
_net("CONNECT call %08x:%08x as call %d on conn %d",
call->cid, call->call_id, call->debug_id, conn->debug_id);
- /* Paired with the read barrier in rxrpc_wait_for_channel(). This
- * orders cid and epoch in the connection wrt to call_id without the
- * need to take the channel_lock.
+ write_lock_bh(&call->state_lock);
+ call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
+ write_unlock_bh(&call->state_lock);
+
+ /* Paired with the read barrier in rxrpc_connect_call(). This orders
+ * cid and epoch in the connection wrt to call_id without the need to
+ * take the channel_lock.
*
* We provisionally assign a callNumber at this point, but we don't
* confirm it until the call is about to be exposed.
@@ -586,101 +552,137 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
* at the call ID through a connection channel.
*/
smp_wmb();
- chan->call_id = call_id;
- chan->call_debug_id = call->debug_id;
+
+ chan->call_id = call_id;
+ chan->call_debug_id = call->debug_id;
rcu_assign_pointer(chan->call, call);
wake_up(&call->waitq);
}
/*
+ * Remove a connection from the idle list if it's on it.
+ */
+static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
+{
+ struct rxrpc_net *rxnet = bundle->params.local->rxnet;
+ bool drop_ref;
+
+ if (!list_empty(&conn->cache_link)) {
+ drop_ref = false;
+ spin_lock(&rxnet->client_conn_cache_lock);
+ if (!list_empty(&conn->cache_link)) {
+ list_del_init(&conn->cache_link);
+ drop_ref = true;
+ }
+ spin_unlock(&rxnet->client_conn_cache_lock);
+ if (drop_ref)
+ rxrpc_put_connection(conn);
+ }
+}
+
+/*
* Assign channels and callNumbers to waiting calls with channel_lock
* held by caller.
*/
-static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
+static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
{
- u8 avail, mask;
-
- switch (conn->cache_state) {
- case RXRPC_CONN_CLIENT_ACTIVE:
- mask = RXRPC_ACTIVE_CHANS_MASK;
- break;
- case RXRPC_CONN_CLIENT_UPGRADE:
- mask = 0x01;
- break;
- default:
- return;
- }
+ struct rxrpc_connection *conn;
+ unsigned long avail, mask;
+ unsigned int channel, slot;
- while (!list_empty(&conn->waiting_calls) &&
- (avail = ~conn->active_chans,
- avail &= mask,
- avail != 0))
- rxrpc_activate_one_channel(conn, __ffs(avail));
+ if (bundle->try_upgrade)
+ mask = 1;
+ else
+ mask = ULONG_MAX;
+
+ while (!list_empty(&bundle->waiting_calls)) {
+ avail = bundle->avail_chans & mask;
+ if (!avail)
+ break;
+ channel = __ffs(avail);
+ clear_bit(channel, &bundle->avail_chans);
+
+ slot = channel / RXRPC_MAXCALLS;
+ conn = bundle->conns[slot];
+ if (!conn)
+ break;
+
+ if (bundle->try_upgrade)
+ set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
+ rxrpc_unidle_conn(bundle, conn);
+
+ channel &= (RXRPC_MAXCALLS - 1);
+ conn->act_chans |= 1 << channel;
+ rxrpc_activate_one_channel(conn, channel);
+ }
}
/*
* Assign channels and callNumbers to waiting calls.
*/
-static void rxrpc_activate_channels(struct rxrpc_connection *conn)
+static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
- _enter("%d", conn->debug_id);
+ _enter("B=%x", bundle->debug_id);
- trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);
+ trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
- if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
+ if (!bundle->avail_chans)
return;
- spin_lock(&conn->channel_lock);
- rxrpc_activate_channels_locked(conn);
- spin_unlock(&conn->channel_lock);
+ spin_lock(&bundle->channel_lock);
+ rxrpc_activate_channels_locked(bundle);
+ spin_unlock(&bundle->channel_lock);
_leave("");
}
/*
* Wait for a callNumber and a channel to be granted to a call.
*/
-static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
+static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
+ struct rxrpc_call *call, gfp_t gfp)
{
+ DECLARE_WAITQUEUE(myself, current);
int ret = 0;
_enter("%d", call->debug_id);
- if (!call->call_id) {
- DECLARE_WAITQUEUE(myself, current);
+ if (!gfpflags_allow_blocking(gfp)) {
+ rxrpc_maybe_add_conn(bundle, gfp);
+ rxrpc_activate_channels(bundle);
+ ret = bundle->alloc_error ?: -EAGAIN;
+ goto out;
+ }
- if (!gfpflags_allow_blocking(gfp)) {
- ret = -EAGAIN;
- goto out;
+ add_wait_queue_exclusive(&call->waitq, &myself);
+ for (;;) {
+ rxrpc_maybe_add_conn(bundle, gfp);
+ rxrpc_activate_channels(bundle);
+ ret = bundle->alloc_error;
+ if (ret < 0)
+ break;
+
+ switch (call->interruptibility) {
+ case RXRPC_INTERRUPTIBLE:
+ case RXRPC_PREINTERRUPTIBLE:
+ set_current_state(TASK_INTERRUPTIBLE);
+ break;
+ case RXRPC_UNINTERRUPTIBLE:
+ default:
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ break;
}
-
- add_wait_queue_exclusive(&call->waitq, &myself);
- for (;;) {
- switch (call->interruptibility) {
- case RXRPC_INTERRUPTIBLE:
- case RXRPC_PREINTERRUPTIBLE:
- set_current_state(TASK_INTERRUPTIBLE);
- break;
- case RXRPC_UNINTERRUPTIBLE:
- default:
- set_current_state(TASK_UNINTERRUPTIBLE);
- break;
- }
- if (call->call_id)
- break;
- if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
- call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
- signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
- schedule();
+ if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
+ break;
+ if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
+ call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
+ signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
}
- remove_wait_queue(&call->waitq, &myself);
- __set_current_state(TASK_RUNNING);
+ schedule();
}
-
- /* Paired with the write barrier in rxrpc_activate_one_channel(). */
- smp_rmb();
+ remove_wait_queue(&call->waitq, &myself);
+ __set_current_state(TASK_RUNNING);
out:
_leave(" = %d", ret);
@@ -697,52 +699,50 @@ int rxrpc_connect_call(struct rxrpc_sock *rx,
struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
+ struct rxrpc_bundle *bundle;
struct rxrpc_net *rxnet = cp->local->rxnet;
- int ret;
+ int ret = 0;
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
- rxrpc_cull_active_client_conns(rxnet);
- ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
- if (ret < 0)
+ bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
+ if (IS_ERR(bundle)) {
+ ret = PTR_ERR(bundle);
goto out;
+ }
- rxrpc_animate_client_conn(rxnet, call->conn);
- rxrpc_activate_channels(call->conn);
-
- ret = rxrpc_wait_for_channel(call, gfp);
- if (ret < 0) {
- trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
- rxrpc_disconnect_client_call(call);
- goto out;
+ if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
+ ret = rxrpc_wait_for_channel(bundle, call, gfp);
+ if (ret < 0)
+ goto wait_failed;
}
- spin_lock_bh(&call->conn->params.peer->lock);
- hlist_add_head_rcu(&call->error_link,
- &call->conn->params.peer->error_targets);
- spin_unlock_bh(&call->conn->params.peer->lock);
+granted_channel:
+ /* Paired with the write barrier in rxrpc_activate_one_channel(). */
+ smp_rmb();
+out_put_bundle:
+ rxrpc_put_bundle(bundle);
out:
_leave(" = %d", ret);
return ret;
-}
-/*
- * Note that a connection is about to be exposed to the world. Once it is
- * exposed, we maintain an extra ref on it that stops it from being summarily
- * discarded before it's (a) had a chance to deal with retransmission and (b)
- * had a chance at re-use (the per-connection security negotiation is
- * expensive).
- */
-static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
- unsigned int channel)
-{
- if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
- trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
- rxrpc_get_connection(conn);
+wait_failed:
+ spin_lock(&bundle->channel_lock);
+ list_del_init(&call->chan_wait_link);
+ spin_unlock(&bundle->channel_lock);
+
+ if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
+ ret = 0;
+ goto granted_channel;
}
+
+ trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
+ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
+ rxrpc_disconnect_client_call(bundle, call);
+ goto out_put_bundle;
}
/*
@@ -764,7 +764,7 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
chan->call_counter++;
if (chan->call_counter >= INT_MAX)
set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
- rxrpc_expose_client_conn(conn, channel);
+ trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
}
}
@@ -773,62 +773,56 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
*/
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
- unsigned long now = jiffies;
- unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
+ if (!rxnet->kill_all_client_conns) {
+ unsigned long now = jiffies;
+ unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
- if (rxnet->live)
- timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+ if (rxnet->live)
+ timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+ }
}
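
timer_reduce() behaves like mod_timer() except that it only ever moves a
pending timer's expiry earlier (or starts a stopped timer), so racing
callers cannot push an already-scheduled reap further out; the new
kill_all_client_conns guard just avoids re-arming the timer while the
namespace is being torn down. Sketch with illustrative names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void set_reap_timer(struct timer_list *t, unsigned long delay)
    {
        unsigned long reap_at = jiffies + delay;

        /* never later than whatever is already pending */
        timer_reduce(t, reap_at);
    }
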
/*
* Disconnect a client call.
*/
-void rxrpc_disconnect_client_call(struct rxrpc_call *call)
+void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
- struct rxrpc_connection *conn = call->conn;
+ struct rxrpc_connection *conn;
struct rxrpc_channel *chan = NULL;
- struct rxrpc_net *rxnet = conn->params.local->rxnet;
- unsigned int channel = -1;
+ struct rxrpc_net *rxnet = bundle->params.local->rxnet;
+ unsigned int channel;
+ bool may_reuse;
u32 cid;
- spin_lock(&conn->channel_lock);
- set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+ _enter("c=%x", call->debug_id);
- cid = call->cid;
- if (cid) {
- channel = cid & RXRPC_CHANNELMASK;
- chan = &conn->channels[channel];
- }
- trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
+ spin_lock(&bundle->channel_lock);
+ set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
/* Calls that have never actually been assigned a channel can simply be
- * discarded. If the conn didn't get used either, it will follow
- * immediately unless someone else grabs it in the meantime.
+ * discarded.
*/
- if (!list_empty(&call->chan_wait_link)) {
+ conn = call->conn;
+ if (!conn) {
_debug("call is waiting");
ASSERTCMP(call->call_id, ==, 0);
ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
list_del_init(&call->chan_wait_link);
-
- trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);
-
- /* We must deactivate or idle the connection if it's now
- * waiting for nothing.
- */
- spin_lock(&rxnet->client_conn_cache_lock);
- if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
- list_empty(&conn->waiting_calls) &&
- !conn->active_chans)
- goto idle_connection;
goto out;
}
+ cid = call->cid;
+ channel = cid & RXRPC_CHANNELMASK;
+ chan = &conn->channels[channel];
+ trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
+
if (rcu_access_pointer(chan->call) != call) {
- spin_unlock(&conn->channel_lock);
+ spin_unlock(&bundle->channel_lock);
BUG();
}
+ may_reuse = rxrpc_may_reuse_conn(conn);
+
/* If a client call was exposed to the world, we save the result for
* retransmission.
*
@@ -841,14 +835,21 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
_debug("exposed %u,%u", call->call_id, call->abort_code);
__rxrpc_disconnect_call(conn, call);
+
+ if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
+ trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
+ bundle->try_upgrade = false;
+ if (may_reuse)
+ rxrpc_activate_channels_locked(bundle);
+ }
+
}
/* See if we can pass the channel directly to another call. */
- if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
- !list_empty(&conn->waiting_calls)) {
+ if (may_reuse && !list_empty(&bundle->waiting_calls)) {
trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
rxrpc_activate_one_channel(conn, channel);
- goto out_2;
+ goto out;
}
/* Schedule the final ACK to be transmitted in a short while so that it
@@ -865,128 +866,99 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
rxrpc_reduce_conn_timer(conn, final_ack_at);
}
- /* Things are more complex and we need the cache lock. We might be
- * able to simply idle the conn or it might now be lurking on the wait
- * list. It might even get moved back to the active list whilst we're
- * waiting for the lock.
- */
- spin_lock(&rxnet->client_conn_cache_lock);
-
- switch (conn->cache_state) {
- case RXRPC_CONN_CLIENT_UPGRADE:
- /* Deal with termination of a service upgrade probe. */
- if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
- clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
- trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
- conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
- rxrpc_activate_channels_locked(conn);
- }
- fallthrough;
- case RXRPC_CONN_CLIENT_ACTIVE:
- if (list_empty(&conn->waiting_calls)) {
- rxrpc_deactivate_one_channel(conn, channel);
- if (!conn->active_chans) {
- rxnet->nr_active_client_conns--;
- goto idle_connection;
- }
- goto out;
- }
-
- trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
- rxrpc_activate_one_channel(conn, channel);
- goto out;
+ /* Deactivate the channel. */
+ rcu_assign_pointer(chan->call, NULL);
+ set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
+ conn->act_chans &= ~(1 << channel);
- case RXRPC_CONN_CLIENT_CULLED:
- rxrpc_deactivate_one_channel(conn, channel);
- ASSERT(list_empty(&conn->waiting_calls));
- if (!conn->active_chans)
- goto idle_connection;
- goto out;
+ /* If no channels remain active, then put the connection on the idle
+ * list for a short while. Give it a ref to stop it going away if it
+ * becomes unbundled.
+ */
+ if (!conn->act_chans) {
+ trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
+ conn->idle_timestamp = jiffies;
- case RXRPC_CONN_CLIENT_WAITING:
- rxrpc_deactivate_one_channel(conn, channel);
- goto out;
+ rxrpc_get_connection(conn);
+ spin_lock(&rxnet->client_conn_cache_lock);
+ list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
+ spin_unlock(&rxnet->client_conn_cache_lock);
- default:
- BUG();
+ rxrpc_set_client_reap_timer(rxnet);
}
out:
- spin_unlock(&rxnet->client_conn_cache_lock);
-out_2:
- spin_unlock(&conn->channel_lock);
+ spin_unlock(&bundle->channel_lock);
_leave("");
return;
+}
-idle_connection:
- /* As no channels remain active, the connection gets deactivated
- * immediately or moved to the idle list for a short while.
- */
- if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
- trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
- conn->idle_timestamp = jiffies;
- conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
- list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
- if (rxnet->idle_client_conns.next == &conn->cache_link &&
- !rxnet->kill_all_client_conns)
- rxrpc_set_client_reap_timer(rxnet);
- } else {
- trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
- conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
- list_del_init(&conn->cache_link);
+/*
+ * Remove a connection from a bundle.
+ */
+static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
+{
+ struct rxrpc_bundle *bundle = conn->bundle;
+ struct rxrpc_local *local = bundle->params.local;
+ unsigned int bindex;
+ bool need_drop = false, need_put = false;
+ int i;
+
+ _enter("C=%x", conn->debug_id);
+
+ if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+ rxrpc_process_delayed_final_acks(conn, true);
+
+ spin_lock(&bundle->channel_lock);
+ bindex = conn->bundle_shift / RXRPC_MAXCALLS;
+ if (bundle->conns[bindex] == conn) {
+ _debug("clear slot %u", bindex);
+ bundle->conns[bindex] = NULL;
+ for (i = 0; i < RXRPC_MAXCALLS; i++)
+ clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
+ need_drop = true;
+ }
+ spin_unlock(&bundle->channel_lock);
+
+ /* If there are no more connections, remove the bundle */
+ if (!bundle->avail_chans) {
+ _debug("maybe unbundle");
+ spin_lock(&local->client_bundles_lock);
+
+ for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
+ if (bundle->conns[i])
+ break;
+ if (i == ARRAY_SIZE(bundle->conns) && !bundle->params.exclusive) {
+ _debug("erase bundle");
+ rb_erase(&bundle->local_node, &local->client_bundles);
+ need_put = true;
+ }
+
+ spin_unlock(&local->client_bundles_lock);
+ if (need_put)
+ rxrpc_put_bundle(bundle);
}
- goto out;
+
+ if (need_drop)
+ rxrpc_put_connection(conn);
+ _leave("");
}
/*
* Clean up a dead client connection.
*/
-static struct rxrpc_connection *
-rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
+static void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
- struct rxrpc_connection *next = NULL;
struct rxrpc_local *local = conn->params.local;
struct rxrpc_net *rxnet = local->rxnet;
- unsigned int nr_conns;
- trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
+ _enter("C=%x", conn->debug_id);
- if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
- spin_lock(&local->client_conns_lock);
- if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
- &conn->flags))
- rb_erase(&conn->client_node, &local->client_conns);
- spin_unlock(&local->client_conns_lock);
- }
+ trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
+ atomic_dec(&rxnet->nr_client_conns);
rxrpc_put_client_connection_id(conn);
-
- ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);
-
- if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
- trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
- spin_lock(&rxnet->client_conn_cache_lock);
- nr_conns = --rxnet->nr_client_conns;
-
- if (nr_conns < rxrpc_max_client_connections &&
- !list_empty(&rxnet->waiting_client_conns)) {
- next = list_entry(rxnet->waiting_client_conns.next,
- struct rxrpc_connection, cache_link);
- rxrpc_get_connection(next);
- rxrpc_activate_conn(rxnet, next);
- }
-
- spin_unlock(&rxnet->client_conn_cache_lock);
- }
-
rxrpc_kill_connection(conn);
- if (next)
- rxrpc_activate_channels(next);
-
- /* We need to get rid of the temporary ref we took upon next, but we
- * can't call rxrpc_put_connection() recursively.
- */
- return next;
}
/*
@@ -998,63 +970,12 @@ void rxrpc_put_client_conn(struct rxrpc_connection *conn)
unsigned int debug_id = conn->debug_id;
int n;
- do {
- n = atomic_dec_return(&conn->usage);
- trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
- if (n > 0)
- return;
+ n = atomic_dec_return(&conn->usage);
+ trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
+ if (n <= 0) {
ASSERTCMP(n, >=, 0);
-
- conn = rxrpc_put_one_client_conn(conn);
- } while (conn);
-}
-
-/*
- * Kill the longest-active client connections to make room for new ones.
- */
-static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
-{
- struct rxrpc_connection *conn;
- unsigned int nr_conns = rxnet->nr_client_conns;
- unsigned int nr_active, limit;
-
- _enter("");
-
- ASSERTCMP(nr_conns, >=, 0);
- if (nr_conns < rxrpc_max_client_connections) {
- _leave(" [ok]");
- return;
- }
- limit = rxrpc_reap_client_connections;
-
- spin_lock(&rxnet->client_conn_cache_lock);
- nr_active = rxnet->nr_active_client_conns;
-
- while (nr_active > limit) {
- ASSERT(!list_empty(&rxnet->active_client_conns));
- conn = list_entry(rxnet->active_client_conns.next,
- struct rxrpc_connection, cache_link);
- ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
- conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);
-
- if (list_empty(&conn->waiting_calls)) {
- trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
- conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
- list_del_init(&conn->cache_link);
- } else {
- trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
- conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
- list_move_tail(&conn->cache_link,
- &rxnet->waiting_client_conns);
- }
-
- nr_active--;
+ rxrpc_kill_client_conn(conn);
}
-
- rxnet->nr_active_client_conns = nr_active;
- spin_unlock(&rxnet->client_conn_cache_lock);
- ASSERTCMP(nr_active, >=, 0);
- _leave(" [culled]");
}
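
The put path loses its do/while loop because the cascade it served -
putting one connection could activate the next waiting connection, which
might itself need putting - no longer exists under the bundle scheme, so
a single decrement-and-test suffices. The shape, with a stand-in object
type and an assumed obj_destroy() helper:

    #include <linux/atomic.h>
    #include <linux/bug.h>

    struct obj { atomic_t usage; };

    void obj_destroy(struct obj *o);    /* assumed teardown */

    static void obj_put(struct obj *o)
    {
        int n = atomic_dec_return(&o->usage);

        if (n <= 0) {
            WARN_ON(n < 0);             /* going negative is an over-put bug */
            obj_destroy(o);
        }
    }
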
/*
@@ -1088,7 +1009,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
/* We keep an estimate of what the number of conns ought to be after
* we've discarded some so that we don't overdo the discarding.
*/
- nr_conns = rxnet->nr_client_conns;
+ nr_conns = atomic_read(&rxnet->nr_client_conns);
next:
spin_lock(&rxnet->client_conn_cache_lock);
@@ -1098,7 +1019,6 @@ next:
conn = list_entry(rxnet->idle_client_conns.next,
struct rxrpc_connection, cache_link);
- ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));
if (!rxnet->kill_all_client_conns) {
/* If the number of connections is over the reap limit, we
@@ -1120,18 +1040,13 @@ next:
}
trace_rxrpc_client(conn, -1, rxrpc_client_discard);
- if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
- BUG();
- conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
list_del_init(&conn->cache_link);
spin_unlock(&rxnet->client_conn_cache_lock);
- /* When we cleared the EXPOSED flag, we took on responsibility for the
- * reference that that had on the usage count. We deal with that here.
- * If someone re-sets the flag and re-gets the ref, that's fine.
- */
- rxrpc_put_connection(conn);
+ rxrpc_unbundle_conn(conn);
+ rxrpc_put_connection(conn); /* Drop the ->cache_link ref */
+
nr_conns--;
goto next;
@@ -1145,8 +1060,7 @@ not_yet_expired:
*/
_debug("not yet");
if (!rxnet->kill_all_client_conns)
- timer_reduce(&rxnet->client_conn_reap_timer,
- conn_expires_at);
+ timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);
out:
spin_unlock(&rxnet->client_conn_cache_lock);
@@ -1181,37 +1095,27 @@ void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
struct rxrpc_connection *conn, *tmp;
struct rxrpc_net *rxnet = local->rxnet;
- unsigned int nr_active;
LIST_HEAD(graveyard);
_enter("");
spin_lock(&rxnet->client_conn_cache_lock);
- nr_active = rxnet->nr_active_client_conns;
list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
cache_link) {
if (conn->params.local == local) {
- ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
-
trace_rxrpc_client(conn, -1, rxrpc_client_discard);
- if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
- BUG();
- conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
list_move(&conn->cache_link, &graveyard);
- nr_active--;
}
}
- rxnet->nr_active_client_conns = nr_active;
spin_unlock(&rxnet->client_conn_cache_lock);
- ASSERTCMP(nr_active, >=, 0);
while (!list_empty(&graveyard)) {
conn = list_entry(graveyard.next,
struct rxrpc_connection, cache_link);
list_del_init(&conn->cache_link);
-
+ rxrpc_unbundle_conn(conn);
rxrpc_put_connection(conn);
}
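
rxrpc_clean_up_local_conns() keeps the common "graveyard" idiom: matching
entries are moved onto a private list while the cache lock is held, then
released after it is dropped, so teardown work never runs inside the
spinlock. A generic sketch; matches() and entry_put() are assumed
helpers:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct entry { struct list_head link; };

    bool matches(struct entry *e);      /* assumed predicate */
    void entry_put(struct entry *e);    /* assumed release, may sleep */

    static void reap_matching(struct list_head *cache, spinlock_t *lock)
    {
        struct entry *e, *tmp;
        LIST_HEAD(graveyard);

        spin_lock(lock);
        list_for_each_entry_safe(e, tmp, cache, link)
            if (matches(e))
                list_move(&e->link, &graveyard);
        spin_unlock(lock);

        while (!list_empty(&graveyard)) {
            e = list_first_entry(&graveyard, struct entry, link);
            list_del_init(&e->link);
            entry_put(e);               /* safe outside the lock */
        }
    }
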
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 64ace2960ecc..aff184145ffa 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -157,12 +157,12 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
_enter("{%d},%x", conn->debug_id, conn->abort_code);
- spin_lock(&conn->channel_lock);
+ spin_lock(&conn->bundle->channel_lock);
for (i = 0; i < RXRPC_MAXCALLS; i++) {
call = rcu_dereference_protected(
conn->channels[i].call,
- lockdep_is_held(&conn->channel_lock));
+ lockdep_is_held(&conn->bundle->channel_lock));
if (call) {
if (compl == RXRPC_CALL_LOCALLY_ABORTED)
trace_rxrpc_abort(call->debug_id,
@@ -179,7 +179,7 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
}
}
- spin_unlock(&conn->channel_lock);
+ spin_unlock(&conn->bundle->channel_lock);
_leave("");
}
@@ -210,6 +210,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
conn->error = error;
conn->abort_code = abort_code;
conn->state = RXRPC_CONN_LOCALLY_ABORTED;
+ set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
spin_unlock_bh(&conn->state_lock);
msg.msg_name = &conn->params.peer->srx.transport;
@@ -319,6 +320,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
conn->error = -ECONNABORTED;
conn->abort_code = abort_code;
conn->state = RXRPC_CONN_REMOTELY_ABORTED;
+ set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
return -ECONNABORTED;
@@ -339,7 +341,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
if (ret < 0)
return ret;
- spin_lock(&conn->channel_lock);
+ spin_lock(&conn->bundle->channel_lock);
spin_lock_bh(&conn->state_lock);
if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
@@ -349,12 +351,12 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
rxrpc_call_is_secure(
rcu_dereference_protected(
conn->channels[loop].call,
- lockdep_is_held(&conn->channel_lock)));
+ lockdep_is_held(&conn->bundle->channel_lock)));
} else {
spin_unlock_bh(&conn->state_lock);
}
- spin_unlock(&conn->channel_lock);
+ spin_unlock(&conn->bundle->channel_lock);
return 0;
default:
@@ -395,7 +397,7 @@ abort:
/*
* Process delayed final ACKs that we haven't subsumed into a subsequent call.
*/
-static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
+void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn, bool force)
{
unsigned long j = jiffies, next_j;
unsigned int channel;
@@ -414,7 +416,7 @@ again:
smp_rmb(); /* vs rxrpc_disconnect_client_call */
ack_at = READ_ONCE(chan->final_ack_at);
- if (time_before(j, ack_at)) {
+ if (time_before(j, ack_at) && !force) {
if (time_before(ack_at, next_j)) {
next_j = ack_at;
set = true;
@@ -448,7 +450,7 @@ static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
/* Process delayed ACKs whose time has come. */
if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
- rxrpc_process_delayed_final_acks(conn);
+ rxrpc_process_delayed_final_acks(conn, false);
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index 8cbe0bf20ed5..3bcbe0665f91 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -41,8 +41,6 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
if (conn) {
INIT_LIST_HEAD(&conn->cache_link);
- spin_lock_init(&conn->channel_lock);
- INIT_LIST_HEAD(&conn->waiting_calls);
timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
INIT_WORK(&conn->processor, &rxrpc_process_connection);
INIT_LIST_HEAD(&conn->proc_link);
@@ -219,11 +217,11 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
}
if (rxrpc_is_client_call(call))
- return rxrpc_disconnect_client_call(call);
+ return rxrpc_disconnect_client_call(conn->bundle, call);
- spin_lock(&conn->channel_lock);
+ spin_lock(&conn->bundle->channel_lock);
__rxrpc_disconnect_call(conn, call);
- spin_unlock(&conn->channel_lock);
+ spin_unlock(&conn->bundle->channel_lock);
set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
conn->idle_timestamp = jiffies;
@@ -292,12 +290,13 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
/*
* Get a ref on a connection.
*/
-void rxrpc_get_connection(struct rxrpc_connection *conn)
+struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn)
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(&conn->usage);
trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
+ return conn;
}
/*
@@ -365,6 +364,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
conn->security->clear(conn);
key_put(conn->params.key);
key_put(conn->server_key);
+ rxrpc_put_bundle(conn->bundle);
rxrpc_put_peer(conn->params.peer);
if (atomic_dec_and_test(&conn->params.local->rxnet->nr_conns))
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index 21da48e3d2e5..6c847720494f 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -8,6 +8,12 @@
#include <linux/slab.h>
#include "ar-internal.h"
+static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
+ .usage = ATOMIC_INIT(1),
+ .debug_id = UINT_MAX,
+ .channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
+};
+
/*
* Find a service connection under RCU conditions.
*
@@ -127,6 +133,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
*/
conn->state = RXRPC_CONN_SERVICE_PREALLOC;
atomic_set(&conn->usage, 2);
+ conn->bundle = rxrpc_get_bundle(&rxrpc_service_dummy_bundle);
atomic_inc(&rxnet->nr_conns);
write_lock(&rxnet->conn_lock);
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index ede058f9cc15..8c2881054266 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -86,8 +86,8 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
init_rwsem(&local->defrag_sem);
skb_queue_head_init(&local->reject_queue);
skb_queue_head_init(&local->event_queue);
- local->client_conns = RB_ROOT;
- spin_lock_init(&local->client_conns_lock);
+ local->client_bundles = RB_ROOT;
+ spin_lock_init(&local->client_bundles_lock);
spin_lock_init(&local->lock);
rwlock_init(&local->services_lock);
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index b312aab80fed..25bbc4cc8b13 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -62,13 +62,10 @@ static __net_init int rxrpc_init_net(struct net *net)
timer_setup(&rxnet->service_conn_reap_timer,
rxrpc_service_conn_reap_timeout, 0);
- rxnet->nr_client_conns = 0;
- rxnet->nr_active_client_conns = 0;
+ atomic_set(&rxnet->nr_client_conns, 0);
rxnet->kill_all_client_conns = false;
spin_lock_init(&rxnet->client_conn_cache_lock);
spin_lock_init(&rxnet->client_conn_discard_lock);
- INIT_LIST_HEAD(&rxnet->waiting_client_conns);
- INIT_LIST_HEAD(&rxnet->active_client_conns);
INIT_LIST_HEAD(&rxnet->idle_client_conns);
INIT_WORK(&rxnet->client_conn_reaper,
rxrpc_discard_expired_client_conns);
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index 3cfff7922ba8..10f2bf2e9068 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -357,6 +357,12 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
_enter(",{%d}", skb->len);
+ if (hlist_unhashed(&call->error_link)) {
+ spin_lock_bh(&call->peer->lock);
+ hlist_add_head_rcu(&call->error_link, &call->peer->error_targets);
+ spin_unlock_bh(&call->peer->lock);
+ }
+
/* Each transmission of a Tx packet needs a new serial number */
serial = atomic_inc_return(&conn->serial);
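
Registration on the peer's error_targets list becomes lazy here: it
happens on first transmission rather than at connect time, and
hlist_unhashed() is true until the node has been added, so calls that
never transmit are never registered at all. The hunk does the test
without a locked recheck - presumably safe because a call's transmit
path is serialized - but a general-purpose version of the idiom would
recheck under the lock:

    #include <linux/rculist.h>
    #include <linux/spinlock.h>

    static void ensure_registered(struct hlist_node *node,
                                  struct hlist_head *head, spinlock_t *lock)
    {
        if (hlist_unhashed(node)) {     /* unlocked fast path */
            spin_lock_bh(lock);
            if (hlist_unhashed(node))   /* recheck under the lock */
                hlist_add_head_rcu(node, head);
            spin_unlock_bh(lock);
        }
    }
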
diff --git a/net/rxrpc/proc.c b/net/rxrpc/proc.c
index 543afd9bd664..e2f990754f88 100644
--- a/net/rxrpc/proc.c
+++ b/net/rxrpc/proc.c
@@ -165,7 +165,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
"Proto Local "
" Remote "
" SvID ConnID End Use State Key "
- " Serial ISerial\n"
+ " Serial ISerial CallId0 CallId1 CallId2 CallId3\n"
);
return 0;
}
diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
index 1221b0637a7e..4e565eeab426 100644
--- a/net/rxrpc/rtt.c
+++ b/net/rxrpc/rtt.c
@@ -14,7 +14,6 @@
#define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
#define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
#define rxrpc_jiffies32 ((u32)jiffies) /* As rxrpc_jiffies32 */
-#define rxrpc_min_rtt_wlen 300 /* As sysctl_tcp_min_rtt_wlen */
static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
{
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index e08130e5746b..f114dc2af5cf 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -1169,7 +1169,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
if (response->encrypted.checksum != csum)
goto protocol_error_free;
- spin_lock(&conn->channel_lock);
+ spin_lock(&conn->bundle->channel_lock);
for (i = 0; i < RXRPC_MAXCALLS; i++) {
struct rxrpc_call *call;
u32 call_id = ntohl(response->encrypted.call_id[i]);
@@ -1186,13 +1186,13 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
if (call_id > conn->channels[i].call_counter) {
call = rcu_dereference_protected(
conn->channels[i].call,
- lockdep_is_held(&conn->channel_lock));
+ lockdep_is_held(&conn->bundle->channel_lock));
if (call && call->state < RXRPC_CALL_COMPLETE)
goto protocol_error_unlock;
conn->channels[i].call_counter = call_id;
}
}
- spin_unlock(&conn->channel_lock);
+ spin_unlock(&conn->bundle->channel_lock);
eproto = tracepoint_string("rxkad_rsp_seq");
abort_code = RXKADOUTOFSEQUENCE;
@@ -1219,7 +1219,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
return 0;
protocol_error_unlock:
- spin_unlock(&conn->channel_lock);
+ spin_unlock(&conn->bundle->channel_lock);
protocol_error_free:
kfree(ticket);
protocol_error:
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index e91acc95ff28..540351d6a5f4 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -74,21 +74,13 @@ static struct ctl_table rxrpc_sysctl_table[] = {
/* Non-time values */
{
- .procname = "max_client_conns",
- .data = &rxrpc_max_client_connections,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = (void *)&rxrpc_reap_client_connections,
- },
- {
.procname = "reap_client_conns",
.data = &rxrpc_reap_client_connections,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = (void *)SYSCTL_ONE,
- .extra2 = (void *)&rxrpc_max_client_connections,
+ .extra2 = (void *)&n_65535,
},
{
.procname = "max_backlog",
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 798430e1a79f..f66417d5d2c3 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -982,7 +982,7 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
#endif
NL_SET_ERR_MSG(extack, "Failed to load TC action module");
err = -ENOENT;
- goto err_out;
+ goto err_free;
}
/* backward compatibility for policer */
@@ -1012,11 +1012,12 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
err_mod:
module_put(a_o->owner);
-err_out:
+err_free:
if (cookie) {
kfree(cookie->data);
kfree(cookie);
}
+err_out:
return ERR_PTR(err);
}
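
Splitting err_free out of err_out lets failure sites that still own the
cookie free it, while sites where ownership has already been handed off
can return through err_out without risking a double free. A generic
sketch of that ladder; every helper and type name here is hypothetical:

    struct cookie *cookie_alloc(void);
    void cookie_free(struct cookie *c);
    struct mod *mod_get(const char *name);
    void mod_put(struct mod *m);
    int action_create(struct mod *m, struct cookie *c);
    int action_commit(void);

    static int demo_init(void)
    {
        struct cookie *cookie;
        struct mod *m;
        int err;

        cookie = cookie_alloc();
        if (!cookie)
            return -ENOMEM;

        m = mod_get("act_demo");
        if (!m) {
            err = -ENOENT;
            goto err_free;              /* cookie is still ours */
        }

        err = action_create(m, cookie); /* success hands off cookie + mod */
        if (err)
            goto err_mod;

        err = action_commit();
        if (err)
            goto err_out;               /* the action owns the cookie now;
                                         * its release path frees it */
        return 0;

    err_mod:
        mod_put(m);
    err_free:
        cookie_free(cookie);
    err_out:
        return err;
    }
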
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index a780afdf570d..9c79fb92c2da 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1039,7 +1039,7 @@ drop:
static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
[TCA_CT_ACTION] = { .type = NLA_U16 },
- [TCA_CT_PARMS] = { .type = NLA_EXACT_LEN, .len = sizeof(struct tc_ct) },
+ [TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
[TCA_CT_ZONE] = { .type = NLA_U16 },
[TCA_CT_MARK] = { .type = NLA_U32 },
[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
@@ -1049,10 +1049,8 @@ static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
.len = 128 / BITS_PER_BYTE },
[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
- [TCA_CT_NAT_IPV6_MIN] = { .type = NLA_EXACT_LEN,
- .len = sizeof(struct in6_addr) },
- [TCA_CT_NAT_IPV6_MAX] = { .type = NLA_EXACT_LEN,
- .len = sizeof(struct in6_addr) },
+ [TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
+ [TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};
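
NLA_POLICY_EXACT_LEN() is shorthand for the open-coded initializer used
before; validation behaviour is unchanged. For instance, with a made-up
attribute set:

    #include <net/netlink.h>

    struct demo_parms { __u32 index; __u32 flags; };

    enum { DEMO_UNSPEC, DEMO_PARMS, __DEMO_MAX };
    #define DEMO_MAX (__DEMO_MAX - 1)

    static const struct nla_policy demo_policy[DEMO_MAX + 1] = {
        /* open-coded equivalent:
         * [DEMO_PARMS] = { .type = NLA_EXACT_LEN,
         *                  .len = sizeof(struct demo_parms) },
         */
        [DEMO_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct demo_parms)),
    };
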
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 6084300e51ad..b20c8ce59905 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -144,9 +144,8 @@ out:
}
static const struct nla_policy ctinfo_policy[TCA_CTINFO_MAX + 1] = {
- [TCA_CTINFO_ACT] = { .type = NLA_EXACT_LEN,
- .len = sizeof(struct
- tc_ctinfo) },
+ [TCA_CTINFO_ACT] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct tc_ctinfo)),
[TCA_CTINFO_ZONE] = { .type = NLA_U16 },
[TCA_CTINFO_PARMS_DSCP_MASK] = { .type = NLA_U32 },
[TCA_CTINFO_PARMS_DSCP_STATEMASK] = { .type = NLA_U32 },
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 7c0771dd77a3..a78cb7965718 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -159,8 +159,8 @@ static const struct nla_policy entry_policy[TCA_GATE_ENTRY_MAX + 1] = {
};
static const struct nla_policy gate_policy[TCA_GATE_MAX + 1] = {
- [TCA_GATE_PARMS] = { .len = sizeof(struct tc_gate),
- .type = NLA_EXACT_LEN },
+ [TCA_GATE_PARMS] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct tc_gate)),
[TCA_GATE_PRIORITY] = { .type = NLA_S32 },
[TCA_GATE_ENTRY_LIST] = { .type = NLA_NESTED },
[TCA_GATE_BASE_TIME] = { .type = NLA_U64 },
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index e298ec3b3c9e..f40bf9771cb9 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -87,6 +87,23 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
skb->dev && skb->dev->type == ARPHRD_ETHER))
goto drop;
break;
+ case TCA_MPLS_ACT_MAC_PUSH:
+ if (skb_vlan_tag_present(skb)) {
+ if (__vlan_insert_inner_tag(skb, skb->vlan_proto,
+ skb_vlan_tag_get(skb),
+ ETH_HLEN) < 0)
+ goto drop;
+
+ skb->protocol = skb->vlan_proto;
+ __vlan_hwaccel_clear_tag(skb);
+ }
+
+ new_lse = tcf_mpls_get_lse(NULL, p, mac_len ||
+ !eth_p_mpls(skb->protocol));
+
+ if (skb_mpls_push(skb, new_lse, p->tcfm_proto, 0, false))
+ goto drop;
+ break;
case TCA_MPLS_ACT_MODIFY:
new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false);
if (skb_mpls_update_lse(skb, new_lse))
@@ -188,6 +205,7 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
}
break;
case TCA_MPLS_ACT_PUSH:
+ case TCA_MPLS_ACT_MAC_PUSH:
if (!tb[TCA_MPLS_LABEL]) {
NL_SET_ERR_MSG_MOD(extack, "Label is required for MPLS push");
return -EINVAL;
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 163b0385fd4c..1cac3c6fbb49 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -77,6 +77,16 @@ static int tcf_vlan_act(struct sk_buff *skb, const struct tc_action *a,
/* put updated tci as hwaccel tag */
__vlan_hwaccel_put_tag(skb, p->tcfv_push_proto, tci);
break;
+ case TCA_VLAN_ACT_POP_ETH:
+ err = skb_eth_pop(skb);
+ if (err)
+ goto drop;
+ break;
+ case TCA_VLAN_ACT_PUSH_ETH:
+ err = skb_eth_push(skb, p->tcfv_push_dst, p->tcfv_push_src);
+ if (err)
+ goto drop;
+ break;
default:
BUG();
}
@@ -93,10 +103,13 @@ drop:
}
static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
+ [TCA_VLAN_UNSPEC] = { .strict_start_type = TCA_VLAN_PUSH_ETH_DST },
[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
[TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 },
+ [TCA_VLAN_PUSH_ETH_DST] = NLA_POLICY_ETH_ADDR,
+ [TCA_VLAN_PUSH_ETH_SRC] = NLA_POLICY_ETH_ADDR,
};
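
The new TCA_VLAN_UNSPEC entry's strict_start_type marks
TCA_VLAN_PUSH_ETH_DST as the first attribute to get strict validation
even through the deprecated (lenient) parsing entry points: old userspace
keeps its relaxed parsing for the pre-existing attributes, while the new
MAC-address attributes cannot arrive malformed. Illustrative policy with
demo names, not the real ones:

    #include <net/netlink.h>

    enum {
        DEMO_UNSPEC,
        DEMO_OLD_ATTR,          /* pre-existing, leniently parsed */
        DEMO_NEW_ETH_DST,       /* new, strictly validated */
        __DEMO_MAX,
    };
    #define DEMO_MAX (__DEMO_MAX - 1)

    static const struct nla_policy demo_policy[DEMO_MAX + 1] = {
        [DEMO_UNSPEC]      = { .strict_start_type = DEMO_NEW_ETH_DST },
        [DEMO_OLD_ATTR]    = { .type = NLA_U32 },
        [DEMO_NEW_ETH_DST] = NLA_POLICY_ETH_ADDR,
    };
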
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
@@ -179,6 +192,17 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
break;
+ case TCA_VLAN_ACT_POP_ETH:
+ break;
+ case TCA_VLAN_ACT_PUSH_ETH:
+ if (!tb[TCA_VLAN_PUSH_ETH_DST] || !tb[TCA_VLAN_PUSH_ETH_SRC]) {
+ if (exists)
+ tcf_idr_release(*a, bind);
+ else
+ tcf_idr_cleanup(tn, index);
+ return -EINVAL;
+ }
+ break;
default:
if (exists)
tcf_idr_release(*a, bind);
@@ -219,6 +243,13 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
p->tcfv_push_prio = push_prio;
p->tcfv_push_proto = push_proto;
+ if (action == TCA_VLAN_ACT_PUSH_ETH) {
+ nla_memcpy(&p->tcfv_push_dst, tb[TCA_VLAN_PUSH_ETH_DST],
+ ETH_ALEN);
+ nla_memcpy(&p->tcfv_push_src, tb[TCA_VLAN_PUSH_ETH_SRC],
+ ETH_ALEN);
+ }
+
spin_lock_bh(&v->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
p = rcu_replace_pointer(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
@@ -277,6 +308,15 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
p->tcfv_push_prio))))
goto nla_put_failure;
+ if (p->tcfv_action == TCA_VLAN_ACT_PUSH_ETH) {
+ if (nla_put(skb, TCA_VLAN_PUSH_ETH_DST, ETH_ALEN,
+ p->tcfv_push_dst))
+ goto nla_put_failure;
+ if (nla_put(skb, TCA_VLAN_PUSH_ETH_SRC, ETH_ALEN,
+ p->tcfv_push_src))
+ goto nla_put_failure;
+ }
+
tcf_tm_dump(&t, &v->tcf_tm);
if (nla_put_64bit(skb, TCA_VLAN_TM, sizeof(t), &t, TCA_VLAN_PAD))
goto nla_put_failure;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 7b69ab1993ba..54209a18d7fe 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -79,7 +79,7 @@ struct tc_u_hnode {
/* The 'ht' field MUST be the last field in structure to allow for
* more entries allocated at end of structure.
*/
- struct tc_u_knode __rcu *ht[1];
+ struct tc_u_knode __rcu *ht[];
};
struct tc_u_common {
@@ -353,7 +353,7 @@ static int u32_init(struct tcf_proto *tp)
void *key = tc_u_common_ptr(tp);
struct tc_u_common *tp_c = tc_u_common_find(key);
- root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
+ root_ht = kzalloc(struct_size(root_ht, ht, 1), GFP_KERNEL);
if (root_ht == NULL)
return -ENOBUFS;
@@ -364,7 +364,7 @@ static int u32_init(struct tcf_proto *tp)
idr_init(&root_ht->handle_idr);
if (tp_c == NULL) {
- tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
+ tp_c = kzalloc(struct_size(tp_c, hlist->ht, 1), GFP_KERNEL);
if (tp_c == NULL) {
kfree(root_ht);
return -ENOBUFS;
@@ -933,7 +933,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
NL_SET_ERR_MSG_MOD(extack, "Divisor can only be used on a hash table");
return -EINVAL;
}
- ht = kzalloc(sizeof(*ht) + divisor*sizeof(void *), GFP_KERNEL);
+ ht = kzalloc(struct_size(ht, ht, divisor + 1), GFP_KERNEL);
if (ht == NULL)
return -ENOBUFS;
if (handle == 0) {
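
The ht[1] member becomes a true flexible array and the allocations move
to struct_size(), which adds overflow checking; the "divisor + 1" keeps
the old sizing, where sizeof(*ht) already included one slot. The same
pattern in a self-contained sketch:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct demo_table {
        unsigned int divisor;
        void __rcu *ht[];               /* flexible array, was ht[1] */
    };

    static struct demo_table *demo_table_alloc(unsigned int divisor,
                                               gfp_t gfp)
    {
        struct demo_table *t;

        /* struct_size(t, ht, divisor + 1) ==
         *   sizeof(*t) + (divisor + 1) * sizeof(t->ht[0]),
         * with the multiplication checked for overflow
         */
        t = kzalloc(struct_size(t, ht, divisor + 1), gfp);
        if (t)
            t->divisor = divisor;
        return t;
    }
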
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 54c417244642..49eae93d1489 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -802,9 +802,8 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
const struct Qdisc_ops *ops,
struct netlink_ext_ack *extack)
{
- void *p;
struct Qdisc *sch;
- unsigned int size = QDISC_ALIGN(sizeof(*sch)) + ops->priv_size;
+ unsigned int size = sizeof(*sch) + ops->priv_size;
int err = -ENOBUFS;
struct net_device *dev;
@@ -815,22 +814,10 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
}
dev = dev_queue->dev;
- p = kzalloc_node(size, GFP_KERNEL,
- netdev_queue_numa_node_read(dev_queue));
+ sch = kzalloc_node(size, GFP_KERNEL, netdev_queue_numa_node_read(dev_queue));
- if (!p)
+ if (!sch)
goto errout;
- sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
- /* if we got non aligned memory, ask more and do alignment ourself */
- if (sch != p) {
- kfree(p);
- p = kzalloc_node(size + QDISC_ALIGNTO - 1, GFP_KERNEL,
- netdev_queue_numa_node_read(dev_queue));
- if (!p)
- goto errout;
- sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
- sch->padded = (char *) sch - (char *) p;
- }
__skb_queue_head_init(&sch->gso_skb);
__skb_queue_head_init(&sch->skb_bad_txq);
qdisc_skb_head_init(&sch->q);
@@ -873,7 +860,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
return sch;
errout1:
- kfree(p);
+ kfree(sch);
errout:
return ERR_PTR(err);
}
@@ -941,7 +928,7 @@ void qdisc_free(struct Qdisc *qdisc)
free_percpu(qdisc->cpu_qstats);
}
- kfree((char *) qdisc - qdisc->padded);
+ kfree(qdisc);
}
static void qdisc_free_cb(struct rcu_head *head)
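
What this hunk deletes is the classic allocate-extra-then-align-by-hand
idiom, with ->padded recording the offset back to the pointer that
kfree() needs; the patch drops it in favour of trusting the allocator's
alignment for these sizes (the rationale lives in the commit message,
not in this hunk). The removed idiom in generic form:

    #include <linux/slab.h>

    #define DEMO_ALIGNTO    64UL
    #define DEMO_ALIGN(p)   (((unsigned long)(p) + DEMO_ALIGNTO - 1) & \
                             ~(DEMO_ALIGNTO - 1))

    struct blob {
        unsigned int padded;    /* offset back to the kmalloc pointer */
    };

    static struct blob *blob_alloc(size_t size)
    {
        void *p = kzalloc(size + DEMO_ALIGNTO - 1, GFP_KERNEL);
        struct blob *b;

        if (!p)
            return NULL;
        b = (struct blob *)DEMO_ALIGN(p);
        b->padded = (char *)b - (char *)p;
        return b;
    }

    static void blob_free(struct blob *b)
    {
        kfree((char *)b - b->padded);
    }
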
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 8d735461fa19..fdb69d46276d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1351,7 +1351,7 @@ static void sctp_select_active_and_retran_path(struct sctp_association *asoc)
}
/* We did not find anything useful for a possible retransmission
- * path; either primary path that we found is the the same as
+ * path; either primary path that we found is the same as
* the current one, or we didn't generally find an active one.
*/
if (trans_sec == NULL)
@@ -1537,7 +1537,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
/* If we've reached or overflowed our receive buffer, announce
* a 0 rwnd if rwnd would still be positive. Store the
- * the potential pressure overflow so that the window can be restored
+ * potential pressure overflow so that the window can be restored
* back to original value.
*/
if (rx_count >= asoc->base.sk->sk_rcvbuf)
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 7e59d8a18f3e..6f8319b828b0 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -445,7 +445,7 @@ struct sctp_shared_key *sctp_auth_get_shkey(
}
/*
- * Initialize all the possible digest transforms that we can use. Right now
+ * Initialize all the possible digest transforms that we can use. Right
* now, the supported digests are SHA1 and SHA256. We do this here once
because of the restriction that transforms may only be allocated in
user context. This forces us to pre-allocate all possible transforms
@@ -811,7 +811,7 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
}
/* Set a new shared key on either endpoint or association. If the
- * the key with a same ID already exists, replace the key (remove the
+ * key with a same ID already exists, replace the key (remove the
* old key and add a new one).
*/
int sctp_auth_set_key(struct sctp_endpoint *ep,
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 701c5a4e441d..53e5ed79f63f 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -505,7 +505,7 @@ int sctp_in_scope(struct net *net, const union sctp_addr *addr,
return 0;
/*
* For INIT and INIT-ACK address list, let L be the level of
- * of requested destination address, sender and receiver
+ * requested destination address, sender and receiver
* SHOULD include all of its addresses with level greater
* than or equal to L.
*
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index ab6a997e222f..fd4f8243cc35 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -179,7 +179,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
__func__, asoc, max_data);
}
- /* If the the peer requested that we authenticate DATA chunks
+ /* If the peer requested that we authenticate DATA chunks
* we need to account for bundling of the AUTH chunks along with
* DATA.
*/
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index d19db22262fd..25833238fe93 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -372,7 +372,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp)
* Level 3 - private addresses.
* Level 4 - global addresses
* For INIT and INIT-ACK address list, let L be the level of
- * of requested destination address, sender and receiver
+ * requested destination address, sender and receiver
* SHOULD include all of its addresses with level greater
* than or equal to L.
*
@@ -1483,10 +1483,10 @@ static __init int sctp_init(void)
num_entries = (1UL << order) * PAGE_SIZE /
sizeof(struct sctp_bind_hashbucket);
- /* And finish by rounding it down to the nearest power of two
- * this wastes some memory of course, but its needed because
+ /* And finish by rounding it down to the nearest power of two.
+ * This wastes some memory of course, but it's needed because
* the hash function operates based on the assumption that
- * that the number of entries is a power of two
+ * the number of entries is a power of two.
*/
sctp_port_hashsize = rounddown_pow_of_two(num_entries);
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index c11c24524652..9a56ae2f3651 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1235,7 +1235,7 @@ nodata:
/* Create an Operation Error chunk of a fixed size, specifically,
* min(asoc->pathmtu, SCTP_DEFAULT_MAXSEGMENT) - overheads.
- * This is a helper function to allocate an error chunk for for those
+ * This is a helper function to allocate an error chunk for those
* invalid parameter codes in which we may not want to report all the
* errors, if the incoming chunk is large. If it can't fit in a single
* packet, we ignore it.
@@ -1780,7 +1780,7 @@ no_hmac:
* for init collision case of lost COOKIE ACK.
* If skb has been timestamped, then use the stamp, otherwise
* use current time. This introduces a small possibility that
- * that a cookie may be considered expired, but his would only slow
+ * a cookie may be considered expired, but this would only slow
* down the new association establishment instead of every packet.
*/
if (sock_flag(ep->base.sk, SOCK_TIMESTAMP))
@@ -2319,7 +2319,7 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
/* This implementation defaults to making the first transport
* added as the primary transport. The source address seems to
- * be a a better choice than any of the embedded addresses.
+ * be a better choice than any of the embedded addresses.
*/
if (!sctp_assoc_add_peer(asoc, peer_addr, gfp, SCTP_ACTIVE))
goto nomem;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 1c6c640607c5..407fed46931b 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -740,7 +740,7 @@ static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
/* Helper function to gather skbs that have possibly become
- * ordered by an an incoming chunk.
+ * ordered by an incoming chunk.
*/
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
struct sctp_ulpevent *event)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index e7649bbc2b87..82be0bd0f6e8 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -26,6 +26,7 @@
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>
+#include <linux/ctype.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -55,6 +56,9 @@ static DEFINE_MUTEX(smc_client_lgr_pending); /* serialize link group
* creation on client
*/
+struct workqueue_struct *smc_hs_wq; /* wq for handshake work */
+struct workqueue_struct *smc_close_wq; /* wq for close work */
+
static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
@@ -436,26 +440,52 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
static void smcr_conn_save_peer_info(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *clc)
{
- int bufsize = smc_uncompress_bufsize(clc->rmbe_size);
+ int bufsize = smc_uncompress_bufsize(clc->r0.rmbe_size);
- smc->conn.peer_rmbe_idx = clc->rmbe_idx;
- smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
+ smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
+ smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
smc->conn.peer_rmbe_size = bufsize;
atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}
+static bool smc_isascii(char *hostname)
+{
+ int i;
+
+ for (i = 0; i < SMC_MAX_HOSTNAME_LEN; i++)
+ if (!isascii(hostname[i]))
+ return false;
+ return true;
+}
+
static void smcd_conn_save_peer_info(struct smc_sock *smc,
struct smc_clc_msg_accept_confirm *clc)
{
- int bufsize = smc_uncompress_bufsize(clc->dmbe_size);
+ int bufsize = smc_uncompress_bufsize(clc->d0.dmbe_size);
- smc->conn.peer_rmbe_idx = clc->dmbe_idx;
- smc->conn.peer_token = clc->token;
+ smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
+ smc->conn.peer_token = clc->d0.token;
/* msg header takes up space in the buffer */
smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
+ if (clc->hdr.version > SMC_V1 &&
+ (clc->hdr.typev2 & SMC_FIRST_CONTACT_MASK)) {
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)clc;
+ struct smc_clc_first_contact_ext *fce =
+ (struct smc_clc_first_contact_ext *)
+ (((u8 *)clc_v2) + sizeof(*clc_v2));
+
+ memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
+ SMC_MAX_EID_LEN);
+ smc->conn.lgr->peer_os = fce->os_type;
+ smc->conn.lgr->peer_smc_release = fce->release;
+ if (smc_isascii(fce->hostname))
+ memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
+ SMC_MAX_HOSTNAME_LEN);
+ }
}
static void smc_conn_save_peer_info(struct smc_sock *smc,
@@ -470,11 +500,11 @@ static void smc_conn_save_peer_info(struct smc_sock *smc,
static void smc_link_save_peer_info(struct smc_link *link,
struct smc_clc_msg_accept_confirm *clc)
{
- link->peer_qpn = ntoh24(clc->qpn);
- memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
- memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
- link->peer_psn = ntoh24(clc->psn);
- link->peer_mtu = clc->qp_mtu;
+ link->peer_qpn = ntoh24(clc->r0.qpn);
+ memcpy(link->peer_gid, clc->r0.lcl.gid, SMC_GID_SIZE);
+ memcpy(link->peer_mac, clc->r0.lcl.mac, sizeof(link->peer_mac));
+ link->peer_psn = ntoh24(clc->r0.psn);
+ link->peer_mtu = clc->r0.qp_mtu;
}
static void smc_switch_to_fallback(struct smc_sock *smc)
@@ -501,7 +531,8 @@ static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
}
/* decline and fall back during connect */
-static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
+static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
+ u8 version)
{
int rc;
@@ -511,7 +542,7 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
return reason_code;
}
if (reason_code != SMC_CLC_DECL_PEERDECL) {
- rc = smc_clc_send_decline(smc, reason_code);
+ rc = smc_clc_send_decline(smc, reason_code, version);
if (rc < 0) {
if (smc->sk.sk_state == SMC_INIT)
sock_put(&smc->sk); /* passive closing */
@@ -522,23 +553,12 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
}
/* abort connecting */
-static int smc_connect_abort(struct smc_sock *smc, int reason_code,
- int local_contact)
+static void smc_connect_abort(struct smc_sock *smc, int local_first)
{
- bool is_smcd = smc->conn.lgr->is_smcd;
-
- if (local_contact == SMC_FIRST_CONTACT)
+ if (local_first)
smc_lgr_cleanup_early(&smc->conn);
else
smc_conn_free(&smc->conn);
- if (is_smcd)
- /* there is only one lgr role for SMC-D; use server lock */
- mutex_unlock(&smc_server_lgr_pending);
- else
- mutex_unlock(&smc_client_lgr_pending);
-
- smc->connect_nonblock = 0;
- return reason_code;
}
/* check if there is a rdma device available for this connection. */
@@ -561,47 +581,137 @@ static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
/* Find ISM device with same PNETID as connecting interface */
smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
- if (!ini->ism_dev)
+ if (!ini->ism_dev[0])
return SMC_CLC_DECL_NOSMCDDEV;
+ else
+ ini->ism_chid[0] = smc_ism_get_chid(ini->ism_dev[0]);
return 0;
}
+/* is chid unique for the ism devices that are already determined? */
+static bool smc_find_ism_v2_is_unique_chid(u16 chid, struct smc_init_info *ini,
+ int cnt)
+{
+ int i = (!ini->ism_dev[0]) ? 1 : 0;
+
+ for (; i < cnt; i++)
+ if (ini->ism_chid[i] == chid)
+ return false;
+ return true;
+}
+
+/* determine possible V2 ISM devices (either without a PNETID or with a
+ * PNETID matching that of the net_device)
+ */
+static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
+ struct smc_init_info *ini)
+{
+ int rc = SMC_CLC_DECL_NOSMCDDEV;
+ struct smcd_dev *smcd;
+ int i = 1;
+ u16 chid;
+
+ if (smcd_indicated(ini->smc_type_v1))
+ rc = 0; /* already initialized for V1 */
+ mutex_lock(&smcd_dev_list.mutex);
+ list_for_each_entry(smcd, &smcd_dev_list.list, list) {
+ if (smcd->going_away || smcd == ini->ism_dev[0])
+ continue;
+ chid = smc_ism_get_chid(smcd);
+ if (!smc_find_ism_v2_is_unique_chid(chid, ini, i))
+ continue;
+ if (!smc_pnet_is_pnetid_set(smcd->pnetid) ||
+ smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
+ ini->ism_dev[i] = smcd;
+ ini->ism_chid[i] = chid;
+ ini->is_smcd = true;
+ rc = 0;
+ i++;
+ if (i > SMC_MAX_ISM_DEVS)
+ break;
+ }
+ }
+ mutex_unlock(&smcd_dev_list.mutex);
+ ini->ism_offered_cnt = i - 1;
+ if (!ini->ism_dev[0] && !ini->ism_dev[1])
+ ini->smcd_version = 0;
+
+ return rc;
+}
+
/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
struct smc_init_info *ini)
{
- if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
+ if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev[0], ini->vlan_id))
return SMC_CLC_DECL_ISMVLANERR;
return 0;
}
+static int smc_find_proposal_devices(struct smc_sock *smc,
+ struct smc_init_info *ini)
+{
+ int rc = 0;
+
+ /* check if there is an ism device available */
+ if (ini->smcd_version & SMC_V1) {
+ if (smc_find_ism_device(smc, ini) ||
+ smc_connect_ism_vlan_setup(smc, ini)) {
+ if (ini->smc_type_v1 == SMC_TYPE_B)
+ ini->smc_type_v1 = SMC_TYPE_R;
+ else
+ ini->smc_type_v1 = SMC_TYPE_N;
+ } /* else ISM V1 is supported for this connection */
+ if (smc_find_rdma_device(smc, ini)) {
+ if (ini->smc_type_v1 == SMC_TYPE_B)
+ ini->smc_type_v1 = SMC_TYPE_D;
+ else
+ ini->smc_type_v1 = SMC_TYPE_N;
+ } /* else RDMA is supported for this connection */
+ }
+ if (smc_ism_v2_capable && smc_find_ism_v2_device_clnt(smc, ini))
+ ini->smc_type_v2 = SMC_TYPE_N;
+
+ /* if neither ISM nor RDMA is supported, fall back */
+ if (!smcr_indicated(ini->smc_type_v1) &&
+ ini->smc_type_v1 == SMC_TYPE_N && ini->smc_type_v2 == SMC_TYPE_N)
+ rc = SMC_CLC_DECL_NOSMCDEV;
+
+ return rc;
+}
+
/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
* used, the VLAN ID will be registered again during the connection setup.
*/
-static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
+static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
struct smc_init_info *ini)
{
- if (!is_smcd)
+ if (!smcd_indicated(ini->smc_type_v1))
return 0;
- if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
+ if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev[0], ini->vlan_id))
return SMC_CLC_DECL_CNFERR;
return 0;
}
+#define SMC_CLC_MAX_ACCEPT_LEN \
+ (sizeof(struct smc_clc_msg_accept_confirm_v2) + \
+ sizeof(struct smc_clc_first_contact_ext) + \
+ sizeof(struct smc_clc_msg_trail))
+
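
Since the client no longer knows whether the server will answer with a
V1 or a V2 accept, it has to receive into a buffer sized for the largest
case: the V2 fixed part plus the first-contact extension plus the
trailer. A hedged sketch of the receive side (the actual __smc_connect()
call sites are partly elided in this diff):

    static int demo_recv_accept(struct smc_sock *smc)
    {
        struct smc_clc_msg_accept_confirm_v2 *aclc2;
        struct smc_clc_msg_accept_confirm *aclc;
        u8 *buf;
        int rc;

        buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;
        aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
        aclc = (struct smc_clc_msg_accept_confirm *)aclc2;

        rc = smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
                              SMC_CLC_ACCEPT, CLC_WAIT_TIME);
        if (!rc && aclc->hdr.version == SMC_V2) {
            /* ->chid and any first-contact extension fit in buf */
        }
        kfree(buf);
        return rc;
    }
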
/* CLC handshake during connect */
-static int smc_connect_clc(struct smc_sock *smc, int smc_type,
- struct smc_clc_msg_accept_confirm *aclc,
+static int smc_connect_clc(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm_v2 *aclc2,
struct smc_init_info *ini)
{
int rc = 0;
/* do inband token exchange */
- rc = smc_clc_send_proposal(smc, smc_type, ini);
+ rc = smc_clc_send_proposal(smc, ini);
if (rc)
return rc;
/* receive SMC Accept CLC message */
- return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
- CLC_WAIT_TIME);
+ return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
+ SMC_CLC_ACCEPT, CLC_WAIT_TIME);
}
/* setup for RDMA connection of client */
@@ -613,9 +723,9 @@ static int smc_connect_rdma(struct smc_sock *smc,
struct smc_link *link;
ini->is_smcd = false;
- ini->ib_lcl = &aclc->lcl;
- ini->ib_clcqpn = ntoh24(aclc->qpn);
- ini->srv_first_contact = aclc->hdr.flag;
+ ini->ib_lcl = &aclc->r0.lcl;
+ ini->ib_clcqpn = ntoh24(aclc->r0.qpn);
+ ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
mutex_lock(&smc_client_lgr_pending);
reason_code = smc_conn_create(smc, ini);
@@ -626,7 +736,7 @@ static int smc_connect_rdma(struct smc_sock *smc,
smc_conn_save_peer_info(smc, aclc);
- if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
+ if (ini->first_contact_local) {
link = smc->conn.lnk;
} else {
/* set link that was assigned by server */
@@ -634,60 +744,66 @@ static int smc_connect_rdma(struct smc_sock *smc,
for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
struct smc_link *l = &smc->conn.lgr->lnk[i];
- if (l->peer_qpn == ntoh24(aclc->qpn) &&
- !memcmp(l->peer_gid, &aclc->lcl.gid, SMC_GID_SIZE) &&
- !memcmp(l->peer_mac, &aclc->lcl.mac, sizeof(l->peer_mac))) {
+ if (l->peer_qpn == ntoh24(aclc->r0.qpn) &&
+ !memcmp(l->peer_gid, &aclc->r0.lcl.gid,
+ SMC_GID_SIZE) &&
+ !memcmp(l->peer_mac, &aclc->r0.lcl.mac,
+ sizeof(l->peer_mac))) {
link = l;
break;
}
}
- if (!link)
- return smc_connect_abort(smc, SMC_CLC_DECL_NOSRVLINK,
- ini->cln_first_contact);
+ if (!link) {
+ reason_code = SMC_CLC_DECL_NOSRVLINK;
+ goto connect_abort;
+ }
smc->conn.lnk = link;
}
/* create send buffer and rmb */
- if (smc_buf_create(smc, false))
- return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
- ini->cln_first_contact);
+ if (smc_buf_create(smc, false)) {
+ reason_code = SMC_CLC_DECL_MEM;
+ goto connect_abort;
+ }
- if (ini->cln_first_contact == SMC_FIRST_CONTACT)
+ if (ini->first_contact_local)
smc_link_save_peer_info(link, aclc);
- if (smc_rmb_rtoken_handling(&smc->conn, link, aclc))
- return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
- ini->cln_first_contact);
+ if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
+ reason_code = SMC_CLC_DECL_ERR_RTOK;
+ goto connect_abort;
+ }
smc_close_init(smc);
smc_rx_init(smc);
- if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
- if (smc_ib_ready_link(link))
- return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
- ini->cln_first_contact);
+ if (ini->first_contact_local) {
+ if (smc_ib_ready_link(link)) {
+ reason_code = SMC_CLC_DECL_ERR_RDYLNK;
+ goto connect_abort;
+ }
} else {
- if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc))
- return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
- ini->cln_first_contact);
+ if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
+ reason_code = SMC_CLC_DECL_ERR_REGRMB;
+ goto connect_abort;
+ }
}
smc_rmb_sync_sg_for_device(&smc->conn);
- reason_code = smc_clc_send_confirm(smc);
+ reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
+ SMC_V1);
if (reason_code)
- return smc_connect_abort(smc, reason_code,
- ini->cln_first_contact);
+ goto connect_abort;
smc_tx_init(smc);
- if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
+ if (ini->first_contact_local) {
/* QP confirmation over RoCE fabric */
smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
reason_code = smcr_clnt_conf_first_link(smc);
smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
if (reason_code)
- return smc_connect_abort(smc, reason_code,
- ini->cln_first_contact);
+ goto connect_abort;
}
mutex_unlock(&smc_client_lgr_pending);
@@ -697,6 +813,31 @@ static int smc_connect_rdma(struct smc_sock *smc,
smc->sk.sk_state = SMC_ACTIVE;
return 0;
+connect_abort:
+ smc_connect_abort(smc, ini->first_contact_local);
+ mutex_unlock(&smc_client_lgr_pending);
+ smc->connect_nonblock = 0;
+
+ return reason_code;
+}
+
+/* The server has chosen one of the proposed ISM devices for the communication.
+ * Use the CHID in the received CLC ACCEPT to determine which device it chose.
+ */
+static int
+smc_v2_determine_accepted_chid(struct smc_clc_msg_accept_confirm_v2 *aclc,
+ struct smc_init_info *ini)
+{
+ int i;
+
+ for (i = 0; i < ini->ism_offered_cnt + 1; i++) {
+ if (ini->ism_chid[i] == ntohs(aclc->chid)) {
+ ini->ism_selected = i;
+ return 0;
+ }
+ }
+
+ return -EPROTO;
}
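Aside, not part of the patch: ini->ism_chid[] holds one CHID per proposed device, with index 0 reserved for the native ISM device and indices 1..ism_offered_cnt for the non-native ones, which is why the loop above runs to ism_offered_cnt + 1. A minimal userspace sketch of the same lookup, using made-up CHID values:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* hypothetical CHIDs: index 0 = native device, 1..n = non-native */
		uint16_t ism_chid[3] = { 0x4001, 0x4002, 0x4003 };
		int ism_offered_cnt = 2;		/* non-native devices proposed */
		uint16_t accepted_chid = 0x4002;	/* CHID echoed by the server */
		int i, selected = -1;

		for (i = 0; i < ism_offered_cnt + 1; i++)
			if (ism_chid[i] == accepted_chid)
				selected = i;

		printf("selected ISM device index: %d\n", selected);	/* prints 1 */
		return 0;
	}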
/* setup for ISM connection of client */
@@ -707,8 +848,17 @@ static int smc_connect_ism(struct smc_sock *smc,
int rc = 0;
ini->is_smcd = true;
- ini->ism_gid = aclc->gid;
- ini->srv_first_contact = aclc->hdr.flag;
+ ini->first_contact_peer = aclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK;
+
+ if (aclc->hdr.version == SMC_V2) {
+ struct smc_clc_msg_accept_confirm_v2 *aclc_v2 =
+ (struct smc_clc_msg_accept_confirm_v2 *)aclc;
+
+ rc = smc_v2_determine_accepted_chid(aclc_v2, ini);
+ if (rc)
+ return rc;
+ }
+ ini->ism_peer_gid[ini->ism_selected] = aclc->d0.gid;
/* there is only one lgr role for SMC-D; use server lock */
mutex_lock(&smc_server_lgr_pending);
@@ -720,20 +870,20 @@ static int smc_connect_ism(struct smc_sock *smc,
/* Create send and receive buffers */
rc = smc_buf_create(smc, true);
- if (rc)
- return smc_connect_abort(smc, (rc == -ENOSPC) ?
- SMC_CLC_DECL_MAX_DMB :
- SMC_CLC_DECL_MEM,
- ini->cln_first_contact);
+ if (rc) {
+ rc = (rc == -ENOSPC) ? SMC_CLC_DECL_MAX_DMB : SMC_CLC_DECL_MEM;
+ goto connect_abort;
+ }
smc_conn_save_peer_info(smc, aclc);
smc_close_init(smc);
smc_rx_init(smc);
smc_tx_init(smc);
- rc = smc_clc_send_confirm(smc);
+ rc = smc_clc_send_confirm(smc, ini->first_contact_local,
+ aclc->hdr.version);
if (rc)
- return smc_connect_abort(smc, rc, ini->cln_first_contact);
+ goto connect_abort;
mutex_unlock(&smc_server_lgr_pending);
smc_copy_sock_settings_to_clc(smc);
@@ -742,15 +892,40 @@ static int smc_connect_ism(struct smc_sock *smc,
smc->sk.sk_state = SMC_ACTIVE;
return 0;
+connect_abort:
+ smc_connect_abort(smc, ini->first_contact_local);
+ mutex_unlock(&smc_server_lgr_pending);
+ smc->connect_nonblock = 0;
+
+ return rc;
+}
+
+/* check if the received accept type and version match a proposed one */
+static int smc_connect_check_aclc(struct smc_init_info *ini,
+ struct smc_clc_msg_accept_confirm *aclc)
+{
+ if ((aclc->hdr.typev1 == SMC_TYPE_R &&
+ !smcr_indicated(ini->smc_type_v1)) ||
+ (aclc->hdr.typev1 == SMC_TYPE_D &&
+ ((!smcd_indicated(ini->smc_type_v1) &&
+ !smcd_indicated(ini->smc_type_v2)) ||
+ (aclc->hdr.version == SMC_V1 &&
+ !smcd_indicated(ini->smc_type_v1)) ||
+ (aclc->hdr.version == SMC_V2 &&
+ !smcd_indicated(ini->smc_type_v2)))))
+ return SMC_CLC_DECL_MODEUNSUPP;
+
+ return 0;
}
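Aside: the condition above reduces to the smcr_indicated()/smcd_indicated() predicates added to smc_clc.h further down in this patch. A standalone sketch of how they classify the four proposal types (SMC_TYPE_* values as defined in smc_clc.h):

	#include <stdbool.h>
	#include <stdio.h>

	enum { SMC_TYPE_R = 0, SMC_TYPE_D = 1, SMC_TYPE_N = 2, SMC_TYPE_B = 3 };

	static bool smcr_indicated(int t) { return t == SMC_TYPE_R || t == SMC_TYPE_B; }
	static bool smcd_indicated(int t) { return t == SMC_TYPE_D || t == SMC_TYPE_B; }

	int main(void)
	{
		/* example: both modes proposed for V1, SMC-D only for V2 */
		int type_v1 = SMC_TYPE_B, type_v2 = SMC_TYPE_D;

		printf("SMC-R accept valid for V1: %d\n", smcr_indicated(type_v1));
		printf("SMC-D accept valid for V2: %d\n", smcd_indicated(type_v2));
		return 0;
	}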
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
- bool ism_supported = false, rdma_supported = false;
- struct smc_clc_msg_accept_confirm aclc;
- struct smc_init_info ini = {0};
- int smc_type;
+ u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
+ struct smc_clc_msg_accept_confirm_v2 *aclc2;
+ struct smc_clc_msg_accept_confirm *aclc;
+ struct smc_init_info *ini = NULL;
+ u8 *buf = NULL;
int rc = 0;
if (smc->use_fallback)
@@ -760,58 +935,73 @@ static int __smc_connect(struct smc_sock *smc)
if (!tcp_sk(smc->clcsock->sk)->syn_smc)
return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
- /* IPSec connections opt out of SMC-R optimizations */
+ /* IPSec connections opt out of SMC optimizations */
if (using_ipsec(smc))
- return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);
+ return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
+ version);
- /* get vlan id from IP device */
- if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
- return smc_connect_decline_fallback(smc,
- SMC_CLC_DECL_GETVLANERR);
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini)
+ return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
+ version);
- /* check if there is an ism device available */
- if (!smc_find_ism_device(smc, &ini) &&
- !smc_connect_ism_vlan_setup(smc, &ini)) {
- /* ISM is supported for this connection */
- ism_supported = true;
- smc_type = SMC_TYPE_D;
- }
-
- /* check if there is a rdma device available */
- if (!smc_find_rdma_device(smc, &ini)) {
- /* RDMA is supported for this connection */
- rdma_supported = true;
- if (ism_supported)
- smc_type = SMC_TYPE_B; /* both */
- else
- smc_type = SMC_TYPE_R; /* only RDMA */
+ ini->smcd_version = SMC_V1;
+ ini->smcd_version |= smc_ism_v2_capable ? SMC_V2 : 0;
+ ini->smc_type_v1 = SMC_TYPE_B;
+ ini->smc_type_v2 = smc_ism_v2_capable ? SMC_TYPE_D : SMC_TYPE_N;
+
+ /* get vlan id from IP device */
+ if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
+ ini->smcd_version &= ~SMC_V1;
+ ini->smc_type_v1 = SMC_TYPE_N;
+ if (!ini->smcd_version) {
+ rc = SMC_CLC_DECL_GETVLANERR;
+ goto fallback;
+ }
}
- /* if neither ISM nor RDMA are supported, fallback */
- if (!rdma_supported && !ism_supported)
- return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);
+ rc = smc_find_proposal_devices(smc, ini);
+ if (rc)
+ goto fallback;
- /* perform CLC handshake */
- rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
- if (rc) {
- smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
- return smc_connect_decline_fallback(smc, rc);
+ buf = kzalloc(SMC_CLC_MAX_ACCEPT_LEN, GFP_KERNEL);
+ if (!buf) {
+ rc = SMC_CLC_DECL_MEM;
+ goto fallback;
}
+ aclc2 = (struct smc_clc_msg_accept_confirm_v2 *)buf;
+ aclc = (struct smc_clc_msg_accept_confirm *)aclc2;
+
+ /* perform CLC handshake */
+ rc = smc_connect_clc(smc, aclc2, ini);
+ if (rc)
+ goto vlan_cleanup;
+
+ /* check if smc modes and versions of CLC proposal and accept match */
+ rc = smc_connect_check_aclc(ini, aclc);
+ version = aclc->hdr.version == SMC_V1 ? SMC_V1 : version;
+ if (rc)
+ goto vlan_cleanup;
/* depending on previous steps, connect using rdma or ism */
- if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
- rc = smc_connect_rdma(smc, &aclc, &ini);
- else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
- rc = smc_connect_ism(smc, &aclc, &ini);
- else
- rc = SMC_CLC_DECL_MODEUNSUPP;
- if (rc) {
- smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
- return smc_connect_decline_fallback(smc, rc);
- }
+ if (aclc->hdr.typev1 == SMC_TYPE_R)
+ rc = smc_connect_rdma(smc, aclc, ini);
+ else if (aclc->hdr.typev1 == SMC_TYPE_D)
+ rc = smc_connect_ism(smc, aclc, ini);
+ if (rc)
+ goto vlan_cleanup;
- smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
+ smc_connect_ism_vlan_cleanup(smc, ini);
+ kfree(buf);
+ kfree(ini);
return 0;
+
+vlan_cleanup:
+ smc_connect_ism_vlan_cleanup(smc, ini);
+ kfree(buf);
+fallback:
+ kfree(ini);
+ return smc_connect_decline_fallback(smc, rc, version);
}
static void smc_connect_work(struct work_struct *work)
@@ -903,7 +1093,7 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
if (smc->use_fallback)
goto out;
if (flags & O_NONBLOCK) {
- if (schedule_work(&smc->connect_work))
+ if (queue_work(smc_hs_wq, &smc->connect_work))
smc->connect_nonblock = 1;
rc = -EINPROGRESS;
} else {
@@ -940,10 +1130,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
mutex_lock(&lsmc->clcsock_release_lock);
if (lsmc->clcsock)
- rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
+ rc = kernel_accept(lsmc->clcsock, &new_clcsock, SOCK_NONBLOCK);
mutex_unlock(&lsmc->clcsock_release_lock);
lock_sock(lsk);
- if (rc < 0)
+ if (rc < 0 && rc != -EAGAIN)
lsk->sk_err = -rc;
if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
new_sk->sk_prot->unhash(new_sk);
@@ -956,6 +1146,10 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
goto out;
}
+ /* new clcsock has inherited the smc listen-specific sk_data_ready
+ * function; switch it back to the original sk_data_ready function
+ */
+ new_clcsock->sk->sk_data_ready = lsmc->clcsk_data_ready;
(*new_smc)->clcsock = new_clcsock;
out:
return rc;
@@ -1123,10 +1317,10 @@ static void smc_listen_out_err(struct smc_sock *new_smc)
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
- int local_contact)
+ struct smc_init_info *ini, u8 version)
{
/* RDMA setup failed, switch back to TCP */
- if (local_contact == SMC_FIRST_CONTACT)
+ if (ini && ini->first_contact_local) /* ini may be NULL on early decline */
smc_lgr_cleanup_early(&new_smc->conn);
else
smc_conn_free(&new_smc->conn);
@@ -1137,7 +1331,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
smc_switch_to_fallback(new_smc);
new_smc->fallback_rsn = reason_code;
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
- if (smc_clc_send_decline(new_smc, reason_code) < 0) {
+ if (smc_clc_send_decline(new_smc, reason_code, version) < 0) {
smc_listen_out_err(new_smc);
return;
}
@@ -1145,6 +1339,47 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
smc_listen_out_connected(new_smc);
}
+/* listen worker: version checking */
+static int smc_listen_v2_check(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_smcd_v2_extension *pclc_smcd_v2_ext;
+ struct smc_clc_v2_extension *pclc_v2_ext;
+
+ ini->smc_type_v1 = pclc->hdr.typev1;
+ ini->smc_type_v2 = pclc->hdr.typev2;
+ ini->smcd_version = ini->smc_type_v1 != SMC_TYPE_N ? SMC_V1 : 0;
+ if (pclc->hdr.version > SMC_V1)
+ ini->smcd_version |=
+ ini->smc_type_v2 != SMC_TYPE_N ? SMC_V2 : 0;
+ if (!smc_ism_v2_capable) {
+ ini->smcd_version &= ~SMC_V2;
+ goto out;
+ }
+ pclc_v2_ext = smc_get_clc_v2_ext(pclc);
+ if (!pclc_v2_ext) {
+ ini->smcd_version &= ~SMC_V2;
+ goto out;
+ }
+ pclc_smcd_v2_ext = smc_get_clc_smcd_v2_ext(pclc_v2_ext);
+ if (!pclc_smcd_v2_ext)
+ ini->smcd_version &= ~SMC_V2;
+
+out:
+ if (!ini->smcd_version) {
+ if (pclc->hdr.typev1 == SMC_TYPE_B ||
+ pclc->hdr.typev2 == SMC_TYPE_B)
+ return SMC_CLC_DECL_NOSMCDEV;
+ if (pclc->hdr.typev1 == SMC_TYPE_D ||
+ pclc->hdr.typev2 == SMC_TYPE_D)
+ return SMC_CLC_DECL_NOSMCDDEV;
+ return SMC_CLC_DECL_NOSMCRDEV;
+ }
+
+ return 0;
+}
+
/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
struct smc_clc_msg_proposal *pclc)
@@ -1152,6 +1387,8 @@ static int smc_listen_prfx_check(struct smc_sock *new_smc,
struct smc_clc_msg_proposal_prefix *pclc_prfx;
struct socket *newclcsock = new_smc->clcsock;
+ if (pclc->hdr.typev1 == SMC_TYPE_N)
+ return 0;
pclc_prfx = smc_clc_proposal_get_prefix(pclc);
if (smc_clc_prfx_match(newclcsock, pclc_prfx))
return SMC_CLC_DECL_DIFFPREFIX;
@@ -1179,33 +1416,18 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc,
/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
- struct smc_clc_msg_proposal *pclc,
struct smc_init_info *ini)
{
- struct smc_clc_msg_smcd *pclc_smcd;
int rc;
- pclc_smcd = smc_get_clc_msg_smcd(pclc);
- ini->ism_gid = pclc_smcd->gid;
rc = smc_conn_create(new_smc, ini);
if (rc)
return rc;
- /* Check if peer can be reached via ISM device */
- if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
- new_smc->conn.lgr->vlan_id,
- new_smc->conn.lgr->smcd)) {
- if (ini->cln_first_contact == SMC_FIRST_CONTACT)
- smc_lgr_cleanup_early(&new_smc->conn);
- else
- smc_conn_free(&new_smc->conn);
- return SMC_CLC_DECL_SMCDNOTALK;
- }
-
/* Create send and receive buffers */
rc = smc_buf_create(new_smc, true);
if (rc) {
- if (ini->cln_first_contact == SMC_FIRST_CONTACT)
+ if (ini->first_contact_local)
smc_lgr_cleanup_early(&new_smc->conn);
else
smc_conn_free(&new_smc->conn);
@@ -1216,12 +1438,135 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
return 0;
}
+static bool smc_is_already_selected(struct smcd_dev *smcd,
+ struct smc_init_info *ini,
+ int matches)
+{
+ int i;
+
+ for (i = 0; i < matches; i++)
+ if (smcd == ini->ism_dev[i])
+ return true;
+
+ return false;
+}
+
+/* check for ISM devices matching proposed ISM devices */
+static void smc_check_ism_v2_match(struct smc_init_info *ini,
+ u16 proposed_chid, u64 proposed_gid,
+ unsigned int *matches)
+{
+ struct smcd_dev *smcd;
+
+ list_for_each_entry(smcd, &smcd_dev_list.list, list) {
+ if (smcd->going_away)
+ continue;
+ if (smc_is_already_selected(smcd, ini, *matches))
+ continue;
+ if (smc_ism_get_chid(smcd) == proposed_chid &&
+ !smc_ism_cantalk(proposed_gid, ISM_RESERVED_VLANID, smcd)) {
+ ini->ism_peer_gid[*matches] = proposed_gid;
+ ini->ism_dev[*matches] = smcd;
+ (*matches)++;
+ break;
+ }
+ }
+}
+
+static void smc_find_ism_v2_device_serv(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_smcd_v2_extension *smcd_v2_ext;
+ struct smc_clc_v2_extension *smc_v2_ext;
+ struct smc_clc_msg_smcd *pclc_smcd;
+ unsigned int matches = 0;
+ u8 smcd_version;
+ u8 *eid = NULL;
+ int i;
+
+ if (!(ini->smcd_version & SMC_V2) || !smcd_indicated(ini->smc_type_v2))
+ goto not_found;
+
+ pclc_smcd = smc_get_clc_msg_smcd(pclc);
+ smc_v2_ext = smc_get_clc_v2_ext(pclc);
+ smcd_v2_ext = smc_get_clc_smcd_v2_ext(smc_v2_ext);
+ if (!smcd_v2_ext ||
+ !smc_v2_ext->hdr.flag.seid) /* no system EID support for SMCD */
+ goto not_found;
+
+ mutex_lock(&smcd_dev_list.mutex);
+ if (pclc_smcd->ism.chid)
+ /* check for ISM device matching proposed native ISM device */
+ smc_check_ism_v2_match(ini, ntohs(pclc_smcd->ism.chid),
+ ntohll(pclc_smcd->ism.gid), &matches);
+ for (i = 1; i <= smc_v2_ext->hdr.ism_gid_cnt; i++) {
+ /* check for ISM devices matching proposed non-native ISM
+ * devices
+ */
+ smc_check_ism_v2_match(ini,
+ ntohs(smcd_v2_ext->gidchid[i - 1].chid),
+ ntohll(smcd_v2_ext->gidchid[i - 1].gid),
+ &matches);
+ }
+ mutex_unlock(&smcd_dev_list.mutex);
+
+ if (ini->ism_dev[0]) {
+ smc_ism_get_system_eid(ini->ism_dev[0], &eid);
+ if (memcmp(eid, smcd_v2_ext->system_eid, SMC_MAX_EID_LEN))
+ goto not_found;
+ } else {
+ goto not_found;
+ }
+
+ /* separate - outside the smcd_dev_list.mutex lock */
+ smcd_version = ini->smcd_version;
+ for (i = 0; i < matches; i++) {
+ ini->smcd_version = SMC_V2;
+ ini->is_smcd = true;
+ ini->ism_selected = i;
+ if (smc_listen_ism_init(new_smc, ini))
+ /* try next active ISM device */
+ continue;
+ return; /* matching and usable V2 ISM device found */
+ }
+ /* no V2 ISM device could be initialized */
+ ini->smcd_version = smcd_version; /* restore original value */
+
+not_found:
+ ini->smcd_version &= ~SMC_V2;
+ ini->ism_dev[0] = NULL;
+ ini->is_smcd = false;
+}
+
+static void smc_find_ism_v1_device_serv(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smc_init_info *ini)
+{
+ struct smc_clc_msg_smcd *pclc_smcd = smc_get_clc_msg_smcd(pclc);
+
+ /* check if ISM V1 is available */
+ if (!(ini->smcd_version & SMC_V1) || !smcd_indicated(ini->smc_type_v1))
+ goto not_found;
+ ini->is_smcd = true; /* prepare ISM check */
+ ini->ism_peer_gid[0] = ntohll(pclc_smcd->ism.gid);
+ if (smc_find_ism_device(new_smc, ini))
+ goto not_found;
+ ini->ism_selected = 0;
+ if (!smc_listen_ism_init(new_smc, ini))
+ return; /* V1 ISM device found */
+
+not_found:
+ ini->ism_dev[0] = NULL;
+ ini->is_smcd = false;
+}
+
/* listen worker: register buffers */
-static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
+static int smc_listen_rdma_reg(struct smc_sock *new_smc, bool local_first)
{
struct smc_connection *conn = &new_smc->conn;
- if (local_contact != SMC_FIRST_CONTACT) {
+ if (!local_first) {
if (smcr_lgr_reg_rmbs(conn->lnk, conn->rmb_desc))
return SMC_CLC_DECL_ERR_REGRMB;
}
@@ -1230,52 +1575,103 @@ static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
return 0;
}
+static int smc_find_rdma_v1_device_serv(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smc_init_info *ini)
+{
+ int rc;
+
+ if (!smcr_indicated(ini->smc_type_v1))
+ return SMC_CLC_DECL_NOSMCDEV;
+
+ /* prepare RDMA check */
+ ini->ib_lcl = &pclc->lcl;
+ rc = smc_find_rdma_device(new_smc, ini);
+ if (rc) {
+ /* no RDMA device found */
+ if (ini->smc_type_v1 == SMC_TYPE_B)
+ /* neither ISM nor RDMA device found */
+ rc = SMC_CLC_DECL_NOSMCDEV;
+ return rc;
+ }
+ rc = smc_listen_rdma_init(new_smc, ini);
+ if (rc)
+ return rc;
+ return smc_listen_rdma_reg(new_smc, ini->first_contact_local);
+}
+
+/* determine the local device matching the proposal */
+static int smc_listen_find_device(struct smc_sock *new_smc,
+ struct smc_clc_msg_proposal *pclc,
+ struct smc_init_info *ini)
+{
+ int rc;
+
+ /* check for ISM device matching V2 proposed device */
+ smc_find_ism_v2_device_serv(new_smc, pclc, ini);
+ if (ini->ism_dev[0])
+ return 0;
+
+ if (!(ini->smcd_version & SMC_V1))
+ return SMC_CLC_DECL_NOSMCDEV;
+
+ /* check for matching IP prefix and subnet length */
+ rc = smc_listen_prfx_check(new_smc, pclc);
+ if (rc)
+ return rc;
+
+ /* get vlan id from IP device */
+ if (smc_vlan_by_tcpsk(new_smc->clcsock, ini))
+ return SMC_CLC_DECL_GETVLANERR;
+
+ /* check for ISM device matching V1 proposed device */
+ smc_find_ism_v1_device_serv(new_smc, pclc, ini);
+ if (ini->ism_dev[0])
+ return 0;
+
+ if (pclc->hdr.typev1 == SMC_TYPE_D)
+ return SMC_CLC_DECL_NOSMCDDEV; /* skip RDMA and decline */
+
+ /* check if RDMA is available */
+ return smc_find_rdma_v1_device_serv(new_smc, pclc, ini);
+}
+
/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
struct smc_clc_msg_accept_confirm *cclc,
- int local_contact)
+ bool local_first)
{
struct smc_link *link = new_smc->conn.lnk;
int reason_code = 0;
- if (local_contact == SMC_FIRST_CONTACT)
+ if (local_first)
smc_link_save_peer_info(link, cclc);
- if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc)) {
- reason_code = SMC_CLC_DECL_ERR_RTOK;
- goto decline;
- }
+ if (smc_rmb_rtoken_handling(&new_smc->conn, link, cclc))
+ return SMC_CLC_DECL_ERR_RTOK;
- if (local_contact == SMC_FIRST_CONTACT) {
- if (smc_ib_ready_link(link)) {
- reason_code = SMC_CLC_DECL_ERR_RDYLNK;
- goto decline;
- }
+ if (local_first) {
+ if (smc_ib_ready_link(link))
+ return SMC_CLC_DECL_ERR_RDYLNK;
/* QP confirmation over RoCE fabric */
smc_llc_flow_initiate(link->lgr, SMC_LLC_FLOW_ADD_LINK);
reason_code = smcr_serv_conf_first_link(new_smc);
smc_llc_flow_stop(link->lgr, &link->lgr->llc_flow_lcl);
- if (reason_code)
- goto decline;
}
- return 0;
-
-decline:
- smc_listen_decline(new_smc, reason_code, local_contact);
return reason_code;
}
-/* setup for RDMA connection of server */
+/* setup for connection of server */
static void smc_listen_work(struct work_struct *work)
{
struct smc_sock *new_smc = container_of(work, struct smc_sock,
smc_listen_work);
+ u8 version = smc_ism_v2_capable ? SMC_V2 : SMC_V1;
struct socket *newclcsock = new_smc->clcsock;
- struct smc_clc_msg_accept_confirm cclc;
+ struct smc_clc_msg_accept_confirm *cclc;
+ struct smc_clc_msg_proposal_area *buf;
struct smc_clc_msg_proposal *pclc;
- struct smc_init_info ini = {0};
- bool ism_supported = false;
- u8 buf[SMC_CLC_MAX_LEN];
+ struct smc_init_info *ini = NULL;
int rc = 0;
if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
@@ -1297,102 +1693,85 @@ static void smc_listen_work(struct work_struct *work)
/* do inband token exchange -
* wait for and receive SMC Proposal CLC message
*/
- pclc = (struct smc_clc_msg_proposal *)&buf;
- rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf) {
+ rc = SMC_CLC_DECL_MEM;
+ goto out_decl;
+ }
+ pclc = (struct smc_clc_msg_proposal *)buf;
+ rc = smc_clc_wait_msg(new_smc, pclc, sizeof(*buf),
SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
if (rc)
goto out_decl;
+ version = pclc->hdr.version == SMC_V1 ? SMC_V1 : version;
- /* IPSec connections opt out of SMC-R optimizations */
+ /* IPSec connections opt out of SMC optimizations */
if (using_ipsec(new_smc)) {
rc = SMC_CLC_DECL_IPSEC;
goto out_decl;
}
- /* check for matching IP prefix and subnet length */
- rc = smc_listen_prfx_check(new_smc, pclc);
- if (rc)
+ ini = kzalloc(sizeof(*ini), GFP_KERNEL);
+ if (!ini) {
+ rc = SMC_CLC_DECL_MEM;
goto out_decl;
+ }
- /* get vlan id from IP device */
- if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
- rc = SMC_CLC_DECL_GETVLANERR;
+ /* initial version checking */
+ rc = smc_listen_v2_check(new_smc, pclc, ini);
+ if (rc)
goto out_decl;
- }
mutex_lock(&smc_server_lgr_pending);
smc_close_init(new_smc);
smc_rx_init(new_smc);
smc_tx_init(new_smc);
- /* check if ISM is available */
- if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
- ini.is_smcd = true; /* prepare ISM check */
- rc = smc_find_ism_device(new_smc, &ini);
- if (!rc)
- rc = smc_listen_ism_init(new_smc, pclc, &ini);
- if (!rc)
- ism_supported = true;
- else if (pclc->hdr.path == SMC_TYPE_D)
- goto out_unlock; /* skip RDMA and decline */
- }
-
- /* check if RDMA is available */
- if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
- /* prepare RDMA check */
- ini.is_smcd = false;
- ini.ism_dev = NULL;
- ini.ib_lcl = &pclc->lcl;
- rc = smc_find_rdma_device(new_smc, &ini);
- if (rc) {
- /* no RDMA device found */
- if (pclc->hdr.path == SMC_TYPE_B)
- /* neither ISM nor RDMA device found */
- rc = SMC_CLC_DECL_NOSMCDEV;
- goto out_unlock;
- }
- rc = smc_listen_rdma_init(new_smc, &ini);
- if (rc)
- goto out_unlock;
- rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
- if (rc)
- goto out_unlock;
- }
+ /* determine ISM or RoCE device used for connection */
+ rc = smc_listen_find_device(new_smc, pclc, ini);
+ if (rc)
+ goto out_unlock;
/* send SMC Accept CLC message */
- rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
+ rc = smc_clc_send_accept(new_smc, ini->first_contact_local,
+ ini->smcd_version == SMC_V2 ? SMC_V2 : SMC_V1);
if (rc)
goto out_unlock;
/* SMC-D does not need this lock any more */
- if (ism_supported)
+ if (ini->is_smcd)
mutex_unlock(&smc_server_lgr_pending);
/* receive SMC Confirm CLC message */
- rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
+ memset(buf, 0, sizeof(*buf));
+ cclc = (struct smc_clc_msg_accept_confirm *)buf;
+ rc = smc_clc_wait_msg(new_smc, cclc, sizeof(*buf),
SMC_CLC_CONFIRM, CLC_WAIT_TIME);
if (rc) {
- if (!ism_supported)
+ if (!ini->is_smcd)
goto out_unlock;
goto out_decl;
}
/* finish worker */
- if (!ism_supported) {
- rc = smc_listen_rdma_finish(new_smc, &cclc,
- ini.cln_first_contact);
- mutex_unlock(&smc_server_lgr_pending);
+ if (!ini->is_smcd) {
+ rc = smc_listen_rdma_finish(new_smc, cclc,
+ ini->first_contact_local);
if (rc)
- return;
+ goto out_unlock;
+ mutex_unlock(&smc_server_lgr_pending);
}
- smc_conn_save_peer_info(new_smc, &cclc);
+ smc_conn_save_peer_info(new_smc, cclc);
smc_listen_out_connected(new_smc);
- return;
+ goto out_free;
out_unlock:
mutex_unlock(&smc_server_lgr_pending);
out_decl:
- smc_listen_decline(new_smc, rc, ini.cln_first_contact);
+ smc_listen_decline(new_smc, rc, ini, version);
+out_free:
+ kfree(ini);
+ kfree(buf);
}
static void smc_tcp_listen_work(struct work_struct *work)
@@ -1406,7 +1785,7 @@ static void smc_tcp_listen_work(struct work_struct *work)
lock_sock(lsk);
while (lsk->sk_state == SMC_LISTEN) {
rc = smc_clcsock_accept(lsmc, &new_smc);
- if (rc)
+ if (rc) /* clcsock accept queue empty or error */
goto out;
if (!new_smc)
continue;
@@ -1420,13 +1799,29 @@ static void smc_tcp_listen_work(struct work_struct *work)
new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
sock_hold(&new_smc->sk); /* sock_put in passive closing */
- if (!schedule_work(&new_smc->smc_listen_work))
+ if (!queue_work(smc_hs_wq, &new_smc->smc_listen_work))
sock_put(&new_smc->sk);
}
out:
release_sock(lsk);
- sock_put(&lsmc->sk); /* sock_hold in smc_listen */
+ sock_put(&lsmc->sk); /* sock_hold in smc_clcsock_data_ready() */
+}
+
+static void smc_clcsock_data_ready(struct sock *listen_clcsock)
+{
+ struct smc_sock *lsmc;
+
+ lsmc = (struct smc_sock *)
+ ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY);
+ if (!lsmc)
+ return;
+ lsmc->clcsk_data_ready(listen_clcsock);
+ if (lsmc->sk.sk_state == SMC_LISTEN) {
+ sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
+ if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
+ sock_put(&lsmc->sk);
+ }
}
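Aside: smc_listen() below stores the smc_sock pointer in sk_user_data with the SK_USER_DATA_NOCOPY bit set, so the callback above has to mask that bit off before dereferencing. A self-contained sketch of this tagged-pointer pattern (the flag value here is illustrative; the real one lives in include/net/sock.h):

	#include <stdint.h>
	#include <stdio.h>

	#define USER_DATA_NOCOPY 0x1UL	/* stand-in for SK_USER_DATA_NOCOPY */

	struct owner { const char *name; };

	int main(void)
	{
		struct owner lsmc = { "smc listen socket" };
		/* store: tag the pointer so socket clones do not copy it */
		void *user_data = (void *)((uintptr_t)&lsmc | USER_DATA_NOCOPY);
		/* load: strip the flag bits before dereferencing */
		struct owner *o = (struct owner *)
			((uintptr_t)user_data & ~USER_DATA_NOCOPY);

		printf("%s\n", o->name);
		return 0;
	}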
static int smc_listen(struct socket *sock, int backlog)
@@ -1455,15 +1850,19 @@ static int smc_listen(struct socket *sock, int backlog)
if (!smc->use_fallback)
tcp_sk(smc->clcsock->sk)->syn_smc = 1;
+ /* save original sk_data_ready function and establish
+ * smc-specific sk_data_ready function
+ */
+ smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
+ smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
+ smc->clcsock->sk->sk_user_data =
+ (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
rc = kernel_listen(smc->clcsock, backlog);
if (rc)
goto out;
sk->sk_max_ack_backlog = backlog;
sk->sk_ack_backlog = 0;
sk->sk_state = SMC_LISTEN;
- sock_hold(sk); /* sock_hold in tcp_listen_worker */
- if (!schedule_work(&smc->tcp_listen_work))
- sock_put(sk);
out:
release_sock(sk);
@@ -1788,8 +2187,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
if (val)
- mod_delayed_work(system_wq, &smc->conn.tx_work,
- 0);
+ mod_delayed_work(smc->conn.lgr->tx_wq,
+ &smc->conn.tx_work, 0);
}
break;
case TCP_CORK:
@@ -1797,8 +2196,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
sk->sk_state != SMC_LISTEN &&
sk->sk_state != SMC_CLOSED) {
if (!val)
- mod_delayed_work(system_wq, &smc->conn.tx_work,
- 0);
+ mod_delayed_work(smc->conn.lgr->tx_wq,
+ &smc->conn.tx_work, 0);
}
break;
case TCP_DEFER_ACCEPT:
@@ -2077,14 +2476,26 @@ static int __init smc_init(void)
if (rc)
return rc;
+ smc_ism_init();
+ smc_clc_init();
+
rc = smc_pnet_init();
if (rc)
goto out_pernet_subsys;
+ rc = -ENOMEM;
+ smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
+ if (!smc_hs_wq)
+ goto out_pnet;
+
+ smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
+ if (!smc_close_wq)
+ goto out_alloc_hs_wq;
+
rc = smc_core_init();
if (rc) {
pr_err("%s: smc_core_init fails with %d\n", __func__, rc);
- goto out_pnet;
+ goto out_alloc_wqs;
}
rc = smc_llc_init();
@@ -2136,6 +2547,10 @@ out_proto:
proto_unregister(&smc_proto);
out_core:
smc_core_exit();
+out_alloc_wqs:
+ destroy_workqueue(smc_close_wq);
+out_alloc_hs_wq:
+ destroy_workqueue(smc_hs_wq);
out_pnet:
smc_pnet_exit();
out_pernet_subsys:
@@ -2150,6 +2565,8 @@ static void __exit smc_exit(void)
sock_unregister(PF_SMC);
smc_core_exit();
smc_ib_unregister_client();
+ destroy_workqueue(smc_close_wq);
+ destroy_workqueue(smc_hs_wq);
proto_unregister(&smc_proto6);
proto_unregister(&smc_proto);
smc_pnet_exit();
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 6f1c42da7a4c..d65e15f0c944 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -18,9 +18,20 @@
#include "smc_ib.h"
+#define SMC_V1 1 /* SMC version V1 */
+#define SMC_V2 2 /* SMC version V2 */
+#define SMC_RELEASE 0
+
#define SMCPROTO_SMC 0 /* SMC protocol, IPv4 */
#define SMCPROTO_SMC6 1 /* SMC protocol, IPv6 */
+#define SMC_MAX_ISM_DEVS 8 /* max # of proposed non-native ISM
+ * devices
+ */
+
+#define SMC_MAX_HOSTNAME_LEN 32
+#define SMC_MAX_EID_LEN 32
+
extern struct proto smc_proto;
extern struct proto smc_proto6;
@@ -201,6 +212,8 @@ struct smc_connection {
struct smc_sock { /* smc sock container */
struct sock sk;
struct socket *clcsock; /* internal tcp socket */
+ void (*clcsk_data_ready)(struct sock *sk);
+ /* original data_ready fct. */
struct smc_connection conn; /* smc connection */
struct smc_sock *listen_smc; /* listen parent */
struct work_struct connect_work; /* handle non-blocking connect*/
@@ -235,10 +248,16 @@ static inline struct smc_sock *smc_sk(const struct sock *sk)
return (struct smc_sock *)sk;
}
+extern struct workqueue_struct *smc_hs_wq; /* wq for handshake work */
+extern struct workqueue_struct *smc_close_wq; /* wq for close work */
+
#define SMC_SYSTEMID_LEN 8
extern u8 local_systemid[SMC_SYSTEMID_LEN]; /* unique system identifier */
+#define ntohll(x) be64_to_cpu(x)
+#define htonll(x) cpu_to_be64(x)
+
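Aside: ntohll()/htonll() are thin aliases for the kernel's 64-bit byte-order helpers, added for the 8-byte ISM GID fields in the V2 messages. A userspace equivalent using glibc's endian.h (assumed available):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t gid = 0x0102030405060708ULL;	/* hypothetical ISM GID */
		uint64_t wire = htobe64(gid);		/* htonll() equivalent */

		printf("host 0x%016llx -> host again 0x%016llx\n",
		       (unsigned long long)gid,
		       (unsigned long long)be64toh(wire));	/* ntohll() */
		return 0;
	}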
/* convert an u32 value into network byte order, store it into a 3 byte field */
static inline void hton24(u8 *net, u32 host)
{
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index ce468ff62a19..b1ce6ccbfaec 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -299,7 +299,7 @@ static void smc_cdc_msg_validate(struct smc_sock *smc, struct smc_cdc_msg *cdc,
conn->lnk = link;
spin_unlock_bh(&conn->send_lock);
sock_hold(&smc->sk); /* sock_put in abort_work */
- if (!schedule_work(&conn->abort_work))
+ if (!queue_work(smc_close_wq, &conn->abort_work))
sock_put(&smc->sk);
}
}
@@ -368,7 +368,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
sock_set_flag(&smc->sk, SOCK_DONE);
sock_hold(&smc->sk); /* sock_put in close_work */
- if (!schedule_work(&conn->close_work))
+ if (!queue_work(smc_close_wq, &conn->close_work))
sock_put(&smc->sk);
}
}
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 779f4142a11d..696d89c2dce4 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -14,6 +14,8 @@
#include <linux/inetdevice.h>
#include <linux/if_ether.h>
#include <linux/sched/signal.h>
+#include <linux/utsname.h>
+#include <linux/ctype.h>
#include <net/addrconf.h>
#include <net/sock.h>
@@ -27,6 +29,7 @@
#define SMCR_CLC_ACCEPT_CONFIRM_LEN 68
#define SMCD_CLC_ACCEPT_CONFIRM_LEN 48
+#define SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 78
#define SMC_CLC_RECV_BUF_LEN 100
/* eye catcher "SMCR" EBCDIC for CLC messages */
@@ -34,13 +37,88 @@ static const char SMC_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xd9'};
/* eye catcher "SMCD" EBCDIC for CLC messages */
static const char SMCD_EYECATCHER[4] = {'\xe2', '\xd4', '\xc3', '\xc4'};
+static u8 smc_hostname[SMC_MAX_HOSTNAME_LEN];
+
+/* check arriving CLC proposal */
+static bool smc_clc_msg_prop_valid(struct smc_clc_msg_proposal *pclc)
+{
+ struct smc_clc_msg_proposal_prefix *pclc_prfx;
+ struct smc_clc_smcd_v2_extension *smcd_v2_ext;
+ struct smc_clc_msg_hdr *hdr = &pclc->hdr;
+ struct smc_clc_v2_extension *v2_ext;
+
+ v2_ext = smc_get_clc_v2_ext(pclc);
+ pclc_prfx = smc_clc_proposal_get_prefix(pclc);
+ if (hdr->version == SMC_V1) {
+ if (hdr->typev1 == SMC_TYPE_N)
+ return false;
+ if (ntohs(hdr->length) !=
+ sizeof(*pclc) + ntohs(pclc->iparea_offset) +
+ sizeof(*pclc_prfx) +
+ pclc_prfx->ipv6_prefixes_cnt *
+ sizeof(struct smc_clc_ipv6_prefix) +
+ sizeof(struct smc_clc_msg_trail))
+ return false;
+ } else {
+ if (ntohs(hdr->length) !=
+ sizeof(*pclc) +
+ sizeof(struct smc_clc_msg_smcd) +
+ (hdr->typev1 != SMC_TYPE_N ?
+ sizeof(*pclc_prfx) +
+ pclc_prfx->ipv6_prefixes_cnt *
+ sizeof(struct smc_clc_ipv6_prefix) : 0) +
+ (hdr->typev2 != SMC_TYPE_N ?
+ sizeof(*v2_ext) +
+ v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN : 0) +
+ (smcd_indicated(hdr->typev2) ?
+ sizeof(*smcd_v2_ext) + v2_ext->hdr.ism_gid_cnt *
+ sizeof(struct smc_clc_smcd_gid_chid) :
+ 0) +
+ sizeof(struct smc_clc_msg_trail))
+ return false;
+ }
+ return true;
+}
+
+/* check arriving CLC accept or confirm */
+static bool
+smc_clc_msg_acc_conf_valid(struct smc_clc_msg_accept_confirm_v2 *clc_v2)
+{
+ struct smc_clc_msg_hdr *hdr = &clc_v2->hdr;
+
+ if (hdr->typev1 != SMC_TYPE_R && hdr->typev1 != SMC_TYPE_D)
+ return false;
+ if (hdr->version == SMC_V1) {
+ if ((hdr->typev1 == SMC_TYPE_R &&
+ ntohs(hdr->length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
+ (hdr->typev1 == SMC_TYPE_D &&
+ ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
+ return false;
+ } else {
+ if (hdr->typev1 == SMC_TYPE_D &&
+ ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 &&
+ (ntohs(hdr->length) != SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 +
+ sizeof(struct smc_clc_first_contact_ext)))
+ return false;
+ }
+ return true;
+}
+
+static void smc_clc_fill_fce(struct smc_clc_first_contact_ext *fce, int *len)
+{
+ memset(fce, 0, sizeof(*fce));
+ fce->os_type = SMC_CLC_OS_LINUX;
+ fce->release = SMC_RELEASE;
+ memcpy(fce->hostname, smc_hostname, sizeof(smc_hostname));
+ (*len) += sizeof(*fce);
+}
+
/* check if received message has a correct header length and contains valid
* heading and trailing eyecatchers
*/
static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
{
- struct smc_clc_msg_proposal_prefix *pclc_prfx;
- struct smc_clc_msg_accept_confirm *clc;
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2;
struct smc_clc_msg_proposal *pclc;
struct smc_clc_msg_decline *dclc;
struct smc_clc_msg_trail *trl;
@@ -51,29 +129,19 @@ static bool smc_clc_msg_hdr_valid(struct smc_clc_msg_hdr *clcm, bool check_trl)
switch (clcm->type) {
case SMC_CLC_PROPOSAL:
pclc = (struct smc_clc_msg_proposal *)clcm;
- pclc_prfx = smc_clc_proposal_get_prefix(pclc);
- if (ntohs(pclc->hdr.length) <
- sizeof(*pclc) + ntohs(pclc->iparea_offset) +
- sizeof(*pclc_prfx) +
- pclc_prfx->ipv6_prefixes_cnt *
- sizeof(struct smc_clc_ipv6_prefix) +
- sizeof(*trl))
+ if (!smc_clc_msg_prop_valid(pclc))
return false;
trl = (struct smc_clc_msg_trail *)
((u8 *)pclc + ntohs(pclc->hdr.length) - sizeof(*trl));
break;
case SMC_CLC_ACCEPT:
case SMC_CLC_CONFIRM:
- if (clcm->path != SMC_TYPE_R && clcm->path != SMC_TYPE_D)
- return false;
- clc = (struct smc_clc_msg_accept_confirm *)clcm;
- if ((clcm->path == SMC_TYPE_R &&
- ntohs(clc->hdr.length) != SMCR_CLC_ACCEPT_CONFIRM_LEN) ||
- (clcm->path == SMC_TYPE_D &&
- ntohs(clc->hdr.length) != SMCD_CLC_ACCEPT_CONFIRM_LEN))
+ clc_v2 = (struct smc_clc_msg_accept_confirm_v2 *)clcm;
+ if (!smc_clc_msg_acc_conf_valid(clc_v2))
return false;
trl = (struct smc_clc_msg_trail *)
- ((u8 *)clc + ntohs(clc->hdr.length) - sizeof(*trl));
+ ((u8 *)clc_v2 + ntohs(clc_v2->hdr.length) -
+ sizeof(*trl));
break;
case SMC_CLC_DECLINE:
dclc = (struct smc_clc_msg_decline *)clcm;
@@ -153,7 +221,6 @@ static int smc_clc_prfx_set(struct socket *clcsock,
struct sockaddr_in *addr;
int rc = -ENOENT;
- memset(prop, 0, sizeof(*prop));
if (!dst) {
rc = -ENOTCONN;
goto out;
@@ -320,7 +387,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
datlen = ntohs(clcm->length);
if ((len < sizeof(struct smc_clc_msg_hdr)) ||
- (clcm->version < SMC_CLC_V1) ||
+ (clcm->version < SMC_V1) ||
((clcm->type != SMC_CLC_DECLINE) &&
(clcm->type != expected_type))) {
smc->sk.sk_err = EPROTO;
@@ -328,9 +395,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
goto out;
}
- if (clcm->type == SMC_CLC_PROPOSAL && clcm->path == SMC_TYPE_N)
- reason_code = SMC_CLC_DECL_VERSMISMAT; /* just V2 offered */
-
/* receive the complete CLC message */
memset(&msg, 0, sizeof(struct msghdr));
if (datlen > buflen) {
@@ -366,7 +430,8 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
dclc = (struct smc_clc_msg_decline *)clcm;
reason_code = SMC_CLC_DECL_PEERDECL;
smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
- if (((struct smc_clc_msg_decline *)buf)->hdr.flag) {
+ if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 &
+ SMC_FIRST_CONTACT_MASK) {
smc->conn.lgr->sync_err = 1;
smc_lgr_terminate_sched(smc->conn.lgr);
}
@@ -378,7 +443,7 @@ out:
}
/* send CLC DECLINE message across internal TCP socket */
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version)
{
struct smc_clc_msg_decline dclc;
struct msghdr msg;
@@ -389,8 +454,10 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
memcpy(dclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
dclc.hdr.type = SMC_CLC_DECLINE;
dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
- dclc.hdr.version = SMC_CLC_V1;
- dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
+ dclc.hdr.version = version;
+ dclc.os_type = version == SMC_V1 ? 0 : SMC_CLC_OS_LINUX;
+ dclc.hdr.typev2 = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ?
+ SMC_FIRST_CONTACT_MASK : 0;
if ((!smc->conn.lgr || !smc->conn.lgr->is_smcd) &&
smc_ib_is_valid_local_systemid())
memcpy(dclc.id_for_peer, local_systemid,
@@ -409,142 +476,274 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
}
/* send CLC PROPOSAL message across internal TCP socket */
-int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
- struct smc_init_info *ini)
+int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini)
{
- struct smc_clc_ipv6_prefix ipv6_prfx[SMC_CLC_MAX_V6_PREFIX];
- struct smc_clc_msg_proposal_prefix pclc_prfx;
- struct smc_clc_msg_smcd pclc_smcd;
- struct smc_clc_msg_proposal pclc;
- struct smc_clc_msg_trail trl;
+ struct smc_clc_smcd_v2_extension *smcd_v2_ext;
+ struct smc_clc_msg_proposal_prefix *pclc_prfx;
+ struct smc_clc_msg_proposal *pclc_base;
+ struct smc_clc_smcd_gid_chid *gidchids;
+ struct smc_clc_msg_proposal_area *pclc;
+ struct smc_clc_ipv6_prefix *ipv6_prfx;
+ struct smc_clc_v2_extension *v2_ext;
+ struct smc_clc_msg_smcd *pclc_smcd;
+ struct smc_clc_msg_trail *trl;
int len, i, plen, rc;
int reason_code = 0;
- struct kvec vec[5];
+ struct kvec vec[8];
struct msghdr msg;
+ pclc = kzalloc(sizeof(*pclc), GFP_KERNEL);
+ if (!pclc)
+ return -ENOMEM;
+
+ pclc_base = &pclc->pclc_base;
+ pclc_smcd = &pclc->pclc_smcd;
+ pclc_prfx = &pclc->pclc_prfx;
+ ipv6_prfx = pclc->pclc_prfx_ipv6;
+ v2_ext = &pclc->pclc_v2_ext;
+ smcd_v2_ext = &pclc->pclc_smcd_v2_ext;
+ gidchids = pclc->pclc_gidchids;
+ trl = &pclc->pclc_trl;
+
+ pclc_base->hdr.version = SMC_V2;
+ pclc_base->hdr.typev1 = ini->smc_type_v1;
+ pclc_base->hdr.typev2 = ini->smc_type_v2;
+ plen = sizeof(*pclc_base) + sizeof(*pclc_smcd) + sizeof(*trl);
+
/* retrieve ip prefixes for CLC proposal msg */
- rc = smc_clc_prfx_set(smc->clcsock, &pclc_prfx, ipv6_prfx);
- if (rc)
- return SMC_CLC_DECL_CNFERR; /* configuration error */
+ if (ini->smc_type_v1 != SMC_TYPE_N) {
+ rc = smc_clc_prfx_set(smc->clcsock, pclc_prfx, ipv6_prfx);
+ if (rc) {
+ if (ini->smc_type_v2 == SMC_TYPE_N) {
+ kfree(pclc);
+ return SMC_CLC_DECL_CNFERR;
+ }
+ pclc_base->hdr.typev1 = SMC_TYPE_N;
+ } else {
+ pclc_base->iparea_offset = htons(sizeof(*pclc_smcd));
+ plen += sizeof(*pclc_prfx) +
+ pclc_prfx->ipv6_prefixes_cnt *
+ sizeof(ipv6_prfx[0]);
+ }
+ }
- /* send SMC Proposal CLC message */
- plen = sizeof(pclc) + sizeof(pclc_prfx) +
- (pclc_prfx.ipv6_prefixes_cnt * sizeof(ipv6_prfx[0])) +
- sizeof(trl);
- memset(&pclc, 0, sizeof(pclc));
- memcpy(pclc.hdr.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
- pclc.hdr.type = SMC_CLC_PROPOSAL;
- pclc.hdr.version = SMC_CLC_V1; /* SMC version */
- pclc.hdr.path = smc_type;
- if (smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B) {
+ /* build SMC Proposal CLC message */
+ memcpy(pclc_base->hdr.eyecatcher, SMC_EYECATCHER,
+ sizeof(SMC_EYECATCHER));
+ pclc_base->hdr.type = SMC_CLC_PROPOSAL;
+ if (smcr_indicated(ini->smc_type_v1)) {
/* add SMC-R specifics */
- memcpy(pclc.lcl.id_for_peer, local_systemid,
+ memcpy(pclc_base->lcl.id_for_peer, local_systemid,
sizeof(local_systemid));
- memcpy(&pclc.lcl.gid, ini->ib_gid, SMC_GID_SIZE);
- memcpy(&pclc.lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
+ memcpy(pclc_base->lcl.gid, ini->ib_gid, SMC_GID_SIZE);
+ memcpy(pclc_base->lcl.mac, &ini->ib_dev->mac[ini->ib_port - 1],
ETH_ALEN);
- pclc.iparea_offset = htons(0);
}
- if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
+ if (smcd_indicated(ini->smc_type_v1)) {
/* add SMC-D specifics */
- memset(&pclc_smcd, 0, sizeof(pclc_smcd));
- plen += sizeof(pclc_smcd);
- pclc.iparea_offset = htons(SMC_CLC_PROPOSAL_MAX_OFFSET);
- pclc_smcd.gid = ini->ism_dev->local_gid;
+ if (ini->ism_dev[0]) {
+ pclc_smcd->ism.gid = htonll(ini->ism_dev[0]->local_gid);
+ pclc_smcd->ism.chid =
+ htons(smc_ism_get_chid(ini->ism_dev[0]));
+ }
+ }
+ if (ini->smc_type_v2 == SMC_TYPE_N) {
+ pclc_smcd->v2_ext_offset = 0;
+ } else {
+ u16 v2_ext_offset;
+ u8 *eid = NULL;
+
+ v2_ext_offset = sizeof(*pclc_smcd) -
+ offsetofend(struct smc_clc_msg_smcd, v2_ext_offset);
+ if (ini->smc_type_v1 != SMC_TYPE_N)
+ v2_ext_offset += sizeof(*pclc_prfx) +
+ pclc_prfx->ipv6_prefixes_cnt *
+ sizeof(ipv6_prfx[0]);
+ pclc_smcd->v2_ext_offset = htons(v2_ext_offset);
+ v2_ext->hdr.eid_cnt = 0;
+ v2_ext->hdr.ism_gid_cnt = ini->ism_offered_cnt;
+ v2_ext->hdr.flag.release = SMC_RELEASE;
+ v2_ext->hdr.flag.seid = 1;
+ v2_ext->hdr.smcd_v2_ext_offset = htons(sizeof(*v2_ext) -
+ offsetofend(struct smc_clnt_opts_area_hdr,
+ smcd_v2_ext_offset) +
+ v2_ext->hdr.eid_cnt * SMC_MAX_EID_LEN);
+ if (ini->ism_dev[0])
+ smc_ism_get_system_eid(ini->ism_dev[0], &eid);
+ else
+ smc_ism_get_system_eid(ini->ism_dev[1], &eid);
+ if (eid)
+ memcpy(smcd_v2_ext->system_eid, eid, SMC_MAX_EID_LEN);
+ plen += sizeof(*v2_ext) + sizeof(*smcd_v2_ext);
+ if (ini->ism_offered_cnt) {
+ for (i = 1; i <= ini->ism_offered_cnt; i++) {
+ gidchids[i - 1].gid =
+ htonll(ini->ism_dev[i]->local_gid);
+ gidchids[i - 1].chid =
+ htons(smc_ism_get_chid(ini->ism_dev[i]));
+ }
+ plen += ini->ism_offered_cnt *
+ sizeof(struct smc_clc_smcd_gid_chid);
+ }
}
- pclc.hdr.length = htons(plen);
+ pclc_base->hdr.length = htons(plen);
+ memcpy(trl->eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
- memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
+ /* send SMC Proposal CLC message */
memset(&msg, 0, sizeof(msg));
i = 0;
- vec[i].iov_base = &pclc;
- vec[i++].iov_len = sizeof(pclc);
- if (smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B) {
- vec[i].iov_base = &pclc_smcd;
- vec[i++].iov_len = sizeof(pclc_smcd);
+ vec[i].iov_base = pclc_base;
+ vec[i++].iov_len = sizeof(*pclc_base);
+ vec[i].iov_base = pclc_smcd;
+ vec[i++].iov_len = sizeof(*pclc_smcd);
+ if (ini->smc_type_v1 != SMC_TYPE_N) {
+ vec[i].iov_base = pclc_prfx;
+ vec[i++].iov_len = sizeof(*pclc_prfx);
+ if (pclc_prfx->ipv6_prefixes_cnt > 0) {
+ vec[i].iov_base = ipv6_prfx;
+ vec[i++].iov_len = pclc_prfx->ipv6_prefixes_cnt *
+ sizeof(ipv6_prfx[0]);
+ }
}
- vec[i].iov_base = &pclc_prfx;
- vec[i++].iov_len = sizeof(pclc_prfx);
- if (pclc_prfx.ipv6_prefixes_cnt > 0) {
- vec[i].iov_base = &ipv6_prfx[0];
- vec[i++].iov_len = pclc_prfx.ipv6_prefixes_cnt *
- sizeof(ipv6_prfx[0]);
+ if (ini->smc_type_v2 != SMC_TYPE_N) {
+ vec[i].iov_base = v2_ext;
+ vec[i++].iov_len = sizeof(*v2_ext);
+ vec[i].iov_base = smcd_v2_ext;
+ vec[i++].iov_len = sizeof(*smcd_v2_ext);
+ if (ini->ism_offered_cnt) {
+ vec[i].iov_base = gidchids;
+ vec[i++].iov_len = ini->ism_offered_cnt *
+ sizeof(struct smc_clc_smcd_gid_chid);
+ }
}
- vec[i].iov_base = &trl;
- vec[i++].iov_len = sizeof(trl);
+ vec[i].iov_base = trl;
+ vec[i++].iov_len = sizeof(*trl);
/* due to the few bytes needed for clc-handshake this cannot block */
len = kernel_sendmsg(smc->clcsock, &msg, vec, i, plen);
if (len < 0) {
smc->sk.sk_err = smc->clcsock->sk->sk_err;
reason_code = -smc->sk.sk_err;
- } else if (len < (int)sizeof(pclc)) {
+ } else if (len < ntohs(pclc_base->hdr.length)) {
reason_code = -ENETUNREACH;
smc->sk.sk_err = -reason_code;
}
+ kfree(pclc);
return reason_code;
}
-/* send CLC CONFIRM message across internal TCP socket */
-int smc_clc_send_confirm(struct smc_sock *smc)
+/* build and send CLC CONFIRM / ACCEPT message */
+static int smc_clc_send_confirm_accept(struct smc_sock *smc,
+ struct smc_clc_msg_accept_confirm_v2 *clc_v2,
+ int first_contact, u8 version)
{
struct smc_connection *conn = &smc->conn;
- struct smc_clc_msg_accept_confirm cclc;
- struct smc_link *link;
- int reason_code = 0;
+ struct smc_clc_msg_accept_confirm *clc;
+ struct smc_clc_first_contact_ext fce;
+ struct smc_clc_msg_trail trl;
+ struct kvec vec[3];
struct msghdr msg;
- struct kvec vec;
- int len;
+ int i, len;
/* send SMC Confirm CLC msg */
- memset(&cclc, 0, sizeof(cclc));
- cclc.hdr.type = SMC_CLC_CONFIRM;
- cclc.hdr.version = SMC_CLC_V1; /* SMC version */
- if (smc->conn.lgr->is_smcd) {
+ clc = (struct smc_clc_msg_accept_confirm *)clc_v2;
+ clc->hdr.version = version; /* SMC version */
+ if (first_contact)
+ clc->hdr.typev2 |= SMC_FIRST_CONTACT_MASK;
+ if (conn->lgr->is_smcd) {
/* SMC-D specific settings */
- memcpy(cclc.hdr.eyecatcher, SMCD_EYECATCHER,
+ memcpy(clc->hdr.eyecatcher, SMCD_EYECATCHER,
sizeof(SMCD_EYECATCHER));
- cclc.hdr.path = SMC_TYPE_D;
- cclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
- cclc.gid = conn->lgr->smcd->local_gid;
- cclc.token = conn->rmb_desc->token;
- cclc.dmbe_size = conn->rmbe_size_short;
- cclc.dmbe_idx = 0;
- memcpy(&cclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
- memcpy(cclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
+ clc->hdr.typev1 = SMC_TYPE_D;
+ clc->d0.gid = conn->lgr->smcd->local_gid;
+ clc->d0.token = conn->rmb_desc->token;
+ clc->d0.dmbe_size = conn->rmbe_size_short;
+ clc->d0.dmbe_idx = 0;
+ memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+ if (version == SMC_V1) {
+ clc->hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
+ } else {
+ u8 *eid = NULL;
+
+ clc_v2->chid = htons(smc_ism_get_chid(conn->lgr->smcd));
+ smc_ism_get_system_eid(conn->lgr->smcd, &eid);
+ if (eid)
+ memcpy(clc_v2->eid, eid, SMC_MAX_EID_LEN);
+ len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2;
+ if (first_contact)
+ smc_clc_fill_fce(&fce, &len);
+ clc_v2->hdr.length = htons(len);
+ }
+ memcpy(trl.eyecatcher, SMCD_EYECATCHER,
sizeof(SMCD_EYECATCHER));
} else {
+ struct smc_link *link = conn->lnk;
+
/* SMC-R specific settings */
link = conn->lnk;
- memcpy(cclc.hdr.eyecatcher, SMC_EYECATCHER,
+ memcpy(clc->hdr.eyecatcher, SMC_EYECATCHER,
sizeof(SMC_EYECATCHER));
- cclc.hdr.path = SMC_TYPE_R;
- cclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
- memcpy(cclc.lcl.id_for_peer, local_systemid,
+ clc->hdr.typev1 = SMC_TYPE_R;
+ clc->hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
+ memcpy(clc->r0.lcl.id_for_peer, local_systemid,
sizeof(local_systemid));
- memcpy(&cclc.lcl.gid, link->gid, SMC_GID_SIZE);
- memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
+ memcpy(&clc->r0.lcl.gid, link->gid, SMC_GID_SIZE);
+ memcpy(&clc->r0.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
ETH_ALEN);
- hton24(cclc.qpn, link->roce_qp->qp_num);
- cclc.rmb_rkey =
+ hton24(clc->r0.qpn, link->roce_qp->qp_num);
+ clc->r0.rmb_rkey =
htonl(conn->rmb_desc->mr_rx[link->link_idx]->rkey);
- cclc.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
- cclc.rmbe_alert_token = htonl(conn->alert_token_local);
- cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
- cclc.rmbe_size = conn->rmbe_size_short;
- cclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
+ clc->r0.rmbe_idx = 1; /* for now: 1 RMB = 1 RMBE */
+ clc->r0.rmbe_alert_token = htonl(conn->alert_token_local);
+ switch (clc->hdr.type) {
+ case SMC_CLC_ACCEPT:
+ clc->r0.qp_mtu = link->path_mtu;
+ break;
+ case SMC_CLC_CONFIRM:
+ clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu);
+ break;
+ }
+ clc->r0.rmbe_size = conn->rmbe_size_short;
+ clc->r0.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
(conn->rmb_desc->sgt[link->link_idx].sgl));
- hton24(cclc.psn, link->psn_initial);
- memcpy(cclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
- sizeof(SMC_EYECATCHER));
+ hton24(clc->r0.psn, link->psn_initial);
+ memcpy(trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
}
memset(&msg, 0, sizeof(msg));
- vec.iov_base = &cclc;
- vec.iov_len = ntohs(cclc.hdr.length);
- len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1,
- ntohs(cclc.hdr.length));
- if (len < ntohs(cclc.hdr.length)) {
+ i = 0;
+ vec[i].iov_base = clc_v2;
+ if (version > SMC_V1)
+ vec[i++].iov_len = SMCD_CLC_ACCEPT_CONFIRM_LEN_V2 - sizeof(trl);
+ else
+ vec[i++].iov_len = (clc->hdr.typev1 == SMC_TYPE_D ?
+ SMCD_CLC_ACCEPT_CONFIRM_LEN :
+ SMCR_CLC_ACCEPT_CONFIRM_LEN) -
+ sizeof(trl);
+ if (version > SMC_V1 && first_contact) {
+ vec[i].iov_base = &fce;
+ vec[i++].iov_len = sizeof(fce);
+ }
+ vec[i].iov_base = &trl;
+ vec[i++].iov_len = sizeof(trl);
+ return kernel_sendmsg(smc->clcsock, &msg, vec, i,
+ ntohs(clc->hdr.length));
+}
+
+/* send CLC CONFIRM message across internal TCP socket */
+int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
+ u8 version)
+{
+ struct smc_clc_msg_accept_confirm_v2 cclc_v2;
+ int reason_code = 0;
+ int len;
+
+ /* send SMC Confirm CLC msg */
+ memset(&cclc_v2, 0, sizeof(cclc_v2));
+ cclc_v2.hdr.type = SMC_CLC_CONFIRM;
+ len = smc_clc_send_confirm_accept(smc, &cclc_v2, clnt_first_contact,
+ version);
+ if (len < ntohs(cclc_v2.hdr.length)) {
if (len >= 0) {
reason_code = -ENETUNREACH;
smc->sk.sk_err = -reason_code;
@@ -557,67 +756,28 @@ int smc_clc_send_confirm(struct smc_sock *smc)
}
/* send CLC ACCEPT message across internal TCP socket */
-int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
+int smc_clc_send_accept(struct smc_sock *new_smc, bool srv_first_contact,
+ u8 version)
{
- struct smc_connection *conn = &new_smc->conn;
- struct smc_clc_msg_accept_confirm aclc;
- struct smc_link *link;
- struct msghdr msg;
- struct kvec vec;
+ struct smc_clc_msg_accept_confirm_v2 aclc_v2;
int len;
- memset(&aclc, 0, sizeof(aclc));
- aclc.hdr.type = SMC_CLC_ACCEPT;
- aclc.hdr.version = SMC_CLC_V1; /* SMC version */
- if (srv_first_contact)
- aclc.hdr.flag = 1;
-
- if (new_smc->conn.lgr->is_smcd) {
- /* SMC-D specific settings */
- aclc.hdr.length = htons(SMCD_CLC_ACCEPT_CONFIRM_LEN);
- memcpy(aclc.hdr.eyecatcher, SMCD_EYECATCHER,
- sizeof(SMCD_EYECATCHER));
- aclc.hdr.path = SMC_TYPE_D;
- aclc.gid = conn->lgr->smcd->local_gid;
- aclc.token = conn->rmb_desc->token;
- aclc.dmbe_size = conn->rmbe_size_short;
- aclc.dmbe_idx = 0;
- memcpy(&aclc.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
- memcpy(aclc.smcd_trl.eyecatcher, SMCD_EYECATCHER,
- sizeof(SMCD_EYECATCHER));
- } else {
- /* SMC-R specific settings */
- aclc.hdr.length = htons(SMCR_CLC_ACCEPT_CONFIRM_LEN);
- memcpy(aclc.hdr.eyecatcher, SMC_EYECATCHER,
- sizeof(SMC_EYECATCHER));
- aclc.hdr.path = SMC_TYPE_R;
- link = conn->lnk;
- memcpy(aclc.lcl.id_for_peer, local_systemid,
- sizeof(local_systemid));
- memcpy(&aclc.lcl.gid, link->gid, SMC_GID_SIZE);
- memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
- ETH_ALEN);
- hton24(aclc.qpn, link->roce_qp->qp_num);
- aclc.rmb_rkey =
- htonl(conn->rmb_desc->mr_rx[link->link_idx]->rkey);
- aclc.rmbe_idx = 1; /* as long as 1 RMB = 1 RMBE */
- aclc.rmbe_alert_token = htonl(conn->alert_token_local);
- aclc.qp_mtu = link->path_mtu;
- aclc.rmbe_size = conn->rmbe_size_short,
- aclc.rmb_dma_addr = cpu_to_be64((u64)sg_dma_address
- (conn->rmb_desc->sgt[link->link_idx].sgl));
- hton24(aclc.psn, link->psn_initial);
- memcpy(aclc.smcr_trl.eyecatcher, SMC_EYECATCHER,
- sizeof(SMC_EYECATCHER));
- }
-
- memset(&msg, 0, sizeof(msg));
- vec.iov_base = &aclc;
- vec.iov_len = ntohs(aclc.hdr.length);
- len = kernel_sendmsg(new_smc->clcsock, &msg, &vec, 1,
- ntohs(aclc.hdr.length));
- if (len < ntohs(aclc.hdr.length))
+ memset(&aclc_v2, 0, sizeof(aclc_v2));
+ aclc_v2.hdr.type = SMC_CLC_ACCEPT;
+ len = smc_clc_send_confirm_accept(new_smc, &aclc_v2, srv_first_contact,
+ version);
+ if (len < ntohs(aclc_v2.hdr.length))
len = len >= 0 ? -EPROTO : -new_smc->clcsock->sk->sk_err;
return len > 0 ? 0 : len;
}
+
+void __init smc_clc_init(void)
+{
+ struct new_utsname *u;
+
+ memset(smc_hostname, _S, sizeof(smc_hostname)); /* ASCII blanks */
+ u = utsname();
+ memcpy(smc_hostname, u->nodename,
+ min_t(size_t, strlen(u->nodename), sizeof(smc_hostname)));
+}
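Aside: the hostname sent in the first-contact extension is fixed-width and blank-padded rather than NUL-terminated; _S from linux/ctype.h is the ASCII blank used as fill. A userspace sketch of the same fill-then-copy, with a hypothetical nodename:

	#include <stdio.h>
	#include <string.h>

	#define MAX_HOSTNAME_LEN 32	/* mirrors SMC_MAX_HOSTNAME_LEN */

	int main(void)
	{
		char hostname[MAX_HOSTNAME_LEN];
		const char *nodename = "s390lpar42";	/* hypothetical */
		size_t n = strlen(nodename);

		memset(hostname, ' ', sizeof(hostname));	/* blank-pad */
		memcpy(hostname, nodename,			/* no NUL */
		       n < sizeof(hostname) ? n : sizeof(hostname));

		printf("\"%.*s\"\n", (int)sizeof(hostname), hostname);
		return 0;
	}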
diff --git a/net/smc/smc_clc.h b/net/smc/smc_clc.h
index cf7b45306f4e..b3f46ab79e47 100644
--- a/net/smc/smc_clc.h
+++ b/net/smc/smc_clc.h
@@ -22,7 +22,6 @@
#define SMC_CLC_CONFIRM 0x03
#define SMC_CLC_DECLINE 0x04
-#define SMC_CLC_V1 0x1 /* SMC version */
#define SMC_TYPE_R 0 /* SMC-R only */
#define SMC_TYPE_D 1 /* SMC-D only */
#define SMC_TYPE_N 2 /* neither SMC-R nor SMC-D */
@@ -38,7 +37,6 @@
#define SMC_CLC_DECL_NOSMCDEV 0x03030000 /* no SMC device found (R or D) */
#define SMC_CLC_DECL_NOSMCDDEV 0x03030001 /* no SMC-D device found */
#define SMC_CLC_DECL_NOSMCRDEV 0x03030002 /* no SMC-R device found */
-#define SMC_CLC_DECL_SMCDNOTALK 0x03030003 /* SMC-D dev can't talk to peer */
#define SMC_CLC_DECL_MODEUNSUPP 0x03040000 /* smc modes do not match (R or D)*/
#define SMC_CLC_DECL_RMBE_EC 0x03050000 /* peer has eyecatcher in RMBE */
#define SMC_CLC_DECL_OPTUNSUPP 0x03060000 /* fastopen sockopt not supported */
@@ -56,19 +54,19 @@
#define SMC_CLC_DECL_ERR_RDYLNK 0x09990002 /* ib ready link failed */
#define SMC_CLC_DECL_ERR_REGRMB 0x09990003 /* reg rmb failed */
+#define SMC_FIRST_CONTACT_MASK 0b10 /* first contact bit within typev2 */
+
struct smc_clc_msg_hdr { /* header1 of clc messages */
u8 eyecatcher[4]; /* eye catcher */
u8 type; /* proposal / accept / confirm / decline */
__be16 length;
#if defined(__BIG_ENDIAN_BITFIELD)
u8 version : 4,
- flag : 1,
- rsvd : 1,
- path : 2;
+ typev2 : 2,
+ typev1 : 2;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 path : 2,
- rsvd : 1,
- flag : 1,
+ u8 typev1 : 2,
+ typev2 : 2,
version : 4;
#endif
} __packed; /* format defined in RFC7609 */
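Aside: the former flag/rsvd/path byte is repartitioned into version:4, typev2:2 and typev1:2; in accept/confirm and decline messages typev2 carries the first-contact flag (SMC_FIRST_CONTACT_MASK, 0b10) instead of a second type. A sketch of how that byte packs on a little-endian build, where the first-declared bitfield occupies the low bits:

	#include <stdint.h>
	#include <stdio.h>

	#define FIRST_CONTACT_MASK 0x2	/* SMC_FIRST_CONTACT_MASK (0b10) */

	int main(void)
	{
		/* version = 2 (SMC_V2), typev2 = first-contact flag,
		 * typev1 = 1 (SMC_TYPE_D); layout: typev1 | typev2 | version
		 */
		uint8_t byte = (2 << 4) | (FIRST_CONTACT_MASK << 2) | 1;

		printf("version %u\n", byte >> 4);		/* 2 */
		printf("typev1  %u\n", byte & 0x3);		/* 1 */
		printf("first contact: %u\n",
		       !!(((byte >> 2) & 0x3) & FIRST_CONTACT_MASK));	/* 1 */
		return 0;
	}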
@@ -83,8 +81,6 @@ struct smc_clc_msg_local { /* header2 of clc messages */
u8 mac[6]; /* mac of ib_device port */
};
-#define SMC_CLC_MAX_V6_PREFIX 8
-
/* Struct would be 4 byte aligned, but it is used in an array that is sent
* to peers and must conform to RFC7609, hence we need to use packed here.
*/
@@ -93,6 +89,44 @@ struct smc_clc_ipv6_prefix {
u8 prefix_len;
} __packed; /* format defined in RFC7609 */
+#if defined(__BIG_ENDIAN_BITFIELD)
+struct smc_clc_v2_flag {
+ u8 release : 4,
+ rsvd : 3,
+ seid : 1;
+};
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+struct smc_clc_v2_flag {
+ u8 seid : 1,
+ rsvd : 3,
+ release : 4;
+};
+#endif
+
+struct smc_clnt_opts_area_hdr {
+ u8 eid_cnt; /* number of user defined EIDs */
+ u8 ism_gid_cnt; /* number of ISMv2 GIDs */
+ u8 reserved1;
+ struct smc_clc_v2_flag flag;
+ u8 reserved2[2];
+ __be16 smcd_v2_ext_offset; /* SMC-Dv2 Extension Offset */
+};
+
+struct smc_clc_smcd_gid_chid {
+ __be64 gid; /* ISM GID */
+ __be16 chid; /* ISMv2 CHID */
+} __packed; /* format defined in
+ * IBM Shared Memory Communications Version 2
+ * (https://www.ibm.com/support/pages/node/6326337)
+ */
+
+struct smc_clc_v2_extension {
+ struct smc_clnt_opts_area_hdr hdr;
+ u8 roce[16]; /* RoCEv2 GID */
+ u8 reserved[16];
+ u8 user_eids[0][SMC_MAX_EID_LEN];
+};
+
struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/
__be32 outgoing_subnet; /* subnet mask */
u8 prefix_len; /* number of significant bits in mask */
@@ -101,8 +135,15 @@ struct smc_clc_msg_proposal_prefix { /* prefix part of clc proposal message*/
} __aligned(4);
struct smc_clc_msg_smcd { /* SMC-D GID information */
- u64 gid; /* ISM GID of requestor */
- u8 res[32];
+ struct smc_clc_smcd_gid_chid ism; /* ISM native GID+CHID of requestor */
+ __be16 v2_ext_offset; /* SMC Version 2 Extension Offset */
+ u8 reserved[28];
+};
+
+struct smc_clc_smcd_v2_extension {
+ u8 system_eid[SMC_MAX_EID_LEN];
+ u8 reserved[16];
+ struct smc_clc_smcd_gid_chid gidchid[0];
};
struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
@@ -111,64 +152,107 @@ struct smc_clc_msg_proposal { /* clc proposal message sent by Linux */
__be16 iparea_offset; /* offset to IP address information area */
} __aligned(4);
-#define SMC_CLC_PROPOSAL_MAX_OFFSET 0x28
-#define SMC_CLC_PROPOSAL_MAX_PREFIX (SMC_CLC_MAX_V6_PREFIX * \
- sizeof(struct smc_clc_ipv6_prefix))
-#define SMC_CLC_MAX_LEN (sizeof(struct smc_clc_msg_proposal) + \
- SMC_CLC_PROPOSAL_MAX_OFFSET + \
- sizeof(struct smc_clc_msg_proposal_prefix) + \
- SMC_CLC_PROPOSAL_MAX_PREFIX + \
- sizeof(struct smc_clc_msg_trail))
+#define SMC_CLC_MAX_V6_PREFIX 8
-struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */
- struct smc_clc_msg_hdr hdr;
- union {
- struct { /* SMC-R */
- struct smc_clc_msg_local lcl;
- u8 qpn[3]; /* QP number */
- __be32 rmb_rkey; /* RMB rkey */
- u8 rmbe_idx; /* Index of RMBE in RMB */
- __be32 rmbe_alert_token;/* unique connection id */
+struct smc_clc_msg_proposal_area {
+ struct smc_clc_msg_proposal pclc_base;
+ struct smc_clc_msg_smcd pclc_smcd;
+ struct smc_clc_msg_proposal_prefix pclc_prfx;
+ struct smc_clc_ipv6_prefix pclc_prfx_ipv6[SMC_CLC_MAX_V6_PREFIX];
+ struct smc_clc_v2_extension pclc_v2_ext;
+ struct smc_clc_smcd_v2_extension pclc_smcd_v2_ext;
+ struct smc_clc_smcd_gid_chid pclc_gidchids[SMC_MAX_ISM_DEVS];
+ struct smc_clc_msg_trail pclc_trl;
+};
+
+struct smcr_clc_msg_accept_confirm { /* SMCR accept/confirm */
+ struct smc_clc_msg_local lcl;
+ u8 qpn[3]; /* QP number */
+ __be32 rmb_rkey; /* RMB rkey */
+ u8 rmbe_idx; /* Index of RMBE in RMB */
+ __be32 rmbe_alert_token; /* unique connection id */
+ #if defined(__BIG_ENDIAN_BITFIELD)
+ u8 rmbe_size : 4, /* buf size (compressed) */
+ qp_mtu : 4; /* QP mtu */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 qp_mtu : 4,
+ rmbe_size : 4;
+#endif
+ u8 reserved;
+ __be64 rmb_dma_addr; /* RMB virtual address */
+ u8 reserved2;
+ u8 psn[3]; /* packet sequence number */
+} __packed;
+
+struct smcd_clc_msg_accept_confirm_common { /* SMCD accept/confirm */
+ u64 gid; /* Sender GID */
+ u64 token; /* DMB token */
+ u8 dmbe_idx; /* DMBE index */
#if defined(__BIG_ENDIAN_BITFIELD)
- u8 rmbe_size : 4, /* buf size (compressed) */
- qp_mtu : 4; /* QP mtu */
+ u8 dmbe_size : 4, /* buf size (compressed) */
+ reserved3 : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 qp_mtu : 4,
- rmbe_size : 4;
+ u8 reserved3 : 4,
+ dmbe_size : 4;
#endif
- u8 reserved;
- __be64 rmb_dma_addr; /* RMB virtual address */
- u8 reserved2;
- u8 psn[3]; /* packet sequence number */
- struct smc_clc_msg_trail smcr_trl;
- /* eye catcher "SMCR" EBCDIC */
- } __packed;
- struct { /* SMC-D */
- u64 gid; /* Sender GID */
- u64 token; /* DMB token */
- u8 dmbe_idx; /* DMBE index */
+ u16 reserved4;
+ __be32 linkid; /* Link identifier */
+} __packed;
+
+#define SMC_CLC_OS_ZOS 1
+#define SMC_CLC_OS_LINUX 2
+#define SMC_CLC_OS_AIX 3
+
+struct smc_clc_first_contact_ext {
+ u8 reserved1;
#if defined(__BIG_ENDIAN_BITFIELD)
- u8 dmbe_size : 4, /* buf size (compressed) */
- reserved3 : 4;
+ u8 os_type : 4,
+ release : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
- u8 reserved3 : 4,
- dmbe_size : 4;
+ u8 release : 4,
+ os_type : 4;
#endif
- u16 reserved4;
- u32 linkid; /* Link identifier */
+ u8 reserved2[2];
+ u8 hostname[SMC_MAX_HOSTNAME_LEN];
+};
+
+struct smc_clc_msg_accept_confirm { /* clc accept / confirm message */
+ struct smc_clc_msg_hdr hdr;
+ union {
+ struct smcr_clc_msg_accept_confirm r0; /* SMC-R */
+ struct { /* SMC-D */
+ struct smcd_clc_msg_accept_confirm_common d0;
u32 reserved5[3];
- struct smc_clc_msg_trail smcd_trl;
- /* eye catcher "SMCD" EBCDIC */
- } __packed;
+ };
};
} __packed; /* format defined in RFC7609 */
+struct smc_clc_msg_accept_confirm_v2 { /* clc accept / confirm message */
+ struct smc_clc_msg_hdr hdr;
+ union {
+ struct smcr_clc_msg_accept_confirm r0; /* SMC-R */
+ struct { /* SMC-D */
+ struct smcd_clc_msg_accept_confirm_common d0;
+ __be16 chid;
+ u8 eid[SMC_MAX_EID_LEN];
+ u8 reserved5[8];
+ };
+ };
+};
+
struct smc_clc_msg_decline { /* clc decline message */
struct smc_clc_msg_hdr hdr;
u8 id_for_peer[SMC_SYSTEMID_LEN]; /* sender peer_id */
__be32 peer_diagnosis; /* diagnosis information */
- u8 reserved2[4];
- struct smc_clc_msg_trail trl; /* eye catcher "SMCR" EBCDIC */
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 os_type : 4,
+ reserved : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 reserved : 4,
+ os_type : 4;
+#endif
+ u8 reserved2[3];
+ struct smc_clc_msg_trail trl; /* eye catcher "SMCD" or "SMCR" EBCDIC */
} __aligned(4);
/* determine start of the prefix area within the proposal message */
@@ -179,16 +263,58 @@ smc_clc_proposal_get_prefix(struct smc_clc_msg_proposal *pclc)
((u8 *)pclc + sizeof(*pclc) + ntohs(pclc->iparea_offset));
}
+static inline bool smcr_indicated(int smc_type)
+{
+ return smc_type == SMC_TYPE_R || smc_type == SMC_TYPE_B;
+}
+
+static inline bool smcd_indicated(int smc_type)
+{
+ return smc_type == SMC_TYPE_D || smc_type == SMC_TYPE_B;
+}
+
/* get SMC-D info from proposal message */
static inline struct smc_clc_msg_smcd *
smc_get_clc_msg_smcd(struct smc_clc_msg_proposal *prop)
{
- if (ntohs(prop->iparea_offset) != sizeof(struct smc_clc_msg_smcd))
+ if (smcd_indicated(prop->hdr.typev1) &&
+ ntohs(prop->iparea_offset) != sizeof(struct smc_clc_msg_smcd))
return NULL;
return (struct smc_clc_msg_smcd *)(prop + 1);
}
+static inline struct smc_clc_v2_extension *
+smc_get_clc_v2_ext(struct smc_clc_msg_proposal *prop)
+{
+ struct smc_clc_msg_smcd *prop_smcd = smc_get_clc_msg_smcd(prop);
+
+ if (!prop_smcd || !ntohs(prop_smcd->v2_ext_offset))
+ return NULL;
+
+ return (struct smc_clc_v2_extension *)
+ ((u8 *)prop_smcd +
+ offsetof(struct smc_clc_msg_smcd, v2_ext_offset) +
+ sizeof(prop_smcd->v2_ext_offset) +
+ ntohs(prop_smcd->v2_ext_offset));
+}
+
+static inline struct smc_clc_smcd_v2_extension *
+smc_get_clc_smcd_v2_ext(struct smc_clc_v2_extension *prop_v2ext)
+{
+ if (!prop_v2ext)
+ return NULL;
+ if (!ntohs(prop_v2ext->hdr.smcd_v2_ext_offset))
+ return NULL;
+
+ return (struct smc_clc_smcd_v2_extension *)
+ ((u8 *)prop_v2ext +
+ offsetof(struct smc_clc_v2_extension, hdr) +
+ offsetof(struct smc_clnt_opts_area_hdr, smcd_v2_ext_offset) +
+ sizeof(prop_v2ext->hdr.smcd_v2_ext_offset) +
+ ntohs(prop_v2ext->hdr.smcd_v2_ext_offset));
+}
+
struct smcd_dev;
struct smc_init_info;
@@ -196,10 +322,12 @@ int smc_clc_prfx_match(struct socket *clcsock,
struct smc_clc_msg_proposal_prefix *prop);
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
u8 expected_type, unsigned long timeout);
-int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info);
-int smc_clc_send_proposal(struct smc_sock *smc, int smc_type,
- struct smc_init_info *ini);
-int smc_clc_send_confirm(struct smc_sock *smc);
-int smc_clc_send_accept(struct smc_sock *smc, int srv_first_contact);
+int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info, u8 version);
+int smc_clc_send_proposal(struct smc_sock *smc, struct smc_init_info *ini);
+int smc_clc_send_confirm(struct smc_sock *smc, bool clnt_first_contact,
+ u8 version);
+int smc_clc_send_accept(struct smc_sock *smc, bool srv_first_contact,
+ u8 version);
+void smc_clc_init(void) __init;
#endif
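The two lookup helpers above resolve the optional CLC v2 extensions purely by
offset arithmetic: every offset field counts from the first byte *after*
itself, hence the base + offsetof() + sizeof() + ntohs() chain. A minimal
userspace sketch of the same arithmetic; demo_smcd and its field sizes are
illustrative stand-ins, not the real wire format:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

struct demo_smcd {                       /* stand-in for smc_clc_msg_smcd */
        uint8_t  ism[10];                /* native GID + CHID */
        uint16_t v2_ext_offset;          /* big endian on the wire */
        uint8_t  reserved[28];
};

int main(void)
{
        struct demo_smcd msg = { .v2_ext_offset = htons(28) };
        uint8_t *base = (uint8_t *)&msg;
        uint8_t *ext;

        /* the offset counts from the first byte after v2_ext_offset itself */
        ext = base + offsetof(struct demo_smcd, v2_ext_offset) +
              sizeof(msg.v2_ext_offset) + ntohs(msg.v2_ext_offset);

        printf("v2 extension starts at byte %td\n", ext - base); /* 40 */
        return 0;
}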
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 0e7409e469c0..0f9ffba07d26 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -210,9 +210,9 @@ again:
sk->sk_state = SMC_CLOSED;
sk->sk_state_change(sk); /* wake up accept */
if (smc->clcsock && smc->clcsock->sk) {
+ smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
+ smc->clcsock->sk->sk_user_data = NULL;
rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
- /* wake up kernel_accept of smc_tcp_listen_worker */
- smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
}
smc_close_cleanup_listen(sk);
release_sock(sk);
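The reordered listen-close path above restores the clcsock's original
sk_data_ready (and clears sk_user_data) before kernel_sock_shutdown(), so the
wakeup caused by the shutdown runs TCP's default handler instead of SMC's
stale listen hook. A toy function-pointer sketch of that ordering; every name
here is a stand-in:

#include <stdio.h>

static void (*sk_data_ready)(void);      /* stand-in for the socket callback */

static void tcp_data_ready(void) { puts("tcp default wakeup"); }
static void smc_listen_hook(void) { puts("SMC listen hook (stale)"); }

static void sock_shutdown(void)
{
        sk_data_ready();                 /* shutdown wakes pending readers */
}

int main(void)
{
        sk_data_ready = smc_listen_hook; /* installed while listening */

        sk_data_ready = tcp_data_ready;  /* the fix: restore before shutdown */
        sock_shutdown();                 /* now runs tcp_data_ready() */
        return 0;
}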
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index a406627b1d55..d790c43c473f 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -34,7 +34,6 @@
#define SMC_LGR_NUM_INCR 256
#define SMC_LGR_FREE_DELAY_SERV (600 * HZ)
#define SMC_LGR_FREE_DELAY_CLNT (SMC_LGR_FREE_DELAY_SERV + 10 * HZ)
-#define SMC_LGR_FREE_DELAY_FAST (8 * HZ)
static struct smc_lgr_list smc_lgr_list = { /* established link groups */
.lock = __SPIN_LOCK_UNLOCKED(smc_lgr_list.lock),
@@ -70,7 +69,7 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
* creation. For client use a somewhat higher removal delay time,
* otherwise there is a risk of out-of-sync link groups.
*/
- if (!lgr->freeing && !lgr->freefast) {
+ if (!lgr->freeing) {
mod_delayed_work(system_wq, &lgr->free_work,
(!lgr->is_smcd && lgr->role == SMC_CLNT) ?
SMC_LGR_FREE_DELAY_CLNT :
@@ -78,15 +77,6 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
}
}
-void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
-{
- if (!lgr->freeing && !lgr->freefast) {
- lgr->freefast = 1;
- mod_delayed_work(system_wq, &lgr->free_work,
- SMC_LGR_FREE_DELAY_FAST);
- }
-}
-
/* Register connection's alert token in our lookup structure.
* To use rbtrees we have to implement our own insert core.
* Requires @conns_lock
@@ -227,7 +217,7 @@ void smc_lgr_cleanup_early(struct smc_connection *conn)
if (!list_empty(lgr_list))
list_del_init(lgr_list);
spin_unlock_bh(lgr_lock);
- smc_lgr_schedule_free_work_fast(lgr);
+ __smc_lgr_terminate(lgr, true);
}
static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
@@ -385,7 +375,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
int i;
if (ini->is_smcd && ini->vlan_id) {
- if (smc_ism_get_vlan(ini->ism_dev, ini->vlan_id)) {
+ if (smc_ism_get_vlan(ini->ism_dev[ini->ism_selected],
+ ini->vlan_id)) {
rc = SMC_CLC_DECL_ISMVLANERR;
goto out;
}
@@ -396,10 +387,15 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
rc = SMC_CLC_DECL_MEM;
goto ism_put_vlan;
}
+ lgr->tx_wq = alloc_workqueue("smc_tx_wq-%*phN", 0, 0,
+ SMC_LGR_ID_SIZE, &lgr->id);
+ if (!lgr->tx_wq) {
+ rc = -ENOMEM;
+ goto free_lgr;
+ }
lgr->is_smcd = ini->is_smcd;
lgr->sync_err = 0;
lgr->terminating = 0;
- lgr->freefast = 0;
lgr->freeing = 0;
lgr->vlan_id = ini->vlan_id;
mutex_init(&lgr->sndbufs_lock);
@@ -417,13 +413,14 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lgr->conns_all = RB_ROOT;
if (ini->is_smcd) {
/* SMC-D specific settings */
- get_device(&ini->ism_dev->dev);
- lgr->peer_gid = ini->ism_gid;
- lgr->smcd = ini->ism_dev;
- lgr_list = &ini->ism_dev->lgr_list;
+ get_device(&ini->ism_dev[ini->ism_selected]->dev);
+ lgr->peer_gid = ini->ism_peer_gid[ini->ism_selected];
+ lgr->smcd = ini->ism_dev[ini->ism_selected];
+ lgr_list = &ini->ism_dev[ini->ism_selected]->lgr_list;
lgr_lock = &lgr->smcd->lgr_lock;
+ lgr->smc_version = ini->smcd_version;
lgr->peer_shutdown = 0;
- atomic_inc(&ini->ism_dev->lgr_cnt);
+ atomic_inc(&ini->ism_dev[ini->ism_selected]->lgr_cnt);
} else {
/* SMC-R specific settings */
lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
@@ -437,7 +434,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
lnk = &lgr->lnk[link_idx];
rc = smcr_link_init(lgr, lnk, link_idx, ini);
if (rc)
- goto free_lgr;
+ goto free_wq;
lgr_list = &smc_lgr_list.list;
lgr_lock = &smc_lgr_list.lock;
atomic_inc(&lgr_cnt);
@@ -448,11 +445,13 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
spin_unlock_bh(lgr_lock);
return 0;
+free_wq:
+ destroy_workqueue(lgr->tx_wq);
free_lgr:
kfree(lgr);
ism_put_vlan:
if (ini->is_smcd && ini->vlan_id)
- smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
+ smc_ism_put_vlan(ini->ism_dev[ini->ism_selected], ini->vlan_id);
out:
if (rc < 0) {
if (rc == -ENOMEM)
@@ -517,7 +516,7 @@ static int smc_switch_cursor(struct smc_sock *smc, struct smc_cdc_tx_pend *pend,
smc->sk.sk_state != SMC_CLOSED) {
rc = smcr_cdc_msg_send_validation(conn, pend, wr_buf);
if (!rc) {
- schedule_delayed_work(&conn->tx_work, 0);
+ queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work, 0);
smc->sk.sk_data_ready(&smc->sk);
}
} else {
@@ -824,11 +823,10 @@ static void smc_lgr_free(struct smc_link_group *lgr)
}
smc_lgr_free_bufs(lgr);
+ destroy_workqueue(lgr->tx_wq);
if (lgr->is_smcd) {
- if (!lgr->terminating) {
- smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
- put_device(&lgr->smcd->dev);
- }
+ smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
+ put_device(&lgr->smcd->dev);
if (!atomic_dec_return(&lgr->smcd->lgr_cnt))
wake_up(&lgr->smcd->lgrs_deleted);
} else {
@@ -889,8 +887,6 @@ static void smc_lgr_cleanup(struct smc_link_group *lgr)
if (lgr->is_smcd) {
smc_ism_signal_shutdown(lgr);
smcd_unregister_all_dmbs(lgr);
- smc_ism_put_vlan(lgr->smcd, lgr->vlan_id);
- put_device(&lgr->smcd->dev);
} else {
u32 rsn = lgr->llc_termination_rsn;
@@ -1294,11 +1290,13 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
spinlock_t *lgr_lock;
int rc = 0;
- lgr_list = ini->is_smcd ? &ini->ism_dev->lgr_list : &smc_lgr_list.list;
- lgr_lock = ini->is_smcd ? &ini->ism_dev->lgr_lock : &smc_lgr_list.lock;
- ini->cln_first_contact = SMC_FIRST_CONTACT;
+ lgr_list = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_list :
+ &smc_lgr_list.list;
+ lgr_lock = ini->is_smcd ? &ini->ism_dev[ini->ism_selected]->lgr_lock :
+ &smc_lgr_list.lock;
+ ini->first_contact_local = 1;
role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
- if (role == SMC_CLNT && ini->srv_first_contact)
+ if (role == SMC_CLNT && ini->first_contact_peer)
/* create new link group as well */
goto create;
@@ -1307,14 +1305,15 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
list_for_each_entry(lgr, lgr_list, list) {
write_lock_bh(&lgr->conns_lock);
if ((ini->is_smcd ?
- smcd_lgr_match(lgr, ini->ism_dev, ini->ism_gid) :
+ smcd_lgr_match(lgr, ini->ism_dev[ini->ism_selected],
+ ini->ism_peer_gid[ini->ism_selected]) :
smcr_lgr_match(lgr, ini->ib_lcl, role, ini->ib_clcqpn)) &&
!lgr->sync_err &&
lgr->vlan_id == ini->vlan_id &&
(role == SMC_CLNT || ini->is_smcd ||
lgr->conns_num < SMC_RMBS_PER_LGR_MAX)) {
/* link group found */
- ini->cln_first_contact = SMC_REUSE_CONTACT;
+ ini->first_contact_local = 0;
conn->lgr = lgr;
rc = smc_lgr_register_conn(conn, false);
write_unlock_bh(&lgr->conns_lock);
@@ -1328,8 +1327,8 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
if (rc)
return rc;
- if (role == SMC_CLNT && !ini->srv_first_contact &&
- ini->cln_first_contact == SMC_FIRST_CONTACT) {
+ if (role == SMC_CLNT && !ini->first_contact_peer &&
+ ini->first_contact_local) {
/* Server reuses a link group, but Client wants to start
* a new one
* send out_of_sync decline, reason synchr. error
@@ -1338,7 +1337,7 @@ int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini)
}
create:
- if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
+ if (ini->first_contact_local) {
rc = smc_lgr_create(smc, ini);
if (rc)
goto out;
@@ -1597,7 +1596,7 @@ out:
return rc;
}
-#define SMCD_DMBE_SIZES 7 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
+#define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */
static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
bool is_dmb, int bufsize)
@@ -1616,7 +1615,8 @@ static struct smc_buf_desc *smcd_new_buf_create(struct smc_link_group *lgr,
rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
if (rc) {
kfree(buf_desc);
- return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) : ERR_PTR(rc);
+ return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) :
+ ERR_PTR(-EIO);
}
buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
/* CDC header stored in buf. So, pretend it was smaller */
@@ -1892,8 +1892,8 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn,
struct smc_link *lnk,
struct smc_clc_msg_accept_confirm *clc)
{
- conn->rtoken_idx = smc_rtoken_add(lnk, clc->rmb_dma_addr,
- clc->rmb_rkey);
+ conn->rtoken_idx = smc_rtoken_add(lnk, clc->r0.rmb_dma_addr,
+ clc->r0.rmb_rkey);
if (conn->rtoken_idx < 0)
return conn->rtoken_idx;
return 0;
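smc_lgr_create() above names the new per-link-group TX workqueue with the
kernel printf extension "%*phN", which renders a byte array as bare hex.
Standard C has no such specifier, so the sketch below builds the equivalent
name by hand; the id bytes are made up for the example:

#include <stdio.h>
#include <stdint.h>

#define SMC_LGR_ID_SIZE 4                /* matches the kernel constant */

int main(void)
{
        uint8_t id[SMC_LGR_ID_SIZE] = { 0xde, 0xad, 0xbe, 0xef };
        char name[32];
        int i, n = snprintf(name, sizeof(name), "smc_tx_wq-");

        for (i = 0; i < SMC_LGR_ID_SIZE; i++)
                n += snprintf(name + n, sizeof(name) - n, "%02x", id[i]);
        printf("%s\n", name);            /* smc_tx_wq-deadbeef */
        return 0;
}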
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 1c4d5439d0ff..f1e867ce2e63 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -137,9 +137,6 @@ struct smc_link {
#define SMC_LINKS_PER_LGR_MAX 3
#define SMC_SINGLE_LINK 0
-#define SMC_FIRST_CONTACT 1 /* first contact to a peer */
-#define SMC_REUSE_CONTACT 0 /* follow-on contact to a peer*/
-
/* tx/rx buffer list element for sndbufs list and rmbs list of a lgr */
struct smc_buf_desc {
struct list_head list;
@@ -228,12 +225,17 @@ struct smc_link_group {
u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */
struct delayed_work free_work; /* delayed freeing of an lgr */
struct work_struct terminate_work; /* abnormal lgr termination */
+ struct workqueue_struct *tx_wq; /* wq for conn. tx workers */
u8 sync_err : 1; /* lgr no longer fits to peer */
u8 terminating : 1;/* lgr is terminating */
- u8 freefast : 1; /* free worker scheduled fast */
u8 freeing : 1; /* lgr is being freed */
bool is_smcd; /* SMC-R or SMC-D */
+ u8 smc_version;
+ u8 negotiated_eid[SMC_MAX_EID_LEN];
+ u8 peer_os; /* peer operating system */
+ u8 peer_smc_release;
+ u8 peer_hostname[SMC_MAX_HOSTNAME_LEN];
union {
struct { /* SMC-R */
enum smc_lgr_role role;
@@ -294,9 +296,11 @@ struct smc_clc_msg_local;
struct smc_init_info {
u8 is_smcd;
+ u8 smc_type_v1;
+ u8 smc_type_v2;
+ u8 first_contact_peer;
+ u8 first_contact_local;
unsigned short vlan_id;
- int srv_first_contact;
- int cln_first_contact;
/* SMC-R */
struct smc_clc_msg_local *ib_lcl;
struct smc_ib_device *ib_dev;
@@ -304,8 +308,12 @@ struct smc_init_info {
u8 ib_port;
u32 ib_clcqpn;
/* SMC-D */
- u64 ism_gid;
- struct smcd_dev *ism_dev;
+ u64 ism_peer_gid[SMC_MAX_ISM_DEVS + 1];
+ struct smcd_dev *ism_dev[SMC_MAX_ISM_DEVS + 1];
+ u16 ism_chid[SMC_MAX_ISM_DEVS + 1];
+ u8 ism_offered_cnt; /* # of ISM devices offered */
+ u8 ism_selected; /* index of selected ISM dev*/
+ u8 smcd_version;
};
/* Find the connection associated with the given alert token in the link group.
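The reworked smc_init_info above turns the single ISM device/GID pair into
arrays of SMC_MAX_ISM_DEVS + 1 entries: slot 0 carries the pnetid-determined
device (the smc_pnet.c hunk below fills ini->ism_dev[0]), the remaining slots
hold SMC-Dv2 candidates, and ism_selected records the winner. A hedged
userspace illustration of that bookkeeping; the value of SMC_MAX_ISM_DEVS,
the device names and the CHID matching are all assumptions for the example:

#include <stdio.h>

#define SMC_MAX_ISM_DEVS 8               /* assumed value */

struct demo_init_info {
        const char *ism_dev[SMC_MAX_ISM_DEVS + 1];
        unsigned short ism_chid[SMC_MAX_ISM_DEVS + 1];
        unsigned char ism_offered_cnt;   /* # of V2 candidates in slots 1.. */
        unsigned char ism_selected;      /* index of the chosen device */
};

int main(void)
{
        struct demo_init_info ini = {
                .ism_dev  = { "ismv1", "ismv2-a", "ismv2-b" },
                .ism_chid = { 0, 0x1234, 0x5678 },
                .ism_offered_cnt = 2,
        };
        int i;

        /* pick the first offered V2 candidate the peer also listed,
         * say the one with CHID 0x5678
         */
        for (i = 1; i <= ini.ism_offered_cnt; i++)
                if (ini.ism_chid[i] == 0x5678) {
                        ini.ism_selected = i;
                        break;
                }
        printf("selected slot %d (%s)\n", ini.ism_selected,
               ini.ism_dev[ini.ism_selected]);
        return 0;
}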
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index da9ba6d1679b..f15fca59b4b2 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -22,6 +22,15 @@
#include "smc.h"
#include "smc_core.h"
+struct smc_diag_dump_ctx {
+ int pos[2];
+};
+
+static struct smc_diag_dump_ctx *smc_dump_context(struct netlink_callback *cb)
+{
+ return (struct smc_diag_dump_ctx *)cb->ctx;
+}
+
static void smc_gid_be16_convert(__u8 *buf, u8 *gid_raw)
{
sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
@@ -193,13 +202,15 @@ errout:
}
static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
- struct netlink_callback *cb)
+ struct netlink_callback *cb, int p_type)
{
+ struct smc_diag_dump_ctx *cb_ctx = smc_dump_context(cb);
struct net *net = sock_net(skb->sk);
+ int snum = cb_ctx->pos[p_type];
struct nlattr *bc = NULL;
struct hlist_head *head;
+ int rc = 0, num = 0;
struct sock *sk;
- int rc = 0;
read_lock(&prot->h.smc_hash->lock);
head = &prot->h.smc_hash->ht;
@@ -209,13 +220,18 @@ static int smc_diag_dump_proto(struct proto *prot, struct sk_buff *skb,
sk_for_each(sk, head) {
if (!net_eq(sock_net(sk), net))
continue;
+ if (num < snum)
+ goto next;
rc = __smc_diag_dump(sk, skb, cb, nlmsg_data(cb->nlh), bc);
- if (rc)
- break;
+ if (rc < 0)
+ goto out;
+next:
+ num++;
}
out:
read_unlock(&prot->h.smc_hash->lock);
+ cb_ctx->pos[p_type] = num;
return rc;
}
@@ -223,10 +239,10 @@ static int smc_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int rc = 0;
- rc = smc_diag_dump_proto(&smc_proto, skb, cb);
+ rc = smc_diag_dump_proto(&smc_proto, skb, cb, SMCPROTO_SMC);
if (!rc)
- rc = smc_diag_dump_proto(&smc_proto6, skb, cb);
- return rc;
+ smc_diag_dump_proto(&smc_proto6, skb, cb, SMCPROTO_SMC6);
+ return skb->len;
}
static int smc_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
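The smc_diag changes above convert the socket dump to the resumable netlink
pattern: a cursor kept in cb->ctx records how many sockets were already
emitted, the next callback invocation skips that many, and returning skb->len
makes netlink call back until nothing new is produced. A self-contained
userspace simulation of the same control flow; BATCH plays the role of the
skb filling up, and all names are illustrative:

#include <stdio.h>

#define N_ENTRIES 10
#define BATCH 4                          /* pretend 4 records fit per call */

static int dump(int *pos)
{
        int emitted = 0, num = 0, e;

        for (e = 0; e < N_ENTRIES; e++) {
                if (num < *pos)
                        goto next;       /* already sent in an earlier call */
                if (emitted == BATCH)
                        break;           /* "skb" is full: stop here */
                printf("entry %d\n", e);
                emitted++;
next:
                num++;
        }
        *pos = num;                      /* remember where to resume */
        return emitted;                  /* 0 => dump complete */
}

int main(void)
{
        int pos = 0;

        while (dump(&pos))               /* netlink re-invokes the dump */
                puts("-- next callback --");
        return 0;
}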
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 998c525de785..6abbdd09a580 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -21,7 +21,9 @@ struct smcd_dev_list smcd_dev_list = {
.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};
-/* Test if an ISM communication is possible. */
+bool smc_ism_v2_capable;
+
+/* Test if an ISM communication is possible - same CPC */
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
@@ -39,6 +41,16 @@ int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos,
return rc < 0 ? rc : 0;
}
+void smc_ism_get_system_eid(struct smcd_dev *smcd, u8 **eid)
+{
+ smcd->ops->get_system_eid(smcd, eid);
+}
+
+u16 smc_ism_get_chid(struct smcd_dev *smcd)
+{
+ return smcd->ops->get_chid(smcd);
+}
+
/* Set a connection using this DMBE. */
void smc_ism_set_conn(struct smc_connection *conn)
{
@@ -319,7 +331,18 @@ EXPORT_SYMBOL_GPL(smcd_alloc_dev);
int smcd_register_dev(struct smcd_dev *smcd)
{
mutex_lock(&smcd_dev_list.mutex);
- list_add_tail(&smcd->list, &smcd_dev_list.list);
+ if (list_empty(&smcd_dev_list.list)) {
+ u8 *system_eid = NULL;
+
+ smc_ism_get_system_eid(smcd, &system_eid);
+ if (system_eid[24] != '0' || system_eid[28] != '0')
+ smc_ism_v2_capable = true;
+ }
+ /* sort list: devices without pnetid before devices with pnetid */
+ if (smcd->pnetid[0])
+ list_add_tail(&smcd->list, &smcd_dev_list.list);
+ else
+ list_add(&smcd->list, &smcd_dev_list.list);
mutex_unlock(&smcd_dev_list.mutex);
pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
@@ -399,3 +422,8 @@ void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
spin_unlock_irqrestore(&smcd->lock, flags);
}
EXPORT_SYMBOL_GPL(smcd_handle_irq);
+
+void __init smc_ism_init(void)
+{
+ smc_ism_v2_capable = false;
+}
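smcd_register_dev() above derives ISM-V2 capability from the system EID of
the first registered device: if the bytes at offsets 24 and 28 are both '0',
the platform is treated as not V2 capable. A small sketch of just that test;
the EID strings below are fabricated for the example and follow no real SEID
layout:

#include <stdio.h>
#include <stdbool.h>

static bool seid_indicates_v2(const char *seid)
{
        /* same test as in smcd_register_dev() */
        return seid[24] != '0' || seid[28] != '0';
}

int main(void)
{
        const char v1[] = "IBM-SYSZ-ISMSEID0000000000000000";
        const char v2[] = "IBM-SYSZ-ISMSEID0000000010000000";

        printf("v1 capable: %d\n", seid_indicates_v2(v1)); /* 0 */
        printf("v2 capable: %d\n", seid_indicates_v2(v2)); /* 1 */
        return 0;
}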
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h
index 81cc4537efd3..8048e09ddcf8 100644
--- a/net/smc/smc_ism.h
+++ b/net/smc/smc_ism.h
@@ -19,7 +19,10 @@ struct smcd_dev_list { /* List of SMCD devices */
struct mutex mutex; /* Protects list of devices */
};
-extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */
+extern struct smcd_dev_list smcd_dev_list; /* list of smcd devices */
+extern bool smc_ism_v2_capable; /* HW supports ISM V2 and thus
+ * System EID is defined
+ */
struct smc_ism_vlanid { /* VLAN id set on ISM device */
struct list_head list;
@@ -47,4 +50,7 @@ int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc);
int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos,
void *data, size_t len);
int smc_ism_signal_shutdown(struct smc_link_group *lgr);
+void smc_ism_get_system_eid(struct smcd_dev *dev, u8 **eid);
+u16 smc_ism_get_chid(struct smcd_dev *dev);
+void smc_ism_init(void);
#endif
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
index 3ea33466ebe9..273eaf1bfe49 100644
--- a/net/smc/smc_llc.c
+++ b/net/smc/smc_llc.c
@@ -233,8 +233,6 @@ static bool smc_llc_flow_start(struct smc_llc_flow *flow,
default:
flow->type = SMC_LLC_FLOW_NONE;
}
- if (qentry == lgr->delayed_event)
- lgr->delayed_event = NULL;
smc_llc_flow_qentry_set(flow, qentry);
spin_unlock_bh(&lgr->llc_flow_lock);
return true;
@@ -1209,7 +1207,7 @@ static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
/* enqueue a local add_link req to trigger a new add_link flow */
void smc_llc_add_link_local(struct smc_link *link)
{
- struct smc_llc_msg_add_link add_llc = {0};
+ struct smc_llc_msg_add_link add_llc = {};
add_llc.hd.length = sizeof(add_llc);
add_llc.hd.common.type = SMC_LLC_ADD_LINK;
@@ -1242,7 +1240,7 @@ out:
*/
void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
{
- struct smc_llc_msg_del_link del_llc = {0};
+ struct smc_llc_msg_del_link del_llc = {};
del_llc.hd.length = sizeof(del_llc);
del_llc.hd.common.type = SMC_LLC_DELETE_LINK;
@@ -1314,7 +1312,7 @@ out:
*/
void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
{
- struct smc_llc_msg_del_link delllc = {0};
+ struct smc_llc_msg_del_link delllc = {};
int i;
delllc.hd.common.type = SMC_LLC_DELETE_LINK;
@@ -1603,13 +1601,12 @@ static void smc_llc_event_work(struct work_struct *work)
struct smc_llc_qentry *qentry;
if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
- if (smc_link_usable(lgr->delayed_event->link)) {
- smc_llc_event_handler(lgr->delayed_event);
- } else {
- qentry = lgr->delayed_event;
- lgr->delayed_event = NULL;
+ qentry = lgr->delayed_event;
+ lgr->delayed_event = NULL;
+ if (smc_link_usable(qentry->link))
+ smc_llc_event_handler(qentry);
+ else
kfree(qentry);
- }
}
again:
@@ -1691,7 +1688,7 @@ static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
list_add_tail(&qentry->list, &lgr->llc_event_q);
spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
- schedule_work(&lgr->llc_event_work);
+ queue_work(system_highpri_wq, &lgr->llc_event_work);
}
/* copy received msg and add it to the event queue */
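The smc_llc_event_work() change above switches to a take-then-handle pattern:
the delayed event pointer is detached before it is processed, so no other
path can observe or free the same entry twice. A compact userspace sketch of
the idiom; names are illustrative and locking is elided:

#include <stdlib.h>

struct qentry { int data; };

static struct qentry *delayed_event;     /* stand-in for lgr->delayed_event */

static void handle(struct qentry *q)
{
        free(q);                         /* event consumed */
}

static void process_delayed(int link_usable)
{
        struct qentry *q = delayed_event;  /* detach first ... */

        delayed_event = NULL;              /* ... so it cannot be seen twice */
        if (!q)
                return;
        if (link_usable)
                handle(q);
        else
                free(q);                   /* unusable link: drop the event */
}

int main(void)
{
        delayed_event = malloc(sizeof(*delayed_event));
        process_delayed(1);              /* handled and freed exactly once */
        process_delayed(1);              /* already detached: a no-op */
        return 0;
}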
diff --git a/net/smc/smc_netns.h b/net/smc/smc_netns.h
index e7a8fc4ae02f..0f4f35aa43ad 100644
--- a/net/smc/smc_netns.h
+++ b/net/smc/smc_netns.h
@@ -16,5 +16,6 @@ extern unsigned int smc_net_id;
/* per-network namespace private data */
struct smc_net {
struct smc_pnettable pnettable;
+ struct smc_pnetids_ndev pnetids_ndev;
};
#endif
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 30e5fac7034e..f3c18b991d35 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -29,8 +29,7 @@
#include "smc_ism.h"
#include "smc_core.h"
-#define SMC_ASCII_BLANK 32
-
+static struct net_device *__pnet_find_base_ndev(struct net_device *ndev);
static struct net_device *pnet_find_base_ndev(struct net_device *ndev);
static const struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = {
@@ -73,14 +72,22 @@ struct smc_pnetentry {
};
};
+/* Check if the pnetid is set */
+bool smc_pnet_is_pnetid_set(u8 *pnetid)
+{
+ if (pnetid[0] == 0 || pnetid[0] == _S)
+ return false;
+ return true;
+}
+
/* Check if two given pnetids match */
static bool smc_pnet_match(u8 *pnetid1, u8 *pnetid2)
{
int i;
for (i = 0; i < SMC_MAX_PNETID_LEN; i++) {
- if ((pnetid1[i] == 0 || pnetid1[i] == SMC_ASCII_BLANK) &&
- (pnetid2[i] == 0 || pnetid2[i] == SMC_ASCII_BLANK))
+ if ((pnetid1[i] == 0 || pnetid1[i] == _S) &&
+ (pnetid2[i] == 0 || pnetid2[i] == _S))
break;
if (pnetid1[i] != pnetid2[i])
return false;
@@ -238,11 +245,10 @@ static int smc_pnet_remove_by_ndev(struct net_device *ndev)
static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port,
char *pnet_name)
{
- u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
bool applied = false;
mutex_lock(&smc_ib_devices.mutex);
- if (smc_pnet_match(ib_dev->pnetid[ib_port - 1], pnet_null)) {
+ if (!smc_pnet_is_pnetid_set(ib_dev->pnetid[ib_port - 1])) {
memcpy(ib_dev->pnetid[ib_port - 1], pnet_name,
SMC_MAX_PNETID_LEN);
ib_dev->pnetid_by_user[ib_port - 1] = true;
@@ -256,11 +262,10 @@ static bool smc_pnet_apply_ib(struct smc_ib_device *ib_dev, u8 ib_port,
*/
static bool smc_pnet_apply_smcd(struct smcd_dev *smcd_dev, char *pnet_name)
{
- u8 pnet_null[SMC_MAX_PNETID_LEN] = {0};
bool applied = false;
mutex_lock(&smcd_dev_list.mutex);
- if (smc_pnet_match(smcd_dev->pnetid, pnet_null)) {
+ if (!smc_pnet_is_pnetid_set(smcd_dev->pnetid)) {
memcpy(smcd_dev->pnetid, pnet_name, SMC_MAX_PNETID_LEN);
smcd_dev->pnetid_by_user = true;
applied = true;
@@ -708,10 +713,115 @@ static struct genl_family smc_pnet_nl_family __ro_after_init = {
.n_ops = ARRAY_SIZE(smc_pnet_ops)
};
+bool smc_pnet_is_ndev_pnetid(struct net *net, u8 *pnetid)
+{
+ struct smc_net *sn = net_generic(net, smc_net_id);
+ struct smc_pnetids_ndev_entry *pe;
+ bool rc = false;
+
+ read_lock(&sn->pnetids_ndev.lock);
+ list_for_each_entry(pe, &sn->pnetids_ndev.list, list) {
+ if (smc_pnet_match(pnetid, pe->pnetid)) {
+ rc = true;
+ goto unlock;
+ }
+ }
+
+unlock:
+ read_unlock(&sn->pnetids_ndev.lock);
+ return rc;
+}
+
+static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
+{
+ struct smc_net *sn = net_generic(net, smc_net_id);
+ struct smc_pnetids_ndev_entry *pe, *pi;
+
+ pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ write_lock(&sn->pnetids_ndev.lock);
+ list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
+ if (smc_pnet_match(pnetid, pi->pnetid)) {
+ refcount_inc(&pi->refcnt);
+ kfree(pe);
+ goto unlock;
+ }
+ }
+ refcount_set(&pe->refcnt, 1);
+ memcpy(pe->pnetid, pnetid, SMC_MAX_PNETID_LEN);
+ list_add_tail(&pe->list, &sn->pnetids_ndev.list);
+
+unlock:
+ write_unlock(&sn->pnetids_ndev.lock);
+ return 0;
+}
+
+static void smc_pnet_remove_pnetid(struct net *net, u8 *pnetid)
+{
+ struct smc_net *sn = net_generic(net, smc_net_id);
+ struct smc_pnetids_ndev_entry *pe, *pe2;
+
+ write_lock(&sn->pnetids_ndev.lock);
+ list_for_each_entry_safe(pe, pe2, &sn->pnetids_ndev.list, list) {
+ if (smc_pnet_match(pnetid, pe->pnetid)) {
+ if (refcount_dec_and_test(&pe->refcnt)) {
+ list_del(&pe->list);
+ kfree(pe);
+ }
+ break;
+ }
+ }
+ write_unlock(&sn->pnetids_ndev.lock);
+}
+
+static void smc_pnet_add_base_pnetid(struct net *net, struct net_device *dev,
+ u8 *ndev_pnetid)
+{
+ struct net_device *base_dev;
+
+ base_dev = __pnet_find_base_ndev(dev);
+ if (base_dev->flags & IFF_UP &&
+ !smc_pnetid_by_dev_port(base_dev->dev.parent, base_dev->dev_port,
+ ndev_pnetid)) {
+ /* add to PNETIDs list */
+ smc_pnet_add_pnetid(net, ndev_pnetid);
+ }
+}
+
+/* create initial list of netdevice pnetids */
+static void smc_pnet_create_pnetids_list(struct net *net)
+{
+ u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
+ struct net_device *dev;
+
+ rtnl_lock();
+ for_each_netdev(net, dev)
+ smc_pnet_add_base_pnetid(net, dev, ndev_pnetid);
+ rtnl_unlock();
+}
+
+/* clean up list of netdevice pnetids */
+static void smc_pnet_destroy_pnetids_list(struct net *net)
+{
+ struct smc_net *sn = net_generic(net, smc_net_id);
+ struct smc_pnetids_ndev_entry *pe, *temp_pe;
+
+ write_lock(&sn->pnetids_ndev.lock);
+ list_for_each_entry_safe(pe, temp_pe, &sn->pnetids_ndev.list, list) {
+ list_del(&pe->list);
+ kfree(pe);
+ }
+ write_unlock(&sn->pnetids_ndev.lock);
+}
+
static int smc_pnet_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(event_dev);
+ u8 ndev_pnetid[SMC_MAX_PNETID_LEN];
switch (event) {
case NETDEV_REBOOT:
@@ -721,6 +831,17 @@ static int smc_pnet_netdev_event(struct notifier_block *this,
case NETDEV_REGISTER:
smc_pnet_add_by_ndev(event_dev);
return NOTIFY_OK;
+ case NETDEV_UP:
+ smc_pnet_add_base_pnetid(net, event_dev, ndev_pnetid);
+ return NOTIFY_OK;
+ case NETDEV_DOWN:
+ event_dev = __pnet_find_base_ndev(event_dev);
+ if (!smc_pnetid_by_dev_port(event_dev->dev.parent,
+ event_dev->dev_port, ndev_pnetid)) {
+ /* remove from PNETIDs list */
+ smc_pnet_remove_pnetid(net, ndev_pnetid);
+ }
+ return NOTIFY_OK;
default:
return NOTIFY_DONE;
}
@@ -735,9 +856,14 @@ int smc_pnet_net_init(struct net *net)
{
struct smc_net *sn = net_generic(net, smc_net_id);
struct smc_pnettable *pnettable = &sn->pnettable;
+ struct smc_pnetids_ndev *pnetids_ndev = &sn->pnetids_ndev;
INIT_LIST_HEAD(&pnettable->pnetlist);
rwlock_init(&pnettable->lock);
+ INIT_LIST_HEAD(&pnetids_ndev->list);
+ rwlock_init(&pnetids_ndev->lock);
+
+ smc_pnet_create_pnetids_list(net);
return 0;
}
@@ -752,6 +878,7 @@ int __init smc_pnet_init(void)
rc = register_netdevice_notifier(&smc_netdev_notifier);
if (rc)
genl_unregister_family(&smc_pnet_nl_family);
+
return rc;
}
@@ -760,6 +887,7 @@ void smc_pnet_net_exit(struct net *net)
{
/* flush pnet table */
smc_pnet_remove_by_pnetid(net, NULL);
+ smc_pnet_destroy_pnetids_list(net);
}
void smc_pnet_exit(void)
@@ -768,16 +896,11 @@ void smc_pnet_exit(void)
genl_unregister_family(&smc_pnet_nl_family);
}
-/* Determine one base device for stacked net devices.
- * If the lower device level contains more than one devices
- * (for instance with bonding slaves), just the first device
- * is used to reach a base device.
- */
-static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
+static struct net_device *__pnet_find_base_ndev(struct net_device *ndev)
{
int i, nest_lvl;
- rtnl_lock();
+ ASSERT_RTNL();
nest_lvl = ndev->lower_level;
for (i = 0; i < nest_lvl; i++) {
struct list_head *lower = &ndev->adj_list.lower;
@@ -787,6 +910,18 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
lower = lower->next;
ndev = netdev_lower_get_next(ndev, &lower);
}
+ return ndev;
+}
+
+/* Determine one base device for stacked net devices.
+ * If the lower device level contains more than one device
+ * (for instance with bonding slaves), just the first device
+ * is used to reach a base device.
+ */
+static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
+{
+ rtnl_lock();
+ ndev = __pnet_find_base_ndev(ndev);
rtnl_unlock();
return ndev;
}
@@ -928,8 +1063,11 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
mutex_lock(&smcd_dev_list.mutex);
list_for_each_entry(ismdev, &smcd_dev_list.list, list) {
if (smc_pnet_match(ismdev->pnetid, ndev_pnetid) &&
- !ismdev->going_away) {
- ini->ism_dev = ismdev;
+ !ismdev->going_away &&
+ (!ini->ism_peer_gid[0] ||
+ !smc_ism_cantalk(ini->ism_peer_gid[0], ini->vlan_id,
+ ismdev))) {
+ ini->ism_dev[0] = ismdev;
break;
}
}
@@ -963,7 +1101,7 @@ void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini)
{
struct dst_entry *dst = sk_dst_get(sk);
- ini->ism_dev = NULL;
+ ini->ism_dev[0] = NULL;
if (!dst)
goto out;
if (!dst->dev)
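The new pnetids_ndev registry above keeps one refcounted entry per pnetid of
the net devices that are UP: NETDEV_UP adds or re-references an entry,
NETDEV_DOWN drops a reference and frees the entry when it reaches zero. A
minimal userspace model of that lifecycle, assuming a plain singly linked
list in place of the kernel's list_head, refcount_t and locks:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pnetid_entry {
        char id[17];
        int refcnt;
        struct pnetid_entry *next;
};

static struct pnetid_entry *head;

static void pnetid_add(const char *id)
{
        struct pnetid_entry *e;

        for (e = head; e; e = e->next)
                if (!strcmp(e->id, id)) {
                        e->refcnt++;     /* known pnetid: re-reference */
                        return;
                }
        e = calloc(1, sizeof(*e));
        if (!e)
                return;
        snprintf(e->id, sizeof(e->id), "%s", id);
        e->refcnt = 1;
        e->next = head;
        head = e;
}

static void pnetid_remove(const char *id)
{
        struct pnetid_entry **p, *e;

        for (p = &head; *p; p = &(*p)->next)
                if (!strcmp((*p)->id, id) && --(*p)->refcnt == 0) {
                        e = *p;
                        *p = e->next;    /* unlink and free at refcnt 0 */
                        free(e);
                        return;
                }
}

int main(void)
{
        pnetid_add("NET1");              /* eth0 up */
        pnetid_add("NET1");              /* eth1 up, same pnetid */
        pnetid_remove("NET1");           /* eth0 down: entry survives */
        printf("NET1 present: %d\n", head != NULL); /* 1 */
        pnetid_remove("NET1");           /* eth1 down: entry freed */
        printf("NET1 present: %d\n", head != NULL); /* 0 */
        return 0;
}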
diff --git a/net/smc/smc_pnet.h b/net/smc/smc_pnet.h
index 811a65986691..14039272f7e4 100644
--- a/net/smc/smc_pnet.h
+++ b/net/smc/smc_pnet.h
@@ -12,6 +12,8 @@
#ifndef _SMC_PNET_H
#define _SMC_PNET_H
+#include <net/smc.h>
+
#if IS_ENABLED(CONFIG_HAVE_PNETID)
#include <asm/pnet.h>
#endif
@@ -31,6 +33,17 @@ struct smc_pnettable {
struct list_head pnetlist;
};
+struct smc_pnetids_ndev { /* list of pnetids for net devices in UP state */
+ struct list_head list;
+ rwlock_t lock;
+};
+
+struct smc_pnetids_ndev_entry {
+ struct list_head list;
+ u8 pnetid[SMC_MAX_PNETID_LEN];
+ refcount_t refcnt;
+};
+
static inline int smc_pnetid_by_dev_port(struct device *dev,
unsigned short port, u8 *pnetid)
{
@@ -52,4 +65,6 @@ int smc_pnetid_by_table_smcd(struct smcd_dev *smcd);
void smc_pnet_find_alt_roce(struct smc_link_group *lgr,
struct smc_init_info *ini,
struct smc_ib_device *known_dev);
+bool smc_pnet_is_ndev_pnetid(struct net *net, u8 *pnetid);
+bool smc_pnet_is_pnetid_set(u8 *pnetid);
#endif
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index 54ba0443847e..4532c16bf85e 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -228,8 +228,8 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
/* for a corked socket defer the RDMA writes if there
* is still sufficient sndbuf_space available
*/
- schedule_delayed_work(&conn->tx_work,
- SMC_TX_CORK_DELAY);
+ queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
+ SMC_TX_CORK_DELAY);
else
smc_tx_sndbuf_nonempty(conn);
} /* while (msg_data_left(msg)) */
@@ -499,7 +499,7 @@ static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
if (conn->killed)
return -EPIPE;
rc = 0;
- mod_delayed_work(system_wq, &conn->tx_work,
+ mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
SMC_TX_WORK_DELAY);
}
return rc;
@@ -623,8 +623,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
return;
if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
!conn->killed) {
- schedule_delayed_work(&conn->tx_work,
- SMC_TX_WORK_DELAY);
+ queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
+ SMC_TX_WORK_DELAY);
return;
}
}
diff --git a/net/socket.c b/net/socket.c
index 58cac2da5f66..6e6cccc2104f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2628,9 +2628,11 @@ long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
struct user_msghdr __user *umsg,
struct sockaddr __user *uaddr, unsigned int flags)
{
- /* disallow ancillary data requests from this path */
- if (msg->msg_control || msg->msg_controllen)
- return -EINVAL;
+ if (msg->msg_control || msg->msg_controllen) {
+ /* disallow ancillary data reqs unless cmsg is plain data */
+ if (!(sock->ops->flags & PROTO_CMSG_DATA_ONLY))
+ return -EINVAL;
+ }
return ____sys_recvmsg(sock, msg, umsg, uaddr, flags, 0);
}
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 999eee1ed61c..6c86e2a7d942 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -108,8 +108,10 @@ proc_dodebug(struct ctl_table *table, int write, void *buffer, size_t *lenp,
left -= (s - tmpbuf);
if (left && !isspace(*s))
return -EINVAL;
- while (left && isspace(*s))
- left--, s++;
+ while (left && isspace(*s)) {
+ left--;
+ s++;
+ }
} else
left = 0;
*(unsigned int *) table->data = value;
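The sunrpc hunk above only unfolds a comma statement into a braced block;
both forms skip leading whitespace, but the braced loop body makes the two
side effects explicit. The same loop in isolation:

#include <stdio.h>
#include <ctype.h>

int main(void)
{
        const char *s = "   value";
        size_t left = 8;                 /* bytes remaining in the buffer */

        while (left && isspace((unsigned char)*s)) {
                left--;                  /* consume one blank */
                s++;
        }
        printf("remaining %zu: \"%s\"\n", left, s); /* remaining 5: "value" */
        return 0;
}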
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 4f6dc74adf45..c2ff42900b53 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -60,6 +60,7 @@ static int __net_init tipc_init_net(struct net *net)
tn->trial_addr = 0;
tn->addr_trial_end = 0;
tn->capabilities = TIPC_NODE_CAPABILITIES;
+ INIT_WORK(&tn->final_work.work, tipc_net_finalize_work);
memset(tn->node_id, 0, sizeof(tn->node_id));
memset(tn->node_id_string, 0, sizeof(tn->node_id_string));
tn->mon_threshold = TIPC_DEF_MON_THRESHOLD;
@@ -107,8 +108,13 @@ out_crypto:
static void __net_exit tipc_exit_net(struct net *net)
{
+ struct tipc_net *tn = tipc_net(net);
+
tipc_detach_loopback(net);
+ /* Make sure the tipc_net_finalize_work() finished */
+ cancel_work_sync(&tn->final_work.work);
tipc_net_stop(net);
+
tipc_bcast_stop(net);
tipc_nametbl_stop(net);
tipc_sk_rht_destroy(net);
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 631d83c9705f..1d57a4d3b05e 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -90,6 +90,12 @@ extern unsigned int tipc_net_id __read_mostly;
extern int sysctl_tipc_rmem[3] __read_mostly;
extern int sysctl_tipc_named_timeout __read_mostly;
+struct tipc_net_work {
+ struct work_struct work;
+ struct net *net;
+ u32 addr;
+};
+
struct tipc_net {
u8 node_id[NODE_ID_LEN];
u32 node_addr;
@@ -143,6 +149,8 @@ struct tipc_net {
/* TX crypto handler */
struct tipc_crypto *crypto_tx;
#endif
+ /* Work item for net finalize */
+ struct tipc_net_work final_work;
};
static inline struct tipc_net *tipc_net(struct net *net)
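tipc_net_work above embeds the work_struct next to the context the handler
needs (net, addr); the handler gets its context back with container_of(),
which is why the work item is embedded rather than referenced. A runnable
userspace mock of that recovery; work_struct and the handler are stand-ins:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct { int pending; };     /* stand-in */

struct tipc_net_work {
        struct work_struct work;
        unsigned int addr;               /* context carried to the handler */
};

static void finalize_work(struct work_struct *w)
{
        struct tipc_net_work *tnw =
                container_of(w, struct tipc_net_work, work);

        printf("finalizing with addr %#x\n", tnw->addr);
}

int main(void)
{
        struct tipc_net_work fw = { .addr = 0x1001 };

        finalize_work(&fw.work);         /* as the workqueue would call it */
        return 0;
}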
diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c
index 7c523dc81575..40c44101fe8e 100644
--- a/net/tipc/crypto.c
+++ b/net/tipc/crypto.c
@@ -36,22 +36,28 @@
#include <crypto/aead.h>
#include <crypto/aes.h>
+#include <crypto/rng.h>
#include "crypto.h"
+#include "msg.h"
+#include "bcast.h"
-#define TIPC_TX_PROBE_LIM msecs_to_jiffies(1000) /* > 1s */
-#define TIPC_TX_LASTING_LIM msecs_to_jiffies(120000) /* 2 mins */
+#define TIPC_TX_GRACE_PERIOD msecs_to_jiffies(5000) /* 5s */
+#define TIPC_TX_LASTING_TIME msecs_to_jiffies(10000) /* 10s */
#define TIPC_RX_ACTIVE_LIM msecs_to_jiffies(3000) /* 3s */
-#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(180000) /* 3 mins */
+#define TIPC_RX_PASSIVE_LIM msecs_to_jiffies(15000) /* 15s */
+
#define TIPC_MAX_TFMS_DEF 10
#define TIPC_MAX_TFMS_LIM 1000
+#define TIPC_REKEYING_INTV_DEF (60 * 24) /* default: 1 day */
+
/**
* TIPC Key ids
*/
enum {
- KEY_UNUSED = 0,
- KEY_MIN,
- KEY_1 = KEY_MIN,
+ KEY_MASTER = 0,
+ KEY_MIN = KEY_MASTER,
+ KEY_1 = 1,
KEY_2,
KEY_3,
KEY_MAX = KEY_3,
@@ -81,6 +87,8 @@ static const char *hstats[MAX_STATS] = {"ok", "nok", "async", "async_ok",
/* Max TFMs number per key */
int sysctl_tipc_max_tfms __read_mostly = TIPC_MAX_TFMS_DEF;
+/* Key exchange switch, default: on */
+int sysctl_tipc_key_exchange_enabled __read_mostly = 1;
/**
* struct tipc_key - TIPC keys' status indicator
@@ -132,6 +140,8 @@ struct tipc_tfm {
* @mode: crypto mode is applied to the key
* @hint[]: a hint for user key
* @rcu: struct rcu_head
+ * @key: the aead key
+ * @gen: the key's generation
* @seqno: the key seqno (cluster scope)
* @refcnt: the key reference counter
*/
@@ -144,8 +154,10 @@ struct tipc_aead {
u32 salt;
u8 authsize;
u8 mode;
- char hint[TIPC_AEAD_HINT_LEN + 1];
+ char hint[2 * TIPC_AEAD_HINT_LEN + 1];
struct rcu_head rcu;
+ struct tipc_aead_key *key;
+ u16 gen;
atomic64_t seqno ____cacheline_aligned;
refcount_t refcnt ____cacheline_aligned;
@@ -165,26 +177,56 @@ struct tipc_crypto_stats {
* @node: TIPC node (RX)
* @aead: array of pointers to AEAD keys for encryption/decryption
* @peer_rx_active: replicated peer RX active key index
+ * @key_gen: TX/RX key generation
* @key: the key states
- * @working: the crypto is working or not
+ * @skey_mode: session key's mode
+ * @skey: received session key
+ * @wq: common workqueue on TX crypto
+ * @work: delayed work sched for TX/RX
+ * @key_distr: key distributing state
+ * @rekeying_intv: rekeying interval (in minutes)
* @stats: the crypto statistics
+ * @name: the crypto name
* @sndnxt: the per-peer sndnxt (TX)
* @timer1: general timer 1 (jiffies)
- * @timer2: general timer 1 (jiffies)
+ * @timer2: general timer 2 (jiffies)
+ * @working: the crypto is working or not
+ * @key_master: flag indicating if a master key exists
+ * @legacy_user: flag indicating if a peer joins w/o master key (for bwd comp.)
+ * @nokey: no key indication
* @lock: tipc_key lock
*/
struct tipc_crypto {
struct net *net;
struct tipc_node *node;
- struct tipc_aead __rcu *aead[KEY_MAX + 1]; /* key[0] is UNUSED */
+ struct tipc_aead __rcu *aead[KEY_MAX + 1];
atomic_t peer_rx_active;
+ u16 key_gen;
struct tipc_key key;
- u8 working:1;
+ u8 skey_mode;
+ struct tipc_aead_key *skey;
+ struct workqueue_struct *wq;
+ struct delayed_work work;
+#define KEY_DISTR_SCHED 1
+#define KEY_DISTR_COMPL 2
+ atomic_t key_distr;
+ u32 rekeying_intv;
+
struct tipc_crypto_stats __percpu *stats;
+ char name[48];
atomic64_t sndnxt ____cacheline_aligned;
unsigned long timer1;
unsigned long timer2;
+ union {
+ struct {
+ u8 working:1;
+ u8 key_master:1;
+ u8 legacy_user:1;
+ u8 nokey:1;
+ };
+ u8 flags;
+ };
spinlock_t lock; /* crypto lock */
} ____cacheline_aligned;
@@ -234,23 +276,35 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
u8 new_active,
u8 new_pending);
static int tipc_crypto_key_attach(struct tipc_crypto *c,
- struct tipc_aead *aead, u8 pos);
+ struct tipc_aead *aead, u8 pos,
+ bool master_key);
static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending);
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
struct tipc_crypto *rx,
- struct sk_buff *skb);
-static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
- struct tipc_msg *hdr);
+ struct sk_buff *skb,
+ u8 tx_key);
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb);
static int tipc_crypto_key_revoke(struct net *net, u8 tx_key);
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type);
static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
struct tipc_bearer *b,
struct sk_buff **skb, int err);
static void tipc_crypto_do_cmd(struct net *net, int cmd);
static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf);
-#ifdef TIPC_CRYPTO_DEBUG
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
char *buf);
-#endif
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode);
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr);
+static void tipc_crypto_work_tx(struct work_struct *work);
+static void tipc_crypto_work_rx(struct work_struct *work);
+static int tipc_aead_key_generate(struct tipc_aead_key *skey);
+
+#define is_tx(crypto) (!(crypto)->node)
+#define is_rx(crypto) (!is_tx(crypto))
#define key_next(cur) ((cur) % KEY_MAX + 1)
@@ -271,30 +325,55 @@ do { \
/**
* tipc_aead_key_validate - Validate a AEAD user key
*/
-int tipc_aead_key_validate(struct tipc_aead_key *ukey)
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info)
{
int keylen;
/* Check if algorithm exists */
if (unlikely(!crypto_has_alg(ukey->alg_name, 0, 0))) {
- pr_info("Not found cipher: \"%s\"!\n", ukey->alg_name);
+ GENL_SET_ERR_MSG(info, "unable to load the algorithm (module existed?)");
return -ENODEV;
}
/* Currently, we only support the "gcm(aes)" cipher algorithm */
- if (strcmp(ukey->alg_name, "gcm(aes)"))
+ if (strcmp(ukey->alg_name, "gcm(aes)")) {
+ GENL_SET_ERR_MSG(info, "not supported yet the algorithm");
return -ENOTSUPP;
+ }
/* Check if key size is correct */
keylen = ukey->keylen - TIPC_AES_GCM_SALT_SIZE;
if (unlikely(keylen != TIPC_AES_GCM_KEY_SIZE_128 &&
keylen != TIPC_AES_GCM_KEY_SIZE_192 &&
- keylen != TIPC_AES_GCM_KEY_SIZE_256))
- return -EINVAL;
+ keylen != TIPC_AES_GCM_KEY_SIZE_256)) {
+ GENL_SET_ERR_MSG(info, "incorrect key length (20, 28 or 36 octets?)");
+ return -EKEYREJECTED;
+ }
return 0;
}
+/**
+ * tipc_aead_key_generate - Generate new session key
+ * @skey: input/output key with new content
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_aead_key_generate(struct tipc_aead_key *skey)
+{
+ int rc = 0;
+
+ /* Fill the key's content with a random value via RNG cipher */
+ rc = crypto_get_default_rng();
+ if (likely(!rc)) {
+ rc = crypto_rng_get_bytes(crypto_default_rng, skey->key,
+ skey->keylen);
+ crypto_put_default_rng();
+ }
+
+ return rc;
+}
+
static struct tipc_aead *tipc_aead_get(struct tipc_aead __rcu *aead)
{
struct tipc_aead *tmp;
@@ -339,6 +418,7 @@ static void tipc_aead_free(struct rcu_head *rp)
kfree(head);
}
free_percpu(aead->tfm_entry);
+ kzfree(aead->key);
kfree(aead);
}
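tipc_aead_key_generate() earlier in this file fills the session key from the
crypto API's default RNG, and the kzfree() added above makes sure the copied
key material is zeroed on release. As a rough userspace analogy for the
generation step only, with getrandom(2) standing in for the kernel RNG; the
36-octet size mirrors the "20, 28 or 36 octets" rule (16/24/32-byte AES key
plus 4-byte salt):

#include <stdio.h>
#include <sys/random.h>

int main(void)
{
        unsigned char key[36];           /* 32-byte AES-256 key + 4-byte salt */
        size_t i;

        if (getrandom(key, sizeof(key), 0) != sizeof(key))
                return 1;
        for (i = 0; i < sizeof(key); i++)
                printf("%02x", key[i]);
        putchar('\n');
        return 0;
}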
@@ -501,14 +581,15 @@ static int tipc_aead_init(struct tipc_aead **aead, struct tipc_aead_key *ukey,
return err;
}
- /* Copy some chars from the user key as a hint */
- memcpy(tmp->hint, ukey->key, TIPC_AEAD_HINT_LEN);
- tmp->hint[TIPC_AEAD_HINT_LEN] = '\0';
+ /* Form a hex string of some last bytes as the key's hint */
+ bin2hex(tmp->hint, ukey->key + keylen - TIPC_AEAD_HINT_LEN,
+ TIPC_AEAD_HINT_LEN);
/* Initialize the other data */
tmp->mode = mode;
tmp->cloned = NULL;
tmp->authsize = TIPC_AES_GCM_TAG_SIZE;
+ tmp->key = kmemdup(ukey, tipc_aead_key_size(ukey), GFP_KERNEL);
memcpy(&tmp->salt, ukey->key + keylen, TIPC_AES_GCM_SALT_SIZE);
atomic_set(&tmp->users, 0);
atomic64_set(&tmp->seqno, 0);
@@ -663,13 +744,11 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
* but there is no frag_list, it should be still fine!
* Otherwise, we must cow it to be a writable buffer with the tailroom.
*/
-#ifdef TIPC_CRYPTO_DEBUG
SKB_LINEAR_ASSERT(skb);
if (tailen > skb_tailroom(skb)) {
- pr_warn("TX: skb tailroom is not enough: %d, requires: %d\n",
- skb_tailroom(skb), tailen);
+ pr_debug("TX(): skb tailroom is not enough: %d, requires: %d\n",
+ skb_tailroom(skb), tailen);
}
-#endif
if (unlikely(!skb_cloned(skb) && tailen <= skb_tailroom(skb))) {
nsg = 1;
@@ -940,8 +1019,6 @@ bool tipc_ehdr_validate(struct sk_buff *skb)
return false;
if (unlikely(skb->len <= ehsz + TIPC_AES_GCM_TAG_SIZE))
return false;
- if (unlikely(!ehdr->tx_key))
- return false;
return true;
}
@@ -994,6 +1071,8 @@ static int tipc_ehdr_build(struct net *net, struct tipc_aead *aead,
ehdr->tx_key = tx_key;
ehdr->destined = (__rx) ? 1 : 0;
ehdr->rx_key_active = (__rx) ? __rx->key.active : 0;
+ ehdr->rx_nokey = (__rx) ? __rx->nokey : 0;
+ ehdr->master_key = aead->crypto->key_master;
ehdr->reserved_1 = 0;
ehdr->reserved_2 = 0;
@@ -1019,23 +1098,16 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
u8 new_active,
u8 new_pending)
{
-#ifdef TIPC_CRYPTO_DEBUG
struct tipc_key old = c->key;
char buf[32];
-#endif
c->key.keys = ((new_passive & KEY_MASK) << (KEY_BITS * 2)) |
((new_active & KEY_MASK) << (KEY_BITS)) |
((new_pending & KEY_MASK));
-#ifdef TIPC_CRYPTO_DEBUG
- pr_info("%s(%s): key changing %s ::%pS\n",
- (c->node) ? "RX" : "TX",
- (c->node) ? tipc_node_get_id_str(c->node) :
- tipc_own_id_string(c->net),
- tipc_key_change_dump(old, c->key, buf),
- __builtin_return_address(0));
-#endif
+ pr_debug("%s: key changing %s ::%pS\n", c->name,
+ tipc_key_change_dump(old, c->key, buf),
+ __builtin_return_address(0));
}
/**
@@ -1043,6 +1115,7 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
* @c: TIPC crypto to which new key is attached
* @ukey: the user key
* @mode: the key mode (CLUSTER_KEY or PER_NODE_KEY)
+ * @master_key: specify this is a cluster master key
*
* A new TIPC AEAD key will be allocated and initiated with the specified user
* key, then attached to the TIPC crypto.
@@ -1050,7 +1123,7 @@ static inline void tipc_crypto_key_set_state(struct tipc_crypto *c,
* Return: new key id in case of success, otherwise: < 0
*/
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
- u8 mode)
+ u8 mode, bool master_key)
{
struct tipc_aead *aead = NULL;
int rc = 0;
@@ -1060,17 +1133,11 @@ int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
/* Attach it to the crypto */
if (likely(!rc)) {
- rc = tipc_crypto_key_attach(c, aead, 0);
+ rc = tipc_crypto_key_attach(c, aead, 0, master_key);
if (rc < 0)
tipc_aead_free(&aead->rcu);
}
- pr_info("%s(%s): key initiating, rc %d!\n",
- (c->node) ? "RX" : "TX",
- (c->node) ? tipc_node_get_id_str(c->node) :
- tipc_own_id_string(c->net),
- rc);
-
return rc;
}
@@ -1079,58 +1146,58 @@ int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
* @c: TIPC crypto to which the new AEAD key is attached
* @aead: the new AEAD key pointer
* @pos: desired slot in the crypto key array, = 0 if any!
+ * @master_key: specify this is a cluster master key
*
* Return: new key id in case of success, otherwise: -EBUSY
*/
static int tipc_crypto_key_attach(struct tipc_crypto *c,
- struct tipc_aead *aead, u8 pos)
+ struct tipc_aead *aead, u8 pos,
+ bool master_key)
{
- u8 new_pending, new_passive, new_key;
struct tipc_key key;
int rc = -EBUSY;
+ u8 new_key;
spin_lock_bh(&c->lock);
key = c->key;
+ if (master_key) {
+ new_key = KEY_MASTER;
+ goto attach;
+ }
if (key.active && key.passive)
goto exit;
- if (key.passive && !tipc_aead_users(c->aead[key.passive]))
- goto exit;
if (key.pending) {
- if (pos)
- goto exit;
if (tipc_aead_users(c->aead[key.pending]) > 0)
goto exit;
+ /* a requested 'pos' may be replaced here; keys get re-aligned when needed */
/* Replace it */
- new_pending = key.pending;
- new_passive = key.passive;
- new_key = new_pending;
+ new_key = key.pending;
} else {
if (pos) {
if (key.active && pos != key_next(key.active)) {
- new_pending = key.pending;
- new_passive = pos;
- new_key = new_passive;
+ key.passive = pos;
+ new_key = pos;
goto attach;
} else if (!key.active && !key.passive) {
- new_pending = pos;
- new_passive = key.passive;
- new_key = new_pending;
+ key.pending = pos;
+ new_key = pos;
goto attach;
}
}
- new_pending = key_next(key.active ?: key.passive);
- new_passive = key.passive;
- new_key = new_pending;
+ key.pending = key_next(key.active ?: key.passive);
+ new_key = key.pending;
}
attach:
aead->crypto = c;
- tipc_crypto_key_set_state(c, new_passive, key.active, new_pending);
+ aead->gen = (is_tx(c)) ? ++c->key_gen : c->key_gen;
tipc_aead_rcu_replace(c->aead[new_key], aead, &c->lock);
-
+ if (likely(c->key.keys != key.keys))
+ tipc_crypto_key_set_state(c, key.passive, key.active,
+ key.pending);
c->working = 1;
- c->timer1 = jiffies;
- c->timer2 = jiffies;
+ c->nokey = 0;
+ c->key_master |= master_key;
rc = new_key;
exit:
@@ -1140,14 +1207,33 @@ exit:
void tipc_crypto_key_flush(struct tipc_crypto *c)
{
+ struct tipc_crypto *tx, *rx;
int k;
spin_lock_bh(&c->lock);
- c->working = 0;
+ if (is_rx(c)) {
+ /* Try to cancel pending work */
+ rx = c;
+ tx = tipc_net(rx->net)->crypto_tx;
+ if (cancel_delayed_work(&rx->work)) {
+ kfree(rx->skey);
+ rx->skey = NULL;
+ atomic_xchg(&rx->key_distr, 0);
+ tipc_node_put(rx->node);
+ }
+ /* RX stopping => decrease TX key users if any */
+ k = atomic_xchg(&rx->peer_rx_active, 0);
+ if (k) {
+ tipc_aead_users_dec(tx->aead[k], 0);
+ /* Mark the point TX key users changed */
+ tx->timer1 = jiffies;
+ }
+ }
+
+ c->flags = 0;
tipc_crypto_key_set_state(c, 0, 0, 0);
for (k = KEY_MIN; k <= KEY_MAX; k++)
tipc_crypto_key_detach(c->aead[k], &c->lock);
- atomic_set(&c->peer_rx_active, 0);
atomic64_set(&c->sndnxt, 0);
spin_unlock_bh(&c->lock);
}
@@ -1206,7 +1292,8 @@ static bool tipc_crypto_key_try_align(struct tipc_crypto *rx, u8 new_pending)
rcu_assign_pointer(rx->aead[new_passive], tmp2);
refcount_set(&tmp1->refcnt, 1);
aligned = true;
- pr_info("RX(%s): key is aligned!\n", tipc_node_get_id_str(rx->node));
+ pr_info_ratelimited("%s: key[%d] -> key[%d]\n", rx->name, key.pending,
+ new_pending);
exit:
spin_unlock(&rx->lock);
@@ -1218,6 +1305,7 @@ exit:
* @tx: TX crypto handle
* @rx: RX crypto handle (can be NULL)
* @skb: the message skb which will be decrypted later
+ * @tx_key: peer TX key id
*
* This function looks up the existing TX keys and pick one which is suitable
* for the message decryption, that must be a cluster key and not used before
@@ -1227,7 +1315,8 @@ exit:
*/
static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
struct tipc_crypto *rx,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ u8 tx_key)
{
struct tipc_skb_cb *skb_cb = TIPC_SKB_CB(skb);
struct tipc_aead *aead = NULL;
@@ -1246,6 +1335,10 @@ static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
/* Pick one TX key */
spin_lock(&tx->lock);
+ if (tx_key == KEY_MASTER) {
+ aead = tipc_aead_rcu_ptr(tx->aead[KEY_MASTER], &tx->lock);
+ goto done;
+ }
do {
k = (i == 0) ? key.pending :
((i == 1) ? key.active : key.passive);
@@ -1265,9 +1358,12 @@ static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
skb->next = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb->next))
pr_warn("Failed to clone skb for next round if any\n");
- WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
break;
} while (++i < 3);
+
+done:
+ if (likely(aead))
+ WARN_ON(!refcount_inc_not_zero(&aead->refcnt));
spin_unlock(&tx->lock);
return aead;
@@ -1276,53 +1372,73 @@ static struct tipc_aead *tipc_crypto_key_pick_tx(struct tipc_crypto *tx,
/**
* tipc_crypto_key_synch: Synch own key data according to peer key status
* @rx: RX crypto handle
- * @new_rx_active: latest RX active key from peer
- * @hdr: TIPCv2 message
+ * @skb: TIPCv2 message buffer (incl. the ehdr from peer)
*
* This function updates the peer node related data as the peer RX active key
* has changed, so the number of TX keys' users on this node are increased and
* decreased correspondingly.
*
+ * It also considers the case that the peer has no key: then our own master
+ * key (if any) takes over, i.e. the grace period is started, and the key
+ * distributing process is triggered as well.
+ *
* The "per-peer" sndnxt is also reset when the peer key has switched.
*/
-static void tipc_crypto_key_synch(struct tipc_crypto *rx, u8 new_rx_active,
- struct tipc_msg *hdr)
+static void tipc_crypto_key_synch(struct tipc_crypto *rx, struct sk_buff *skb)
{
- struct net *net = rx->net;
- struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
- u8 cur_rx_active;
+ struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb_network_header(skb);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_msg *hdr = buf_msg(skb);
+ u32 self = tipc_own_addr(rx->net);
+ u8 cur, new;
+ unsigned long delay;
- /* TX might be even not ready yet */
- if (unlikely(!tx->key.active && !tx->key.pending))
- return;
+ /* Update RX 'key_master' flag according to peer, also mark "legacy" if
+ * a peer has no master key.
+ */
+ rx->key_master = ehdr->master_key;
+ if (!rx->key_master)
+ tx->legacy_user = 1;
- cur_rx_active = atomic_read(&rx->peer_rx_active);
- if (likely(cur_rx_active == new_rx_active))
+ /* For later cases, apply only if message is destined to this node */
+ if (!ehdr->destined || msg_short(hdr) || msg_destnode(hdr) != self)
return;
- /* Make sure this message destined for this node */
- if (unlikely(msg_short(hdr) ||
- msg_destnode(hdr) != tipc_own_addr(net)))
- return;
+ /* Case 1: Peer has no keys, let's make master key take over */
+ if (ehdr->rx_nokey) {
+ /* Set or extend grace period */
+ tx->timer2 = jiffies;
+ /* Schedule key distributing for the peer if not yet */
+ if (tx->key.keys &&
+ !atomic_cmpxchg(&rx->key_distr, 0, KEY_DISTR_SCHED)) {
+ get_random_bytes(&delay, 2);
+ delay %= 5;
+ delay = msecs_to_jiffies(500 * ++delay);
+ if (queue_delayed_work(tx->wq, &rx->work, delay))
+ tipc_node_get(rx->node);
+ }
+ } else {
+ /* Cancel a pending key distributing if any */
+ atomic_xchg(&rx->key_distr, 0);
+ }
- /* Peer RX active key has changed, try to update owns' & TX users */
- if (atomic_cmpxchg(&rx->peer_rx_active,
- cur_rx_active,
- new_rx_active) == cur_rx_active) {
- if (new_rx_active)
- tipc_aead_users_inc(tx->aead[new_rx_active], INT_MAX);
- if (cur_rx_active)
- tipc_aead_users_dec(tx->aead[cur_rx_active], 0);
+ /* Case 2: Peer RX active key has changed, let's update own TX users */
+ cur = atomic_read(&rx->peer_rx_active);
+ new = ehdr->rx_key_active;
+ if (tx->key.keys &&
+ cur != new &&
+ atomic_cmpxchg(&rx->peer_rx_active, cur, new) == cur) {
+ if (new)
+ tipc_aead_users_inc(tx->aead[new], INT_MAX);
+ if (cur)
+ tipc_aead_users_dec(tx->aead[cur], 0);
atomic64_set(&rx->sndnxt, 0);
/* Mark the point TX key users changed */
tx->timer1 = jiffies;
-#ifdef TIPC_CRYPTO_DEBUG
- pr_info("TX(%s): key users changed %d-- %d++, peer RX(%s)\n",
- tipc_own_id_string(net), cur_rx_active,
- new_rx_active, tipc_node_get_id_str(rx->node));
-#endif
+ pr_debug("%s: key users changed %d-- %d++, peer %s\n",
+ tx->name, cur, new, rx->name);
}
}
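tipc_crypto_key_synch() above schedules key distribution for a key-less peer
after a randomized backoff: two random bytes reduced modulo 5 give 0..4, so
the work is queued 500..2500 ms out, spreading distribution across peers.
The arithmetic in isolation, with rand() standing in for get_random_bytes():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
        unsigned long delay;

        srand((unsigned)time(NULL));
        delay = (unsigned long)rand();   /* random source */
        delay %= 5;                      /* 0..4 */
        delay = 500 * (delay + 1);       /* 500..2500 ms */
        printf("key distribution scheduled in %lu ms\n", delay);
        return 0;
}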
@@ -1340,7 +1456,7 @@ static int tipc_crypto_key_revoke(struct net *net, u8 tx_key)
tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
spin_unlock(&tx->lock);
- pr_warn("TX(%s): key is revoked!\n", tipc_own_id_string(net));
+ pr_warn("%s: key is revoked\n", tx->name);
return -EKEYREVOKED;
}
@@ -1357,6 +1473,15 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
if (!c)
return -ENOMEM;
+ /* Allocate workqueue on TX */
+ if (!node) {
+ c->wq = alloc_ordered_workqueue("tipc_crypto", 0);
+ if (!c->wq) {
+ kfree(c);
+ return -ENOMEM;
+ }
+ }
+
/* Allocate statistic structure */
c->stats = alloc_percpu_gfp(struct tipc_crypto_stats, GFP_ATOMIC);
if (!c->stats) {
@@ -1364,53 +1489,52 @@ int tipc_crypto_start(struct tipc_crypto **crypto, struct net *net,
return -ENOMEM;
}
- c->working = 0;
+ c->flags = 0;
c->net = net;
c->node = node;
+ get_random_bytes(&c->key_gen, 2);
tipc_crypto_key_set_state(c, 0, 0, 0);
+ atomic_set(&c->key_distr, 0);
atomic_set(&c->peer_rx_active, 0);
atomic64_set(&c->sndnxt, 0);
c->timer1 = jiffies;
c->timer2 = jiffies;
+ c->rekeying_intv = TIPC_REKEYING_INTV_DEF;
spin_lock_init(&c->lock);
- *crypto = c;
+ scnprintf(c->name, 48, "%s(%s)", (is_rx(c)) ? "RX" : "TX",
+ (is_rx(c)) ? tipc_node_get_id_str(c->node) :
+ tipc_own_id_string(c->net));
+
+ if (is_rx(c))
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_rx);
+ else
+ INIT_DELAYED_WORK(&c->work, tipc_crypto_work_tx);
+ *crypto = c;
return 0;
}
void tipc_crypto_stop(struct tipc_crypto **crypto)
{
- struct tipc_crypto *c, *tx, *rx;
- bool is_rx;
+ struct tipc_crypto *c = *crypto;
u8 k;
- if (!*crypto)
+ if (!c)
return;
- rcu_read_lock();
- /* RX stopping? => decrease TX key users if any */
- is_rx = !!((*crypto)->node);
- if (is_rx) {
- rx = *crypto;
- tx = tipc_net(rx->net)->crypto_tx;
- k = atomic_read(&rx->peer_rx_active);
- if (k) {
- tipc_aead_users_dec(tx->aead[k], 0);
- /* Mark the point TX key users changed */
- tx->timer1 = jiffies;
- }
+ /* Flush any queued works & destroy wq */
+ if (is_tx(c)) {
+ c->rekeying_intv = 0;
+ cancel_delayed_work_sync(&c->work);
+ destroy_workqueue(c->wq);
}
/* Release AEAD keys */
- c = *crypto;
+ rcu_read_lock();
for (k = KEY_MIN; k <= KEY_MAX; k++)
tipc_aead_put(rcu_dereference(c->aead[k]));
rcu_read_unlock();
-
- pr_warn("%s(%s) has been purged, node left!\n",
- (is_rx) ? "RX" : "TX",
- (is_rx) ? tipc_node_get_id_str((*crypto)->node) :
- tipc_own_id_string((*crypto)->net));
+ pr_debug("%s: has been stopped\n", c->name);
/* Free this crypto statistics */
free_percpu(c->stats);
@@ -1424,106 +1548,91 @@ void tipc_crypto_timeout(struct tipc_crypto *rx)
struct tipc_net *tn = tipc_net(rx->net);
struct tipc_crypto *tx = tn->crypto_tx;
struct tipc_key key;
- u8 new_pending, new_passive;
int cmd;
- /* TX key activating:
- * The pending key (users > 0) -> active
- * The active key if any (users == 0) -> free
- */
+ /* TX pending: has taken all users & is stable -> active */
spin_lock(&tx->lock);
key = tx->key;
if (key.active && tipc_aead_users(tx->aead[key.active]) > 0)
goto s1;
if (!key.pending || tipc_aead_users(tx->aead[key.pending]) <= 0)
goto s1;
- if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_LIM))
+ if (time_before(jiffies, tx->timer1 + TIPC_TX_LASTING_TIME))
goto s1;
tipc_crypto_key_set_state(tx, key.passive, key.pending, 0);
if (key.active)
tipc_crypto_key_detach(tx->aead[key.active], &tx->lock);
this_cpu_inc(tx->stats->stat[STAT_SWITCHES]);
- pr_info("TX(%s): key %d is activated!\n", tipc_own_id_string(tx->net),
- key.pending);
+ pr_info("%s: key[%d] is activated\n", tx->name, key.pending);
s1:
spin_unlock(&tx->lock);
- /* RX key activating:
- * The pending key (users > 0) -> active
- * The active key if any -> passive, freed later
- */
+ /* RX pending: has users -> active */
spin_lock(&rx->lock);
key = rx->key;
if (!key.pending || tipc_aead_users(rx->aead[key.pending]) <= 0)
goto s2;
- new_pending = (key.passive &&
- !tipc_aead_users(rx->aead[key.passive])) ?
- key.passive : 0;
- new_passive = (key.active) ?: ((new_pending) ? 0 : key.passive);
- tipc_crypto_key_set_state(rx, new_passive, key.pending, new_pending);
+ if (key.active)
+ key.passive = key.active;
+ key.active = key.pending;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
this_cpu_inc(rx->stats->stat[STAT_SWITCHES]);
- pr_info("RX(%s): key %d is activated!\n",
- tipc_node_get_id_str(rx->node), key.pending);
+ pr_info("%s: key[%d] is activated\n", rx->name, key.pending);
goto s5;
s2:
- /* RX key "faulty" switching:
- * The faulty pending key (users < -30) -> passive
- * The passive key (users = 0) -> pending
- * Note: This only happens after RX deactivated - s3!
- */
- key = rx->key;
- if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -30)
- goto s3;
- if (!key.passive || tipc_aead_users(rx->aead[key.passive]) != 0)
+ /* RX pending: not working -> remove */
+ if (!key.pending || tipc_aead_users(rx->aead[key.pending]) > -10)
goto s3;
- new_pending = key.passive;
- new_passive = key.pending;
- tipc_crypto_key_set_state(rx, new_passive, key.active, new_pending);
+ tipc_crypto_key_set_state(rx, key.passive, key.active, 0);
+ tipc_crypto_key_detach(rx->aead[key.pending], &rx->lock);
+ pr_debug("%s: key[%d] is removed\n", rx->name, key.pending);
goto s5;
s3:
- /* RX key deactivating:
- * The passive key if any -> pending
- * The active key -> passive (users = 0) / pending
- * The pending key if any -> passive (users = 0)
- */
- key = rx->key;
+ /* RX active: timed out or no user -> pending */
if (!key.active)
goto s4;
- if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM))
+ if (time_before(jiffies, rx->timer1 + TIPC_RX_ACTIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.active]) > 0)
goto s4;
- new_pending = (key.passive) ?: key.active;
- new_passive = (key.passive) ? key.active : key.pending;
- tipc_aead_users_set(rx->aead[new_pending], 0);
- if (new_passive)
- tipc_aead_users_set(rx->aead[new_passive], 0);
- tipc_crypto_key_set_state(rx, new_passive, 0, new_pending);
- pr_info("RX(%s): key %d is deactivated!\n",
- tipc_node_get_id_str(rx->node), key.active);
+ if (key.pending)
+ key.passive = key.active;
+ else
+ key.pending = key.active;
+ rx->timer2 = jiffies;
+ tipc_crypto_key_set_state(rx, key.passive, 0, key.pending);
+ tipc_aead_users_set(rx->aead[key.pending], 0);
+ pr_debug("%s: key[%d] is deactivated\n", rx->name, key.active);
goto s5;
s4:
- /* RX key passive -> freed: */
- key = rx->key;
- if (!key.passive || !tipc_aead_users(rx->aead[key.passive]))
+ /* RX passive: outdated or not working -> free */
+ if (!key.passive)
goto s5;
- if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM))
+ if (time_before(jiffies, rx->timer2 + TIPC_RX_PASSIVE_LIM) &&
+ tipc_aead_users(rx->aead[key.passive]) > -10)
goto s5;
tipc_crypto_key_set_state(rx, 0, key.active, key.pending);
tipc_crypto_key_detach(rx->aead[key.passive], &rx->lock);
- pr_info("RX(%s): key %d is freed!\n", tipc_node_get_id_str(rx->node),
- key.passive);
+ pr_debug("%s: key[%d] is freed\n", rx->name, key.passive);
s5:
spin_unlock(&rx->lock);
+ /* Relax the flag here; it will be set again if the peer really is a
+ * legacy user, but for safety only when we are not in the grace period!
+ */
+ if (time_after(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD))
+ tx->legacy_user = 0;
+
/* Limit max_tfms & do debug commands if needed */
if (likely(sysctl_tipc_max_tfms <= TIPC_MAX_TFMS_LIM))
return;
@@ -1533,6 +1642,22 @@ s5:
tipc_crypto_do_cmd(rx->net, cmd);
}
+static inline void tipc_crypto_clone_msg(struct net *net, struct sk_buff *_skb,
+ struct tipc_bearer *b,
+ struct tipc_media_addr *dst,
+ struct tipc_node *__dnode, u8 type)
+{
+ struct sk_buff *skb;
+
+ skb = skb_clone(_skb, GFP_ATOMIC);
+ if (skb) {
+ TIPC_SKB_CB(skb)->xmit_type = type;
+ tipc_crypto_xmit(net, &skb, b, dst, __dnode);
+ if (skb)
+ b->media->send_msg(net, skb, b, dst);
+ }
+}
+
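A note on the helper above: the xmit_type tag travels on the clone, so the nested tipc_crypto_xmit() call selects the probing/gracing key for the duplicate while the original skb continues through the normal key selection. A minimal sketch of the call pattern used twice further below:

    /* Sketch: send a tagged duplicate alongside the original message */
    tipc_crypto_clone_msg(net, *skb, b, dst, __dnode, SKB_PROBING);
    /* ...the original *skb then proceeds with the regular key choice */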
/**
* tipc_crypto_xmit - Build & encrypt TIPC message for xmit
* @net: struct net
@@ -1542,7 +1667,8 @@ s5:
* @__dnode: destination node for reference if any
*
* First, build an encryption message header on the top of the message, then
- * encrypt the original TIPC message by using the active or pending TX key.
+ * encrypt the original TIPC message by using the pending, master or active
+ * key with this preference order.
* If the encryption is successful, the encrypted skb is returned directly or
* via the callback.
* Otherwise, the skb is freed!
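A condensed sketch of that preference order, where peer_uses_pending, probing, msg_is_vital and gracing stand in for the detailed checks made in the function body below:

    /* Sketch: key preference in tipc_crypto_xmit()
     *   1. pending key - peer already uses it, or we are probing it
     *   2. master key  - vital messages, or within the grace period
     *   3. active key  - the normal case
     */
    if (key.pending && (peer_uses_pending || probing))
            tx_key = key.pending;
    else if (tx->key_master && (msg_is_vital || gracing))
            tx_key = KEY_MASTER;
    else if (key.active)
            tx_key = key.active;
    else
            return -ENOKEY;     /* nothing usable, skb is freed */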
@@ -1562,46 +1688,67 @@ int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
struct tipc_crypto *__rx = tipc_node_crypto_rx(__dnode);
struct tipc_crypto *tx = tipc_net(net)->crypto_tx;
struct tipc_crypto_stats __percpu *stats = tx->stats;
+ struct tipc_msg *hdr = buf_msg(*skb);
struct tipc_key key = tx->key;
struct tipc_aead *aead = NULL;
- struct sk_buff *probe;
+ u32 user = msg_user(hdr);
+ u32 type = msg_type(hdr);
int rc = -ENOKEY;
- u8 tx_key;
+ u8 tx_key = 0;
/* No encryption? */
if (!tx->working)
return 0;
- /* Try with the pending key if available and:
- * 1) This is the only choice (i.e. no active key) or;
- * 2) Peer has switched to this key (unicast only) or;
- * 3) It is time to do a pending key probe;
- */
+ /* Use the pending key if the peer has it active or it is probing time */
if (unlikely(key.pending)) {
tx_key = key.pending;
- if (!key.active)
+ if (!tx->key_master && !key.active)
goto encrypt;
if (__rx && atomic_read(&__rx->peer_rx_active) == tx_key)
goto encrypt;
- if (TIPC_SKB_CB(*skb)->probe)
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_PROBING) {
+ pr_debug("%s: probing for key[%d]\n", tx->name,
+ key.pending);
+ goto encrypt;
+ }
+ if (user == LINK_CONFIG || user == LINK_PROTOCOL)
+ tipc_crypto_clone_msg(net, *skb, b, dst, __dnode,
+ SKB_PROBING);
+ }
+
+ /* Use the master key if this is a *vital* message or in the grace period */
+ if (tx->key_master) {
+ tx_key = KEY_MASTER;
+ if (!key.active)
+ goto encrypt;
+ if (TIPC_SKB_CB(*skb)->xmit_type == SKB_GRACING) {
+ pr_debug("%s: gracing for msg (%d %d)\n", tx->name,
+ user, type);
goto encrypt;
- if (!__rx &&
- time_after(jiffies, tx->timer2 + TIPC_TX_PROBE_LIM)) {
- tx->timer2 = jiffies;
- probe = skb_clone(*skb, GFP_ATOMIC);
- if (probe) {
- TIPC_SKB_CB(probe)->probe = 1;
- tipc_crypto_xmit(net, &probe, b, dst, __dnode);
- if (probe)
- b->media->send_msg(net, probe, b, dst);
+ }
+ if (user == LINK_CONFIG ||
+ (user == LINK_PROTOCOL && type == RESET_MSG) ||
+ (user == MSG_CRYPTO && type == KEY_DISTR_MSG) ||
+ time_before(jiffies, tx->timer2 + TIPC_TX_GRACE_PERIOD)) {
+ if (__rx && __rx->key_master &&
+ !atomic_read(&__rx->peer_rx_active))
+ goto encrypt;
+ if (!__rx) {
+ if (likely(!tx->legacy_user))
+ goto encrypt;
+ tipc_crypto_clone_msg(net, *skb, b, dst,
+ __dnode, SKB_GRACING);
}
}
}
+
/* Else, use the active key if any */
if (likely(key.active)) {
tx_key = key.active;
goto encrypt;
}
+
goto exit;
encrypt:
@@ -1667,30 +1814,21 @@ int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
struct tipc_aead *aead = NULL;
struct tipc_key key;
int rc = -ENOKEY;
- u8 tx_key = 0;
+ u8 tx_key, n;
+
+ tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
/* New peer?
* Let's try with TX key (i.e. cluster mode) & verify the skb first!
*/
- if (unlikely(!rx))
+ if (unlikely(!rx || tx_key == KEY_MASTER))
goto pick_tx;
- /* Pick RX key according to TX key, three cases are possible:
- * 1) The current active key (likely) or;
- * 2) The pending (new or deactivated) key (if any) or;
- * 3) The passive or old active key (i.e. users > 0);
- */
- tx_key = ((struct tipc_ehdr *)(*skb)->data)->tx_key;
+ /* Pick RX key according to TX key if any */
key = rx->key;
- if (likely(tx_key == key.active))
+ if (tx_key == key.active || tx_key == key.pending ||
+ tx_key == key.passive)
goto decrypt;
- if (tx_key == key.pending)
- goto decrypt;
- if (tx_key == key.passive) {
- rx->timer2 = jiffies;
- if (tipc_aead_users(rx->aead[key.passive]) > 0)
- goto decrypt;
- }
/* Unknown key, let's try to align RX key(s) */
if (tipc_crypto_key_try_align(rx, tx_key))
@@ -1698,7 +1836,7 @@ int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
pick_tx:
/* No key suitable? Try to pick one from TX... */
- aead = tipc_crypto_key_pick_tx(tx, rx, *skb);
+ aead = tipc_crypto_key_pick_tx(tx, rx, *skb, tx_key);
if (aead)
goto decrypt;
goto exit;
@@ -1726,8 +1864,19 @@ exit:
if (rc == -ENOKEY) {
kfree_skb(*skb);
*skb = NULL;
- if (rx)
+ if (rx) {
+ /* Mark rx->nokey only if we don't have a
+ * pending received session key, nor a newer
+ * one, i.e. in the next slot.
+ */
+ n = key_next(tx_key);
+ rx->nokey = !(rx->skey ||
+ rcu_access_pointer(rx->aead[n]));
+ pr_debug_ratelimited("%s: nokey %d, key %d/%x\n",
+ rx->name, rx->nokey,
+ tx_key, rx->key.keys);
tipc_node_put(rx->node);
+ }
this_cpu_inc(stats->stat[STAT_NOKEYS]);
return rc;
} else if (rc == -EBADMSG) {
@@ -1749,21 +1898,17 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
struct tipc_aead *tmp = NULL;
struct tipc_ehdr *ehdr;
struct tipc_node *n;
- u8 rx_key_active;
- bool destined;
/* Is this completed by TX? */
- if (unlikely(!rx->node)) {
+ if (unlikely(is_tx(aead->crypto))) {
rx = skb_cb->tx_clone_ctx.rx;
-#ifdef TIPC_CRYPTO_DEBUG
- pr_info("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
- (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
- (*skb)->next, skb_cb->flags);
- pr_info("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
- skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
- aead->crypto->aead[1], aead->crypto->aead[2],
- aead->crypto->aead[3]);
-#endif
+ pr_debug("TX->RX(%s): err %d, aead %p, skb->next %p, flags %x\n",
+ (rx) ? tipc_node_get_id_str(rx->node) : "-", err, aead,
+ (*skb)->next, skb_cb->flags);
+ pr_debug("skb_cb [recurs %d, last %p], tx->aead [%p %p %p]\n",
+ skb_cb->tx_clone_ctx.recurs, skb_cb->tx_clone_ctx.last,
+ aead->crypto->aead[1], aead->crypto->aead[2],
+ aead->crypto->aead[3]);
if (unlikely(err)) {
if (err == -EBADMSG && (*skb)->next)
tipc_rcv(net, (*skb)->next, b);
@@ -1784,12 +1929,12 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
goto free_skb;
}
- /* Skip cloning this time as we had a RX pending key */
- if (rx->key.pending)
+ /* Ignore cloning if it was TX master key */
+ if (ehdr->tx_key == KEY_MASTER)
goto rcv;
if (tipc_aead_clone(&tmp, aead) < 0)
goto rcv;
- if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key) < 0) {
+ if (tipc_crypto_key_attach(rx, tmp, ehdr->tx_key, false) < 0) {
tipc_aead_free(&tmp->rcu);
goto rcv;
}
@@ -1805,14 +1950,18 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead,
/* Set the RX key's user */
tipc_aead_users_set(aead, 1);
-rcv:
/* Mark this point, RX works */
rx->timer1 = jiffies;
+rcv:
/* Remove ehdr & auth. tag prior to tipc_rcv() */
ehdr = (struct tipc_ehdr *)(*skb)->data;
- destined = ehdr->destined;
- rx_key_active = ehdr->rx_key_active;
+
+ /* Mark this point, RX passive still works */
+ if (rx->key.passive && ehdr->tx_key == rx->key.passive)
+ rx->timer2 = jiffies;
+
+ skb_reset_network_header(*skb);
skb_pull(*skb, tipc_ehdr_size(ehdr));
pskb_trim(*skb, (*skb)->len - aead->authsize);
@@ -1822,9 +1971,8 @@ rcv:
goto free_skb;
}
- /* Update peer RX active key & TX users */
- if (destined)
- tipc_crypto_key_synch(rx, rx_key_active, buf_msg(*skb));
+ /* Ok, everything's fine, try to synch own keys according to peers' */
+ tipc_crypto_key_synch(rx, *skb);
/* Mark skb decrypted */
skb_cb->decrypted = 1;
@@ -1883,7 +2031,7 @@ print_stats:
/* Print crypto statistics */
for (i = 0, j = 0; i < MAX_STATS; i++)
j += scnprintf(buf + j, 200 - j, "|%11s ", hstats[i]);
- pr_info("\nCounter %s", buf);
+ pr_info("Counter %s", buf);
memset(buf, '-', 115);
buf[115] = '\0';
@@ -1927,21 +2075,31 @@ static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
char *s;
for (k = KEY_MIN; k <= KEY_MAX; k++) {
- if (k == key.passive)
- s = "PAS";
- else if (k == key.active)
- s = "ACT";
- else if (k == key.pending)
- s = "PEN";
- else
- s = "-";
+ if (k == KEY_MASTER) {
+ if (is_rx(c))
+ continue;
+ if (time_before(jiffies,
+ c->timer2 + TIPC_TX_GRACE_PERIOD))
+ s = "ACT";
+ else
+ s = "PAS";
+ } else {
+ if (k == key.passive)
+ s = "PAS";
+ else if (k == key.active)
+ s = "ACT";
+ else if (k == key.pending)
+ s = "PEN";
+ else
+ s = "-";
+ }
i += scnprintf(buf + i, 200 - i, "\tKey%d: %s", k, s);
rcu_read_lock();
aead = rcu_dereference(c->aead[k]);
if (aead)
i += scnprintf(buf + i, 200 - i,
- "{\"%s...\", \"%s\"}/%d:%d",
+ "{\"0x...%s\", \"%s\"}/%d:%d",
aead->hint,
(aead->mode == CLUSTER_KEY) ? "c" : "p",
atomic_read(&aead->users),
@@ -1950,14 +2108,13 @@ static char *tipc_crypto_key_dump(struct tipc_crypto *c, char *buf)
i += scnprintf(buf + i, 200 - i, "\n");
}
- if (c->node)
+ if (is_rx(c))
i += scnprintf(buf + i, 200 - i, "\tPeer RX active: %d\n",
atomic_read(&c->peer_rx_active));
return buf;
}
-#ifdef TIPC_CRYPTO_DEBUG
static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
char *buf)
{
@@ -1968,7 +2125,7 @@ static char *tipc_key_change_dump(struct tipc_key old, struct tipc_key new,
/* Output format: "[%s %s %s] -> [%s %s %s]", max len = 32 */
again:
i += scnprintf(buf + i, 32 - i, "[");
- for (k = KEY_MIN; k <= KEY_MAX; k++) {
+ for (k = KEY_1; k <= KEY_3; k++) {
if (k == key->passive)
s = "pas";
else if (k == key->active)
@@ -1978,7 +2135,7 @@ again:
else
s = "-";
i += scnprintf(buf + i, 32 - i,
- (k != KEY_MAX) ? "%s " : "%s", s);
+ (k != KEY_3) ? "%s " : "%s", s);
}
if (key != &new) {
i += scnprintf(buf + i, 32 - i, "] -> ");
@@ -1988,4 +2145,320 @@ again:
i += scnprintf(buf + i, 32 - i, "]");
return buf;
}
-#endif
+
+/**
+ * tipc_crypto_msg_rcv - Common 'MSG_CRYPTO' processing point
+ * @net: the struct net
+ * @skb: the receiving message buffer
+ */
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb)
+{
+ struct tipc_crypto *rx;
+ struct tipc_msg *hdr;
+
+ if (unlikely(skb_linearize(skb)))
+ goto exit;
+
+ hdr = buf_msg(skb);
+ rx = tipc_node_crypto_rx_by_addr(net, msg_prevnode(hdr));
+ if (unlikely(!rx))
+ goto exit;
+
+ switch (msg_type(hdr)) {
+ case KEY_DISTR_MSG:
+ if (tipc_crypto_key_rcv(rx, hdr))
+ goto exit;
+ break;
+ default:
+ break;
+ }
+
+ tipc_node_put(rx->node);
+
+exit:
+ kfree_skb(skb);
+}
+
+/**
+ * tipc_crypto_key_distr - Distribute a TX key
+ * @tx: the TX crypto
+ * @key: the key's index
+ * @dest: the destination tipc node, = NULL if distributing to all nodes
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest)
+{
+ struct tipc_aead *aead;
+ u32 dnode = tipc_node_get_addr(dest);
+ int rc = -ENOKEY;
+
+ if (!sysctl_tipc_key_exchange_enabled)
+ return 0;
+
+ if (key) {
+ rcu_read_lock();
+ aead = tipc_aead_get(tx->aead[key]);
+ if (likely(aead)) {
+ rc = tipc_crypto_key_xmit(tx->net, aead->key,
+ aead->gen, aead->mode,
+ dnode);
+ tipc_aead_put(aead);
+ }
+ rcu_read_unlock();
+ }
+
+ return rc;
+}
+
+/**
+ * tipc_crypto_key_xmit - Send a session key
+ * @net: the struct net
+ * @skey: the session key to be sent
+ * @gen: the key's generation
+ * @mode: the key's mode
+ * @dnode: the destination node address, = 0 if broadcasting to all nodes
+ *
+ * The session key 'skey' is packed into the data section of a TIPC v2
+ * 'MSG_CRYPTO/KEY_DISTR_MSG', which is then transmitted over the uc/bc link.
+ *
+ * Return: 0 in case of success, otherwise < 0
+ */
+static int tipc_crypto_key_xmit(struct net *net, struct tipc_aead_key *skey,
+ u16 gen, u8 mode, u32 dnode)
+{
+ struct sk_buff_head pkts;
+ struct tipc_msg *hdr;
+ struct sk_buff *skb;
+ u16 size, cong_link_cnt;
+ u8 *data;
+ int rc;
+
+ size = tipc_aead_key_size(skey);
+ skb = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ hdr = buf_msg(skb);
+ tipc_msg_init(tipc_own_addr(net), hdr, MSG_CRYPTO, KEY_DISTR_MSG,
+ INT_H_SIZE, dnode);
+ msg_set_size(hdr, INT_H_SIZE + size);
+ msg_set_key_gen(hdr, gen);
+ msg_set_key_mode(hdr, mode);
+
+ data = msg_data(hdr);
+ *((__be32 *)(data + TIPC_AEAD_ALG_NAME)) = htonl(skey->keylen);
+ memcpy(data, skey->alg_name, TIPC_AEAD_ALG_NAME);
+ memcpy(data + TIPC_AEAD_ALG_NAME + sizeof(__be32), skey->key,
+ skey->keylen);
+
+ __skb_queue_head_init(&pkts);
+ __skb_queue_tail(&pkts, skb);
+ if (dnode)
+ rc = tipc_node_xmit(net, &pkts, dnode, 0);
+ else
+ rc = tipc_bcast_xmit(net, &pkts, &cong_link_cnt);
+
+ return rc;
+}
+
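The data section built above has a fixed layout mirroring struct tipc_aead_key from the TIPC uapi; a sketch of it as a view struct (illustrative only, the code works with raw offsets and a TIPC_AEAD_ALG_NAME-sized name field):

    /* Sketch: wire layout of the MSG_CRYPTO/KEY_DISTR_MSG data section */
    struct key_distr_data {
            char    alg_name[TIPC_AEAD_ALG_NAME]; /* e.g. "gcm(aes)" */
            __be32  keylen;                       /* length, network order */
            u8      key[];                        /* the key material */
    } __packed;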
+/**
+ * tipc_crypto_key_rcv - Receive a session key
+ * @rx: the RX crypto
+ * @hdr: the TIPC v2 message incl. the receiving session key in its data
+ *
+ * This function retrieves the session key from the peer's message, then
+ * schedules an RX work to attach the key to the corresponding RX crypto.
+ *
+ * Return: "true" if the key has been scheduled for attaching, otherwise
+ * "false".
+ */
+static bool tipc_crypto_key_rcv(struct tipc_crypto *rx, struct tipc_msg *hdr)
+{
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ struct tipc_aead_key *skey = NULL;
+ u16 key_gen = msg_key_gen(hdr);
+ u16 size = msg_data_sz(hdr);
+ u8 *data = msg_data(hdr);
+
+ spin_lock(&rx->lock);
+ if (unlikely(rx->skey || (key_gen == rx->key_gen && rx->key.keys))) {
+ pr_err("%s: key existed <%p>, gen %d vs %d\n", rx->name,
+ rx->skey, key_gen, rx->key_gen);
+ goto exit;
+ }
+
+ /* Allocate memory for the key */
+ skey = kmalloc(size, GFP_ATOMIC);
+ if (unlikely(!skey)) {
+ pr_err("%s: unable to allocate memory for skey\n", rx->name);
+ goto exit;
+ }
+
+ /* Copy key from msg data */
+ skey->keylen = ntohl(*((__be32 *)(data + TIPC_AEAD_ALG_NAME)));
+ memcpy(skey->alg_name, data, TIPC_AEAD_ALG_NAME);
+ memcpy(skey->key, data + TIPC_AEAD_ALG_NAME + sizeof(__be32),
+ skey->keylen);
+
+ /* Sanity check */
+ if (unlikely(size != tipc_aead_key_size(skey))) {
+ kfree(skey);
+ skey = NULL;
+ goto exit;
+ }
+
+ rx->key_gen = key_gen;
+ rx->skey_mode = msg_key_mode(hdr);
+ rx->skey = skey;
+ rx->nokey = 0;
+ mb(); /* for nokey flag */
+
+exit:
+ spin_unlock(&rx->lock);
+
+ /* Schedule attaching the key to this crypto */
+ if (likely(skey && queue_delayed_work(tx->wq, &rx->work, 0)))
+ return true;
+
+ return false;
+}
+
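The sanity check above works because the total data size is derivable from the decoded keylen; a sketch, assuming tipc_aead_key_size() is the usual "struct size plus keylen" helper:

    /* Sketch: expected size of a received session key */
    unsigned int expect = sizeof(struct tipc_aead_key) + skey->keylen;

    /* msg_data_sz(hdr) must equal this, otherwise the key is dropped */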
+/**
+ * tipc_crypto_work_rx - Scheduled RX works handler
+ * @work: the struct RX work
+ *
+ * The function processes the previously scheduled works, i.e. distributing the
+ * TX key to a peer or attaching a received session key to the RX crypto.
+ */
+static void tipc_crypto_work_rx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *rx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_crypto *tx = tipc_net(rx->net)->crypto_tx;
+ unsigned long delay = msecs_to_jiffies(5000);
+ bool resched = false;
+ u8 key;
+ int rc;
+
+ /* Case 1: Distribute TX key to peer if scheduled */
+ if (atomic_cmpxchg(&rx->key_distr,
+ KEY_DISTR_SCHED,
+ KEY_DISTR_COMPL) == KEY_DISTR_SCHED) {
+ /* Always pick the newest one for distributing */
+ key = tx->key.pending ?: tx->key.active;
+ rc = tipc_crypto_key_distr(tx, key, rx->node);
+ if (unlikely(rc))
+ pr_warn("%s: unable to distr key[%d] to %s, err %d\n",
+ tx->name, key, tipc_node_get_id_str(rx->node),
+ rc);
+
+ /* Reschedule to release the key_distr state */
+ resched = true;
+ } else {
+ atomic_cmpxchg(&rx->key_distr, KEY_DISTR_COMPL, 0);
+ }
+
+ /* Case 2: Attach a pending received session key from peer if any */
+ if (rx->skey) {
+ rc = tipc_crypto_key_init(rx, rx->skey, rx->skey_mode, false);
+ if (unlikely(rc < 0))
+ pr_warn("%s: unable to attach received skey, err %d\n",
+ rx->name, rc);
+ switch (rc) {
+ case -EBUSY:
+ case -ENOMEM:
+ /* Reschedule the key attaching */
+ resched = true;
+ break;
+ default:
+ synchronize_rcu();
+ kfree(rx->skey);
+ rx->skey = NULL;
+ break;
+ }
+ }
+
+ if (resched && queue_delayed_work(tx->wq, &rx->work, delay))
+ return;
+
+ tipc_node_put(rx->node);
+}
+
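The key_distr field behaves as a small state machine shared between tipc_crypto_key_synch() and this worker; a sketch of its transitions as implied by the cmpxchg/xchg calls above:

    /* Sketch: rx->key_distr transitions
     *   0               --synch: peer lacks keys-->  KEY_DISTR_SCHED
     *   KEY_DISTR_SCHED --worker: TX key sent----->  KEY_DISTR_COMPL
     *   KEY_DISTR_COMPL --worker re-run----------->  0 (released)
     *   any             --synch: peer has keys---->  0 (cancelled)
     */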
+/**
+ * tipc_crypto_rekeying_sched - (Re)schedule rekeying, with or without a new interval
+ * @tx: TX crypto
+ * @changed: if the rekeying needs to be rescheduled with new interval
+ * @new_intv: new rekeying interval (when "changed" = true)
+ */
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv)
+{
+ unsigned long delay;
+ bool now = false;
+
+ if (changed) {
+ if (new_intv == TIPC_REKEYING_NOW)
+ now = true;
+ else
+ tx->rekeying_intv = new_intv;
+ cancel_delayed_work_sync(&tx->work);
+ }
+
+ if (tx->rekeying_intv || now) {
+ delay = (now) ? 0 : tx->rekeying_intv * 60 * 1000;
+ queue_delayed_work(tx->wq, &tx->work, msecs_to_jiffies(delay));
+ }
+}
+
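The interval is kept in minutes and converted to milliseconds before queuing; a usage sketch, assuming a valid TX crypto instance and the TIPC_REKEYING_NOW sentinel:

    /* Sketch: re-arm automatic rekeying every 60 minutes */
    tipc_crypto_rekeying_sched(tx, true, 60);

    /* Sketch: trigger one immediate rekeying, keeping the old interval */
    tipc_crypto_rekeying_sched(tx, true, TIPC_REKEYING_NOW);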
+/**
+ * tipc_crypto_work_tx - Scheduled TX works handler
+ * @work: the struct TX work
+ *
+ * The function processes the previously scheduled work, i.e. key rekeying: it
+ * generates a new session key based on the current one, attaches it to the TX
+ * crypto and finally distributes it to the peers. It also re-schedules the
+ * rekeying if needed.
+ */
+static void tipc_crypto_work_tx(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tipc_crypto *tx = container_of(dwork, struct tipc_crypto, work);
+ struct tipc_aead_key *skey = NULL;
+ struct tipc_key key = tx->key;
+ struct tipc_aead *aead;
+ int rc = -ENOMEM;
+
+ if (unlikely(key.pending))
+ goto resched;
+
+ /* Take the current key as a template */
+ rcu_read_lock();
+ aead = rcu_dereference(tx->aead[key.active ?: KEY_MASTER]);
+ if (unlikely(!aead)) {
+ rcu_read_unlock();
+ /* At least one key should exist for securing */
+ return;
+ }
+
+ /* Let's duplicate it first */
+ skey = kmemdup(aead->key, tipc_aead_key_size(aead->key), GFP_ATOMIC);
+ rcu_read_unlock();
+
+ /* Now, generate a new key, initiate & distribute it */
+ if (likely(skey)) {
+ rc = tipc_aead_key_generate(skey) ?:
+ tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);
+ if (likely(rc > 0))
+ rc = tipc_crypto_key_distr(tx, rc, NULL);
+ kzfree(skey);
+ }
+
+ if (unlikely(rc))
+ pr_warn_ratelimited("%s: rekeying returns %d\n", tx->name, rc);
+
+resched:
+ /* Re-schedule rekeying if needed */
+ tipc_crypto_rekeying_sched(tx, false, 0);
+}
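Note that tipc_crypto_key_init() returns the index of the newly attached key on success, which is why the positive 'rc' above can be passed straight to tipc_crypto_key_distr(); a sketch of the chain with error handling elided:

    /* Sketch: generate -> attach -> distribute, as chained above */
    int new_key = tipc_crypto_key_init(tx, skey, PER_NODE_KEY, false);

    if (new_key > 0)                    /* new_key is the key slot index */
            tipc_crypto_key_distr(tx, new_key, NULL);   /* all peers */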
diff --git a/net/tipc/crypto.h b/net/tipc/crypto.h
index c3de769f49e8..e71193bd5e36 100644
--- a/net/tipc/crypto.h
+++ b/net/tipc/crypto.h
@@ -67,6 +67,7 @@ enum {
};
extern int sysctl_tipc_max_tfms __read_mostly;
+extern int sysctl_tipc_key_exchange_enabled __read_mostly;
/**
* TIPC encryption message format:
@@ -74,7 +75,7 @@ extern int sysctl_tipc_max_tfms __read_mostly;
* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0
* 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * w0:|Ver=7| User |D|TX |RX |K| Rsvd |
+ * w0:|Ver=7| User |D|TX |RX |K|M|N| Rsvd |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* w1:| Seqno |
* w2:| (8 octets) |
@@ -101,6 +102,9 @@ extern int sysctl_tipc_max_tfms __read_mostly;
* RX : Currently RX active key corresponding to the destination
* node's TX key (when the "D" bit is set)
* K : Keep-alive bit (for RPS, LINK_PROTOCOL/STATE_MSG only)
+ * M : Bit indicating whether the sender has a master key
+ * N : Bit indicating whether the sender has no RX keys corresponding
+ * to the receiver's TX key (when the "D" bit is set)
* Rsvd : Reserved bit, field
* Word1-2:
* Seqno : The 64-bit sequence number of the encrypted message, also
@@ -117,7 +121,9 @@ struct tipc_ehdr {
__u8 destined:1,
user:4,
version:3;
- __u8 reserved_1:3,
+ __u8 reserved_1:1,
+ rx_nokey:1,
+ master_key:1,
keepalive:1,
rx_key_active:2,
tx_key:2;
@@ -128,7 +134,9 @@ struct tipc_ehdr {
__u8 tx_key:2,
rx_key_active:2,
keepalive:1,
- reserved_1:3;
+ master_key:1,
+ rx_nokey:1,
+ reserved_1:1;
#else
#error "Please fix <asm/byteorder.h>"
#endif
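With the two former reserved bits repurposed, a receiver can learn the peer's key state directly from word 0; a sketch of the receive side, where schedule_key_distribution() is a hypothetical stand-in for the real scheduling done in tipc_crypto_key_synch():

    /* Sketch: what the receiver reads from the ehdr flags */
    struct tipc_ehdr *ehdr = (struct tipc_ehdr *)skb->data;

    rx->key_master = ehdr->master_key;      /* "M": peer holds master key */
    if (ehdr->destined && ehdr->rx_nokey)   /* "N": peer lacks our TX key */
            schedule_key_distribution(rx);  /* hypothetical helper */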
@@ -158,10 +166,35 @@ int tipc_crypto_xmit(struct net *net, struct sk_buff **skb,
int tipc_crypto_rcv(struct net *net, struct tipc_crypto *rx,
struct sk_buff **skb, struct tipc_bearer *b);
int tipc_crypto_key_init(struct tipc_crypto *c, struct tipc_aead_key *ukey,
- u8 mode);
+ u8 mode, bool master_key);
void tipc_crypto_key_flush(struct tipc_crypto *c);
-int tipc_aead_key_validate(struct tipc_aead_key *ukey);
+int tipc_crypto_key_distr(struct tipc_crypto *tx, u8 key,
+ struct tipc_node *dest);
+void tipc_crypto_msg_rcv(struct net *net, struct sk_buff *skb);
+void tipc_crypto_rekeying_sched(struct tipc_crypto *tx, bool changed,
+ u32 new_intv);
+int tipc_aead_key_validate(struct tipc_aead_key *ukey, struct genl_info *info);
bool tipc_ehdr_validate(struct sk_buff *skb);
+static inline u32 msg_key_gen(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_key_gen(struct tipc_msg *m, u32 gen)
+{
+ msg_set_bits(m, 4, 16, 0xffff, gen);
+}
+
+static inline u32 msg_key_mode(struct tipc_msg *m)
+{
+ return msg_bits(m, 4, 0, 0xf);
+}
+
+static inline void msg_set_key_mode(struct tipc_msg *m, u32 mode)
+{
+ msg_set_bits(m, 4, 0, 0xf, mode);
+}
+
#endif /* _TIPC_CRYPTO_H */
#endif
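Both values live in message word 4, the generation in bits 16-31 and the mode in bits 0-3; a round-trip sketch, assuming an initialized MSG_CRYPTO header 'hdr':

    /* Sketch: pack and unpack the key meta data in word 4 */
    msg_set_key_gen(hdr, 0x1234);           /* gen  -> bits 16..31 */
    msg_set_key_mode(hdr, CLUSTER_KEY);     /* mode -> bits 0..3   */

    WARN_ON(msg_key_gen(hdr) != 0x1234);
    WARN_ON(msg_key_mode(hdr) != CLUSTER_KEY);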
diff --git a/net/tipc/link.c b/net/tipc/link.c
index cef38a910107..06b880da2a8e 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -216,11 +216,6 @@ enum {
#define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
-/*
- * Interval between NACKs when packets arrive out of order
- */
-#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
-
/* Link FSM states:
*/
enum {
@@ -1256,6 +1251,11 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
case MSG_FRAGMENTER:
case BCAST_PROTOCOL:
return false;
+#ifdef CONFIG_TIPC_CRYPTO
+ case MSG_CRYPTO:
+ tipc_crypto_msg_rcv(l->net, skb);
+ return true;
+#endif
default:
pr_warn("Dropping received illegal msg type\n");
kfree_skb(skb);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 52e93ba4d8e2..2a78aa701572 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- frag = skb_unshare(frag, GFP_ATOMIC);
+ if (skb_cloned(frag))
+ frag = skb_copy(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
head = *headbuf = frag;
@@ -582,7 +583,7 @@ bundle:
* @pos: position in outer message of msg to be extracted.
* Returns position of next msg
* Consumes outer buffer when last packet extracted
- * Returns true when when there is an extracted buffer, otherwise false
+ * Returns true when there is an extracted buffer, otherwise false
*/
bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
{
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 1016e96db5c4..5d64596ba987 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -82,6 +82,7 @@ struct plist;
#define NAME_DISTRIBUTOR 11
#define MSG_FRAGMENTER 12
#define LINK_CONFIG 13
+#define MSG_CRYPTO 14
#define SOCK_WAKEUP 14 /* pseudo user */
#define TOP_SRV 15 /* pseudo user */
@@ -127,7 +128,9 @@ struct tipc_skb_cb {
#ifdef CONFIG_TIPC_CRYPTO
u8 encrypted:1;
u8 decrypted:1;
- u8 probe:1;
+#define SKB_PROBING 1
+#define SKB_GRACING 2
+ u8 xmit_type:2;
u8 tx_clone_deferred:1;
#endif
};
@@ -747,6 +750,9 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
#define GRP_RECLAIM_MSG 4
#define GRP_REMIT_MSG 5
+/* Crypto message types */
+#define KEY_DISTR_MSG 0
+
/*
* Word 1
*/
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 2f9c148f17e2..fe4edce459ad 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -327,8 +327,13 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
struct tipc_msg *hdr;
u16 seqno;
+ spin_lock_bh(&namedq->lock);
skb_queue_walk_safe(namedq, skb, tmp) {
- skb_linearize(skb);
+ if (unlikely(skb_linearize(skb))) {
+ __skb_unlink(skb, namedq);
+ kfree_skb(skb);
+ continue;
+ }
hdr = buf_msg(skb);
seqno = msg_named_seqno(hdr);
if (msg_is_last_bulk(hdr)) {
@@ -338,12 +343,14 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
if (msg_is_bulk(hdr) || msg_is_legacy(hdr)) {
__skb_unlink(skb, namedq);
+ spin_unlock_bh(&namedq->lock);
return skb;
}
if (*open && (*rcv_nxt == seqno)) {
(*rcv_nxt)++;
__skb_unlink(skb, namedq);
+ spin_unlock_bh(&namedq->lock);
return skb;
}
@@ -353,6 +360,7 @@ static struct sk_buff *tipc_named_dequeue(struct sk_buff_head *namedq,
continue;
}
}
+ spin_unlock_bh(&namedq->lock);
return NULL;
}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 85400e4242de..0bb2323201da 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -105,12 +105,6 @@
* - A local spin_lock protecting the queue of subscriber events.
*/
-struct tipc_net_work {
- struct work_struct work;
- struct net *net;
- u32 addr;
-};
-
static void tipc_net_finalize(struct net *net, u32 addr);
int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
@@ -142,25 +136,21 @@ static void tipc_net_finalize(struct net *net, u32 addr)
TIPC_CLUSTER_SCOPE, 0, addr);
}
-static void tipc_net_finalize_work(struct work_struct *work)
+void tipc_net_finalize_work(struct work_struct *work)
{
struct tipc_net_work *fwork;
fwork = container_of(work, struct tipc_net_work, work);
tipc_net_finalize(fwork->net, fwork->addr);
- kfree(fwork);
}
void tipc_sched_net_finalize(struct net *net, u32 addr)
{
- struct tipc_net_work *fwork = kzalloc(sizeof(*fwork), GFP_ATOMIC);
+ struct tipc_net *tn = tipc_net(net);
- if (!fwork)
- return;
- INIT_WORK(&fwork->work, tipc_net_finalize_work);
- fwork->net = net;
- fwork->addr = addr;
- schedule_work(&fwork->work);
+ tn->final_work.net = net;
+ tn->final_work.addr = addr;
+ schedule_work(&tn->final_work.work);
}
void tipc_net_stop(struct net *net)
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 6740d97c706e..d0c91d2df20a 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -42,6 +42,7 @@
extern const struct nla_policy tipc_nl_net_policy[];
int tipc_net_init(struct net *net, u8 *node_id, u32 addr);
+void tipc_net_finalize_work(struct work_struct *work);
void tipc_sched_net_finalize(struct net *net, u32 addr);
void tipc_net_stop(struct net *net);
int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index c4aee6247d55..c447cb5f879e 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -108,6 +108,8 @@ const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
.len = TIPC_NODEID_LEN},
[TIPC_NLA_NODE_KEY] = { .type = NLA_BINARY,
.len = TIPC_AEAD_KEY_SIZE_MAX},
+ [TIPC_NLA_NODE_KEY_MASTER] = { .type = NLA_FLAG },
+ [TIPC_NLA_NODE_REKEYING] = { .type = NLA_U32 },
};
/* Properties valid for media, bearer and link */
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 90e3c70a91ad..1c7aa51cc2a3 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -1337,7 +1337,7 @@ send:
return err;
}
-static const struct genl_ops tipc_genl_compat_ops[] = {
+static const struct genl_small_ops tipc_genl_compat_ops[] = {
{
.cmd = TIPC_GENL_CMD,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -1352,8 +1352,8 @@ static struct genl_family tipc_genl_compat_family __ro_after_init = {
.maxattr = 0,
.netnsok = true,
.module = THIS_MODULE,
- .ops = tipc_genl_compat_ops,
- .n_ops = ARRAY_SIZE(tipc_genl_compat_ops),
+ .small_ops = tipc_genl_compat_ops,
+ .n_small_ops = ARRAY_SIZE(tipc_genl_compat_ops),
};
int __init tipc_netlink_compat_start(void)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 4edcee3088da..d269ebe382e1 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -278,6 +278,14 @@ struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
return container_of(pos, struct tipc_node, list)->crypto_rx;
}
+
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr)
+{
+ struct tipc_node *n;
+
+ n = tipc_node_find(net, addr);
+ return (n) ? n->crypto_rx : NULL;
+}
#endif
static void tipc_node_free(struct rcu_head *rp)
@@ -303,7 +311,7 @@ void tipc_node_put(struct tipc_node *node)
kref_put(&node->kref, tipc_node_kref_release);
}
-static void tipc_node_get(struct tipc_node *node)
+void tipc_node_get(struct tipc_node *node)
{
kref_get(&node->kref);
}
@@ -584,6 +592,9 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
static void tipc_node_delete_from_list(struct tipc_node *node)
{
+#ifdef CONFIG_TIPC_CRYPTO
+ tipc_crypto_key_flush(node->crypto_rx);
+#endif
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
tipc_node_put(node);
@@ -1485,7 +1496,7 @@ static void node_lost_contact(struct tipc_node *n,
/* Clean up broadcast state */
tipc_bcast_remove_peer(n->net, n->bc_entry.link);
- __skb_queue_purge(&n->bc_entry.namedq);
+ skb_queue_purge(&n->bc_entry.namedq);
/* Abort any ongoing link failover */
for (i = 0; i < MAX_BEARERS; i++) {
@@ -2868,15 +2879,27 @@ static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
return 0;
}
+static int tipc_nl_retrieve_rekeying(struct nlattr **attrs, u32 *intv)
+{
+ struct nlattr *attr = attrs[TIPC_NLA_NODE_REKEYING];
+
+ if (!attr)
+ return -ENODATA;
+
+ *intv = nla_get_u32(attr);
+ return 0;
+}
+
static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
struct net *net = sock_net(skb->sk);
- struct tipc_net *tn = tipc_net(net);
+ struct tipc_crypto *tx = tipc_net(net)->crypto_tx, *c = tx;
struct tipc_node *n = NULL;
struct tipc_aead_key *ukey;
- struct tipc_crypto *c;
- u8 *id, *own_id;
+ bool rekeying = true, master_key = false;
+ u8 *id, *own_id, mode;
+ u32 intv = 0;
int rc = 0;
if (!info->attrs[TIPC_NLA_NODE])
@@ -2886,52 +2909,66 @@ static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
info->attrs[TIPC_NLA_NODE],
tipc_nl_node_policy, info->extack);
if (rc)
- goto exit;
+ return rc;
own_id = tipc_own_id(net);
if (!own_id) {
- rc = -EPERM;
- goto exit;
+ GENL_SET_ERR_MSG(info, "not found own node identity (set id?)");
+ return -EPERM;
}
+ rc = tipc_nl_retrieve_rekeying(attrs, &intv);
+ if (rc == -ENODATA)
+ rekeying = false;
+
rc = tipc_nl_retrieve_key(attrs, &ukey);
- if (rc)
- goto exit;
+ if (rc == -ENODATA && rekeying)
+ goto rekeying;
+ else if (rc)
+ return rc;
- rc = tipc_aead_key_validate(ukey);
+ rc = tipc_aead_key_validate(ukey, info);
if (rc)
- goto exit;
+ return rc;
rc = tipc_nl_retrieve_nodeid(attrs, &id);
switch (rc) {
case -ENODATA:
- /* Cluster key mode */
- rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
+ mode = CLUSTER_KEY;
+ master_key = !!(attrs[TIPC_NLA_NODE_KEY_MASTER]);
break;
case 0:
- /* Per-node key mode */
- if (!memcmp(id, own_id, NODE_ID_LEN)) {
- c = tn->crypto_tx;
- } else {
+ mode = PER_NODE_KEY;
+ if (memcmp(id, own_id, NODE_ID_LEN)) {
n = tipc_node_find_by_id(net, id) ?:
tipc_node_create(net, 0, id, 0xffffu, 0, true);
- if (unlikely(!n)) {
- rc = -ENOMEM;
- break;
- }
+ if (unlikely(!n))
+ return -ENOMEM;
c = n->crypto_rx;
}
-
- rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
- if (n)
- tipc_node_put(n);
break;
default:
- break;
+ return rc;
}
-exit:
- return (rc < 0) ? rc : 0;
+ /* Initiate the TX/RX key */
+ rc = tipc_crypto_key_init(c, ukey, mode, master_key);
+ if (n)
+ tipc_node_put(n);
+
+ if (unlikely(rc < 0)) {
+ GENL_SET_ERR_MSG(info, "unable to initiate or attach new key");
+ return rc;
+ } else if (c == tx) {
+ /* Distribute TX key but not master one */
+ if (!master_key && tipc_crypto_key_distr(tx, rc, NULL))
+ GENL_SET_ERR_MSG(info, "failed to replicate new key");
+rekeying:
+ /* Schedule TX rekeying if needed */
+ tipc_crypto_rekeying_sched(tx, rekeying, intv);
+ }
+
+ return 0;
}
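For reference, this path is driven from user space via the TIPC_NL_KEY_SET command with the TIPC_NLA_NODE attributes above; a sketch of the attribute payload a tool might build (netlink plumbing elided, attribute names assumed from the uapi headers):

    /* Sketch: request a cluster-wide master key plus hourly rekeying */
    struct nlattr *node = nla_nest_start(msg, TIPC_NLA_NODE);

    nla_put(msg, TIPC_NLA_NODE_KEY, tipc_aead_key_size(ukey), ukey);
    nla_put_flag(msg, TIPC_NLA_NODE_KEY_MASTER);
    nla_put_u32(msg, TIPC_NLA_NODE_REKEYING, 60);   /* minutes */
    nla_nest_end(msg, node);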
int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
@@ -2958,7 +2995,6 @@ static int __tipc_nl_node_flush_key(struct sk_buff *skb,
tipc_crypto_key_flush(n->crypto_rx);
rcu_read_unlock();
- pr_info("All keys are flushed!\n");
return 0;
}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 9f6f13f1604f..154a5bbb0d29 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -79,12 +79,14 @@ bool tipc_node_get_id(struct net *net, u32 addr, u8 *id);
u32 tipc_node_get_addr(struct tipc_node *node);
char *tipc_node_get_id_str(struct tipc_node *node);
void tipc_node_put(struct tipc_node *node);
+void tipc_node_get(struct tipc_node *node);
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
u16 capabilities, u32 hash_mixes,
bool preliminary);
#ifdef CONFIG_TIPC_CRYPTO
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n);
struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos);
+struct tipc_crypto *tipc_node_crypto_rx_by_addr(struct net *net, u32 addr);
#endif
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr);
void tipc_node_check_dest(struct net *net, u32 onode, u8 *peer_id128,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 11b27ddc75ba..e795a8a2955b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -52,10 +52,9 @@
#define NAGLE_START_MAX 1024
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
#define CONN_PROBING_INTV msecs_to_jiffies(3600000) /* [ms] => 1 h */
-#define TIPC_FWD_MSG 1
#define TIPC_MAX_PORT 0xffffffff
#define TIPC_MIN_PORT 1
-#define TIPC_ACK_RATE 4 /* ACK at 1/4 of of rcv window size */
+#define TIPC_ACK_RATE 4 /* ACK at 1/4 of rcv window size */
enum {
TIPC_LISTEN = TCP_LISTEN,
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 97a6264a2993..9fb65c988f7f 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -74,6 +74,15 @@ static struct ctl_table tipc_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ONE,
},
+ {
+ .procname = "key_exchange_enabled",
+ .data = &sysctl_tipc_key_exchange_enabled,
+ .maxlen = sizeof(sysctl_tipc_key_exchange_enabled),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
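The knob appears under the existing tipc sysctl directory, i.e. net.tipc.key_exchange_enabled; assuming the default of 1 (enabled) used elsewhere in this series, setting it to 0 disables automatic session-key exchange and pins the cluster to manually configured keys.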
#endif
{
.procname = "bc_retruni",
diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
index 1489cfb941d8..5f6f86051c83 100644
--- a/net/tipc/topsrv.c
+++ b/net/tipc/topsrv.c
@@ -48,7 +48,6 @@
#define MAX_SEND_MSG_COUNT 25
#define MAX_RECV_MSG_COUNT 25
#define CF_CONNECTED 1
-#define CF_SERVER 2
#define TIPC_SERVER_NAME_LEN 32
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 911d13cd2e67..1d17f4470ee2 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,6 +52,7 @@
#include "bearer.h"
#include "netlink.h"
#include "msg.h"
+#include "udp_media.h"
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index b74e2741f74f..cec86229a6a0 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -418,14 +418,14 @@ static int tls_push_data(struct sock *sk,
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_prot_info *prot = &tls_ctx->prot_info;
struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
- int more = flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE);
struct tls_record_info *record = ctx->open_record;
int tls_push_record_flags;
struct page_frag *pfrag;
size_t orig_size = size;
u32 max_open_record_len;
- int copy, rc = 0;
+ bool more = false;
bool done = false;
+ int copy, rc = 0;
long timeo;
if (flags &
@@ -492,9 +492,8 @@ handle_error:
if (!size) {
last_record:
tls_push_record_flags = flags;
- if (more) {
- tls_ctx->pending_open_record_frags =
- !!record->num_frags;
+ if (flags & (MSG_SENDPAGE_NOTLAST | MSG_MORE)) {
+ more = true;
break;
}
@@ -526,6 +525,8 @@ last_record:
}
} while (!done);
+ tls_ctx->pending_open_record_frags = more;
+
if (orig_size - size > 0)
rc = orig_size - size;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index bbc52b088d29..8d93cea99f2c 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -330,12 +330,13 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
tls_ctx_free(sk, ctx);
}
-static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
- int __user *optlen)
+static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
+ int __user *optlen, int tx)
{
int rc = 0;
struct tls_context *ctx = tls_get_ctx(sk);
struct tls_crypto_info *crypto_info;
+ struct cipher_context *cctx;
int len;
if (get_user(len, optlen))
@@ -352,7 +353,13 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
}
/* get user crypto info */
- crypto_info = &ctx->crypto_send.info;
+ if (tx) {
+ crypto_info = &ctx->crypto_send.info;
+ cctx = &ctx->tx;
+ } else {
+ crypto_info = &ctx->crypto_recv.info;
+ cctx = &ctx->rx;
+ }
if (!TLS_CRYPTO_INFO_READY(crypto_info)) {
rc = -EBUSY;
@@ -379,9 +386,9 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
}
lock_sock(sk);
memcpy(crypto_info_aes_gcm_128->iv,
- ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
+ cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
TLS_CIPHER_AES_GCM_128_IV_SIZE);
- memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->tx.rec_seq,
+ memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
release_sock(sk);
if (copy_to_user(optval,
@@ -403,9 +410,9 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
}
lock_sock(sk);
memcpy(crypto_info_aes_gcm_256->iv,
- ctx->tx.iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
+ cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
TLS_CIPHER_AES_GCM_256_IV_SIZE);
- memcpy(crypto_info_aes_gcm_256->rec_seq, ctx->tx.rec_seq,
+ memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
release_sock(sk);
if (copy_to_user(optval,
@@ -429,7 +436,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
switch (optname) {
case TLS_TX:
- rc = do_tls_getsockopt_tx(sk, optval, optlen);
+ case TLS_RX:
+ rc = do_tls_getsockopt_conf(sk, optval, optlen,
+ optname == TLS_TX);
break;
default:
rc = -ENOPROTOOPT;
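With TLS_RX now accepted, the getsockopt() path is symmetric with TLS_TX; a user-space sketch for an AES-GCM-128 kTLS socket:

    /* Sketch: read back the RX crypto state from a kTLS socket */
    struct tls12_crypto_info_aes_gcm_128 ci;
    socklen_t len = sizeof(ci);

    if (getsockopt(fd, SOL_TLS, TLS_RX, &ci, &len) == 0)
            ;   /* ci.iv and ci.rec_seq now reflect the RX direction */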
@@ -860,7 +869,7 @@ static int __init tls_register(void)
tls_sw_proto_ops = inet_stream_ops;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
- tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked,
+ tls_sw_proto_ops.sendpage_locked = tls_sw_sendpage_locked;
tls_device_init();
tcp_register_ulp(&tcp_tls_ulp_ops);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 92784e51ee7d..41c3303c3357 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -613,7 +613,6 @@ static int unix_listen(struct socket *sock, int backlog)
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
- struct pid *old_pid = NULL;
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
@@ -634,7 +633,6 @@ static int unix_listen(struct socket *sock, int backlog)
out_unlock:
unix_state_unlock(sk);
- put_pid(old_pid);
out:
return err;
}
@@ -878,7 +876,6 @@ static int unix_autobind(struct socket *sock)
if (err)
return err;
- err = 0;
if (u->addr)
goto out;
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 4b9b1c5e8f3a..b6dd9d956ed8 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -401,7 +401,7 @@ static const struct nla_policy wimax_gnl_policy[WIMAX_GNL_ATTR_MAX + 1] = {
},
};
-static const struct genl_ops wimax_gnl_ops[] = {
+static const struct genl_small_ops wimax_gnl_ops[] = {
{
.cmd = WIMAX_GNL_OP_MSG_FROM_USER,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -560,8 +560,8 @@ struct genl_family wimax_gnl_family __ro_after_init = {
.maxattr = WIMAX_GNL_ATTR_MAX,
.policy = wimax_gnl_policy,
.module = THIS_MODULE,
- .ops = wimax_gnl_ops,
- .n_ops = ARRAY_SIZE(wimax_gnl_ops),
+ .small_ops = wimax_gnl_ops,
+ .n_small_ops = ARRAY_SIZE(wimax_gnl_ops),
.mcgrps = wimax_gnl_mcgrps,
.n_mcgrps = ARRAY_SIZE(wimax_gnl_mcgrps),
};
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 6a6f2f214c10..22d1779ab2b1 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -141,9 +141,62 @@ static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
return true;
}
+static int nl80211_chan_width_to_mhz(enum nl80211_chan_width chan_width)
+{
+ int mhz;
+
+ switch (chan_width) {
+ case NL80211_CHAN_WIDTH_1:
+ mhz = 1;
+ break;
+ case NL80211_CHAN_WIDTH_2:
+ mhz = 2;
+ break;
+ case NL80211_CHAN_WIDTH_4:
+ mhz = 4;
+ break;
+ case NL80211_CHAN_WIDTH_8:
+ mhz = 8;
+ break;
+ case NL80211_CHAN_WIDTH_16:
+ mhz = 16;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ mhz = 5;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ mhz = 10;
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ mhz = 20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ mhz = 40;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_80:
+ mhz = 80;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ mhz = 160;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+ return mhz;
+}
+
+static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+{
+ return nl80211_chan_width_to_mhz(c->width);
+}
+
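The S1G validation added below keeps all arithmetic in kHz so sub-MHz channel centers compare exactly; a worked example, assuming a 1 MHz control channel centered at 902.5 MHz inside a 4 MHz operating channel centered at 904 MHz:

    /* Sketch: the S1G containment check, in kHz
     *   control: 902500, 1 MHz wide -> edges 902000..903000
     *   oper:    904000, 4 MHz wide -> edges 902000..906000
     * upper: 903000 <= 906000, lower: 902000 >= 902000 -> valid
     */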
bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
{
- u32 control_freq;
+ u32 control_freq, oper_freq;
+ int oper_width, control_width;
if (!chandef->chan)
return false;
@@ -154,11 +207,6 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
control_freq = chandef->chan->center_freq;
switch (chandef->width) {
- case NL80211_CHAN_WIDTH_1:
- case NL80211_CHAN_WIDTH_2:
- case NL80211_CHAN_WIDTH_4:
- case NL80211_CHAN_WIDTH_8:
- case NL80211_CHAN_WIDTH_16:
case NL80211_CHAN_WIDTH_5:
case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20:
@@ -169,6 +217,34 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
if (chandef->center_freq2)
return false;
break;
+ case NL80211_CHAN_WIDTH_1:
+ case NL80211_CHAN_WIDTH_2:
+ case NL80211_CHAN_WIDTH_4:
+ case NL80211_CHAN_WIDTH_8:
+ case NL80211_CHAN_WIDTH_16:
+ if (chandef->chan->band != NL80211_BAND_S1GHZ)
+ return false;
+
+ control_freq = ieee80211_channel_to_khz(chandef->chan);
+ oper_freq = ieee80211_chandef_to_khz(chandef);
+ control_width = nl80211_chan_width_to_mhz(
+ ieee80211_s1g_channel_width(
+ chandef->chan));
+ oper_width = cfg80211_chandef_get_width(chandef);
+
+ if (oper_width < 0 || control_width < 0)
+ return false;
+ if (chandef->center_freq2)
+ return false;
+
+ if (control_freq + MHZ_TO_KHZ(control_width) / 2 >
+ oper_freq + MHZ_TO_KHZ(oper_width) / 2)
+ return false;
+
+ if (control_freq - MHZ_TO_KHZ(control_width) / 2 <
+ oper_freq - MHZ_TO_KHZ(oper_width) / 2)
+ return false;
+ break;
case NL80211_CHAN_WIDTH_40:
if (chandef->center_freq1 != control_freq + 10 &&
chandef->center_freq1 != control_freq - 10)
@@ -264,53 +340,6 @@ static void chandef_primary_freqs(const struct cfg80211_chan_def *c,
}
}
-static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
-{
- int width;
-
- switch (c->width) {
- case NL80211_CHAN_WIDTH_1:
- width = 1;
- break;
- case NL80211_CHAN_WIDTH_2:
- width = 2;
- break;
- case NL80211_CHAN_WIDTH_4:
- width = 4;
- break;
- case NL80211_CHAN_WIDTH_8:
- width = 8;
- break;
- case NL80211_CHAN_WIDTH_16:
- width = 16;
- break;
- case NL80211_CHAN_WIDTH_5:
- width = 5;
- break;
- case NL80211_CHAN_WIDTH_10:
- width = 10;
- break;
- case NL80211_CHAN_WIDTH_20:
- case NL80211_CHAN_WIDTH_20_NOHT:
- width = 20;
- break;
- case NL80211_CHAN_WIDTH_40:
- width = 40;
- break;
- case NL80211_CHAN_WIDTH_80P80:
- case NL80211_CHAN_WIDTH_80:
- width = 80;
- break;
- case NL80211_CHAN_WIDTH_160:
- width = 160;
- break;
- default:
- WARN_ON_ONCE(1);
- return -1;
- }
- return width;
-}
-
const struct cfg80211_chan_def *
cfg80211_chandef_compatible(const struct cfg80211_chan_def *c1,
const struct cfg80211_chan_def *c2)
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 354b0ccbdc24..9f23923e8d29 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -236,7 +236,9 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
rdev->opencount--;
if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
- if (WARN_ON(!rdev->scan_req->notified))
+ if (WARN_ON(!rdev->scan_req->notified &&
+ (!rdev->int_scan_req ||
+ !rdev->int_scan_req->notified)))
rdev->scan_req->info.aborted = true;
___cfg80211_scan_done(rdev, false);
}
@@ -1336,7 +1338,9 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
case NETDEV_DOWN:
cfg80211_update_iface_num(rdev, wdev->iftype, -1);
if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
- if (WARN_ON(!rdev->scan_req->notified))
+ if (WARN_ON(!rdev->scan_req->notified &&
+ (!rdev->int_scan_req ||
+ !rdev->int_scan_req->notified)))
rdev->scan_req->info.aborted = true;
___cfg80211_scan_done(rdev, false);
}
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 67b0389fca4d..e1ec9ac8e608 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,7 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -72,6 +72,7 @@ struct cfg80211_registered_device {
u32 bss_generation;
u32 bss_entries;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
+ struct cfg80211_scan_request *int_scan_req;
struct sk_buff *scan_msg;
struct list_head sched_scan_req_list;
time64_t suspend_at;
@@ -457,6 +458,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev);
bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
u32 center_freq_khz, u32 bw_khz);
+int cfg80211_scan(struct cfg80211_registered_device *rdev);
+
extern struct work_struct cfg80211_disconnect_work;
/**
@@ -466,8 +469,8 @@ extern struct work_struct cfg80211_disconnect_work;
*
* Checks if chandef is usable and we can/need start CAC on such channel.
*
- * Return: Return true if all channels available and at least
- * one channel require CAC (NL80211_DFS_USABLE)
+ * Return: true if all channels available and at least
+ * one channel requires CAC (NL80211_DFS_USABLE)
*/
bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef);
diff --git a/net/wireless/lib80211.c b/net/wireless/lib80211.c
index cc7b9fd5c166..d66a913027e0 100644
--- a/net/wireless/lib80211.c
+++ b/net/wireless/lib80211.c
@@ -26,8 +26,6 @@
#include <net/lib80211.h>
-#define DRV_NAME "lib80211"
-
#define DRV_DESCRIPTION "common routines for IEEE802.11 drivers"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index db7333e20dd7..0ac820780437 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -30,6 +30,15 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buf;
struct cfg80211_connect_resp_params cr;
+ const u8 *resp_ie = mgmt->u.assoc_resp.variable;
+ size_t resp_ie_len = len - offsetof(struct ieee80211_mgmt,
+ u.assoc_resp.variable);
+
+ if (bss->channel->band == NL80211_BAND_S1GHZ) {
+ resp_ie = (u8 *)&mgmt->u.s1g_assoc_resp.variable;
+ resp_ie_len = len - offsetof(struct ieee80211_mgmt,
+ u.s1g_assoc_resp.variable);
+ }
memset(&cr, 0, sizeof(cr));
cr.status = (int)le16_to_cpu(mgmt->u.assoc_resp.status_code);
@@ -37,9 +46,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
cr.bss = bss;
cr.req_ie = req_ies;
cr.req_ie_len = req_ies_len;
- cr.resp_ie = mgmt->u.assoc_resp.variable;
- cr.resp_ie_len =
- len - offsetof(struct ieee80211_mgmt, u.assoc_resp.variable);
+ cr.resp_ie = resp_ie;
+ cr.resp_ie_len = resp_ie_len;
cr.timeout_reason = NL80211_TIMEOUT_UNSPECIFIED;
trace_cfg80211_send_rx_assoc(dev, bss);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7fd45f6ddb05..554796a6c6fe 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -209,14 +209,23 @@ static int validate_beacon_head(const struct nlattr *attr,
unsigned int len = nla_len(attr);
const struct element *elem;
const struct ieee80211_mgmt *mgmt = (void *)data;
- unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
- u.beacon.variable);
+ bool s1g_bcn = ieee80211_is_s1g_beacon(mgmt->frame_control);
+ unsigned int fixedlen, hdrlen;
+
+ if (s1g_bcn) {
+ fixedlen = offsetof(struct ieee80211_ext,
+ u.s1g_beacon.variable);
+ hdrlen = offsetof(struct ieee80211_ext, u.s1g_beacon);
+ } else {
+ fixedlen = offsetof(struct ieee80211_mgmt,
+ u.beacon.variable);
+ hdrlen = offsetof(struct ieee80211_mgmt, u.beacon);
+ }
if (len < fixedlen)
goto err;
- if (ieee80211_hdrlen(mgmt->frame_control) !=
- offsetof(struct ieee80211_mgmt, u.beacon))
+ if (ieee80211_hdrlen(mgmt->frame_control) != hdrlen)
goto err;
data += fixedlen;
@@ -320,6 +329,13 @@ he_obss_pd_policy[NL80211_HE_OBSS_PD_ATTR_MAX + 1] = {
NLA_POLICY_RANGE(NLA_U8, 1, 20),
[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET] =
NLA_POLICY_RANGE(NLA_U8, 1, 20),
+ [NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET] =
+ NLA_POLICY_RANGE(NLA_U8, 1, 20),
+ [NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP] =
+ NLA_POLICY_EXACT_LEN(8),
+ [NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP] =
+ NLA_POLICY_EXACT_LEN(8),
+ [NL80211_HE_OBSS_PD_ATTR_SR_CTRL] = { .type = NLA_U8 },
};
static const struct nla_policy
@@ -336,6 +352,13 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
.len = NL80211_MAX_SUPP_HT_RATES },
[NL80211_TXRATE_VHT] = NLA_POLICY_EXACT_LEN_WARN(sizeof(struct nl80211_txrate_vht)),
[NL80211_TXRATE_GI] = { .type = NLA_U8 },
+ [NL80211_TXRATE_HE] = NLA_POLICY_EXACT_LEN(sizeof(struct nl80211_txrate_he)),
+ [NL80211_TXRATE_HE_GI] = NLA_POLICY_RANGE(NLA_U8,
+ NL80211_RATE_INFO_HE_GI_0_8,
+ NL80211_RATE_INFO_HE_GI_3_2),
+ [NL80211_TXRATE_HE_LTF] = NLA_POLICY_RANGE(NLA_U8,
+ NL80211_RATE_INFO_HE_1XLTF,
+ NL80211_RATE_INFO_HE_4XLTF),
};
static const struct nla_policy
@@ -360,6 +383,22 @@ nl80211_tid_config_attr_policy[NL80211_TID_CONFIG_ATTR_MAX + 1] = {
NLA_POLICY_NESTED(nl80211_txattr_policy),
};
+static const struct nla_policy
+nl80211_fils_discovery_policy[NL80211_FILS_DISCOVERY_ATTR_MAX + 1] = {
+ [NL80211_FILS_DISCOVERY_ATTR_INT_MIN] = NLA_POLICY_MAX(NLA_U32, 10000),
+ [NL80211_FILS_DISCOVERY_ATTR_INT_MAX] = NLA_POLICY_MAX(NLA_U32, 10000),
+ NLA_POLICY_RANGE(NLA_BINARY,
+ NL80211_FILS_DISCOVERY_TMPL_MIN_LEN,
+ IEEE80211_MAX_DATA_LEN),
+};
+
+static const struct nla_policy
+nl80211_unsol_bcast_probe_resp_policy[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + 1] = {
+ [NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] = NLA_POLICY_MAX(NLA_U32, 20),
+ [NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL] = { .type = NLA_BINARY,
+ .len = IEEE80211_MAX_DATA_LEN }
+};
+
static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD },
[NL80211_ATTR_WIPHY] = { .type = NLA_U32 },
@@ -539,7 +578,10 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_BG_SCAN_PERIOD] = { .type = NLA_U16 },
[NL80211_ATTR_WDEV] = { .type = NLA_U64 },
[NL80211_ATTR_USER_REG_HINT_TYPE] = { .type = NLA_U32 },
- [NL80211_ATTR_AUTH_DATA] = { .type = NLA_BINARY, },
+
+ /* need to include at least Auth Transaction and Status Code */
+ [NL80211_ATTR_AUTH_DATA] = NLA_POLICY_MIN_LEN(4),
+
[NL80211_ATTR_VHT_CAPABILITY] = NLA_POLICY_EXACT_LEN_WARN(NL80211_VHT_CAPABILITY_LEN),
[NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 },
[NL80211_ATTR_P2P_CTWINDOW] = NLA_POLICY_MAX(NLA_U8, 127),
@@ -561,23 +603,30 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
- [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
+ [NL80211_ATTR_MAX_CRIT_PROT_DURATION] =
+ NLA_POLICY_MAX(NLA_U16, NL80211_CRIT_PROTO_MAX_DURATION),
[NL80211_ATTR_PEER_AID] =
NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
[NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
[NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
[NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
- [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_BINARY },
- [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_BINARY },
- [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = { .type = NLA_BINARY },
- [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] = { .type = NLA_BINARY },
+ [NL80211_ATTR_CNTDWN_OFFS_BEACON] = { .type = NLA_BINARY },
+ [NL80211_ATTR_CNTDWN_OFFS_PRESP] = { .type = NLA_BINARY },
+ [NL80211_ATTR_STA_SUPPORTED_CHANNELS] = NLA_POLICY_MIN_LEN(2),
+ /*
+ * The value of the Length field of the Supported Operating
+ * Classes element is between 2 and 253.
+ */
+ [NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES] =
+ NLA_POLICY_RANGE(NLA_BINARY, 2, 253),
[NL80211_ATTR_HANDLE_DFS] = { .type = NLA_FLAG },
[NL80211_ATTR_OPMODE_NOTIF] = { .type = NLA_U8 },
[NL80211_ATTR_VENDOR_ID] = { .type = NLA_U32 },
[NL80211_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
[NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
- [NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
- .len = IEEE80211_QOS_MAP_LEN_MAX },
+ [NL80211_ATTR_QOS_MAP] = NLA_POLICY_RANGE(NLA_BINARY,
+ IEEE80211_QOS_MAP_LEN_MIN,
+ IEEE80211_QOS_MAP_LEN_MAX),
[NL80211_ATTR_MAC_HINT] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
[NL80211_ATTR_TDLS_PEER_CAPABILITY] = { .type = NLA_U32 },
@@ -625,15 +674,17 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
.len = FILS_ERP_MAX_RRK_LEN },
[NL80211_ATTR_FILS_CACHE_ID] = NLA_POLICY_EXACT_LEN_WARN(2),
[NL80211_ATTR_PMK] = { .type = NLA_BINARY, .len = PMK_MAX_LEN },
+ [NL80211_ATTR_PMKR0_NAME] = NLA_POLICY_EXACT_LEN(WLAN_PMK_NAME_LEN),
[NL80211_ATTR_SCHED_SCAN_MULTI] = { .type = NLA_FLAG },
[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT] = { .type = NLA_FLAG },
[NL80211_ATTR_TXQ_LIMIT] = { .type = NLA_U32 },
[NL80211_ATTR_TXQ_MEMORY_LIMIT] = { .type = NLA_U32 },
[NL80211_ATTR_TXQ_QUANTUM] = { .type = NLA_U32 },
- [NL80211_ATTR_HE_CAPABILITY] = { .type = NLA_BINARY,
- .len = NL80211_HE_MAX_CAPABILITY_LEN },
-
+ [NL80211_ATTR_HE_CAPABILITY] =
+ NLA_POLICY_RANGE(NLA_BINARY,
+ NL80211_HE_MIN_CAPABILITY_LEN,
+ NL80211_HE_MAX_CAPABILITY_LEN),
[NL80211_ATTR_FTM_RESPONDER] =
NLA_POLICY_NESTED(nl80211_ftm_responder_policy),
[NL80211_ATTR_TIMEOUT] = NLA_POLICY_MIN(NLA_U32, 1),
@@ -654,10 +705,16 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_RECEIVE_MULTICAST] = { .type = NLA_FLAG },
[NL80211_ATTR_WIPHY_FREQ_OFFSET] = NLA_POLICY_RANGE(NLA_U32, 0, 999),
[NL80211_ATTR_SCAN_FREQ_KHZ] = { .type = NLA_NESTED },
- [NL80211_ATTR_HE_6GHZ_CAPABILITY] = {
- .type = NLA_EXACT_LEN,
- .len = sizeof(struct ieee80211_he_6ghz_capa),
- },
+ [NL80211_ATTR_HE_6GHZ_CAPABILITY] =
+ NLA_POLICY_EXACT_LEN(sizeof(struct ieee80211_he_6ghz_capa)),
+ [NL80211_ATTR_FILS_DISCOVERY] =
+ NLA_POLICY_NESTED(nl80211_fils_discovery_policy),
+ [NL80211_ATTR_UNSOL_BCAST_PROBE_RESP] =
+ NLA_POLICY_NESTED(nl80211_unsol_bcast_probe_resp_policy),
+ [NL80211_ATTR_S1G_CAPABILITY] =
+ NLA_POLICY_EXACT_LEN(IEEE80211_S1G_CAPABILITY_LEN),
+ [NL80211_ATTR_S1G_CAPABILITY_MASK] =
+ NLA_POLICY_EXACT_LEN(IEEE80211_S1G_CAPABILITY_LEN),
};
/* policy for the key attributes */
@@ -703,7 +760,7 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
[NL80211_WOWLAN_TCP_DST_MAC] = NLA_POLICY_EXACT_LEN_WARN(ETH_ALEN),
[NL80211_WOWLAN_TCP_SRC_PORT] = { .type = NLA_U16 },
[NL80211_WOWLAN_TCP_DST_PORT] = { .type = NLA_U16 },
- [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
+ [NL80211_WOWLAN_TCP_DATA_PAYLOAD] = NLA_POLICY_MIN_LEN(1),
[NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ] = {
.len = sizeof(struct nl80211_wowlan_tcp_data_seq)
},
@@ -711,8 +768,8 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
.len = sizeof(struct nl80211_wowlan_tcp_data_token)
},
[NL80211_WOWLAN_TCP_DATA_INTERVAL] = { .type = NLA_U32 },
- [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = { .type = NLA_MIN_LEN, .len = 1 },
- [NL80211_WOWLAN_TCP_WAKE_MASK] = { .type = NLA_MIN_LEN, .len = 1 },
+ [NL80211_WOWLAN_TCP_WAKE_PAYLOAD] = NLA_POLICY_MIN_LEN(1),
+ [NL80211_WOWLAN_TCP_WAKE_MASK] = NLA_POLICY_MIN_LEN(1),
};
#endif /* CONFIG_PM */
@@ -738,7 +795,7 @@ nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
.type = NLA_BINARY,
.len = NL80211_KCK_EXT_LEN
},
- [NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN_WARN(NL80211_REPLAY_CTR_LEN),
+ [NL80211_REKEY_DATA_REPLAY_CTR] = NLA_POLICY_EXACT_LEN(NL80211_REPLAY_CTR_LEN),
[NL80211_REKEY_DATA_AKM] = { .type = NLA_U32 },
};
@@ -778,7 +835,8 @@ nl80211_bss_select_policy[NL80211_BSS_SELECT_ATTR_MAX + 1] = {
/* policy for NAN function attributes */
static const struct nla_policy
nl80211_nan_func_policy[NL80211_NAN_FUNC_ATTR_MAX + 1] = {
- [NL80211_NAN_FUNC_TYPE] = { .type = NLA_U8 },
+ [NL80211_NAN_FUNC_TYPE] =
+ NLA_POLICY_MAX(NLA_U8, NL80211_NAN_FUNC_MAX_TYPE),
[NL80211_NAN_FUNC_SERVICE_ID] = {
.len = NL80211_NAN_FUNC_SERVICE_ID_LEN },
[NL80211_NAN_FUNC_PUBLISH_TYPE] = { .type = NLA_U8 },
@@ -926,6 +984,8 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if (!large && chan->flags &
(IEEE80211_CHAN_NO_10MHZ | IEEE80211_CHAN_NO_20MHZ))
return 0;
+ if (!large && chan->freq_offset)
+ return 0;
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
chan->center_freq))
@@ -992,6 +1052,21 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy,
if ((chan->flags & IEEE80211_CHAN_NO_HE) &&
nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HE))
goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_1MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_1MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_2MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_2MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_4MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_4MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_8MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_8MHZ))
+ goto nla_put_failure;
+ if ((chan->flags & IEEE80211_CHAN_16MHZ) &&
+ nla_put_flag(msg, NL80211_FREQUENCY_ATTR_16MHZ))
+ goto nla_put_failure;
}
if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
@@ -1603,7 +1678,8 @@ nl80211_send_iftype_data(struct sk_buff *msg,
}
static int nl80211_send_band_rateinfo(struct sk_buff *msg,
- struct ieee80211_supported_band *sband)
+ struct ieee80211_supported_band *sband,
+ bool large)
{
struct nlattr *nl_rates, *nl_rate;
struct ieee80211_rate *rate;
@@ -1631,7 +1707,7 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg,
sband->vht_cap.cap)))
return -ENOBUFS;
- if (sband->n_iftype_data) {
+ if (large && sband->n_iftype_data) {
struct nlattr *nl_iftype_data =
nla_nest_start_noflag(msg,
NL80211_BAND_ATTR_IFTYPE_DATA);
@@ -1659,7 +1735,7 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg,
}
/* add EDMG info */
- if (sband->edmg_cap.channels &&
+ if (large && sband->edmg_cap.channels &&
(nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_CHANNELS,
sband->edmg_cap.channels) ||
nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_BW_CONFIG,
@@ -2077,13 +2153,7 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
rdev->wiphy.max_sched_scan_ie_len) ||
nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
- rdev->wiphy.max_match_sets) ||
- nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS,
- rdev->wiphy.max_sched_scan_plans) ||
- nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL,
- rdev->wiphy.max_sched_scan_plan_interval) ||
- nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
- rdev->wiphy.max_sched_scan_plan_iterations))
+ rdev->wiphy.max_match_sets))
goto nla_put_failure;
if ((rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
@@ -2173,6 +2243,10 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband;
+ /* omit higher bands for ancient software */
+ if (band > NL80211_BAND_5GHZ && !state->split)
+ break;
+
sband = rdev->wiphy.bands[band];
if (!sband)
@@ -2184,7 +2258,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
switch (state->chan_start) {
case 0:
- if (nl80211_send_band_rateinfo(msg, sband))
+ if (nl80211_send_band_rateinfo(msg, sband,
+ state->split))
goto nla_put_failure;
state->chan_start++;
if (state->split)
@@ -2286,8 +2361,6 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
goto nla_put_failure;
- if (nl80211_send_mgmt_stypes(msg, mgmt_stypes))
- goto nla_put_failure;
state->split_start++;
if (state->split)
break;
@@ -2355,9 +2428,23 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
* case we'll continue with more data in the next round,
* but break unconditionally so unsplit data stops here.
*/
- state->split_start++;
+ if (state->split)
+ state->split_start++;
+ else
+ state->split_start = 0;
break;
case 9:
+ if (nl80211_send_mgmt_stypes(msg, mgmt_stypes))
+ goto nla_put_failure;
+
+ if (nla_put_u32(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_PLANS,
+ rdev->wiphy.max_sched_scan_plans) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_INTERVAL,
+ rdev->wiphy.max_sched_scan_plan_interval) ||
+ nla_put_u32(msg, NL80211_ATTR_MAX_SCAN_PLAN_ITERATIONS,
+ rdev->wiphy.max_sched_scan_plan_iterations))
+ goto nla_put_failure;
+
if (rdev->wiphy.extended_capabilities &&
(nla_put(msg, NL80211_ATTR_EXT_CAPA,
rdev->wiphy.extended_capabilities_len,
@@ -4422,21 +4509,106 @@ static bool vht_set_mcs_mask(struct ieee80211_supported_band *sband,
return true;
}
+static u16 he_mcs_map_to_mcs_mask(u8 he_mcs_map)
+{
+ switch (he_mcs_map) {
+ case IEEE80211_HE_MCS_NOT_SUPPORTED:
+ return 0;
+ case IEEE80211_HE_MCS_SUPPORT_0_7:
+ return 0x00FF;
+ case IEEE80211_HE_MCS_SUPPORT_0_9:
+ return 0x03FF;
+ case IEEE80211_HE_MCS_SUPPORT_0_11:
+ return 0xFFF;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void he_build_mcs_mask(u16 he_mcs_map,
+ u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ u8 nss;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+ he_mcs_mask[nss] = he_mcs_map_to_mcs_mask(he_mcs_map & 0x03);
+ he_mcs_map >>= 2;
+ }
+}
+
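
he_mcs_map_to_mcs_mask() and he_build_mcs_mask() expand the 16-bit HE Tx MCS map, where each pair of bits describes one spatial stream (00 = MCS 0-7, 01 = MCS 0-9, 10 = MCS 0-11, 11 = not supported). A self-contained userspace sketch of the same decoding, together with the subset test that he_set_mcs_mask() applies below; the sample map value 0xFFFA is made up for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define NSS_MAX 8	/* mirrors NL80211_HE_NSS_MAX */

    /* same mapping as he_mcs_map_to_mcs_mask() */
    static uint16_t code_to_mask(uint8_t code)
    {
    	switch (code) {
    	case 0: return 0x00FF;	/* MCS 0-7  */
    	case 1: return 0x03FF;	/* MCS 0-9  */
    	case 2: return 0x0FFF;	/* MCS 0-11 */
    	default: return 0;	/* not supported */
    	}
    }

    int main(void)
    {
    	uint16_t map = 0xFFFA;	/* hypothetical: NSS 1-2 at MCS 0-11 */
    	uint16_t cap[NSS_MAX], want;
    	int nss;

    	for (nss = 0; nss < NSS_MAX; nss++) {
    		cap[nss] = code_to_mask(map & 0x3);
    		map >>= 2;
    	}

    	/* he_set_mcs_mask() accepts a request only if it is a subset
    	 * of the capability mask for every stream */
    	want = 0x00FF;	/* MCS 0-7 on NSS 1: allowed */
    	printf("NSS1 cap 0x%04X want 0x%04X -> %s\n", cap[0], want,
    	       (cap[0] & want) == want ? "ok" : "rejected");
    	want = 0x0FFF;	/* MCS 0-11 on NSS 3: rejected, unsupported */
    	printf("NSS3 cap 0x%04X want 0x%04X -> %s\n", cap[2], want,
    	       (cap[2] & want) == want ? "ok" : "rejected");
    	return 0;
    }
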
+static u16 he_get_txmcsmap(struct genl_info *info,
+ const struct ieee80211_sta_he_cap *he_cap)
+{
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ __le16 tx_mcs;
+
+ switch (wdev->chandef.width) {
+ case NL80211_CHAN_WIDTH_80P80:
+ tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80p80;
+ break;
+ case NL80211_CHAN_WIDTH_160:
+ tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_160;
+ break;
+ default:
+ tx_mcs = he_cap->he_mcs_nss_supp.tx_mcs_80;
+ break;
+ }
+ return le16_to_cpu(tx_mcs);
+}
+
+static bool he_set_mcs_mask(struct genl_info *info,
+ struct wireless_dev *wdev,
+ struct ieee80211_supported_band *sband,
+ struct nl80211_txrate_he *txrate,
+ u16 mcs[NL80211_HE_NSS_MAX])
+{
+ const struct ieee80211_sta_he_cap *he_cap;
+ u16 tx_mcs_mask[NL80211_HE_NSS_MAX] = {};
+ u16 tx_mcs_map = 0;
+ u8 i;
+
+ he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
+ if (!he_cap)
+ return false;
+
+ memset(mcs, 0, sizeof(u16) * NL80211_HE_NSS_MAX);
+
+ tx_mcs_map = he_get_txmcsmap(info, he_cap);
+
+ /* Build he_mcs_mask from HE capabilities */
+ he_build_mcs_mask(tx_mcs_map, tx_mcs_mask);
+
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ if ((tx_mcs_mask[i] & txrate->mcs[i]) == txrate->mcs[i])
+ mcs[i] = txrate->mcs[i];
+ else
+ return false;
+ }
+
+ return true;
+}
+
static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
struct nlattr *attrs[],
enum nl80211_attrs attr,
- struct cfg80211_bitrate_mask *mask)
+ struct cfg80211_bitrate_mask *mask,
+ struct net_device *dev)
{
struct nlattr *tb[NL80211_TXRATE_MAX + 1];
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
int rem, i;
struct nlattr *tx_rates;
struct ieee80211_supported_band *sband;
- u16 vht_tx_mcs_map;
+ u16 vht_tx_mcs_map, he_tx_mcs_map;
memset(mask, 0, sizeof(*mask));
/* Default to all rates enabled */
for (i = 0; i < NUM_NL80211_BANDS; i++) {
+ const struct ieee80211_sta_he_cap *he_cap;
+
sband = rdev->wiphy.bands[i];
if (!sband)
@@ -4452,6 +4624,16 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
vht_tx_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
vht_build_mcs_mask(vht_tx_mcs_map, mask->control[i].vht_mcs);
+
+ he_cap = ieee80211_get_he_iftype_cap(sband, wdev->iftype);
+ if (!he_cap)
+ continue;
+
+ he_tx_mcs_map = he_get_txmcsmap(info, he_cap);
+ he_build_mcs_mask(he_tx_mcs_map, mask->control[i].he_mcs);
+
+ mask->control[i].he_gi = 0xFF;
+ mask->control[i].he_ltf = 0xFF;
}
/* if no rates are given set it back to the defaults */
@@ -4507,13 +4689,25 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
if (mask->control[band].gi > NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
}
+ if (tb[NL80211_TXRATE_HE] &&
+ !he_set_mcs_mask(info, wdev, sband,
+ nla_data(tb[NL80211_TXRATE_HE]),
+ mask->control[band].he_mcs))
+ return -EINVAL;
+ if (tb[NL80211_TXRATE_HE_GI])
+ mask->control[band].he_gi =
+ nla_get_u8(tb[NL80211_TXRATE_HE_GI]);
+ if (tb[NL80211_TXRATE_HE_LTF])
+ mask->control[band].he_ltf =
+ nla_get_u8(tb[NL80211_TXRATE_HE_LTF]);
if (mask->control[band].legacy == 0) {
- /* don't allow empty legacy rates if HT or VHT
+ /* don't allow empty legacy rates if HT, VHT or HE
* are not even supported.
*/
if (!(rdev->wiphy.bands[band]->ht_cap.ht_supported ||
- rdev->wiphy.bands[band]->vht_cap.vht_supported))
+ rdev->wiphy.bands[band]->vht_cap.vht_supported ||
+ ieee80211_get_he_iftype_cap(sband, wdev->iftype)))
return -EINVAL;
for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
@@ -4524,6 +4718,10 @@ static int nl80211_parse_tx_bitrate_mask(struct genl_info *info,
if (mask->control[band].vht_mcs[i])
goto out;
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++)
+ if (mask->control[band].he_mcs[i])
+ goto out;
+
/* legacy and mcs rates may not be both empty */
return -EINVAL;
}
@@ -4683,18 +4881,34 @@ static int nl80211_parse_he_obss_pd(struct nlattr *attrs,
if (err)
return err;
- if (!tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET] ||
- !tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
+ if (!tb[NL80211_HE_OBSS_PD_ATTR_SR_CTRL])
return -EINVAL;
- he_obss_pd->min_offset =
- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
- he_obss_pd->max_offset =
- nla_get_u32(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
+ he_obss_pd->sr_ctrl = nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_SR_CTRL]);
+
+ if (tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET])
+ he_obss_pd->min_offset =
+ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MIN_OFFSET]);
+ if (tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET])
+ he_obss_pd->max_offset =
+ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_MAX_OFFSET]);
+ if (tb[NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET])
+ he_obss_pd->non_srg_max_offset =
+ nla_get_u8(tb[NL80211_HE_OBSS_PD_ATTR_NON_SRG_MAX_OFFSET]);
- if (he_obss_pd->min_offset >= he_obss_pd->max_offset)
+ if (he_obss_pd->min_offset > he_obss_pd->max_offset)
return -EINVAL;
+ if (tb[NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP])
+ memcpy(he_obss_pd->bss_color_bitmap,
+ nla_data(tb[NL80211_HE_OBSS_PD_ATTR_BSS_COLOR_BITMAP]),
+ sizeof(he_obss_pd->bss_color_bitmap));
+
+ if (tb[NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP])
+ memcpy(he_obss_pd->partial_bssid_bitmap,
+ nla_data(tb[NL80211_HE_OBSS_PD_ATTR_PARTIAL_BSSID_BITMAP]),
+ sizeof(he_obss_pd->partial_bssid_bitmap));
+
he_obss_pd->enable = true;
return 0;
@@ -4724,6 +4938,65 @@ static int nl80211_parse_he_bss_color(struct nlattr *attrs,
return 0;
}
+static int nl80211_parse_fils_discovery(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs,
+ struct cfg80211_ap_settings *params)
+{
+ struct nlattr *tb[NL80211_FILS_DISCOVERY_ATTR_MAX + 1];
+ int ret;
+ struct cfg80211_fils_discovery *fd = &params->fils_discovery;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_FILS_DISCOVERY))
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, NL80211_FILS_DISCOVERY_ATTR_MAX, attrs,
+ NULL, NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN] ||
+ !tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX] ||
+ !tb[NL80211_FILS_DISCOVERY_ATTR_TMPL])
+ return -EINVAL;
+
+ fd->tmpl_len = nla_len(tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]);
+ fd->tmpl = nla_data(tb[NL80211_FILS_DISCOVERY_ATTR_TMPL]);
+ fd->min_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MIN]);
+ fd->max_interval = nla_get_u32(tb[NL80211_FILS_DISCOVERY_ATTR_INT_MAX]);
+
+ return 0;
+}
+
+static int
+nl80211_parse_unsol_bcast_probe_resp(struct cfg80211_registered_device *rdev,
+ struct nlattr *attrs,
+ struct cfg80211_ap_settings *params)
+{
+ struct nlattr *tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX + 1];
+ int ret;
+ struct cfg80211_unsol_bcast_probe_resp *presp =
+ &params->unsol_bcast_probe_resp;
+
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_UNSOL_BCAST_PROBE_RESP))
+ return -EINVAL;
+
+ ret = nla_parse_nested(tb, NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_MAX,
+ attrs, NULL, NULL);
+ if (ret)
+ return ret;
+
+ if (!tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT] ||
+ !tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL])
+ return -EINVAL;
+
+ presp->tmpl = nla_data(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]);
+ presp->tmpl_len = nla_len(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_TMPL]);
+ presp->interval = nla_get_u32(tb[NL80211_UNSOL_BCAST_PROBE_RESP_ATTR_INT]);
+ return 0;
+}
+
static void nl80211_check_ap_rate_selectors(struct cfg80211_ap_settings *params,
const u8 *rates)
{
@@ -4834,8 +5107,9 @@ static bool nl80211_valid_auth_type(struct cfg80211_registered_device *rdev,
return false;
return true;
case NL80211_CMD_START_AP:
- /* SAE not supported yet */
- if (auth_type == NL80211_AUTHTYPE_SAE)
+ if (!wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP) &&
+ auth_type == NL80211_AUTHTYPE_SAE)
return false;
/* FILS not supported yet */
if (auth_type == NL80211_AUTHTYPE_FILS_SK ||
@@ -4899,8 +5173,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
params.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
params.ssid_len =
nla_len(info->attrs[NL80211_ATTR_SSID]);
- if (params.ssid_len == 0 ||
- params.ssid_len > IEEE80211_MAX_SSID_LEN)
+ if (params.ssid_len == 0)
return -EINVAL;
}
@@ -4969,7 +5242,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_TX_RATES]) {
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES,
- &params.beacon_rate);
+ &params.beacon_rate,
+ dev);
if (err)
return err;
@@ -5031,6 +5305,22 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
goto out;
}
+ if (info->attrs[NL80211_ATTR_FILS_DISCOVERY]) {
+ err = nl80211_parse_fils_discovery(rdev,
+ info->attrs[NL80211_ATTR_FILS_DISCOVERY],
+ &params);
+ if (err)
+ goto out;
+ }
+
+ if (info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP]) {
+ err = nl80211_parse_unsol_bcast_probe_resp(
+ rdev, info->attrs[NL80211_ATTR_UNSOL_BCAST_PROBE_RESP],
+ &params);
+ if (err)
+ return err;
+ }
+
nl80211_calculate_ap_params(&params);
if (info->attrs[NL80211_ATTR_EXTERNAL_AUTH_SUPPORT])
@@ -5840,11 +6130,9 @@ static int nl80211_parse_sta_channel_info(struct genl_info *info,
nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_CHANNELS]);
/*
* Need to include at least one (first channel, number of
- * channels) tuple for each subband, and must have proper
- * tuples for the rest of the data as well.
+ * channels) tuple for each subband (checked in policy),
+ * and must have proper tuples for the rest of the data as well.
*/
- if (params->supported_channels_len < 2)
- return -EINVAL;
if (params->supported_channels_len % 2)
return -EINVAL;
}
@@ -5854,13 +6142,6 @@ static int nl80211_parse_sta_channel_info(struct genl_info *info,
nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]);
params->supported_oper_classes_len =
nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_OPER_CLASSES]);
- /*
- * The value of the Length field of the Supported Operating
- * Classes element is between 2 and 253.
- */
- if (params->supported_oper_classes_len < 2 ||
- params->supported_oper_classes_len > 253)
- return -EINVAL;
}
return 0;
}
@@ -5883,9 +6164,6 @@ static int nl80211_set_station_tdls(struct genl_info *info,
nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
params->he_capa_len =
nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
-
- if (params->he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN)
- return -EINVAL;
}
err = nl80211_parse_sta_channel_info(info, params);
@@ -6144,10 +6422,6 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
nla_data(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
params.he_capa_len =
nla_len(info->attrs[NL80211_ATTR_HE_CAPABILITY]);
-
- /* max len is validated in nla policy */
- if (params.he_capa_len < NL80211_HE_MIN_CAPABILITY_LEN)
- return -EINVAL;
}
if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
@@ -8006,7 +8280,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
request->scan_start = jiffies;
rdev->scan_req = request;
- err = rdev_scan(rdev, request);
+ err = cfg80211_scan(rdev);
if (err)
goto out_free;
@@ -8419,23 +8693,14 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
}
if (ssid) {
- if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
- err = -EINVAL;
- goto out_free;
- }
memcpy(request->match_sets[i].ssid.ssid,
nla_data(ssid), nla_len(ssid));
request->match_sets[i].ssid.ssid_len =
nla_len(ssid);
}
- if (bssid) {
- if (nla_len(bssid) != ETH_ALEN) {
- err = -EINVAL;
- goto out_free;
- }
+ if (bssid)
memcpy(request->match_sets[i].bssid,
nla_data(bssid), ETH_ALEN);
- }
/* special attribute - old implementation w/a */
request->match_sets[i].rssi_thold = default_match_rssi;
@@ -8790,10 +9055,10 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
if (err)
return err;
- if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
+ if (!csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON])
return -EINVAL;
- len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+ len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
if (!len || (len % sizeof(u16)))
return -EINVAL;
@@ -8804,7 +9069,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
params.counter_offsets_beacon =
- nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+ nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_BEACON]);
/* sanity checks - counters should fit and be the same */
for (i = 0; i < params.n_counter_offsets_beacon; i++) {
@@ -8817,8 +9082,8 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
- if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
- len = nla_len(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+ if (csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]) {
+ len = nla_len(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
if (!len || (len % sizeof(u16)))
return -EINVAL;
@@ -8829,7 +9094,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
params.counter_offsets_presp =
- nla_data(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+ nla_data(csa_attrs[NL80211_ATTR_CNTDWN_OFFS_PRESP]);
/* sanity checks - counters should fit and be the same */
for (i = 0; i < params.n_counter_offsets_presp; i++) {
@@ -9094,6 +9359,11 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 portid, u32 seq,
survey->channel->center_freq))
goto nla_put_failure;
+ if (survey->channel && survey->channel->freq_offset &&
+ nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY_OFFSET,
+ survey->channel->freq_offset))
+ goto nla_put_failure;
+
if ((survey->filled & SURVEY_INFO_NOISE_DBM) &&
nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise))
goto nla_put_failure;
@@ -9312,9 +9582,6 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
auth_data = nla_data(info->attrs[NL80211_ATTR_AUTH_DATA]);
auth_data_len = nla_len(info->attrs[NL80211_ATTR_AUTH_DATA]);
- /* need to include at least Auth Transaction and Status Code */
- if (auth_data_len < 4)
- return -EINVAL;
}
local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
@@ -9454,7 +9721,9 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
if (info->attrs[NL80211_ATTR_SAE_PASSWORD]) {
if (!wiphy_ext_feature_isset(&rdev->wiphy,
- NL80211_EXT_FEATURE_SAE_OFFLOAD))
+ NL80211_EXT_FEATURE_SAE_OFFLOAD) &&
+ !wiphy_ext_feature_isset(&rdev->wiphy,
+ NL80211_EXT_FEATURE_SAE_OFFLOAD_AP))
return -EINVAL;
settings->sae_pwd =
nla_data(info->attrs[NL80211_ATTR_SAE_PASSWORD]);
@@ -9572,6 +9841,22 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
nla_data(info->attrs[NL80211_ATTR_FILS_NONCES]);
}
+ if (info->attrs[NL80211_ATTR_S1G_CAPABILITY_MASK]) {
+ if (!info->attrs[NL80211_ATTR_S1G_CAPABILITY])
+ return -EINVAL;
+ memcpy(&req.s1g_capa_mask,
+ nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY_MASK]),
+ sizeof(req.s1g_capa_mask));
+ }
+
+ if (info->attrs[NL80211_ATTR_S1G_CAPABILITY]) {
+ if (!info->attrs[NL80211_ATTR_S1G_CAPABILITY_MASK])
+ return -EINVAL;
+ memcpy(&req.s1g_capa,
+ nla_data(info->attrs[NL80211_ATTR_S1G_CAPABILITY]),
+ sizeof(req.s1g_capa));
+ }
+
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
if (!err) {
wdev_lock(dev->ieee80211_ptr);
@@ -10801,7 +11086,8 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
return -EOPNOTSUPP;
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
- NL80211_ATTR_TX_RATES, &mask);
+ NL80211_ATTR_TX_RATES, &mask,
+ dev);
if (err)
return err;
@@ -11409,7 +11695,8 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_TX_RATES]) {
err = nl80211_parse_tx_bitrate_mask(info, info->attrs,
NL80211_ATTR_TX_RATES,
- &setup.beacon_rate);
+ &setup.beacon_rate,
+ dev);
if (err)
return err;
@@ -12361,8 +12648,6 @@ static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
if (!tb[NL80211_REKEY_DATA_REPLAY_CTR] || !tb[NL80211_REKEY_DATA_KEK] ||
!tb[NL80211_REKEY_DATA_KCK])
return -EINVAL;
- if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN)
- return -ERANGE;
if (nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN &&
!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK &&
nla_len(tb[NL80211_REKEY_DATA_KEK]) == NL80211_KEK_EXT_LEN))
@@ -12687,8 +12972,7 @@ static int nl80211_nan_add_func(struct sk_buff *skb,
func->cookie = cfg80211_assign_cookie(rdev);
- if (!tb[NL80211_NAN_FUNC_TYPE] ||
- nla_get_u8(tb[NL80211_NAN_FUNC_TYPE]) > NL80211_NAN_FUNC_MAX_TYPE) {
+ if (!tb[NL80211_NAN_FUNC_TYPE]) {
err = -EINVAL;
goto out;
}
@@ -13178,9 +13462,6 @@ static int nl80211_crit_protocol_start(struct sk_buff *skb,
duration =
nla_get_u16(info->attrs[NL80211_ATTR_MAX_CRIT_PROT_DURATION]);
- if (duration > NL80211_CRIT_PROTO_MAX_DURATION)
- return -ERANGE;
-
ret = rdev_crit_proto_start(rdev, wdev, proto, duration);
if (!ret)
rdev->crit_proto_nlportid = info->snd_portid;
@@ -13565,8 +13846,7 @@ static int nl80211_set_qos_map(struct sk_buff *skb,
pos = nla_data(info->attrs[NL80211_ATTR_QOS_MAP]);
len = nla_len(info->attrs[NL80211_ATTR_QOS_MAP]);
- if (len % 2 || len < IEEE80211_QOS_MAP_LEN_MIN ||
- len > IEEE80211_QOS_MAP_LEN_MAX)
+ if (len % 2)
return -EINVAL;
qos_map = kzalloc(sizeof(struct cfg80211_qos_map), GFP_KERNEL);
@@ -13834,17 +14114,9 @@ static int nl80211_set_pmk(struct sk_buff *skb, struct genl_info *info)
goto out;
}
- if (info->attrs[NL80211_ATTR_PMKR0_NAME]) {
- int r0_name_len = nla_len(info->attrs[NL80211_ATTR_PMKR0_NAME]);
-
- if (r0_name_len != WLAN_PMK_NAME_LEN) {
- ret = -EINVAL;
- goto out;
- }
-
+ if (info->attrs[NL80211_ATTR_PMKR0_NAME])
pmk_conf.pmk_r0_name =
nla_data(info->attrs[NL80211_ATTR_PMKR0_NAME]);
- }
ret = rdev_set_pmk(rdev, dev, &pmk_conf);
out:
@@ -13903,8 +14175,7 @@ static int nl80211_external_auth(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_SSID]) {
params.ssid.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
- if (params.ssid.ssid_len == 0 ||
- params.ssid.ssid_len > IEEE80211_MAX_SSID_LEN)
+ if (params.ssid.ssid_len == 0)
return -EINVAL;
memcpy(params.ssid.ssid,
nla_data(info->attrs[NL80211_ATTR_SSID]),
@@ -14205,7 +14476,7 @@ static int parse_tid_conf(struct cfg80211_registered_device *rdev,
if (tid_conf->txrate_type != NL80211_TX_RATE_AUTOMATIC) {
attr = NL80211_TID_CONFIG_ATTR_TX_RATE;
err = nl80211_parse_tx_bitrate_mask(info, attrs, attr,
- &tid_conf->txrate_mask);
+ &tid_conf->txrate_mask, dev);
if (err)
return err;
@@ -14397,6 +14668,9 @@ static const struct genl_ops nl80211_ops[] = {
.internal_flags = NL80211_FLAG_NEED_WIPHY |
NL80211_FLAG_NEED_RTNL,
},
+};
+
+static const struct genl_small_ops nl80211_small_ops[] = {
{
.cmd = NL80211_CMD_SET_WIPHY,
.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -15258,6 +15532,8 @@ static struct genl_family nl80211_fam __ro_after_init = {
.module = THIS_MODULE,
.ops = nl80211_ops,
.n_ops = ARRAY_SIZE(nl80211_ops),
+ .small_ops = nl80211_small_ops,
+ .n_small_ops = ARRAY_SIZE(nl80211_small_ops),
.mcgrps = nl80211_mcgrps,
.n_mcgrps = ARRAY_SIZE(nl80211_mcgrps),
.parallel_ops = true,
@@ -15312,6 +15588,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
struct cfg80211_scan_request *req = rdev->scan_req;
struct nlattr *nest;
int i;
+ struct cfg80211_scan_info *info;
if (WARN_ON(!req))
return 0;
@@ -15355,11 +15632,13 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags))
goto nla_put_failure;
- if (req->info.scan_start_tsf &&
+ info = rdev->int_scan_req ? &rdev->int_scan_req->info :
+ &rdev->scan_req->info;
+ if (info->scan_start_tsf &&
(nla_put_u64_64bit(msg, NL80211_ATTR_SCAN_START_TIME_TSF,
- req->info.scan_start_tsf, NL80211_BSS_PAD) ||
+ info->scan_start_tsf, NL80211_BSS_PAD) ||
nla_put(msg, NL80211_ATTR_SCAN_START_TIME_TSF_BSSID, ETH_ALEN,
- req->info.tsf_bssid)))
+ info->tsf_bssid)))
goto nla_put_failure;
return 0;
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index d5e28239e030..36f1b59a78bf 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -59,6 +59,7 @@ static const struct ieee80211_radiotap_namespace radiotap_ns = {
* @iterator: radiotap_iterator to initialize
* @radiotap_header: radiotap header to parse
* @max_length: total length we can parse into (eg, whole packet length)
+ * @vns: vendor namespaces to parse
*
* Returns: 0 or a negative error code if there is a problem.
*
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d8a90d397423..3dab859641e1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1594,7 +1594,7 @@ freq_reg_info_regd(u32 center_freq,
/*
* We only need to know if one frequency rule was
- * was in center_freq's band, that's enough, so lets
+ * in center_freq's band, that's enough, so let's
* not overwrite it once found
*/
if (!band_rule_found)
@@ -1616,10 +1616,12 @@ static const struct ieee80211_reg_rule *
__freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
{
const struct ieee80211_regdomain *regd = reg_get_regdomain(wiphy);
- const struct ieee80211_reg_rule *reg_rule = NULL;
+ const u32 bws[] = {0, 1, 2, 4, 5, 8, 10, 16, 20};
+ const struct ieee80211_reg_rule *reg_rule;
+ int i = ARRAY_SIZE(bws) - 1;
u32 bw;
- for (bw = MHZ_TO_KHZ(20); bw >= min_bw; bw = bw / 2) {
+ for (bw = MHZ_TO_KHZ(bws[i]); bw >= min_bw; bw = MHZ_TO_KHZ(bws[i--])) {
reg_rule = freq_reg_info_regd(center_freq, regd, bw);
if (!IS_ERR(reg_rule))
return reg_rule;
@@ -1631,7 +1633,9 @@ __freq_reg_info(struct wiphy *wiphy, u32 center_freq, u32 min_bw)
const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
u32 center_freq)
{
- return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(20));
+ u32 min_bw = center_freq < MHZ_TO_KHZ(1000) ? 1 : 20;
+
+ return __freq_reg_info(wiphy, center_freq, MHZ_TO_KHZ(min_bw));
}
EXPORT_SYMBOL(freq_reg_info);
@@ -1659,6 +1663,7 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
{
const struct ieee80211_freq_range *freq_range = NULL;
u32 max_bandwidth_khz, center_freq_khz, bw_flags = 0;
+ bool is_s1g = chan->band == NL80211_BAND_S1GHZ;
freq_range = &reg_rule->freq_range;
@@ -1678,70 +1683,72 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
MHZ_TO_KHZ(20)))
bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(10))
- bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(20))
- bw_flags |= IEEE80211_CHAN_NO_20MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(40))
- bw_flags |= IEEE80211_CHAN_NO_HT40;
- if (max_bandwidth_khz < MHZ_TO_KHZ(80))
- bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (max_bandwidth_khz < MHZ_TO_KHZ(160))
- bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ if (is_s1g) {
+		/* S1G is strict about non-overlapping channels. We can
+ * calculate which bandwidth is allowed per channel by finding
+ * the largest bandwidth which cleanly divides the freq_range.
+ */
+ int edge_offset;
+ int ch_bw = max_bandwidth_khz;
+
+ while (ch_bw) {
+ edge_offset = (center_freq_khz - ch_bw / 2) -
+ freq_range->start_freq_khz;
+ if (edge_offset % ch_bw == 0) {
+ switch (KHZ_TO_MHZ(ch_bw)) {
+ case 1:
+ bw_flags |= IEEE80211_CHAN_1MHZ;
+ break;
+ case 2:
+ bw_flags |= IEEE80211_CHAN_2MHZ;
+ break;
+ case 4:
+ bw_flags |= IEEE80211_CHAN_4MHZ;
+ break;
+ case 8:
+ bw_flags |= IEEE80211_CHAN_8MHZ;
+ break;
+ case 16:
+ bw_flags |= IEEE80211_CHAN_16MHZ;
+ break;
+ default:
+ /* If we got here, no bandwidths fit on
+ * this frequency, ie. band edge.
+ */
+ bw_flags |= IEEE80211_CHAN_DISABLED;
+ break;
+ }
+ break;
+ }
+ ch_bw /= 2;
+ }
+ } else {
+ if (max_bandwidth_khz < MHZ_TO_KHZ(10))
+ bw_flags |= IEEE80211_CHAN_NO_10MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(20))
+ bw_flags |= IEEE80211_CHAN_NO_20MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
+ bw_flags |= IEEE80211_CHAN_NO_HT40;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
+ bw_flags |= IEEE80211_CHAN_NO_80MHZ;
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
+ bw_flags |= IEEE80211_CHAN_NO_160MHZ;
+ }
return bw_flags;
}
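
The S1G branch above selects the widest channel bandwidth whose lower edge lands exactly on a multiple of that bandwidth from the rule's start frequency. A standalone sketch of the halving loop with made-up numbers (a 902-928 MHz rule with a 16 MHz cap and a channel centred at 910 MHz):

    #include <stdio.h>

    int main(void)
    {
    	/* all values in kHz; hypothetical S1G-style rule */
    	int start = 902000, max_bw = 16000;
    	int center = 910000;	/* hypothetical channel center */
    	int ch_bw;

    	for (ch_bw = max_bw; ch_bw; ch_bw /= 2) {
    		int edge_offset = (center - ch_bw / 2) - start;

    		if (edge_offset % ch_bw == 0) {
    			printf("%d kHz fits cleanly at %d kHz\n",
    			       ch_bw, center);
    			break;
    		}
    	}
    	if (!ch_bw)
    		printf("no bandwidth fits -> channel disabled\n");
    	return 0;
    }
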
-/*
- * Note that right now we assume the desired channel bandwidth
- * is always 20 MHz for each individual channel (HT40 uses 20 MHz
- * per channel, the primary and the extension channel).
- */
-static void handle_channel(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator,
- struct ieee80211_channel *chan)
+static void handle_channel_single_rule(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct ieee80211_channel *chan,
+ u32 flags,
+ struct regulatory_request *lr,
+ struct wiphy *request_wiphy,
+ const struct ieee80211_reg_rule *reg_rule)
{
- u32 flags, bw_flags = 0;
- const struct ieee80211_reg_rule *reg_rule = NULL;
+ u32 bw_flags = 0;
const struct ieee80211_power_rule *power_rule = NULL;
- struct wiphy *request_wiphy = NULL;
- struct regulatory_request *lr = get_last_request();
const struct ieee80211_regdomain *regd;
- request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
-
- flags = chan->orig_flags;
-
- reg_rule = freq_reg_info(wiphy, ieee80211_channel_to_khz(chan));
- if (IS_ERR(reg_rule)) {
- /*
- * We will disable all channels that do not match our
- * received regulatory rule unless the hint is coming
- * from a Country IE and the Country IE had no information
- * about a band. The IEEE 802.11 spec allows for an AP
- * to send only a subset of the regulatory rules allowed,
- * so an AP in the US that only supports 2.4 GHz may only send
- * a country IE with information for the 2.4 GHz band
- * while 5 GHz is still supported.
- */
- if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
- PTR_ERR(reg_rule) == -ERANGE)
- return;
-
- if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
- request_wiphy && request_wiphy == wiphy &&
- request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
- pr_debug("Disabling freq %d.%03d MHz for good\n",
- chan->center_freq, chan->freq_offset);
- chan->orig_flags |= IEEE80211_CHAN_DISABLED;
- chan->flags = chan->orig_flags;
- } else {
- pr_debug("Disabling freq %d.%03d MHz\n",
- chan->center_freq, chan->freq_offset);
- chan->flags |= IEEE80211_CHAN_DISABLED;
- }
- return;
- }
-
regd = reg_get_regdomain(wiphy);
power_rule = &reg_rule->power_rule;
@@ -1803,6 +1810,204 @@ static void handle_channel(struct wiphy *wiphy,
chan->max_power = chan->max_reg_power;
}
+static void handle_channel_adjacent_rules(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct ieee80211_channel *chan,
+ u32 flags,
+ struct regulatory_request *lr,
+ struct wiphy *request_wiphy,
+ const struct ieee80211_reg_rule *rrule1,
+ const struct ieee80211_reg_rule *rrule2,
+ struct ieee80211_freq_range *comb_range)
+{
+ u32 bw_flags1 = 0;
+ u32 bw_flags2 = 0;
+ const struct ieee80211_power_rule *power_rule1 = NULL;
+ const struct ieee80211_power_rule *power_rule2 = NULL;
+ const struct ieee80211_regdomain *regd;
+
+ regd = reg_get_regdomain(wiphy);
+
+ power_rule1 = &rrule1->power_rule;
+ power_rule2 = &rrule2->power_rule;
+ bw_flags1 = reg_rule_to_chan_bw_flags(regd, rrule1, chan);
+ bw_flags2 = reg_rule_to_chan_bw_flags(regd, rrule2, chan);
+
+ if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ request_wiphy && request_wiphy == wiphy &&
+ request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
+ /* This guarantees the driver's requested regulatory domain
+ * will always be used as a base for further regulatory
+ * settings
+ */
+ chan->flags =
+ map_regdom_flags(rrule1->flags) |
+ map_regdom_flags(rrule2->flags) |
+ bw_flags1 |
+ bw_flags2;
+ chan->orig_flags = chan->flags;
+ chan->max_antenna_gain =
+ min_t(int, MBI_TO_DBI(power_rule1->max_antenna_gain),
+ MBI_TO_DBI(power_rule2->max_antenna_gain));
+ chan->orig_mag = chan->max_antenna_gain;
+ chan->max_reg_power =
+ min_t(int, MBM_TO_DBM(power_rule1->max_eirp),
+ MBM_TO_DBM(power_rule2->max_eirp));
+ chan->max_power = chan->max_reg_power;
+ chan->orig_mpwr = chan->max_reg_power;
+
+ if (chan->flags & IEEE80211_CHAN_RADAR) {
+ chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+ if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms)
+ chan->dfs_cac_ms = max_t(unsigned int,
+ rrule1->dfs_cac_ms,
+ rrule2->dfs_cac_ms);
+ }
+
+ return;
+ }
+
+ chan->dfs_state = NL80211_DFS_USABLE;
+ chan->dfs_state_entered = jiffies;
+
+ chan->beacon_found = false;
+ chan->flags = flags | bw_flags1 | bw_flags2 |
+ map_regdom_flags(rrule1->flags) |
+ map_regdom_flags(rrule2->flags);
+
+	/* reg_rule_to_chan_bw_flags may forbid 10 and 20 MHz
+	 * (otherwise this would not be an adjacent-rule case), so recheck
+ */
+ if (cfg80211_does_bw_fit_range(comb_range,
+ ieee80211_channel_to_khz(chan),
+ MHZ_TO_KHZ(10)))
+ chan->flags &= ~IEEE80211_CHAN_NO_10MHZ;
+ if (cfg80211_does_bw_fit_range(comb_range,
+ ieee80211_channel_to_khz(chan),
+ MHZ_TO_KHZ(20)))
+ chan->flags &= ~IEEE80211_CHAN_NO_20MHZ;
+
+ chan->max_antenna_gain =
+ min_t(int, chan->orig_mag,
+ min_t(int,
+ MBI_TO_DBI(power_rule1->max_antenna_gain),
+ MBI_TO_DBI(power_rule2->max_antenna_gain)));
+ chan->max_reg_power = min_t(int,
+ MBM_TO_DBM(power_rule1->max_eirp),
+ MBM_TO_DBM(power_rule2->max_eirp));
+
+ if (chan->flags & IEEE80211_CHAN_RADAR) {
+ if (rrule1->dfs_cac_ms || rrule2->dfs_cac_ms)
+ chan->dfs_cac_ms = max_t(unsigned int,
+ rrule1->dfs_cac_ms,
+ rrule2->dfs_cac_ms);
+ else
+ chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+ }
+
+ if (chan->orig_mpwr) {
+ /* Devices that use REGULATORY_COUNTRY_IE_FOLLOW_POWER
+ * will always follow the passed country IE power settings.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ wiphy->regulatory_flags & REGULATORY_COUNTRY_IE_FOLLOW_POWER)
+ chan->max_power = chan->max_reg_power;
+ else
+ chan->max_power = min(chan->orig_mpwr,
+ chan->max_reg_power);
+ } else {
+ chan->max_power = chan->max_reg_power;
+ }
+}
+
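
handle_channel_adjacent_rules() is invoked from handle_channel() below when the exact-frequency lookup fails but the rules 20 MHz to either side turn out to be contiguous; the combined range then decides whether 10/20 MHz operation can be re-enabled. A minimal sketch of that containment test with two hypothetical adjacent rules meeting at 5250 MHz, mirroring (by assumption) what cfg80211_does_bw_fit_range() checks:

    #include <stdio.h>

    struct freq_range { unsigned int start, end, max_bw; };	/* all kHz */

    /* assumed containment test, standing in for cfg80211_does_bw_fit_range() */
    static int bw_fits(const struct freq_range *r, unsigned int center,
    		   unsigned int bw)
    {
    	return center - bw / 2 >= r->start && center + bw / 2 <= r->end;
    }

    int main(void)
    {
    	/* two hypothetical rules meeting at 5250 MHz */
    	struct freq_range r1 = { 5170000, 5250000, 80000 };
    	struct freq_range r2 = { 5250000, 5330000, 80000 };
    	/* combined range as handle_channel() builds it: r1 start to
    	 * r2 end, bandwidth capped at the smaller of the two rules */
    	struct freq_range comb = {
    		r1.start, r2.end,
    		r1.max_bw < r2.max_bw ? r1.max_bw : r2.max_bw,
    	};
    	unsigned int center = 5250000;	/* channel straddling the seam */

    	printf("20 MHz at %u kHz: %s\n", center,
    	       bw_fits(&comb, center, 20000) ? "fits" : "does not fit");
    	return 0;
    }
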
+/* Note that right now we assume the desired channel bandwidth
+ * is always 20 MHz for each individual channel (HT40 uses 20 MHz
+ * per channel, the primary and the extension channel).
+ */
+static void handle_channel(struct wiphy *wiphy,
+ enum nl80211_reg_initiator initiator,
+ struct ieee80211_channel *chan)
+{
+ const u32 orig_chan_freq = ieee80211_channel_to_khz(chan);
+ struct regulatory_request *lr = get_last_request();
+ struct wiphy *request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
+ const struct ieee80211_reg_rule *rrule = NULL;
+ const struct ieee80211_reg_rule *rrule1 = NULL;
+ const struct ieee80211_reg_rule *rrule2 = NULL;
+
+ u32 flags = chan->orig_flags;
+
+ rrule = freq_reg_info(wiphy, orig_chan_freq);
+ if (IS_ERR(rrule)) {
+ /* check for adjacent match, therefore get rules for
+ * chan - 20 MHz and chan + 20 MHz and test
+ * if reg rules are adjacent
+ */
+ rrule1 = freq_reg_info(wiphy,
+ orig_chan_freq - MHZ_TO_KHZ(20));
+ rrule2 = freq_reg_info(wiphy,
+ orig_chan_freq + MHZ_TO_KHZ(20));
+ if (!IS_ERR(rrule1) && !IS_ERR(rrule2)) {
+ struct ieee80211_freq_range comb_range;
+
+ if (rrule1->freq_range.end_freq_khz !=
+ rrule2->freq_range.start_freq_khz)
+ goto disable_chan;
+
+ comb_range.start_freq_khz =
+ rrule1->freq_range.start_freq_khz;
+ comb_range.end_freq_khz =
+ rrule2->freq_range.end_freq_khz;
+ comb_range.max_bandwidth_khz =
+ min_t(u32,
+ rrule1->freq_range.max_bandwidth_khz,
+ rrule2->freq_range.max_bandwidth_khz);
+
+ if (!cfg80211_does_bw_fit_range(&comb_range,
+ orig_chan_freq,
+ MHZ_TO_KHZ(20)))
+ goto disable_chan;
+
+ handle_channel_adjacent_rules(wiphy, initiator, chan,
+ flags, lr, request_wiphy,
+ rrule1, rrule2,
+ &comb_range);
+ return;
+ }
+
+disable_chan:
+ /* We will disable all channels that do not match our
+ * received regulatory rule unless the hint is coming
+ * from a Country IE and the Country IE had no information
+ * about a band. The IEEE 802.11 spec allows for an AP
+ * to send only a subset of the regulatory rules allowed,
+ * so an AP in the US that only supports 2.4 GHz may only send
+ * a country IE with information for the 2.4 GHz band
+ * while 5 GHz is still supported.
+ */
+ if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ PTR_ERR(rrule) == -ERANGE)
+ return;
+
+ if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ request_wiphy && request_wiphy == wiphy &&
+ request_wiphy->regulatory_flags & REGULATORY_STRICT_REG) {
+ pr_debug("Disabling freq %d.%03d MHz for good\n",
+ chan->center_freq, chan->freq_offset);
+ chan->orig_flags |= IEEE80211_CHAN_DISABLED;
+ chan->flags = chan->orig_flags;
+ } else {
+ pr_debug("Disabling freq %d.%03d MHz\n",
+ chan->center_freq, chan->freq_offset);
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
+ return;
+ }
+
+ handle_channel_single_rule(wiphy, initiator, chan, flags, lr,
+ request_wiphy, rrule);
+}
+
static void handle_band(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator,
struct ieee80211_supported_band *sband)
@@ -3170,7 +3375,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy)
* - send a user regulatory hint if applicable
*
* Device drivers that send a regulatory hint for a specific country
- * keep their own regulatory domain on wiphy->regd so that does does
+ * keep their own regulatory domain on wiphy->regd so that does
* not need to be remembered.
*/
static void restore_regulatory_settings(bool reset_user, bool cached)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 04f2d198c215..8d0e49c46db3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,7 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
- * Copyright (C) 2018-2019 Intel Corporation
+ * Copyright (C) 2018-2020 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -14,6 +14,8 @@
#include <linux/wireless.h>
#include <linux/nl80211.h>
#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/bitfield.h>
#include <net/arp.h>
#include <net/cfg80211.h>
#include <net/cfg80211-wext.h>
@@ -55,7 +57,7 @@
*
* Also note that the hidden_beacon_bss pointer is only relevant
* if the driver uses something other than the IEs, e.g. private
- * data stored stored in the BSS struct, since the beacon IEs are
+ * data stored in the BSS struct, since the beacon IEs are
* also linked into the probe response struct.
*/
@@ -74,6 +76,43 @@ MODULE_PARM_DESC(bss_entries_limit,
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
+/**
+ * struct cfg80211_colocated_ap - colocated AP information
+ *
+ * @list: linked list to all colocated APs
+ * @bssid: BSSID of the reported AP
+ * @ssid: SSID of the reported AP
+ * @ssid_len: length of the ssid
+ * @center_freq: frequency the reported AP is on
+ * @unsolicited_probe: the reported AP is part of an ESS, where all the APs
+ * that operate in the same channel as the reported AP and that might be
+ * detected by a STA receiving this frame, are transmitting unsolicited
+ * Probe Response frames every 20 TUs
+ * @oct_recommended: OCT is recommended to exchange MMPDUs with the reported AP
+ * @same_ssid: the reported AP has the same SSID as the reporting AP
+ * @multi_bss: the reported AP is part of a multiple BSSID set
+ * @transmitted_bssid: the reported AP is the transmitting BSSID
+ * @colocated_ess: all the APs that share the same ESS as the reported AP are
+ * colocated and can be discovered via legacy bands.
+ * @short_ssid_valid: short_ssid is valid and can be used
+ * @short_ssid: the short SSID for this SSID
+ */
+struct cfg80211_colocated_ap {
+ struct list_head list;
+ u8 bssid[ETH_ALEN];
+ u8 ssid[IEEE80211_MAX_SSID_LEN];
+ size_t ssid_len;
+ u32 short_ssid;
+ u32 center_freq;
+ u8 unsolicited_probe:1,
+ oct_recommended:1,
+ same_ssid:1,
+ multi_bss:1,
+ transmitted_bssid:1,
+ colocated_ess:1,
+ short_ssid_valid:1;
+};
+
static void bss_free(struct cfg80211_internal_bss *bss)
{
struct cfg80211_bss_ies *ies;
@@ -448,10 +487,433 @@ static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
return ret;
}
+static u8 cfg80211_parse_bss_param(u8 data,
+ struct cfg80211_colocated_ap *coloc_ap)
+{
+ coloc_ap->oct_recommended =
+ u8_get_bits(data, IEEE80211_RNR_TBTT_PARAMS_OCT_RECOMMENDED);
+ coloc_ap->same_ssid =
+ u8_get_bits(data, IEEE80211_RNR_TBTT_PARAMS_SAME_SSID);
+ coloc_ap->multi_bss =
+ u8_get_bits(data, IEEE80211_RNR_TBTT_PARAMS_MULTI_BSSID);
+ coloc_ap->transmitted_bssid =
+ u8_get_bits(data, IEEE80211_RNR_TBTT_PARAMS_TRANSMITTED_BSSID);
+ coloc_ap->unsolicited_probe =
+ u8_get_bits(data, IEEE80211_RNR_TBTT_PARAMS_PROBE_ACTIVE);
+ coloc_ap->colocated_ess =
+ u8_get_bits(data, IEEE80211_RNR_TBTT_PARAMS_COLOC_ESS);
+
+ return u8_get_bits(data, IEEE80211_RNR_TBTT_PARAMS_COLOC_AP);
+}
+
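
cfg80211_parse_bss_param() unpacks the single TBTT-parameters byte of an RNR entry; the return value (the colocated-AP bit) decides whether the entry is kept at all. A userspace sketch of the same flag extraction; the bit positions here are assumptions standing in for the kernel's IEEE80211_RNR_TBTT_PARAMS_* masks:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed flag positions; the authoritative values live in the
     * kernel's ieee80211.h. */
    #define TBTT_OCT_RECOMMENDED	0x01
    #define TBTT_SAME_SSID		0x02
    #define TBTT_MULTI_BSSID	0x04
    #define TBTT_TRANSMITTED_BSSID	0x08
    #define TBTT_COLOC_ESS		0x10
    #define TBTT_PROBE_ACTIVE	0x20
    #define TBTT_COLOC_AP		0x40

    int main(void)
    {
    	uint8_t params = 0x42;	/* sample byte: colocated AP + same SSID */

    	printf("colocated AP:       %d\n", !!(params & TBTT_COLOC_AP));
    	printf("same SSID:          %d\n", !!(params & TBTT_SAME_SSID));
    	printf("unsolicited probes: %d\n", !!(params & TBTT_PROBE_ACTIVE));
    	return 0;
    }
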
+static int cfg80211_calc_short_ssid(const struct cfg80211_bss_ies *ies,
+ const struct element **elem, u32 *s_ssid)
+{
+ *elem = cfg80211_find_elem(WLAN_EID_SSID, ies->data, ies->len);
+ if (!*elem || (*elem)->datalen > IEEE80211_MAX_SSID_LEN)
+ return -EINVAL;
+
+ *s_ssid = ~crc32_le(~0, (*elem)->data, (*elem)->datalen);
+ return 0;
+}
+
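
The short SSID computed above, ~crc32_le(~0, ssid, len), is the standard IEEE CRC-32 of the SSID bytes, which zlib's crc32() should reproduce since it performs the same initial and final inversions internally. A userspace sketch under that assumption:

    #include <stdio.h>
    #include <string.h>
    #include <zlib.h>	/* build with -lz */

    int main(void)
    {
    	const char *ssid = "example-ssid";	/* hypothetical SSID */

    	/* zlib applies the ~0 seed and final inversion internally, so
    	 * crc32(0, ...) should equal the kernel's ~crc32_le(~0, ...) */
    	unsigned long s_ssid = crc32(0L, (const unsigned char *)ssid,
    				     strlen(ssid));

    	printf("short SSID of \"%s\" = 0x%08lx\n", ssid, s_ssid);
    	return 0;
    }
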
+static void cfg80211_free_coloc_ap_list(struct list_head *coloc_ap_list)
+{
+ struct cfg80211_colocated_ap *ap, *tmp_ap;
+
+ list_for_each_entry_safe(ap, tmp_ap, coloc_ap_list, list) {
+ list_del(&ap->list);
+ kfree(ap);
+ }
+}
+
+static int cfg80211_parse_ap_info(struct cfg80211_colocated_ap *entry,
+ const u8 *pos, u8 length,
+ const struct element *ssid_elem,
+ int s_ssid_tmp)
+{
+ /* skip the TBTT offset */
+ pos++;
+
+ memcpy(entry->bssid, pos, ETH_ALEN);
+ pos += ETH_ALEN;
+
+ if (length == IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM) {
+ memcpy(&entry->short_ssid, pos,
+ sizeof(entry->short_ssid));
+ entry->short_ssid_valid = true;
+ pos += 4;
+ }
+
+ /* skip non colocated APs */
+ if (!cfg80211_parse_bss_param(*pos, entry))
+ return -EINVAL;
+ pos++;
+
+ if (length == IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM) {
+ /*
+ * no information about the short ssid. Consider the entry valid
+		 * No information about the short SSID. Consider the entry valid
+		 * for now. It will be dropped later if there are explicit
+		 * SSIDs that need to be matched.
+ if (!entry->same_ssid)
+ return 0;
+ }
+
+ if (entry->same_ssid) {
+ entry->short_ssid = s_ssid_tmp;
+ entry->short_ssid_valid = true;
+
+ /*
+ * This is safe because we validate datalen in
+ * cfg80211_parse_colocated_ap(), before calling this
+ * function.
+ */
+ memcpy(&entry->ssid, &ssid_elem->data,
+ ssid_elem->datalen);
+ entry->ssid_len = ssid_elem->datalen;
+ }
+ return 0;
+}
+
+static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies,
+ struct list_head *list)
+{
+ struct ieee80211_neighbor_ap_info *ap_info;
+ const struct element *elem, *ssid_elem;
+ const u8 *pos, *end;
+ u32 s_ssid_tmp;
+ int n_coloc = 0, ret;
+ LIST_HEAD(ap_list);
+
+ elem = cfg80211_find_elem(WLAN_EID_REDUCED_NEIGHBOR_REPORT, ies->data,
+ ies->len);
+ if (!elem || elem->datalen > IEEE80211_MAX_SSID_LEN)
+ return 0;
+
+ pos = elem->data;
+ end = pos + elem->datalen;
+
+ ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
+ if (ret)
+ return ret;
+
+ /* RNR IE may contain more than one NEIGHBOR_AP_INFO */
+ while (pos + sizeof(*ap_info) <= end) {
+ enum nl80211_band band;
+ int freq;
+ u8 length, i, count;
+
+ ap_info = (void *)pos;
+ count = u8_get_bits(ap_info->tbtt_info_hdr,
+ IEEE80211_AP_INFO_TBTT_HDR_COUNT) + 1;
+ length = ap_info->tbtt_info_len;
+
+ pos += sizeof(*ap_info);
+
+ if (!ieee80211_operating_class_to_band(ap_info->op_class,
+ &band))
+ break;
+
+ freq = ieee80211_channel_to_frequency(ap_info->channel, band);
+
+ if (end - pos < count * ap_info->tbtt_info_len)
+ break;
+
+ /*
+ * TBTT info must include bss param + BSSID +
+ * (short SSID or same_ssid bit to be set).
+		 * Ignore other options and move to the
+		 * next AP info.
+ */
+ if (band != NL80211_BAND_6GHZ ||
+ (length != IEEE80211_TBTT_INFO_OFFSET_BSSID_BSS_PARAM &&
+ length < IEEE80211_TBTT_INFO_OFFSET_BSSID_SSSID_BSS_PARAM)) {
+ pos += count * ap_info->tbtt_info_len;
+ continue;
+ }
+
+ for (i = 0; i < count; i++) {
+ struct cfg80211_colocated_ap *entry;
+
+ entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN,
+ GFP_ATOMIC);
+
+ if (!entry)
+ break;
+
+ entry->center_freq = freq;
+
+ if (!cfg80211_parse_ap_info(entry, pos, length,
+ ssid_elem, s_ssid_tmp)) {
+ n_coloc++;
+ list_add_tail(&entry->list, &ap_list);
+ } else {
+ kfree(entry);
+ }
+
+ pos += ap_info->tbtt_info_len;
+ }
+ }
+
+ if (pos != end) {
+ cfg80211_free_coloc_ap_list(&ap_list);
+ return 0;
+ }
+
+ list_splice_tail(&ap_list, list);
+ return n_coloc;
+}
+
+static void cfg80211_scan_req_add_chan(struct cfg80211_scan_request *request,
+ struct ieee80211_channel *chan,
+ bool add_to_6ghz)
+{
+ int i;
+ u32 n_channels = request->n_channels;
+ struct cfg80211_scan_6ghz_params *params =
+ &request->scan_6ghz_params[request->n_6ghz_params];
+
+ for (i = 0; i < n_channels; i++) {
+ if (request->channels[i] == chan) {
+ if (add_to_6ghz)
+ params->channel_idx = i;
+ return;
+ }
+ }
+
+ request->channels[n_channels] = chan;
+ if (add_to_6ghz)
+ request->scan_6ghz_params[request->n_6ghz_params].channel_idx =
+ n_channels;
+
+ request->n_channels++;
+}
+
+static bool cfg80211_find_ssid_match(struct cfg80211_colocated_ap *ap,
+ struct cfg80211_scan_request *request)
+{
+ u8 i;
+ u32 s_ssid;
+
+ for (i = 0; i < request->n_ssids; i++) {
+ /* wildcard ssid in the scan request */
+ if (!request->ssids[i].ssid_len)
+ return true;
+
+ if (ap->ssid_len &&
+ ap->ssid_len == request->ssids[i].ssid_len) {
+ if (!memcmp(request->ssids[i].ssid, ap->ssid,
+ ap->ssid_len))
+ return true;
+ } else if (ap->short_ssid_valid) {
+ s_ssid = ~crc32_le(~0, request->ssids[i].ssid,
+ request->ssids[i].ssid_len);
+
+ if (ap->short_ssid == s_ssid)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int cfg80211_scan_6ghz(struct cfg80211_registered_device *rdev)
+{
+ u8 i;
+ struct cfg80211_colocated_ap *ap;
+ int n_channels, count = 0, err;
+ struct cfg80211_scan_request *request, *rdev_req = rdev->scan_req;
+ LIST_HEAD(coloc_ap_list);
+ bool need_scan_psc;
+ const struct ieee80211_sband_iftype_data *iftd;
+
+ rdev_req->scan_6ghz = true;
+
+ if (!rdev->wiphy.bands[NL80211_BAND_6GHZ])
+ return -EOPNOTSUPP;
+
+ iftd = ieee80211_get_sband_iftype_data(rdev->wiphy.bands[NL80211_BAND_6GHZ],
+ rdev_req->wdev->iftype);
+ if (!iftd || !iftd->he_cap.has_he)
+ return -EOPNOTSUPP;
+
+ n_channels = rdev->wiphy.bands[NL80211_BAND_6GHZ]->n_channels;
+
+ if (rdev_req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ) {
+ struct cfg80211_internal_bss *intbss;
+
+ spin_lock_bh(&rdev->bss_lock);
+ list_for_each_entry(intbss, &rdev->bss_list, list) {
+ struct cfg80211_bss *res = &intbss->pub;
+ const struct cfg80211_bss_ies *ies;
+
+ ies = rcu_access_pointer(res->ies);
+ count += cfg80211_parse_colocated_ap(ies,
+ &coloc_ap_list);
+ }
+ spin_unlock_bh(&rdev->bss_lock);
+ }
+
+ request = kzalloc(struct_size(request, channels, n_channels) +
+ sizeof(*request->scan_6ghz_params) * count,
+ GFP_KERNEL);
+ if (!request) {
+ cfg80211_free_coloc_ap_list(&coloc_ap_list);
+ return -ENOMEM;
+ }
+
+ *request = *rdev_req;
+ request->n_channels = 0;
+ request->scan_6ghz_params =
+ (void *)&request->channels[n_channels];
+
+ /*
+ * PSC channels should not be scanned if all the reported co-located APs
+ * are indicating that all APs in the same ESS are co-located
+ */
+ if (count) {
+ need_scan_psc = false;
+
+ list_for_each_entry(ap, &coloc_ap_list, list) {
+ if (!ap->colocated_ess) {
+ need_scan_psc = true;
+ break;
+ }
+ }
+ } else {
+ need_scan_psc = true;
+ }
+
+ /*
+	 * Add to the scan request the channels that need to be scanned
+	 * regardless of the colocated APs (PSC channels, or all channels
+	 * when NL80211_SCAN_FLAG_COLOCATED_6GHZ is not set)
+ */
+ for (i = 0; i < rdev_req->n_channels; i++) {
+ if (rdev_req->channels[i]->band == NL80211_BAND_6GHZ &&
+ ((need_scan_psc &&
+ cfg80211_channel_is_psc(rdev_req->channels[i])) ||
+ !(rdev_req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ))) {
+ cfg80211_scan_req_add_chan(request,
+ rdev_req->channels[i],
+ false);
+ }
+ }
+
+ if (!(rdev_req->flags & NL80211_SCAN_FLAG_COLOCATED_6GHZ))
+ goto skip;
+
+ list_for_each_entry(ap, &coloc_ap_list, list) {
+ bool found = false;
+ struct cfg80211_scan_6ghz_params *scan_6ghz_params =
+ &request->scan_6ghz_params[request->n_6ghz_params];
+ struct ieee80211_channel *chan =
+ ieee80211_get_channel(&rdev->wiphy, ap->center_freq);
+
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ for (i = 0; i < rdev_req->n_channels; i++) {
+ if (rdev_req->channels[i] == chan)
+ found = true;
+ }
+
+ if (!found)
+ continue;
+
+ if (request->n_ssids > 0 &&
+ !cfg80211_find_ssid_match(ap, request))
+ continue;
+
+ cfg80211_scan_req_add_chan(request, chan, true);
+ memcpy(scan_6ghz_params->bssid, ap->bssid, ETH_ALEN);
+ scan_6ghz_params->short_ssid = ap->short_ssid;
+ scan_6ghz_params->short_ssid_valid = ap->short_ssid_valid;
+ scan_6ghz_params->unsolicited_probe = ap->unsolicited_probe;
+
+ /*
+ * If a PSC channel is added to the scan and 'need_scan_psc' is
+		 * set to false, then all the APs that the scan logic is
+		 * interested in on the channel are colocated and thus there
+ * is no need to perform the initial PSC channel listen.
+ */
+ if (cfg80211_channel_is_psc(chan) && !need_scan_psc)
+ scan_6ghz_params->psc_no_listen = true;
+
+ request->n_6ghz_params++;
+ }
+
+skip:
+ cfg80211_free_coloc_ap_list(&coloc_ap_list);
+
+ if (request->n_channels) {
+ struct cfg80211_scan_request *old = rdev->int_scan_req;
+
+ rdev->int_scan_req = request;
+
+ /*
+ * If this scan follows a previous scan, save the scan start
+ * info from the first part of the scan
+ */
+ if (old)
+ rdev->int_scan_req->info = old->info;
+
+ err = rdev_scan(rdev, request);
+ if (err) {
+ rdev->int_scan_req = old;
+ kfree(request);
+ } else {
+ kfree(old);
+ }
+
+ return err;
+ }
+
+ kfree(request);
+ return -EINVAL;
+}
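
The allocation near the top of this function packs two variable-length arrays into one kzalloc(): the flexible channels[] array sized with struct_size(), plus scan_6ghz_params[] placed immediately after it. A userspace sketch of the same co-allocation pattern (hypothetical types, plain calloc() in place of kzalloc()):

#include <stdio.h>
#include <stdlib.h>

struct param {
	int filled;
};

struct request {
	size_t n_channels;
	struct param *params;	/* points just past channels[] */
	int channels[];		/* flexible array member, must be last */
};

static struct request *alloc_request(size_t n_channels, size_t n_params)
{
	/* One allocation carries the header, the flexible channels[]
	 * array and a second params[] array appended after it, like
	 * struct_size() plus the extra bytes in cfg80211_scan_6ghz(). */
	struct request *req = calloc(1, sizeof(*req) +
				     n_channels * sizeof(req->channels[0]) +
				     n_params * sizeof(struct param));

	if (!req)
		return NULL;
	req->n_channels = n_channels;
	req->params = (struct param *)&req->channels[n_channels];
	return req;
}

int main(void)
{
	struct request *req = alloc_request(4, 2);

	if (!req)
		return 1;
	printf("params[] starts %zu bytes after channels[]\n",
	       (size_t)((char *)req->params - (char *)req->channels));
	free(req);
	return 0;
}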
+
+int cfg80211_scan(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_scan_request *request;
+ struct cfg80211_scan_request *rdev_req = rdev->scan_req;
+ u32 n_channels = 0, idx, i;
+
+ if (!(rdev->wiphy.flags & WIPHY_FLAG_SPLIT_SCAN_6GHZ))
+ return rdev_scan(rdev, rdev_req);
+
+ for (i = 0; i < rdev_req->n_channels; i++) {
+ if (rdev_req->channels[i]->band != NL80211_BAND_6GHZ)
+ n_channels++;
+ }
+
+ if (!n_channels)
+ return cfg80211_scan_6ghz(rdev);
+
+ request = kzalloc(struct_size(request, channels, n_channels),
+ GFP_KERNEL);
+ if (!request)
+ return -ENOMEM;
+
+ *request = *rdev_req;
+ request->n_channels = n_channels;
+
+ for (i = idx = 0; i < rdev_req->n_channels; i++) {
+ if (rdev_req->channels[i]->band != NL80211_BAND_6GHZ)
+ request->channels[idx++] = rdev_req->channels[i];
+ }
+
+ rdev_req->scan_6ghz = false;
+ rdev->int_scan_req = request;
+ return rdev_scan(rdev, request);
+}
+
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
bool send_message)
{
- struct cfg80211_scan_request *request;
+ struct cfg80211_scan_request *request, *rdev_req;
struct wireless_dev *wdev;
struct sk_buff *msg;
#ifdef CONFIG_CFG80211_WEXT
@@ -466,11 +928,18 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
return;
}
- request = rdev->scan_req;
- if (!request)
+ rdev_req = rdev->scan_req;
+ if (!rdev_req)
return;
- wdev = request->wdev;
+ wdev = rdev_req->wdev;
+ request = rdev->int_scan_req ? rdev->int_scan_req : rdev_req;
+
+ if (wdev_running(wdev) &&
+ (rdev->wiphy.flags & WIPHY_FLAG_SPLIT_SCAN_6GHZ) &&
+ !rdev_req->scan_6ghz && !request->info.aborted &&
+ !cfg80211_scan_6ghz(rdev))
+ return;
/*
* This must be before sending the other events!
@@ -501,8 +970,11 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
if (wdev->netdev)
dev_put(wdev->netdev);
+ kfree(rdev->int_scan_req);
+ rdev->int_scan_req = NULL;
+
+ kfree(rdev->scan_req);
rdev->scan_req = NULL;
- kfree(request);
if (!send_message)
rdev->scan_msg = msg;
@@ -525,10 +997,25 @@ void __cfg80211_scan_done(struct work_struct *wk)
void cfg80211_scan_done(struct cfg80211_scan_request *request,
struct cfg80211_scan_info *info)
{
+ struct cfg80211_scan_info old_info = request->info;
+
trace_cfg80211_scan_done(request, info);
- WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req);
+ WARN_ON(request != wiphy_to_rdev(request->wiphy)->scan_req &&
+ request != wiphy_to_rdev(request->wiphy)->int_scan_req);
request->info = *info;
+
+	/*
+	 * If the scan was split, scan_start_tsf and tsf_bssid should be
+	 * those of the first part; in that case old_info.scan_start_tsf
+	 * is non-zero.
+	 */
+ if (request->scan_6ghz && old_info.scan_start_tsf) {
+ request->info.scan_start_tsf = old_info.scan_start_tsf;
+ memcpy(request->info.tsf_bssid, old_info.tsf_bssid,
+ sizeof(request->info.tsf_bssid));
+ }
+
request->notified = true;
queue_work(cfg80211_wq, &wiphy_to_rdev(request->wiphy)->scan_done_wk);
}
@@ -1315,15 +1802,24 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
int channel_number = -1;
struct ieee80211_channel *alt_channel;
- tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
- if (tmp && tmp[1] == 1) {
- channel_number = tmp[2];
+ if (channel->band == NL80211_BAND_S1GHZ) {
+ tmp = cfg80211_find_ie(WLAN_EID_S1G_OPERATION, ie, ielen);
+ if (tmp && tmp[1] >= sizeof(struct ieee80211_s1g_oper_ie)) {
+ struct ieee80211_s1g_oper_ie *s1gop = (void *)(tmp + 2);
+
+ channel_number = s1gop->primary_ch;
+ }
} else {
- tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen);
- if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) {
- struct ieee80211_ht_operation *htop = (void *)(tmp + 2);
+ tmp = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ie, ielen);
+ if (tmp && tmp[1] == 1) {
+ channel_number = tmp[2];
+ } else {
+ tmp = cfg80211_find_ie(WLAN_EID_HT_OPERATION, ie, ielen);
+ if (tmp && tmp[1] >= sizeof(struct ieee80211_ht_operation)) {
+ struct ieee80211_ht_operation *htop = (void *)(tmp + 2);
- channel_number = htop->primary_chan;
+ channel_number = htop->primary_chan;
+ }
}
}
@@ -1488,7 +1984,7 @@ static const struct element
ielen - (mbssid_end - ie));
/*
- * If is is not the last subelement in current MBSSID IE or there isn't
+ * If it is not the last subelement in current MBSSID IE or there isn't
* a next MBSSID IE - profile is complete.
*/
if ((sub_elem->data + sub_elem->datalen < mbssid_end - 1) ||
@@ -1807,8 +2303,11 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
struct cfg80211_bss_ies *ies;
struct ieee80211_channel *channel;
bool signal_valid;
- size_t ielen = len - offsetof(struct ieee80211_mgmt,
- u.probe_resp.variable);
+ struct ieee80211_ext *ext = NULL;
+ u8 *bssid, *variable;
+ u16 capability, beacon_int;
+ size_t ielen, min_hdr_len = offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
int bss_type;
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
@@ -1826,21 +2325,57 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
(data->signal < 0 || data->signal > 100)))
return NULL;
- if (WARN_ON(len < offsetof(struct ieee80211_mgmt, u.probe_resp.variable)))
+ if (ieee80211_is_s1g_beacon(mgmt->frame_control)) {
+ ext = (void *) mgmt;
+ min_hdr_len = offsetof(struct ieee80211_ext, u.s1g_beacon);
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ min_hdr_len = offsetof(struct ieee80211_ext,
+ u.s1g_short_beacon.variable);
+ }
+
+ if (WARN_ON(len < min_hdr_len))
return NULL;
- channel = cfg80211_get_bss_channel(wiphy, mgmt->u.beacon.variable,
+ ielen = len - min_hdr_len;
+ variable = mgmt->u.probe_resp.variable;
+ if (ext) {
+ if (ieee80211_is_s1g_short_beacon(mgmt->frame_control))
+ variable = ext->u.s1g_short_beacon.variable;
+ else
+ variable = ext->u.s1g_beacon.variable;
+ }
+
+ channel = cfg80211_get_bss_channel(wiphy, variable,
ielen, data->chan, data->scan_width);
if (!channel)
return NULL;
+ if (ext) {
+ struct ieee80211_s1g_bcn_compat_ie *compat;
+ u8 *ie;
+
+ ie = (void *)cfg80211_find_ie(WLAN_EID_S1G_BCN_COMPAT,
+ variable, ielen);
+ if (!ie)
+ return NULL;
+ compat = (void *)(ie + 2);
+ bssid = ext->u.s1g_beacon.sa;
+ capability = le16_to_cpu(compat->compat_info);
+ beacon_int = le16_to_cpu(compat->beacon_int);
+ } else {
+ bssid = mgmt->bssid;
+ beacon_int = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
+ capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
+ }
+
ies = kzalloc(sizeof(*ies) + ielen, gfp);
if (!ies)
return NULL;
ies->len = ielen;
ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
- ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
- memcpy(ies->data, mgmt->u.probe_resp.variable, ielen);
+ ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control) ||
+ ieee80211_is_s1g_beacon(mgmt->frame_control);
+ memcpy(ies->data, variable, ielen);
if (ieee80211_is_probe_resp(mgmt->frame_control))
rcu_assign_pointer(tmp.pub.proberesp_ies, ies);
@@ -1848,12 +2383,12 @@ cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
rcu_assign_pointer(tmp.pub.beacon_ies, ies);
rcu_assign_pointer(tmp.pub.ies, ies);
- memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
+ memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
+ tmp.pub.beacon_interval = beacon_int;
+ tmp.pub.capability = capability;
tmp.pub.channel = channel;
tmp.pub.scan_width = data->scan_width;
tmp.pub.signal = data->signal;
- tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
- tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
tmp.ts_boottime = data->boottime_ns;
tmp.parent_tsf = data->parent_tsf;
tmp.pub.chains = data->chains;
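
The S1G handling above picks the IE offset from the frame subtype before computing ielen. A compressed illustration of that offset selection, with made-up header sizes rather than the real ieee80211_mgmt/ieee80211_ext layouts:

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, simplified layouts: a regular management frame header
 * versus the shorter S1G extension header (sizes are made up). */
struct mgmt_frame { unsigned char hdr[36]; unsigned char variable[]; };
struct s1g_frame  { unsigned char hdr[15]; unsigned char variable[]; };

static size_t ie_offset(int is_s1g)
{
	return is_s1g ? offsetof(struct s1g_frame, variable)
		      : offsetof(struct mgmt_frame, variable);
}

int main(void)
{
	size_t len = 64, off = ie_offset(1);

	/* ielen must be computed from the subtype-dependent offset,
	 * exactly the min_hdr_len dance in the hunk above. */
	printf("IEs at offset %zu, ielen %zu\n", off, len - off);
	return 0;
}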
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 079ce320dc1e..38df713f2e2e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -24,7 +24,7 @@
/*
* Software SME in cfg80211, using auth/assoc/deauth calls to the
- * driver. This is is for implementing nl80211's connect/disconnect
+ * driver. This is for implementing nl80211's connect/disconnect
* and wireless extensions (if configured.)
*/
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 6fa99df52f86..f01746894a4e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -111,6 +111,33 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band)
}
EXPORT_SYMBOL(ieee80211_channel_to_freq_khz);
+enum nl80211_chan_width
+ieee80211_s1g_channel_width(const struct ieee80211_channel *chan)
+{
+ if (WARN_ON(!chan || chan->band != NL80211_BAND_S1GHZ))
+ return NL80211_CHAN_WIDTH_20_NOHT;
+
+	/* S1G defines a single allowed channel width per channel.
+	 * Extract that width here.
+	 */
+ if (chan->flags & IEEE80211_CHAN_1MHZ)
+ return NL80211_CHAN_WIDTH_1;
+ else if (chan->flags & IEEE80211_CHAN_2MHZ)
+ return NL80211_CHAN_WIDTH_2;
+ else if (chan->flags & IEEE80211_CHAN_4MHZ)
+ return NL80211_CHAN_WIDTH_4;
+ else if (chan->flags & IEEE80211_CHAN_8MHZ)
+ return NL80211_CHAN_WIDTH_8;
+ else if (chan->flags & IEEE80211_CHAN_16MHZ)
+ return NL80211_CHAN_WIDTH_16;
+
+	pr_err("unknown channel width for channel at %d kHz?\n",
+ ieee80211_channel_to_khz(chan));
+
+ return NL80211_CHAN_WIDTH_1;
+}
+EXPORT_SYMBOL(ieee80211_s1g_channel_width);
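
Since each S1G channel carries exactly one one-hot width flag, the helper is a straight priority chain over the flag bits. A stand-alone sketch with hypothetical flag values (the real IEEE80211_CHAN_*MHZ bit positions may differ):

#include <stdio.h>

/* Hypothetical one-hot width flags; the real IEEE80211_CHAN_*MHZ
 * bits live in enum ieee80211_channel_flags. */
enum {
	CHAN_1MHZ	= 1 << 0,
	CHAN_2MHZ	= 1 << 1,
	CHAN_4MHZ	= 1 << 2,
	CHAN_8MHZ	= 1 << 3,
	CHAN_16MHZ	= 1 << 4,
};

static int s1g_width_mhz(unsigned int flags)
{
	if (flags & CHAN_1MHZ)
		return 1;
	if (flags & CHAN_2MHZ)
		return 2;
	if (flags & CHAN_4MHZ)
		return 4;
	if (flags & CHAN_8MHZ)
		return 8;
	if (flags & CHAN_16MHZ)
		return 16;
	return 1;	/* fall back to the narrowest width, as above */
}

int main(void)
{
	printf("%d MHz\n", s1g_width_mhz(CHAN_4MHZ));	/* 4 MHz */
	return 0;
}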
+
int ieee80211_freq_khz_to_channel(u32 freq)
{
/* TODO: just handle MHz for now */
@@ -399,6 +426,11 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
{
unsigned int hdrlen = 24;
+ if (ieee80211_is_ext(fc)) {
+ hdrlen = 4;
+ goto out;
+ }
+
if (ieee80211_is_data(fc)) {
if (ieee80211_has_a4(fc))
hdrlen = 30;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 4d2160c989a3..78f2927ead7f 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -497,7 +497,7 @@ static int __cfg80211_set_encryption(struct cfg80211_registered_device *rdev,
/*
* We only need to store WEP keys, since they're the only keys that
- * can be be set before a connection is established and persist after
+ * can be set before a connection is established and persist after
* disconnecting.
*/
if (!addr && (params->cipher == WLAN_CIPHER_SUITE_WEP40 ||
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index b010bfde0149..56d052bc65cb 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -23,162 +23,6 @@
static DEFINE_IDA(umem_ida);
-void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
-{
- unsigned long flags;
-
- if (!xs->tx)
- return;
-
- spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
- list_add_rcu(&xs->list, &umem->xsk_tx_list);
- spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
-}
-
-void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
-{
- unsigned long flags;
-
- if (!xs->tx)
- return;
-
- spin_lock_irqsave(&umem->xsk_tx_list_lock, flags);
- list_del_rcu(&xs->list);
- spin_unlock_irqrestore(&umem->xsk_tx_list_lock, flags);
-}
-
-/* The umem is stored both in the _rx struct and the _tx struct as we do
- * not know if the device has more tx queues than rx, or the opposite.
- * This might also change during run time.
- */
-static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
- u16 queue_id)
-{
- if (queue_id >= max_t(unsigned int,
- dev->real_num_rx_queues,
- dev->real_num_tx_queues))
- return -EINVAL;
-
- if (queue_id < dev->real_num_rx_queues)
- dev->_rx[queue_id].umem = umem;
- if (queue_id < dev->real_num_tx_queues)
- dev->_tx[queue_id].umem = umem;
-
- return 0;
-}
-
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
- u16 queue_id)
-{
- if (queue_id < dev->real_num_rx_queues)
- return dev->_rx[queue_id].umem;
- if (queue_id < dev->real_num_tx_queues)
- return dev->_tx[queue_id].umem;
-
- return NULL;
-}
-EXPORT_SYMBOL(xdp_get_umem_from_qid);
-
-static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
-{
- if (queue_id < dev->real_num_rx_queues)
- dev->_rx[queue_id].umem = NULL;
- if (queue_id < dev->real_num_tx_queues)
- dev->_tx[queue_id].umem = NULL;
-}
-
-int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
- u16 queue_id, u16 flags)
-{
- bool force_zc, force_copy;
- struct netdev_bpf bpf;
- int err = 0;
-
- ASSERT_RTNL();
-
- force_zc = flags & XDP_ZEROCOPY;
- force_copy = flags & XDP_COPY;
-
- if (force_zc && force_copy)
- return -EINVAL;
-
- if (xdp_get_umem_from_qid(dev, queue_id))
- return -EBUSY;
-
- err = xdp_reg_umem_at_qid(dev, umem, queue_id);
- if (err)
- return err;
-
- umem->dev = dev;
- umem->queue_id = queue_id;
-
- if (flags & XDP_USE_NEED_WAKEUP) {
- umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
- /* Tx needs to be explicitly woken up the first time.
- * Also for supporting drivers that do not implement this
- * feature. They will always have to call sendto().
- */
- xsk_set_tx_need_wakeup(umem);
- }
-
- dev_hold(dev);
-
- if (force_copy)
- /* For copy-mode, we are done. */
- return 0;
-
- if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
- err = -EOPNOTSUPP;
- goto err_unreg_umem;
- }
-
- bpf.command = XDP_SETUP_XSK_UMEM;
- bpf.xsk.umem = umem;
- bpf.xsk.queue_id = queue_id;
-
- err = dev->netdev_ops->ndo_bpf(dev, &bpf);
- if (err)
- goto err_unreg_umem;
-
- umem->zc = true;
- return 0;
-
-err_unreg_umem:
- if (!force_zc)
- err = 0; /* fallback to copy mode */
- if (err)
- xdp_clear_umem_at_qid(dev, queue_id);
- return err;
-}
-
-void xdp_umem_clear_dev(struct xdp_umem *umem)
-{
- struct netdev_bpf bpf;
- int err;
-
- ASSERT_RTNL();
-
- if (!umem->dev)
- return;
-
- if (umem->zc) {
- bpf.command = XDP_SETUP_XSK_UMEM;
- bpf.xsk.umem = NULL;
- bpf.xsk.queue_id = umem->queue_id;
-
- err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
-
- if (err)
- WARN(1, "failed to disable umem!\n");
- }
-
- xdp_clear_umem_at_qid(umem->dev, umem->queue_id);
-
- dev_put(umem->dev);
- umem->dev = NULL;
- umem->zc = false;
-}
-
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
@@ -195,38 +39,33 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
}
}
-static void xdp_umem_release(struct xdp_umem *umem)
+static void xdp_umem_addr_unmap(struct xdp_umem *umem)
{
- rtnl_lock();
- xdp_umem_clear_dev(umem);
- rtnl_unlock();
-
- ida_simple_remove(&umem_ida, umem->id);
+ vunmap(umem->addrs);
+ umem->addrs = NULL;
+}
- if (umem->fq) {
- xskq_destroy(umem->fq);
- umem->fq = NULL;
- }
+static int xdp_umem_addr_map(struct xdp_umem *umem, struct page **pages,
+ u32 nr_pages)
+{
+ umem->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
+ if (!umem->addrs)
+ return -ENOMEM;
+ return 0;
+}
- if (umem->cq) {
- xskq_destroy(umem->cq);
- umem->cq = NULL;
- }
+static void xdp_umem_release(struct xdp_umem *umem)
+{
+ umem->zc = false;
+ ida_simple_remove(&umem_ida, umem->id);
- xp_destroy(umem->pool);
+ xdp_umem_addr_unmap(umem);
xdp_umem_unpin_pages(umem);
xdp_umem_unaccount_pages(umem);
kfree(umem);
}
-static void xdp_umem_release_deferred(struct work_struct *work)
-{
- struct xdp_umem *umem = container_of(work, struct xdp_umem, work);
-
- xdp_umem_release(umem);
-}
-
void xdp_get_umem(struct xdp_umem *umem)
{
refcount_inc(&umem->users);
@@ -237,10 +76,8 @@ void xdp_put_umem(struct xdp_umem *umem)
if (!umem)
return;
- if (refcount_dec_and_test(&umem->users)) {
- INIT_WORK(&umem->work, xdp_umem_release_deferred);
- schedule_work(&umem->work);
- }
+ if (refcount_dec_and_test(&umem->users))
+ xdp_umem_release(umem);
}
static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address)
@@ -319,8 +156,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
return -EINVAL;
}
- if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
- XDP_UMEM_USES_NEED_WAKEUP))
+ if (mr->flags & ~XDP_UMEM_UNALIGNED_CHUNK_FLAG)
return -EINVAL;
if (!unaligned_chunks && !is_power_of_2(chunk_size))
@@ -355,13 +191,13 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->size = size;
umem->headroom = headroom;
umem->chunk_size = chunk_size;
+ umem->chunks = chunks;
umem->npgs = (u32)npgs;
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
- INIT_LIST_HEAD(&umem->xsk_tx_list);
- spin_lock_init(&umem->xsk_tx_list_lock);
+ INIT_LIST_HEAD(&umem->xsk_dma_list);
refcount_set(&umem->users, 1);
err = xdp_umem_account_pages(umem);
@@ -372,15 +208,13 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (err)
goto out_account;
- umem->pool = xp_create(umem->pgs, umem->npgs, chunks, chunk_size,
- headroom, size, unaligned_chunks);
- if (!umem->pool) {
- err = -ENOMEM;
- goto out_pin;
- }
+ err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs);
+ if (err)
+ goto out_unpin;
+
return 0;
-out_pin:
+out_unpin:
xdp_umem_unpin_pages(umem);
out_account:
xdp_umem_unaccount_pages(umem);
@@ -412,8 +246,3 @@ struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
return umem;
}
-
-bool xdp_umem_validate_queues(struct xdp_umem *umem)
-{
- return umem->fq && umem->cq;
-}
diff --git a/net/xdp/xdp_umem.h b/net/xdp/xdp_umem.h
index 32067fe98f65..181fdda2f2a8 100644
--- a/net/xdp/xdp_umem.h
+++ b/net/xdp/xdp_umem.h
@@ -8,14 +8,8 @@
#include <net/xdp_sock_drv.h>
-int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
- u16 queue_id, u16 flags);
-void xdp_umem_clear_dev(struct xdp_umem *umem);
-bool xdp_umem_validate_queues(struct xdp_umem *umem);
void xdp_get_umem(struct xdp_umem *umem);
void xdp_put_umem(struct xdp_umem *umem);
-void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
-void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs);
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr);
#endif /* XDP_UMEM_H_ */
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 6c5e09e7440a..b71a32eeae65 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -33,71 +33,105 @@
static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
+void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
- return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
- READ_ONCE(xs->umem->fq);
-}
-
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
-{
- if (umem->need_wakeup & XDP_WAKEUP_RX)
+ if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
return;
- umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
- umem->need_wakeup |= XDP_WAKEUP_RX;
+ pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
+ pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
+void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
struct xdp_sock *xs;
- if (umem->need_wakeup & XDP_WAKEUP_TX)
+ if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
return;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
+ list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
}
rcu_read_unlock();
- umem->need_wakeup |= XDP_WAKEUP_TX;
+ pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
+void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
- if (!(umem->need_wakeup & XDP_WAKEUP_RX))
+ if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
return;
- umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
- umem->need_wakeup &= ~XDP_WAKEUP_RX;
+ pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
+ pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
+void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
struct xdp_sock *xs;
- if (!(umem->need_wakeup & XDP_WAKEUP_TX))
+ if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
return;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
+ list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
}
rcu_read_unlock();
- umem->need_wakeup &= ~XDP_WAKEUP_TX;
+ pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
+bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
+{
+ return pool->uses_need_wakeup;
+}
+EXPORT_SYMBOL(xsk_uses_need_wakeup);
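
The cached_need_wakeup field exists so that repeated set/clear calls do not keep writing the flags word that is shared with userspace. A minimal sketch of that caching idea, assuming a single pool and no locking:

#include <stdio.h>

#define WAKEUP_RX 0x1

/* flags stands in for the ring flags word mapped into userspace;
 * 'cached' is the kernel-private copy (single pool, no locking). */
struct shared_ring {
	unsigned int flags;
};

static unsigned int cached;

static void set_rx_need_wakeup(struct shared_ring *ring)
{
	if (cached & WAKEUP_RX)
		return;		/* skip the write to the shared line */
	ring->flags |= WAKEUP_RX;
	cached |= WAKEUP_RX;
}

int main(void)
{
	struct shared_ring ring = { 0 };

	set_rx_need_wakeup(&ring);
	set_rx_need_wakeup(&ring);	/* second call is a pure no-op */
	printf("flags=%#x cached=%#x\n", ring.flags, cached);
	return 0;
}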
+
+struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
+ u16 queue_id)
+{
+ if (queue_id < dev->real_num_rx_queues)
+ return dev->_rx[queue_id].pool;
+ if (queue_id < dev->real_num_tx_queues)
+ return dev->_tx[queue_id].pool;
+
+ return NULL;
+}
+EXPORT_SYMBOL(xsk_get_pool_from_qid);
+
+void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
+{
+ if (queue_id < dev->real_num_rx_queues)
+ dev->_rx[queue_id].pool = NULL;
+ if (queue_id < dev->real_num_tx_queues)
+ dev->_tx[queue_id].pool = NULL;
+}
+
+/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
+ * not know if the device has more tx queues than rx, or the opposite.
+ * This might also change during run time.
+ */
+int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
+ u16 queue_id)
{
- return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
+ if (queue_id >= max_t(unsigned int,
+ dev->real_num_rx_queues,
+ dev->real_num_tx_queues))
+ return -EINVAL;
+
+ if (queue_id < dev->real_num_rx_queues)
+ dev->_rx[queue_id].pool = pool;
+ if (queue_id < dev->real_num_tx_queues)
+ dev->_tx[queue_id].pool = pool;
+
+ return 0;
}
-EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);
void xp_release(struct xdp_buff_xsk *xskb)
{
@@ -155,12 +189,12 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
struct xdp_buff *xsk_xdp;
int err;
- if (len > xsk_umem_get_rx_frame_size(xs->umem)) {
+ if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
xs->rx_dropped++;
return -ENOSPC;
}
- xsk_xdp = xsk_buff_alloc(xs->umem);
+ xsk_xdp = xsk_buff_alloc(xs->pool);
if (!xsk_xdp) {
xs->rx_dropped++;
return -ENOSPC;
@@ -208,7 +242,7 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
static void xsk_flush(struct xdp_sock *xs)
{
xskq_prod_submit(xs->rx);
- __xskq_cons_release(xs->umem->fq);
+ __xskq_cons_release(xs->pool->fq);
sock_def_readable(&xs->sk);
}
@@ -249,32 +283,32 @@ void __xsk_map_flush(void)
}
}
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
- xskq_prod_submit_n(umem->cq, nb_entries);
+ xskq_prod_submit_n(pool->cq, nb_entries);
}
-EXPORT_SYMBOL(xsk_umem_complete_tx);
+EXPORT_SYMBOL(xsk_tx_completed);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+void xsk_tx_release(struct xsk_buff_pool *pool)
{
struct xdp_sock *xs;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
+ list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
__xskq_cons_release(xs->tx);
xs->sk.sk_write_space(&xs->sk);
}
rcu_read_unlock();
}
-EXPORT_SYMBOL(xsk_umem_consume_tx_done);
+EXPORT_SYMBOL(xsk_tx_release);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
+bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
struct xdp_sock *xs;
rcu_read_lock();
- list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
- if (!xskq_cons_peek_desc(xs->tx, desc, umem)) {
+ list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
+ if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
xs->tx->queue_empty_descs++;
continue;
}
@@ -284,7 +318,7 @@ bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- if (xskq_prod_reserve_addr(umem->cq, desc->addr))
+ if (xskq_prod_reserve_addr(pool->cq, desc->addr))
goto out;
xskq_cons_release(xs->tx);
@@ -296,7 +330,7 @@ out:
rcu_read_unlock();
return false;
}
-EXPORT_SYMBOL(xsk_umem_consume_tx);
+EXPORT_SYMBOL(xsk_tx_peek_desc);
static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
@@ -322,7 +356,7 @@ static void xsk_destruct_skb(struct sk_buff *skb)
unsigned long flags;
spin_lock_irqsave(&xs->tx_completion_lock, flags);
- xskq_prod_submit_addr(xs->umem->cq, addr);
+ xskq_prod_submit_addr(xs->pool->cq, addr);
spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
sock_wfree(skb);
@@ -342,7 +376,7 @@ static int xsk_generic_xmit(struct sock *sk)
if (xs->queue_id >= xs->dev->real_num_tx_queues)
goto out;
- while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
+ while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
char *buffer;
u64 addr;
u32 len;
@@ -359,14 +393,14 @@ static int xsk_generic_xmit(struct sock *sk)
skb_put(skb, len);
addr = desc.addr;
- buffer = xsk_buff_raw_get_data(xs->umem, addr);
+ buffer = xsk_buff_raw_get_data(xs->pool, addr);
err = skb_store_bits(skb, 0, buffer, len);
/* This is the backpressure mechanism for the Tx path.
* Reserve space in the completion queue and only proceed
* if there is space in it. This avoids having to implement
* any buffering in the Tx path.
*/
- if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
+ if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
kfree_skb(skb);
goto out;
}
@@ -446,16 +480,16 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
__poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
- struct xdp_umem *umem;
+ struct xsk_buff_pool *pool;
if (unlikely(!xsk_is_bound(xs)))
return mask;
- umem = xs->umem;
+ pool = xs->pool;
- if (umem->need_wakeup) {
+ if (pool->cached_need_wakeup) {
if (xs->zc)
- xsk_wakeup(xs, umem->need_wakeup);
+ xsk_wakeup(xs, pool->cached_need_wakeup);
else
/* Poll needs to drive Tx also in copy mode */
__xsk_sendmsg(sk);
@@ -496,7 +530,7 @@ static void xsk_unbind_dev(struct xdp_sock *xs)
WRITE_ONCE(xs->state, XSK_UNBOUND);
/* Wait for driver to stop using the xdp socket. */
- xdp_del_sk_umem(xs->umem, xs);
+ xp_del_xsk(xs->pool, xs);
xs->dev = NULL;
synchronize_net();
dev_put(dev);
@@ -574,6 +608,8 @@ static int xsk_release(struct socket *sock)
xskq_destroy(xs->rx);
xskq_destroy(xs->tx);
+ xskq_destroy(xs->fq_tmp);
+ xskq_destroy(xs->cq_tmp);
sock_orphan(sk);
sock->sk = NULL;
@@ -601,6 +637,11 @@ static struct socket *xsk_lookup_xsk_from_fd(int fd)
return sock;
}
+static bool xsk_validate_queues(struct xdp_sock *xs)
+{
+ return xs->fq_tmp && xs->cq_tmp;
+}
+
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
@@ -669,29 +710,66 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
sockfd_put(sock);
goto out_unlock;
}
- if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
- err = -EINVAL;
- sockfd_put(sock);
- goto out_unlock;
+
+ if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
+ /* Share the umem with another socket on another qid
+ * and/or device.
+ */
+ xs->pool = xp_create_and_assign_umem(xs,
+ umem_xs->umem);
+ if (!xs->pool) {
+ err = -ENOMEM;
+ sockfd_put(sock);
+ goto out_unlock;
+ }
+
+ err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
+ dev, qid);
+ if (err) {
+ xp_destroy(xs->pool);
+ xs->pool = NULL;
+ sockfd_put(sock);
+ goto out_unlock;
+ }
+ } else {
+ /* Share the buffer pool with the other socket. */
+ if (xs->fq_tmp || xs->cq_tmp) {
+ /* Do not allow setting your own fq or cq. */
+ err = -EINVAL;
+ sockfd_put(sock);
+ goto out_unlock;
+ }
+
+ xp_get_pool(umem_xs->pool);
+ xs->pool = umem_xs->pool;
}
xdp_get_umem(umem_xs->umem);
WRITE_ONCE(xs->umem, umem_xs->umem);
sockfd_put(sock);
- } else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
+ } else if (!xs->umem || !xsk_validate_queues(xs)) {
err = -EINVAL;
goto out_unlock;
} else {
/* This xsk has its own umem. */
- err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
- if (err)
+ xs->pool = xp_create_and_assign_umem(xs, xs->umem);
+ if (!xs->pool) {
+ err = -ENOMEM;
goto out_unlock;
+ }
+
+ err = xp_assign_dev(xs->pool, dev, qid, flags);
+ if (err) {
+ xp_destroy(xs->pool);
+ xs->pool = NULL;
+ goto out_unlock;
+ }
}
xs->dev = dev;
xs->zc = xs->umem->zc;
xs->queue_id = qid;
- xdp_add_sk_umem(xs->umem, xs);
+ xp_add_xsk(xs->pool, xs);
out_unlock:
if (err) {
@@ -797,16 +875,10 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
mutex_unlock(&xs->mutex);
return -EBUSY;
}
- if (!xs->umem) {
- mutex_unlock(&xs->mutex);
- return -EINVAL;
- }
- q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
- &xs->umem->cq;
+ q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
+ &xs->cq_tmp;
err = xsk_init_queue(entries, q, true);
- if (optname == XDP_UMEM_FILL_RING)
- xp_set_fq(xs->umem->pool, *q);
mutex_unlock(&xs->mutex);
return err;
}
@@ -873,7 +945,7 @@ static int xsk_getsockopt(struct socket *sock, int level, int optname,
if (extra_stats) {
stats.rx_ring_full = xs->rx_queue_full;
stats.rx_fill_ring_empty_descs =
- xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+ xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
} else {
stats.rx_dropped += xs->rx_queue_full;
@@ -975,7 +1047,6 @@ static int xsk_mmap(struct file *file, struct socket *sock,
unsigned long size = vma->vm_end - vma->vm_start;
struct xdp_sock *xs = xdp_sk(sock->sk);
struct xsk_queue *q = NULL;
- struct xdp_umem *umem;
unsigned long pfn;
struct page *qpg;
@@ -987,16 +1058,12 @@ static int xsk_mmap(struct file *file, struct socket *sock,
} else if (offset == XDP_PGOFF_TX_RING) {
q = READ_ONCE(xs->tx);
} else {
- umem = READ_ONCE(xs->umem);
- if (!umem)
- return -EINVAL;
-
/* Matches the smp_wmb() in XDP_UMEM_REG */
smp_rmb();
if (offset == XDP_UMEM_PGOFF_FILL_RING)
- q = READ_ONCE(umem->fq);
+ q = READ_ONCE(xs->fq_tmp);
else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
- q = READ_ONCE(umem->cq);
+ q = READ_ONCE(xs->cq_tmp);
}
if (!q)
@@ -1034,8 +1101,8 @@ static int xsk_notifier(struct notifier_block *this,
xsk_unbind_dev(xs);
- /* Clear device references in umem. */
- xdp_umem_clear_dev(xs->umem);
+ /* Clear device references. */
+ xp_clear_dev(xs->pool);
}
mutex_unlock(&xs->mutex);
}
@@ -1079,7 +1146,7 @@ static void xsk_destruct(struct sock *sk)
if (!sock_flag(sk, SOCK_DEAD))
return;
- xdp_put_umem(xs->umem);
+ xp_put_pool(xs->pool);
sk_refcnt_debug_dec(sk);
}
@@ -1087,8 +1154,8 @@ static void xsk_destruct(struct sock *sk)
static int xsk_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
- struct sock *sk;
struct xdp_sock *xs;
+ struct sock *sk;
if (!ns_capable(net->user_ns, CAP_NET_RAW))
return -EPERM;
diff --git a/net/xdp/xsk.h b/net/xdp/xsk.h
index 455ddd480f3d..b9e896cee5bb 100644
--- a/net/xdp/xsk.h
+++ b/net/xdp/xsk.h
@@ -11,13 +11,6 @@
#define XSK_NEXT_PG_CONTIG_SHIFT 0
#define XSK_NEXT_PG_CONTIG_MASK BIT_ULL(XSK_NEXT_PG_CONTIG_SHIFT)
-/* Flags for the umem flags field.
- *
- * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public
- * flags. See inlude/uapi/include/linux/if_xdp.h.
- */
-#define XDP_UMEM_USES_NEED_WAKEUP BIT(1)
-
struct xdp_ring_offset_v1 {
__u64 producer;
__u64 consumer;
@@ -46,10 +39,12 @@ static inline struct xdp_sock *xdp_sk(struct sock *sk)
return (struct xdp_sock *)sk;
}
-bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
+void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id);
+int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
+ u16 queue_id);
#endif /* XSK_H_ */
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index a2044c245215..64c9e55d4d4e 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -2,21 +2,34 @@
#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
#include "xsk_queue.h"
+#include "xdp_umem.h"
+#include "xsk.h"
-static void xp_addr_unmap(struct xsk_buff_pool *pool)
+void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
- vunmap(pool->addrs);
+ unsigned long flags;
+
+ if (!xs->tx)
+ return;
+
+ spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
+ list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
+ spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}
-static int xp_addr_map(struct xsk_buff_pool *pool,
- struct page **pages, u32 nr_pages)
+void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
- pool->addrs = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
- if (!pool->addrs)
- return -ENOMEM;
- return 0;
+ unsigned long flags;
+
+ if (!xs->tx)
+ return;
+
+ spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
+ list_del_rcu(&xs->tx_list);
+ spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}
void xp_destroy(struct xsk_buff_pool *pool)
@@ -24,59 +37,61 @@ void xp_destroy(struct xsk_buff_pool *pool)
if (!pool)
return;
- xp_addr_unmap(pool);
kvfree(pool->heads);
kvfree(pool);
}
-struct xsk_buff_pool *xp_create(struct page **pages, u32 nr_pages, u32 chunks,
- u32 chunk_size, u32 headroom, u64 size,
- bool unaligned)
+struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ struct xdp_umem *umem)
{
struct xsk_buff_pool *pool;
struct xdp_buff_xsk *xskb;
- int err;
u32 i;
- pool = kvzalloc(struct_size(pool, free_heads, chunks), GFP_KERNEL);
+ pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
+ GFP_KERNEL);
if (!pool)
goto out;
- pool->heads = kvcalloc(chunks, sizeof(*pool->heads), GFP_KERNEL);
+ pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
if (!pool->heads)
goto out;
- pool->chunk_mask = ~((u64)chunk_size - 1);
- pool->addrs_cnt = size;
- pool->heads_cnt = chunks;
- pool->free_heads_cnt = chunks;
- pool->headroom = headroom;
- pool->chunk_size = chunk_size;
- pool->unaligned = unaligned;
- pool->frame_len = chunk_size - headroom - XDP_PACKET_HEADROOM;
+ pool->chunk_mask = ~((u64)umem->chunk_size - 1);
+ pool->addrs_cnt = umem->size;
+ pool->heads_cnt = umem->chunks;
+ pool->free_heads_cnt = umem->chunks;
+ pool->headroom = umem->headroom;
+ pool->chunk_size = umem->chunk_size;
+ pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+ pool->frame_len = umem->chunk_size - umem->headroom -
+ XDP_PACKET_HEADROOM;
+ pool->umem = umem;
+ pool->addrs = umem->addrs;
INIT_LIST_HEAD(&pool->free_list);
+ INIT_LIST_HEAD(&pool->xsk_tx_list);
+ spin_lock_init(&pool->xsk_tx_list_lock);
+ refcount_set(&pool->users, 1);
+
+ pool->fq = xs->fq_tmp;
+ pool->cq = xs->cq_tmp;
+ xs->fq_tmp = NULL;
+ xs->cq_tmp = NULL;
for (i = 0; i < pool->free_heads_cnt; i++) {
xskb = &pool->heads[i];
xskb->pool = pool;
- xskb->xdp.frame_sz = chunk_size - headroom;
+ xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
pool->free_heads[i] = xskb;
}
- err = xp_addr_map(pool, pages, nr_pages);
- if (!err)
- return pool;
+ return pool;
out:
xp_destroy(pool);
return NULL;
}
-void xp_set_fq(struct xsk_buff_pool *pool, struct xsk_queue *fq)
-{
- pool->fq = fq;
-}
-
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
u32 i;
@@ -86,70 +101,320 @@ void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
}
EXPORT_SYMBOL(xp_set_rxq_info);
-void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
+static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
- dma_addr_t *dma;
- u32 i;
+ struct netdev_bpf bpf;
+ int err;
- if (pool->dma_pages_cnt == 0)
+ ASSERT_RTNL();
+
+ if (pool->umem->zc) {
+ bpf.command = XDP_SETUP_XSK_POOL;
+ bpf.xsk.pool = NULL;
+ bpf.xsk.queue_id = pool->queue_id;
+
+ err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
+
+ if (err)
+ WARN(1, "Failed to disable zero-copy!\n");
+ }
+}
+
+static int __xp_assign_dev(struct xsk_buff_pool *pool,
+ struct net_device *netdev, u16 queue_id, u16 flags)
+{
+ bool force_zc, force_copy;
+ struct netdev_bpf bpf;
+ int err = 0;
+
+ ASSERT_RTNL();
+
+ force_zc = flags & XDP_ZEROCOPY;
+ force_copy = flags & XDP_COPY;
+
+ if (force_zc && force_copy)
+ return -EINVAL;
+
+ if (xsk_get_pool_from_qid(netdev, queue_id))
+ return -EBUSY;
+
+ pool->netdev = netdev;
+ pool->queue_id = queue_id;
+ err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
+ if (err)
+ return err;
+
+ if (flags & XDP_USE_NEED_WAKEUP) {
+ pool->uses_need_wakeup = true;
+		/* Tx needs to be explicitly woken up the first time. This
+		 * also covers drivers that do not implement the feature;
+		 * they will always have to call sendto().
+		 */
+ pool->cached_need_wakeup = XDP_WAKEUP_TX;
+ }
+
+ dev_hold(netdev);
+
+ if (force_copy)
+ /* For copy-mode, we are done. */
+ return 0;
+
+ if (!netdev->netdev_ops->ndo_bpf ||
+ !netdev->netdev_ops->ndo_xsk_wakeup) {
+ err = -EOPNOTSUPP;
+ goto err_unreg_pool;
+ }
+
+ bpf.command = XDP_SETUP_XSK_POOL;
+ bpf.xsk.pool = pool;
+ bpf.xsk.queue_id = queue_id;
+
+ err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
+ if (err)
+ goto err_unreg_pool;
+
+ if (!pool->dma_pages) {
+ WARN(1, "Driver did not DMA map zero-copy buffers");
+ goto err_unreg_xsk;
+ }
+ pool->umem->zc = true;
+ return 0;
+
+err_unreg_xsk:
+ xp_disable_drv_zc(pool);
+err_unreg_pool:
+ if (!force_zc)
+ err = 0; /* fallback to copy mode */
+ if (err)
+ xsk_clear_pool_at_qid(netdev, queue_id);
+ return err;
+}
+
+int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *dev,
+ u16 queue_id, u16 flags)
+{
+ return __xp_assign_dev(pool, dev, queue_id, flags);
+}
+
+int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
+ struct net_device *dev, u16 queue_id)
+{
+ u16 flags;
+
+ /* One fill and completion ring required for each queue id. */
+ if (!pool->fq || !pool->cq)
+ return -EINVAL;
+
+ flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
+ if (pool->uses_need_wakeup)
+ flags |= XDP_USE_NEED_WAKEUP;
+
+ return __xp_assign_dev(pool, dev, queue_id, flags);
+}
+
+void xp_clear_dev(struct xsk_buff_pool *pool)
+{
+ if (!pool->netdev)
return;
- for (i = 0; i < pool->dma_pages_cnt; i++) {
- dma = &pool->dma_pages[i];
+ xp_disable_drv_zc(pool);
+ xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
+ dev_put(pool->netdev);
+ pool->netdev = NULL;
+}
+
+static void xp_release_deferred(struct work_struct *work)
+{
+ struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
+ work);
+
+ rtnl_lock();
+ xp_clear_dev(pool);
+ rtnl_unlock();
+
+ if (pool->fq) {
+ xskq_destroy(pool->fq);
+ pool->fq = NULL;
+ }
+
+ if (pool->cq) {
+ xskq_destroy(pool->cq);
+ pool->cq = NULL;
+ }
+
+ xdp_put_umem(pool->umem);
+ xp_destroy(pool);
+}
+
+void xp_get_pool(struct xsk_buff_pool *pool)
+{
+ refcount_inc(&pool->users);
+}
+
+void xp_put_pool(struct xsk_buff_pool *pool)
+{
+ if (!pool)
+ return;
+
+ if (refcount_dec_and_test(&pool->users)) {
+ INIT_WORK(&pool->work, xp_release_deferred);
+ schedule_work(&pool->work);
+ }
+}
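
xp_put_pool() pairs a refcount with a deferred teardown: the last put schedules xp_release_deferred() on a workqueue so the release path may take rtnl_lock(). A userspace sketch of the refcount half, with the deferral reduced to a direct call:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pool {
	atomic_int users;
};

static void pool_release(struct pool *p)
{
	/* The kernel defers this to a workqueue so it may sleep and
	 * take rtnl_lock(); a direct free() is enough here. */
	free(p);
}

static void pool_get(struct pool *p)
{
	atomic_fetch_add(&p->users, 1);
}

static void pool_put(struct pool *p)
{
	if (!p)
		return;
	if (atomic_fetch_sub(&p->users, 1) == 1)
		pool_release(p);
}

int main(void)
{
	struct pool *p = malloc(sizeof(*p));

	atomic_init(&p->users, 1);
	pool_get(p);
	pool_put(p);	/* one reference left: nothing happens */
	pool_put(p);	/* last reference: pool is released */
	return 0;
}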
+
+static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
+{
+ struct xsk_dma_map *dma_map;
+
+ list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
+ if (dma_map->netdev == pool->netdev)
+ return dma_map;
+ }
+
+ return NULL;
+}
+
+static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
+ u32 nr_pages, struct xdp_umem *umem)
+{
+ struct xsk_dma_map *dma_map;
+
+ dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
+ if (!dma_map)
+ return NULL;
+
+ dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
+ if (!dma_map->dma_pages) {
+ kfree(dma_map);
+ return NULL;
+ }
+
+ dma_map->netdev = netdev;
+ dma_map->dev = dev;
+ dma_map->dma_need_sync = false;
+ dma_map->dma_pages_cnt = nr_pages;
+ refcount_set(&dma_map->users, 1);
+ list_add(&dma_map->list, &umem->xsk_dma_list);
+ return dma_map;
+}
+
+static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
+{
+ list_del(&dma_map->list);
+ kvfree(dma_map->dma_pages);
+ kfree(dma_map);
+}
+
+static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
+{
+ dma_addr_t *dma;
+ u32 i;
+
+ for (i = 0; i < dma_map->dma_pages_cnt; i++) {
+ dma = &dma_map->dma_pages[i];
if (*dma) {
- dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
+ dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
DMA_BIDIRECTIONAL, attrs);
*dma = 0;
}
}
+ xp_destroy_dma_map(dma_map);
+}
+
+void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
+{
+ struct xsk_dma_map *dma_map;
+
+ if (pool->dma_pages_cnt == 0)
+ return;
+
+ dma_map = xp_find_dma_map(pool);
+ if (!dma_map) {
+ WARN(1, "Could not find dma_map for device");
+ return;
+ }
+
+ if (!refcount_dec_and_test(&dma_map->users))
+ return;
+
+ __xp_dma_unmap(dma_map, attrs);
kvfree(pool->dma_pages);
pool->dma_pages_cnt = 0;
pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);
-static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
+static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
u32 i;
- for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
- if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
- pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
+ for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
+ if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
+ dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
else
- pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
+ dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
}
}
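
Page addresses are page-aligned, so bit 0 is free to cache "the next page is physically adjacent", which is what XSK_NEXT_PG_CONTIG_MASK encodes. A toy version of the same trick:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ		4096ull
#define NEXT_PG_CONTIG	1ull	/* bit 0 is free: addresses are page-aligned */

static void mark_contig(uint64_t *pages, unsigned int n)
{
	unsigned int i;

	for (i = 0; i + 1 < n; i++) {
		if ((pages[i] & ~NEXT_PG_CONTIG) + PAGE_SZ ==
		    (pages[i + 1] & ~NEXT_PG_CONTIG))
			pages[i] |= NEXT_PG_CONTIG;
		else
			pages[i] &= ~NEXT_PG_CONTIG;
	}
}

int main(void)
{
	uint64_t pages[] = { 0x10000, 0x11000, 0x30000 };

	mark_contig(pages, 3);
	/* prints "1 0": page 0 is adjacent to page 1, page 1 is not to page 2 */
	printf("%d %d\n", (int)(pages[0] & 1), (int)(pages[1] & 1));
	return 0;
}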
+static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
+{
+ pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
+ if (!pool->dma_pages)
+ return -ENOMEM;
+
+ pool->dev = dma_map->dev;
+ pool->dma_pages_cnt = dma_map->dma_pages_cnt;
+ pool->dma_need_sync = dma_map->dma_need_sync;
+ memcpy(pool->dma_pages, dma_map->dma_pages,
+ pool->dma_pages_cnt * sizeof(*pool->dma_pages));
+
+ return 0;
+}
+
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
unsigned long attrs, struct page **pages, u32 nr_pages)
{
+ struct xsk_dma_map *dma_map;
dma_addr_t dma;
+ int err;
u32 i;
- pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
- GFP_KERNEL);
- if (!pool->dma_pages)
- return -ENOMEM;
+ dma_map = xp_find_dma_map(pool);
+ if (dma_map) {
+ err = xp_init_dma_info(pool, dma_map);
+ if (err)
+ return err;
+
+ refcount_inc(&dma_map->users);
+ return 0;
+ }
- pool->dev = dev;
- pool->dma_pages_cnt = nr_pages;
- pool->dma_need_sync = false;
+ dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
+ if (!dma_map)
+ return -ENOMEM;
- for (i = 0; i < pool->dma_pages_cnt; i++) {
+ for (i = 0; i < dma_map->dma_pages_cnt; i++) {
dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
DMA_BIDIRECTIONAL, attrs);
if (dma_mapping_error(dev, dma)) {
- xp_dma_unmap(pool, attrs);
+ __xp_dma_unmap(dma_map, attrs);
return -ENOMEM;
}
if (dma_need_sync(dev, dma))
- pool->dma_need_sync = true;
- pool->dma_pages[i] = dma;
+ dma_map->dma_need_sync = true;
+ dma_map->dma_pages[i] = dma;
}
if (pool->unaligned)
- xp_check_dma_contiguity(pool);
+ xp_check_dma_contiguity(dma_map);
+
+ err = xp_init_dma_info(pool, dma_map);
+ if (err) {
+ __xp_dma_unmap(dma_map, attrs);
+ return err;
+ }
+
return 0;
}
EXPORT_SYMBOL(xp_dma_map);
diff --git a/net/xdp/xsk_diag.c b/net/xdp/xsk_diag.c
index 21e9c2d123ee..c014217f5fa7 100644
--- a/net/xdp/xsk_diag.c
+++ b/net/xdp/xsk_diag.c
@@ -46,6 +46,7 @@ static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
+ struct xsk_buff_pool *pool = xs->pool;
struct xdp_umem *umem = xs->umem;
struct xdp_diag_umem du = {};
int err;
@@ -58,21 +59,20 @@ static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
du.num_pages = umem->npgs;
du.chunk_size = umem->chunk_size;
du.headroom = umem->headroom;
- du.ifindex = umem->dev ? umem->dev->ifindex : 0;
- du.queue_id = umem->queue_id;
+ du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
+ du.queue_id = pool ? pool->queue_id : 0;
du.flags = 0;
if (umem->zc)
du.flags |= XDP_DU_F_ZEROCOPY;
du.refs = refcount_read(&umem->users);
err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
-
- if (!err && umem->fq)
- err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
- if (!err && umem->cq) {
- err = xsk_diag_put_ring(umem->cq, XDP_DIAG_UMEM_COMPLETION_RING,
- nlskb);
- }
+ if (!err && pool && pool->fq)
+ err = xsk_diag_put_ring(pool->fq,
+ XDP_DIAG_UMEM_FILL_RING, nlskb);
+ if (!err && pool && pool->cq)
+ err = xsk_diag_put_ring(pool->cq,
+ XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
return err;
}
@@ -83,7 +83,7 @@ static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
du.n_rx_dropped = xs->rx_dropped;
du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
du.n_rx_full = xs->rx_queue_full;
- du.n_fill_ring_empty = xs->umem ? xskq_nb_queue_empty_descs(xs->umem->fq) : 0;
+ du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index bf42cfd74b89..cdb9cf3cd136 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -15,6 +15,10 @@
struct xdp_ring {
u32 producer ____cacheline_aligned_in_smp;
+	/* Keep the adjacent cache line prefetcher from pulling in the
+	 * consumer pointer when the producer pointer is touched, and
+	 * vice versa.
+	 */
+ u32 pad ____cacheline_aligned_in_smp;
u32 consumer ____cacheline_aligned_in_smp;
u32 flags;
};
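
The new pad member puts a full aligned slot between producer and consumer so the adjacent-line prefetcher cannot drag one into the cache when the other is written. A stand-alone illustration using C11 alignas(64) in place of ____cacheline_aligned_in_smp (64-byte cache lines assumed):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

/* alignas(64) stands in for ____cacheline_aligned_in_smp; 64-byte
 * cache lines are an assumption, not something the macro guarantees. */
struct ring_hdr {
	alignas(64) unsigned int producer;
	alignas(64) unsigned int pad;	/* full slot between the two */
	alignas(64) unsigned int consumer;
};

int main(void)
{
	/* prints 0, 64 and 128: consumer sits two lines after producer,
	 * so an adjacent-line prefetch of one cannot pull in the other */
	printf("producer@%zu pad@%zu consumer@%zu\n",
	       offsetof(struct ring_hdr, producer),
	       offsetof(struct ring_hdr, pad),
	       offsetof(struct ring_hdr, consumer));
	return 0;
}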
@@ -96,7 +100,7 @@ struct xsk_queue {
* seen and read by the consumer.
*
* The consumer peeks into the ring to see if the producer has written
- * any new entries. If so, the producer can then read these entries
+ * any new entries. If so, the consumer can then read these entries
* and when it is done reading them release them back to the producer
* so that the producer can use these slots to fill in new entries.
*
@@ -166,9 +170,9 @@ static inline bool xp_validate_desc(struct xsk_buff_pool *pool,
static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
struct xdp_desc *d,
- struct xdp_umem *umem)
+ struct xsk_buff_pool *pool)
{
- if (!xp_validate_desc(umem->pool, d)) {
+ if (!xp_validate_desc(pool, d)) {
q->invalid_descs++;
return false;
}
@@ -177,14 +181,14 @@ static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
static inline bool xskq_cons_read_desc(struct xsk_queue *q,
struct xdp_desc *desc,
- struct xdp_umem *umem)
+ struct xsk_buff_pool *pool)
{
while (q->cached_cons != q->cached_prod) {
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
u32 idx = q->cached_cons & q->ring_mask;
*desc = ring->desc[idx];
- if (xskq_cons_is_valid_desc(q, desc, umem))
+ if (xskq_cons_is_valid_desc(q, desc, pool))
return true;
q->cached_cons++;
@@ -236,11 +240,11 @@ static inline bool xskq_cons_peek_addr_unchecked(struct xsk_queue *q, u64 *addr)
static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
struct xdp_desc *desc,
- struct xdp_umem *umem)
+ struct xsk_buff_pool *pool)
{
if (q->cached_prod == q->cached_cons)
xskq_cons_get_entries(q);
- return xskq_cons_read_desc(q, desc, umem);
+ return xskq_cons_read_desc(q, desc, pool);
}
static inline void xskq_cons_release(struct xsk_queue *q)
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
index 8367adbbe9df..49da2b8ace8b 100644
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -132,7 +132,7 @@ static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
return 0;
}
-static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
+static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2;
struct bpf_insn *insn = insn_buf;
@@ -185,11 +185,6 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
xs = (struct xdp_sock *)sock->sk;
- if (!xsk_is_setup_for_bpf_map(xs)) {
- sockfd_put(sock);
- return -EOPNOTSUPP;
- }
-
map_entry = &m->xsk_map[i];
node = xsk_map_node_alloc(m, map_entry);
if (IS_ERR(node)) {
@@ -254,8 +249,16 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
spin_unlock_bh(&map->lock);
}
+static bool xsk_map_meta_equal(const struct bpf_map *meta0,
+ const struct bpf_map *meta1)
+{
+ return meta0->max_entries == meta1->max_entries &&
+ bpf_map_meta_equal(meta0, meta1);
+}
+
static int xsk_map_btf_id;
const struct bpf_map_ops xsk_map_ops = {
+ .map_meta_equal = xsk_map_meta_equal,
.map_alloc = xsk_map_alloc,
.map_free = xsk_map_free,
.map_get_next_key = xsk_map_get_next_key,
diff --git a/net/xfrm/Kconfig b/net/xfrm/Kconfig
index 5b9a5ab48111..3adf31a83a79 100644
--- a/net/xfrm/Kconfig
+++ b/net/xfrm/Kconfig
@@ -28,6 +28,17 @@ config XFRM_USER
If unsure, say Y.
+config XFRM_USER_COMPAT
+ tristate "Compatible ABI support"
+ depends on XFRM_USER && COMPAT_FOR_U64_ALIGNMENT && \
+ HAVE_EFFICIENT_UNALIGNED_ACCESS
+ select WANT_COMPAT_NETLINK_MESSAGES
+ help
+	  Transformation (XFRM) user configuration interface, like IPsec,
+	  used by compat (32-bit) Linux applications.
+
+ If unsure, say N.
+
config XFRM_INTERFACE
tristate "Transformation virtual interface"
depends on XFRM && IPV6
diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile
index 2d4bb4b9f75e..494aa744bfb9 100644
--- a/net/xfrm/Makefile
+++ b/net/xfrm/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \
obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o
obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o
obj-$(CONFIG_XFRM_USER) += xfrm_user.o
+obj-$(CONFIG_XFRM_USER_COMPAT) += xfrm_compat.o
obj-$(CONFIG_XFRM_IPCOMP) += xfrm_ipcomp.o
obj-$(CONFIG_XFRM_INTERFACE) += xfrm_interface.o
obj-$(CONFIG_XFRM_ESPINTCP) += espintcp.o
diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
new file mode 100644
index 000000000000..e28f0c9ecd6a
--- /dev/null
+++ b/net/xfrm/xfrm_compat.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XFRM compat layer
+ * Author: Dmitry Safonov <dima@arista.com>
+ * Based on code and translator idea by: Florian Westphal <fw@strlen.de>
+ */
+#include <linux/compat.h>
+#include <linux/xfrm.h>
+#include <net/xfrm.h>
+
+struct compat_xfrm_lifetime_cfg {
+ compat_u64 soft_byte_limit, hard_byte_limit;
+ compat_u64 soft_packet_limit, hard_packet_limit;
+ compat_u64 soft_add_expires_seconds, hard_add_expires_seconds;
+ compat_u64 soft_use_expires_seconds, hard_use_expires_seconds;
+}; /* same size on 32bit, but only 4 byte alignment required */
+
+struct compat_xfrm_lifetime_cur {
+ compat_u64 bytes, packets, add_time, use_time;
+}; /* same size on 32bit, but only 4 byte alignment required */
+
+struct compat_xfrm_userpolicy_info {
+ struct xfrm_selector sel;
+ struct compat_xfrm_lifetime_cfg lft;
+ struct compat_xfrm_lifetime_cur curlft;
+ __u32 priority, index;
+ u8 dir, action, flags, share;
+ /* 4 bytes additional padding on 64bit */
+};
+
+struct compat_xfrm_usersa_info {
+ struct xfrm_selector sel;
+ struct xfrm_id id;
+ xfrm_address_t saddr;
+ struct compat_xfrm_lifetime_cfg lft;
+ struct compat_xfrm_lifetime_cur curlft;
+ struct xfrm_stats stats;
+ __u32 seq, reqid;
+ u16 family;
+ u8 mode, replay_window, flags;
+ /* 4 bytes additional padding on 64bit */
+};
+
+struct compat_xfrm_user_acquire {
+ struct xfrm_id id;
+ xfrm_address_t saddr;
+ struct xfrm_selector sel;
+ struct compat_xfrm_userpolicy_info policy;
+ /* 4 bytes additional padding on 64bit */
+ __u32 aalgos, ealgos, calgos, seq;
+};
+
+struct compat_xfrm_userspi_info {
+ struct compat_xfrm_usersa_info info;
+ /* 4 bytes additional padding on 64bit */
+ __u32 min, max;
+};
+
+struct compat_xfrm_user_expire {
+ struct compat_xfrm_usersa_info state;
+ /* 8 bytes additional padding on 64bit */
+ u8 hard;
+};
+
+struct compat_xfrm_user_polexpire {
+ struct compat_xfrm_userpolicy_info pol;
+ /* 8 bytes additional padding on 64bit */
+ u8 hard;
+};
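
All of these compat structs rely on compat_u64 being a u64 with only 4-byte alignment, so they have the same size on 32-bit but less tail padding than their native 64-bit counterparts. A sketch of that size difference, assuming GNU-style attributes (applied to a typedef, the aligned attribute may lower alignment):

#include <stdint.h>
#include <stdio.h>

/* compat_u64 is a u64 typedef carrying aligned(4); with GNU-style
 * attributes the same effect is reproducible in userspace. */
typedef uint64_t lower_u64 __attribute__((aligned(4)));

struct native_expire {
	uint64_t limit;
	uint8_t hard;
};

struct compat_expire {
	lower_u64 limit;
	uint8_t hard;
};

int main(void)
{
	/* On x86-64 this prints "native 16, compat 12": same members,
	 * different tail padding, which is what the translator below
	 * has to add or trim. */
	printf("native %zu, compat %zu\n",
	       sizeof(struct native_expire), sizeof(struct compat_expire));
	return 0;
}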
+
+#define XMSGSIZE(type) sizeof(struct type)
+
+static const int compat_msg_min[XFRM_NR_MSGTYPES] = {
+ [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_usersa_info),
+ [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
+ [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
+ [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userpolicy_info),
+ [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+ [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+ [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userspi_info),
+ [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_acquire),
+ [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_expire),
+ [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_userpolicy_info),
+ [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_usersa_info),
+ [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(compat_xfrm_user_polexpire),
+ [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
+ [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
+ [XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
+ [XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
+ [XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
+ [XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+ [XFRM_MSG_NEWSADINFO - XFRM_MSG_BASE] = sizeof(u32),
+ [XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
+ [XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
+ [XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
+ [XFRM_MSG_MAPPING - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_mapping)
+};
+
+static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
+ [XFRMA_SA] = { .len = XMSGSIZE(compat_xfrm_usersa_info)},
+ [XFRMA_POLICY] = { .len = XMSGSIZE(compat_xfrm_userpolicy_info)},
+ [XFRMA_LASTUSED] = { .type = NLA_U64},
+ [XFRMA_ALG_AUTH_TRUNC] = { .len = sizeof(struct xfrm_algo_auth)},
+ [XFRMA_ALG_AEAD] = { .len = sizeof(struct xfrm_algo_aead) },
+ [XFRMA_ALG_AUTH] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ALG_CRYPT] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
+ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
+ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
+ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
+ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
+ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
+ [XFRMA_ETIMER_THRESH] = { .type = NLA_U32 },
+ [XFRMA_SRCADDR] = { .len = sizeof(xfrm_address_t) },
+ [XFRMA_COADDR] = { .len = sizeof(xfrm_address_t) },
+ [XFRMA_POLICY_TYPE] = { .len = sizeof(struct xfrm_userpolicy_type)},
+ [XFRMA_MIGRATE] = { .len = sizeof(struct xfrm_user_migrate) },
+ [XFRMA_KMADDRESS] = { .len = sizeof(struct xfrm_user_kmaddress) },
+ [XFRMA_MARK] = { .len = sizeof(struct xfrm_mark) },
+ [XFRMA_TFCPAD] = { .type = NLA_U32 },
+ [XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
+ [XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
+ [XFRMA_PROTO] = { .type = NLA_U8 },
+ [XFRMA_ADDRESS_FILTER] = { .len = sizeof(struct xfrm_address_filter) },
+ [XFRMA_OFFLOAD_DEV] = { .len = sizeof(struct xfrm_user_offload) },
+ [XFRMA_SET_MARK] = { .type = NLA_U32 },
+ [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
+ [XFRMA_IF_ID] = { .type = NLA_U32 },
+};
+
+static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb,
+ const struct nlmsghdr *nlh_src, u16 type)
+{
+ int payload = compat_msg_min[type];
+ int src_len = xfrm_msg_min[type];
+ struct nlmsghdr *nlh_dst;
+
+	/* Compat messages are at most as long as native ones (tail padding) */
+ if (WARN_ON_ONCE(src_len < payload))
+ return ERR_PTR(-EMSGSIZE);
+
+ nlh_dst = nlmsg_put(skb, nlh_src->nlmsg_pid, nlh_src->nlmsg_seq,
+ nlh_src->nlmsg_type, payload, nlh_src->nlmsg_flags);
+ if (!nlh_dst)
+ return ERR_PTR(-EMSGSIZE);
+
+ memset(nlmsg_data(nlh_dst), 0, payload);
+
+ switch (nlh_src->nlmsg_type) {
+ /* Compat message has the same layout as native */
+ case XFRM_MSG_DELSA:
+ case XFRM_MSG_DELPOLICY:
+ case XFRM_MSG_FLUSHSA:
+ case XFRM_MSG_FLUSHPOLICY:
+ case XFRM_MSG_NEWAE:
+ case XFRM_MSG_REPORT:
+ case XFRM_MSG_MIGRATE:
+ case XFRM_MSG_NEWSADINFO:
+ case XFRM_MSG_NEWSPDINFO:
+ case XFRM_MSG_MAPPING:
+ WARN_ON_ONCE(src_len != payload);
+ memcpy(nlmsg_data(nlh_dst), nlmsg_data(nlh_src), src_len);
+ break;
+	/* Native message is 4 bytes longer due to trailing u64 alignment */
+ case XFRM_MSG_NEWSA:
+ case XFRM_MSG_NEWPOLICY:
+ case XFRM_MSG_UPDSA:
+ case XFRM_MSG_UPDPOLICY:
+ WARN_ON_ONCE(src_len != payload + 4);
+ memcpy(nlmsg_data(nlh_dst), nlmsg_data(nlh_src), payload);
+ break;
+ case XFRM_MSG_EXPIRE: {
+ const struct xfrm_user_expire *src_ue = nlmsg_data(nlh_src);
+ struct compat_xfrm_user_expire *dst_ue = nlmsg_data(nlh_dst);
+
+ /* compat_xfrm_user_expire has 4-byte smaller state */
+ memcpy(dst_ue, src_ue, sizeof(dst_ue->state));
+ dst_ue->hard = src_ue->hard;
+ break;
+ }
+ case XFRM_MSG_ACQUIRE: {
+ const struct xfrm_user_acquire *src_ua = nlmsg_data(nlh_src);
+ struct compat_xfrm_user_acquire *dst_ua = nlmsg_data(nlh_dst);
+
+ memcpy(dst_ua, src_ua, offsetof(struct compat_xfrm_user_acquire, aalgos));
+ dst_ua->aalgos = src_ua->aalgos;
+ dst_ua->ealgos = src_ua->ealgos;
+ dst_ua->calgos = src_ua->calgos;
+ dst_ua->seq = src_ua->seq;
+ break;
+ }
+ case XFRM_MSG_POLEXPIRE: {
+ const struct xfrm_user_polexpire *src_upe = nlmsg_data(nlh_src);
+ struct compat_xfrm_user_polexpire *dst_upe = nlmsg_data(nlh_dst);
+
+ /* compat_xfrm_user_polexpire has 4-byte smaller pol */
+ memcpy(dst_upe, src_upe, sizeof(dst_upe->pol));
+ dst_upe->hard = src_upe->hard;
+ break;
+ }
+ case XFRM_MSG_ALLOCSPI: {
+ const struct xfrm_userspi_info *src_usi = nlmsg_data(nlh_src);
+ struct compat_xfrm_userspi_info *dst_usi = nlmsg_data(nlh_dst);
+
+ /* compat_xfrm_userspi_info has 4-byte smaller info */
+ memcpy(dst_usi, src_usi, sizeof(src_usi->info));
+ dst_usi->min = src_usi->min;
+ dst_usi->max = src_usi->max;
+ break;
+ }
+ /* Not sent by the kernel */
+ case XFRM_MSG_GETSA:
+ case XFRM_MSG_GETPOLICY:
+ case XFRM_MSG_GETAE:
+ case XFRM_MSG_GETSADINFO:
+ case XFRM_MSG_GETSPDINFO:
+ default:
+ WARN_ONCE(1, "unsupported nlmsg_type %d", nlh_src->nlmsg_type);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+ return nlh_dst;
+}
+
+static int xfrm_nla_cpy(struct sk_buff *dst, const struct nlattr *src, int len)
+{
+ return nla_put(dst, src->nla_type, len, nla_data(src));
+}
+
+static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src)
+{
+ switch (src->nla_type) {
+ case XFRMA_PAD:
+ /* Ignore */
+ return 0;
+ case XFRMA_ALG_AUTH:
+ case XFRMA_ALG_CRYPT:
+ case XFRMA_ALG_COMP:
+ case XFRMA_ENCAP:
+ case XFRMA_TMPL:
+ return xfrm_nla_cpy(dst, src, nla_len(src));
+ case XFRMA_SA:
+ return xfrm_nla_cpy(dst, src, XMSGSIZE(compat_xfrm_usersa_info));
+ case XFRMA_POLICY:
+ return xfrm_nla_cpy(dst, src, XMSGSIZE(compat_xfrm_userpolicy_info));
+ case XFRMA_SEC_CTX:
+ return xfrm_nla_cpy(dst, src, nla_len(src));
+ case XFRMA_LTIME_VAL:
+ return nla_put_64bit(dst, src->nla_type, nla_len(src),
+ nla_data(src), XFRMA_PAD);
+ case XFRMA_REPLAY_VAL:
+ case XFRMA_REPLAY_THRESH:
+ case XFRMA_ETIMER_THRESH:
+ case XFRMA_SRCADDR:
+ case XFRMA_COADDR:
+ return xfrm_nla_cpy(dst, src, nla_len(src));
+ case XFRMA_LASTUSED:
+ return nla_put_64bit(dst, src->nla_type, nla_len(src),
+ nla_data(src), XFRMA_PAD);
+ case XFRMA_POLICY_TYPE:
+ case XFRMA_MIGRATE:
+ case XFRMA_ALG_AEAD:
+ case XFRMA_KMADDRESS:
+ case XFRMA_ALG_AUTH_TRUNC:
+ case XFRMA_MARK:
+ case XFRMA_TFCPAD:
+ case XFRMA_REPLAY_ESN_VAL:
+ case XFRMA_SA_EXTRA_FLAGS:
+ case XFRMA_PROTO:
+ case XFRMA_ADDRESS_FILTER:
+ case XFRMA_OFFLOAD_DEV:
+ case XFRMA_SET_MARK:
+ case XFRMA_SET_MARK_MASK:
+ case XFRMA_IF_ID:
+ return xfrm_nla_cpy(dst, src, nla_len(src));
+ default:
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
+ WARN_ONCE(1, "unsupported nla_type %d", src->nla_type);
+ return -EOPNOTSUPP;
+ }
+}
+
+/* Take kernel-built (64bit layout) and create 32bit layout for userspace */
+static int xfrm_xlate64(struct sk_buff *dst, const struct nlmsghdr *nlh_src)
+{
+ u16 type = nlh_src->nlmsg_type - XFRM_MSG_BASE;
+ const struct nlattr *nla, *attrs;
+ struct nlmsghdr *nlh_dst;
+ int len, remaining;
+
+ nlh_dst = xfrm_nlmsg_put_compat(dst, nlh_src, type);
+ if (IS_ERR(nlh_dst))
+ return PTR_ERR(nlh_dst);
+
+ attrs = nlmsg_attrdata(nlh_src, xfrm_msg_min[type]);
+ len = nlmsg_attrlen(nlh_src, xfrm_msg_min[type]);
+
+ nla_for_each_attr(nla, attrs, len, remaining) {
+ int err = xfrm_xlate64_attr(dst, nla);
+
+ if (err)
+ return err;
+ }
+
+ nlmsg_end(dst, nlh_dst);
+
+ return 0;
+}
+
+static int xfrm_alloc_compat(struct sk_buff *skb, const struct nlmsghdr *nlh_src)
+{
+ u16 type = nlh_src->nlmsg_type - XFRM_MSG_BASE;
+ struct sk_buff *new = NULL;
+ int err;
+
+ if (WARN_ON_ONCE(type >= ARRAY_SIZE(xfrm_msg_min)))
+ return -EOPNOTSUPP;
+
+ if (skb_shinfo(skb)->frag_list == NULL) {
+ new = alloc_skb(skb->len + skb_tailroom(skb), GFP_ATOMIC);
+ if (!new)
+ return -ENOMEM;
+ skb_shinfo(skb)->frag_list = new;
+ }
+
+ err = xfrm_xlate64(skb_shinfo(skb)->frag_list, nlh_src);
+ if (err) {
+ if (new) {
+ kfree_skb(new);
+ skb_shinfo(skb)->frag_list = NULL;
+ }
+ return err;
+ }
+
+ return 0;
+}
+
+/* Calculates len of translated 64-bit message. */
+static size_t xfrm_user_rcv_calculate_len64(const struct nlmsghdr *src,
+ struct nlattr *attrs[XFRMA_MAX+1])
+{
+ size_t len = nlmsg_len(src);
+
+ switch (src->nlmsg_type) {
+ case XFRM_MSG_NEWSA:
+ case XFRM_MSG_NEWPOLICY:
+ case XFRM_MSG_ALLOCSPI:
+ case XFRM_MSG_ACQUIRE:
+ case XFRM_MSG_UPDPOLICY:
+ case XFRM_MSG_UPDSA:
+ len += 4;
+ break;
+ case XFRM_MSG_EXPIRE:
+ case XFRM_MSG_POLEXPIRE:
+ len += 8;
+ break;
+ default:
+ break;
+ }
+
+ if (attrs[XFRMA_SA])
+ len += 4;
+ if (attrs[XFRMA_POLICY])
+ len += 4;
+
+ /* XXX: some attrs may need to be realigned
+ * if !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ */
+
+ return len;
+}
+
+static int xfrm_attr_cpy32(void *dst, size_t *pos, const struct nlattr *src,
+ size_t size, int copy_len, int payload)
+{
+ struct nlmsghdr *nlmsg = dst;
+ struct nlattr *nla;
+
+ if (WARN_ON_ONCE(copy_len > payload))
+ copy_len = payload;
+
+ if (size - *pos < nla_attr_size(payload))
+ return -ENOBUFS;
+
+ nla = dst + *pos;
+
+ memcpy(nla, src, nla_attr_size(copy_len));
+ nla->nla_len = nla_attr_size(payload);
+ *pos += nla_attr_size(payload);
+ nlmsg->nlmsg_len += nla->nla_len;
+
+ memset(dst + *pos, 0, payload - copy_len);
+ *pos += payload - copy_len;
+
+ return 0;
+}
+
+static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla,
+ size_t *pos, size_t size,
+ struct netlink_ext_ack *extack)
+{
+ int type = nla_type(nla);
+ u16 pol_len32, pol_len64;
+ int err;
+
+ if (type > XFRMA_MAX) {
+ BUILD_BUG_ON(XFRMA_MAX != XFRMA_IF_ID);
+ NL_SET_ERR_MSG(extack, "Bad attribute");
+ return -EOPNOTSUPP;
+ }
+ if (nla_len(nla) < compat_policy[type].len) {
+ NL_SET_ERR_MSG(extack, "Attribute bad length");
+ return -EOPNOTSUPP;
+ }
+
+ pol_len32 = compat_policy[type].len;
+ pol_len64 = xfrma_policy[type].len;
+
+ /* XFRMA_SA and XFRMA_POLICY - need to know how to translate */
+ if (pol_len32 != pol_len64) {
+ if (nla_len(nla) != compat_policy[type].len) {
+ NL_SET_ERR_MSG(extack, "Attribute bad length");
+ return -EOPNOTSUPP;
+ }
+ err = xfrm_attr_cpy32(dst, pos, nla, size, pol_len32, pol_len64);
+ if (err)
+ return err;
+ }
+
+ return xfrm_attr_cpy32(dst, pos, nla, size, nla_len(nla), nla_len(nla));
+}
+
+static int xfrm_xlate32(struct nlmsghdr *dst, const struct nlmsghdr *src,
+ struct nlattr *attrs[XFRMA_MAX+1],
+ size_t size, u8 type, struct netlink_ext_ack *extack)
+{
+ size_t pos;
+ int i;
+
+ memcpy(dst, src, NLMSG_HDRLEN);
+ dst->nlmsg_len = NLMSG_HDRLEN + xfrm_msg_min[type];
+ memset(nlmsg_data(dst), 0, xfrm_msg_min[type]);
+
+ switch (src->nlmsg_type) {
+ /* Compat message has the same layout as native */
+ case XFRM_MSG_DELSA:
+ case XFRM_MSG_GETSA:
+ case XFRM_MSG_DELPOLICY:
+ case XFRM_MSG_GETPOLICY:
+ case XFRM_MSG_FLUSHSA:
+ case XFRM_MSG_FLUSHPOLICY:
+ case XFRM_MSG_NEWAE:
+ case XFRM_MSG_GETAE:
+ case XFRM_MSG_REPORT:
+ case XFRM_MSG_MIGRATE:
+ case XFRM_MSG_NEWSADINFO:
+ case XFRM_MSG_GETSADINFO:
+ case XFRM_MSG_NEWSPDINFO:
+ case XFRM_MSG_GETSPDINFO:
+ case XFRM_MSG_MAPPING:
+ memcpy(nlmsg_data(dst), nlmsg_data(src), compat_msg_min[type]);
+ break;
+ /* 4-byte alignment for trailing u64 on native, but not on compat */
+ case XFRM_MSG_NEWSA:
+ case XFRM_MSG_NEWPOLICY:
+ case XFRM_MSG_UPDSA:
+ case XFRM_MSG_UPDPOLICY:
+ memcpy(nlmsg_data(dst), nlmsg_data(src), compat_msg_min[type]);
+ break;
+ case XFRM_MSG_EXPIRE: {
+ const struct compat_xfrm_user_expire *src_ue = nlmsg_data(src);
+ struct xfrm_user_expire *dst_ue = nlmsg_data(dst);
+
+ /* compat_xfrm_user_expire has 4-byte smaller state */
+ memcpy(dst_ue, src_ue, sizeof(src_ue->state));
+ dst_ue->hard = src_ue->hard;
+ break;
+ }
+ case XFRM_MSG_ACQUIRE: {
+ const struct compat_xfrm_user_acquire *src_ua = nlmsg_data(src);
+ struct xfrm_user_acquire *dst_ua = nlmsg_data(dst);
+
+ memcpy(dst_ua, src_ua, offsetof(struct compat_xfrm_user_acquire, aalgos));
+ dst_ua->aalgos = src_ua->aalgos;
+ dst_ua->ealgos = src_ua->ealgos;
+ dst_ua->calgos = src_ua->calgos;
+ dst_ua->seq = src_ua->seq;
+ break;
+ }
+ case XFRM_MSG_POLEXPIRE: {
+ const struct compat_xfrm_user_polexpire *src_upe = nlmsg_data(src);
+ struct xfrm_user_polexpire *dst_upe = nlmsg_data(dst);
+
+ /* compat_xfrm_user_polexpire has 4-byte smaller pol */
+ memcpy(dst_upe, src_upe, sizeof(src_upe->pol));
+ dst_upe->hard = src_upe->hard;
+ break;
+ }
+ case XFRM_MSG_ALLOCSPI: {
+ const struct compat_xfrm_userspi_info *src_usi = nlmsg_data(src);
+ struct xfrm_userspi_info *dst_usi = nlmsg_data(dst);
+
+ /* compat_xfrm_userspi_info has 4-byte smaller info */
+ memcpy(dst_usi, src_usi, sizeof(src_usi->info));
+ dst_usi->min = src_usi->min;
+ dst_usi->max = src_usi->max;
+ break;
+ }
+ default:
+ NL_SET_ERR_MSG(extack, "Unsupported message type");
+ return -EOPNOTSUPP;
+ }
+ pos = dst->nlmsg_len;
+
+ for (i = 1; i < XFRMA_MAX + 1; i++) {
+ int err;
+
+ if (i == XFRMA_PAD)
+ continue;
+
+ if (!attrs[i])
+ continue;
+
+ err = xfrm_xlate32_attr(dst, attrs[i], &pos, size, extack);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct nlmsghdr *xfrm_user_rcv_msg_compat(const struct nlmsghdr *h32,
+ int maxtype, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
+{
+ /* netlink_rcv_skb() checks that the message has a full (struct nlmsghdr) */
+ u16 type = h32->nlmsg_type - XFRM_MSG_BASE;
+ struct nlattr *attrs[XFRMA_MAX+1];
+ struct nlmsghdr *h64;
+ size_t len;
+ int err;
+
+ BUILD_BUG_ON(ARRAY_SIZE(xfrm_msg_min) != ARRAY_SIZE(compat_msg_min));
+
+ if (type >= ARRAY_SIZE(xfrm_msg_min))
+ return ERR_PTR(-EINVAL);
+
+ /* Don't call parse: the message might have only nlmsg header */
+ if ((h32->nlmsg_type == XFRM_MSG_GETSA ||
+ h32->nlmsg_type == XFRM_MSG_GETPOLICY) &&
+ (h32->nlmsg_flags & NLM_F_DUMP))
+ return NULL;
+
+ err = nlmsg_parse_deprecated(h32, compat_msg_min[type], attrs,
+ maxtype ? : XFRMA_MAX, policy ? : compat_policy, extack);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ len = xfrm_user_rcv_calculate_len64(h32, attrs);
+ /* The message doesn't need translation */
+ if (len == nlmsg_len(h32))
+ return NULL;
+
+ len += NLMSG_HDRLEN;
+ h64 = kvmalloc(len, GFP_KERNEL | __GFP_ZERO);
+ if (!h64)
+ return ERR_PTR(-ENOMEM);
+
+ err = xfrm_xlate32(h64, h32, attrs, len, type, extack);
+ if (err < 0) {
+ kvfree(h64);
+ return ERR_PTR(err);
+ }
+
+ return h64;
+}
+
+static int xfrm_user_policy_compat(u8 **pdata32, int optlen)
+{
+ struct compat_xfrm_userpolicy_info *p = (void *)*pdata32;
+ u8 *src_templates, *dst_templates;
+ u8 *data64;
+
+ if (optlen < sizeof(*p))
+ return -EINVAL;
+
+ data64 = kmalloc_track_caller(optlen + 4, GFP_USER | __GFP_NOWARN);
+ if (!data64)
+ return -ENOMEM;
+
+ memcpy(data64, *pdata32, sizeof(*p));
+ memset(data64 + sizeof(*p), 0, 4);
+
+ src_templates = *pdata32 + sizeof(*p);
+ dst_templates = data64 + sizeof(*p) + 4;
+ memcpy(dst_templates, src_templates, optlen - sizeof(*p));
+
+ kfree(*pdata32);
+ *pdata32 = data64;
+ return 0;
+}
+
+static struct xfrm_translator xfrm_translator = {
+ .owner = THIS_MODULE,
+ .alloc_compat = xfrm_alloc_compat,
+ .rcv_msg_compat = xfrm_user_rcv_msg_compat,
+ .xlate_user_policy_sockptr = xfrm_user_policy_compat,
+};
+
+static int __init xfrm_compat_init(void)
+{
+ return xfrm_register_translator(&xfrm_translator);
+}
+
+static void __exit xfrm_compat_exit(void)
+{
+ xfrm_unregister_translator(&xfrm_translator);
+}
+
+module_init(xfrm_compat_init);
+module_exit(xfrm_compat_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Dmitry Safonov");
+MODULE_DESCRIPTION("XFRM 32-bit compatibility layer");
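
Why every compat structure here is 4 bytes smaller: a trailing or embedded u64 raises a struct's alignment to 8 on 64-bit ABIs and so adds tail padding, while i386 aligns u64 to 4. A standalone sketch (names are illustrative, not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* models the kernel's compat_u64: i386 alignment rules for u64 */
    typedef uint64_t compat_u64 __attribute__((aligned(4)));

    struct native_like {           /* layout seen by 64-bit userspace */
            uint32_t spi;
            uint64_t packets;      /* 8-byte aligned -> 4 bytes tail padding */
    };

    struct compat_like {           /* layout seen by 32-bit (i386) userspace */
            uint32_t spi;
            compat_u64 packets;    /* 4-byte aligned -> no padding */
    };

    int main(void)
    {
            /* prints "16 vs 12" on x86_64: the per-struct 4-byte delta
             * that xfrm_user_rcv_calculate_len64() adds back */
            printf("%zu vs %zu\n", sizeof(struct native_like),
                   sizeof(struct compat_like));
            return 0;
    }
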
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index a8f66112c52b..aa4cdcf69d47 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -210,7 +210,6 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
const struct xfrm_mode *inner_mode;
- struct pcpu_sw_netstats *tstats;
struct net_device *dev;
struct xfrm_state *x;
struct xfrm_if *xi;
@@ -255,13 +254,7 @@ static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
}
xfrmi_scrub_packet(skb, xnet);
-
- tstats = this_cpu_ptr(dev->tstats);
-
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += skb->len;
- u64_stats_update_end(&tstats->syncp);
+ dev_sw_netstats_rx_add(dev, skb->len);
return 0;
}
@@ -548,27 +541,7 @@ static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
static void xfrmi_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *s)
{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct pcpu_sw_netstats *stats;
- struct pcpu_sw_netstats tmp;
- int start;
-
- stats = per_cpu_ptr(dev->tstats, cpu);
- do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
- tmp.rx_packets = stats->rx_packets;
- tmp.rx_bytes = stats->rx_bytes;
- tmp.tx_packets = stats->tx_packets;
- tmp.tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
-
- s->rx_packets += tmp.rx_packets;
- s->rx_bytes += tmp.rx_bytes;
- s->tx_packets += tmp.tx_packets;
- s->tx_bytes += tmp.tx_bytes;
- }
+ dev_fetch_sw_netstats(s, dev->tstats);
s->rx_dropped = dev->stats.rx_dropped;
s->tx_dropped = dev->stats.tx_dropped;
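
Both hunks above swap open-coded per-CPU counter handling for the new shared helpers. dev_sw_netstats_rx_add() is, in essence, exactly the code it deletes; roughly (a sketch of the RX helper, cf. include/linux/netdevice.h):

    static inline void dev_sw_netstats_rx_add(struct net_device *dev,
                                              unsigned int len)
    {
            struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

            u64_stats_update_begin(&tstats->syncp);
            tstats->rx_packets++;
            tstats->rx_bytes += len;
            u64_stats_update_end(&tstats->syncp);
    }

dev_fetch_sw_netstats() likewise wraps the deleted for_each_possible_cpu() aggregation loop.
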
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index efc89a92961d..bbd4643d7e82 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2296,6 +2296,66 @@ static bool km_is_alive(const struct km_event *c)
return is_alive;
}
+#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
+static DEFINE_SPINLOCK(xfrm_translator_lock);
+static struct xfrm_translator __rcu *xfrm_translator;
+
+struct xfrm_translator *xfrm_get_translator(void)
+{
+ struct xfrm_translator *xtr;
+
+ rcu_read_lock();
+ xtr = rcu_dereference(xfrm_translator);
+ if (unlikely(!xtr))
+ goto out;
+ if (!try_module_get(xtr->owner))
+ xtr = NULL;
+out:
+ rcu_read_unlock();
+ return xtr;
+}
+EXPORT_SYMBOL_GPL(xfrm_get_translator);
+
+void xfrm_put_translator(struct xfrm_translator *xtr)
+{
+ module_put(xtr->owner);
+}
+EXPORT_SYMBOL_GPL(xfrm_put_translator);
+
+int xfrm_register_translator(struct xfrm_translator *xtr)
+{
+ int err = 0;
+
+ spin_lock_bh(&xfrm_translator_lock);
+ if (unlikely(xfrm_translator != NULL))
+ err = -EEXIST;
+ else
+ rcu_assign_pointer(xfrm_translator, xtr);
+ spin_unlock_bh(&xfrm_translator_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xfrm_register_translator);
+
+int xfrm_unregister_translator(struct xfrm_translator *xtr)
+{
+ int err = 0;
+
+ spin_lock_bh(&xfrm_translator_lock);
+ if (likely(xfrm_translator != NULL)) {
+ if (rcu_access_pointer(xfrm_translator) != xtr)
+ err = -EINVAL;
+ else
+ RCU_INIT_POINTER(xfrm_translator, NULL);
+ }
+ spin_unlock_bh(&xfrm_translator_lock);
+ synchronize_rcu();
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
+#endif
+
int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
{
int err;
@@ -2303,9 +2363,6 @@ int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
struct xfrm_mgr *km;
struct xfrm_policy *pol = NULL;
- if (in_compat_syscall())
- return -EOPNOTSUPP;
-
if (sockptr_is_null(optval) && !optlen) {
xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
@@ -2320,6 +2377,20 @@ int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
if (IS_ERR(data))
return PTR_ERR(data);
+ if (in_compat_syscall()) {
+ struct xfrm_translator *xtr = xfrm_get_translator();
+
+ if (!xtr)
+ return -EOPNOTSUPP;
+
+ err = xtr->xlate_user_policy_sockptr(&data, optlen);
+ xfrm_put_translator(xtr);
+ if (err) {
+ kfree(data);
+ return err;
+ }
+ }
+
err = -EINVAL;
rcu_read_lock();
list_for_each_entry_rcu(km, &xfrm_km_list, list) {
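
The helpers above establish a get/use/put contract: xfrm_get_translator() takes an RCU-protected reference that pins xfrm_compat.ko via try_module_get(), and unregistration waits out in-flight readers with synchronize_rcu(). Every caller added in xfrm_user.c below follows the same shape; schematically:

    struct xfrm_translator *xtr = xfrm_get_translator();

    if (xtr) {                            /* NULL: translator not loaded */
            err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
            xfrm_put_translator(xtr);     /* drops the module reference */
    }
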
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index fbb7d9d06478..d0c32a8fcc4a 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -975,6 +975,7 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
struct xfrm_dump_info *sp = ptr;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
+ struct xfrm_translator *xtr;
struct xfrm_usersa_info *p;
struct nlmsghdr *nlh;
int err;
@@ -992,6 +993,18 @@ static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
return err;
}
nlmsg_end(skb, nlh);
+
+ xtr = xfrm_get_translator();
+ if (xtr) {
+ err = xtr->alloc_compat(skb, nlh);
+
+ xfrm_put_translator(xtr);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
+ }
+
return 0;
}
@@ -1006,7 +1019,6 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
return 0;
}
-static const struct nla_policy xfrma_policy[XFRMA_MAX+1];
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -1083,12 +1095,24 @@ static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
u32 pid, unsigned int group)
{
struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
+ struct xfrm_translator *xtr;
if (!nlsk) {
kfree_skb(skb);
return -EPIPE;
}
+ xtr = xfrm_get_translator();
+ if (xtr) {
+ int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
+
+ xfrm_put_translator(xtr);
+ if (err) {
+ kfree_skb(skb);
+ return err;
+ }
+ }
+
return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}
@@ -1308,6 +1332,7 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct xfrm_state *x;
struct xfrm_userspi_info *p;
+ struct xfrm_translator *xtr;
struct sk_buff *resp_skb;
xfrm_address_t *daddr;
int family;
@@ -1358,6 +1383,17 @@ static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;
}
+ xtr = xfrm_get_translator();
+ if (xtr) {
+ err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
+
+ xfrm_put_translator(xtr);
+ if (err) {
+ kfree_skb(resp_skb);
+ goto out;
+ }
+ }
+
err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
out:
@@ -1764,6 +1800,7 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
struct xfrm_userpolicy_info *p;
struct sk_buff *in_skb = sp->in_skb;
struct sk_buff *skb = sp->out_skb;
+ struct xfrm_translator *xtr;
struct nlmsghdr *nlh;
int err;
@@ -1788,6 +1825,18 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr
return err;
}
nlmsg_end(skb, nlh);
+
+ xtr = xfrm_get_translator();
+ if (xtr) {
+ err = xtr->alloc_compat(skb, nlh);
+
+ xfrm_put_translator(xtr);
+ if (err) {
+ nlmsg_cancel(skb, nlh);
+ return err;
+ }
+ }
+
return 0;
}
@@ -2533,7 +2582,7 @@ static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
#define XMSGSIZE(type) sizeof(struct type)
-static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
+const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
@@ -2556,10 +2605,11 @@ static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};
+EXPORT_SYMBOL_GPL(xfrm_msg_min);
#undef XMSGSIZE
-static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SA] = { .len = sizeof(struct xfrm_usersa_info)},
[XFRMA_POLICY] = { .len = sizeof(struct xfrm_userpolicy_info)},
[XFRMA_LASTUSED] = { .type = NLA_U64},
@@ -2591,6 +2641,7 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
[XFRMA_IF_ID] = { .type = NLA_U32 },
};
+EXPORT_SYMBOL_GPL(xfrma_policy);
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
@@ -2640,11 +2691,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net *net = sock_net(skb->sk);
struct nlattr *attrs[XFRMA_MAX+1];
const struct xfrm_link *link;
+ struct nlmsghdr *nlh64 = NULL;
int type, err;
- if (in_compat_syscall())
- return -EOPNOTSUPP;
-
type = nlh->nlmsg_type;
if (type > XFRM_MSG_MAX)
return -EINVAL;
@@ -2656,32 +2705,55 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
if (!netlink_net_capable(skb, CAP_NET_ADMIN))
return -EPERM;
+ if (in_compat_syscall()) {
+ struct xfrm_translator *xtr = xfrm_get_translator();
+
+ if (!xtr)
+ return -EOPNOTSUPP;
+
+ nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
+ link->nla_pol, extack);
+ xfrm_put_translator(xtr);
+ if (IS_ERR(nlh64))
+ return PTR_ERR(nlh64);
+ if (nlh64)
+ nlh = nlh64;
+ }
+
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
(nlh->nlmsg_flags & NLM_F_DUMP)) {
- if (link->dump == NULL)
- return -EINVAL;
+ struct netlink_dump_control c = {
+ .start = link->start,
+ .dump = link->dump,
+ .done = link->done,
+ };
- {
- struct netlink_dump_control c = {
- .start = link->start,
- .dump = link->dump,
- .done = link->done,
- };
- return netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
+ if (link->dump == NULL) {
+ err = -EINVAL;
+ goto err;
}
+
+ err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
+ goto err;
}
err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
link->nla_max ? : XFRMA_MAX,
link->nla_pol ? : xfrma_policy, extack);
if (err < 0)
- return err;
+ goto err;
- if (link->doit == NULL)
- return -EINVAL;
+ if (link->doit == NULL) {
+ err = -EINVAL;
+ goto err;
+ }
- return link->doit(skb, nlh, attrs);
+ err = link->doit(skb, nlh, attrs);
+
+err:
+ kvfree(nlh64);
+ return err;
}
static void xfrm_netlink_rcv(struct sk_buff *skb)
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
index 034800c4d1e6..b2f29bc8dc43 100644
--- a/samples/bpf/.gitignore
+++ b/samples/bpf/.gitignore
@@ -50,4 +50,5 @@ xdp_rxq_info
xdp_sample_pkts
xdp_tx_iptunnel
xdpsock
+xsk_fwd
testfile.img
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index f87ee02073ba..aeebf5d12f32 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -48,6 +48,7 @@ tprogs-y += syscall_tp
tprogs-y += cpustat
tprogs-y += xdp_adjust_tail
tprogs-y += xdpsock
+tprogs-y += xsk_fwd
tprogs-y += xdp_fwd
tprogs-y += task_fd_query
tprogs-y += xdp_sample_pkts
@@ -71,12 +72,12 @@ tracex4-objs := tracex4_user.o
tracex5-objs := tracex5_user.o $(TRACE_HELPERS)
tracex6-objs := tracex6_user.o
tracex7-objs := tracex7_user.o
-test_probe_write_user-objs := bpf_load.o test_probe_write_user_user.o
-trace_output-objs := bpf_load.o trace_output_user.o $(TRACE_HELPERS)
-lathist-objs := bpf_load.o lathist_user.o
-offwaketime-objs := bpf_load.o offwaketime_user.o $(TRACE_HELPERS)
-spintest-objs := bpf_load.o spintest_user.o $(TRACE_HELPERS)
-map_perf_test-objs := bpf_load.o map_perf_test_user.o
+test_probe_write_user-objs := test_probe_write_user_user.o
+trace_output-objs := trace_output_user.o $(TRACE_HELPERS)
+lathist-objs := lathist_user.o
+offwaketime-objs := offwaketime_user.o $(TRACE_HELPERS)
+spintest-objs := spintest_user.o $(TRACE_HELPERS)
+map_perf_test-objs := map_perf_test_user.o
test_overhead-objs := bpf_load.o test_overhead_user.o
test_cgrp2_array_pin-objs := test_cgrp2_array_pin.o
test_cgrp2_attach-objs := test_cgrp2_attach.o
@@ -86,7 +87,7 @@ xdp1-objs := xdp1_user.o
# reuse xdp1 source intentionally
xdp2-objs := xdp1_user.o
xdp_router_ipv4-objs := xdp_router_ipv4_user.o
-test_current_task_under_cgroup-objs := bpf_load.o $(CGROUP_HELPERS) \
+test_current_task_under_cgroup-objs := $(CGROUP_HELPERS) \
test_current_task_under_cgroup_user.o
trace_event-objs := trace_event_user.o $(TRACE_HELPERS)
sampleip-objs := sampleip_user.o $(TRACE_HELPERS)
@@ -97,13 +98,14 @@ test_map_in_map-objs := test_map_in_map_user.o
per_socket_stats_example-objs := cookie_uid_helper_example.o
xdp_redirect-objs := xdp_redirect_user.o
xdp_redirect_map-objs := xdp_redirect_map_user.o
-xdp_redirect_cpu-objs := bpf_load.o xdp_redirect_cpu_user.o
-xdp_monitor-objs := bpf_load.o xdp_monitor_user.o
+xdp_redirect_cpu-objs := xdp_redirect_cpu_user.o
+xdp_monitor-objs := xdp_monitor_user.o
xdp_rxq_info-objs := xdp_rxq_info_user.o
-syscall_tp-objs := bpf_load.o syscall_tp_user.o
-cpustat-objs := bpf_load.o cpustat_user.o
+syscall_tp-objs := syscall_tp_user.o
+cpustat-objs := cpustat_user.o
xdp_adjust_tail-objs := xdp_adjust_tail_user.o
xdpsock-objs := xdpsock_user.o
+xsk_fwd-objs := xsk_fwd.o
xdp_fwd-objs := xdp_fwd_user.o
task_fd_query-objs := bpf_load.o task_fd_query_user.o $(TRACE_HELPERS)
xdp_sample_pkts-objs := xdp_sample_pkts_user.o $(TRACE_HELPERS)
@@ -203,11 +205,14 @@ TPROGLDLIBS_trace_output += -lrt
TPROGLDLIBS_map_perf_test += -lrt
TPROGLDLIBS_test_overhead += -lrt
TPROGLDLIBS_xdpsock += -pthread
+TPROGLDLIBS_xsk_fwd += -pthread
# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
# make M=samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
LLC ?= llc
CLANG ?= clang
+OPT ?= opt
+LLVM_DIS ?= llvm-dis
LLVM_OBJCOPY ?= llvm-objcopy
BTF_PAHOLE ?= pahole
@@ -300,6 +305,11 @@ $(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
# But, there is no easy way to fix it, so just exclude it since it is
# useless for BPF samples.
+# Below we use a long chain of commands, clang | opt | llvm-dis | llc,
+# to generate the final object file. 'clang' compiles the source into IR
+# for the native target, e.g., x64, arm64, etc. 'opt' performs BPF CO-RE
+# builtin processing (LLVM 12) and IR optimizations. 'llvm-dis' converts
+# the 'opt' bitcode output back to textual IR, and finally 'llc' generates
+# the BPF byte code.
$(obj)/%.o: $(src)/%.c
@echo " CLANG-bpf " $@
$(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
@@ -311,7 +321,9 @@ $(obj)/%.o: $(src)/%.c
-Wno-address-of-packed-member -Wno-tautological-compare \
-Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
-I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
- -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
+ -O2 -emit-llvm -Xclang -disable-llvm-passes -c $< -o - | \
+ $(OPT) -O2 -mtriple=bpf-pc-linux | $(LLVM_DIS) | \
+ $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
ifeq ($(DWARF2BTF),y)
$(BTF_PAHOLE) -J $@
endif
diff --git a/samples/bpf/cpustat_kern.c b/samples/bpf/cpustat_kern.c
index a86a19d5f033..5aefd19cdfa1 100644
--- a/samples/bpf/cpustat_kern.c
+++ b/samples/bpf/cpustat_kern.c
@@ -51,28 +51,28 @@ static int cpu_opps[] = { 208000, 432000, 729000, 960000, 1200000 };
#define MAP_OFF_PSTATE_IDX 3
#define MAP_OFF_NUM 4
-struct bpf_map_def SEC("maps") my_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = MAX_CPU * MAP_OFF_NUM,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU * MAP_OFF_NUM);
+} my_map SEC(".maps");
/* cstate_duration records duration time for every idle state per CPU */
-struct bpf_map_def SEC("maps") cstate_duration = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = MAX_CPU * MAX_CSTATE_ENTRIES,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU * MAX_CSTATE_ENTRIES);
+} cstate_duration SEC(".maps");
/* pstate_duration records duration time for every operating point per CPU */
-struct bpf_map_def SEC("maps") pstate_duration = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = MAX_CPU * MAX_PSTATE_ENTRIES,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU * MAX_PSTATE_ENTRIES);
+} pstate_duration SEC(".maps");
/*
* The trace events for cpu_idle and cpu_frequency are taken from:
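
This and the remaining samples all apply the same mechanical conversion from the legacy struct bpf_map_def to libbpf's BTF-defined maps in the .maps section (macros from <bpf/bpf_helpers.h>); __type() records the actual key/value C types in BTF instead of only their sizes. Schematically:

    /* legacy field                    BTF-defined equivalent
     * .type        = T;         ->    __uint(type, T);
     * .key_size    = sizeof(K); ->    __type(key, K);
     * .value_size  = sizeof(V); ->    __type(value, V);
     * .max_entries = N;         ->    __uint(max_entries, N);
     */
    struct {
            __uint(type, BPF_MAP_TYPE_ARRAY);
            __type(key, u32);
            __type(value, u64);
            __uint(max_entries, 1);
    } example_map SEC(".maps");        /* map name is illustrative */
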
diff --git a/samples/bpf/cpustat_user.c b/samples/bpf/cpustat_user.c
index 869a99406dbf..96675985e9e0 100644
--- a/samples/bpf/cpustat_user.c
+++ b/samples/bpf/cpustat_user.c
@@ -9,7 +9,6 @@
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
-#include <linux/bpf.h>
#include <locale.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -18,7 +17,9 @@
#include <sys/wait.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
+
+static int cstate_map_fd, pstate_map_fd;
#define MAX_CPU 8
#define MAX_PSTATE_ENTRIES 5
@@ -181,21 +182,50 @@ static void int_exit(int sig)
{
cpu_stat_inject_cpu_idle_event();
cpu_stat_inject_cpu_frequency_event();
- cpu_stat_update(map_fd[1], map_fd[2]);
+ cpu_stat_update(cstate_map_fd, pstate_map_fd);
cpu_stat_print();
exit(0);
}
int main(int argc, char **argv)
{
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
char filename[256];
int ret;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 1;
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ cstate_map_fd = bpf_object__find_map_fd_by_name(obj, "cstate_duration");
+ pstate_map_fd = bpf_object__find_map_fd_by_name(obj, "pstate_duration");
+ if (cstate_map_fd < 0 || pstate_map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
}
ret = cpu_stat_inject_cpu_idle_event();
@@ -210,10 +240,13 @@ int main(int argc, char **argv)
signal(SIGTERM, int_exit);
while (1) {
- cpu_stat_update(map_fd[1], map_fd[2]);
+ cpu_stat_update(cstate_map_fd, pstate_map_fd);
cpu_stat_print();
sleep(5);
}
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
return 0;
}
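
All of the user-space conversions in this series replace bpf_load's implicit map_fd[]/prog_fd[] globals with the same explicit libbpf lifecycle; a condensed sketch with error handling trimmed (function name is illustrative):

    #include <bpf/libbpf.h>
    #include <bpf/bpf.h>

    static int run_sample(const char *file, const char *prog_name,
                          const char *map_name)
    {
            struct bpf_object *obj = bpf_object__open_file(file, NULL);
            struct bpf_program *prog;
            struct bpf_link *link;
            int map_fd, err = 1;

            if (libbpf_get_error(obj))
                    return 1;
            if (bpf_object__load(obj))        /* create maps, verify + load */
                    goto out;
            prog = bpf_object__find_program_by_name(obj, prog_name);
            map_fd = bpf_object__find_map_fd_by_name(obj, map_name);
            if (!prog || map_fd < 0)
                    goto out;
            link = bpf_program__attach(prog); /* attach per SEC() annotation */
            if (libbpf_get_error(link))
                    goto out;
            /* ... consume results via bpf_map_lookup_elem(map_fd, ...) ... */
            err = 0;
            bpf_link__destroy(link);
    out:
            bpf_object__close(obj);
            return err;
    }
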
diff --git a/samples/bpf/hbm.c b/samples/bpf/hbm.c
index 4b22ace52f80..ff4c533dfac2 100644
--- a/samples/bpf/hbm.c
+++ b/samples/bpf/hbm.c
@@ -40,6 +40,7 @@
#include <errno.h>
#include <fcntl.h>
#include <linux/unistd.h>
+#include <linux/compiler.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
@@ -483,7 +484,7 @@ int main(int argc, char **argv)
"Option -%c requires an argument.\n\n",
optopt);
case 'h':
- fallthrough;
+ __fallthrough;
default:
Usage();
return 0;
diff --git a/samples/bpf/lathist_kern.c b/samples/bpf/lathist_kern.c
index ca9c2e4e69aa..4adfcbbe6ef4 100644
--- a/samples/bpf/lathist_kern.c
+++ b/samples/bpf/lathist_kern.c
@@ -18,12 +18,12 @@
* trace_preempt_[on|off] tracepoints hooks is not supported.
*/
-struct bpf_map_def SEC("maps") my_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u64),
- .max_entries = MAX_CPU,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, u64);
+ __uint(max_entries, MAX_CPU);
+} my_map SEC(".maps");
SEC("kprobe/trace_preempt_off")
int bpf_prog1(struct pt_regs *ctx)
@@ -61,12 +61,12 @@ static unsigned int log2l(unsigned long v)
return log2(v);
}
-struct bpf_map_def SEC("maps") my_lat = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(long),
- .max_entries = MAX_CPU * MAX_ENTRIES,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, long);
+ __uint(max_entries, MAX_CPU * MAX_ENTRIES);
+} my_lat SEC(".maps");
SEC("kprobe/trace_preempt_on")
int bpf_prog2(struct pt_regs *ctx)
diff --git a/samples/bpf/lathist_user.c b/samples/bpf/lathist_user.c
index 2ff2839a52d5..7d8ff2418303 100644
--- a/samples/bpf/lathist_user.c
+++ b/samples/bpf/lathist_user.c
@@ -6,9 +6,8 @@
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
-#include <linux/bpf.h>
+#include <bpf/libbpf.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
#define MAX_ENTRIES 20
#define MAX_CPU 4
@@ -81,20 +80,51 @@ static void get_data(int fd)
int main(int argc, char **argv)
{
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ struct bpf_object *obj;
char filename[256];
+ int map_fd, i = 0;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 1;
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_lat");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ links[i] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[i] = NULL;
+ goto cleanup;
+ }
+ i++;
}
while (1) {
- get_data(map_fd[1]);
+ get_data(map_fd);
print_hist();
sleep(5);
}
+cleanup:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+
+ bpf_object__close(obj);
return 0;
}
diff --git a/samples/bpf/offwaketime_kern.c b/samples/bpf/offwaketime_kern.c
index e74ee1cd4b9c..14b792915a9c 100644
--- a/samples/bpf/offwaketime_kern.c
+++ b/samples/bpf/offwaketime_kern.c
@@ -28,38 +28,38 @@ struct key_t {
u32 tret;
};
-struct bpf_map_def SEC("maps") counts = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct key_t),
- .value_size = sizeof(u64),
- .max_entries = 10000,
-};
-
-struct bpf_map_def SEC("maps") start = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = 10000,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct key_t);
+ __type(value, u64);
+ __uint(max_entries, 10000);
+} counts SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, 10000);
+} start SEC(".maps");
struct wokeby_t {
char name[TASK_COMM_LEN];
u32 ret;
};
-struct bpf_map_def SEC("maps") wokeby = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct wokeby_t),
- .max_entries = 10000,
-};
-
-struct bpf_map_def SEC("maps") stackmap = {
- .type = BPF_MAP_TYPE_STACK_TRACE,
- .key_size = sizeof(u32),
- .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
- .max_entries = 10000,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, struct wokeby_t);
+ __uint(max_entries, 10000);
+} wokeby SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+ __uint(max_entries, 10000);
+} stackmap SEC(".maps");
#define STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
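
Note the asymmetry kept in this conversion: the hash maps switch to __type(key/value, ...) while the stack-trace map keeps numeric __uint(key_size/value_size, ...). The assumption behind this (judging by libbpf's behaviour) is that map types without plain C key/value objects, BPF_MAP_TYPE_STACK_TRACE among them, do not accept BTF-typed keys and values, so only the sizes are declared:

    struct {
            __uint(type, BPF_MAP_TYPE_STACK_TRACE);
            __uint(key_size, sizeof(u32));    /* sizes only, no __type() */
            __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
            __uint(max_entries, 10000);
    } stackmap SEC(".maps");
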
diff --git a/samples/bpf/offwaketime_user.c b/samples/bpf/offwaketime_user.c
index 51c7da5341cc..5734cfdaaacb 100644
--- a/samples/bpf/offwaketime_user.c
+++ b/samples/bpf/offwaketime_user.c
@@ -5,19 +5,19 @@
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
-#include <linux/bpf.h>
-#include <string.h>
#include <linux/perf_event.h>
#include <errno.h>
-#include <assert.h>
#include <stdbool.h>
#include <sys/resource.h>
#include <bpf/libbpf.h>
-#include "bpf_load.h"
+#include <bpf/bpf.h>
#include "trace_helpers.h"
#define PRINT_RAW_ADDR 0
+/* counts, stackmap */
+static int map_fd[2];
+
static void print_ksym(__u64 addr)
{
struct ksym *sym;
@@ -52,14 +52,14 @@ static void print_stack(struct key_t *key, __u64 count)
int i;
printf("%s;", key->target);
- if (bpf_map_lookup_elem(map_fd[3], &key->tret, ip) != 0) {
+ if (bpf_map_lookup_elem(map_fd[1], &key->tret, ip) != 0) {
printf("---;");
} else {
for (i = PERF_MAX_STACK_DEPTH - 1; i >= 0; i--)
print_ksym(ip[i]);
}
printf("-;");
- if (bpf_map_lookup_elem(map_fd[3], &key->wret, ip) != 0) {
+ if (bpf_map_lookup_elem(map_fd[1], &key->wret, ip) != 0) {
printf("---;");
} else {
for (i = 0; i < PERF_MAX_STACK_DEPTH; i++)
@@ -96,23 +96,54 @@ static void int_exit(int sig)
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ struct bpf_object *obj = NULL;
+ struct bpf_link *links[2];
+ struct bpf_program *prog;
+ int delay = 1, i = 0;
char filename[256];
- int delay = 1;
-
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- setrlimit(RLIMIT_MEMLOCK, &r);
- signal(SIGINT, int_exit);
- signal(SIGTERM, int_exit);
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
+ return 1;
+ }
if (load_kallsyms()) {
printf("failed to process /proc/kallsyms\n");
return 2;
}
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 1;
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "counts");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "stackmap");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ bpf_object__for_each_program(prog, obj) {
+ links[i] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[i])) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ links[i] = NULL;
+ goto cleanup;
+ }
+ i++;
}
if (argc > 1)
@@ -120,5 +151,10 @@ int main(int argc, char **argv)
sleep(delay);
print_stacks(map_fd[0]);
+cleanup:
+ for (i--; i >= 0; i--)
+ bpf_link__destroy(links[i]);
+
+ bpf_object__close(obj);
return 0;
}
diff --git a/samples/bpf/sockex3_kern.c b/samples/bpf/sockex3_kern.c
index cab9cca0b8eb..8142d02b33e6 100644
--- a/samples/bpf/sockex3_kern.c
+++ b/samples/bpf/sockex3_kern.c
@@ -31,28 +31,30 @@ struct {
#define PARSE_IP 3
#define PARSE_IPV6 4
-/* protocol dispatch routine.
- * It tail-calls next BPF program depending on eth proto
- * Note, we could have used:
- * bpf_tail_call(skb, &jmp_table, proto);
- * but it would need large prog_array
+/* Protocol dispatch routine. It tail-calls the next BPF program depending
+ * on the eth proto. Note, we could have used ...
+ *
+ * bpf_tail_call(skb, &jmp_table, proto);
+ *
+ * ... but it would need a large prog_array and cannot be optimized given
+ * that the map key is not static.
*/
static inline void parse_eth_proto(struct __sk_buff *skb, u32 proto)
{
switch (proto) {
case ETH_P_8021Q:
case ETH_P_8021AD:
- bpf_tail_call(skb, &jmp_table, PARSE_VLAN);
+ bpf_tail_call_static(skb, &jmp_table, PARSE_VLAN);
break;
case ETH_P_MPLS_UC:
case ETH_P_MPLS_MC:
- bpf_tail_call(skb, &jmp_table, PARSE_MPLS);
+ bpf_tail_call_static(skb, &jmp_table, PARSE_MPLS);
break;
case ETH_P_IP:
- bpf_tail_call(skb, &jmp_table, PARSE_IP);
+ bpf_tail_call_static(skb, &jmp_table, PARSE_IP);
break;
case ETH_P_IPV6:
- bpf_tail_call(skb, &jmp_table, PARSE_IPV6);
+ bpf_tail_call_static(skb, &jmp_table, PARSE_IPV6);
break;
}
}
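
bpf_tail_call_static() (from libbpf's bpf_helpers.h) requires the slot index to be a compile-time constant; as I understand it, this guarantees the index is visible to the verifier as an immediate, which keeps the tail call eligible for the JIT's direct-jump optimization, while a runtime-computed index must stay with plain bpf_tail_call(). A minimal sketch reusing the jmp_table and PARSE_* constants above:

    SEC("socket")
    int dispatch_sketch(struct __sk_buff *skb)
    {
            /* constant index: the static variant is usable */
            bpf_tail_call_static(skb, &jmp_table, PARSE_IP);

            /* only reached if the tail call fails (e.g. empty slot) */
            return 0;
    }
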
diff --git a/samples/bpf/sockex3_user.c b/samples/bpf/sockex3_user.c
index 4dbee7427d47..7793f6a6ae7e 100644
--- a/samples/bpf/sockex3_user.c
+++ b/samples/bpf/sockex3_user.c
@@ -29,8 +29,8 @@ int main(int argc, char **argv)
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
struct bpf_program *prog;
struct bpf_object *obj;
+ const char *section;
char filename[256];
- const char *title;
FILE *f;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
@@ -58,8 +58,8 @@ int main(int argc, char **argv)
bpf_object__for_each_program(prog, obj) {
fd = bpf_program__fd(prog);
- title = bpf_program__title(prog, false);
- if (sscanf(title, "socket/%d", &key) != 1) {
+ section = bpf_program__section_name(prog);
+ if (sscanf(section, "socket/%d", &key) != 1) {
fprintf(stderr, "ERROR: finding prog failed\n");
goto cleanup;
}
diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c
index f508af357251..455da77319d9 100644
--- a/samples/bpf/spintest_kern.c
+++ b/samples/bpf/spintest_kern.c
@@ -12,25 +12,25 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
-struct bpf_map_def SEC("maps") my_map = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(long),
- .value_size = sizeof(long),
- .max_entries = 1024,
-};
-struct bpf_map_def SEC("maps") my_map2 = {
- .type = BPF_MAP_TYPE_PERCPU_HASH,
- .key_size = sizeof(long),
- .value_size = sizeof(long),
- .max_entries = 1024,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, long);
+ __type(value, long);
+ __uint(max_entries, 1024);
+} my_map SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+ __uint(key_size, sizeof(long));
+ __uint(value_size, sizeof(long));
+ __uint(max_entries, 1024);
+} my_map2 SEC(".maps");
-struct bpf_map_def SEC("maps") stackmap = {
- .type = BPF_MAP_TYPE_STACK_TRACE,
- .key_size = sizeof(u32),
- .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
- .max_entries = 10000,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+ __uint(max_entries, 10000);
+} stackmap SEC(".maps");
#define PROG(foo) \
int foo(struct pt_regs *ctx) \
diff --git a/samples/bpf/spintest_user.c b/samples/bpf/spintest_user.c
index fb430ea2ef51..f090d0dc60d6 100644
--- a/samples/bpf/spintest_user.c
+++ b/samples/bpf/spintest_user.c
@@ -1,40 +1,77 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <unistd.h>
-#include <linux/bpf.h>
#include <string.h>
#include <assert.h>
#include <sys/resource.h>
#include <bpf/libbpf.h>
-#include "bpf_load.h"
+#include <bpf/bpf.h>
#include "trace_helpers.h"
int main(int ac, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ char filename[256], symbol[256];
+ struct bpf_object *obj = NULL;
+ struct bpf_link *links[20];
long key, next_key, value;
- char filename[256];
+ struct bpf_program *prog;
+ int map_fd, i, j = 0;
+ const char *section;
struct ksym *sym;
- int i;
- snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- setrlimit(RLIMIT_MEMLOCK, &r);
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ perror("setrlimit(RLIMIT_MEMLOCK)");
+ return 1;
+ }
if (load_kallsyms()) {
printf("failed to process /proc/kallsyms\n");
return 2;
}
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 1;
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, obj) {
+ section = bpf_program__section_name(prog);
+ if (sscanf(section, "kprobe/%s", symbol) != 1)
+ continue;
+
+ /* Attach prog only when symbol exists */
+ if (ksym_get_addr(symbol)) {
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
+ }
+ j++;
+ }
}
for (i = 0; i < 5; i++) {
key = 0;
printf("kprobing funcs:");
- while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0) {
- bpf_map_lookup_elem(map_fd[0], &next_key, &value);
+ while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0) {
+ bpf_map_lookup_elem(map_fd, &next_key, &value);
assert(next_key == value);
sym = ksym_search(value);
key = next_key;
@@ -48,10 +85,15 @@ int main(int ac, char **argv)
if (key)
printf("\n");
key = 0;
- while (bpf_map_get_next_key(map_fd[0], &key, &next_key) == 0)
- bpf_map_delete_elem(map_fd[0], &next_key);
+ while (bpf_map_get_next_key(map_fd, &key, &next_key) == 0)
+ bpf_map_delete_elem(map_fd, &next_key);
sleep(1);
}
+cleanup:
+ for (j--; j >= 0; j--)
+ bpf_link__destroy(links[j]);
+
+ bpf_object__close(obj);
return 0;
}
diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c
index 5a62b03b1f88..50231c2eff9c 100644
--- a/samples/bpf/syscall_tp_kern.c
+++ b/samples/bpf/syscall_tp_kern.c
@@ -18,19 +18,19 @@ struct syscalls_exit_open_args {
long ret;
};
-struct bpf_map_def SEC("maps") enter_open_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u32),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} enter_open_map SEC(".maps");
-struct bpf_map_def SEC("maps") exit_open_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u32),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u32);
+ __uint(max_entries, 1);
+} exit_open_map SEC(".maps");
static __always_inline void count(void *map)
{
diff --git a/samples/bpf/syscall_tp_user.c b/samples/bpf/syscall_tp_user.c
index 57014bab7cbe..76a1d00128fb 100644
--- a/samples/bpf/syscall_tp_user.c
+++ b/samples/bpf/syscall_tp_user.c
@@ -5,16 +5,12 @@
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
-#include <signal.h>
-#include <linux/bpf.h>
#include <string.h>
#include <linux/perf_event.h>
#include <errno.h>
-#include <assert.h>
-#include <stdbool.h>
#include <sys/resource.h>
+#include <bpf/libbpf.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
/* This program verifies bpf attachment to tracepoint sys_enter_* and sys_exit_*.
* This requires kernel CONFIG_FTRACE_SYSCALLS to be set.
@@ -49,16 +45,44 @@ static void verify_map(int map_id)
static int test(char *filename, int num_progs)
{
- int i, fd, map0_fds[num_progs], map1_fds[num_progs];
+ int map0_fds[num_progs], map1_fds[num_progs], fd, i, j = 0;
+ struct bpf_link *links[num_progs * 4];
+ struct bpf_object *objs[num_progs];
+ struct bpf_program *prog;
for (i = 0; i < num_progs; i++) {
- if (load_bpf_file(filename)) {
- fprintf(stderr, "%s", bpf_log_buf);
- return 1;
+ objs[i] = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(objs[i])) {
+ fprintf(stderr, "opening BPF object file failed\n");
+ objs[i] = NULL;
+ goto cleanup;
}
- printf("prog #%d: map ids %d %d\n", i, map_fd[0], map_fd[1]);
- map0_fds[i] = map_fd[0];
- map1_fds[i] = map_fd[1];
+
+ /* load BPF program */
+ if (bpf_object__load(objs[i])) {
+ fprintf(stderr, "loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map0_fds[i] = bpf_object__find_map_fd_by_name(objs[i],
+ "enter_open_map");
+ map1_fds[i] = bpf_object__find_map_fd_by_name(objs[i],
+ "exit_open_map");
+ if (map0_fds[i] < 0 || map1_fds[i] < 0) {
+ fprintf(stderr, "finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ bpf_object__for_each_program(prog, objs[i]) {
+ links[j] = bpf_program__attach(prog);
+ if (libbpf_get_error(links[j])) {
+ fprintf(stderr, "bpf_program__attach failed\n");
+ links[j] = NULL;
+ goto cleanup;
+ }
+ j++;
+ }
+ printf("prog #%d: map ids %d %d\n", i, map0_fds[i], map1_fds[i]);
}
/* current load_bpf_file has perf_event_open default pid = -1
@@ -80,6 +104,12 @@ static int test(char *filename, int num_progs)
verify_map(map1_fds[i]);
}
+cleanup:
+ for (j--; j >= 0; j--)
+ bpf_link__destroy(links[j]);
+
+ for (i--; i >= 0; i--)
+ bpf_object__close(objs[i]);
return 0;
}
diff --git a/samples/bpf/task_fd_query_kern.c b/samples/bpf/task_fd_query_kern.c
index 278ade5427c8..c821294e1774 100644
--- a/samples/bpf/task_fd_query_kern.c
+++ b/samples/bpf/task_fd_query_kern.c
@@ -10,7 +10,7 @@ int bpf_prog1(struct pt_regs *ctx)
return 0;
}
-SEC("kretprobe/blk_account_io_completion")
+SEC("kretprobe/blk_account_io_done")
int bpf_prog2(struct pt_regs *ctx)
{
return 0;
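
The retargeting from blk_account_io_completion to blk_account_io_done tracks a rename in the kernel proper; kprobes attach by symbol name with no stability guarantee, exactly as the comment in test_probe_write_user_kern.c below warns. A defensive variant (a sketch using the trace_helpers.h routine spintest_user.c relies on, after load_kallsyms()):

    /* attach the kretprobe only if this kernel still has the symbol */
    if (ksym_get_addr("blk_account_io_done")) {
            link = bpf_program__attach(prog);
            if (libbpf_get_error(link))
                    link = NULL;
    } else {
            fprintf(stderr, "blk_account_io_done not in kallsyms, skipping\n");
    }
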
diff --git a/samples/bpf/task_fd_query_user.c b/samples/bpf/task_fd_query_user.c
index ff2e9c1c7266..4a74531dc403 100644
--- a/samples/bpf/task_fd_query_user.c
+++ b/samples/bpf/task_fd_query_user.c
@@ -314,7 +314,7 @@ int main(int argc, char **argv)
/* test two functions in the corresponding *_kern.c file */
CHECK_AND_RET(test_debug_fs_kprobe(0, "blk_mq_start_request",
BPF_FD_TYPE_KPROBE));
- CHECK_AND_RET(test_debug_fs_kprobe(1, "blk_account_io_completion",
+ CHECK_AND_RET(test_debug_fs_kprobe(1, "blk_account_io_done",
BPF_FD_TYPE_KRETPROBE));
/* test nondebug fs kprobe */
diff --git a/samples/bpf/test_current_task_under_cgroup_kern.c b/samples/bpf/test_current_task_under_cgroup_kern.c
index 6dc4f41bb6cb..fbd43e2bb4d3 100644
--- a/samples/bpf/test_current_task_under_cgroup_kern.c
+++ b/samples/bpf/test_current_task_under_cgroup_kern.c
@@ -10,23 +10,24 @@
#include <linux/version.h>
#include <bpf/bpf_helpers.h>
#include <uapi/linux/utsname.h>
+#include "trace_common.h"
-struct bpf_map_def SEC("maps") cgroup_map = {
- .type = BPF_MAP_TYPE_CGROUP_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u32),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 1);
+} cgroup_map SEC(".maps");
-struct bpf_map_def SEC("maps") perf_map = {
- .type = BPF_MAP_TYPE_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, 1);
+} perf_map SEC(".maps");
/* Writes the last PID that called sync to a map at index 0 */
-SEC("kprobe/sys_sync")
+SEC("kprobe/" SYSCALL(sys_sync))
int bpf_prog1(struct pt_regs *ctx)
{
u64 pid = bpf_get_current_pid_tgid();
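
The SYSCALL() wrapper here comes from the samples' local trace_common.h rather than any kernel API: modern kernels give syscall entry points per-architecture names, so a kprobe on a bare "sys_sync" no longer resolves. Approximately (my reading of trace_common.h):

    /* samples/bpf/trace_common.h, roughly */
    #ifdef __x86_64__
    #define SYSCALL(SYS) "__x64_" __stringify(SYS) /* -> kprobe/__x64_sys_sync */
    #else
    #define SYSCALL(SYS) __stringify(SYS)
    #endif
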
diff --git a/samples/bpf/test_current_task_under_cgroup_user.c b/samples/bpf/test_current_task_under_cgroup_user.c
index 06e9f8ce42e2..ac251a417f45 100644
--- a/samples/bpf/test_current_task_under_cgroup_user.c
+++ b/samples/bpf/test_current_task_under_cgroup_user.c
@@ -4,10 +4,9 @@
#define _GNU_SOURCE
#include <stdio.h>
-#include <linux/bpf.h>
#include <unistd.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#define CGROUP_PATH "/my-cgroup"
@@ -15,13 +14,44 @@
int main(int argc, char **argv)
{
pid_t remote_pid, local_pid = getpid();
- int cg2, idx = 0, rc = 0;
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ int cg2, idx = 0, rc = 1;
+ struct bpf_object *obj;
char filename[256];
+ int map_fd[2];
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 1;
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (!prog) {
+ printf("finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd[0] = bpf_object__find_map_fd_by_name(obj, "cgroup_map");
+ map_fd[1] = bpf_object__find_map_fd_by_name(obj, "perf_map");
+ if (map_fd[0] < 0 || map_fd[1] < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
}
if (setup_cgroup_environment())
@@ -70,12 +100,14 @@ int main(int argc, char **argv)
goto err;
}
- goto out;
-err:
- rc = 1;
+ rc = 0;
-out:
+err:
close(cg2);
cleanup_cgroup_environment();
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
return rc;
}
diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c
index 8def45c5b697..b0200c8eac09 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -103,10 +103,9 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
return result ? *result : -ENOENT;
}
-SEC("kprobe/" SYSCALL(sys_connect))
+SEC("kprobe/__sys_connect")
int trace_sys_connect(struct pt_regs *ctx)
{
- struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
struct sockaddr_in6 *in6;
u16 test_case, port, dst6[8];
int addrlen, ret, inline_ret, ret_key = 0;
@@ -114,8 +113,8 @@ int trace_sys_connect(struct pt_regs *ctx)
void *outer_map, *inner_map;
bool inline_hash = false;
- in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
- addrlen = (int)PT_REGS_PARM3_CORE(real_regs);
+ in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(ctx);
+ addrlen = (int)PT_REGS_PARM3_CORE(ctx);
if (addrlen != sizeof(*in6))
return 0;
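
The dropped real_regs indirection is the point of this hunk: a kprobe on the arch syscall wrapper (what SYSCALL(sys_connect) expands to) receives the user's pt_regs as the wrapper's single argument, whereas __sys_connect() is the internal helper that already takes unpacked arguments. Side by side:

    /* kprobe on __x64_sys_connect: one extra dereference */
    struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
    in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);

    /* kprobe on __sys_connect(int fd, struct sockaddr *uservaddr, int addrlen):
     * arguments come straight from ctx */
    in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(ctx);
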
diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user_kern.c
index fd651a65281e..220a96438d75 100644
--- a/samples/bpf/test_probe_write_user_kern.c
+++ b/samples/bpf/test_probe_write_user_kern.c
@@ -13,12 +13,12 @@
#include <bpf/bpf_core_read.h>
#include "trace_common.h"
-struct bpf_map_def SEC("maps") dnat_map = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct sockaddr_in),
- .value_size = sizeof(struct sockaddr_in),
- .max_entries = 256,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct sockaddr_in);
+ __type(value, struct sockaddr_in);
+ __uint(max_entries, 256);
+} dnat_map SEC(".maps");
/* kprobe is NOT a stable ABI
* kernel functions can be removed, renamed or completely change semantics.
diff --git a/samples/bpf/test_probe_write_user_user.c b/samples/bpf/test_probe_write_user_user.c
index 045eb5e30f54..00ccfb834e45 100644
--- a/samples/bpf/test_probe_write_user_user.c
+++ b/samples/bpf/test_probe_write_user_user.c
@@ -1,21 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <assert.h>
-#include <linux/bpf.h>
#include <unistd.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
#include <sys/socket.h>
-#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
int main(int ac, char **argv)
{
- int serverfd, serverconnfd, clientfd;
- socklen_t sockaddr_len;
- struct sockaddr serv_addr, mapped_addr, tmp_addr;
struct sockaddr_in *serv_addr_in, *mapped_addr_in, *tmp_addr_in;
+ struct sockaddr serv_addr, mapped_addr, tmp_addr;
+ int serverfd, serverconnfd, clientfd, map_fd;
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ socklen_t sockaddr_len;
char filename[256];
char *ip;
@@ -24,10 +25,35 @@ int main(int ac, char **argv)
tmp_addr_in = (struct sockaddr_in *)&tmp_addr;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (libbpf_get_error(prog)) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "dnat_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 1;
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
}
assert((serverfd = socket(AF_INET, SOCK_STREAM, 0)) > 0);
@@ -51,7 +77,7 @@ int main(int ac, char **argv)
mapped_addr_in->sin_port = htons(5555);
mapped_addr_in->sin_addr.s_addr = inet_addr("255.255.255.255");
- assert(!bpf_map_update_elem(map_fd[0], &mapped_addr, &serv_addr, BPF_ANY));
+ assert(!bpf_map_update_elem(map_fd, &mapped_addr, &serv_addr, BPF_ANY));
assert(listen(serverfd, 5) == 0);
@@ -75,5 +101,8 @@ int main(int ac, char **argv)
/* Is the server's getsockname = the socket getpeername */
assert(memcmp(&serv_addr, &tmp_addr, sizeof(struct sockaddr_in)) == 0);
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
return 0;
}
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 1d7d422cae6f..b64815af0943 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -2,15 +2,16 @@
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
+#include "trace_common.h"
-struct bpf_map_def SEC("maps") my_map = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = 2,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(u32));
+ __uint(max_entries, 2);
+} my_map SEC(".maps");
-SEC("kprobe/sys_write")
+SEC("kprobe/" SYSCALL(sys_write))
int bpf_prog1(struct pt_regs *ctx)
{
struct S {
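
Editor's note: two things change in the kernel-side hunk above. The map moves from the legacy struct bpf_map_def in SEC("maps") to a BTF-defined map in SEC(".maps"), and the kprobe section name goes through trace_common.h's SYSCALL() wrapper, which in these samples prepends the architecture-specific syscall symbol prefix (e.g. __x64_ on x86-64). A sketch of the two map styles side by side, not part of the patch; the map name "counters" and its key/value types are placeholders:

/* Editor's sketch: legacy vs BTF-defined map declarations for a
 * _kern.c program; bpf_helpers.h supplies SEC(), __uint() and __type().
 */
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Legacy declaration, parsed from the "maps" ELF section: */
struct bpf_map_def SEC("maps") counters_legacy = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(long),
	.max_entries = 4,
};

/* BTF-defined equivalent: key/value are real types, so their layout
 * travels with the object file as BTF and libbpf builds the map from
 * the ".maps" section.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, long);
	__uint(max_entries, 4);
} counters SEC(".maps");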
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
index 60a17dd05345..364b98764d54 100644
--- a/samples/bpf/trace_output_user.c
+++ b/samples/bpf/trace_output_user.c
@@ -1,23 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <string.h>
#include <fcntl.h>
#include <poll.h>
-#include <linux/perf_event.h>
-#include <linux/bpf.h>
-#include <errno.h>
-#include <assert.h>
-#include <sys/syscall.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <time.h>
#include <signal.h>
#include <bpf/libbpf.h>
-#include "bpf_load.h"
-#include "perf-sys.h"
static __u64 time_get_ns(void)
{
@@ -57,20 +44,48 @@ static void print_bpf_output(void *ctx, int cpu, void *data, __u32 size)
int main(int argc, char **argv)
{
struct perf_buffer_opts pb_opts = {};
+ struct bpf_link *link = NULL;
+ struct bpf_program *prog;
struct perf_buffer *pb;
+ struct bpf_object *obj;
+ int map_fd, ret = 0;
char filename[256];
FILE *f;
- int ret;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ fprintf(stderr, "ERROR: opening BPF object file failed\n");
+ return 0;
+ }
- if (load_bpf_file(filename)) {
- printf("%s", bpf_log_buf);
- return 1;
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ fprintf(stderr, "ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
+ if (map_fd < 0) {
+ fprintf(stderr, "ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+
+ prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
+ if (libbpf_get_error(prog)) {
+ fprintf(stderr, "ERROR: finding a prog in obj file failed\n");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach(prog);
+ if (libbpf_get_error(link)) {
+ fprintf(stderr, "ERROR: bpf_program__attach failed\n");
+ link = NULL;
+ goto cleanup;
}
pb_opts.sample_cb = print_bpf_output;
- pb = perf_buffer__new(map_fd[0], 8, &pb_opts);
+ pb = perf_buffer__new(map_fd, 8, &pb_opts);
ret = libbpf_get_error(pb);
if (ret) {
printf("failed to setup perf_buffer: %d\n", ret);
@@ -84,5 +99,9 @@ int main(int argc, char **argv)
while ((ret = perf_buffer__poll(pb, 1000)) >= 0 && cnt < MAX_CNT) {
}
kill(0, SIGINT);
+
+cleanup:
+ bpf_link__destroy(link);
+ bpf_object__close(obj);
return ret;
}
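
Editor's note: for the perf_buffer__new() call that replaces the hand-rolled perf mmap plumbing, here is a minimal consumer sketch against the same pre-1.0 libbpf API, not part of the patch; it assumes map_fd refers to an already loaded BPF_MAP_TYPE_PERF_EVENT_ARRAY:

/* Editor's sketch: draining a perf event array with perf_buffer. */
#include <stdio.h>
#include <linux/types.h>
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* data points at the record the BPF program emitted via
	 * bpf_perf_event_output(); size is its length in bytes.
	 */
	printf("cpu %d: %u bytes\n", cpu, size);
}

static int consume(int map_fd)
{
	struct perf_buffer_opts pb_opts = { .sample_cb = on_sample };
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(map_fd, 8 /* pages per CPU */, &pb_opts);
	err = libbpf_get_error(pb);
	if (err)
		return err;

	while ((err = perf_buffer__poll(pb, 1000 /* ms */)) >= 0)
		;
	perf_buffer__free(pb);
	return err;
}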
diff --git a/samples/bpf/tracex3_kern.c b/samples/bpf/tracex3_kern.c
index 659613c19a82..710a4410b2fb 100644
--- a/samples/bpf/tracex3_kern.c
+++ b/samples/bpf/tracex3_kern.c
@@ -49,7 +49,7 @@ struct {
__uint(max_entries, SLOTS);
} lat_map SEC(".maps");
-SEC("kprobe/blk_account_io_completion")
+SEC("kprobe/blk_account_io_done")
int bpf_prog2(struct pt_regs *ctx)
{
long rq = PT_REGS_PARM1(ctx);
diff --git a/samples/bpf/tracex5_user.c b/samples/bpf/tracex5_user.c
index 98dad57a96c4..c17d3fb5fd64 100644
--- a/samples/bpf/tracex5_user.c
+++ b/samples/bpf/tracex5_user.c
@@ -39,8 +39,8 @@ int main(int ac, char **argv)
struct bpf_program *prog;
struct bpf_object *obj;
int key, fd, progs_fd;
+ const char *section;
char filename[256];
- const char *title;
FILE *f;
setrlimit(RLIMIT_MEMLOCK, &r);
@@ -78,9 +78,9 @@ int main(int ac, char **argv)
}
bpf_object__for_each_program(prog, obj) {
- title = bpf_program__title(prog, false);
+ section = bpf_program__section_name(prog);
/* register only syscalls to PROG_ARRAY */
- if (sscanf(title, "kprobe/%d", &key) != 1)
+ if (sscanf(section, "kprobe/%d", &key) != 1)
continue;
fd = bpf_program__fd(prog);
diff --git a/samples/bpf/xdp_monitor_kern.c b/samples/bpf/xdp_monitor_kern.c
index 3d33cca2d48a..5c955b812c47 100644
--- a/samples/bpf/xdp_monitor_kern.c
+++ b/samples/bpf/xdp_monitor_kern.c
@@ -6,21 +6,21 @@
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
-struct bpf_map_def SEC("maps") redirect_err_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = 2,
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, 2);
/* TODO: have entries for all possible errno's */
-};
+} redirect_err_cnt SEC(".maps");
#define XDP_UNKNOWN XDP_REDIRECT + 1
-struct bpf_map_def SEC("maps") exception_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(u64),
- .max_entries = XDP_UNKNOWN + 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, u64);
+ __uint(max_entries, XDP_UNKNOWN + 1);
+} exception_cnt SEC(".maps");
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
* Code in: kernel/include/trace/events/xdp.h
@@ -129,19 +129,19 @@ struct datarec {
};
#define MAX_CPUS 64
-struct bpf_map_def SEC("maps") cpumap_enqueue_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = MAX_CPUS,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, MAX_CPUS);
+} cpumap_enqueue_cnt SEC(".maps");
-struct bpf_map_def SEC("maps") cpumap_kthread_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 1);
+} cpumap_kthread_cnt SEC(".maps");
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
* Code in: kernel/include/trace/events/xdp.h
@@ -210,12 +210,12 @@ int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
return 0;
}
-struct bpf_map_def SEC("maps") devmap_xmit_cnt = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(u32),
- .value_size = sizeof(struct datarec),
- .max_entries = 1,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __type(key, u32);
+ __type(value, struct datarec);
+ __uint(max_entries, 1);
+} devmap_xmit_cnt SEC(".maps");
/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
* Code in: kernel/include/trace/events/xdp.h
diff --git a/samples/bpf/xdp_monitor_user.c b/samples/bpf/xdp_monitor_user.c
index ef53b93db573..03d0a182913f 100644
--- a/samples/bpf/xdp_monitor_user.c
+++ b/samples/bpf/xdp_monitor_user.c
@@ -26,12 +26,37 @@ static const char *__doc_err_only__=
#include <net/if.h>
#include <time.h>
+#include <signal.h>
#include <bpf/bpf.h>
-#include "bpf_load.h"
+#include <bpf/libbpf.h>
#include "bpf_util.h"
+enum map_type {
+ REDIRECT_ERR_CNT,
+ EXCEPTION_CNT,
+ CPUMAP_ENQUEUE_CNT,
+ CPUMAP_KTHREAD_CNT,
+ DEVMAP_XMIT_CNT,
+};
+
+static const char *const map_type_strings[] = {
+ [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+ [EXCEPTION_CNT] = "exception_cnt",
+ [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+ [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+ [DEVMAP_XMIT_CNT] = "devmap_xmit_cnt",
+};
+
+#define NUM_MAP 5
+#define NUM_TP 8
+
+static int tp_cnt;
+static int map_cnt;
static int verbose = 1;
static bool debug = false;
+struct bpf_map *map_data[NUM_MAP] = {};
+struct bpf_link *tp_links[NUM_TP] = {};
+struct bpf_object *obj;
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
@@ -41,6 +66,16 @@ static const struct option long_options[] = {
{0, 0, NULL, 0 }
};
+static void int_exit(int sig)
+{
+ /* Detach tracepoints */
+ while (tp_cnt)
+ bpf_link__destroy(tp_links[--tp_cnt]);
+
+ bpf_object__close(obj);
+ exit(0);
+}
+
/* C standard specifies two constants, EXIT_SUCCESS(0) and EXIT_FAILURE(1) */
#define EXIT_FAIL_MEM 5
@@ -483,23 +518,23 @@ static bool stats_collect(struct stats_record *rec)
* this can happen by someone running perf-record -e
*/
- fd = map_data[0].fd; /* map0: redirect_err_cnt */
+ fd = bpf_map__fd(map_data[REDIRECT_ERR_CNT]);
for (i = 0; i < REDIR_RES_MAX; i++)
map_collect_record_u64(fd, i, &rec->xdp_redirect[i]);
- fd = map_data[1].fd; /* map1: exception_cnt */
+ fd = bpf_map__fd(map_data[EXCEPTION_CNT]);
for (i = 0; i < XDP_ACTION_MAX; i++) {
map_collect_record_u64(fd, i, &rec->xdp_exception[i]);
}
- fd = map_data[2].fd; /* map2: cpumap_enqueue_cnt */
+ fd = bpf_map__fd(map_data[CPUMAP_ENQUEUE_CNT]);
for (i = 0; i < MAX_CPUS; i++)
map_collect_record(fd, i, &rec->xdp_cpumap_enqueue[i]);
- fd = map_data[3].fd; /* map3: cpumap_kthread_cnt */
+ fd = bpf_map__fd(map_data[CPUMAP_KTHREAD_CNT]);
map_collect_record(fd, 0, &rec->xdp_cpumap_kthread);
- fd = map_data[4].fd; /* map4: devmap_xmit_cnt */
+ fd = bpf_map__fd(map_data[DEVMAP_XMIT_CNT]);
map_collect_record(fd, 0, &rec->xdp_devmap_xmit);
return true;
@@ -598,8 +633,8 @@ static void stats_poll(int interval, bool err_only)
/* TODO Need more advanced stats on error types */
if (verbose) {
- printf(" - Stats map0: %s\n", map_data[0].name);
- printf(" - Stats map1: %s\n", map_data[1].name);
+ printf(" - Stats map0: %s\n", bpf_map__name(map_data[0]));
+ printf(" - Stats map1: %s\n", bpf_map__name(map_data[1]));
printf("\n");
}
fflush(stdout);
@@ -618,44 +653,51 @@ static void stats_poll(int interval, bool err_only)
static void print_bpf_prog_info(void)
{
- int i;
+ struct bpf_program *prog;
+ struct bpf_map *map;
+ int i = 0;
/* Prog info */
- printf("Loaded BPF prog have %d bpf program(s)\n", prog_cnt);
- for (i = 0; i < prog_cnt; i++) {
- printf(" - prog_fd[%d] = fd(%d)\n", i, prog_fd[i]);
+ printf("Loaded BPF prog have %d bpf program(s)\n", tp_cnt);
+ bpf_object__for_each_program(prog, obj) {
+ printf(" - prog_fd[%d] = fd(%d)\n", i, bpf_program__fd(prog));
+ i++;
}
+ i = 0;
/* Maps info */
- printf("Loaded BPF prog have %d map(s)\n", map_data_count);
- for (i = 0; i < map_data_count; i++) {
- char *name = map_data[i].name;
- int fd = map_data[i].fd;
+ printf("Loaded BPF prog have %d map(s)\n", map_cnt);
+ bpf_object__for_each_map(map, obj) {
+ const char *name = bpf_map__name(map);
+ int fd = bpf_map__fd(map);
printf(" - map_data[%d] = fd(%d) name:%s\n", i, fd, name);
+ i++;
}
/* Event info */
- printf("Searching for (max:%d) event file descriptor(s)\n", prog_cnt);
- for (i = 0; i < prog_cnt; i++) {
- if (event_fd[i] != -1)
- printf(" - event_fd[%d] = fd(%d)\n", i, event_fd[i]);
+ printf("Searching for (max:%d) event file descriptor(s)\n", tp_cnt);
+ for (i = 0; i < tp_cnt; i++) {
+ int fd = bpf_link__fd(tp_links[i]);
+
+ if (fd != -1)
+ printf(" - event_fd[%d] = fd(%d)\n", i, fd);
}
}
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ struct bpf_program *prog;
int longindex = 0, opt;
- int ret = EXIT_SUCCESS;
- char bpf_obj_file[256];
+ int ret = EXIT_FAILURE;
+ enum map_type type;
+ char filename[256];
/* Default settings: */
bool errors_only = true;
int interval = 2;
- snprintf(bpf_obj_file, sizeof(bpf_obj_file), "%s_kern.o", argv[0]);
-
/* Parse commands line args */
while ((opt = getopt_long(argc, argv, "hDSs:",
long_options, &longindex)) != -1) {
@@ -672,40 +714,79 @@ int main(int argc, char **argv)
case 'h':
default:
usage(argv);
- return EXIT_FAILURE;
+ return ret;
}
}
+ snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
if (setrlimit(RLIMIT_MEMLOCK, &r)) {
perror("setrlimit(RLIMIT_MEMLOCK)");
- return EXIT_FAILURE;
+ return ret;
}
- if (load_bpf_file(bpf_obj_file)) {
- printf("ERROR - bpf_log_buf: %s", bpf_log_buf);
- return EXIT_FAILURE;
+ /* Detach tracepoint programs when this program is interrupted or killed */
+ signal(SIGINT, int_exit);
+ signal(SIGTERM, int_exit);
+
+ obj = bpf_object__open_file(filename, NULL);
+ if (libbpf_get_error(obj)) {
+ printf("ERROR: opening BPF object file failed\n");
+ obj = NULL;
+ goto cleanup;
+ }
+
+ /* load BPF program */
+ if (bpf_object__load(obj)) {
+ printf("ERROR: loading BPF object file failed\n");
+ goto cleanup;
+ }
+
+ for (type = 0; type < NUM_MAP; type++) {
+ map_data[type] =
+ bpf_object__find_map_by_name(obj, map_type_strings[type]);
+
+ if (libbpf_get_error(map_data[type])) {
+ printf("ERROR: finding a map in obj file failed\n");
+ goto cleanup;
+ }
+ map_cnt++;
}
- if (!prog_fd[0]) {
- printf("ERROR - load_bpf_file: %s\n", strerror(errno));
- return EXIT_FAILURE;
+
+ bpf_object__for_each_program(prog, obj) {
+ tp_links[tp_cnt] = bpf_program__attach(prog);
+ if (libbpf_get_error(tp_links[tp_cnt])) {
+ printf("ERROR: bpf_program__attach failed\n");
+ tp_links[tp_cnt] = NULL;
+ goto cleanup;
+ }
+ tp_cnt++;
}
if (debug) {
print_bpf_prog_info();
}
- /* Unload/stop tracepoint event by closing fd's */
+ /* Unload/stop tracepoint event by closing bpf_link's */
if (errors_only) {
- /* The prog_fd[i] and event_fd[i] depend on the
- * order the functions was defined in _kern.c
+ /* The tp_links[i] depend on the order in which
+ * the functions are defined in _kern.c
*/
- close(event_fd[2]); /* tracepoint/xdp/xdp_redirect */
- close(prog_fd[2]); /* func: trace_xdp_redirect */
- close(event_fd[3]); /* tracepoint/xdp/xdp_redirect_map */
- close(prog_fd[3]); /* func: trace_xdp_redirect_map */
+ bpf_link__destroy(tp_links[2]); /* tracepoint/xdp/xdp_redirect */
+ tp_links[2] = NULL;
+
+ bpf_link__destroy(tp_links[3]); /* tracepoint/xdp/xdp_redirect_map */
+ tp_links[3] = NULL;
}
stats_poll(interval, errors_only);
+ ret = EXIT_SUCCESS;
+
+cleanup:
+ /* Detach tracepoints */
+ while (tp_cnt)
+ bpf_link__destroy(tp_links[--tp_cnt]);
+
+ bpf_object__close(obj);
return ret;
}
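
Editor's note: the map_collect_record*() helpers referenced above read per-CPU array maps, where a single lookup returns one value per possible CPU and user space does the summing. A sketch of that access pattern, not part of the patch, assuming map_fd is a loaded BPF_MAP_TYPE_PERCPU_ARRAY of u64 counters:

/* Editor's sketch: summing one slot of a per-CPU array map. */
#include <linux/types.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int percpu_sum(int map_fd, __u32 key, __u64 *total)
{
	int ncpus = libbpf_num_possible_cpus();
	int i;

	if (ncpus < 0)
		return ncpus;

	__u64 values[ncpus];	/* one slot per possible CPU */

	if (bpf_map_lookup_elem(map_fd, &key, values))
		return -1;

	*total = 0;
	for (i = 0; i < ncpus; i++)
		*total += values[i];
	return 0;
}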
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index 004c0622c913..6fb8dbde62c5 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -37,18 +37,35 @@ static __u32 prog_id;
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static int n_cpus;
-static int cpu_map_fd;
-static int rx_cnt_map_fd;
-static int redirect_err_cnt_map_fd;
-static int cpumap_enqueue_cnt_map_fd;
-static int cpumap_kthread_cnt_map_fd;
-static int cpus_available_map_fd;
-static int cpus_count_map_fd;
-static int cpus_iterator_map_fd;
-static int exception_cnt_map_fd;
+
+enum map_type {
+ CPU_MAP,
+ RX_CNT,
+ REDIRECT_ERR_CNT,
+ CPUMAP_ENQUEUE_CNT,
+ CPUMAP_KTHREAD_CNT,
+ CPUS_AVAILABLE,
+ CPUS_COUNT,
+ CPUS_ITERATOR,
+ EXCEPTION_CNT,
+};
+
+static const char *const map_type_strings[] = {
+ [CPU_MAP] = "cpu_map",
+ [RX_CNT] = "rx_cnt",
+ [REDIRECT_ERR_CNT] = "redirect_err_cnt",
+ [CPUMAP_ENQUEUE_CNT] = "cpumap_enqueue_cnt",
+ [CPUMAP_KTHREAD_CNT] = "cpumap_kthread_cnt",
+ [CPUS_AVAILABLE] = "cpus_available",
+ [CPUS_COUNT] = "cpus_count",
+ [CPUS_ITERATOR] = "cpus_iterator",
+ [EXCEPTION_CNT] = "exception_cnt",
+};
#define NUM_TP 5
-struct bpf_link *tp_links[NUM_TP] = { 0 };
+#define NUM_MAP 9
+struct bpf_link *tp_links[NUM_TP] = {};
+static int map_fds[NUM_MAP];
static int tp_cnt = 0;
/* Exit return codes */
@@ -111,7 +128,7 @@ static void print_avail_progs(struct bpf_object *obj)
bpf_object__for_each_program(pos, obj) {
if (bpf_program__is_xdp(pos))
- printf(" %s\n", bpf_program__title(pos, false));
+ printf(" %s\n", bpf_program__section_name(pos));
}
}
@@ -527,20 +544,20 @@ static void stats_collect(struct stats_record *rec)
{
int fd, i;
- fd = rx_cnt_map_fd;
+ fd = map_fds[RX_CNT];
map_collect_percpu(fd, 0, &rec->rx_cnt);
- fd = redirect_err_cnt_map_fd;
+ fd = map_fds[REDIRECT_ERR_CNT];
map_collect_percpu(fd, 1, &rec->redir_err);
- fd = cpumap_enqueue_cnt_map_fd;
+ fd = map_fds[CPUMAP_ENQUEUE_CNT];
for (i = 0; i < n_cpus; i++)
map_collect_percpu(fd, i, &rec->enq[i]);
- fd = cpumap_kthread_cnt_map_fd;
+ fd = map_fds[CPUMAP_KTHREAD_CNT];
map_collect_percpu(fd, 0, &rec->kthread);
- fd = exception_cnt_map_fd;
+ fd = map_fds[EXCEPTION_CNT];
map_collect_percpu(fd, 0, &rec->exception);
}
@@ -565,7 +582,7 @@ static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
/* Add a CPU entry to cpumap, as this allocates a cpu entry in
* the kernel for the cpu.
*/
- ret = bpf_map_update_elem(cpu_map_fd, &cpu, value, 0);
+ ret = bpf_map_update_elem(map_fds[CPU_MAP], &cpu, value, 0);
if (ret) {
fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
exit(EXIT_FAIL_BPF);
@@ -574,21 +591,21 @@ static int create_cpu_entry(__u32 cpu, struct bpf_cpumap_val *value,
/* Inform bpf_prog's that a new CPU is available to select
* from via some control maps.
*/
- ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
+ ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &avail_idx, &cpu, 0);
if (ret) {
fprintf(stderr, "Add to avail CPUs failed\n");
exit(EXIT_FAIL_BPF);
}
/* When not replacing/updating existing entry, bump the count */
- ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
+ ret = bpf_map_lookup_elem(map_fds[CPUS_COUNT], &key, &curr_cpus_count);
if (ret) {
fprintf(stderr, "Failed reading curr cpus_count\n");
exit(EXIT_FAIL_BPF);
}
if (new) {
curr_cpus_count++;
- ret = bpf_map_update_elem(cpus_count_map_fd, &key,
+ ret = bpf_map_update_elem(map_fds[CPUS_COUNT], &key,
&curr_cpus_count, 0);
if (ret) {
fprintf(stderr, "Failed write curr cpus_count\n");
@@ -612,7 +629,7 @@ static void mark_cpus_unavailable(void)
int ret, i;
for (i = 0; i < n_cpus; i++) {
- ret = bpf_map_update_elem(cpus_available_map_fd, &i,
+ ret = bpf_map_update_elem(map_fds[CPUS_AVAILABLE], &i,
&invalid_cpu, 0);
if (ret) {
fprintf(stderr, "Failed marking CPU unavailable\n");
@@ -665,68 +682,37 @@ static void stats_poll(int interval, bool use_separators, char *prog_name,
free_stats_record(prev);
}
-static struct bpf_link * attach_tp(struct bpf_object *obj,
- const char *tp_category,
- const char* tp_name)
+static int init_tracepoints(struct bpf_object *obj)
{
struct bpf_program *prog;
- struct bpf_link *link;
- char sec_name[PATH_MAX];
- int len;
- len = snprintf(sec_name, PATH_MAX, "tracepoint/%s/%s",
- tp_category, tp_name);
- if (len < 0)
- exit(EXIT_FAIL);
+ bpf_object__for_each_program(prog, obj) {
+ if (!bpf_program__is_tracepoint(prog))
+ continue;
- prog = bpf_object__find_program_by_title(obj, sec_name);
- if (!prog) {
- fprintf(stderr, "ERR: finding progsec: %s\n", sec_name);
- exit(EXIT_FAIL_BPF);
+ tp_links[tp_cnt] = bpf_program__attach(prog);
+ if (libbpf_get_error(tp_links[tp_cnt])) {
+ tp_links[tp_cnt] = NULL;
+ return -EINVAL;
+ }
+ tp_cnt++;
}
- link = bpf_program__attach_tracepoint(prog, tp_category, tp_name);
- if (libbpf_get_error(link))
- exit(EXIT_FAIL_BPF);
-
- return link;
-}
-
-static void init_tracepoints(struct bpf_object *obj) {
- tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_err");
- tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_map_err");
- tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_exception");
- tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_enqueue");
- tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_kthread");
+ return 0;
}
static int init_map_fds(struct bpf_object *obj)
{
- /* Maps updated by tracepoints */
- redirect_err_cnt_map_fd =
- bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
- exception_cnt_map_fd =
- bpf_object__find_map_fd_by_name(obj, "exception_cnt");
- cpumap_enqueue_cnt_map_fd =
- bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
- cpumap_kthread_cnt_map_fd =
- bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");
-
- /* Maps used by XDP */
- rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
- cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
- cpus_available_map_fd =
- bpf_object__find_map_fd_by_name(obj, "cpus_available");
- cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
- cpus_iterator_map_fd =
- bpf_object__find_map_fd_by_name(obj, "cpus_iterator");
-
- if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
- redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
- cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
- cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
- exception_cnt_map_fd < 0)
- return -ENOENT;
+ enum map_type type;
+
+ for (type = 0; type < NUM_MAP; type++) {
+ map_fds[type] =
+ bpf_object__find_map_fd_by_name(obj,
+ map_type_strings[type]);
+
+ if (map_fds[type] < 0)
+ return -ENOENT;
+ }
return 0;
}
@@ -795,13 +781,13 @@ int main(int argc, char **argv)
bool stress_mode = false;
struct bpf_program *prog;
struct bpf_object *obj;
+ int err = EXIT_FAIL;
char filename[256];
int added_cpus = 0;
int longindex = 0;
int interval = 2;
int add_cpu = -1;
- int opt, err;
- int prog_fd;
+ int opt, prog_fd;
int *cpu, i;
__u32 qsize;
@@ -824,24 +810,29 @@ int main(int argc, char **argv)
}
if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
- return EXIT_FAIL;
+ return err;
if (prog_fd < 0) {
fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
strerror(errno));
- return EXIT_FAIL;
+ return err;
}
- init_tracepoints(obj);
+
+ if (init_tracepoints(obj) < 0) {
+ fprintf(stderr, "ERR: bpf_program__attach failed\n");
+ return err;
+ }
+
if (init_map_fds(obj) < 0) {
fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
- return EXIT_FAIL;
+ return err;
}
mark_cpus_unavailable();
cpu = malloc(n_cpus * sizeof(int));
if (!cpu) {
fprintf(stderr, "failed to allocate cpu array\n");
- return EXIT_FAIL;
+ return err;
}
memset(cpu, 0, n_cpus * sizeof(int));
@@ -960,14 +951,12 @@ int main(int argc, char **argv)
prog = bpf_object__find_program_by_title(obj, prog_name);
if (!prog) {
fprintf(stderr, "bpf_object__find_program_by_title failed\n");
- err = EXIT_FAIL;
goto out;
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
fprintf(stderr, "bpf_program__fd failed\n");
- err = EXIT_FAIL;
goto out;
}
@@ -986,6 +975,8 @@ int main(int argc, char **argv)
stats_poll(interval, use_separators, prog_name, mprog_name,
&value, stress_mode);
+
+ err = EXIT_OK;
out:
free(cpu);
return err;
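
Editor's note: the conversion above replaces nine ad-hoc *_map_fd globals with an enum-indexed name table resolved in a single loop, so adding a map becomes a two-line change. A reduced sketch of the pattern, not part of the patch; the enum entries and map names are placeholders:

/* Editor's sketch: table-driven map-fd resolution. */
#include <errno.h>
#include <bpf/libbpf.h>

enum map_id { MAP_RX_CNT, MAP_CPU_MAP, NUM_MAPS };

static const char *const map_names[] = {
	[MAP_RX_CNT]  = "rx_cnt",
	[MAP_CPU_MAP] = "cpu_map",
};

static int map_fds[NUM_MAPS];

static int resolve_map_fds(struct bpf_object *obj)
{
	enum map_id id;

	for (id = 0; id < NUM_MAPS; id++) {
		map_fds[id] = bpf_object__find_map_fd_by_name(obj,
							      map_names[id]);
		if (map_fds[id] < 0)
			return -ENOENT;
	}
	return 0;
}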
diff --git a/samples/bpf/xdp_sample_pkts_kern.c b/samples/bpf/xdp_sample_pkts_kern.c
index 33377289e2a8..9cf76b340dd7 100644
--- a/samples/bpf/xdp_sample_pkts_kern.c
+++ b/samples/bpf/xdp_sample_pkts_kern.c
@@ -5,14 +5,12 @@
#include <bpf/bpf_helpers.h>
#define SAMPLE_SIZE 64ul
-#define MAX_CPUS 128
-
-struct bpf_map_def SEC("maps") my_map = {
- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
- .key_size = sizeof(int),
- .value_size = sizeof(u32),
- .max_entries = MAX_CPUS,
-};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(u32));
+} my_map SEC(".maps");
SEC("xdp_sample")
int xdp_sample_prog(struct xdp_md *ctx)
diff --git a/samples/bpf/xdp_sample_pkts_user.c b/samples/bpf/xdp_sample_pkts_user.c
index 991ef6f0880b..4b2a300c750c 100644
--- a/samples/bpf/xdp_sample_pkts_user.c
+++ b/samples/bpf/xdp_sample_pkts_user.c
@@ -18,7 +18,6 @@
#include "perf-sys.h"
-#define MAX_CPUS 128
static int if_idx;
static char *if_name;
static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index 19c679456a0e..1149e94ca32f 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -11,6 +11,7 @@
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
+#include <linux/limits.h>
#include <linux/udp.h>
#include <arpa/inet.h>
#include <locale.h>
@@ -78,6 +79,11 @@ static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_extra_stats;
+static bool opt_quiet;
+static bool opt_app_stats;
+static const char *opt_irq_str = "";
+static u32 irq_no;
+static int irqs_at_init = -1;
static int opt_poll;
static int opt_interval = 1;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
@@ -90,18 +96,7 @@ static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
static u32 prog_id;
-struct xsk_umem_info {
- struct xsk_ring_prod fq;
- struct xsk_ring_cons cq;
- struct xsk_umem *umem;
- void *buffer;
-};
-
-struct xsk_socket_info {
- struct xsk_ring_cons rx;
- struct xsk_ring_prod tx;
- struct xsk_umem_info *umem;
- struct xsk_socket *xsk;
+struct xsk_ring_stats {
unsigned long rx_npkts;
unsigned long tx_npkts;
unsigned long rx_dropped_npkts;
@@ -118,6 +113,41 @@ struct xsk_socket_info {
unsigned long prev_rx_full_npkts;
unsigned long prev_rx_fill_empty_npkts;
unsigned long prev_tx_empty_npkts;
+};
+
+struct xsk_driver_stats {
+ unsigned long intrs;
+ unsigned long prev_intrs;
+};
+
+struct xsk_app_stats {
+ unsigned long rx_empty_polls;
+ unsigned long fill_fail_polls;
+ unsigned long copy_tx_sendtos;
+ unsigned long tx_wakeup_sendtos;
+ unsigned long opt_polls;
+ unsigned long prev_rx_empty_polls;
+ unsigned long prev_fill_fail_polls;
+ unsigned long prev_copy_tx_sendtos;
+ unsigned long prev_tx_wakeup_sendtos;
+ unsigned long prev_opt_polls;
+};
+
+struct xsk_umem_info {
+ struct xsk_ring_prod fq;
+ struct xsk_ring_cons cq;
+ struct xsk_umem *umem;
+ void *buffer;
+};
+
+struct xsk_socket_info {
+ struct xsk_ring_cons rx;
+ struct xsk_ring_prod tx;
+ struct xsk_umem_info *umem;
+ struct xsk_socket *xsk;
+ struct xsk_ring_stats ring_stats;
+ struct xsk_app_stats app_stats;
+ struct xsk_driver_stats drv_stats;
u32 outstanding_tx;
};
@@ -172,18 +202,151 @@ static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
return err;
if (optlen == sizeof(struct xdp_statistics)) {
- xsk->rx_dropped_npkts = stats.rx_dropped;
- xsk->rx_invalid_npkts = stats.rx_invalid_descs;
- xsk->tx_invalid_npkts = stats.tx_invalid_descs;
- xsk->rx_full_npkts = stats.rx_ring_full;
- xsk->rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
- xsk->tx_empty_npkts = stats.tx_ring_empty_descs;
+ xsk->ring_stats.rx_dropped_npkts = stats.rx_dropped;
+ xsk->ring_stats.rx_invalid_npkts = stats.rx_invalid_descs;
+ xsk->ring_stats.tx_invalid_npkts = stats.tx_invalid_descs;
+ xsk->ring_stats.rx_full_npkts = stats.rx_ring_full;
+ xsk->ring_stats.rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
+ xsk->ring_stats.tx_empty_npkts = stats.tx_ring_empty_descs;
return 0;
}
return -EINVAL;
}
+static void dump_app_stats(long dt)
+{
+ int i;
+
+ for (i = 0; i < num_socks && xsks[i]; i++) {
+ char *fmt = "%-18s %'-14.0f %'-14lu\n";
+ double rx_empty_polls_ps, fill_fail_polls_ps, copy_tx_sendtos_ps,
+ tx_wakeup_sendtos_ps, opt_polls_ps;
+
+ rx_empty_polls_ps = (xsks[i]->app_stats.rx_empty_polls -
+ xsks[i]->app_stats.prev_rx_empty_polls) * 1000000000. / dt;
+ fill_fail_polls_ps = (xsks[i]->app_stats.fill_fail_polls -
+ xsks[i]->app_stats.prev_fill_fail_polls) * 1000000000. / dt;
+ copy_tx_sendtos_ps = (xsks[i]->app_stats.copy_tx_sendtos -
+ xsks[i]->app_stats.prev_copy_tx_sendtos) * 1000000000. / dt;
+ tx_wakeup_sendtos_ps = (xsks[i]->app_stats.tx_wakeup_sendtos -
+ xsks[i]->app_stats.prev_tx_wakeup_sendtos)
+ * 1000000000. / dt;
+ opt_polls_ps = (xsks[i]->app_stats.opt_polls -
+ xsks[i]->app_stats.prev_opt_polls) * 1000000000. / dt;
+
+ printf("\n%-18s %-14s %-14s\n", "", "calls/s", "count");
+ printf(fmt, "rx empty polls", rx_empty_polls_ps, xsks[i]->app_stats.rx_empty_polls);
+ printf(fmt, "fill fail polls", fill_fail_polls_ps,
+ xsks[i]->app_stats.fill_fail_polls);
+ printf(fmt, "copy tx sendtos", copy_tx_sendtos_ps,
+ xsks[i]->app_stats.copy_tx_sendtos);
+ printf(fmt, "tx wakeup sendtos", tx_wakeup_sendtos_ps,
+ xsks[i]->app_stats.tx_wakeup_sendtos);
+ printf(fmt, "opt polls", opt_polls_ps, xsks[i]->app_stats.opt_polls);
+
+ xsks[i]->app_stats.prev_rx_empty_polls = xsks[i]->app_stats.rx_empty_polls;
+ xsks[i]->app_stats.prev_fill_fail_polls = xsks[i]->app_stats.fill_fail_polls;
+ xsks[i]->app_stats.prev_copy_tx_sendtos = xsks[i]->app_stats.copy_tx_sendtos;
+ xsks[i]->app_stats.prev_tx_wakeup_sendtos = xsks[i]->app_stats.tx_wakeup_sendtos;
+ xsks[i]->app_stats.prev_opt_polls = xsks[i]->app_stats.opt_polls;
+ }
+}
+
+static bool get_interrupt_number(void)
+{
+ FILE *f_int_proc;
+ char line[4096];
+ bool found = false;
+
+ f_int_proc = fopen("/proc/interrupts", "r");
+ if (f_int_proc == NULL) {
+ printf("Failed to open /proc/interrupts.\n");
+ return found;
+ }
+
+ while (!feof(f_int_proc) && !found) {
+ /* Make sure to read a full line at a time */
+ if (fgets(line, sizeof(line), f_int_proc) == NULL ||
+ line[strlen(line) - 1] != '\n') {
+ printf("Error reading from interrupts file\n");
+ break;
+ }
+
+ /* Extract interrupt number from line */
+ if (strstr(line, opt_irq_str) != NULL) {
+ irq_no = atoi(line);
+ found = true;
+ break;
+ }
+ }
+
+ fclose(f_int_proc);
+
+ return found;
+}
+
+static int get_irqs(void)
+{
+ char count_path[PATH_MAX];
+ int total_intrs = -1;
+ FILE *f_count_proc;
+ char line[4096];
+
+ snprintf(count_path, sizeof(count_path),
+ "/sys/kernel/irq/%i/per_cpu_count", irq_no);
+ f_count_proc = fopen(count_path, "r");
+ if (f_count_proc == NULL) {
+ printf("Failed to open %s\n", count_path);
+ return total_intrs;
+ }
+
+ if (fgets(line, sizeof(line), f_count_proc) == NULL ||
+ line[strlen(line) - 1] != '\n') {
+ printf("Error reading from %s\n", count_path);
+ } else {
+ static const char com[2] = ",";
+ char *token;
+
+ total_intrs = 0;
+ token = strtok(line, com);
+ while (token != NULL) {
+ /* sum up interrupts across all cores */
+ total_intrs += atoi(token);
+ token = strtok(NULL, com);
+ }
+ }
+
+ fclose(f_count_proc);
+
+ return total_intrs;
+}
+
+static void dump_driver_stats(long dt)
+{
+ int i;
+
+ for (i = 0; i < num_socks && xsks[i]; i++) {
+ char *fmt = "%-18s %'-14.0f %'-14lu\n";
+ double intrs_ps;
+ int n_ints = get_irqs();
+
+ if (n_ints < 0) {
+ printf("error getting intr info for intr %i\n", irq_no);
+ return;
+ }
+ xsks[i]->drv_stats.intrs = n_ints - irqs_at_init;
+
+ intrs_ps = (xsks[i]->drv_stats.intrs - xsks[i]->drv_stats.prev_intrs) *
+ 1000000000. / dt;
+
+ printf("\n%-18s %-14s %-14s\n", "", "intrs/s", "count");
+ printf(fmt, "irqs", intrs_ps, xsks[i]->drv_stats.intrs);
+
+ xsks[i]->drv_stats.prev_intrs = xsks[i]->drv_stats.intrs;
+ }
+}
+
static void dump_stats(void)
{
unsigned long now = get_nsecs();
@@ -193,67 +356,83 @@ static void dump_stats(void)
prev_time = now;
for (i = 0; i < num_socks && xsks[i]; i++) {
- char *fmt = "%-15s %'-11.0f %'-11lu\n";
+ char *fmt = "%-18s %'-14.0f %'-14lu\n";
double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
tx_invalid_pps, tx_empty_pps;
- rx_pps = (xsks[i]->rx_npkts - xsks[i]->prev_rx_npkts) *
+ rx_pps = (xsks[i]->ring_stats.rx_npkts - xsks[i]->ring_stats.prev_rx_npkts) *
1000000000. / dt;
- tx_pps = (xsks[i]->tx_npkts - xsks[i]->prev_tx_npkts) *
+ tx_pps = (xsks[i]->ring_stats.tx_npkts - xsks[i]->ring_stats.prev_tx_npkts) *
1000000000. / dt;
printf("\n sock%d@", i);
print_benchmark(false);
printf("\n");
- printf("%-15s %-11s %-11s %-11.2f\n", "", "pps", "pkts",
+ printf("%-18s %-14s %-14s %-14.2f\n", "", "pps", "pkts",
dt / 1000000000.);
- printf(fmt, "rx", rx_pps, xsks[i]->rx_npkts);
- printf(fmt, "tx", tx_pps, xsks[i]->tx_npkts);
+ printf(fmt, "rx", rx_pps, xsks[i]->ring_stats.rx_npkts);
+ printf(fmt, "tx", tx_pps, xsks[i]->ring_stats.tx_npkts);
- xsks[i]->prev_rx_npkts = xsks[i]->rx_npkts;
- xsks[i]->prev_tx_npkts = xsks[i]->tx_npkts;
+ xsks[i]->ring_stats.prev_rx_npkts = xsks[i]->ring_stats.rx_npkts;
+ xsks[i]->ring_stats.prev_tx_npkts = xsks[i]->ring_stats.tx_npkts;
if (opt_extra_stats) {
if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
- dropped_pps = (xsks[i]->rx_dropped_npkts -
- xsks[i]->prev_rx_dropped_npkts) * 1000000000. / dt;
- rx_invalid_pps = (xsks[i]->rx_invalid_npkts -
- xsks[i]->prev_rx_invalid_npkts) * 1000000000. / dt;
- tx_invalid_pps = (xsks[i]->tx_invalid_npkts -
- xsks[i]->prev_tx_invalid_npkts) * 1000000000. / dt;
- full_pps = (xsks[i]->rx_full_npkts -
- xsks[i]->prev_rx_full_npkts) * 1000000000. / dt;
- fill_empty_pps = (xsks[i]->rx_fill_empty_npkts -
- xsks[i]->prev_rx_fill_empty_npkts)
- * 1000000000. / dt;
- tx_empty_pps = (xsks[i]->tx_empty_npkts -
- xsks[i]->prev_tx_empty_npkts) * 1000000000. / dt;
+ dropped_pps = (xsks[i]->ring_stats.rx_dropped_npkts -
+ xsks[i]->ring_stats.prev_rx_dropped_npkts) *
+ 1000000000. / dt;
+ rx_invalid_pps = (xsks[i]->ring_stats.rx_invalid_npkts -
+ xsks[i]->ring_stats.prev_rx_invalid_npkts) *
+ 1000000000. / dt;
+ tx_invalid_pps = (xsks[i]->ring_stats.tx_invalid_npkts -
+ xsks[i]->ring_stats.prev_tx_invalid_npkts) *
+ 1000000000. / dt;
+ full_pps = (xsks[i]->ring_stats.rx_full_npkts -
+ xsks[i]->ring_stats.prev_rx_full_npkts) *
+ 1000000000. / dt;
+ fill_empty_pps = (xsks[i]->ring_stats.rx_fill_empty_npkts -
+ xsks[i]->ring_stats.prev_rx_fill_empty_npkts) *
+ 1000000000. / dt;
+ tx_empty_pps = (xsks[i]->ring_stats.tx_empty_npkts -
+ xsks[i]->ring_stats.prev_tx_empty_npkts) *
+ 1000000000. / dt;
printf(fmt, "rx dropped", dropped_pps,
- xsks[i]->rx_dropped_npkts);
+ xsks[i]->ring_stats.rx_dropped_npkts);
printf(fmt, "rx invalid", rx_invalid_pps,
- xsks[i]->rx_invalid_npkts);
+ xsks[i]->ring_stats.rx_invalid_npkts);
printf(fmt, "tx invalid", tx_invalid_pps,
- xsks[i]->tx_invalid_npkts);
+ xsks[i]->ring_stats.tx_invalid_npkts);
printf(fmt, "rx queue full", full_pps,
- xsks[i]->rx_full_npkts);
+ xsks[i]->ring_stats.rx_full_npkts);
printf(fmt, "fill ring empty", fill_empty_pps,
- xsks[i]->rx_fill_empty_npkts);
+ xsks[i]->ring_stats.rx_fill_empty_npkts);
printf(fmt, "tx ring empty", tx_empty_pps,
- xsks[i]->tx_empty_npkts);
-
- xsks[i]->prev_rx_dropped_npkts = xsks[i]->rx_dropped_npkts;
- xsks[i]->prev_rx_invalid_npkts = xsks[i]->rx_invalid_npkts;
- xsks[i]->prev_tx_invalid_npkts = xsks[i]->tx_invalid_npkts;
- xsks[i]->prev_rx_full_npkts = xsks[i]->rx_full_npkts;
- xsks[i]->prev_rx_fill_empty_npkts = xsks[i]->rx_fill_empty_npkts;
- xsks[i]->prev_tx_empty_npkts = xsks[i]->tx_empty_npkts;
+ xsks[i]->ring_stats.tx_empty_npkts);
+
+ xsks[i]->ring_stats.prev_rx_dropped_npkts =
+ xsks[i]->ring_stats.rx_dropped_npkts;
+ xsks[i]->ring_stats.prev_rx_invalid_npkts =
+ xsks[i]->ring_stats.rx_invalid_npkts;
+ xsks[i]->ring_stats.prev_tx_invalid_npkts =
+ xsks[i]->ring_stats.tx_invalid_npkts;
+ xsks[i]->ring_stats.prev_rx_full_npkts =
+ xsks[i]->ring_stats.rx_full_npkts;
+ xsks[i]->ring_stats.prev_rx_fill_empty_npkts =
+ xsks[i]->ring_stats.rx_fill_empty_npkts;
+ xsks[i]->ring_stats.prev_tx_empty_npkts =
+ xsks[i]->ring_stats.tx_empty_npkts;
} else {
printf("%-15s\n", "Error retrieving extra stats");
}
}
}
+
+ if (opt_app_stats)
+ dump_app_stats(dt);
+ if (irq_no)
+ dump_driver_stats(dt);
}
static bool is_benchmark_done(void)
@@ -613,7 +792,16 @@ static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
{
struct xsk_umem_info *umem;
struct xsk_umem_config cfg = {
- .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ /* We recommend that you set the fill ring size >= HW RX ring size +
+ * AF_XDP RX ring size. Make sure you fill up the fill ring
+ * with buffers at regular intervals, and with this setting you
+ * will avoid allocation failures in the driver. These are usually quite
+ * expensive since drivers have not been written to assume that
+ * allocation failures are common. For regular sockets, kernel
+ * allocated memory is used that only runs out in OOM situations
+ * that should be rare.
+ */
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
.comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
.frame_size = opt_xsk_frame_size,
.frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
@@ -640,13 +828,13 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
u32 idx;
ret = xsk_ring_prod__reserve(&umem->fq,
- XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx);
- if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS)
+ XSK_RING_PROD__DEFAULT_NUM_DESCS * 2, &idx);
+ if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS * 2)
exit_with_error(-ret);
- for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
+ for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; i++)
*xsk_ring_prod__fill_addr(&umem->fq, idx++) =
i * opt_xsk_frame_size;
- xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
+ xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);
}
static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
@@ -683,6 +871,17 @@ static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
if (ret)
exit_with_error(-ret);
+ xsk->app_stats.rx_empty_polls = 0;
+ xsk->app_stats.fill_fail_polls = 0;
+ xsk->app_stats.copy_tx_sendtos = 0;
+ xsk->app_stats.tx_wakeup_sendtos = 0;
+ xsk->app_stats.opt_polls = 0;
+ xsk->app_stats.prev_rx_empty_polls = 0;
+ xsk->app_stats.prev_fill_fail_polls = 0;
+ xsk->app_stats.prev_copy_tx_sendtos = 0;
+ xsk->app_stats.prev_tx_wakeup_sendtos = 0;
+ xsk->app_stats.prev_opt_polls = 0;
+
return xsk;
}
@@ -709,6 +908,9 @@ static struct option long_options[] = {
{"tx-pkt-size", required_argument, 0, 's'},
{"tx-pkt-pattern", required_argument, 0, 'P'},
{"extra-stats", no_argument, 0, 'x'},
+ {"quiet", no_argument, 0, 'Q'},
+ {"app-stats", no_argument, 0, 'a'},
+ {"irq-string", no_argument, 0, 'I'},
{0, 0, 0, 0}
};
@@ -744,6 +946,9 @@ static void usage(const char *prog)
" Min size: %d, Max size %d.\n"
" -P, --tx-pkt-pattern=nPacket fill pattern. Default: 0x%x\n"
" -x, --extra-stats Display extra statistics.\n"
+ " -Q, --quiet Do not display any stats.\n"
+ " -a, --app-stats Display application (syscall) statistics.\n"
+ " -I, --irq-string Display driver interrupt statistics for interface associated with irq-string.\n"
"\n";
fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
@@ -759,7 +964,7 @@ static void parse_command_line(int argc, char **argv)
opterr = 0;
for (;;) {
- c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:x",
+ c = getopt_long(argc, argv, "Frtli:q:pSNn:czf:muMd:b:C:s:P:xQaI:",
long_options, &option_index);
if (c == -1)
break;
@@ -843,6 +1048,22 @@ static void parse_command_line(int argc, char **argv)
case 'x':
opt_extra_stats = 1;
break;
+ case 'Q':
+ opt_quiet = 1;
+ break;
+ case 'a':
+ opt_app_stats = 1;
+ break;
+ case 'I':
+ opt_irq_str = optarg;
+ if (get_interrupt_number())
+ irqs_at_init = get_irqs();
+ if (irqs_at_init < 0) {
+ fprintf(stderr, "ERROR: Failed to get irqs for %s\n", opt_irq_str);
+ usage(basename(argv[0]));
+ }
+
+ break;
default:
usage(basename(argv[0]));
}
@@ -888,8 +1109,15 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
if (!xsk->outstanding_tx)
return;
- if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
+ /* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
+ * really send the packets. In zero-copy mode we do not have to do this, since Tx
+ * is driven by the NAPI loop. So as an optimization, we do not have to call
+ * sendto() all the time in zero-copy mode for l2fwd.
+ */
+ if (opt_xdp_bind_flags & XDP_COPY) {
+ xsk->app_stats.copy_tx_sendtos++;
kick_tx(xsk);
+ }
ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
xsk->outstanding_tx;
@@ -904,8 +1132,10 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
while (ret != rcvd) {
if (ret < 0)
exit_with_error(-ret);
- if (xsk_ring_prod__needs_wakeup(&umem->fq))
+ if (xsk_ring_prod__needs_wakeup(&umem->fq)) {
+ xsk->app_stats.fill_fail_polls++;
ret = poll(fds, num_socks, opt_timeout);
+ }
ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
}
@@ -916,7 +1146,7 @@ static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk,
xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
xsk_ring_cons__release(&xsk->umem->cq, rcvd);
xsk->outstanding_tx -= rcvd;
- xsk->tx_npkts += rcvd;
+ xsk->ring_stats.tx_npkts += rcvd;
}
}
@@ -929,14 +1159,16 @@ static inline void complete_tx_only(struct xsk_socket_info *xsk,
if (!xsk->outstanding_tx)
return;
- if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx))
+ if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
+ xsk->app_stats.tx_wakeup_sendtos++;
kick_tx(xsk);
+ }
rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
if (rcvd > 0) {
xsk_ring_cons__release(&xsk->umem->cq, rcvd);
xsk->outstanding_tx -= rcvd;
- xsk->tx_npkts += rcvd;
+ xsk->ring_stats.tx_npkts += rcvd;
}
}
@@ -948,8 +1180,10 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
if (!rcvd) {
- if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
+ if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
+ xsk->app_stats.rx_empty_polls++;
ret = poll(fds, num_socks, opt_timeout);
+ }
return;
}
@@ -957,8 +1191,10 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
while (ret != rcvd) {
if (ret < 0)
exit_with_error(-ret);
- if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
+ if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
+ xsk->app_stats.fill_fail_polls++;
ret = poll(fds, num_socks, opt_timeout);
+ }
ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
}
@@ -976,7 +1212,7 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
xsk_ring_cons__release(&xsk->rx, rcvd);
- xsk->rx_npkts += rcvd;
+ xsk->ring_stats.rx_npkts += rcvd;
}
static void rx_drop_all(void)
@@ -991,6 +1227,8 @@ static void rx_drop_all(void)
for (;;) {
if (opt_poll) {
+ for (i = 0; i < num_socks; i++)
+ xsks[i]->app_stats.opt_polls++;
ret = poll(fds, num_socks, opt_timeout);
if (ret <= 0)
continue;
@@ -1004,7 +1242,7 @@ static void rx_drop_all(void)
}
}
-static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
+static void tx_only(struct xsk_socket_info *xsk, u32 *frame_nb, int batch_size)
{
u32 idx;
unsigned int i;
@@ -1017,14 +1255,14 @@ static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb, int batch_size)
for (i = 0; i < batch_size; i++) {
struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
idx + i);
- tx_desc->addr = (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
+ tx_desc->addr = (*frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT;
tx_desc->len = PKT_SIZE;
}
xsk_ring_prod__submit(&xsk->tx, batch_size);
xsk->outstanding_tx += batch_size;
- frame_nb += batch_size;
- frame_nb %= NUM_FRAMES;
+ *frame_nb += batch_size;
+ *frame_nb %= NUM_FRAMES;
complete_tx_only(xsk, batch_size);
}
@@ -1071,6 +1309,8 @@ static void tx_only_all(void)
int batch_size = get_batch_size(pkt_cnt);
if (opt_poll) {
+ for (i = 0; i < num_socks; i++)
+ xsks[i]->app_stats.opt_polls++;
ret = poll(fds, num_socks, opt_timeout);
if (ret <= 0)
continue;
@@ -1080,7 +1320,7 @@ static void tx_only_all(void)
}
for (i = 0; i < num_socks; i++)
- tx_only(xsks[i], frame_nb[i], batch_size);
+ tx_only(xsks[i], &frame_nb[i], batch_size);
pkt_cnt += batch_size;
@@ -1102,8 +1342,10 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
if (!rcvd) {
- if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
+ if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
+ xsk->app_stats.rx_empty_polls++;
ret = poll(fds, num_socks, opt_timeout);
+ }
return;
}
@@ -1111,8 +1353,11 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
while (ret != rcvd) {
if (ret < 0)
exit_with_error(-ret);
- if (xsk_ring_prod__needs_wakeup(&xsk->tx))
+ complete_tx_l2fwd(xsk, fds);
+ if (xsk_ring_prod__needs_wakeup(&xsk->tx)) {
+ xsk->app_stats.tx_wakeup_sendtos++;
kick_tx(xsk);
+ }
ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
}
@@ -1134,7 +1379,7 @@ static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds)
xsk_ring_prod__submit(&xsk->tx, rcvd);
xsk_ring_cons__release(&xsk->rx, rcvd);
- xsk->rx_npkts += rcvd;
+ xsk->ring_stats.rx_npkts += rcvd;
xsk->outstanding_tx += rcvd;
}
@@ -1150,6 +1395,8 @@ static void l2fwd_all(void)
for (;;) {
if (opt_poll) {
+ for (i = 0; i < num_socks; i++)
+ xsks[i]->app_stats.opt_polls++;
ret = poll(fds, num_socks, opt_timeout);
if (ret <= 0)
continue;
@@ -1271,9 +1518,11 @@ int main(int argc, char **argv)
setlocale(LC_ALL, "");
- ret = pthread_create(&pt, NULL, poller, NULL);
- if (ret)
- exit_with_error(ret);
+ if (!opt_quiet) {
+ ret = pthread_create(&pt, NULL, poller, NULL);
+ if (ret)
+ exit_with_error(ret);
+ }
prev_time = get_nsecs();
start_time = prev_time;
@@ -1287,7 +1536,8 @@ int main(int argc, char **argv)
benchmark_done = true;
- pthread_join(pt, NULL);
+ if (!opt_quiet)
+ pthread_join(pt, NULL);
xdpsock_cleanup();
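
Editor's note: all of the dump_*_stats() helpers added above share one idiom: snapshot a counter, subtract the previous snapshot, and scale by the nanosecond delta between polls. A sketch of that arithmetic, not part of the patch, mirroring the sample's own monotonic-clock helper:

/* Editor's sketch: converting counter deltas into per-second rates. */
#include <time.h>

static unsigned long get_nsecs(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000000UL + ts.tv_nsec;
}

static double per_second(unsigned long cur, unsigned long prev, long dt_ns)
{
	/* dt_ns is the elapsed time between two get_nsecs() snapshots */
	return (cur - prev) * 1000000000. / dt_ns;
}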
diff --git a/samples/bpf/xsk_fwd.c b/samples/bpf/xsk_fwd.c
new file mode 100644
index 000000000000..1cd97c84c337
--- /dev/null
+++ b/samples/bpf/xsk_fwd.c
@@ -0,0 +1,1085 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2020 Intel Corporation. */
+
+#define _GNU_SOURCE
+#include <poll.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <netinet/ether.h>
+#include <net/if.h>
+
+#include <linux/bpf.h>
+#include <linux/if_link.h>
+#include <linux/if_xdp.h>
+
+#include <bpf/libbpf.h>
+#include <bpf/xsk.h>
+#include <bpf/bpf.h>
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+typedef __u64 u64;
+typedef __u32 u32;
+typedef __u16 u16;
+typedef __u8 u8;
+
+/* This program illustrates packet forwarding between multiple AF_XDP
+ * sockets in a multi-threaded environment. All threads share a common
+ * buffer pool, with each socket having its own private buffer cache.
+ *
+ * Example 1: Single thread handling two sockets. The packets received by socket
+ * A (interface IFA, queue QA) are forwarded to socket B (interface IFB, queue
+ * QB), while the packets received by socket B are forwarded to socket A. The
+ * thread is running on CPU core X:
+ *
+ * ./xsk_fwd -i IFA -q QA -i IFB -q QB -c X
+ *
+ * Example 2: Two threads, each handling two sockets. The thread running on CPU
+ * core X forwards all the packets received by socket A to socket B, and all the
+ * packets received by socket B to socket A. The thread running on CPU core Y is
+ * performing the same packet forwarding between sockets C and D:
+ *
+ * ./xsk_fwd -i IFA -q QA -i IFB -q QB -i IFC -q QC -i IFD -q QD
+ * -c CX -c CY
+ */
+
+/*
+ * Buffer pool and buffer cache
+ *
+ * For packet forwarding, the packet buffers are typically allocated from the
+ * pool for packet reception and freed back to the pool for further reuse once
+ * the packet transmission is completed.
+ *
+ * The buffer pool is shared between multiple threads. In order to minimize the
+ * access latency to the shared buffer pool, each thread creates one (or
+ * several) buffer caches, which, unlike the buffer pool, are private to the
+ * thread that creates them and therefore cannot be shared with other threads.
+ * The access to the shared pool is only needed either (A) when the cache gets
+ * empty due to repeated buffer allocations and it needs to be replenished from
+ * the pool, or (B) when the cache gets full due to repeated buffer frees and
+ * it needs to be flushed back to the pool.
+ *
+ * In a packet forwarding system, a packet received on any input port can
+ * potentially be transmitted on any output port, depending on the forwarding
+ * configuration. For AF_XDP sockets, for this to work with zero-copy of the
+ * packet buffers, it is required that the buffer pool memory fits into the
+ * UMEM area shared by all the sockets.
+ */
+
+struct bpool_params {
+ u32 n_buffers;
+ u32 buffer_size;
+ int mmap_flags;
+
+ u32 n_users_max;
+ u32 n_buffers_per_slab;
+};
+
+/* This buffer pool implementation organizes the buffers into equally sized
+ * slabs of *n_buffers_per_slab*. Initially, there are *n_slabs* slabs in the
+ * pool that are completely filled with buffer pointers (full slabs).
+ *
+ * Each buffer cache has a slab for buffer allocation and a slab for buffer
+ * free, with both of these slabs initially empty. When the cache's allocation
+ * slab goes empty, it is swapped with one of the available full slabs from the
+ * pool, if any is available. When the cache's free slab goes full, it is
+ * swapped for one of the empty slabs from the pool, which is guaranteed to
+ * succeed.
+ *
+ * Partially filled slabs never get traded between the cache and the pool
+ * (except when the cache itself is destroyed), which enables fast operation
+ * through pointer swapping.
+ */
+struct bpool {
+ struct bpool_params params;
+ pthread_mutex_t lock;
+ void *addr;
+
+ u64 **slabs;
+ u64 **slabs_reserved;
+ u64 *buffers;
+ u64 *buffers_reserved;
+
+ u64 n_slabs;
+ u64 n_slabs_reserved;
+ u64 n_buffers;
+
+ u64 n_slabs_available;
+ u64 n_slabs_reserved_available;
+
+ struct xsk_umem_config umem_cfg;
+ struct xsk_ring_prod umem_fq;
+ struct xsk_ring_cons umem_cq;
+ struct xsk_umem *umem;
+};
+
+static struct bpool *
+bpool_init(struct bpool_params *params,
+ struct xsk_umem_config *umem_cfg)
+{
+ struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
+ u64 n_slabs, n_slabs_reserved, n_buffers, n_buffers_reserved;
+ u64 slabs_size, slabs_reserved_size;
+ u64 buffers_size, buffers_reserved_size;
+ u64 total_size, i;
+ struct bpool *bp;
+ u8 *p;
+ int status;
+
+ /* mmap prep. */
+ if (setrlimit(RLIMIT_MEMLOCK, &r))
+ return NULL;
+
+ /* bpool internals dimensioning. */
+ n_slabs = (params->n_buffers + params->n_buffers_per_slab - 1) /
+ params->n_buffers_per_slab;
+ n_slabs_reserved = params->n_users_max * 2;
+ n_buffers = n_slabs * params->n_buffers_per_slab;
+ n_buffers_reserved = n_slabs_reserved * params->n_buffers_per_slab;
+
+ slabs_size = n_slabs * sizeof(u64 *);
+ slabs_reserved_size = n_slabs_reserved * sizeof(u64 *);
+ buffers_size = n_buffers * sizeof(u64);
+ buffers_reserved_size = n_buffers_reserved * sizeof(u64);
+
+ total_size = sizeof(struct bpool) +
+ slabs_size + slabs_reserved_size +
+ buffers_size + buffers_reserved_size;
+
+ /* bpool memory allocation. */
+ p = calloc(total_size, sizeof(u8));
+ if (!p)
+ return NULL;
+
+ /* bpool memory initialization. */
+ bp = (struct bpool *)p;
+ memcpy(&bp->params, params, sizeof(*params));
+ bp->params.n_buffers = n_buffers;
+
+ bp->slabs = (u64 **)&p[sizeof(struct bpool)];
+ bp->slabs_reserved = (u64 **)&p[sizeof(struct bpool) +
+ slabs_size];
+ bp->buffers = (u64 *)&p[sizeof(struct bpool) +
+ slabs_size + slabs_reserved_size];
+ bp->buffers_reserved = (u64 *)&p[sizeof(struct bpool) +
+ slabs_size + slabs_reserved_size + buffers_size];
+
+ bp->n_slabs = n_slabs;
+ bp->n_slabs_reserved = n_slabs_reserved;
+ bp->n_buffers = n_buffers;
+
+ for (i = 0; i < n_slabs; i++)
+ bp->slabs[i] = &bp->buffers[i * params->n_buffers_per_slab];
+ bp->n_slabs_available = n_slabs;
+
+ for (i = 0; i < n_slabs_reserved; i++)
+ bp->slabs_reserved[i] = &bp->buffers_reserved[i *
+ params->n_buffers_per_slab];
+ bp->n_slabs_reserved_available = n_slabs_reserved;
+
+ for (i = 0; i < n_buffers; i++)
+ bp->buffers[i] = i * params->buffer_size;
+
+ /* lock. */
+ status = pthread_mutex_init(&bp->lock, NULL);
+ if (status) {
+ free(p);
+ return NULL;
+ }
+
+ /* mmap. */
+ bp->addr = mmap(NULL,
+ n_buffers * params->buffer_size,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | params->mmap_flags,
+ -1,
+ 0);
+ if (bp->addr == MAP_FAILED) {
+ pthread_mutex_destroy(&bp->lock);
+ free(p);
+ return NULL;
+ }
+
+ /* umem. */
+ status = xsk_umem__create(&bp->umem,
+ bp->addr,
+ bp->params.n_buffers * bp->params.buffer_size,
+ &bp->umem_fq,
+ &bp->umem_cq,
+ umem_cfg);
+ if (status) {
+ munmap(bp->addr, bp->params.n_buffers * bp->params.buffer_size);
+ pthread_mutex_destroy(&bp->lock);
+ free(p);
+ return NULL;
+ }
+ memcpy(&bp->umem_cfg, umem_cfg, sizeof(*umem_cfg));
+
+ return bp;
+}
+
+static void
+bpool_free(struct bpool *bp)
+{
+ if (!bp)
+ return;
+
+ xsk_umem__delete(bp->umem);
+ munmap(bp->addr, bp->params.n_buffers * bp->params.buffer_size);
+ pthread_mutex_destroy(&bp->lock);
+ free(bp);
+}
+
+struct bcache {
+ struct bpool *bp;
+
+ u64 *slab_cons;
+ u64 *slab_prod;
+
+ u64 n_buffers_cons;
+ u64 n_buffers_prod;
+};
+
+static u32
+bcache_slab_size(struct bcache *bc)
+{
+ struct bpool *bp = bc->bp;
+
+ return bp->params.n_buffers_per_slab;
+}
+
+static struct bcache *
+bcache_init(struct bpool *bp)
+{
+ struct bcache *bc;
+
+ bc = calloc(1, sizeof(struct bcache));
+ if (!bc)
+ return NULL;
+
+ bc->bp = bp;
+ bc->n_buffers_cons = 0;
+ bc->n_buffers_prod = 0;
+
+ pthread_mutex_lock(&bp->lock);
+ if (bp->n_slabs_reserved_available == 0) {
+ pthread_mutex_unlock(&bp->lock);
+ free(bc);
+ return NULL;
+ }
+
+ bc->slab_cons = bp->slabs_reserved[bp->n_slabs_reserved_available - 1];
+ bc->slab_prod = bp->slabs_reserved[bp->n_slabs_reserved_available - 2];
+ bp->n_slabs_reserved_available -= 2;
+ pthread_mutex_unlock(&bp->lock);
+
+ return bc;
+}
+
+static void
+bcache_free(struct bcache *bc)
+{
+ struct bpool *bp;
+
+ if (!bc)
+ return;
+
+ /* In order to keep this example simple, the case of freeing any
+ * existing buffers from the cache back to the pool is ignored.
+ */
+
+ bp = bc->bp;
+ pthread_mutex_lock(&bp->lock);
+ bp->slabs_reserved[bp->n_slabs_reserved_available] = bc->slab_prod;
+ bp->slabs_reserved[bp->n_slabs_reserved_available + 1] = bc->slab_cons;
+ bp->n_slabs_reserved_available += 2;
+ pthread_mutex_unlock(&bp->lock);
+
+ free(bc);
+}
+
+/* To work correctly, the implementation requires that the *n_buffers* input
+ * argument is never greater than the buffer pool's *n_buffers_per_slab*. This
+ * is typically the case, with one exception taking place when a large number
+ * of buffers is allocated at init time (e.g. for the UMEM fill queue setup).
+ */
+static inline u32
+bcache_cons_check(struct bcache *bc, u32 n_buffers)
+{
+ struct bpool *bp = bc->bp;
+ u64 n_buffers_per_slab = bp->params.n_buffers_per_slab;
+ u64 n_buffers_cons = bc->n_buffers_cons;
+ u64 n_slabs_available;
+ u64 *slab_full;
+
+ /*
+ * Consumer slab is not empty: Use what's available locally. Do not
+ * look for more buffers from the pool when the ask can only be
+ * partially satisfied.
+ */
+ if (n_buffers_cons)
+ return (n_buffers_cons < n_buffers) ?
+ n_buffers_cons :
+ n_buffers;
+
+ /*
+ * Consumer slab is empty: look to trade the current consumer slab
+ * (full) for a full slab from the pool, if any is available.
+ */
+ pthread_mutex_lock(&bp->lock);
+ n_slabs_available = bp->n_slabs_available;
+ if (!n_slabs_available) {
+ pthread_mutex_unlock(&bp->lock);
+ return 0;
+ }
+
+ n_slabs_available--;
+ slab_full = bp->slabs[n_slabs_available];
+ bp->slabs[n_slabs_available] = bc->slab_cons;
+ bp->n_slabs_available = n_slabs_available;
+ pthread_mutex_unlock(&bp->lock);
+
+ bc->slab_cons = slab_full;
+ bc->n_buffers_cons = n_buffers_per_slab;
+ return n_buffers;
+}
+
+static inline u64
+bcache_cons(struct bcache *bc)
+{
+ u64 n_buffers_cons = bc->n_buffers_cons - 1;
+ u64 buffer;
+
+ buffer = bc->slab_cons[n_buffers_cons];
+ bc->n_buffers_cons = n_buffers_cons;
+ return buffer;
+}
+
+static inline void
+bcache_prod(struct bcache *bc, u64 buffer)
+{
+ struct bpool *bp = bc->bp;
+ u64 n_buffers_per_slab = bp->params.n_buffers_per_slab;
+ u64 n_buffers_prod = bc->n_buffers_prod;
+ u64 n_slabs_available;
+ u64 *slab_empty;
+
+ /*
+ * Producer slab is not yet full: store the current buffer to it.
+ */
+ if (n_buffers_prod < n_buffers_per_slab) {
+ bc->slab_prod[n_buffers_prod] = buffer;
+ bc->n_buffers_prod = n_buffers_prod + 1;
+ return;
+ }
+
+ /*
+ * Producer slab is full: trade the cache's current producer slab
+ * (full) for an empty slab from the pool, then store the current
+ * buffer to the new producer slab. As one full slab exists in the
+ * cache, it is guaranteed that there is at least one empty slab
+ * available in the pool.
+ */
+ pthread_mutex_lock(&bp->lock);
+ n_slabs_available = bp->n_slabs_available;
+ slab_empty = bp->slabs[n_slabs_available];
+ bp->slabs[n_slabs_available] = bc->slab_prod;
+ bp->n_slabs_available = n_slabs_available + 1;
+ pthread_mutex_unlock(&bp->lock);
+
+ slab_empty[0] = buffer;
+ bc->slab_prod = slab_empty;
+ bc->n_buffers_prod = 1;
+}
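
/* Editor's aside (not part of the patch): a minimal allocate/free
 * round-trip over the cache API defined above, assuming bp is a pool
 * returned by bpool_init() and u64 is the file's local typedef. Note
 * that bcache_cons_check() must succeed before bcache_cons() is used.
 */
static int move_one_buffer(struct bpool *bp)
{
	struct bcache *bc = bcache_init(bp);
	u64 buf;

	if (!bc)
		return -1;
	if (bcache_cons_check(bc, 1) < 1) {	/* reserve 1 buffer */
		bcache_free(bc);
		return -1;
	}
	buf = bcache_cons(bc);			/* take the buffer */
	bcache_prod(bc, buf);			/* and give it back */
	bcache_free(bc);
	return 0;
}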
+
+/*
+ * Port
+ *
+ * Each of the forwarding ports sits on top of an AF_XDP socket. In order for
+ * packet forwarding to happen with no packet buffer copy, all the sockets need
+ * to share the same UMEM area, which is used as the buffer pool memory.
+ */
+#ifndef MAX_BURST_RX
+#define MAX_BURST_RX 64
+#endif
+
+#ifndef MAX_BURST_TX
+#define MAX_BURST_TX 64
+#endif
+
+struct burst_rx {
+ u64 addr[MAX_BURST_RX];
+ u32 len[MAX_BURST_RX];
+};
+
+struct burst_tx {
+ u64 addr[MAX_BURST_TX];
+ u32 len[MAX_BURST_TX];
+ u32 n_pkts;
+};
+
+struct port_params {
+ struct xsk_socket_config xsk_cfg;
+ struct bpool *bp;
+ const char *iface;
+ u32 iface_queue;
+};
+
+struct port {
+ struct port_params params;
+
+ struct bcache *bc;
+
+ struct xsk_ring_cons rxq;
+ struct xsk_ring_prod txq;
+ struct xsk_ring_prod umem_fq;
+ struct xsk_ring_cons umem_cq;
+ struct xsk_socket *xsk;
+ int umem_fq_initialized;
+
+ u64 n_pkts_rx;
+ u64 n_pkts_tx;
+};
+
+static void
+port_free(struct port *p)
+{
+ if (!p)
+ return;
+
+ /* To keep this example simple, the code to free the buffers from the
+ * socket's receive and transmit queues, as well as from the UMEM fill
+ * and completion queues, is not included.
+ */
+
+ if (p->xsk)
+ xsk_socket__delete(p->xsk);
+
+ bcache_free(p->bc);
+
+ free(p);
+}
+
+static struct port *
+port_init(struct port_params *params)
+{
+ struct port *p;
+ u32 umem_fq_size, pos = 0;
+ int status, i;
+
+ /* Memory allocation and initialization. */
+ p = calloc(1, sizeof(struct port));
+ if (!p)
+ return NULL;
+
+ memcpy(&p->params, params, sizeof(p->params));
+ umem_fq_size = params->bp->umem_cfg.fill_size;
+
+ /* bcache. */
+ p->bc = bcache_init(params->bp);
+ if (!p->bc ||
+ (bcache_slab_size(p->bc) < umem_fq_size) ||
+ (bcache_cons_check(p->bc, umem_fq_size) < umem_fq_size)) {
+ port_free(p);
+ return NULL;
+ }
+
+ /* xsk socket. */
+ status = xsk_socket__create_shared(&p->xsk,
+ params->iface,
+ params->iface_queue,
+ params->bp->umem,
+ &p->rxq,
+ &p->txq,
+ &p->umem_fq,
+ &p->umem_cq,
+ &params->xsk_cfg);
+ if (status) {
+ port_free(p);
+ return NULL;
+ }
+
+ /* umem fq: the reserve below cannot fail, as the freshly created
+ * fill queue is empty and sized exactly umem_fq_size.
+ */
+ xsk_ring_prod__reserve(&p->umem_fq, umem_fq_size, &pos);
+
+ for (i = 0; i < umem_fq_size; i++)
+ *xsk_ring_prod__fill_addr(&p->umem_fq, pos + i) =
+ bcache_cons(p->bc);
+
+ xsk_ring_prod__submit(&p->umem_fq, umem_fq_size);
+ p->umem_fq_initialized = 1;
+
+ return p;
+}
+
+static inline u32
+port_rx_burst(struct port *p, struct burst_rx *b)
+{
+ u32 n_pkts, pos, i;
+
+ /* Make sure enough free buffers are available to replenish the fill
+ * queue after the RX burst; otherwise the received buffers could not
+ * be replaced.
+ */
+ n_pkts = ARRAY_SIZE(b->addr);
+
+ n_pkts = bcache_cons_check(p->bc, n_pkts);
+ if (!n_pkts)
+ return 0;
+
+ /* RXQ. */
+ n_pkts = xsk_ring_cons__peek(&p->rxq, n_pkts, &pos);
+ if (!n_pkts) {
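+ /* Nothing received: if the kernel asked to be woken up, kick it
+ * via poll() so it resumes servicing the fill queue, then retry
+ * on the next iteration.
+ */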
+ if (xsk_ring_prod__needs_wakeup(&p->umem_fq)) {
+ struct pollfd pollfd = {
+ .fd = xsk_socket__fd(p->xsk),
+ .events = POLLIN,
+ };
+
+ poll(&pollfd, 1, 0);
+ }
+ return 0;
+ }
+
+ for (i = 0; i < n_pkts; i++) {
+ b->addr[i] = xsk_ring_cons__rx_desc(&p->rxq, pos + i)->addr;
+ b->len[i] = xsk_ring_cons__rx_desc(&p->rxq, pos + i)->len;
+ }
+
+ xsk_ring_cons__release(&p->rxq, n_pkts);
+ p->n_pkts_rx += n_pkts;
+
+ /* UMEM FQ. */
+ for ( ; ; ) {
+ int status;
+
+ status = xsk_ring_prod__reserve(&p->umem_fq, n_pkts, &pos);
+ if (status == n_pkts)
+ break;
+
+ if (xsk_ring_prod__needs_wakeup(&p->umem_fq)) {
+ struct pollfd pollfd = {
+ .fd = xsk_socket__fd(p->xsk),
+ .events = POLLIN,
+ };
+
+ poll(&pollfd, 1, 0);
+ }
+ }
+
+ for (i = 0; i < n_pkts; i++)
+ *xsk_ring_prod__fill_addr(&p->umem_fq, pos + i) =
+ bcache_cons(p->bc);
+
+ xsk_ring_prod__submit(&p->umem_fq, n_pkts);
+
+ return n_pkts;
+}
+
+static inline void
+port_tx_burst(struct port *p, struct burst_tx *b)
+{
+ u32 n_pkts, pos, i;
+ int status;
+
+ /* UMEM CQ. */
+ n_pkts = p->params.bp->umem_cfg.comp_size;
+
+ n_pkts = xsk_ring_cons__peek(&p->umem_cq, n_pkts, &pos);
+
+ for (i = 0; i < n_pkts; i++) {
+ u64 addr = *xsk_ring_cons__comp_addr(&p->umem_cq, pos + i);
+
+ bcache_prod(p->bc, addr);
+ }
+
+ xsk_ring_cons__release(&p->umem_cq, n_pkts);
+
+ /* TXQ. */
+ n_pkts = b->n_pkts;
+
+ for ( ; ; ) {
+ status = xsk_ring_prod__reserve(&p->txq, n_pkts, &pos);
+ if (status == n_pkts)
+ break;
+
+ if (xsk_ring_prod__needs_wakeup(&p->txq))
+ sendto(xsk_socket__fd(p->xsk), NULL, 0, MSG_DONTWAIT,
+ NULL, 0);
+ }
+
+ for (i = 0; i < n_pkts; i++) {
+ xsk_ring_prod__tx_desc(&p->txq, pos + i)->addr = b->addr[i];
+ xsk_ring_prod__tx_desc(&p->txq, pos + i)->len = b->len[i];
+ }
+
+ xsk_ring_prod__submit(&p->txq, n_pkts);
+ if (xsk_ring_prod__needs_wakeup(&p->txq))
+ sendto(xsk_socket__fd(p->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
+ p->n_pkts_tx += n_pkts;
+}
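+
+/*
+ * Note that a buffer received on one port is recycled through the completion
+ * queue of the port that transmits it, which is one more reason why all the
+ * ports must draw from the same buffer pool.
+ */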
+
+/*
+ * Thread
+ *
+ * Packet forwarding threads.
+ */
+#ifndef MAX_PORTS_PER_THREAD
+#define MAX_PORTS_PER_THREAD 16
+#endif
+
+struct thread_data {
+ struct port *ports_rx[MAX_PORTS_PER_THREAD];
+ struct port *ports_tx[MAX_PORTS_PER_THREAD];
+ u32 n_ports_rx;
+ struct burst_rx burst_rx;
+ struct burst_tx burst_tx[MAX_PORTS_PER_THREAD];
+ u32 cpu_core_id;
+ int quit;
+};
+
+static void swap_mac_addresses(void *data)
+{
+ struct ether_header *eth = (struct ether_header *)data;
+ struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
+ struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
+ struct ether_addr tmp;
+
+ tmp = *src_addr;
+ *src_addr = *dst_addr;
+ *dst_addr = tmp;
+}
+
+static void *
+thread_func(void *arg)
+{
+ struct thread_data *t = arg;
+ cpu_set_t cpu_cores;
+ u32 i;
+
+ CPU_ZERO(&cpu_cores);
+ CPU_SET(t->cpu_core_id, &cpu_cores);
+ pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpu_cores);
+
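+ /*
+ * Round-robin over this thread's RX ports. The mask-based increment
+ * assumes n_ports_rx is a power of two; other port counts would
+ * require a modulo operation instead.
+ */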
+ for (i = 0; !t->quit; i = (i + 1) & (t->n_ports_rx - 1)) {
+ struct port *port_rx = t->ports_rx[i];
+ struct port *port_tx = t->ports_tx[i];
+ struct burst_rx *brx = &t->burst_rx;
+ struct burst_tx *btx = &t->burst_tx[i];
+ u32 n_pkts, j;
+
+ /* RX. */
+ n_pkts = port_rx_burst(port_rx, brx);
+ if (!n_pkts)
+ continue;
+
+ /* Process & TX. */
+ for (j = 0; j < n_pkts; j++) {
+ u64 addr = xsk_umem__add_offset_to_addr(brx->addr[j]);
+ u8 *pkt = xsk_umem__get_data(port_rx->params.bp->addr,
+ addr);
+
+ swap_mac_addresses(pkt);
+
+ btx->addr[btx->n_pkts] = brx->addr[j];
+ btx->len[btx->n_pkts] = brx->len[j];
+ btx->n_pkts++;
+
+ if (btx->n_pkts == MAX_BURST_TX) {
+ port_tx_burst(port_tx, btx);
+ btx->n_pkts = 0;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Process
+ */
+static const struct bpool_params bpool_params_default = {
+ .n_buffers = 64 * 1024,
+ .buffer_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+ .mmap_flags = 0,
+
+ .n_users_max = 16,
+ .n_buffers_per_slab = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
+};
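+
+/*
+ * With these defaults the buffer pool spans 64K buffers of 4KB each
+ * (XSK_UMEM__DEFAULT_FRAME_SIZE), i.e. 256MB of UMEM shared by all ports.
+ */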
+
+static const struct xsk_umem_config umem_cfg_default = {
+ .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
+ .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
+ .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
+ .flags = 0,
+};
+
+static const struct port_params port_params_default = {
+ .xsk_cfg = {
+ .rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
+ .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
+ .libbpf_flags = 0,
+ .xdp_flags = XDP_FLAGS_DRV_MODE,
+ .bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY,
+ },
+
+ .bp = NULL,
+ .iface = NULL,
+ .iface_queue = 0,
+};
+
+#ifndef MAX_PORTS
+#define MAX_PORTS 64
+#endif
+
+#ifndef MAX_THREADS
+#define MAX_THREADS 64
+#endif
+
+static struct bpool_params bpool_params;
+static struct xsk_umem_config umem_cfg;
+static struct bpool *bp;
+
+static struct port_params port_params[MAX_PORTS];
+static struct port *ports[MAX_PORTS];
+static u64 n_pkts_rx[MAX_PORTS];
+static u64 n_pkts_tx[MAX_PORTS];
+static int n_ports;
+
+static pthread_t threads[MAX_THREADS];
+static struct thread_data thread_data[MAX_THREADS];
+static int n_threads;
+
+static void
+print_usage(char *prog_name)
+{
+ const char *usage =
+ "Usage:\n"
+ "\t%s [ -b SIZE ] -c CORE -i INTERFACE [ -q QUEUE ]\n"
+ "\n"
+ "-c CORE CPU core to run a packet forwarding thread\n"
+ " on. May be invoked multiple times.\n"
+ "\n"
+ "-b SIZE Number of buffers in the buffer pool shared\n"
+ " by all the forwarding threads. Default: %u.\n"
+ "\n"
+ "-i INTERFACE Network interface. Each (INTERFACE, QUEUE)\n"
+ " pair specifies one forwarding port. May be\n"
+ " invoked multiple times.\n"
+ "\n"
+ "-q QUEUE Network interface queue for RX and TX. Each\n"
+ " (INTERFACE, QUEUE) pair specified one\n"
+ " forwarding port. Default: %u. May be invoked\n"
+ " multiple times.\n"
+ "\n";
+ printf(usage,
+ prog_name,
+ bpool_params_default.n_buffers,
+ port_params_default.iface_queue);
+}
+
+static int
+parse_args(int argc, char **argv)
+{
+ struct option lgopts[] = {
+ { NULL, 0, 0, 0 }
+ };
+ int opt, option_index;
+
+ /* Parse the input arguments. */
+ for ( ; ; ) {
+ opt = getopt_long(argc, argv, "b:c:i:q:", lgopts, &option_index);
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'b':
+ bpool_params.n_buffers = atoi(optarg);
+ break;
+
+ case 'c':
+ if (n_threads == MAX_THREADS) {
+ printf("Max number of threads (%d) reached.\n",
+ MAX_THREADS);
+ return -1;
+ }
+
+ thread_data[n_threads].cpu_core_id = atoi(optarg);
+ n_threads++;
+ break;
+
+ case 'i':
+ if (n_ports == MAX_PORTS) {
+ printf("Max number of ports (%d) reached.\n",
+ MAX_PORTS);
+ return -1;
+ }
+
+ port_params[n_ports].iface = optarg;
+ port_params[n_ports].iface_queue = 0;
+ n_ports++;
+ break;
+
+ case 'q':
+ if (n_ports == 0) {
+ printf("No port specified for queue.\n");
+ return -1;
+ }
+ port_params[n_ports - 1].iface_queue = atoi(optarg);
+ break;
+
+ default:
+ printf("Illegal argument.\n");
+ return -1;
+ }
+ }
+
+ optind = 1; /* reset getopt lib */
+
+ /* Check the input arguments. */
+ if (!n_ports) {
+ printf("No ports specified.\n");
+ return -1;
+ }
+
+ if (!n_threads) {
+ printf("No threads specified.\n");
+ return -1;
+ }
+
+ if (n_ports % n_threads) {
+ printf("Ports cannot be evenly distributed to threads.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static void
+print_port(u32 port_id)
+{
+ struct port *port = ports[port_id];
+
+ printf("Port %u: interface = %s, queue = %u\n",
+ port_id, port->params.iface, port->params.iface_queue);
+}
+
+static void
+print_thread(u32 thread_id)
+{
+ struct thread_data *t = &thread_data[thread_id];
+ u32 i;
+
+ printf("Thread %u (CPU core %u): ",
+ thread_id, t->cpu_core_id);
+
+ for (i = 0; i < t->n_ports_rx; i++) {
+ struct port *port_rx = t->ports_rx[i];
+ struct port *port_tx = t->ports_tx[i];
+
+ printf("(%s, %u) -> (%s, %u), ",
+ port_rx->params.iface,
+ port_rx->params.iface_queue,
+ port_tx->params.iface,
+ port_tx->params.iface_queue);
+ }
+
+ printf("\n");
+}
+
+static void
+print_port_stats_separator(void)
+{
+ printf("+-%4s-+-%12s-+-%13s-+-%12s-+-%13s-+\n",
+ "----",
+ "------------",
+ "-------------",
+ "------------",
+ "-------------");
+}
+
+static void
+print_port_stats_header(void)
+{
+ print_port_stats_separator();
+ printf("| %4s | %12s | %13s | %12s | %13s |\n",
+ "Port",
+ "RX packets",
+ "RX rate (pps)",
+ "TX packets",
+ "TX_rate (pps)");
+ print_port_stats_separator();
+}
+
+static void
+print_port_stats_trailer(void)
+{
+ print_port_stats_separator();
+ printf("\n");
+}
+
+static void
+print_port_stats(int port_id, u64 ns_diff)
+{
+ struct port *p = ports[port_id];
+ double rx_pps, tx_pps;
+
+ rx_pps = (p->n_pkts_rx - n_pkts_rx[port_id]) * 1000000000. / ns_diff;
+ tx_pps = (p->n_pkts_tx - n_pkts_tx[port_id]) * 1000000000. / ns_diff;
+
+ printf("| %4d | %12llu | %13.0f | %12llu | %13.0f |\n",
+ port_id,
+ p->n_pkts_rx,
+ rx_pps,
+ p->n_pkts_tx,
+ tx_pps);
+
+ n_pkts_rx[port_id] = p->n_pkts_rx;
+ n_pkts_tx[port_id] = p->n_pkts_tx;
+}
+
+static void
+print_port_stats_all(u64 ns_diff)
+{
+ int i;
+
+ print_port_stats_header();
+ for (i = 0; i < n_ports; i++)
+ print_port_stats(i, ns_diff);
+ print_port_stats_trailer();
+}
+
+static volatile sig_atomic_t quit;
+
+static void
+signal_handler(int sig)
+{
+ quit = 1;
+}
+
+static void remove_xdp_program(void)
+{
+ int i;
+
+ for (i = 0; i < n_ports; i++)
+ bpf_set_link_xdp_fd(if_nametoindex(port_params[i].iface), -1,
+ port_params[i].xsk_cfg.xdp_flags);
+}
+
+int main(int argc, char **argv)
+{
+ struct timespec time;
+ u64 ns0;
+ int i;
+
+ /* Parse args. */
+ memcpy(&bpool_params, &bpool_params_default,
+ sizeof(struct bpool_params));
+ memcpy(&umem_cfg, &umem_cfg_default,
+ sizeof(struct xsk_umem_config));
+ for (i = 0; i < MAX_PORTS; i++)
+ memcpy(&port_params[i], &port_params_default,
+ sizeof(struct port_params));
+
+ if (parse_args(argc, argv)) {
+ print_usage(argv[0]);
+ return -1;
+ }
+
+ /* Buffer pool initialization. */
+ bp = bpool_init(&bpool_params, &umem_cfg);
+ if (!bp) {
+ printf("Buffer pool initialization failed.\n");
+ return -1;
+ }
+ printf("Buffer pool created successfully.\n");
+
+ /* Ports initialization. */
+ for (i = 0; i < MAX_PORTS; i++)
+ port_params[i].bp = bp;
+
+ for (i = 0; i < n_ports; i++) {
+ ports[i] = port_init(&port_params[i]);
+ if (!ports[i]) {
+ printf("Port %d initialization failed.\n", i);
+ return -1;
+ }
+ print_port(i);
+ }
+ printf("All ports created successfully.\n");
+
+ /* Threads. */
+ for (i = 0; i < n_threads; i++) {
+ struct thread_data *t = &thread_data[i];
+ u32 n_ports_per_thread = n_ports / n_threads, j;
+
+ for (j = 0; j < n_ports_per_thread; j++) {
+ t->ports_rx[j] = ports[i * n_ports_per_thread + j];
+ t->ports_tx[j] = ports[i * n_ports_per_thread +
+ (j + 1) % n_ports_per_thread];
+ }
+
+ t->n_ports_rx = n_ports_per_thread;
+
+ print_thread(i);
+ }
+
+ for (i = 0; i < n_threads; i++) {
+ int status;
+
+ status = pthread_create(&threads[i],
+ NULL,
+ thread_func,
+ &thread_data[i]);
+ if (status) {
+ printf("Thread %d creation failed.\n", i);
+ return -1;
+ }
+ }
+ printf("All threads created successfully.\n");
+
+ /* Print statistics. */
+ signal(SIGINT, signal_handler);
+ signal(SIGTERM, signal_handler);
+ signal(SIGABRT, signal_handler);
+
+ clock_gettime(CLOCK_MONOTONIC, &time);
+ ns0 = time.tv_sec * 1000000000ULL + time.tv_nsec;
+ for ( ; !quit; ) {
+ u64 ns1, ns_diff;
+
+ sleep(1);
+ clock_gettime(CLOCK_MONOTONIC, &time);
+ ns1 = time.tv_sec * 1000000000ULL + time.tv_nsec;
+ ns_diff = ns1 - ns0;
+ ns0 = ns1;
+
+ print_port_stats_all(ns_diff);
+ }
+
+ /* Threads completion. */
+ printf("Quit.\n");
+ for (i = 0; i < n_threads; i++)
+ thread_data[i].quit = 1;
+
+ for (i = 0; i < n_threads; i++)
+ pthread_join(threads[i], NULL);
+
+ for (i = 0; i < n_ports; i++)
+ port_free(ports[i]);
+
+ bpool_free(bp);
+
+ remove_xdp_program();
+
+ return 0;
+}
diff --git a/scripts/bpf_helpers_doc.py b/scripts/bpf_helpers_doc.py
index 5bfa448b4704..7d86fdd190be 100755
--- a/scripts/bpf_helpers_doc.py
+++ b/scripts/bpf_helpers_doc.py
@@ -432,6 +432,8 @@ class PrinterHelpers(Printer):
'struct __sk_buff',
'struct sk_msg_md',
'struct xdp_md',
+ 'struct path',
+ 'struct btf_ptr',
]
known_types = {
'...',
@@ -472,6 +474,8 @@ class PrinterHelpers(Printer):
'struct tcp_request_sock',
'struct udp6_sock',
'struct task_struct',
+ 'struct path',
+ 'struct btf_ptr',
}
mapped_types = {
'u8': '__u8',
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index e6e2d9e5ff48..dbde59d343b1 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -341,9 +341,9 @@ fi
vmlinux_link vmlinux "${kallsymso}" ${btf_vmlinux_bin_o}
# fill in BTF IDs
-if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
-info BTFIDS vmlinux
-${RESOLVE_BTFIDS} vmlinux
+if [ -n "${CONFIG_DEBUG_INFO_BTF}" -a -n "${CONFIG_BPF}" ]; then
+ info BTFIDS vmlinux
+ ${RESOLVE_BTFIDS} vmlinux
fi
if [ -n "${CONFIG_BUILDTIME_TABLE_SORT}" ]; then
diff --git a/security/bpf/hooks.c b/security/bpf/hooks.c
index 32d32d485451..788667d582ae 100644
--- a/security/bpf/hooks.c
+++ b/security/bpf/hooks.c
@@ -11,6 +11,7 @@ static struct security_hook_list bpf_lsm_hooks[] __lsm_ro_after_init = {
LSM_HOOK_INIT(NAME, bpf_lsm_##NAME),
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
+ LSM_HOOK_INIT(inode_free_security, bpf_inode_storage_free),
};
static int __init bpf_lsm_init(void)
@@ -20,7 +21,12 @@ static int __init bpf_lsm_init(void)
return 0;
}
+struct lsm_blob_sizes bpf_lsm_blob_sizes __lsm_ro_after_init = {
+ .lbs_inode = sizeof(struct bpf_storage_blob),
+};
+
DEFINE_LSM(bpf) = {
.name = "bpf",
.init = bpf_lsm_init,
+ .blobs = &bpf_lsm_blob_sizes
};
diff --git a/tools/bpf/bpftool/Documentation/Makefile b/tools/bpf/bpftool/Documentation/Makefile
index 815ac9804aee..f33cb02de95c 100644
--- a/tools/bpf/bpftool/Documentation/Makefile
+++ b/tools/bpf/bpftool/Documentation/Makefile
@@ -19,7 +19,7 @@ man8dir = $(mandir)/man8
# Load targets for building eBPF helpers man page.
include ../../Makefile.helpers
-MAN8_RST = $(filter-out $(HELPERS_RST),$(wildcard *.rst))
+MAN8_RST = $(wildcard bpftool*.rst)
_DOC_MAN8 = $(patsubst %.rst,%.8,$(MAN8_RST))
DOC_MAN8 = $(addprefix $(OUTPUT),$(_DOC_MAN8))
@@ -28,12 +28,23 @@ man: man8 helpers
man8: $(DOC_MAN8)
RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+RST2MAN_OPTS += --verbose
+
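+# list_pages expands to every man page other than the one being generated, so
+# that each page's SEE ALSO section references all of its sibling pages.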
+list_pages = $(sort $(basename $(filter-out $(1),$(MAN8_RST))))
+see_also = $(subst " ",, \
+ "\n" \
+ "SEE ALSO\n" \
+ "========\n" \
+ "\t**bpf**\ (2),\n" \
+ "\t**bpf-helpers**\\ (7)" \
+ $(foreach page,$(call list_pages,$(1)),",\n\t**$(page)**\\ (8)") \
+ "\n")
$(OUTPUT)%.8: %.rst
ifndef RST2MAN_DEP
$(error "rst2man not found, but required to generate man pages")
endif
- $(QUIET_GEN)rst2man $< > $@
+ $(QUIET_GEN)( cat $< ; printf "%b" $(call see_also,$<) ) | rst2man $(RST2MAN_OPTS) > $@
clean: helpers-clean
$(call QUIET_CLEAN, Documentation)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
index 896f4c6c2870..ff4d327a582e 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst
@@ -71,26 +71,12 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
**# bpftool btf dump id 1226**
+
::
[1] PTR '(anon)' type_id=2
@@ -104,6 +90,7 @@ EXAMPLES
This gives an example of default output for all supported BTF kinds.
**$ cat prog.c**
+
::
struct fwd_struct;
@@ -144,6 +131,7 @@ This gives an example of default output for all supported BTF kinds.
}
**$ bpftool btf dump file prog.o**
+
::
[1] PTR '(anon)' type_id=2
@@ -229,20 +217,3 @@ All the standard ways to specify map or program are supported:
**# bpftool btf dump prog tag b88e0a09b1d9759d**
**# bpftool btf dump prog pinned /sys/fs/bpf/prog_name**
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index a226aee3574f..790944c35602 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -116,26 +116,11 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
Show file names of pinned programs.
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
EXAMPLES
========
|
@@ -158,19 +143,3 @@ EXAMPLES
::
ID AttachType AttachFlags Name
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index 8609f06e71de..dd3771bdbc57 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -71,35 +71,4 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
+ .. include:: common_options.rst
diff --git a/tools/bpf/bpftool/Documentation/bpftool-gen.rst b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
index df85dbd962c0..84cf0639696f 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-gen.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-gen.rst
@@ -126,26 +126,12 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON,
- this option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
**$ cat example.c**
+
::
#include <stdbool.h>
@@ -187,6 +173,7 @@ This is example BPF application with two BPF programs and a mix of BPF maps
and global variables.
**$ bpftool gen skeleton example.o**
+
::
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
@@ -241,6 +228,7 @@ and global variables.
#endif /* __EXAMPLE_SKEL_H__ */
**$ cat example_user.c**
+
::
#include "example.skel.h"
@@ -283,6 +271,7 @@ and global variables.
}
**# ./example_user**
+
::
my_map name: my_map
@@ -290,19 +279,3 @@ and global variables.
my_static_var: 7
This is a stripped-out version of skeleton generated for above example code.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-iter.rst b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
index 070ffacb42b5..51f49bead619 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-iter.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-iter.rst
@@ -51,16 +51,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -77,19 +68,3 @@ EXAMPLES
Create a file-based bpf iterator from bpf_iter_hashmap.o and map with
id 20, and pin it to /sys/fs/bpf/my_hashmap
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-link.rst b/tools/bpf/bpftool/Documentation/bpftool-link.rst
index 4a52e7a93339..5f7db2a837cc 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-link.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-link.rst
@@ -21,7 +21,7 @@ LINK COMMANDS
| **bpftool** **link { show | list }** [*LINK*]
| **bpftool** **link pin** *LINK* *FILE*
-| **bpftool** **link detach *LINK*
+| **bpftool** **link detach** *LINK*
| **bpftool** **link help**
|
| *LINK* := { **id** *LINK_ID* | **pinned** *FILE* }
@@ -62,18 +62,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
When showing BPF links, show file names of pinned
@@ -83,10 +72,6 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf.
-
EXAMPLES
========
**# bpftool link show**
@@ -121,20 +106,3 @@ EXAMPLES
::
-rw------- 1 root root 0 Apr 23 21:39 link
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-map.rst b/tools/bpf/bpftool/Documentation/bpftool-map.rst
index 41e2a74252d0..dade10cdf295 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-map.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-map.rst
@@ -23,7 +23,8 @@ MAP COMMANDS
| **bpftool** **map** { **show** | **list** } [*MAP*]
| **bpftool** **map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* \
-| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
+| **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] \
+| [**dev** *NAME*]
| **bpftool** **map dump** *MAP*
| **bpftool** **map update** *MAP* [**key** *DATA*] [**value** *VALUE*] [*UPDATE_FLAGS*]
| **bpftool** **map lookup** *MAP* [**key** *DATA*]
@@ -49,7 +50,7 @@ MAP COMMANDS
| | **lru_percpu_hash** | **lpm_trie** | **array_of_maps** | **hash_of_maps**
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
-| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** }
+| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage** }
DESCRIPTION
===========
@@ -67,7 +68,7 @@ DESCRIPTION
maps. On such kernels bpftool will automatically emit this
information as well.
- **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**dev** *NAME*]
+ **bpftool map create** *FILE* **type** *TYPE* **key** *KEY_SIZE* **value** *VALUE_SIZE* **entries** *MAX_ENTRIES* **name** *NAME* [**flags** *FLAGS*] [**inner_map** *MAP*] [**dev** *NAME*]
Create a new map with given parameters and pin it to *bpffs*
as *FILE*.
@@ -75,6 +76,11 @@ DESCRIPTION
desired flags, e.g. 1024 for **BPF_F_MMAPABLE** (see bpf.h
UAPI header for existing flags).
+ To create maps of type array-of-maps or hash-of-maps, the
+ **inner_map** keyword must be used to pass an inner map. The
+ kernel needs it to collect metadata related to the inner maps
+ that the new map will work with.
+
Keyword **dev** expects a network interface name, and is used
to request hardware offload for the map.
@@ -155,18 +161,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
Show file names of pinned maps.
@@ -175,13 +170,10 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
-
EXAMPLES
========
**# bpftool map show**
+
::
10: hash name some_map flags 0x0
@@ -203,6 +195,7 @@ The following three commands are equivalent:
**# bpftool map dump id 10**
+
::
key: 00 01 02 03 value: 00 01 02 03 04 05 06 07
@@ -210,6 +203,7 @@ The following three commands are equivalent:
Found 2 elements
**# bpftool map getnext id 10 key 0 1 2 3**
+
::
key:
@@ -276,19 +270,3 @@ would be lost as soon as bpftool exits).
key: 00 00 00 00 value: 22 02 00 00
Found 1 element
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst
index aa7450736179..d8165d530937 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-net.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst
@@ -75,22 +75,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -187,20 +172,3 @@ EXAMPLES
::
xdp:
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-perf.rst b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
index 9c592b7c6775..e958ce91de72 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-perf.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-perf.rst
@@ -40,22 +40,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available from libbpf, including debug-level
- information.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -78,20 +63,3 @@ EXAMPLES
{"pid":21765,"fd":5,"prog_id":7,"fd_type":"kretprobe","func":"__x64_sys_nanosleep","offset":0}, \
{"pid":21767,"fd":5,"prog_id":8,"fd_type":"tracepoint","tracepoint":"sys_enter_nanosleep"}, \
{"pid":21800,"fd":5,"prog_id":9,"fd_type":"uprobe","filename":"/home/yhs/a.out","offset":1159}]
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 82e356b664e8..358c7309d419 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -210,18 +210,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-f, --bpffs
When showing BPF programs, show file names of pinned
@@ -234,11 +223,6 @@ OPTIONS
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
-
EXAMPLES
========
**# bpftool prog show**
@@ -342,19 +326,3 @@ EXAMPLES
40176203 cycles (83.05%)
42518139 instructions # 1.06 insns per cycle (83.39%)
123 llc_misses # 2.89 LLC misses per million insns (83.15%)
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
index d93cd1cb8b0f..506e70ee78e9 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-struct_ops.rst
@@ -60,23 +60,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short generic help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
+ .. include:: common_options.rst
EXAMPLES
========
@@ -98,20 +82,3 @@ EXAMPLES
::
Registered tcp_congestion_ops cubic id 110
-
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool**\ (8),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/bpftool.rst b/tools/bpf/bpftool/Documentation/bpftool.rst
index 420d4d5df8b6..e7d949334961 100644
--- a/tools/bpf/bpftool/Documentation/bpftool.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool.rst
@@ -46,18 +46,7 @@ DESCRIPTION
OPTIONS
=======
- -h, --help
- Print short help message (similar to **bpftool help**).
-
- -V, --version
- Print version number (similar to **bpftool version**).
-
- -j, --json
- Generate JSON output. For commands that cannot produce JSON, this
- option has no effect.
-
- -p, --pretty
- Generate human-readable JSON output. Implies **-j**.
+ .. include:: common_options.rst
-m, --mapcompat
Allow loading maps with unknown map definitions.
@@ -65,24 +54,3 @@ OPTIONS
-n, --nomount
Do not automatically attempt to mount any virtual file system
(such as tracefs or BPF virtual file system) when necessary.
-
- -d, --debug
- Print all logs available, even debug-level information. This
- includes logs from libbpf as well as from the verifier, when
- attempting to load programs.
-
-SEE ALSO
-========
- **bpf**\ (2),
- **bpf-helpers**\ (7),
- **bpftool-btf**\ (8),
- **bpftool-cgroup**\ (8),
- **bpftool-feature**\ (8),
- **bpftool-gen**\ (8),
- **bpftool-iter**\ (8),
- **bpftool-link**\ (8),
- **bpftool-map**\ (8),
- **bpftool-net**\ (8),
- **bpftool-perf**\ (8),
- **bpftool-prog**\ (8),
- **bpftool-struct_ops**\ (8)
diff --git a/tools/bpf/bpftool/Documentation/common_options.rst b/tools/bpf/bpftool/Documentation/common_options.rst
new file mode 100644
index 000000000000..05d06c74dcaa
--- /dev/null
+++ b/tools/bpf/bpftool/Documentation/common_options.rst
@@ -0,0 +1,22 @@
+-h, --help
+ Print short help message (similar to **bpftool help**).
+
+-V, --version
+ Print version number (similar to **bpftool version**), and optional
+ features that were included when bpftool was compiled. Optional
+ features include linking against libbfd to provide the disassembler
+ for JIT-ted programs (**bpftool prog dump jited**) and usage of BPF
+ skeletons (some features like **bpftool prog profile** or showing
+ PIDs associated with BPF objects may rely on it).
+
+-j, --json
+ Generate JSON output. For commands that cannot produce JSON, this
+ option has no effect.
+
+-p, --pretty
+ Generate human-readable JSON output. Implies **-j**.
+
+-d, --debug
+ Print all logs available, even debug-level information. This includes
+ logs from libbpf as well as from the verifier, when attempting to
+ load programs.
diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile
index 4828913703b6..f60e6ad3a1df 100644
--- a/tools/bpf/bpftool/Makefile
+++ b/tools/bpf/bpftool/Makefile
@@ -176,7 +176,11 @@ $(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
-clean: $(LIBBPF)-clean
+feature-detect-clean:
+ $(call QUIET_CLEAN, feature-detect)
+ $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
+
+clean: $(LIBBPF)-clean feature-detect-clean
$(call QUIET_CLEAN, bpftool)
$(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d
$(Q)$(RM) -- $(BPFTOOL_BOOTSTRAP) $(OUTPUT)*.skel.h $(OUTPUT)vmlinux.h
diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool
index f53ed2f1a4aa..3f1da30c4da6 100644
--- a/tools/bpf/bpftool/bash-completion/bpftool
+++ b/tools/bpf/bpftool/bash-completion/bpftool
@@ -704,13 +704,31 @@ _bpftool()
lru_percpu_hash lpm_trie array_of_maps \
hash_of_maps devmap devmap_hash sockmap cpumap \
xskmap sockhash cgroup_storage reuseport_sockarray \
- percpu_cgroup_storage queue stack' -- \
+ percpu_cgroup_storage queue stack sk_storage \
+ struct_ops inode_storage' -- \
"$cur" ) )
return 0
;;
- key|value|flags|name|entries)
+ key|value|flags|entries)
return 0
;;
+ inner_map)
+ COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) )
+ return 0
+ ;;
+ id)
+ _bpftool_get_map_ids
+ ;;
+ name)
+ case $pprev in
+ inner_map)
+ _bpftool_get_map_names
+ ;;
+ *)
+ return 0
+ ;;
+ esac
+ ;;
*)
_bpftool_once_attr 'type'
_bpftool_once_attr 'key'
@@ -718,6 +736,9 @@ _bpftool()
_bpftool_once_attr 'entries'
_bpftool_once_attr 'name'
_bpftool_once_attr 'flags'
+ if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
+ _bpftool_once_attr 'inner_map'
+ fi
_bpftool_once_attr 'dev'
return 0
;;
diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c
index f61184653633..4033c46d83e7 100644
--- a/tools/bpf/bpftool/gen.c
+++ b/tools/bpf/bpftool/gen.c
@@ -19,11 +19,9 @@
#include <sys/mman.h>
#include <bpf/btf.h>
-#include "bpf/libbpf_internal.h"
#include "json_writer.h"
#include "main.h"
-
#define MAX_OBJ_NAME_LEN 64
static void sanitize_identifier(char *name)
diff --git a/tools/bpf/bpftool/json_writer.c b/tools/bpf/bpftool/json_writer.c
index 86501cd3c763..7fea83bedf48 100644
--- a/tools/bpf/bpftool/json_writer.c
+++ b/tools/bpf/bpftool/json_writer.c
@@ -119,6 +119,12 @@ void jsonw_pretty(json_writer_t *self, bool on)
self->pretty = on;
}
+void jsonw_reset(json_writer_t *self)
+{
+ assert(self->depth == 0);
+ self->sep = '\0';
+}
+
/* Basic blocks */
static void jsonw_begin(json_writer_t *self, int c)
{
diff --git a/tools/bpf/bpftool/json_writer.h b/tools/bpf/bpftool/json_writer.h
index 35cf1f00f96c..8ace65cdb92f 100644
--- a/tools/bpf/bpftool/json_writer.h
+++ b/tools/bpf/bpftool/json_writer.h
@@ -27,6 +27,9 @@ void jsonw_destroy(json_writer_t **self_p);
/* Cause output to have pretty whitespace */
void jsonw_pretty(json_writer_t *self, bool on);
+/* Reset separator to create new JSON */
+void jsonw_reset(json_writer_t *self);
+
/* Add property name */
void jsonw_name(json_writer_t *self, const char *name);
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index a89f09e3c848..e77e1525d20a 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -77,6 +77,22 @@ static void show_link_attach_type_json(__u32 attach_type, json_writer_t *wtr)
jsonw_uint_field(wtr, "attach_type", attach_type);
}
+static bool is_iter_map_target(const char *target_name)
+{
+ return strcmp(target_name, "bpf_map_elem") == 0 ||
+ strcmp(target_name, "bpf_sk_storage_map") == 0;
+}
+
+static void show_iter_json(struct bpf_link_info *info, json_writer_t *wtr)
+{
+ const char *target_name = u64_to_ptr(info->iter.target_name);
+
+ jsonw_string_field(wtr, "target_name", target_name);
+
+ if (is_iter_map_target(target_name))
+ jsonw_uint_field(wtr, "map_id", info->iter.map.map_id);
+}
+
static int get_prog_info(int prog_id, struct bpf_prog_info *info)
{
__u32 len = sizeof(*info);
@@ -128,6 +144,9 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
info->cgroup.cgroup_id);
show_link_attach_type_json(info->cgroup.attach_type, json_wtr);
break;
+ case BPF_LINK_TYPE_ITER:
+ show_iter_json(info, json_wtr);
+ break;
case BPF_LINK_TYPE_NETNS:
jsonw_uint_field(json_wtr, "netns_ino",
info->netns.netns_ino);
@@ -175,6 +194,16 @@ static void show_link_attach_type_plain(__u32 attach_type)
printf("attach_type %u ", attach_type);
}
+static void show_iter_plain(struct bpf_link_info *info)
+{
+ const char *target_name = u64_to_ptr(info->iter.target_name);
+
+ printf("target_name %s ", target_name);
+
+ if (is_iter_map_target(target_name))
+ printf("map_id %u ", info->iter.map.map_id);
+}
+
static int show_link_close_plain(int fd, struct bpf_link_info *info)
{
struct bpf_prog_info prog_info;
@@ -204,6 +233,9 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info)
printf("\n\tcgroup_id %zu ", (size_t)info->cgroup.cgroup_id);
show_link_attach_type_plain(info->cgroup.attach_type);
break;
+ case BPF_LINK_TYPE_ITER:
+ show_iter_plain(info);
+ break;
case BPF_LINK_TYPE_NETNS:
printf("\n\tnetns_ino %u ", info->netns.netns_ino);
show_link_attach_type_plain(info->netns.attach_type);
@@ -231,7 +263,7 @@ static int do_show_link(int fd)
{
struct bpf_link_info info;
__u32 len = sizeof(info);
- char raw_tp_name[256];
+ char buf[256];
int err;
memset(&info, 0, sizeof(info));
@@ -245,8 +277,14 @@ again:
}
if (info.type == BPF_LINK_TYPE_RAW_TRACEPOINT &&
!info.raw_tracepoint.tp_name) {
- info.raw_tracepoint.tp_name = (unsigned long)&raw_tp_name;
- info.raw_tracepoint.tp_name_len = sizeof(raw_tp_name);
+ info.raw_tracepoint.tp_name = (unsigned long)&buf;
+ info.raw_tracepoint.tp_name_len = sizeof(buf);
+ goto again;
+ }
+ if (info.type == BPF_LINK_TYPE_ITER &&
+ !info.iter.target_name) {
+ info.iter.target_name = (unsigned long)&buf;
+ info.iter.target_name_len = sizeof(buf);
goto again;
}
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index 4a191fcbeb82..682daaa49e6a 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -70,13 +70,42 @@ static int do_help(int argc, char **argv)
static int do_version(int argc, char **argv)
{
+#ifdef HAVE_LIBBFD_SUPPORT
+ const bool has_libbfd = true;
+#else
+ const bool has_libbfd = false;
+#endif
+#ifdef BPFTOOL_WITHOUT_SKELETONS
+ const bool has_skeletons = false;
+#else
+ const bool has_skeletons = true;
+#endif
+
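+ /*
+ * Example plain-text output (the exact feature list depends on how
+ * this bpftool binary was built):
+ *
+ *   bpftool v5.9.0
+ *   features: libbfd, skeletons
+ */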
if (json_output) {
- jsonw_start_object(json_wtr);
+ jsonw_start_object(json_wtr); /* root object */
+
jsonw_name(json_wtr, "version");
jsonw_printf(json_wtr, "\"%s\"", BPFTOOL_VERSION);
- jsonw_end_object(json_wtr);
+
+ jsonw_name(json_wtr, "features");
+ jsonw_start_object(json_wtr); /* features */
+ jsonw_bool_field(json_wtr, "libbfd", has_libbfd);
+ jsonw_bool_field(json_wtr, "skeletons", has_skeletons);
+ jsonw_end_object(json_wtr); /* features */
+
+ jsonw_end_object(json_wtr); /* root object */
} else {
+ unsigned int nb_features = 0;
+
printf("%s v%s\n", bin_name, BPFTOOL_VERSION);
+ printf("features:");
+ if (has_libbfd) {
+ printf(" libbfd");
+ nb_features++;
+ }
+ if (has_skeletons)
+ printf("%s skeletons", nb_features++ ? "," : "");
+ printf("\n");
}
return 0;
}
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index 3a27d31a1856..a7efbd84fbcc 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -50,6 +50,7 @@ const char * const map_type_name[] = {
[BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
[BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
+ [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
};
const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
@@ -212,8 +213,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
jsonw_end_object(json_wtr);
}
-static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
- const char *error_msg)
+static void
+print_entry_error_msg(struct bpf_map_info *info, unsigned char *key,
+ const char *error_msg)
{
int msg_size = strlen(error_msg);
bool single_line, break_names;
@@ -231,6 +233,40 @@ static void print_entry_error(struct bpf_map_info *info, unsigned char *key,
printf("\n");
}
+static void
+print_entry_error(struct bpf_map_info *map_info, void *key, int lookup_errno)
+{
+ /* For prog_array maps or arrays of maps, failure to lookup the value
+ * means there is no entry for that key. Do not print an error message
+ * in that case.
+ */
+ if ((map_is_map_of_maps(map_info->type) ||
+ map_is_map_of_progs(map_info->type)) && lookup_errno == ENOENT)
+ return;
+
+ if (json_output) {
+ jsonw_start_object(json_wtr); /* entry */
+ jsonw_name(json_wtr, "key");
+ print_hex_data_json(key, map_info->key_size);
+ jsonw_name(json_wtr, "value");
+ jsonw_start_object(json_wtr); /* error */
+ jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
+ jsonw_end_object(json_wtr); /* error */
+ jsonw_end_object(json_wtr); /* entry */
+ } else {
+ const char *msg = NULL;
+
+ if (lookup_errno == ENOENT)
+ msg = "<no entry>";
+ else if (lookup_errno == ENOSPC &&
+ map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
+ msg = "<cannot read>";
+
+ print_entry_error_msg(map_info, key,
+ msg ? : strerror(lookup_errno));
+ }
+}
+
static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
unsigned char *value)
{
@@ -712,56 +748,23 @@ static int dump_map_elem(int fd, void *key, void *value,
struct bpf_map_info *map_info, struct btf *btf,
json_writer_t *btf_wtr)
{
- int num_elems = 0;
- int lookup_errno;
-
- if (!bpf_map_lookup_elem(fd, key, value)) {
- if (json_output) {
- print_entry_json(map_info, key, value, btf);
- } else {
- if (btf) {
- struct btf_dumper d = {
- .btf = btf,
- .jw = btf_wtr,
- .is_plain_text = true,
- };
-
- do_dump_btf(&d, map_info, key, value);
- } else {
- print_entry_plain(map_info, key, value);
- }
- num_elems++;
- }
- return num_elems;
+ if (bpf_map_lookup_elem(fd, key, value)) {
+ print_entry_error(map_info, key, errno);
+ return -1;
}
- /* lookup error handling */
- lookup_errno = errno;
-
- if (map_is_map_of_maps(map_info->type) ||
- map_is_map_of_progs(map_info->type))
- return 0;
-
if (json_output) {
- jsonw_start_object(json_wtr);
- jsonw_name(json_wtr, "key");
- print_hex_data_json(key, map_info->key_size);
- jsonw_name(json_wtr, "value");
- jsonw_start_object(json_wtr);
- jsonw_string_field(json_wtr, "error", strerror(lookup_errno));
- jsonw_end_object(json_wtr);
- jsonw_end_object(json_wtr);
- } else {
- const char *msg = NULL;
-
- if (lookup_errno == ENOENT)
- msg = "<no entry>";
- else if (lookup_errno == ENOSPC &&
- map_info->type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY)
- msg = "<cannot read>";
+ print_entry_json(map_info, key, value, btf);
+ } else if (btf) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
- print_entry_error(map_info, key,
- msg ? : strerror(lookup_errno));
+ do_dump_btf(&d, map_info, key, value);
+ } else {
+ print_entry_plain(map_info, key, value);
}
return 0;
@@ -872,7 +875,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
err = 0;
break;
}
- num_elems += dump_map_elem(fd, key, value, info, btf, wtr);
+ if (!dump_map_elem(fd, key, value, info, btf, wtr))
+ num_elems++;
prev_key = key;
}
@@ -1246,7 +1250,7 @@ static int do_create(int argc, char **argv)
{
struct bpf_create_map_attr attr = { NULL, };
const char *pinfile;
- int err, fd;
+ int err = -1, fd;
if (!REQ_ARGS(7))
return -1;
@@ -1261,13 +1265,13 @@ static int do_create(int argc, char **argv)
if (attr.map_type) {
p_err("map type already specified");
- return -1;
+ goto exit;
}
attr.map_type = map_type_from_str(*argv);
if ((int)attr.map_type < 0) {
p_err("unrecognized map type: %s", *argv);
- return -1;
+ goto exit;
}
NEXT_ARG();
} else if (is_prefix(*argv, "name")) {
@@ -1276,43 +1280,56 @@ static int do_create(int argc, char **argv)
} else if (is_prefix(*argv, "key")) {
if (parse_u32_arg(&argc, &argv, &attr.key_size,
"key size"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "value")) {
if (parse_u32_arg(&argc, &argv, &attr.value_size,
"value size"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "entries")) {
if (parse_u32_arg(&argc, &argv, &attr.max_entries,
"max entries"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "flags")) {
if (parse_u32_arg(&argc, &argv, &attr.map_flags,
"flags"))
- return -1;
+ goto exit;
} else if (is_prefix(*argv, "dev")) {
NEXT_ARG();
if (attr.map_ifindex) {
p_err("offload device already specified");
- return -1;
+ goto exit;
}
attr.map_ifindex = if_nametoindex(*argv);
if (!attr.map_ifindex) {
p_err("unrecognized netdevice '%s': %s",
*argv, strerror(errno));
- return -1;
+ goto exit;
}
NEXT_ARG();
+ } else if (is_prefix(*argv, "inner_map")) {
+ struct bpf_map_info info = {};
+ __u32 len = sizeof(info);
+ int inner_map_fd;
+
+ NEXT_ARG();
+ if (!REQ_ARGS(2))
+ usage();
+ inner_map_fd = map_parse_fd_and_info(&argc, &argv,
+ &info, &len);
+ if (inner_map_fd < 0)
+ return -1;
+ attr.inner_map_fd = inner_map_fd;
} else {
p_err("unknown arg %s", *argv);
- return -1;
+ goto exit;
}
}
if (!attr.name) {
p_err("map name not specified");
- return -1;
+ goto exit;
}
set_max_rlimit();
@@ -1320,17 +1337,22 @@ static int do_create(int argc, char **argv)
fd = bpf_create_map_xattr(&attr);
if (fd < 0) {
p_err("map create failed: %s", strerror(errno));
- return -1;
+ goto exit;
}
err = do_pin_fd(fd, pinfile);
close(fd);
if (err)
- return err;
+ goto exit;
if (json_output)
jsonw_null(json_wtr);
- return 0;
+
+exit:
+ if (attr.inner_map_fd > 0)
+ close(attr.inner_map_fd);
+
+ return err;
}
static int do_pop_dequeue(int argc, char **argv)
@@ -1416,7 +1438,7 @@ static int do_help(int argc, char **argv)
"Usage: %1$s %2$s { show | list } [MAP]\n"
" %1$s %2$s create FILE type TYPE key KEY_SIZE value VALUE_SIZE \\\n"
" entries MAX_ENTRIES name NAME [flags FLAGS] \\\n"
- " [dev NAME]\n"
+ " [inner_map MAP] [dev NAME]\n"
" %1$s %2$s dump MAP\n"
" %1$s %2$s update MAP [key DATA] [value VALUE] [UPDATE_FLAGS]\n"
" %1$s %2$s lookup MAP [key DATA]\n"
@@ -1442,7 +1464,7 @@ static int do_help(int argc, char **argv)
" lru_percpu_hash | lpm_trie | array_of_maps | hash_of_maps |\n"
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
- " queue | stack | sk_storage | struct_ops | ringbuf }\n"
+ " queue | stack | sk_storage | struct_ops | ringbuf | inode_storage }\n"
" " HELP_SPEC_OPTIONS "\n"
"",
bin_name, argv[-2]);
diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c
index 56c3a2bae3ef..910e7bac6e9e 100644
--- a/tools/bpf/bpftool/net.c
+++ b/tools/bpf/bpftool/net.c
@@ -6,22 +6,27 @@
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
+#include <time.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include <net/if.h>
#include <linux/if.h>
#include <linux/rtnetlink.h>
+#include <linux/socket.h>
#include <linux/tc_act/tc_bpf.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include "bpf/nlattr.h"
-#include "bpf/libbpf_internal.h"
#include "main.h"
#include "netlink_dumper.h"
+#ifndef SOL_NETLINK
+#define SOL_NETLINK 270
+#endif
+
struct ip_devname_ifindex {
char devname[64];
int ifindex;
@@ -85,6 +90,266 @@ static enum net_attach_type parse_attach_type(const char *str)
return net_attach_type_size;
}
+typedef int (*dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
+
+typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, dump_nlmsg_t, void *cookie);
+
+static int netlink_open(__u32 *nl_pid)
+{
+ struct sockaddr_nl sa;
+ socklen_t addrlen;
+ int one = 1, ret;
+ int sock;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.nl_family = AF_NETLINK;
+
+ sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
+ if (sock < 0)
+ return -errno;
+
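+ /* Best-effort: extended ack error reporting is optional, so failure
+ * to enable it is logged but not treated as fatal.
+ */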
+ if (setsockopt(sock, SOL_NETLINK, NETLINK_EXT_ACK,
+ &one, sizeof(one)) < 0) {
+ p_err("Netlink error reporting not supported");
+ }
+
+ if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ addrlen = sizeof(sa);
+ if (getsockname(sock, (struct sockaddr *)&sa, &addrlen) < 0) {
+ ret = -errno;
+ goto cleanup;
+ }
+
+ if (addrlen != sizeof(sa)) {
+ ret = -LIBBPF_ERRNO__INTERNAL;
+ goto cleanup;
+ }
+
+ *nl_pid = sa.nl_pid;
+ return sock;
+
+cleanup:
+ close(sock);
+ return ret;
+}
+
+static int netlink_recv(int sock, __u32 nl_pid, __u32 seq,
+ __dump_nlmsg_t _fn, dump_nlmsg_t fn,
+ void *cookie)
+{
+ bool multipart = true;
+ struct nlmsgerr *err;
+ struct nlmsghdr *nh;
+ char buf[4096];
+ int len, ret;
+
+ while (multipart) {
+ multipart = false;
+ len = recv(sock, buf, sizeof(buf), 0);
+ if (len < 0) {
+ ret = -errno;
+ goto done;
+ }
+
+ if (len == 0)
+ break;
+
+ for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
+ nh = NLMSG_NEXT(nh, len)) {
+ if (nh->nlmsg_pid != nl_pid) {
+ ret = -LIBBPF_ERRNO__WRNGPID;
+ goto done;
+ }
+ if (nh->nlmsg_seq != seq) {
+ ret = -LIBBPF_ERRNO__INVSEQ;
+ goto done;
+ }
+ if (nh->nlmsg_flags & NLM_F_MULTI)
+ multipart = true;
+ switch (nh->nlmsg_type) {
+ case NLMSG_ERROR:
+ err = (struct nlmsgerr *)NLMSG_DATA(nh);
+ if (!err->error)
+ continue;
+ ret = err->error;
+ libbpf_nla_dump_errormsg(nh);
+ goto done;
+ case NLMSG_DONE:
+ return 0;
+ default:
+ break;
+ }
+ if (_fn) {
+ ret = _fn(nh, fn, cookie);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ ret = 0;
+done:
+ return ret;
+}
+
+static int __dump_class_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_class_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_class_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_class(int sock, unsigned int nl_pid, int ifindex,
+ dump_nlmsg_t dump_class_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTCLASS,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg,
+ dump_class_nlmsg, cookie);
+}
+
+static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_qdisc_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_qdisc_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
+ dump_nlmsg_t dump_qdisc_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETQDISC,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg,
+ dump_qdisc_nlmsg, cookie);
+}
+
+static int __dump_filter_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_filter_nlmsg,
+ void *cookie)
+{
+ struct nlattr *tb[TCA_MAX + 1], *attr;
+ struct tcmsg *t = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
+ attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
+ if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_filter_nlmsg(cookie, t, tb);
+}
+
+static int netlink_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
+ dump_nlmsg_t dump_filter_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct tcmsg t;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ .nlh.nlmsg_type = RTM_GETTFILTER,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .t.tcm_family = AF_UNSPEC,
+ .t.tcm_ifindex = ifindex,
+ .t.tcm_parent = handle,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg,
+ dump_filter_nlmsg, cookie);
+}
+
+static int __dump_link_nlmsg(struct nlmsghdr *nlh,
+ dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct nlattr *tb[IFLA_MAX + 1], *attr;
+ struct ifinfomsg *ifi = NLMSG_DATA(nlh);
+ int len;
+
+ len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
+ attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
+ if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
+ return -LIBBPF_ERRNO__NLPARSE;
+
+ return dump_link_nlmsg(cookie, ifi, tb);
+}
+
+static int netlink_get_link(int sock, unsigned int nl_pid,
+ dump_nlmsg_t dump_link_nlmsg, void *cookie)
+{
+ struct {
+ struct nlmsghdr nlh;
+ struct ifinfomsg ifm;
+ } req = {
+ .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ .nlh.nlmsg_type = RTM_GETLINK,
+ .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
+ .ifm.ifi_family = AF_PACKET,
+ };
+ int seq = time(NULL);
+
+ req.nlh.nlmsg_seq = seq;
+ if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
+ return -errno;
+
+ return netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
+ dump_link_nlmsg, cookie);
+}
+
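Editorial note: the four netlink_get_*() dump helpers above share one shape: fill in a tcmsg or ifinfomsg request, send it with NLM_F_DUMP | NLM_F_REQUEST, and let netlink_recv() walk the multipart reply, feeding each parsed message to a dump_nlmsg_t callback. A minimal illustrative caller is sketched below; count_links and the surrounding glue are hypothetical, not part of the patch.

	static int count_links(void *cookie, void *msg, struct nlattr **tb)
	{
		int *cnt = cookie;

		(*cnt)++;	/* invoked once per RTM_NEWLINK message */
		return 0;	/* non-zero aborts netlink_recv() early */
	}

	/* ... inside some caller ... */
	unsigned int nl_pid = 0;
	int cnt = 0, sock;

	sock = netlink_open(&nl_pid);
	if (sock < 0)
		return sock;
	if (!netlink_get_link(sock, nl_pid, count_links, &cnt))
		printf("%d network devices\n", cnt);
	close(sock);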
static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb)
{
struct bpf_netdev_t *netinfo = cookie;
@@ -168,14 +433,14 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
tcinfo.array_len = 0;
tcinfo.is_qdisc = false;
- ret = libbpf_nl_get_class(sock, nl_pid, dev->ifindex,
- dump_class_qdisc_nlmsg, &tcinfo);
+ ret = netlink_get_class(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
if (ret)
goto out;
tcinfo.is_qdisc = true;
- ret = libbpf_nl_get_qdisc(sock, nl_pid, dev->ifindex,
- dump_class_qdisc_nlmsg, &tcinfo);
+ ret = netlink_get_qdisc(sock, nl_pid, dev->ifindex,
+ dump_class_qdisc_nlmsg, &tcinfo);
if (ret)
goto out;
@@ -183,9 +448,9 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
filter_info.ifindex = dev->ifindex;
for (i = 0; i < tcinfo.used_len; i++) {
filter_info.kind = tcinfo.handle_array[i].kind;
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex,
- tcinfo.handle_array[i].handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex,
+ tcinfo.handle_array[i].handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
}
@@ -193,22 +458,22 @@ static int show_dev_tc_bpf(int sock, unsigned int nl_pid,
/* root, ingress and egress handle */
handle = TC_H_ROOT;
filter_info.kind = "root";
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
filter_info.kind = "clsact/ingress";
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
handle = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_EGRESS);
filter_info.kind = "clsact/egress";
- ret = libbpf_nl_get_filter(sock, nl_pid, dev->ifindex, handle,
- dump_filter_nlmsg, &filter_info);
+ ret = netlink_get_filter(sock, nl_pid, dev->ifindex, handle,
+ dump_filter_nlmsg, &filter_info);
if (ret)
goto out;
@@ -386,7 +651,7 @@ static int do_show(int argc, char **argv)
struct bpf_attach_info attach_info = {};
int i, sock, ret, filter_idx = -1;
struct bpf_netdev_t dev_array;
- unsigned int nl_pid;
+ unsigned int nl_pid = 0;
char err_buf[256];
if (argc == 2) {
@@ -401,7 +666,7 @@ static int do_show(int argc, char **argv)
if (ret)
return -1;
- sock = libbpf_netlink_open(&nl_pid);
+ sock = netlink_open(&nl_pid);
if (sock < 0) {
fprintf(stderr, "failed to open netlink sock\n");
return -1;
@@ -416,7 +681,7 @@ static int do_show(int argc, char **argv)
jsonw_start_array(json_wtr);
NET_START_OBJECT;
NET_START_ARRAY("xdp", "%s:\n");
- ret = libbpf_nl_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
+ ret = netlink_get_link(sock, nl_pid, dump_link_nlmsg, &dev_array);
NET_END_ARRAY("\n");
if (!ret) {
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index d393eb8263a6..d942c1e3372c 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -29,6 +29,9 @@
#include "main.h"
#include "xlated_dumper.h"
+#define BPF_METADATA_PREFIX "bpf_metadata_"
+#define BPF_METADATA_PREFIX_LEN (sizeof(BPF_METADATA_PREFIX) - 1)
+
const char * const prog_type_name[] = {
[BPF_PROG_TYPE_UNSPEC] = "unspec",
[BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
@@ -151,6 +154,198 @@ static void show_prog_maps(int fd, __u32 num_maps)
}
}
+static void *find_metadata(int prog_fd, struct bpf_map_info *map_info)
+{
+ struct bpf_prog_info prog_info;
+ __u32 prog_info_len;
+ __u32 map_info_len;
+ void *value = NULL;
+ __u32 *map_ids;
+ int nr_maps;
+ int key = 0;
+ int map_fd;
+ int ret;
+ __u32 i;
+
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info_len = sizeof(prog_info);
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ return NULL;
+
+ if (!prog_info.nr_map_ids)
+ return NULL;
+
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
+ if (!map_ids)
+ return NULL;
+
+ nr_maps = prog_info.nr_map_ids;
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.nr_map_ids = nr_maps;
+ prog_info.map_ids = ptr_to_u64(map_ids);
+ prog_info_len = sizeof(prog_info);
+
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ goto free_map_ids;
+
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ map_fd = bpf_map_get_fd_by_id(map_ids[i]);
+ if (map_fd < 0)
+ goto free_map_ids;
+
+ memset(map_info, 0, sizeof(*map_info));
+ map_info_len = sizeof(*map_info);
+ ret = bpf_obj_get_info_by_fd(map_fd, map_info, &map_info_len);
+ if (ret < 0) {
+ close(map_fd);
+ goto free_map_ids;
+ }
+
+ if (map_info->type != BPF_MAP_TYPE_ARRAY ||
+ map_info->key_size != sizeof(int) ||
+ map_info->max_entries != 1 ||
+ !map_info->btf_value_type_id ||
+ !strstr(map_info->name, ".rodata")) {
+ close(map_fd);
+ continue;
+ }
+
+ value = malloc(map_info->value_size);
+ if (!value) {
+ close(map_fd);
+ goto free_map_ids;
+ }
+
+ if (bpf_map_lookup_elem(map_fd, &key, value)) {
+ close(map_fd);
+ free(value);
+ value = NULL;
+ goto free_map_ids;
+ }
+
+ close(map_fd);
+ break;
+ }
+
+free_map_ids:
+ free(map_ids);
+ return value;
+}
+
+static bool has_metadata_prefix(const char *s)
+{
+ return strncmp(s, BPF_METADATA_PREFIX, BPF_METADATA_PREFIX_LEN) == 0;
+}
+
+static void show_prog_metadata(int fd, __u32 num_maps)
+{
+ const struct btf_type *t_datasec, *t_var;
+ struct bpf_map_info map_info;
+ struct btf_var_secinfo *vsi;
+ bool printed_header = false;
+ struct btf *btf = NULL;
+ unsigned int i, vlen;
+ void *value = NULL;
+ const char *name;
+ int err;
+
+ if (!num_maps)
+ return;
+
+ memset(&map_info, 0, sizeof(map_info));
+ value = find_metadata(fd, &map_info);
+ if (!value)
+ return;
+
+ err = btf__get_from_id(map_info.btf_id, &btf);
+ if (err || !btf)
+ goto out_free;
+
+ t_datasec = btf__type_by_id(btf, map_info.btf_value_type_id);
+ if (!btf_is_datasec(t_datasec))
+ goto out_free;
+
+ vlen = btf_vlen(t_datasec);
+ vsi = btf_var_secinfos(t_datasec);
+
+ /* We don't proceed to check the kinds of the elements of the DATASEC.
+ * The verifier enforces them to be BTF_KIND_VAR.
+ */
+
+ if (json_output) {
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = json_wtr,
+ .is_plain_text = false,
+ };
+
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
+
+ if (!has_metadata_prefix(name))
+ continue;
+
+ if (!printed_header) {
+ jsonw_name(json_wtr, "metadata");
+ jsonw_start_object(json_wtr);
+ printed_header = true;
+ }
+
+ jsonw_name(json_wtr, name + BPF_METADATA_PREFIX_LEN);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_end_object(json_wtr);
+ } else {
+ json_writer_t *btf_wtr = jsonw_new(stdout);
+ struct btf_dumper d = {
+ .btf = btf,
+ .jw = btf_wtr,
+ .is_plain_text = true,
+ };
+
+ if (!btf_wtr) {
+ p_err("jsonw alloc failed");
+ goto out_free;
+ }
+
+ for (i = 0; i < vlen; i++, vsi++) {
+ t_var = btf__type_by_id(btf, vsi->type);
+ name = btf__name_by_offset(btf, t_var->name_off);
+
+ if (!has_metadata_prefix(name))
+ continue;
+
+ if (!printed_header) {
+ printf("\tmetadata:");
+ printed_header = true;
+ }
+
+ printf("\n\t\t%s = ", name + BPF_METADATA_PREFIX_LEN);
+
+ jsonw_reset(btf_wtr);
+ err = btf_dumper_type(&d, t_var->type, value + vsi->offset);
+ if (err) {
+ p_err("btf dump failed: %d", err);
+ break;
+ }
+ }
+ if (printed_header)
+ jsonw_destroy(&btf_wtr);
+ }
+
+out_free:
+ btf__free(btf);
+ free(value);
+}
+
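Editorial note: the prefix gives BPF objects a lightweight convention for shipping build information: any read-only global whose name starts with bpf_metadata_ lands in the single-entry .rodata array map that find_metadata() locates, and show_prog_metadata() prints it with the prefix stripped. A hypothetical object-side declaration (name and value are illustrative):

	/* In the BPF program source; ends up in the .rodata map */
	volatile const char bpf_metadata_version[] SEC(".rodata") = "1.2.3";

	/* "bpftool prog show" would then report, roughly:
	 *	metadata:
	 *		version = "1.2.3"
	 */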
static void print_prog_header_json(struct bpf_prog_info *info)
{
jsonw_uint_field(json_wtr, "id", info->id);
@@ -228,6 +423,8 @@ static void print_prog_json(struct bpf_prog_info *info, int fd)
emit_obj_refs_json(&refs_table, info->id, json_wtr);
+ show_prog_metadata(fd, info->nr_map_ids);
+
jsonw_end_object(json_wtr);
}
@@ -297,6 +494,8 @@ static void print_prog_plain(struct bpf_prog_info *info, int fd)
emit_obj_refs_plain(&refs_table, info->id, "\n\tpids ");
printf("\n");
+
+ show_prog_metadata(fd, info->nr_map_ids);
}
static int show_prog(int fd)
@@ -1304,7 +1503,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
enum bpf_prog_type prog_type = common_prog_type;
if (prog_type == BPF_PROG_TYPE_UNSPEC) {
- const char *sec_name = bpf_program__title(pos, false);
+ const char *sec_name = bpf_program__section_name(pos);
err = get_prog_type_by_name(sec_name, &prog_type,
&expected_attach_type);
@@ -1398,7 +1597,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
err = bpf_obj_pin(bpf_program__fd(prog), pinfile);
if (err) {
p_err("failed to pin program %s",
- bpf_program__title(prog, false));
+ bpf_program__section_name(prog));
goto err_close_obj;
}
} else {
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index fe8eb537688b..66cb92136de4 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
include ../../scripts/Makefile.include
+include ../../scripts/Makefile.arch
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@@ -29,6 +30,7 @@ endif
AR = $(HOSTAR)
CC = $(HOSTCC)
LD = $(HOSTLD)
+ARCH = $(HOSTARCH)
OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
index 0def0bb1f783..dfa540d8a02d 100644
--- a/tools/bpf/resolve_btfids/main.c
+++ b/tools/bpf/resolve_btfids/main.c
@@ -199,9 +199,16 @@ static char *get_id(const char *prefix_end)
/*
* __BTF_ID__func__vfs_truncate__0
* prefix_end = ^
+ * pos = ^
*/
- char *p, *id = strdup(prefix_end + sizeof("__") - 1);
+ int len = strlen(prefix_end);
+ int pos = sizeof("__") - 1;
+ char *p, *id;
+ if (pos >= len)
+ return NULL;
+
+ id = strdup(prefix_end + pos);
if (id) {
/*
* __BTF_ID__func__vfs_truncate__0
@@ -220,6 +227,24 @@ static char *get_id(const char *prefix_end)
return id;
}
+static struct btf_id *add_set(struct object *obj, char *name)
+{
+ /*
+ * __BTF_ID__set__name
+ * name = ^
+ * id = ^
+ */
+ char *id = name + sizeof(BTF_SET "__") - 1;
+ int len = strlen(name);
+
+ if (id >= name + len) {
+ pr_err("FAILED to parse set name: %s\n", name);
+ return NULL;
+ }
+
+ return btf_id__add(&obj->sets, id, true);
+}
+
static struct btf_id *add_symbol(struct rb_root *root, char *name, size_t size)
{
char *id;
@@ -412,7 +437,7 @@ static int symbols_collect(struct object *obj)
id = add_symbol(&obj->funcs, prefix, sizeof(BTF_FUNC) - 1);
/* set */
} else if (!strncmp(prefix, BTF_SET, sizeof(BTF_SET) - 1)) {
- id = add_symbol(&obj->sets, prefix, sizeof(BTF_SET) - 1);
+ id = add_set(obj, prefix);
/*
* SET objects store list's count, which is encoded
* in symbol's size, together with 'cnt' field hence
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 727050c40f09..722f1700d96a 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -38,6 +38,8 @@ clean:
$(call QUIET_CLEAN, fixdep)
$(Q)find $(if $(OUTPUT),$(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
$(Q)rm -f $(OUTPUT)fixdep
+ $(call QUIET_CLEAN, feature-detect)
+ $(Q)$(MAKE) -C feature/ clean >/dev/null
$(OUTPUT)fixdep-in.o: FORCE
$(Q)$(MAKE) $(build)=fixdep
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index c1daf4d57518..38415d251075 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -46,7 +46,6 @@ FEATURE_TESTS_BASIC := \
libelf-getphdrnum \
libelf-gelf_getnote \
libelf-getshdrstrndx \
- libelf-mmap \
libnuma \
numa_num_possible_cpus \
libperl \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index d220fe952747..b2a2347c67ed 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -25,7 +25,6 @@ FILES= \
test-libelf-getphdrnum.bin \
test-libelf-gelf_getnote.bin \
test-libelf-getshdrstrndx.bin \
- test-libelf-mmap.bin \
test-libdebuginfod.bin \
test-libnuma.bin \
test-numa_num_possible_cpus.bin \
@@ -146,9 +145,6 @@ $(OUTPUT)test-dwarf.bin:
$(OUTPUT)test-dwarf_getlocations.bin:
$(BUILD) $(DWARFLIBS)
-$(OUTPUT)test-libelf-mmap.bin:
- $(BUILD) -lelf
-
$(OUTPUT)test-libelf-getphdrnum.bin:
$(BUILD) -lelf
diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
index 5479e543b194..5284e6e9c756 100644
--- a/tools/build/feature/test-all.c
+++ b/tools/build/feature/test-all.c
@@ -30,10 +30,6 @@
# include "test-libelf.c"
#undef main
-#define main main_test_libelf_mmap
-# include "test-libelf-mmap.c"
-#undef main
-
#define main main_test_get_current_dir_name
# include "test-get_current_dir_name.c"
#undef main
diff --git a/tools/build/feature/test-libelf-mmap.c b/tools/build/feature/test-libelf-mmap.c
deleted file mode 100644
index 2c3ef81affe2..000000000000
--- a/tools/build/feature/test-libelf-mmap.c
+++ /dev/null
@@ -1,9 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <libelf.h>
-
-int main(void)
-{
- Elf *elf = elf_begin(0, ELF_C_READ_MMAP, 0);
-
- return (long)elf;
-}
diff --git a/tools/include/linux/btf_ids.h b/tools/include/linux/btf_ids.h
index 4867d549e3c1..57890b357f85 100644
--- a/tools/include/linux/btf_ids.h
+++ b/tools/include/linux/btf_ids.h
@@ -3,6 +3,11 @@
#ifndef _LINUX_BTF_IDS_H
#define _LINUX_BTF_IDS_H
+struct btf_id_set {
+ u32 cnt;
+ u32 ids[];
+};
+
#ifdef CONFIG_DEBUG_INFO_BTF
#include <linux/compiler.h> /* for __PASTE */
@@ -62,7 +67,7 @@ asm( \
".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
"." #scope " " #name "; \n" \
#name ":; \n" \
-".popsection; \n"); \
+".popsection; \n");
#define BTF_ID_LIST(name) \
__BTF_ID_LIST(name, local) \
@@ -71,6 +76,13 @@ extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \
__BTF_ID_LIST(name, globl)
+/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
+ * a single entry.
+ */
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) \
+ BTF_ID_LIST(name) \
+ BTF_ID(prefix, typename)
+
/*
* The BTF_ID_UNUSED macro defines 4 zero bytes.
* It's used when we want to define 'unused' entry
@@ -88,12 +100,57 @@ asm( \
".zero 4 \n" \
".popsection; \n");
+/*
+ * The BTF_SET_START/END macro pair defines a sorted list of
+ * BTF IDs plus its member count, with the following layout:
+ *
+ * BTF_SET_START(list)
+ * BTF_ID(type1, name1)
+ * BTF_ID(type2, name2)
+ * BTF_SET_END(list)
+ *
+ * __BTF_ID__set__list:
+ * .zero 4
+ * list:
+ * __BTF_ID__type1__name1__3:
+ * .zero 4
+ * __BTF_ID__type2__name2__4:
+ * .zero 4
+ *
+ */
+#define __BTF_SET_START(name, scope) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+"." #scope " __BTF_ID__set__" #name "; \n" \
+"__BTF_ID__set__" #name ":; \n" \
+".zero 4 \n" \
+".popsection; \n");
+
+#define BTF_SET_START(name) \
+__BTF_ID_LIST(name, local) \
+__BTF_SET_START(name, local)
+
+#define BTF_SET_START_GLOBAL(name) \
+__BTF_ID_LIST(name, globl) \
+__BTF_SET_START(name, globl)
+
+#define BTF_SET_END(name) \
+asm( \
+".pushsection " BTF_IDS_SECTION ",\"a\"; \n" \
+".size __BTF_ID__set__" #name ", .-" #name " \n" \
+".popsection; \n"); \
+extern struct btf_id_set name;
+
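Editorial sketch of the intended use; the set name is illustrative, and the membership test assumes a kernel-side lookup helper over the sorted ids along the lines of btf_id_set_contains():

	BTF_SET_START(allowed_structs)
	BTF_ID(struct, sk_buff)
	BTF_ID(struct, sock)
	BTF_SET_END(allowed_structs)

	/* e.g. in a verifier-side check: */
	if (btf_id_set_contains(&allowed_structs, btf_id))
		/* type is a member of the set */;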
#else
#define BTF_ID_LIST(name) static u32 name[5];
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
+#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
+#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
+#define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 };
+#define BTF_SET_END(name)
#endif /* CONFIG_DEBUG_INFO_BTF */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index b6238b2209b7..bf5a99d803e4 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -124,6 +124,7 @@ enum bpf_cmd {
BPF_ENABLE_STATS,
BPF_ITER_CREATE,
BPF_LINK_DETACH,
+ BPF_PROG_BIND_MAP,
};
enum bpf_map_type {
@@ -155,6 +156,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_DEVMAP_HASH,
BPF_MAP_TYPE_STRUCT_OPS,
BPF_MAP_TYPE_RINGBUF,
+ BPF_MAP_TYPE_INODE_STORAGE,
};
/* Note that tracing related programs such as
@@ -345,19 +347,45 @@ enum bpf_link_type {
/* The verifier internal test flag. Behavior is undefined */
#define BPF_F_TEST_STATE_FREQ (1U << 3)
+/* If BPF_F_SLEEPABLE is used in BPF_PROG_LOAD command, the verifier will
+ * restrict map and helper usage for such programs. Sleepable BPF programs can
+ * only be attached to hooks where kernel execution context allows sleeping.
+ * Such programs are allowed to use helpers that may sleep like
+ * bpf_copy_from_user().
+ */
+#define BPF_F_SLEEPABLE (1U << 4)
+
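Editorial note: on the loader side, libbpf derives this flag from the ".s" section suffix. A hypothetical sleepable LSM program (hook and field access shown for illustration only):

	SEC("lsm.s/bprm_committed_creds")	/* ".s" => BPF_F_SLEEPABLE */
	int BPF_PROG(exec_audit, struct linux_binprm *bprm)
	{
		char arg0[64] = {};

		/* arg_start is a userspace address: reading it may fault
		 * and sleep, which only a sleepable program may do
		 */
		if (bprm->mm)
			bpf_copy_from_user(arg0, sizeof(arg0),
					   (void *)bprm->mm->arg_start);
		return 0;
	}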
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
- * two extensions:
- *
- * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE
- * insn[0].imm: map fd map fd
- * insn[1].imm: 0 offset into value
- * insn[0].off: 0 0
- * insn[1].off: 0 0
- * ldimm64 rewrite: address of map address of map[0]+offset
- * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE
+ * the following extensions:
+ *
+ * insn[0].src_reg: BPF_PSEUDO_MAP_FD
+ * insn[0].imm: map fd
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map
+ * verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
+/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
+ * insn[0].imm: map fd
+ * insn[1].imm: offset into value
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of map[0]+offset
+ * verifier type: PTR_TO_MAP_VALUE
+ */
#define BPF_PSEUDO_MAP_VALUE 2
+/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
+ * insn[0].imm: kernel btf id of VAR
+ * insn[1].imm: 0
+ * insn[0].off: 0
+ * insn[1].off: 0
+ * ldimm64 rewrite: address of the kernel variable
+ * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var
+ * is struct/union.
+ */
+#define BPF_PSEUDO_BTF_ID 3
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
@@ -404,6 +432,12 @@ enum {
/* Enable memory-mapping BPF map */
BPF_F_MMAPABLE = (1U << 10),
+
+/* Share perf_event among processes */
+ BPF_F_PRESERVE_ELEMS = (1U << 11),
+
+/* Create a map that is suitable to be an inner map with dynamic max entries */
+ BPF_F_INNER_MAP = (1U << 12),
};
/* Flags for BPF_PROG_QUERY. */
@@ -414,6 +448,11 @@ enum {
*/
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
+/* Flags for BPF_PROG_TEST_RUN */
+
+/* If set, run the test on the cpu specified by bpf_attr.test.cpu */
+#define BPF_F_TEST_RUN_ON_CPU (1U << 0)
+
/* type for BPF_ENABLE_STATS */
enum bpf_stats_type {
/* enabled run_time_ns and run_cnt */
@@ -556,6 +595,8 @@ union bpf_attr {
*/
__aligned_u64 ctx_in;
__aligned_u64 ctx_out;
+ __u32 flags;
+ __u32 cpu;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
@@ -622,8 +663,13 @@ union bpf_attr {
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
- __aligned_u64 iter_info; /* extra bpf_iter_link_info */
- __u32 iter_info_len; /* iter_info length */
+ union {
+ __u32 target_btf_id; /* btf_id of target to attach to */
+ struct {
+ __aligned_u64 iter_info; /* extra bpf_iter_link_info */
+ __u32 iter_info_len; /* iter_info length */
+ };
+ };
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */
@@ -649,6 +695,12 @@ union bpf_attr {
__u32 flags;
} iter_create;
+ struct { /* struct used by BPF_PROG_BIND_MAP command */
+ __u32 prog_fd;
+ __u32 map_fd;
+ __u32 flags; /* extra flags */
+ } prog_bind_map;
+
} __attribute__((aligned(8)));
/* The description below is an attempt at providing documentation to eBPF
@@ -1438,8 +1490,8 @@ union bpf_attr {
* Return
* The return value depends on the result of the test, and can be:
*
- * * 0, if the *skb* task belongs to the cgroup2.
- * * 1, if the *skb* task does not belong to the cgroup2.
+ * * 0, if current task belongs to the cgroup2.
+ * * 1, if current task does not belong to the cgroup2.
* * A negative error code, if an error occurred.
*
* long bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags)
@@ -1649,7 +1701,7 @@ union bpf_attr {
* **TCP_CONGESTION**, **TCP_BPF_IW**,
* **TCP_BPF_SNDCWND_CLAMP**, **TCP_SAVE_SYN**,
* **TCP_KEEPIDLE**, **TCP_KEEPINTVL**, **TCP_KEEPCNT**,
- * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**.
+ * **TCP_SYNCNT**, **TCP_USER_TIMEOUT**, **TCP_NOTSENT_LOWAT**.
* * **IPPROTO_IP**, which supports *optname* **IP_TOS**.
* * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**.
* Return
@@ -2204,7 +2256,7 @@ union bpf_attr {
* Description
* This helper is used in programs implementing policies at the
* skb socket level. If the sk_buff *skb* is allowed to pass (i.e.
- * if the verdeict eBPF program returns **SK_PASS**), redirect it
+ * if the verdict eBPF program returns **SK_PASS**), redirect it
* to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and
* egress interfaces can be used for redirection. The
@@ -2496,7 +2548,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * long bpf_sk_release(struct bpf_sock *sock)
+ * long bpf_sk_release(void *sock)
* Description
* Release the reference held by *sock*. *sock* must be a
* non-**NULL** pointer that was returned from
@@ -2676,7 +2728,7 @@ union bpf_attr {
* result is from *reuse*\ **->socks**\ [] using the hash of the
* tuple.
*
- * long bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * long bpf_tcp_check_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Check whether *iph* and *th* contain a valid SYN cookie ACK for
* the listening socket in *sk*.
@@ -2807,7 +2859,7 @@ union bpf_attr {
*
* **-ERANGE** if resulting value was out of range.
*
- * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags)
+ * void *bpf_sk_storage_get(struct bpf_map *map, void *sk, void *value, u64 flags)
* Description
* Get a bpf-local-storage from a *sk*.
*
@@ -2823,6 +2875,9 @@ union bpf_attr {
* "type". The bpf-local-storage "type" (i.e. the *map*) is
* searched against all bpf-local-storages residing at *sk*.
*
+ * *sk* is a kernel **struct sock** pointer for LSM program.
+ * *sk* is a **struct bpf_sock** pointer for other program types.
+ *
* An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be
* used such that a new bpf-local-storage will be
* created if one does not exist. *value* can be used
@@ -2835,13 +2890,14 @@ union bpf_attr {
* **NULL** if not found or there was an error in adding
* a new bpf-local-storage.
*
- * long bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk)
+ * long bpf_sk_storage_delete(struct bpf_map *map, void *sk)
* Description
* Delete a bpf-local-storage from a *sk*.
* Return
* 0 on success.
*
* **-ENOENT** if the bpf-local-storage cannot be found.
+ * **-EINVAL** if sk is not a fullsock (e.g. a request_sock).
*
* long bpf_send_signal(u32 sig)
* Description
@@ -2858,7 +2914,7 @@ union bpf_attr {
*
* **-EAGAIN** if bpf program can try again.
*
- * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
+ * s64 bpf_tcp_gen_syncookie(void *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len)
* Description
* Try to issue a SYN cookie for the packet with corresponding
* IP/TCP headers, *iph* and *th*, on the listening socket in *sk*.
@@ -3087,7 +3143,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * long bpf_sk_assign(struct sk_buff *skb, struct bpf_sock *sk, u64 flags)
+ * long bpf_sk_assign(struct sk_buff *skb, void *sk, u64 flags)
* Description
* Helper is overloaded depending on BPF program type. This
* description applies to **BPF_PROG_TYPE_SCHED_CLS** and
@@ -3215,11 +3271,11 @@ union bpf_attr {
*
* **-EOVERFLOW** if an overflow happened: The same object will be tried again.
*
- * u64 bpf_sk_cgroup_id(struct bpf_sock *sk)
+ * u64 bpf_sk_cgroup_id(void *sk)
* Description
* Return the cgroup v2 id of the socket *sk*.
*
- * *sk* must be a non-**NULL** pointer to a full socket, e.g. one
+ * *sk* must be a non-**NULL** pointer to a socket, e.g. one
* returned from **bpf_sk_lookup_xxx**\ (),
* **bpf_sk_fullsock**\ (), etc. The format of returned id is
* same as in **bpf_skb_cgroup_id**\ ().
@@ -3229,7 +3285,7 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
- * u64 bpf_sk_ancestor_cgroup_id(struct bpf_sock *sk, int ancestor_level)
+ * u64 bpf_sk_ancestor_cgroup_id(void *sk, int ancestor_level)
* Description
* Return id of cgroup v2 that is ancestor of cgroup associated
* with the *sk* at the *ancestor_level*. The root cgroup is at
@@ -3337,38 +3393,38 @@ union bpf_attr {
* Description
* Dynamically cast a *sk* pointer to a *tcp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_sock *bpf_skc_to_tcp_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_timewait_sock *bpf_skc_to_tcp_timewait_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_timewait_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct tcp_request_sock *bpf_skc_to_tcp_request_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *tcp_request_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* struct udp6_sock *bpf_skc_to_udp6_sock(void *sk)
* Description
* Dynamically cast a *sk* pointer to a *udp6_sock* pointer.
* Return
- * *sk* if casting is valid, or NULL otherwise.
+ * *sk* if casting is valid, or **NULL** otherwise.
*
* long bpf_get_task_stack(struct task_struct *task, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *task*, which is a valid
- * pointer to struct task_struct. To store the stacktrace, the
- * bpf program provides *buf* with a nonnegative *size*.
+ * pointer to **struct task_struct**. To store the stacktrace, the
+ * bpf program provides *buf* with a nonnegative *size*.
*
* The last argument, *flags*, holds the number of stack frames to
* skip (from 0 to 255), masked with
@@ -3395,6 +3451,293 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
+ * long bpf_load_hdr_opt(struct bpf_sock_ops *skops, void *searchby_res, u32 len, u64 flags)
+ * Description
+ * Load header option. Support reading a particular TCP header
+ * option for bpf program (**BPF_PROG_TYPE_SOCK_OPS**).
+ *
+ * If *flags* is 0, it will search the option from the
+ * *skops*\ **->skb_data**. The comment in **struct bpf_sock_ops**
+ * has details on what skb_data contains under different
+ * *skops*\ **->op**.
+ *
+ * The first byte of the *searchby_res* specifies the
+ * kind that it wants to search.
+ *
+ * If the searching kind is an experimental kind
+ * (i.e. 253 or 254 according to RFC6994), it also
+ * needs to specify the "magic" which is either
+ * 2 bytes or 4 bytes. It then also needs to
+ * specify the size of the magic by using
+ * the 2nd byte which is "kind-length" of a TCP
+ * header option and the "kind-length" also
+ * includes the first 2 bytes "kind" and "kind-length"
+ * itself as a normal TCP header option also does.
+ *
+ * For example, to search experimental kind 254 with
+ * 2 byte magic 0xeB9F, the searchby_res should be
+ * [ 254, 4, 0xeB, 0x9F, 0, 0, .... 0 ].
+ *
+ * To search for the standard window scale option (3),
+ * the *searchby_res* should be [ 3, 0, 0, .... 0 ].
+ * Note, kind-length must be 0 for regular option.
+ *
+ * Searching for No-Op (0) and End-of-Option-List (1) is
+ * not supported.
+ *
+ * *len* must be at least 2 bytes which is the minimal size
+ * of a header option.
+ *
+ * Supported flags:
+ *
+ * * **BPF_LOAD_HDR_OPT_TCP_SYN** to search from the
+ * saved_syn packet or the just-received syn packet.
+ *
+ * Return
+ * > 0 when found, the header option is copied to *searchby_res*.
+ * The return value is the total length copied. On failure, a
+ * negative error code is returned:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOMSG** if the option is not found.
+ *
+ * **-ENOENT** if no syn packet is available when
+ * **BPF_LOAD_HDR_OPT_TCP_SYN** is used.
+ *
+ * **-ENOSPC** if there is not enough space. Only *len*
+ * bytes are copied.
+ *
+ * **-EFAULT** on failure to parse the header options in the
+ * packet.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_store_hdr_opt(struct bpf_sock_ops *skops, const void *from, u32 len, u64 flags)
+ * Description
+ * Store header option. The data will be copied
+ * from buffer *from* with length *len* to the TCP header.
+ *
+ * The buffer *from* should have the whole option that
+ * includes the kind, kind-length, and the actual
+ * option data. The *len* must be at least kind-length
+ * long. The kind-length does not have to be 4 byte
+ * aligned. The kernel will take care of the padding
+ * and setting the 4 bytes aligned value to th->doff.
+ *
+ * This helper will check for duplicated option
+ * by searching the same option in the outgoing skb.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ * Nothing has been written.
+ *
+ * **-EEXIST** if the option already exists.
+ *
+ * **-EFAULT** on failure to parse the existing header options.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * long bpf_reserve_hdr_opt(struct bpf_sock_ops *skops, u32 len, u64 flags)
+ * Description
+ * Reserve *len* bytes for the bpf header option. The
+ * space will be used by **bpf_store_hdr_opt**\ () later in
+ * **BPF_SOCK_OPS_WRITE_HDR_OPT_CB**.
+ *
+ * If **bpf_reserve_hdr_opt**\ () is called multiple times,
+ * the total number of bytes will be reserved.
+ *
+ * This helper can only be called during
+ * **BPF_SOCK_OPS_HDR_OPT_LEN_CB**.
+ *
+ * Return
+ * 0 on success, or negative error in case of failure:
+ *
+ * **-EINVAL** if a parameter is invalid.
+ *
+ * **-ENOSPC** if there is not enough space in the header.
+ *
+ * **-EPERM** if the helper cannot be used under the current
+ * *skops*\ **->op**.
+ *
+ * void *bpf_inode_storage_get(struct bpf_map *map, void *inode, void *value, u64 flags)
+ * Description
+ * Get a bpf_local_storage from an *inode*.
+ *
+ * Logically, it could be thought of as getting the value from
+ * a *map* with *inode* as the **key**. From this
+ * perspective, the usage is not much different from
+ * **bpf_map_lookup_elem**\ (*map*, **&**\ *inode*) except this
+ * helper enforces the key must be an inode and the map must also
+ * be a **BPF_MAP_TYPE_INODE_STORAGE**.
+ *
+ * Underneath, the value is stored locally at *inode* instead of
+ * the *map*. The *map* is used as the bpf-local-storage
+ * "type". The bpf-local-storage "type" (i.e. the *map*) is
+ * searched against all bpf_local_storage residing at *inode*.
+ *
+ * An optional *flags* (**BPF_LOCAL_STORAGE_GET_F_CREATE**) can be
+ * used such that a new bpf_local_storage will be
+ * created if one does not exist. *value* can be used
+ * together with **BPF_LOCAL_STORAGE_GET_F_CREATE** to specify
+ * the initial value of a bpf_local_storage. If *value* is
+ * **NULL**, the new bpf_local_storage will be zero initialized.
+ * Return
+ * A bpf_local_storage pointer is returned on success.
+ *
+ * **NULL** if not found or there was an error in adding
+ * a new bpf_local_storage.
+ *
+ * int bpf_inode_storage_delete(struct bpf_map *map, void *inode)
+ * Description
+ * Delete a bpf_local_storage from an *inode*.
+ * Return
+ * 0 on success.
+ *
+ * **-ENOENT** if the bpf_local_storage cannot be found.
+ *
+ * long bpf_d_path(struct path *path, char *buf, u32 sz)
+ * Description
+ * Return full path for given **struct path** object, which
+ * needs to be the kernel BTF *path* object. The path is
+ * returned in the provided buffer *buf* of size *sz* and
+ * is zero terminated.
+ *
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * long bpf_copy_from_user(void *dst, u32 size, const void *user_ptr)
+ * Description
+ * Read *size* bytes from user space address *user_ptr* and store
+ * the data in *dst*. This is a wrapper of **copy_from_user**\ ().
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * long bpf_snprintf_btf(char *str, u32 str_size, struct btf_ptr *ptr, u32 btf_ptr_size, u64 flags)
+ * Description
+ * Use BTF to store a string representation of *ptr*->ptr in *str*,
+ * using *ptr*->type_id. This value should specify the type
+ * that *ptr*->ptr points to. LLVM __builtin_btf_type_id(type, 1)
+ * can be used to look up vmlinux BTF type ids. Traversing the
+ * data structure using BTF, the type information and values are
+ * stored in the first *str_size* - 1 bytes of *str*. Safe copy of
+ * the pointer data is carried out to avoid kernel crashes during
+ * operation. Smaller types can use string space on the stack;
+ * larger programs can use map data to store the string
+ * representation.
+ *
+ * The string can be subsequently shared with userspace via
+ * bpf_perf_event_output() or ring buffer interfaces.
+ * bpf_trace_printk() is to be avoided as it places too small
+ * a limit on string size to be useful.
+ *
+ * *flags* is a combination of
+ *
+ * **BTF_F_COMPACT**
+ * no formatting around type information
+ * **BTF_F_NONAME**
+ * no struct/union member names/types
+ * **BTF_F_PTR_RAW**
+ * show raw (unobfuscated) pointer values;
+ * equivalent to printk specifier %px.
+ * **BTF_F_ZERO**
+ * show zero-valued struct/union members; they
+ * are not displayed by default
+ *
+ * Return
+ * The number of bytes that were written (or would have been
+ * written if output had to be truncated due to string size),
+ * or a negative error in cases of failure.
+ *
+ * long bpf_seq_printf_btf(struct seq_file *m, struct btf_ptr *ptr, u32 ptr_size, u64 flags)
+ * Description
+ * Use BTF to write a string representation of *ptr*->ptr to
+ * the seq_file *m*, using *ptr*->type_id as per bpf_snprintf_btf().
+ * *flags* are identical to those used for bpf_snprintf_btf.
+ * Return
+ * 0 on success or a negative error in case of failure.
+ *
+ * u64 bpf_skb_cgroup_classid(struct sk_buff *skb)
+ * Description
+ * See **bpf_get_cgroup_classid**\ () for the main description.
+ * This helper differs from **bpf_get_cgroup_classid**\ () in that
+ * the cgroup v1 net_cls class is retrieved only from the *skb*'s
+ * associated socket instead of the current process.
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
+ * long bpf_redirect_neigh(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*
+ * and fill in L2 addresses from neighboring subsystem. This helper
+ * is somewhat similar to **bpf_redirect**\ (), except that it
+ * populates L2 addresses as well, meaning, internally, the helper
+ * performs a FIB lookup based on the skb's networking header to
+ * get the address of the next hop and then relies on the neighbor
+ * lookup for the L2 address of the nexthop.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types, and enabled
+ * for IPv4 and IPv6 protocols.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
+ *
+ * void *bpf_per_cpu_ptr(const void *percpu_ptr, u32 cpu)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on *cpu*. A ksym is an
+ * extern variable decorated with '__ksym'. For a ksym, a
+ * variable of the same name (either static or global) is
+ * defined in the kernel. The ksym is percpu if that kernel
+ * variable is percpu.
+ * The returned pointer points to the global percpu var on *cpu*.
+ *
+ * bpf_per_cpu_ptr() has the same semantic as per_cpu_ptr() in the
+ * kernel, except that bpf_per_cpu_ptr() may return NULL. This
+ * happens if *cpu* is larger than nr_cpu_ids. The caller of
+ * bpf_per_cpu_ptr() must check the returned value.
+ * Return
+ * A pointer pointing to the kernel percpu variable on *cpu*, or
+ * NULL, if *cpu* is invalid.
+ *
+ * void *bpf_this_cpu_ptr(const void *percpu_ptr)
+ * Description
+ * Take a pointer to a percpu ksym, *percpu_ptr*, and return a
+ * pointer to the percpu kernel variable on this cpu. See the
+ * description of 'ksym' in **bpf_per_cpu_ptr**\ ().
+ *
+ * bpf_this_cpu_ptr() has the same semantic as this_cpu_ptr() in
+ * the kernel. Different from **bpf_per_cpu_ptr**\ (), it would
+ * never return NULL.
+ * Return
+ * A pointer pointing to the kernel percpu variable on this cpu.
+ *
+ * long bpf_redirect_peer(u32 ifindex, u64 flags)
+ * Description
+ * Redirect the packet to another net device of index *ifindex*.
+ * This helper is somewhat similar to **bpf_redirect**\ (), except
+ * that the redirection happens to the *ifindex*' peer device and
+ * the netns switch takes place from ingress to ingress without
+ * going through the CPU's backlog queue.
+ *
+ * The *flags* argument is reserved and must be 0. The helper is
+ * currently only supported for tc BPF program types at the ingress
+ * hook and for veth device types. The peer device must reside in a
+ * different network namespace.
+ * Return
+ * The helper returns **TC_ACT_REDIRECT** on success or
+ * **TC_ACT_SHOT** on error.
*/
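Editorial sketch pulling the inode-storage pieces documented above together; a minimal hypothetical LSM-side use, with map and hook names illustrative only:

	struct {
		__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
		__uint(map_flags, BPF_F_NO_PREALLOC);
		__type(key, int);
		__type(value, __u32);
	} unlink_cnt SEC(".maps");

	SEC("lsm/inode_unlink")
	int BPF_PROG(count_unlink, struct inode *dir, struct dentry *victim)
	{
		__u32 *cnt;

		/* value lives on the inode itself; created on first access */
		cnt = bpf_inode_storage_get(&unlink_cnt, victim->d_inode, NULL,
					    BPF_LOCAL_STORAGE_GET_F_CREATE);
		if (cnt)
			(*cnt)++;
		return 0;
	}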
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -3539,6 +3882,20 @@ union bpf_attr {
FN(skc_to_tcp_request_sock), \
FN(skc_to_udp6_sock), \
FN(get_task_stack), \
+ FN(load_hdr_opt), \
+ FN(store_hdr_opt), \
+ FN(reserve_hdr_opt), \
+ FN(inode_storage_get), \
+ FN(inode_storage_delete), \
+ FN(d_path), \
+ FN(copy_from_user), \
+ FN(snprintf_btf), \
+ FN(seq_printf_btf), \
+ FN(skb_cgroup_classid), \
+ FN(redirect_neigh), \
+ FN(bpf_per_cpu_ptr), \
+ FN(bpf_this_cpu_ptr), \
+ FN(redirect_peer), \
/* */
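Editorial sketch for bpf_per_cpu_ptr()/bpf_this_cpu_ptr() above: the expected pattern is a __ksym extern matching a percpu kernel variable. This assumes runqueues is visible in vmlinux BTF; the attach point is illustrative.

	extern const struct rq runqueues __ksym;	/* percpu kernel var */

	SEC("raw_tp/sys_enter")
	int dump_nr_running(const void *ctx)
	{
		struct rq *rq;

		rq = bpf_per_cpu_ptr(&runqueues, 1);	/* may return NULL */
		if (rq)
			bpf_printk("cpu1 nr_running=%u", rq->nr_running);

		rq = bpf_this_cpu_ptr(&runqueues);	/* never NULL */
		bpf_printk("this cpu nr_running=%u", rq->nr_running);
		return 0;
	}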
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@@ -3648,9 +4005,13 @@ enum {
BPF_F_SYSCTL_BASE_NAME = (1ULL << 0),
};
-/* BPF_FUNC_sk_storage_get flags */
+/* BPF_FUNC_<kernel_obj>_storage_get flags */
enum {
- BPF_SK_STORAGE_GET_F_CREATE = (1ULL << 0),
+ BPF_LOCAL_STORAGE_GET_F_CREATE = (1ULL << 0),
+ /* BPF_SK_STORAGE_GET_F_CREATE is only kept for backward compatibility
+ * and BPF_LOCAL_STORAGE_GET_F_CREATE must be used instead.
+ */
+ BPF_SK_STORAGE_GET_F_CREATE = BPF_LOCAL_STORAGE_GET_F_CREATE,
};
/* BPF_FUNC_read_branch_records flags. */
@@ -4071,6 +4432,15 @@ struct bpf_link_info {
__u64 cgroup_id;
__u32 attach_type;
} cgroup;
+ struct {
+ __aligned_u64 target_name; /* in/out: target_name buffer ptr */
+ __u32 target_name_len; /* in/out: target_name buffer len */
+ union {
+ struct {
+ __u32 map_id;
+ } map;
+ };
+ } iter;
struct {
__u32 netns_ino;
__u32 attach_type;
@@ -4158,6 +4528,36 @@ struct bpf_sock_ops {
__u64 bytes_received;
__u64 bytes_acked;
__bpf_md_ptr(struct bpf_sock *, sk);
+ /* [skb_data, skb_data_end) covers the whole TCP header.
+ *
+ * BPF_SOCK_OPS_PARSE_HDR_OPT_CB: The packet received
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB: Not useful because the
+ * header has not been written.
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB: The header and options have
+ * been written so far.
+ * BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: The SYNACK that concludes
+ * the 3WHS.
+ * BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: The ACK that concludes
+ * the 3WHS.
+ *
+ * bpf_load_hdr_opt() can also be used to read a particular option.
+ */
+ __bpf_md_ptr(void *, skb_data);
+ __bpf_md_ptr(void *, skb_data_end);
+ __u32 skb_len; /* The total length of a packet.
+ * It includes the header, options,
+ * and payload.
+ */
+ __u32 skb_tcp_flags; /* tcp_flags of the header. It provides
+ * an easy way to check for tcp_flags
+ * without parsing skb_data.
+ *
+ * In particular, the skb_tcp_flags
+ * will still be available in
+ * BPF_SOCK_OPS_HDR_OPT_LEN_CB even though
+ * the outgoing header has not
+ * been written yet.
+ */
};
/* Definitions for bpf_sock_ops_cb_flags */
@@ -4166,8 +4566,51 @@ enum {
BPF_SOCK_OPS_RETRANS_CB_FLAG = (1<<1),
BPF_SOCK_OPS_STATE_CB_FLAG = (1<<2),
BPF_SOCK_OPS_RTT_CB_FLAG = (1<<3),
+ /* Call bpf for all received TCP headers. The bpf prog will be
+ * called under sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ *
+ * It could be used at the client/active side (i.e. connect() side)
+ * when the server has signalled that it is in syncookie mode
+ * and requires the active side to resend the bpf-written
+ * options. The active side can keep writing the bpf-options until
+ * it receives a valid packet from the server side confirming
+ * that the earlier packet (and options) has been received. A
+ * later example patch uses it like this at the active side when
+ * the server is in syncookie mode.
+ *
+ * The bpf prog will usually turn this off in the common cases.
+ */
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = (1<<4),
+ /* Call bpf when kernel has received a header option that
+ * the kernel cannot handle. The bpf prog will be called under
+ * sock_ops->op == BPF_SOCK_OPS_PARSE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_PARSE_HDR_OPT_CB
+ * for the header option related helpers that will be useful
+ * to the bpf programs.
+ */
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = (1<<5),
+ /* Call bpf when the kernel is writing header options for the
+ * outgoing packet. The bpf prog will first be called
+ * to reserve space in a skb under
+ * sock_ops->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB. Then
+ * the bpf prog will be called to write the header option(s)
+ * under sock_ops->op == BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ *
+ * Please refer to the comment in BPF_SOCK_OPS_HDR_OPT_LEN_CB
+ * and BPF_SOCK_OPS_WRITE_HDR_OPT_CB for the header option
+ * related helpers that will be useful to the bpf programs.
+ *
+ * The kernel gets its chance to reserve space and write
+ * options first before the BPF program does.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = (1<<6),
/* Mask of all currently supported cb flags */
- BPF_SOCK_OPS_ALL_CB_FLAGS = 0xF,
+ BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7F,
};
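Editorial sketch: a sock_ops program opts in to the new callbacks with bpf_sock_ops_cb_flags_set(). A hedged example enabling header-option writing; option contents are elided and the four reserved bytes are illustrative.

	SEC("sockops")
	int hdr_opt(struct bpf_sock_ops *skops)
	{
		switch (skops->op) {
		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
			bpf_sock_ops_cb_flags_set(skops,
				skops->bpf_sock_ops_cb_flags |
				BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
			break;
		case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
			bpf_reserve_hdr_opt(skops, 4, 0);
			break;
		case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
			/* bpf_store_hdr_opt() of the reserved option here */
			break;
		}
		return 1;
	}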
/* List of known BPF sock_ops operators.
@@ -4223,6 +4666,63 @@ enum {
*/
BPF_SOCK_OPS_RTT_CB, /* Called on every RTT.
*/
+ BPF_SOCK_OPS_PARSE_HDR_OPT_CB, /* Parse the header option.
+ * It will be called to handle
+ * the packets received at
+ * an already established
+ * connection.
+ *
+ * sock_ops->skb_data:
+ * Referring to the received skb.
+ * It covers the TCP header only.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option.
+ */
+ BPF_SOCK_OPS_HDR_OPT_LEN_CB, /* Reserve space for writing the
+ * header option later in
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Not available because no header has
+ * been written yet.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the
+ * outgoing skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_reserve_hdr_opt() should
+ * be used to reserve space.
+ */
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB, /* Write the header options
+ * Arg1: bool want_cookie. (in
+ * writing SYNACK only)
+ *
+ * sock_ops->skb_data:
+ * Referring to the outgoing skb.
+ * It covers the TCP header
+ * that has already been written
+ * by the kernel and the
+ * earlier bpf-progs.
+ *
+ * sock_ops->skb_tcp_flags:
+ * The tcp_flags of the outgoing
+ * skb. (e.g. SYN, ACK, FIN).
+ *
+ * bpf_store_hdr_opt() should
+ * be used to write the
+ * option.
+ *
+ * bpf_load_hdr_opt() can also
+ * be used to search for a
+ * particular option that
+ * has already been written
+ * by the kernel or the
+ * earlier bpf-progs.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -4250,6 +4750,63 @@ enum {
enum {
TCP_BPF_IW = 1001, /* Set TCP initial congestion window */
TCP_BPF_SNDCWND_CLAMP = 1002, /* Set sndcwnd_clamp */
+ TCP_BPF_DELACK_MAX = 1003, /* Max delay ack in usecs */
+ TCP_BPF_RTO_MIN = 1004, /* Min RTO in usecs */
+ /* Copy the SYN pkt to optval
+ *
+ * BPF_PROG_TYPE_SOCK_OPS only. It is similar to the
+ * bpf_getsockopt(TCP_SAVED_SYN) but it does not limit
+ * to only getting from the saved_syn. It can either get the
+ * syn packet from:
+ *
+ * 1. the just-received SYN packet (only available when writing the
+ * SYNACK). It will be useful when it is not necessary to
+ * save the SYN packet for later use. It is also the only way
+ * to get the SYN during syncookie mode because the syn
+ * packet cannot be saved during syncookie.
+ *
+ * OR
+ *
+ * 2. the earlier saved syn which was done by
+ * bpf_setsockopt(TCP_SAVE_SYN).
+ *
+ * The bpf_getsockopt(TCP_BPF_SYN*) option will hide where the
+ * SYN packet is obtained.
+ *
+ * If the bpf-prog does not need the IP[46] header, the
+ * bpf-prog can avoid parsing the IP header by using
+ * TCP_BPF_SYN. Otherwise, the bpf-prog can get both
+ * IP[46] and TCP header by using TCP_BPF_SYN_IP.
+ *
+ * >0: Total number of bytes copied
+ * -ENOSPC: Not enough space in optval. Only optlen
+ * bytes are copied.
+ * -ENOENT: The SYN skb is not available now and the earlier SYN pkt
+ * is not saved by setsockopt(TCP_SAVE_SYN).
+ */
+ TCP_BPF_SYN = 1005, /* Copy the TCP header */
+ TCP_BPF_SYN_IP = 1006, /* Copy the IP[46] and TCP header */
+ TCP_BPF_SYN_MAC = 1007, /* Copy the MAC, IP[46], and TCP header */
+};
+
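Editorial sketch: from a sock_ops program these optnames are read with bpf_getsockopt() at the IPPROTO_TCP level; the buffer size here is illustrative.

	char syn[320];
	int ret;

	/* IP[46] + TCP header of the SYN, wherever the kernel kept it */
	ret = bpf_getsockopt(skops, IPPROTO_TCP, TCP_BPF_SYN_IP,
			     syn, sizeof(syn));
	if (ret > 0)
		/* ret bytes of SYN headers are now in syn[] */;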
+enum {
+ BPF_LOAD_HDR_OPT_TCP_SYN = (1ULL << 0),
+};
+
+/* args[0] value during BPF_SOCK_OPS_HDR_OPT_LEN_CB and
+ * BPF_SOCK_OPS_WRITE_HDR_OPT_CB.
+ */
+enum {
+ BPF_WRITE_HDR_TCP_CURRENT_MSS = 1, /* Kernel is finding the
+ * total option space
+ * required for an established
+ * sk in order to calculate the
+ * MSS. No skb is actually
+ * sent.
+ */
+ BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 2, /* Kernel is in syncookie mode
+ * when sending a SYN.
+ */
};
struct bpf_perf_event_value {
@@ -4447,4 +5004,34 @@ struct bpf_sk_lookup {
__u32 local_port; /* Host byte order */
};
+/*
+ * struct btf_ptr is used for typed pointer representation; the
+ * type id is used to render the pointer data as the appropriate type
+ * via the bpf_snprintf_btf() helper described above. A flags field -
+ * potentially to specify additional details about the BTF pointer
+ * (rather than its mode of display) - is included for future use.
+ * Display flags - BTF_F_* - are passed to bpf_snprintf_btf separately.
+ */
+struct btf_ptr {
+ void *ptr;
+ __u32 type_id;
+ __u32 flags; /* BTF ptr flags; unused at present. */
+};
+
+/*
+ * Flags to control bpf_snprintf_btf() behaviour.
+ * - BTF_F_COMPACT: no formatting around type information
+ * - BTF_F_NONAME: no struct/union member names/types
+ * - BTF_F_PTR_RAW: show raw (unobfuscated) pointer values;
+ * equivalent to %px.
+ * - BTF_F_ZERO: show zero-valued struct/union members; they
+ * are not displayed by default
+ */
+enum {
+ BTF_F_COMPACT = (1ULL << 0),
+ BTF_F_NONAME = (1ULL << 1),
+ BTF_F_PTR_RAW = (1ULL << 2),
+ BTF_F_ZERO = (1ULL << 3),
+};
+
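Editorial sketch of the typed-pointer flow; the attach point is illustrative, and the type id is obtained with the LLVM built-in named in the bpf_snprintf_btf() description above.

	static char out[256];

	SEC("tp_btf/sched_switch")
	int BPF_PROG(show_prev, bool preempt, struct task_struct *prev,
		     struct task_struct *next)
	{
		struct btf_ptr ptr = {
			.ptr = prev,
			/* per the helper description above */
			.type_id = __builtin_btf_type_id(struct task_struct, 1),
		};

		bpf_snprintf_btf(out, sizeof(out), &ptr, sizeof(ptr),
				 BTF_F_COMPACT);
		return 0;
	}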
#endif /* _UAPI__LINUX_BPF_H__ */
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 9ae8f4ef0aac..5f9abed3e226 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
# Most of this file is copied from tools/lib/traceevent/Makefile
+RM ?= rm
+srctree = $(abs_srctree)
+
LIBBPF_VERSION := $(shell \
grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \
sort -rV | head -n1 | cut -d'_' -f2)
@@ -56,7 +59,7 @@ ifndef VERBOSE
endif
FEATURE_USER = .libbpf
-FEATURE_TESTS = libelf libelf-mmap zlib bpf reallocarray
+FEATURE_TESTS = libelf zlib bpf
FEATURE_DISPLAY = libelf zlib bpf
INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
@@ -95,27 +98,18 @@ PC_FILE = libbpf.pc
ifdef EXTRA_CFLAGS
CFLAGS := $(EXTRA_CFLAGS)
else
- CFLAGS := -g -Wall
-endif
-
-ifeq ($(feature-libelf-mmap), 1)
- override CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
-endif
-
-ifeq ($(feature-reallocarray), 0)
- override CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
+ CFLAGS := -g -O2
endif
# Append required CFLAGS
-override CFLAGS += $(EXTRA_WARNINGS)
+override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum
override CFLAGS += -Werror -Wall
-override CFLAGS += -fPIC
override CFLAGS += $(INCLUDES)
override CFLAGS += -fvisibility=hidden
override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
# flags specific for shared library
-SHLIB_FLAGS := -DSHARED
+SHLIB_FLAGS := -DSHARED -fPIC
ifeq ($(VERBOSE),1)
Q =
@@ -197,7 +191,7 @@ $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
@ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
$(OUTPUT)libbpf.a: $(BPF_IN_STATIC)
- $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
+ $(QUIET_LINK)$(RM) -f $@; $(AR) rcs $@ $^
$(OUTPUT)libbpf.pc:
$(QUIET_GEN)sed -e "s|@PREFIX@|$(prefix)|" \
@@ -271,10 +265,10 @@ install: install_lib install_pkgconfig install_headers
### Cleaning rules
config-clean:
- $(call QUIET_CLEAN, config)
+ $(call QUIET_CLEAN, feature-detect)
$(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
-clean:
+clean: config-clean
$(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \
*~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \
$(SHARED_OBJDIR) $(STATIC_OBJDIR) \
@@ -301,7 +295,7 @@ cscope:
cscope -b -q -I $(srctree)/include -f cscope.out
tags:
- rm -f TAGS tags
+ $(RM) -f TAGS tags
ls *.c *.h | xargs $(TAGS_PROG) -a
# Declare the contents of the .PHONY variable as phony. We keep that
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 0750681057c2..d27e34133973 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -32,9 +32,6 @@
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
/*
* When building perf, unistd.h is overridden. __NR_bpf is
* required to be defined explicitly.
@@ -589,19 +586,31 @@ int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
const struct bpf_link_create_opts *opts)
{
+ __u32 target_btf_id, iter_info_len;
union bpf_attr attr;
if (!OPTS_VALID(opts, bpf_link_create_opts))
return -EINVAL;
+ iter_info_len = OPTS_GET(opts, iter_info_len, 0);
+ target_btf_id = OPTS_GET(opts, target_btf_id, 0);
+
+ if (iter_info_len && target_btf_id)
+ return -EINVAL;
+
memset(&attr, 0, sizeof(attr));
attr.link_create.prog_fd = prog_fd;
attr.link_create.target_fd = target_fd;
attr.link_create.attach_type = attach_type;
attr.link_create.flags = OPTS_GET(opts, flags, 0);
- attr.link_create.iter_info =
- ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
- attr.link_create.iter_info_len = OPTS_GET(opts, iter_info_len, 0);
+
+ if (iter_info_len) {
+ attr.link_create.iter_info =
+ ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
+ attr.link_create.iter_info_len = iter_info_len;
+ } else if (target_btf_id) {
+ attr.link_create.target_btf_id = target_btf_id;
+ }
return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
}
@@ -715,6 +724,37 @@ int bpf_prog_test_run_xattr(struct bpf_prog_test_run_attr *test_attr)
return ret;
}
+int bpf_prog_test_run_opts(int prog_fd, struct bpf_test_run_opts *opts)
+{
+ union bpf_attr attr;
+ int ret;
+
+ if (!OPTS_VALID(opts, bpf_test_run_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.test.prog_fd = prog_fd;
+ attr.test.cpu = OPTS_GET(opts, cpu, 0);
+ attr.test.flags = OPTS_GET(opts, flags, 0);
+ attr.test.repeat = OPTS_GET(opts, repeat, 0);
+ attr.test.duration = OPTS_GET(opts, duration, 0);
+ attr.test.ctx_size_in = OPTS_GET(opts, ctx_size_in, 0);
+ attr.test.ctx_size_out = OPTS_GET(opts, ctx_size_out, 0);
+ attr.test.data_size_in = OPTS_GET(opts, data_size_in, 0);
+ attr.test.data_size_out = OPTS_GET(opts, data_size_out, 0);
+ attr.test.ctx_in = ptr_to_u64(OPTS_GET(opts, ctx_in, NULL));
+ attr.test.ctx_out = ptr_to_u64(OPTS_GET(opts, ctx_out, NULL));
+ attr.test.data_in = ptr_to_u64(OPTS_GET(opts, data_in, NULL));
+ attr.test.data_out = ptr_to_u64(OPTS_GET(opts, data_out, NULL));
+
+ ret = sys_bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr));
+ OPTS_SET(opts, data_size_out, attr.test.data_size_out);
+ OPTS_SET(opts, ctx_size_out, attr.test.ctx_size_out);
+ OPTS_SET(opts, duration, attr.test.duration);
+ OPTS_SET(opts, retval, attr.test.retval);
+ return ret;
+}
+
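Editorial sketch: caller-side, the opts struct pairs with DECLARE_LIBBPF_OPTS(). A hedged example pinning a run to CPU 2 via the new flags field; the packet bytes are illustrative.

	char pkt[64] = {};	/* test input packet */
	char out[128];
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.data_out = out,
		.data_size_out = sizeof(out),
		.repeat = 1,
		.flags = BPF_F_TEST_RUN_ON_CPU,
		.cpu = 2,
	);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	if (!err)
		printf("retval=%u duration=%uns\n", topts.retval, topts.duration);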
static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd)
{
union bpf_attr attr;
@@ -818,7 +858,7 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
return sys_bpf(BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}
-int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
+int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf, __u32 log_buf_size,
bool do_log)
{
union bpf_attr attr = {};
@@ -875,3 +915,19 @@ int bpf_enable_stats(enum bpf_stats_type type)
return sys_bpf(BPF_ENABLE_STATS, &attr, sizeof(attr));
}
+
+int bpf_prog_bind_map(int prog_fd, int map_fd,
+ const struct bpf_prog_bind_opts *opts)
+{
+ union bpf_attr attr;
+
+ if (!OPTS_VALID(opts, bpf_prog_bind_opts))
+ return -EINVAL;
+
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_bind_map.prog_fd = prog_fd;
+ attr.prog_bind_map.map_fd = map_fd;
+ attr.prog_bind_map.flags = OPTS_GET(opts, flags, 0);
+
+ return sys_bpf(BPF_PROG_BIND_MAP, &attr, sizeof(attr));
+}
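Editorial sketch of the new call: binding a map keeps it alive for the program's lifetime even when the program text never references it (file descriptors assumed valid).

	DECLARE_LIBBPF_OPTS(bpf_prog_bind_opts, opts);	/* flags default to 0 */
	int err = bpf_prog_bind_map(prog_fd, map_fd, &opts);

	if (err)
		fprintf(stderr, "failed to bind map to prog: %d\n", err);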
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 015d13f25fcc..875dde20d56e 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -174,8 +174,9 @@ struct bpf_link_create_opts {
__u32 flags;
union bpf_iter_link_info *iter_info;
__u32 iter_info_len;
+ __u32 target_btf_id;
};
-#define bpf_link_create_opts__last_field iter_info_len
+#define bpf_link_create_opts__last_field target_btf_id
LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
@@ -234,7 +235,7 @@ LIBBPF_API int bpf_prog_query(int target_fd, enum bpf_attach_type type,
__u32 query_flags, __u32 *attach_flags,
__u32 *prog_ids, __u32 *prog_cnt);
LIBBPF_API int bpf_raw_tracepoint_open(const char *name, int prog_fd);
-LIBBPF_API int bpf_load_btf(void *btf, __u32 btf_size, char *log_buf,
+LIBBPF_API int bpf_load_btf(const void *btf, __u32 btf_size, char *log_buf,
__u32 log_buf_size, bool do_log);
LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
__u32 *buf_len, __u32 *prog_id, __u32 *fd_type,
@@ -243,6 +244,40 @@ LIBBPF_API int bpf_task_fd_query(int pid, int fd, __u32 flags, char *buf,
enum bpf_stats_type; /* defined in up-to-date linux/bpf.h */
LIBBPF_API int bpf_enable_stats(enum bpf_stats_type type);
+struct bpf_prog_bind_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ __u32 flags;
+};
+#define bpf_prog_bind_opts__last_field flags
+
+LIBBPF_API int bpf_prog_bind_map(int prog_fd, int map_fd,
+ const struct bpf_prog_bind_opts *opts);
+
+struct bpf_test_run_opts {
+ size_t sz; /* size of this struct for forward/backward compatibility */
+ const void *data_in; /* optional */
+ void *data_out; /* optional */
+ __u32 data_size_in;
+ __u32 data_size_out; /* in: max length of data_out
+ * out: length of data_out
+ */
+ const void *ctx_in; /* optional */
+ void *ctx_out; /* optional */
+ __u32 ctx_size_in;
+ __u32 ctx_size_out; /* in: max length of ctx_out
+ * out: length of ctx_out
+ */
+ __u32 retval; /* out: return code of the BPF program */
+ int repeat;
+ __u32 duration; /* out: average per repetition in ns */
+ __u32 flags;
+ __u32 cpu;
+};
+#define bpf_test_run_opts__last_field cpu
+
+LIBBPF_API int bpf_prog_test_run_opts(int prog_fd,
+ struct bpf_test_run_opts *opts);
+
#ifdef __cplusplus
} /* extern "C" */
#endif
diff --git a/tools/lib/bpf/bpf_core_read.h b/tools/lib/bpf/bpf_core_read.h
index eae5cccff761..bbcefb3ff5a5 100644
--- a/tools/lib/bpf/bpf_core_read.h
+++ b/tools/lib/bpf/bpf_core_read.h
@@ -19,32 +19,52 @@ enum bpf_field_info_kind {
BPF_FIELD_RSHIFT_U64 = 5,
};
+/* second argument to __builtin_btf_type_id() built-in */
+enum bpf_type_id_kind {
+ BPF_TYPE_ID_LOCAL = 0, /* BTF type ID in local program */
+ BPF_TYPE_ID_TARGET = 1, /* BTF type ID in target kernel */
+};
+
+/* second argument to __builtin_preserve_type_info() built-in */
+enum bpf_type_info_kind {
+ BPF_TYPE_EXISTS = 0, /* type existence in target kernel */
+ BPF_TYPE_SIZE = 1, /* type size in target kernel */
+};
+
+/* second argument to __builtin_preserve_enum_value() built-in */
+enum bpf_enum_value_kind {
+ BPF_ENUMVAL_EXISTS = 0, /* enum value existence in kernel */
+ BPF_ENUMVAL_VALUE = 1, /* enum value's integer value */
+};
+
#define __CORE_RELO(src, field, info) \
__builtin_preserve_field_info((src)->field, BPF_FIELD_##info)
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
- bpf_probe_read((void *)dst, \
- __CORE_RELO(src, fld, BYTE_SIZE), \
- (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+ bpf_probe_read_kernel( \
+ (void *)dst, \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#else
/* semantics of LSHIFT_64 assumes loading values into low-ordered bytes, so
* for big-endian we need to adjust destination pointer accordingly, based on
* field byte size
*/
#define __CORE_BITFIELD_PROBE_READ(dst, src, fld) \
- bpf_probe_read((void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
- __CORE_RELO(src, fld, BYTE_SIZE), \
- (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
+ bpf_probe_read_kernel( \
+ (void *)dst + (8 - __CORE_RELO(src, fld, BYTE_SIZE)), \
+ __CORE_RELO(src, fld, BYTE_SIZE), \
+ (const void *)src + __CORE_RELO(src, fld, BYTE_OFFSET))
#endif
/*
* Extract bitfield, identified by s->field, and return its value as u64.
* All this is done in relocatable manner, so bitfield changes such as
* signedness, bit size, offset changes, this will be handled automatically.
- * This version of macro is using bpf_probe_read() to read underlying integer
- * storage. Macro functions as an expression and its return type is
- * bpf_probe_read()'s return value: 0, on success, <0 on error.
+ * This version of the macro uses bpf_probe_read_kernel() to read the
+ * underlying integer storage. The macro functions as an expression and its
+ * return type is bpf_probe_read_kernel()'s return value: 0 on success, <0 on error.
*/
#define BPF_CORE_READ_BITFIELD_PROBED(s, field) ({ \
unsigned long long val = 0; \
@@ -92,15 +112,75 @@ enum bpf_field_info_kind {
__builtin_preserve_field_info(field, BPF_FIELD_EXISTS)
/*
- * Convenience macro to get byte size of a field. Works for integers,
+ * Convenience macro to get the byte size of a field. Works for integers,
* struct/unions, pointers, arrays, and enums.
*/
#define bpf_core_field_size(field) \
__builtin_preserve_field_info(field, BPF_FIELD_BYTE_SIZE)
/*
- * bpf_core_read() abstracts away bpf_probe_read() call and captures offset
- * relocation for source address using __builtin_preserve_access_index()
+ * Convenience macro to get the BTF type ID of a specified type, using
+ * local BTF information. Returns a 32-bit unsigned integer with the type
+ * ID from the program's own BTF. Always succeeds.
+ */
+#define bpf_core_type_id_local(type) \
+ __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_LOCAL)
+
+/*
+ * Convenience macro to get BTF type ID of a target kernel's type that matches
+ * specified local type.
+ * Returns:
+ * - valid 32-bit unsigned type ID in kernel BTF;
+ * - 0, if no matching type was found in a target kernel BTF.
+ */
+#define bpf_core_type_id_kernel(type) \
+ __builtin_btf_type_id(*(typeof(type) *)0, BPF_TYPE_ID_TARGET)
+
+/*
+ * Convenience macro to check that provided named type
+ * (struct/union/enum/typedef) exists in a target kernel.
+ * Returns:
+ * 1, if such type is present in target kernel's BTF;
+ * 0, if no matching type is found.
+ */
+#define bpf_core_type_exists(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_EXISTS)
+
+/*
+ * Convenience macro to get the byte size of a provided named type
+ * (struct/union/enum/typedef) in a target kernel.
+ * Returns:
+ * >= 0 size (in bytes), if type is present in target kernel's BTF;
+ * 0, if no matching type is found.
+ */
+#define bpf_core_type_size(type) \
+ __builtin_preserve_type_info(*(typeof(type) *)0, BPF_TYPE_SIZE)
+
+/*
+ * Convenience macro to check that provided enumerator value is defined in
+ * a target kernel.
+ * Returns:
+ * 1, if specified enum type and its enumerator value are present in target
+ * kernel's BTF;
+ * 0, if no matching enum and/or enum value within that enum is found.
+ */
+#define bpf_core_enum_value_exists(enum_type, enum_value) \
+ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_EXISTS)
+
+/*
+ * Convenience macro to get the integer value of an enumerator value in
+ * a target kernel.
+ * Returns:
+ * 64-bit value, if specified enum type and its enumerator value are
+ * present in target kernel's BTF;
+ * 0, if no matching enum and/or enum value within that enum is found.
+ */
+#define bpf_core_enum_value(enum_type, enum_value) \
+ __builtin_preserve_enum_value(*(typeof(enum_type) *)enum_value, BPF_ENUMVAL_VALUE)
+
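Taken together, these built-ins let a program probe the target kernel at load time. A sketch (assuming the UAPI enum bpf_tcp_state; the fallback constant is hypothetical):

	__u64 listen_val = 10; /* hypothetical fallback for older kernels */

	if (bpf_core_enum_value_exists(enum bpf_tcp_state, BPF_TCP_LISTEN))
		listen_val = bpf_core_enum_value(enum bpf_tcp_state, BPF_TCP_LISTEN);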
+/*
+ * bpf_core_read() abstracts away bpf_probe_read_kernel() call and captures
+ * offset relocation for source address using __builtin_preserve_access_index()
* built-in, provided by Clang.
*
* __builtin_preserve_access_index() takes as an argument an expression of
@@ -115,8 +195,8 @@ enum bpf_field_info_kind {
* (local) BTF, used to record relocation.
*/
#define bpf_core_read(dst, sz, src) \
- bpf_probe_read(dst, sz, \
- (const void *)__builtin_preserve_access_index(src))
+ bpf_probe_read_kernel(dst, sz, \
+ (const void *)__builtin_preserve_access_index(src))
/*
* bpf_core_read_str() is a thin wrapper around bpf_probe_read_str()
@@ -124,8 +204,8 @@ enum bpf_field_info_kind {
* argument.
*/
#define bpf_core_read_str(dst, sz, src) \
- bpf_probe_read_str(dst, sz, \
- (const void *)__builtin_preserve_access_index(src))
+ bpf_probe_read_kernel_str(dst, sz, \
+ (const void *)__builtin_preserve_access_index(src))
#define ___concat(a, b) a ## b
#define ___apply(fn, n) ___concat(fn, n)
@@ -239,15 +319,17 @@ enum bpf_field_info_kind {
* int x = BPF_CORE_READ(s, a.b.c, d.e, f, g);
*
* BPF_CORE_READ will decompose above statement into 4 bpf_core_read (BPF
- * CO-RE relocatable bpf_probe_read() wrapper) calls, logically equivalent to:
+ * CO-RE relocatable bpf_probe_read_kernel() wrapper) calls, logically
+ * equivalent to:
* 1. const void *__t = s->a.b.c;
* 2. __t = __t->d.e;
* 3. __t = __t->f;
* 4. return __t->g;
*
* Equivalence is logical, because there is a heavy type casting/preservation
- * involved, as well as all the reads are happening through bpf_probe_read()
- * calls using __builtin_preserve_access_index() to emit CO-RE relocations.
+ * involved, and all the reads happen through
+ * bpf_probe_read_kernel() calls using __builtin_preserve_access_index() to
+ * emit CO-RE relocations.
*
* N.B. Only up to 9 "field accessors" are supported, which should be more
* than enough for any practical purpose.
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index e9a4ecddb7a5..2bdb7d6dbad2 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -32,6 +32,9 @@
#ifndef __always_inline
#define __always_inline __attribute__((always_inline))
#endif
+#ifndef __noinline
+#define __noinline __attribute__((noinline))
+#endif
#ifndef __weak
#define __weak __attribute__((weak))
#endif
@@ -51,6 +54,52 @@
#endif
/*
+ * Helper macro to throw a compilation error if __bpf_unreachable() gets
+ * built into the resulting code. This works because the BPF back end does
+ * not implement __builtin_trap(). This is useful to assert that certain paths
+ * of the program code are never used and hence eliminated by the compiler.
+ *
+ * For example, consider a switch statement that covers known cases used by
+ * the program. __bpf_unreachable() can then reside in the default case. If
+ * the program gets extended such that a case is not covered in the switch
+ * statement, then it will throw a build error due to the default case not
+ * being compiled out.
+ */
+#ifndef __bpf_unreachable
+# define __bpf_unreachable() __builtin_trap()
+#endif
+
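For illustration (a sketch, not from this patch), a switch over a hypothetical compile-time constant FLAVOR: an uncovered value keeps the default case alive and becomes a build error instead of silent misbehavior:

	switch (FLAVOR) { /* FLAVOR, run_a, run_b are hypothetical */
	case FLAVOR_A:
		return run_a(ctx);
	case FLAVOR_B:
		return run_b(ctx);
	default:
		__bpf_unreachable(); /* compiled out when all cases are covered */
	}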
+/*
+ * Helper function to perform a tail call with a constant/immediate map slot.
+ */
+static __always_inline void
+bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
+{
+ if (!__builtin_constant_p(slot))
+ __bpf_unreachable();
+
+ /*
+ * Provide a hard guarantee that LLVM won't optimize setting r2 (map
+ * pointer) and r3 (constant map index) from _different paths_ ending
+ * up at the _same_ call insn as otherwise we won't be able to use the
+ * jmpq/nopl retpoline-free patching by the x86-64 JIT in the kernel
+ * given they mismatch. See also d2e4c1e6c294 ("bpf: Constant map key
+ * tracking for prog array pokes") for details on verifier tracking.
+ *
+ * Note on clobber list: we need to stay in-line with BPF calling
+ * convention, so even if we don't end up using r0, r4, r5, we need
+ * to mark them as clobber so that LLVM doesn't end up using them
+ * before / after the call.
+ */
+ asm volatile("r1 = %[ctx]\n\t"
+ "r2 = %[map]\n\t"
+ "r3 = %[slot]\n\t"
+ "call 12"
+ :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
+ : "r0", "r1", "r2", "r3", "r4", "r5");
+}
+
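A hedged usage sketch (map layout and section names as commonly used in BPF C programs, not part of this patch):

	struct {
		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
		__uint(max_entries, 2);
		__uint(key_size, sizeof(__u32));
		__uint(value_size, sizeof(__u32));
	} jmp_table SEC(".maps");

	SEC("classifier")
	int entry(struct __sk_buff *skb)
	{
		bpf_tail_call_static(skb, &jmp_table, 0); /* slot must be constant */
		return 0; /* reached only if slot 0 is empty */
	}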
+/*
* Helper structure used by eBPF C program
* to describe BPF map attributes to libbpf loader
*/
diff --git a/tools/lib/bpf/bpf_prog_linfo.c b/tools/lib/bpf/bpf_prog_linfo.c
index bafca49cb1e6..3ed1a27b5f7c 100644
--- a/tools/lib/bpf/bpf_prog_linfo.c
+++ b/tools/lib/bpf/bpf_prog_linfo.c
@@ -8,9 +8,6 @@
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
struct bpf_prog_linfo {
void *raw_linfo;
void *raw_jited_linfo;
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index eebf020cbe3e..f9ef37707888 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -289,9 +289,9 @@ struct pt_regs;
#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
#else
#define BPF_KPROBE_READ_RET_IP(ip, ctx) \
- ({ bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
+ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); })
#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) \
- ({ bpf_probe_read(&(ip), sizeof(ip), \
+ ({ bpf_probe_read_kernel(&(ip), sizeof(ip), \
(void *)(PT_REGS_FP(ctx) + sizeof(ip))); })
#endif
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 6bdbc389b493..231b07203e3d 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2018 Facebook */
+#include <byteswap.h>
#include <endian.h>
#include <stdio.h>
#include <stdlib.h>
@@ -21,26 +22,78 @@
#include "libbpf_internal.h"
#include "hashmap.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
#define BTF_MAX_NR_TYPES 0x7fffffffU
#define BTF_MAX_STR_OFFSET 0x7fffffffU
static struct btf_type btf_void;
struct btf {
- union {
- struct btf_header *hdr;
- void *data;
- };
- struct btf_type **types;
- const char *strings;
- void *nohdr_data;
+ /* raw BTF data in native endianness */
+ void *raw_data;
+ /* raw BTF data in non-native endianness */
+ void *raw_data_swapped;
+ __u32 raw_size;
+ /* whether target endianness differs from the native one */
+ bool swapped_endian;
+
+ /*
+ * When BTF is loaded from an ELF or raw memory it is stored
+ * in a contiguous memory block. The hdr, types_data, and strs_data
+ * pointers point inside that memory region to their respective parts
+ * of the BTF representation:
+ *
+ * +--------------------------------+
+ * | Header | Types | Strings |
+ * +--------------------------------+
+ * ^ ^ ^
+ * | | |
+ * hdr | |
+ * types_data-+ |
+ * strs_data------------+
+ *
+ * If BTF data is later modified, e.g., because types were added or
+ * removed or BTF deduplication was performed, this contiguous
+ * representation is broken up into three independently allocated
+ * memory regions to be able to modify them independently.
+ * raw_data is nulled out at that point, but can be later allocated
+ * and cached again if user calls btf__get_raw_data(), at which point
+ * raw_data will contain a contiguous copy of header, types, and
+ * strings:
+ *
+ * +----------+ +---------+ +-----------+
+ * | Header | | Types | | Strings |
+ * +----------+ +---------+ +-----------+
+ * ^ ^ ^
+ * | | |
+ * hdr | |
+ * types_data----+ |
+ * strs_data------------------+
+ *
+ * +----------+---------+-----------+
+ * | Header | Types | Strings |
+ * raw_data----->+----------+---------+-----------+
+ */
+ struct btf_header *hdr;
+
+ void *types_data;
+ size_t types_data_cap; /* used size stored in hdr->type_len */
+
+ /* type ID to `struct btf_type *` lookup index */
+ __u32 *type_offs;
+ size_t type_offs_cap;
__u32 nr_types;
- __u32 types_size;
- __u32 data_size;
+
+ void *strs_data;
+ size_t strs_data_cap; /* used size stored in hdr->str_len */
+
+ /* lookup index for each unique string in strings section */
+ struct hashmap *strs_hash;
+ /* whether strings are already deduplicated */
+ bool strs_deduped;
+ /* BTF object FD, if loaded into kernel */
int fd;
+
+ /* Pointer size (in bytes) for a target architecture of this BTF */
int ptr_sz;
};
@@ -49,60 +102,114 @@ static inline __u64 ptr_to_u64(const void *ptr)
return (__u64) (unsigned long) ptr;
}
-static int btf_add_type(struct btf *btf, struct btf_type *t)
+/* Ensure given dynamically allocated memory region pointed to by *data* with
+ * capacity of *cap_cnt* elements each taking *elem_sz* bytes has enough
+ * memory to accommodate *add_cnt* new elements, assuming *cur_cnt* elements
+ * are already used. At most *max_cnt* elements can ever be allocated.
+ * If necessary, memory is reallocated and all existing data is copied over;
+ * the new pointer to the memory region is stored at *data* and the new
+ * memory region capacity (in number of elements) is stored in *cap_cnt*.
+ * On success, a pointer to the beginning of the unused memory is returned.
+ * On error, NULL is returned.
+ */
+void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
+ size_t cur_cnt, size_t max_cnt, size_t add_cnt)
{
- if (btf->types_size - btf->nr_types < 2) {
- struct btf_type **new_types;
- __u32 expand_by, new_size;
+ size_t new_cnt;
+ void *new_data;
- if (btf->types_size == BTF_MAX_NR_TYPES)
- return -E2BIG;
+ if (cur_cnt + add_cnt <= *cap_cnt)
+ return *data + cur_cnt * elem_sz;
- expand_by = max(btf->types_size >> 2, 16U);
- new_size = min(BTF_MAX_NR_TYPES, btf->types_size + expand_by);
+ /* requested more than the set limit */
+ if (cur_cnt + add_cnt > max_cnt)
+ return NULL;
- new_types = realloc(btf->types, sizeof(*new_types) * new_size);
- if (!new_types)
- return -ENOMEM;
+ new_cnt = *cap_cnt;
+ new_cnt += new_cnt / 4; /* expand by 25% */
+ if (new_cnt < 16) /* but at least 16 elements */
+ new_cnt = 16;
+ if (new_cnt > max_cnt) /* but not exceeding a set limit */
+ new_cnt = max_cnt;
+ if (new_cnt < cur_cnt + add_cnt) /* also ensure we have enough memory */
+ new_cnt = cur_cnt + add_cnt;
+
+ new_data = libbpf_reallocarray(*data, new_cnt, elem_sz);
+ if (!new_data)
+ return NULL;
- if (btf->nr_types == 0)
- new_types[0] = &btf_void;
+ /* zero out newly allocated portion of memory */
+ memset(new_data + (*cap_cnt) * elem_sz, 0, (new_cnt - *cap_cnt) * elem_sz);
- btf->types = new_types;
- btf->types_size = new_size;
- }
+ *data = new_data;
+ *cap_cnt = new_cnt;
+ return new_data + cur_cnt * elem_sz;
+}
- btf->types[++(btf->nr_types)] = t;
+/* Ensure given dynamically allocated memory region has enough allocated space
+ * to accommodate *need_cnt* elements of size *elem_sz* bytes each
+ */
+int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt)
+{
+ void *p;
+
+ if (need_cnt <= *cap_cnt)
+ return 0;
+ p = btf_add_mem(data, cap_cnt, elem_sz, *cap_cnt, SIZE_MAX, need_cnt - *cap_cnt);
+ if (!p)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int btf_add_type_idx_entry(struct btf *btf, __u32 type_off)
+{
+ __u32 *p;
+
+ p = btf_add_mem((void **)&btf->type_offs, &btf->type_offs_cap, sizeof(__u32),
+ btf->nr_types + 1, BTF_MAX_NR_TYPES, 1);
+ if (!p)
+ return -ENOMEM;
+
+ *p = type_off;
return 0;
}
+static void btf_bswap_hdr(struct btf_header *h)
+{
+ h->magic = bswap_16(h->magic);
+ h->hdr_len = bswap_32(h->hdr_len);
+ h->type_off = bswap_32(h->type_off);
+ h->type_len = bswap_32(h->type_len);
+ h->str_off = bswap_32(h->str_off);
+ h->str_len = bswap_32(h->str_len);
+}
+
static int btf_parse_hdr(struct btf *btf)
{
- const struct btf_header *hdr = btf->hdr;
+ struct btf_header *hdr = btf->hdr;
__u32 meta_left;
- if (btf->data_size < sizeof(struct btf_header)) {
+ if (btf->raw_size < sizeof(struct btf_header)) {
pr_debug("BTF header not found\n");
return -EINVAL;
}
- if (hdr->magic != BTF_MAGIC) {
+ if (hdr->magic == bswap_16(BTF_MAGIC)) {
+ btf->swapped_endian = true;
+ if (bswap_32(hdr->hdr_len) != sizeof(struct btf_header)) {
+ pr_warn("Can't load BTF with non-native endianness due to unsupported header length %u\n",
+ bswap_32(hdr->hdr_len));
+ return -ENOTSUP;
+ }
+ btf_bswap_hdr(hdr);
+ } else if (hdr->magic != BTF_MAGIC) {
pr_debug("Invalid BTF magic:%x\n", hdr->magic);
return -EINVAL;
}
- if (hdr->version != BTF_VERSION) {
- pr_debug("Unsupported BTF version:%u\n", hdr->version);
- return -ENOTSUP;
- }
-
- if (hdr->flags) {
- pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
- return -ENOTSUP;
- }
-
- meta_left = btf->data_size - sizeof(*hdr);
+ meta_left = btf->raw_size - sizeof(*hdr);
if (!meta_left) {
pr_debug("BTF has no data\n");
return -EINVAL;
@@ -128,15 +235,13 @@ static int btf_parse_hdr(struct btf *btf)
return -EINVAL;
}
- btf->nohdr_data = btf->hdr + 1;
-
return 0;
}
static int btf_parse_str_sec(struct btf *btf)
{
const struct btf_header *hdr = btf->hdr;
- const char *start = btf->nohdr_data + hdr->str_off;
+ const char *start = btf->strs_data;
const char *end = start + btf->hdr->str_len;
if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_STR_OFFSET ||
@@ -145,14 +250,12 @@ static int btf_parse_str_sec(struct btf *btf)
return -EINVAL;
}
- btf->strings = start;
-
return 0;
}
-static int btf_type_size(struct btf_type *t)
+static int btf_type_size(const struct btf_type *t)
{
- int base_size = sizeof(struct btf_type);
+ const int base_size = sizeof(struct btf_type);
__u16 vlen = btf_vlen(t);
switch (btf_kind(t)) {
@@ -185,25 +288,120 @@ static int btf_type_size(struct btf_type *t)
}
}
+static void btf_bswap_type_base(struct btf_type *t)
+{
+ t->name_off = bswap_32(t->name_off);
+ t->info = bswap_32(t->info);
+ t->type = bswap_32(t->type);
+}
+
+static int btf_bswap_type_rest(struct btf_type *t)
+{
+ struct btf_var_secinfo *v;
+ struct btf_member *m;
+ struct btf_array *a;
+ struct btf_param *p;
+ struct btf_enum *e;
+ __u16 vlen = btf_vlen(t);
+ int i;
+
+ switch (btf_kind(t)) {
+ case BTF_KIND_FWD:
+ case BTF_KIND_CONST:
+ case BTF_KIND_VOLATILE:
+ case BTF_KIND_RESTRICT:
+ case BTF_KIND_PTR:
+ case BTF_KIND_TYPEDEF:
+ case BTF_KIND_FUNC:
+ return 0;
+ case BTF_KIND_INT:
+ *(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
+ return 0;
+ case BTF_KIND_ENUM:
+ for (i = 0, e = btf_enum(t); i < vlen; i++, e++) {
+ e->name_off = bswap_32(e->name_off);
+ e->val = bswap_32(e->val);
+ }
+ return 0;
+ case BTF_KIND_ARRAY:
+ a = btf_array(t);
+ a->type = bswap_32(a->type);
+ a->index_type = bswap_32(a->index_type);
+ a->nelems = bswap_32(a->nelems);
+ return 0;
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ for (i = 0, m = btf_members(t); i < vlen; i++, m++) {
+ m->name_off = bswap_32(m->name_off);
+ m->type = bswap_32(m->type);
+ m->offset = bswap_32(m->offset);
+ }
+ return 0;
+ case BTF_KIND_FUNC_PROTO:
+ for (i = 0, p = btf_params(t); i < vlen; i++, p++) {
+ p->name_off = bswap_32(p->name_off);
+ p->type = bswap_32(p->type);
+ }
+ return 0;
+ case BTF_KIND_VAR:
+ btf_var(t)->linkage = bswap_32(btf_var(t)->linkage);
+ return 0;
+ case BTF_KIND_DATASEC:
+ for (i = 0, v = btf_var_secinfos(t); i < vlen; i++, v++) {
+ v->type = bswap_32(v->type);
+ v->offset = bswap_32(v->offset);
+ v->size = bswap_32(v->size);
+ }
+ return 0;
+ default:
+ pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t));
+ return -EINVAL;
+ }
+}
+
static int btf_parse_type_sec(struct btf *btf)
{
struct btf_header *hdr = btf->hdr;
- void *nohdr_data = btf->nohdr_data;
- void *next_type = nohdr_data + hdr->type_off;
- void *end_type = nohdr_data + hdr->str_off;
+ void *next_type = btf->types_data;
+ void *end_type = next_type + hdr->type_len;
+ int err, i = 0, type_size;
- while (next_type < end_type) {
- struct btf_type *t = next_type;
- int type_size;
- int err;
+ /* VOID (type_id == 0) is specially handled by btf__type_by_id(),
+ * so ensure we can never properly use its offset from index by
+ * setting it to a large value
+ */
+ err = btf_add_type_idx_entry(btf, UINT_MAX);
+ if (err)
+ return err;
+
+ while (next_type + sizeof(struct btf_type) <= end_type) {
+ i++;
+
+ if (btf->swapped_endian)
+ btf_bswap_type_base(next_type);
- type_size = btf_type_size(t);
+ type_size = btf_type_size(next_type);
if (type_size < 0)
return type_size;
- next_type += type_size;
- err = btf_add_type(btf, t);
+ if (next_type + type_size > end_type) {
+ pr_warn("BTF type [%d] is malformed\n", i);
+ return -EINVAL;
+ }
+
+ if (btf->swapped_endian && btf_bswap_type_rest(next_type))
+ return -EINVAL;
+
+ err = btf_add_type_idx_entry(btf, next_type - btf->types_data);
if (err)
return err;
+
+ next_type += type_size;
+ btf->nr_types++;
+ }
+
+ if (next_type != end_type) {
+ pr_warn("BTF types data is malformed\n");
+ return -EINVAL;
}
return 0;
@@ -214,12 +412,20 @@ __u32 btf__get_nr_types(const struct btf *btf)
return btf->nr_types;
}
+/* internal helper returning non-const pointer to a type */
+static struct btf_type *btf_type_by_id(struct btf *btf, __u32 type_id)
+{
+ if (type_id == 0)
+ return &btf_void;
+
+ return btf->types_data + btf->type_offs[type_id];
+}
+
const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
{
if (type_id > btf->nr_types)
return NULL;
-
- return btf->types[type_id];
+ return btf_type_by_id((struct btf *)btf, type_id);
}
static int determine_ptr_size(const struct btf *btf)
@@ -286,6 +492,38 @@ int btf__set_pointer_size(struct btf *btf, size_t ptr_sz)
return 0;
}
+static bool is_host_big_endian(void)
+{
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ return false;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ return true;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+}
+
+enum btf_endianness btf__endianness(const struct btf *btf)
+{
+ if (is_host_big_endian())
+ return btf->swapped_endian ? BTF_LITTLE_ENDIAN : BTF_BIG_ENDIAN;
+ else
+ return btf->swapped_endian ? BTF_BIG_ENDIAN : BTF_LITTLE_ENDIAN;
+}
+
+int btf__set_endianness(struct btf *btf, enum btf_endianness endian)
+{
+ if (endian != BTF_LITTLE_ENDIAN && endian != BTF_BIG_ENDIAN)
+ return -EINVAL;
+
+ btf->swapped_endian = is_host_big_endian() != (endian == BTF_BIG_ENDIAN);
+ if (!btf->swapped_endian) {
+ free(btf->raw_data_swapped);
+ btf->raw_data_swapped = NULL;
+ }
+ return 0;
+}
+
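A sketch of the intended round-trip (input path and error handling elided): parse native-endian BTF, select the opposite target endianness, and fetch a byte-swapped raw image:

	struct btf *btf = btf__parse_raw("vmlinux.btf"); /* assumed input */
	const void *raw;
	__u32 raw_sz;

	btf__set_endianness(btf, BTF_BIG_ENDIAN);
	raw = btf__get_raw_data(btf, &raw_sz); /* big-endian copy */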
static bool btf_type_is_void(const struct btf_type *t)
{
return t == &btf_void || btf_is_fwd(t);
@@ -417,7 +655,7 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
return 0;
for (i = 1; i <= btf->nr_types; i++) {
- const struct btf_type *t = btf->types[i];
+ const struct btf_type *t = btf__type_by_id(btf, i);
const char *name = btf__name_by_offset(btf, t->name_off);
if (name && !strcmp(type_name, name))
@@ -436,7 +674,7 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
return 0;
for (i = 1; i <= btf->nr_types; i++) {
- const struct btf_type *t = btf->types[i];
+ const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;
if (btf_kind(t) != kind)
@@ -449,6 +687,11 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
return -ENOENT;
}
+static bool btf_is_modifiable(const struct btf *btf)
+{
+ return (void *)btf->hdr != btf->raw_data;
+}
+
void btf__free(struct btf *btf)
{
if (IS_ERR_OR_NULL(btf))
@@ -457,11 +700,55 @@ void btf__free(struct btf *btf)
if (btf->fd >= 0)
close(btf->fd);
- free(btf->data);
- free(btf->types);
+ if (btf_is_modifiable(btf)) {
+ /* if BTF was modified after loading, it will have a split
+ * in-memory representation for header, types, and strings
+ * sections, so we need to free all of them individually. It
+ * might still have a cached contiguous raw data present,
+ * which will be unconditionally freed below.
+ */
+ free(btf->hdr);
+ free(btf->types_data);
+ free(btf->strs_data);
+ }
+ free(btf->raw_data);
+ free(btf->raw_data_swapped);
+ free(btf->type_offs);
free(btf);
}
+struct btf *btf__new_empty(void)
+{
+ struct btf *btf;
+
+ btf = calloc(1, sizeof(*btf));
+ if (!btf)
+ return ERR_PTR(-ENOMEM);
+
+ btf->fd = -1;
+ btf->ptr_sz = sizeof(void *);
+ btf->swapped_endian = false;
+
+ /* +1 for empty string at offset 0 */
+ btf->raw_size = sizeof(struct btf_header) + 1;
+ btf->raw_data = calloc(1, btf->raw_size);
+ if (!btf->raw_data) {
+ free(btf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ btf->hdr = btf->raw_data;
+ btf->hdr->hdr_len = sizeof(struct btf_header);
+ btf->hdr->magic = BTF_MAGIC;
+ btf->hdr->version = BTF_VERSION;
+
+ btf->types_data = btf->raw_data + btf->hdr->hdr_len;
+ btf->strs_data = btf->raw_data + btf->hdr->hdr_len;
+ btf->hdr->str_len = 1; /* empty string at offset 0 */
+
+ return btf;
+}
+
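A minimal from-scratch construction sketch using the write APIs added later in this patch (error handling omitted):

	struct btf *btf = btf__new_empty();
	int int_id, ptr_id;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* type ID 1 */
	ptr_id = btf__add_ptr(btf, int_id);                   /* type ID 2 */
	btf__free(btf);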
struct btf *btf__new(const void *data, __u32 size)
{
struct btf *btf;
@@ -471,26 +758,28 @@ struct btf *btf__new(const void *data, __u32 size)
if (!btf)
return ERR_PTR(-ENOMEM);
- btf->fd = -1;
-
- btf->data = malloc(size);
- if (!btf->data) {
+ btf->raw_data = malloc(size);
+ if (!btf->raw_data) {
err = -ENOMEM;
goto done;
}
+ memcpy(btf->raw_data, data, size);
+ btf->raw_size = size;
- memcpy(btf->data, data, size);
- btf->data_size = size;
-
+ btf->hdr = btf->raw_data;
err = btf_parse_hdr(btf);
if (err)
goto done;
+ btf->strs_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->str_off;
+ btf->types_data = btf->raw_data + btf->hdr->hdr_len + btf->hdr->type_off;
+
err = btf_parse_str_sec(btf);
+ err = err ?: btf_parse_type_sec(btf);
if (err)
goto done;
- err = btf_parse_type_sec(btf);
+ btf->fd = -1;
done:
if (err) {
@@ -501,17 +790,6 @@ done:
return btf;
}
-static bool btf_check_endianness(const GElf_Ehdr *ehdr)
-{
-#if __BYTE_ORDER == __LITTLE_ENDIAN
- return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
-#elif __BYTE_ORDER == __BIG_ENDIAN
- return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
-#else
-# error "Unrecognized __BYTE_ORDER__"
-#endif
-}
-
struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
{
Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
@@ -544,10 +822,6 @@ struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
pr_warn("failed to get EHDR from %s\n", path);
goto done;
}
- if (!btf_check_endianness(&ehdr)) {
- pr_warn("non-native ELF endianness is not supported\n");
- goto done;
- }
if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
pr_warn("failed to get e_shstrndx from %s\n", path);
goto done;
@@ -659,13 +933,7 @@ struct btf *btf__parse_raw(const char *path)
err = -EIO;
goto err_out;
}
- if (magic == __bswap_16(BTF_MAGIC)) {
- /* non-native endian raw BTF */
- pr_warn("non-native BTF endianness is not supported\n");
- err = -LIBBPF_ERRNO__ENDIAN;
- goto err_out;
- }
- if (magic != BTF_MAGIC) {
+ if (magic != BTF_MAGIC && magic != bswap_16(BTF_MAGIC)) {
/* definitely not a raw BTF */
err = -EPROTO;
goto err_out;
@@ -798,7 +1066,7 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
__u32 i;
for (i = 1; i <= btf->nr_types; i++) {
- struct btf_type *t = btf->types[i];
+ struct btf_type *t = btf_type_by_id(btf, i);
/* Loader needs to fix up some of the things compiler
* couldn't get its hands on while emitting BTF. This
@@ -815,10 +1083,13 @@ int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
return err;
}
+static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian);
+
int btf__load(struct btf *btf)
{
- __u32 log_buf_size = 0;
+ __u32 log_buf_size = 0, raw_size;
char *log_buf = NULL;
+ void *raw_data;
int err = 0;
if (btf->fd >= 0)
@@ -833,8 +1104,16 @@ retry_load:
*log_buf = 0;
}
- btf->fd = bpf_load_btf(btf->data, btf->data_size,
- log_buf, log_buf_size, false);
+ raw_data = btf_get_raw_data(btf, &raw_size, false);
+ if (!raw_data) {
+ err = -ENOMEM;
+ goto done;
+ }
+ /* cache native raw data representation */
+ btf->raw_size = raw_size;
+ btf->raw_data = raw_data;
+
+ btf->fd = bpf_load_btf(raw_data, raw_size, log_buf, log_buf_size, false);
if (btf->fd < 0) {
if (!log_buf || errno == ENOSPC) {
log_buf_size = max((__u32)BPF_LOG_BUF_SIZE,
@@ -865,20 +1144,88 @@ void btf__set_fd(struct btf *btf, int fd)
btf->fd = fd;
}
-const void *btf__get_raw_data(const struct btf *btf, __u32 *size)
+static void *btf_get_raw_data(const struct btf *btf, __u32 *size, bool swap_endian)
{
- *size = btf->data_size;
- return btf->data;
+ struct btf_header *hdr = btf->hdr;
+ struct btf_type *t;
+ void *data, *p;
+ __u32 data_sz;
+ int i;
+
+ data = swap_endian ? btf->raw_data_swapped : btf->raw_data;
+ if (data) {
+ *size = btf->raw_size;
+ return data;
+ }
+
+ data_sz = hdr->hdr_len + hdr->type_len + hdr->str_len;
+ data = calloc(1, data_sz);
+ if (!data)
+ return NULL;
+ p = data;
+
+ memcpy(p, hdr, hdr->hdr_len);
+ if (swap_endian)
+ btf_bswap_hdr(p);
+ p += hdr->hdr_len;
+
+ memcpy(p, btf->types_data, hdr->type_len);
+ if (swap_endian) {
+ for (i = 1; i <= btf->nr_types; i++) {
+ t = p + btf->type_offs[i];
+ /* btf_bswap_type_rest() relies on native t->info, so
+ * we swap base type info after we swapped all the
+ * additional information
+ */
+ if (btf_bswap_type_rest(t))
+ goto err_out;
+ btf_bswap_type_base(t);
+ }
+ }
+ p += hdr->type_len;
+
+ memcpy(p, btf->strs_data, hdr->str_len);
+ p += hdr->str_len;
+
+ *size = data_sz;
+ return data;
+err_out:
+ free(data);
+ return NULL;
}
-const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
+const void *btf__get_raw_data(const struct btf *btf_ro, __u32 *size)
+{
+ struct btf *btf = (struct btf *)btf_ro;
+ __u32 data_sz;
+ void *data;
+
+ data = btf_get_raw_data(btf, &data_sz, btf->swapped_endian);
+ if (!data)
+ return NULL;
+
+ btf->raw_size = data_sz;
+ if (btf->swapped_endian)
+ btf->raw_data_swapped = data;
+ else
+ btf->raw_data = data;
+ *size = data_sz;
+ return data;
+}
+
+const char *btf__str_by_offset(const struct btf *btf, __u32 offset)
{
if (offset < btf->hdr->str_len)
- return &btf->strings[offset];
+ return btf->strs_data + offset;
else
return NULL;
}
+const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
+{
+ return btf__str_by_offset(btf, offset);
+}
+
int btf__get_from_id(__u32 id, struct btf **btf)
{
struct bpf_btf_info btf_info = { 0 };
@@ -1014,6 +1361,970 @@ int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
return 0;
}
+static size_t strs_hash_fn(const void *key, void *ctx)
+{
+ struct btf *btf = ctx;
+ const char *str = btf->strs_data + (long)key;
+
+ return str_hash(str);
+}
+
+static bool strs_hash_equal_fn(const void *key1, const void *key2, void *ctx)
+{
+ struct btf *btf = ctx;
+ const char *str1 = btf->strs_data + (long)key1;
+ const char *str2 = btf->strs_data + (long)key2;
+
+ return strcmp(str1, str2) == 0;
+}
+
+static void btf_invalidate_raw_data(struct btf *btf)
+{
+ if (btf->raw_data) {
+ free(btf->raw_data);
+ btf->raw_data = NULL;
+ }
+ if (btf->raw_data_swapped) {
+ free(btf->raw_data_swapped);
+ btf->raw_data_swapped = NULL;
+ }
+}
+
+/* Ensure BTF is ready to be modified (by splitting into three memory
+ * regions for header, types, and strings). Also invalidate cached
+ * raw_data, if any.
+ */
+static int btf_ensure_modifiable(struct btf *btf)
+{
+ void *hdr, *types, *strs, *strs_end, *s;
+ struct hashmap *hash = NULL;
+ long off;
+ int err;
+
+ if (btf_is_modifiable(btf)) {
+ /* any BTF modification invalidates raw_data */
+ btf_invalidate_raw_data(btf);
+ return 0;
+ }
+
+ /* split raw data into three memory regions */
+ hdr = malloc(btf->hdr->hdr_len);
+ types = malloc(btf->hdr->type_len);
+ strs = malloc(btf->hdr->str_len);
+ if (!hdr || !types || !strs)
+ goto err_out;
+
+ memcpy(hdr, btf->hdr, btf->hdr->hdr_len);
+ memcpy(types, btf->types_data, btf->hdr->type_len);
+ memcpy(strs, btf->strs_data, btf->hdr->str_len);
+
+ /* build lookup index for all strings */
+ hash = hashmap__new(strs_hash_fn, strs_hash_equal_fn, btf);
+ if (IS_ERR(hash)) {
+ err = PTR_ERR(hash);
+ hash = NULL;
+ goto err_out;
+ }
+
+ strs_end = strs + btf->hdr->str_len;
+ for (off = 0, s = strs; s < strs_end; off += strlen(s) + 1, s = strs + off) {
+ /* hashmap__add() returns EEXIST if string with the same
+ * content is already in the hash map
+ */
+ err = hashmap__add(hash, (void *)off, (void *)off);
+ if (err == -EEXIST)
+ continue; /* duplicate */
+ if (err)
+ goto err_out;
+ }
+
+ /* only when everything was successful, update internal state */
+ btf->hdr = hdr;
+ btf->types_data = types;
+ btf->types_data_cap = btf->hdr->type_len;
+ btf->strs_data = strs;
+ btf->strs_data_cap = btf->hdr->str_len;
+ btf->strs_hash = hash;
+ /* if BTF was created from scratch, all strings are guaranteed to be
+ * unique and deduplicated
+ */
+ btf->strs_deduped = btf->hdr->str_len <= 1;
+
+ /* invalidate raw_data representation */
+ btf_invalidate_raw_data(btf);
+
+ return 0;
+
+err_out:
+ hashmap__free(hash);
+ free(hdr);
+ free(types);
+ free(strs);
+ return -ENOMEM;
+}
+
+static void *btf_add_str_mem(struct btf *btf, size_t add_sz)
+{
+ return btf_add_mem(&btf->strs_data, &btf->strs_data_cap, 1,
+ btf->hdr->str_len, BTF_MAX_STR_OFFSET, add_sz);
+}
+
+/* Find an offset in BTF string section that corresponds to a given string *s*.
+ * Returns:
+ * - >0 offset into string section, if string is found;
+ * - -ENOENT, if string is not in the string section;
+ * - <0, on any other error.
+ */
+int btf__find_str(struct btf *btf, const char *s)
+{
+ long old_off, new_off, len;
+ void *p;
+
+ /* BTF needs to be in a modifiable state to build string lookup index */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ /* see btf__add_str() for why we do this */
+ len = strlen(s) + 1;
+ p = btf_add_str_mem(btf, len);
+ if (!p)
+ return -ENOMEM;
+
+ new_off = btf->hdr->str_len;
+ memcpy(p, s, len);
+
+ if (hashmap__find(btf->strs_hash, (void *)new_off, (void **)&old_off))
+ return old_off;
+
+ return -ENOENT;
+}
+
+/* Add a string s to the BTF string section.
+ * Returns:
+ * - > 0 offset into string section, on success;
+ * - < 0, on error.
+ */
+int btf__add_str(struct btf *btf, const char *s)
+{
+ long old_off, new_off, len;
+ void *p;
+ int err;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ /* Hashmap keys are always offsets within btf->strs_data, so to even
+ * look up some string from the "outside", we need to first append it
+ * at the end, so that it can be addressed with an offset. Luckily,
+ * until btf->hdr->str_len is incremented, that string is just a piece
+ * of garbage for the rest of BTF code, so no harm, no foul. On the
+ * other hand, if the string is unique, it's already appended and
+ * ready to be used, only a simple btf->hdr->str_len increment away.
+ */
+ len = strlen(s) + 1;
+ p = btf_add_str_mem(btf, len);
+ if (!p)
+ return -ENOMEM;
+
+ new_off = btf->hdr->str_len;
+ memcpy(p, s, len);
+
+ /* Now attempt to add the string, but only if the string with the same
+ * contents doesn't exist already (HASHMAP_ADD strategy). If such
+ * string exists, we'll get its offset in old_off (that's old_key).
+ */
+ err = hashmap__insert(btf->strs_hash, (void *)new_off, (void *)new_off,
+ HASHMAP_ADD, (const void **)&old_off, NULL);
+ if (err == -EEXIST)
+ return old_off; /* duplicated string, return existing offset */
+ if (err)
+ return err;
+
+ btf->hdr->str_len += len; /* new unique string, adjust data length */
+ return new_off;
+}
+
+static void *btf_add_type_mem(struct btf *btf, size_t add_sz)
+{
+ return btf_add_mem(&btf->types_data, &btf->types_data_cap, 1,
+ btf->hdr->type_len, UINT_MAX, add_sz);
+}
+
+static __u32 btf_type_info(int kind, int vlen, int kflag)
+{
+ return (kflag << 31) | (kind << 24) | vlen;
+}
+
+static void btf_type_inc_vlen(struct btf_type *t)
+{
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, btf_kflag(t));
+}
+
+/*
+ * Append new BTF_KIND_INT type with:
+ * - *name* - non-empty, non-NULL type name;
+ * - *byte_sz* - power-of-2 (1, 2, 4, ..) size of the type, in bytes;
+ * - encoding is a combination of BTF_INT_SIGNED, BTF_INT_CHAR, BTF_INT_BOOL.
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding)
+{
+ struct btf_type *t;
+ int sz, err, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return -EINVAL;
+ /* byte_sz must be power of 2 */
+ if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 16)
+ return -EINVAL;
+ if (encoding & ~(BTF_INT_SIGNED | BTF_INT_CHAR | BTF_INT_BOOL))
+ return -EINVAL;
+
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type) + sizeof(int);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ /* if something goes wrong later, we might end up with an extra string,
+ * but that shouldn't be a problem, because BTF can't be constructed
+ * completely anyway and will most probably be just discarded
+ */
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_INT, 0, 0);
+ t->size = byte_sz;
+ /* set INT info, we don't allow setting legacy bit offset/size */
+ *(__u32 *)(t + 1) = (encoding << 24) | (byte_sz * 8);
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/* it's completely legal to append BTF types with type IDs pointing forward to
+ * types that haven't been appended yet, so we only make sure that id looks
+ * sane; we can't guarantee that the ID will always be valid
+ */
+static int validate_type_id(int id)
+{
+ if (id < 0 || id > BTF_MAX_NR_TYPES)
+ return -EINVAL;
+ return 0;
+}
+
+/* generic append function for PTR, TYPEDEF, CONST/VOLATILE/RESTRICT */
+static int btf_add_ref_kind(struct btf *btf, int kind, const char *name, int ref_type_id)
+{
+ struct btf_type *t;
+ int sz, name_off = 0, err;
+
+ if (validate_type_id(ref_type_id))
+ return -EINVAL;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+ }
+
+ t->name_off = name_off;
+ t->info = btf_type_info(kind, 0, 0);
+ t->type = ref_type_id;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/*
+ * Append new BTF_KIND_PTR type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_ptr(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_PTR, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_ARRAY type with:
+ * - *index_type_id* - type ID of the type describing array index;
+ * - *elem_type_id* - type ID of the type describing array element;
+ * - *nr_elems* - the size of the array;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_array(struct btf *btf, int index_type_id, int elem_type_id, __u32 nr_elems)
+{
+ struct btf_type *t;
+ struct btf_array *a;
+ int sz, err;
+
+ if (validate_type_id(index_type_id) || validate_type_id(elem_type_id))
+ return -EINVAL;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type) + sizeof(struct btf_array);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ t->name_off = 0;
+ t->info = btf_type_info(BTF_KIND_ARRAY, 0, 0);
+ t->size = 0;
+
+ a = btf_array(t);
+ a->type = elem_type_id;
+ a->index_type = index_type_id;
+ a->nelems = nr_elems;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/* generic STRUCT/UNION append function */
+static int btf_add_composite(struct btf *btf, int kind, const char *name, __u32 bytes_sz)
+{
+ struct btf_type *t;
+ int sz, err, name_off = 0;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+ }
+
+ /* start out with vlen=0 and no kflag; this will be adjusted when
+ * adding each member
+ */
+ t->name_off = name_off;
+ t->info = btf_type_info(kind, 0, 0);
+ t->size = bytes_sz;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/*
+ * Append new BTF_KIND_STRUCT type with:
+ * - *name* - name of the struct, can be NULL or empty for anonymous structs;
+ * - *byte_sz* - size of the struct, in bytes;
+ *
+ * Struct initially has no fields in it. Fields can be added by
+ * btf__add_field() right after btf__add_struct() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_struct(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ return btf_add_composite(btf, BTF_KIND_STRUCT, name, byte_sz);
+}
+
+/*
+ * Append new BTF_KIND_UNION type with:
+ * - *name* - name of the union, can be NULL or empty for anonymous union;
+ * - *byte_sz* - size of the union, in bytes;
+ *
+ * Union initially has no fields in it. Fields can be added by
+ * btf__add_field() right after btf__add_union() succeeds. All fields
+ * should have *bit_offset* of 0.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_union(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ return btf_add_composite(btf, BTF_KIND_UNION, name, byte_sz);
+}
+
+/*
+ * Append new field for the current STRUCT/UNION type with:
+ * - *name* - name of the field, can be NULL or empty for anonymous field;
+ * - *type_id* - type ID for the type describing field type;
+ * - *bit_offset* - bit offset of the start of the field within struct/union;
+ * - *bit_size* - bit size of a bitfield, 0 for non-bitfield fields;
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_field(struct btf *btf, const char *name, int type_id,
+ __u32 bit_offset, __u32 bit_size)
+{
+ struct btf_type *t;
+ struct btf_member *m;
+ bool is_bitfield;
+ int sz, name_off = 0;
+
+ /* last type should be union/struct */
+ if (btf->nr_types == 0)
+ return -EINVAL;
+ t = btf_type_by_id(btf, btf->nr_types);
+ if (!btf_is_composite(t))
+ return -EINVAL;
+
+ if (validate_type_id(type_id))
+ return -EINVAL;
+ /* best-effort bit field offset/size enforcement */
+ is_bitfield = bit_size || (bit_offset % 8 != 0);
+ if (is_bitfield && (bit_size == 0 || bit_size > 255 || bit_offset > 0xffffff))
+ return -EINVAL;
+
+ /* only offset 0 is allowed for unions */
+ if (btf_is_union(t) && bit_offset)
+ return -EINVAL;
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_member);
+ m = btf_add_type_mem(btf, sz);
+ if (!m)
+ return -ENOMEM;
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+ }
+
+ m->name_off = name_off;
+ m->type = type_id;
+ m->offset = bit_offset | (bit_size << 24);
+
+ /* btf_add_type_mem can invalidate t pointer */
+ t = btf_type_by_id(btf, btf->nr_types);
+ /* update parent type's vlen and kflag */
+ t->info = btf_type_info(btf_kind(t), btf_vlen(t) + 1, is_bitfield || btf_kflag(t));
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
+
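A sketch of describing `struct pair { int a; int b; };`, assuming an int type was added earlier as type ID 1:

	int pair_id = btf__add_struct(btf, "pair", 8);

	btf__add_field(btf, "a", 1, 0, 0);  /* int a; at bit offset 0 */
	btf__add_field(btf, "b", 1, 32, 0); /* int b; at bit offset 32 */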
+/*
+ * Append new BTF_KIND_ENUM type with:
+ * - *name* - name of the enum, can be NULL or empty for anonymous enums;
+ * - *byte_sz* - size of the enum, in bytes.
+ *
+ * Enum initially has no enum values in it (and corresponds to enum forward
+ * declaration). Enumerator values can be added by btf__add_enum_value()
+ * immediately after btf__add_enum() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_enum(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ struct btf_type *t;
+ int sz, err, name_off = 0;
+
+ /* byte_sz must be power of 2 */
+ if (!byte_sz || (byte_sz & (byte_sz - 1)) || byte_sz > 8)
+ return -EINVAL;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+ }
+
+ /* start out with vlen=0; it will be adjusted when adding enum values */
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_ENUM, 0, 0);
+ t->size = byte_sz;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/*
+ * Append new enum value for the current ENUM type with:
+ * - *name* - name of the enumerator value, can't be NULL or empty;
+ * - *value* - integer value corresponding to enum value *name*;
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_enum_value(struct btf *btf, const char *name, __s64 value)
+{
+ struct btf_type *t;
+ struct btf_enum *v;
+ int sz, name_off;
+
+ /* last type should be BTF_KIND_ENUM */
+ if (btf->nr_types == 0)
+ return -EINVAL;
+ t = btf_type_by_id(btf, btf->nr_types);
+ if (!btf_is_enum(t))
+ return -EINVAL;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return -EINVAL;
+ if (value < INT_MIN || value > UINT_MAX)
+ return -E2BIG;
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_enum);
+ v = btf_add_type_mem(btf, sz);
+ if (!v)
+ return -ENOMEM;
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ v->name_off = name_off;
+ v->val = value;
+
+ /* update parent type's vlen */
+ t = btf_type_by_id(btf, btf->nr_types);
+ btf_type_inc_vlen(t);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
+
+/*
+ * Append new BTF_KIND_FWD type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *fwd_kind*, kind of forward declaration, one of BTF_FWD_STRUCT,
+ * BTF_FWD_UNION, or BTF_FWD_ENUM;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind)
+{
+ if (!name || !name[0])
+ return -EINVAL;
+
+ switch (fwd_kind) {
+ case BTF_FWD_STRUCT:
+ case BTF_FWD_UNION: {
+ struct btf_type *t;
+ int id;
+
+ id = btf_add_ref_kind(btf, BTF_KIND_FWD, name, 0);
+ if (id <= 0)
+ return id;
+ t = btf_type_by_id(btf, id);
+ t->info = btf_type_info(BTF_KIND_FWD, 0, fwd_kind == BTF_FWD_UNION);
+ return id;
+ }
+ case BTF_FWD_ENUM:
+ /* enum forward in BTF currently is just an enum with no enum
+ * values; we also assume a standard 4-byte size for it
+ */
+ return btf__add_enum(btf, name, sizeof(int));
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Append new BTF_KIND_TYPEDEF type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id)
+{
+ if (!name || !name[0])
+ return -EINVAL;
+
+ return btf_add_ref_kind(btf, BTF_KIND_TYPEDEF, name, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_VOLATILE type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_volatile(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_VOLATILE, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_CONST type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_const(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_CONST, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_RESTRICT type with:
+ * - *ref_type_id* - referenced type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_restrict(struct btf *btf, int ref_type_id)
+{
+ return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
+}
+
+/*
+ * Append new BTF_KIND_FUNC type with:
+ * - *name*, non-empty/non-NULL name;
+ * - *proto_type_id* - FUNC_PROTO's type ID, it might not exist yet;
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_func(struct btf *btf, const char *name,
+ enum btf_func_linkage linkage, int proto_type_id)
+{
+ int id;
+
+ if (!name || !name[0])
+ return -EINVAL;
+ if (linkage != BTF_FUNC_STATIC && linkage != BTF_FUNC_GLOBAL &&
+ linkage != BTF_FUNC_EXTERN)
+ return -EINVAL;
+
+ id = btf_add_ref_kind(btf, BTF_KIND_FUNC, name, proto_type_id);
+ if (id > 0) {
+ struct btf_type *t = btf_type_by_id(btf, id);
+
+ t->info = btf_type_info(BTF_KIND_FUNC, linkage, 0);
+ }
+ return id;
+}
+
+/*
+ * Append new BTF_KIND_FUNC_PROTO with:
+ * - *ret_type_id* - type ID for return result of a function.
+ *
+ * Function prototype initially has no arguments, but they can be added by
+ * btf__add_func_param() one by one, immediately after
+ * btf__add_func_proto() succeeded.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_func_proto(struct btf *btf, int ret_type_id)
+{
+ struct btf_type *t;
+ int sz, err;
+
+ if (validate_type_id(ret_type_id))
+ return -EINVAL;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ /* start out with vlen=0; this will be adjusted when adding function
+ * parameters, if necessary
+ */
+ t->name_off = 0;
+ t->info = btf_type_info(BTF_KIND_FUNC_PROTO, 0, 0);
+ t->type = ret_type_id;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/*
+ * Append new function parameter for current FUNC_PROTO type with:
+ * - *name* - parameter name, can be NULL or empty;
+ * - *type_id* - type ID describing the type of the parameter.
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_func_param(struct btf *btf, const char *name, int type_id)
+{
+ struct btf_type *t;
+ struct btf_param *p;
+ int sz, name_off = 0;
+
+ if (validate_type_id(type_id))
+ return -EINVAL;
+
+ /* last type should be BTF_KIND_FUNC_PROTO */
+ if (btf->nr_types == 0)
+ return -EINVAL;
+ t = btf_type_by_id(btf, btf->nr_types);
+ if (!btf_is_func_proto(t))
+ return -EINVAL;
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_param);
+ p = btf_add_type_mem(btf, sz);
+ if (!p)
+ return -ENOMEM;
+
+ if (name && name[0]) {
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+ }
+
+ p->name_off = name_off;
+ p->type = type_id;
+
+ /* update parent type's vlen */
+ t = btf_type_by_id(btf, btf->nr_types);
+ btf_type_inc_vlen(t);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
+
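A sketch of declaring `int sum(int a, int b);`, again assuming int is type ID 1:

	int proto_id = btf__add_func_proto(btf, 1); /* returns int */

	btf__add_func_param(btf, "a", 1);
	btf__add_func_param(btf, "b", 1);
	btf__add_func(btf, "sum", BTF_FUNC_GLOBAL, proto_id);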
+/*
+ * Append new BTF_KIND_VAR type with:
+ * - *name* - non-empty/non-NULL name;
+ * - *linkage* - variable linkage, one of BTF_VAR_STATIC,
+ * BTF_VAR_GLOBAL_ALLOCATED, or BTF_VAR_GLOBAL_EXTERN;
+ * - *type_id* - type ID of the type describing the type of the variable.
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id)
+{
+ struct btf_type *t;
+ struct btf_var *v;
+ int sz, err, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return -EINVAL;
+ if (linkage != BTF_VAR_STATIC && linkage != BTF_VAR_GLOBAL_ALLOCATED &&
+ linkage != BTF_VAR_GLOBAL_EXTERN)
+ return -EINVAL;
+ if (validate_type_id(type_id))
+ return -EINVAL;
+
+ /* deconstruct BTF, if necessary, and invalidate raw_data */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type) + sizeof(struct btf_var);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_VAR, 0, 0);
+ t->type = type_id;
+
+ v = btf_var(t);
+ v->linkage = linkage;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/*
+ * Append new BTF_KIND_DATASEC type with:
+ * - *name* - non-empty/non-NULL name;
+ * - *byte_sz* - data section size, in bytes.
+ *
+ * Data section is initially empty. Variable info entries can be added with
+ * btf__add_datasec_var_info() calls, after btf__add_datasec() succeeds.
+ *
+ * Returns:
+ * - >0, type ID of newly added BTF type;
+ * - <0, on error.
+ */
+int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz)
+{
+ struct btf_type *t;
+ int sz, err, name_off;
+
+ /* non-empty name */
+ if (!name || !name[0])
+ return -EINVAL;
+
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_type);
+ t = btf_add_type_mem(btf, sz);
+ if (!t)
+ return -ENOMEM;
+
+ name_off = btf__add_str(btf, name);
+ if (name_off < 0)
+ return name_off;
+
+ /* start with vlen=0, which will be updated as var_secinfos are added */
+ t->name_off = name_off;
+ t->info = btf_type_info(BTF_KIND_DATASEC, 0, 0);
+ t->size = byte_sz;
+
+ err = btf_add_type_idx_entry(btf, btf->hdr->type_len);
+ if (err)
+ return err;
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ btf->nr_types++;
+ return btf->nr_types;
+}
+
+/*
+ * Append new data section variable information entry for current DATASEC type:
+ * - *var_type_id* - type ID, describing type of the variable;
+ * - *offset* - variable offset within data section, in bytes;
+ * - *byte_sz* - variable size, in bytes.
+ *
+ * Returns:
+ * - 0, on success;
+ * - <0, on error.
+ */
+int btf__add_datasec_var_info(struct btf *btf, int var_type_id, __u32 offset, __u32 byte_sz)
+{
+ struct btf_type *t;
+ struct btf_var_secinfo *v;
+ int sz;
+
+ /* last type should be BTF_KIND_DATASEC */
+ if (btf->nr_types == 0)
+ return -EINVAL;
+ t = btf_type_by_id(btf, btf->nr_types);
+ if (!btf_is_datasec(t))
+ return -EINVAL;
+
+ if (validate_type_id(var_type_id))
+ return -EINVAL;
+
+ /* decompose and invalidate raw data */
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
+ sz = sizeof(struct btf_var_secinfo);
+ v = btf_add_type_mem(btf, sz);
+ if (!v)
+ return -ENOMEM;
+
+ v->type = var_type_id;
+ v->offset = offset;
+ v->size = byte_sz;
+
+ /* update parent type's vlen */
+ t = btf_type_by_id(btf, btf->nr_types);
+ btf_type_inc_vlen(t);
+
+ btf->hdr->type_len += sz;
+ btf->hdr->str_off += sz;
+ return 0;
+}
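
A minimal sketch of how the two DATASEC calls compose, assuming var_id came from a successful btf__add_var() call as above; error handling elided:

    /* a 4-byte ".data" section holding one variable at offset 0 */
    int sec_id = btf__add_datasec(btf, ".data", 4);
    int err = btf__add_datasec_var_info(btf, var_id, 0 /* offset */, 4 /* size */);
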
+
struct btf_ext_sec_setup_param {
__u32 off;
__u32 len;
@@ -1137,14 +2448,14 @@ static int btf_ext_setup_line_info(struct btf_ext *btf_ext)
return btf_ext_setup_info(btf_ext, &param);
}
-static int btf_ext_setup_field_reloc(struct btf_ext *btf_ext)
+static int btf_ext_setup_core_relos(struct btf_ext *btf_ext)
{
struct btf_ext_sec_setup_param param = {
- .off = btf_ext->hdr->field_reloc_off,
- .len = btf_ext->hdr->field_reloc_len,
- .min_rec_size = sizeof(struct bpf_field_reloc),
- .ext_info = &btf_ext->field_reloc_info,
- .desc = "field_reloc",
+ .off = btf_ext->hdr->core_relo_off,
+ .len = btf_ext->hdr->core_relo_len,
+ .min_rec_size = sizeof(struct bpf_core_relo),
+ .ext_info = &btf_ext->core_relo_info,
+ .desc = "core_relo",
};
return btf_ext_setup_info(btf_ext, &param);
@@ -1160,7 +2471,10 @@ static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
return -EINVAL;
}
- if (hdr->magic != BTF_MAGIC) {
+ if (hdr->magic == bswap_16(BTF_MAGIC)) {
+ pr_warn("BTF.ext in non-native endianness is not supported\n");
+ return -ENOTSUP;
+ } else if (hdr->magic != BTF_MAGIC) {
pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
return -EINVAL;
}
@@ -1223,10 +2537,9 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
if (err)
goto done;
- if (btf_ext->hdr->hdr_len <
- offsetofend(struct btf_ext_header, field_reloc_len))
+ if (btf_ext->hdr->hdr_len < offsetofend(struct btf_ext_header, core_relo_len))
goto done;
- err = btf_ext_setup_field_reloc(btf_ext);
+ err = btf_ext_setup_core_relos(btf_ext);
if (err)
goto done;
@@ -1481,6 +2794,9 @@ int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
return -EINVAL;
}
+ if (btf_ensure_modifiable(btf))
+ return -ENOMEM;
+
err = btf_dedup_strings(d);
if (err < 0) {
pr_debug("btf_dedup_strings failed:%d\n", err);
@@ -1581,7 +2897,7 @@ static int btf_dedup_hypot_map_add(struct btf_dedup *d,
__u32 *new_list;
d->hypot_cap += max((size_t)16, d->hypot_cap / 2);
- new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
+ new_list = libbpf_reallocarray(d->hypot_list, d->hypot_cap, sizeof(__u32));
if (!new_list)
return -ENOMEM;
d->hypot_list = new_list;
@@ -1665,7 +2981,7 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
/* special BTF "void" type is made canonical immediately */
d->map[0] = 0;
for (i = 1; i <= btf->nr_types; i++) {
- struct btf_type *t = d->btf->types[i];
+ struct btf_type *t = btf_type_by_id(d->btf, i);
/* VAR and DATASEC are never deduped and are self-canonical */
if (btf_is_var(t) || btf_is_datasec(t))
@@ -1704,7 +3020,7 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
struct btf_type *t;
for (i = 1; i <= d->btf->nr_types; i++) {
- t = d->btf->types[i];
+ t = btf_type_by_id(d->btf, i);
r = fn(&t->name_off, ctx);
if (r)
return r;
@@ -1858,8 +3174,7 @@ static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
*/
static int btf_dedup_strings(struct btf_dedup *d)
{
- const struct btf_header *hdr = d->btf->hdr;
- char *start = (char *)d->btf->nohdr_data + hdr->str_off;
+ char *start = d->btf->strs_data;
char *end = start + d->btf->hdr->str_len;
char *p = start, *tmp_strs = NULL;
struct btf_str_ptrs strs = {
@@ -1871,14 +3186,16 @@ static int btf_dedup_strings(struct btf_dedup *d)
int i, j, err = 0, grp_idx;
bool grp_used;
+ if (d->btf->strs_deduped)
+ return 0;
+
/* build index of all strings */
while (p < end) {
if (strs.cnt + 1 > strs.cap) {
struct btf_str_ptr *new_ptrs;
strs.cap += max(strs.cnt / 2, 16U);
- new_ptrs = realloc(strs.ptrs,
- sizeof(strs.ptrs[0]) * strs.cap);
+ new_ptrs = libbpf_reallocarray(strs.ptrs, strs.cap, sizeof(strs.ptrs[0]));
if (!new_ptrs) {
err = -ENOMEM;
goto done;
@@ -1964,6 +3281,7 @@ static int btf_dedup_strings(struct btf_dedup *d)
goto done;
d->btf->hdr->str_len = end - start;
+ d->btf->strs_deduped = true;
done:
free(tmp_strs);
@@ -2240,7 +3558,7 @@ static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
*/
static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
{
- struct btf_type *t = d->btf->types[type_id];
+ struct btf_type *t = btf_type_by_id(d->btf, type_id);
struct hashmap_entry *hash_entry;
struct btf_type *cand;
/* if we don't find equivalent type, then we are canonical */
@@ -2267,7 +3585,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_int(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_int(t, cand)) {
new_id = cand_id;
break;
@@ -2279,7 +3597,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_enum(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_enum(t, cand)) {
new_id = cand_id;
break;
@@ -2302,7 +3620,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_common(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_common(t, cand)) {
new_id = cand_id;
break;
@@ -2361,13 +3679,13 @@ static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
{
__u32 orig_type_id = type_id;
- if (!btf_is_fwd(d->btf->types[type_id]))
+ if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
return type_id;
while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
type_id = d->map[type_id];
- if (!btf_is_fwd(d->btf->types[type_id]))
+ if (!btf_is_fwd(btf__type_by_id(d->btf, type_id)))
return type_id;
return orig_type_id;
@@ -2495,8 +3813,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
return -ENOMEM;
- cand_type = d->btf->types[cand_id];
- canon_type = d->btf->types[canon_id];
+ cand_type = btf_type_by_id(d->btf, cand_id);
+ canon_type = btf_type_by_id(d->btf, canon_id);
cand_kind = btf_kind(cand_type);
canon_kind = btf_kind(canon_type);
@@ -2647,8 +3965,8 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
targ_type_id = d->hypot_map[cand_type_id];
t_id = resolve_type_id(d, targ_type_id);
c_id = resolve_type_id(d, cand_type_id);
- t_kind = btf_kind(d->btf->types[t_id]);
- c_kind = btf_kind(d->btf->types[c_id]);
+ t_kind = btf_kind(btf__type_by_id(d->btf, t_id));
+ c_kind = btf_kind(btf__type_by_id(d->btf, c_id));
/*
* Resolve FWD into STRUCT/UNION.
* It's ok to resolve FWD into STRUCT/UNION that's not yet
@@ -2716,7 +4034,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
return 0;
- t = d->btf->types[type_id];
+ t = btf_type_by_id(d->btf, type_id);
kind = btf_kind(t);
if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
@@ -2737,7 +4055,7 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
* creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
* FWD and compatible STRUCT/UNION are considered equivalent.
*/
- cand_type = d->btf->types[cand_id];
+ cand_type = btf_type_by_id(d->btf, cand_id);
if (!btf_shallow_equal_struct(t, cand_type))
continue;
@@ -2809,7 +4127,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
if (d->map[type_id] <= BTF_MAX_NR_TYPES)
return resolve_type_id(d, type_id);
- t = d->btf->types[type_id];
+ t = btf_type_by_id(d->btf, type_id);
d->map[type_id] = BTF_IN_PROGRESS_ID;
switch (btf_kind(t)) {
@@ -2827,7 +4145,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_common(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_common(t, cand)) {
new_id = cand_id;
break;
@@ -2851,7 +4169,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_array(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_array(t, cand)) {
new_id = cand_id;
break;
@@ -2883,7 +4201,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
h = btf_hash_fnproto(t);
for_each_dedup_cand(d, hash_entry, h) {
cand_id = (__u32)(long)hash_entry->value;
- cand = d->btf->types[cand_id];
+ cand = btf_type_by_id(d->btf, cand_id);
if (btf_equal_fnproto(t, cand)) {
new_id = cand_id;
break;
@@ -2931,9 +4249,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
*/
static int btf_dedup_compact_types(struct btf_dedup *d)
{
- struct btf_type **new_types;
+ __u32 *new_offs;
__u32 next_type_id = 1;
- char *types_start, *p;
+ void *p;
int i, len;
/* we are going to reuse hypot_map to store compaction remapping */
@@ -2941,41 +4259,34 @@ static int btf_dedup_compact_types(struct btf_dedup *d)
for (i = 1; i <= d->btf->nr_types; i++)
d->hypot_map[i] = BTF_UNPROCESSED_ID;
- types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
- p = types_start;
+ p = d->btf->types_data;
for (i = 1; i <= d->btf->nr_types; i++) {
if (d->map[i] != i)
continue;
- len = btf_type_size(d->btf->types[i]);
+ len = btf_type_size(btf__type_by_id(d->btf, i));
if (len < 0)
return len;
- memmove(p, d->btf->types[i], len);
+ memmove(p, btf__type_by_id(d->btf, i), len);
d->hypot_map[i] = next_type_id;
- d->btf->types[next_type_id] = (struct btf_type *)p;
+ d->btf->type_offs[next_type_id] = p - d->btf->types_data;
p += len;
next_type_id++;
}
/* shrink struct btf's internal types index and update btf_header */
d->btf->nr_types = next_type_id - 1;
- d->btf->types_size = d->btf->nr_types;
- d->btf->hdr->type_len = p - types_start;
- new_types = realloc(d->btf->types,
- (1 + d->btf->nr_types) * sizeof(struct btf_type *));
- if (!new_types)
+ d->btf->type_offs_cap = d->btf->nr_types + 1;
+ d->btf->hdr->type_len = p - d->btf->types_data;
+ new_offs = libbpf_reallocarray(d->btf->type_offs, d->btf->type_offs_cap,
+ sizeof(*new_offs));
+ if (!new_offs)
return -ENOMEM;
- d->btf->types = new_types;
-
- /* make sure string section follows type information without gaps */
- d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
- memmove(p, d->btf->strings, d->btf->hdr->str_len);
- d->btf->strings = p;
- p += d->btf->hdr->str_len;
-
- d->btf->data_size = p - (char *)d->btf->data;
+ d->btf->type_offs = new_offs;
+ d->btf->hdr->str_off = d->btf->hdr->type_len;
+ d->btf->raw_size = d->btf->hdr->hdr_len + d->btf->hdr->type_len + d->btf->hdr->str_len;
return 0;
}
@@ -3008,7 +4319,7 @@ static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
*/
static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
{
- struct btf_type *t = d->btf->types[type_id];
+ struct btf_type *t = btf_type_by_id(d->btf, type_id);
int i, r;
switch (btf_kind(t)) {
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 1ca14448df4c..57247240a20a 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -5,6 +5,7 @@
#define __LIBBPF_BTF_H
#include <stdarg.h>
+#include <stdbool.h>
#include <linux/btf.h>
#include <linux/types.h>
@@ -24,46 +25,14 @@ struct btf_type;
struct bpf_object;
-/*
- * The .BTF.ext ELF section layout defined as
- * struct btf_ext_header
- * func_info subsection
- *
- * The func_info subsection layout:
- * record size for struct bpf_func_info in the func_info subsection
- * struct btf_sec_func_info for section #1
- * a list of bpf_func_info records for section #1
- * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
- * but may not be identical
- * struct btf_sec_func_info for section #2
- * a list of bpf_func_info records for section #2
- * ......
- *
- * Note that the bpf_func_info record size in .BTF.ext may not
- * be the same as the one defined in include/uapi/linux/bpf.h.
- * The loader should ensure that record_size meets minimum
- * requirement and pass the record as is to the kernel. The
- * kernel will handle the func_info properly based on its contents.
- */
-struct btf_ext_header {
- __u16 magic;
- __u8 version;
- __u8 flags;
- __u32 hdr_len;
-
- /* All offsets are in bytes relative to the end of this header */
- __u32 func_info_off;
- __u32 func_info_len;
- __u32 line_info_off;
- __u32 line_info_len;
-
- /* optional part of .BTF.ext header */
- __u32 field_reloc_off;
- __u32 field_reloc_len;
+enum btf_endianness {
+ BTF_LITTLE_ENDIAN = 0,
+ BTF_BIG_ENDIAN = 1,
};
LIBBPF_API void btf__free(struct btf *btf);
LIBBPF_API struct btf *btf__new(const void *data, __u32 size);
+LIBBPF_API struct btf *btf__new_empty(void);
LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext);
LIBBPF_API struct btf *btf__parse_raw(const char *path);
@@ -78,6 +47,8 @@ LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
__u32 id);
LIBBPF_API size_t btf__pointer_size(const struct btf *btf);
LIBBPF_API int btf__set_pointer_size(struct btf *btf, size_t ptr_sz);
+LIBBPF_API enum btf_endianness btf__endianness(const struct btf *btf);
+LIBBPF_API int btf__set_endianness(struct btf *btf, enum btf_endianness endian);
LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
LIBBPF_API int btf__align_of(const struct btf *btf, __u32 id);
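
A hedged sketch of the new endianness accessors: forcing big-endian raw output, e.g. when generating BTF for a cross-endian target. The raw data is presumably produced in the requested byte order; error handling elided:

    const void *raw;
    __u32 raw_sz;

    if (btf__endianness(btf) != BTF_BIG_ENDIAN)
        btf__set_endianness(btf, BTF_BIG_ENDIAN);
    raw = btf__get_raw_data(btf, &raw_sz);
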
@@ -85,6 +56,7 @@ LIBBPF_API int btf__fd(const struct btf *btf);
LIBBPF_API void btf__set_fd(struct btf *btf, int fd);
LIBBPF_API const void *btf__get_raw_data(const struct btf *btf, __u32 *size);
LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
+LIBBPF_API const char *btf__str_by_offset(const struct btf *btf, __u32 offset);
LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
__u32 expected_key_size,
@@ -95,19 +67,62 @@ LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
LIBBPF_API const void *btf_ext__get_raw_data(const struct btf_ext *btf_ext,
__u32 *size);
-LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **func_info, __u32 *cnt);
-LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
- const struct btf_ext *btf_ext,
- const char *sec_name, __u32 insns_cnt,
- void **line_info, __u32 *cnt);
+LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_func_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
+int btf_ext__reloc_func_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **func_info, __u32 *cnt);
+LIBBPF_API LIBBPF_DEPRECATED("btf_ext__reloc_line_info was never meant as a public API and has wrong assumptions embedded in it; it will be removed in the future libbpf versions")
+int btf_ext__reloc_line_info(const struct btf *btf,
+ const struct btf_ext *btf_ext,
+ const char *sec_name, __u32 insns_cnt,
+ void **line_info, __u32 *cnt);
LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
LIBBPF_API struct btf *libbpf_find_kernel_btf(void);
+LIBBPF_API int btf__find_str(struct btf *btf, const char *s);
+LIBBPF_API int btf__add_str(struct btf *btf, const char *s);
+
+LIBBPF_API int btf__add_int(struct btf *btf, const char *name, size_t byte_sz, int encoding);
+LIBBPF_API int btf__add_ptr(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_array(struct btf *btf,
+ int index_type_id, int elem_type_id, __u32 nr_elems);
+/* struct/union construction APIs */
+LIBBPF_API int btf__add_struct(struct btf *btf, const char *name, __u32 sz);
+LIBBPF_API int btf__add_union(struct btf *btf, const char *name, __u32 sz);
+LIBBPF_API int btf__add_field(struct btf *btf, const char *name, int field_type_id,
+ __u32 bit_offset, __u32 bit_size);
+
+/* enum construction APIs */
+LIBBPF_API int btf__add_enum(struct btf *btf, const char *name, __u32 bytes_sz);
+LIBBPF_API int btf__add_enum_value(struct btf *btf, const char *name, __s64 value);
+
+enum btf_fwd_kind {
+ BTF_FWD_STRUCT = 0,
+ BTF_FWD_UNION = 1,
+ BTF_FWD_ENUM = 2,
+};
+
+LIBBPF_API int btf__add_fwd(struct btf *btf, const char *name, enum btf_fwd_kind fwd_kind);
+LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_id);
+LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
+LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
+
+/* func and func_proto construction APIs */
+LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
+ enum btf_func_linkage linkage, int proto_type_id);
+LIBBPF_API int btf__add_func_proto(struct btf *btf, int ret_type_id);
+LIBBPF_API int btf__add_func_param(struct btf *btf, const char *name, int type_id);
+
+/* var & datasec construction APIs */
+LIBBPF_API int btf__add_var(struct btf *btf, const char *name, int linkage, int type_id);
+LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz);
+LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id,
+ __u32 offset, __u32 byte_sz);
+
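Taken together, the construction APIs above allow building BTF from scratch. A minimal end-to-end sketch using only signatures declared in this header (names are illustrative, error checks elided):

    struct btf *btf = btf__new_empty();
    int i = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* ID 1 */
    int p = btf__add_ptr(btf, i);                        /* ID 2 */
    int s = btf__add_struct(btf, "node", 8);             /* ID 3 */
    /* one pointer field at bit offset 0; bit_size 0 => not a bitfield */
    btf__add_field(btf, "next", p, 0, 0);
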
struct btf_dedup_opts {
unsigned int dedup_table_size;
bool dont_resolve_fwds;
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 57c00fa63932..2f9d685bd522 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -19,9 +19,6 @@
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
@@ -63,11 +60,14 @@ struct btf_dump {
struct btf_dump_opts opts;
int ptr_sz;
bool strip_mods;
+ int last_id;
/* per-type auxiliary state */
struct btf_dump_type_aux_state *type_states;
+ size_t type_states_cap;
/* per-type optional cached unique name, must be freed, if present */
const char **cached_names;
+ size_t cached_names_cap;
/* topo-sorted list of dependent type definitions */
__u32 *emit_queue;
@@ -93,14 +93,7 @@ struct btf_dump {
static size_t str_hash_fn(const void *key, void *ctx)
{
- const char *s = key;
- size_t h = 0;
-
- while (*s) {
- h = h * 31 + *s;
- s++;
- }
- return h;
+ return str_hash(key);
}
static bool str_equal_fn(const void *a, const void *b, void *ctx)
@@ -123,6 +116,7 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
}
static int btf_dump_mark_referenced(struct btf_dump *d);
+static int btf_dump_resize(struct btf_dump *d);
struct btf_dump *btf_dump__new(const struct btf *btf,
const struct btf_ext *btf_ext,
@@ -154,25 +148,8 @@ struct btf_dump *btf_dump__new(const struct btf *btf,
d->ident_names = NULL;
goto err;
}
- d->type_states = calloc(1 + btf__get_nr_types(d->btf),
- sizeof(d->type_states[0]));
- if (!d->type_states) {
- err = -ENOMEM;
- goto err;
- }
- d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
- sizeof(d->cached_names[0]));
- if (!d->cached_names) {
- err = -ENOMEM;
- goto err;
- }
-
- /* VOID is special */
- d->type_states[0].order_state = ORDERED;
- d->type_states[0].emit_state = EMITTED;
- /* eagerly determine referenced types for anon enums */
- err = btf_dump_mark_referenced(d);
+ err = btf_dump_resize(d);
if (err)
goto err;
@@ -182,9 +159,38 @@ err:
return ERR_PTR(err);
}
+static int btf_dump_resize(struct btf_dump *d)
+{
+ int err, last_id = btf__get_nr_types(d->btf);
+
+ if (last_id <= d->last_id)
+ return 0;
+
+ if (btf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
+ sizeof(*d->type_states), last_id + 1))
+ return -ENOMEM;
+ if (btf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
+ sizeof(*d->cached_names), last_id + 1))
+ return -ENOMEM;
+
+ if (d->last_id == 0) {
+ /* VOID is special */
+ d->type_states[0].order_state = ORDERED;
+ d->type_states[0].emit_state = EMITTED;
+ }
+
+ /* eagerly determine referenced types for anon enums */
+ err = btf_dump_mark_referenced(d);
+ if (err)
+ return err;
+
+ d->last_id = last_id;
+ return 0;
+}
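
One consequence of the resize support is that a dumper created earlier keeps working after types are appended to the underlying BTF; a hedged sketch (print_fn and error handling elided):

    struct btf_dump *d = btf_dump__new(btf, NULL, NULL, print_fn);
    int new_id = btf__add_int(btf, "long", 8, BTF_INT_SIGNED);

    /* btf_dump__dump_type() calls btf_dump_resize() internally */
    btf_dump__dump_type(d, new_id);
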
+
void btf_dump__free(struct btf_dump *d)
{
- int i, cnt;
+ int i;
if (IS_ERR_OR_NULL(d))
return;
@@ -192,7 +198,7 @@ void btf_dump__free(struct btf_dump *d)
free(d->type_states);
if (d->cached_names) {
/* any set cached name is owned by us and should be freed */
- for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) {
+ for (i = 0; i <= d->last_id; i++) {
if (d->cached_names[i])
free((void *)d->cached_names[i]);
}
@@ -232,6 +238,10 @@ int btf_dump__dump_type(struct btf_dump *d, __u32 id)
if (id > btf__get_nr_types(d->btf))
return -EINVAL;
+ err = btf_dump_resize(d);
+ if (err)
+ return err;
+
d->emit_queue_cnt = 0;
err = btf_dump_order_type(d, id, false);
if (err < 0)
@@ -261,7 +271,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
const struct btf_type *t;
__u16 vlen;
- for (i = 1; i <= n; i++) {
+ for (i = d->last_id + 1; i <= n; i++) {
t = btf__type_by_id(d->btf, i);
vlen = btf_vlen(t);
@@ -316,6 +326,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
}
return 0;
}
+
static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
{
__u32 *new_queue;
@@ -323,8 +334,7 @@ static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
if (d->emit_queue_cnt >= d->emit_queue_cap) {
new_cap = max(16, d->emit_queue_cap * 3 / 2);
- new_queue = realloc(d->emit_queue,
- new_cap * sizeof(new_queue[0]));
+ new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0]));
if (!new_queue)
return -ENOMEM;
d->emit_queue = new_queue;
@@ -1003,8 +1013,7 @@ static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
if (d->decl_stack_cnt >= d->decl_stack_cap) {
new_cap = max(16, d->decl_stack_cap * 3 / 2);
- new_stack = realloc(d->decl_stack,
- new_cap * sizeof(new_stack[0]));
+ new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0]));
if (!new_stack)
return -ENOMEM;
d->decl_stack = new_stack;
@@ -1061,11 +1070,15 @@ int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
const struct btf_dump_emit_type_decl_opts *opts)
{
const char *fname;
- int lvl;
+ int lvl, err;
if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
return -EINVAL;
+ err = btf_dump_resize(d);
+ if (err)
+ return -EINVAL;
+
fname = OPTS_GET(opts, field_name, "");
lvl = OPTS_GET(opts, indent_level, 0);
d->strip_mods = OPTS_GET(opts, strip_mods, false);
diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
index a405dad068f5..3c20b126d60d 100644
--- a/tools/lib/bpf/hashmap.c
+++ b/tools/lib/bpf/hashmap.c
@@ -15,6 +15,9 @@
/* make sure libbpf doesn't use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+/* prevent accidental re-addition of reallocarray() */
+#pragma GCC poison reallocarray
+
/* start with 4 buckets */
#define HASHMAP_MIN_CAP_BITS 2
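
reallocarray() is poisoned because it is not available on all supported libc versions; libbpf now routes array growth through libbpf_reallocarray() instead. A sketch of what such an overflow-checked wrapper presumably looks like (the actual definition lives in libbpf_internal.h):

    #include <stdlib.h>
    #include <stdint.h>

    static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
    {
        /* refuse to allocate if nmemb * size would overflow size_t */
        if (size && nmemb > SIZE_MAX / size)
            return NULL;
        return realloc(ptr, nmemb * size);
    }
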
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
index e0af36b0e5d8..d9b385fe808c 100644
--- a/tools/lib/bpf/hashmap.h
+++ b/tools/lib/bpf/hashmap.h
@@ -25,6 +25,18 @@ static inline size_t hash_bits(size_t h, int bits)
#endif
}
+/* generic C-string hashing function */
+static inline size_t str_hash(const char *s)
+{
+ size_t h = 0;
+
+ while (*s) {
+ h = h * 31 + *s;
+ s++;
+ }
+ return h;
+}
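
With str_hash() in the shared header, callers can wire it straight into a hashmap, mirroring the btf_dump change above; a hypothetical sketch (the my_* names are illustrative, <string.h> assumed):

    static size_t my_str_hash_fn(const void *key, void *ctx)
    {
        return str_hash(key); /* ctx unused */
    }

    static bool my_str_equal_fn(const void *a, const void *b, void *ctx)
    {
        return strcmp(a, b) == 0;
    }

    struct hashmap *map = hashmap__new(my_str_hash_fn, my_str_equal_fn, NULL);
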
+
typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index e493d6048143..313034117070 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -44,7 +44,6 @@
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
-#include <tools/libc_compat.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>
@@ -56,9 +55,6 @@
#include "libbpf_internal.h"
#include "hashmap.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
#ifndef EM_BPF
#define EM_BPF 247
#endif
@@ -67,6 +63,8 @@
#define BPF_FS_MAGIC 0xcafe4a11
#endif
+#define BPF_INSN_SZ (sizeof(struct bpf_insn))
+
/* vsprintf() in __base_pr() uses nonliteral format string. It may break
* compilation if user enables corresponding warning. Disable it explicitly.
*/
@@ -75,8 +73,6 @@
#define __printf(a, b) __attribute__((format(printf, a, b)))
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
-static struct bpf_program *bpf_object__find_prog_by_idx(struct bpf_object *obj,
- int idx);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);
@@ -154,34 +150,37 @@ static void pr_perm_msg(int err)
___err; })
#endif
-#ifdef HAVE_LIBELF_MMAP_SUPPORT
-# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
-#else
-# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
-#endif
-
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
}
-struct bpf_capabilities {
+enum kern_feature_id {
/* v4.14: kernel support for program & map names. */
- __u32 name:1;
+ FEAT_PROG_NAME,
/* v5.2: kernel support for global data sections. */
- __u32 global_data:1;
+ FEAT_GLOBAL_DATA,
+ /* BTF support */
+ FEAT_BTF,
/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
- __u32 btf_func:1;
+ FEAT_BTF_FUNC,
/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
- __u32 btf_datasec:1;
- /* BPF_F_MMAPABLE is supported for arrays */
- __u32 array_mmap:1;
+ FEAT_BTF_DATASEC,
/* BTF_FUNC_GLOBAL is supported */
- __u32 btf_func_global:1;
+ FEAT_BTF_GLOBAL_FUNC,
+ /* BPF_F_MMAPABLE is supported for arrays */
+ FEAT_ARRAY_MMAP,
/* kernel support for expected_attach_type in BPF_PROG_LOAD */
- __u32 exp_attach_type:1;
+ FEAT_EXP_ATTACH_TYPE,
+ /* bpf_probe_read_{kernel,user}[_str] helpers */
+ FEAT_PROBE_READ_KERN,
+ /* BPF_PROG_BIND_MAP is supported */
+ FEAT_PROG_BIND_MAP,
+ __FEAT_CNT,
};
+static bool kernel_supports(enum kern_feature_id feat_id);
+
enum reloc_type {
RELO_LD64,
RELO_CALL,
@@ -194,6 +193,7 @@ struct reloc_desc {
int insn_idx;
int map_idx;
int sym_off;
+ bool processed;
};
struct bpf_sec_def;
@@ -209,6 +209,7 @@ struct bpf_sec_def {
bool is_exp_attach_type_optional;
bool is_attachable;
bool is_attach_btf;
+ bool is_sleepable;
attach_fn_t attach_fn;
};
@@ -217,20 +218,45 @@ struct bpf_sec_def {
* linux/filter.h.
*/
struct bpf_program {
- /* Index in elf obj file, for relocation use. */
- int idx;
- char *name;
- int prog_ifindex;
- char *section_name;
const struct bpf_sec_def *sec_def;
- /* section_name with / replaced by _; makes recursive pinning
+ char *sec_name;
+ size_t sec_idx;
+ /* this program's instruction offset (in number of instructions)
+ * within its containing ELF section
+ */
+ size_t sec_insn_off;
+ /* number of original instructions in ELF section belonging to this
+ * program, not taking into account subprogram instructions possibly
+ * appended later during relocation
+ */
+ size_t sec_insn_cnt;
+ /* Offset (in number of instructions) of the start of instructions
+ * belonging to this BPF program within its containing main BPF
+ * program. For the entry-point (main) BPF program, this is always
+ * zero. For a sub-program, this gets reset before each main BPF
+ * program is processed and relocated, and is used to determine
+ * whether the sub-program was already appended to the main program,
+ * and if yes, at which instruction offset.
+ */
+ size_t sub_insn_off;
+
+ char *name;
+ /* sec_name with / replaced by _; makes recursive pinning
* in bpf_object__pin_programs easier
*/
char *pin_name;
+
+ /* instructions that belong to BPF program; insns[0] is located at
+ * sec_insn_off instruction within its ELF section in ELF file, so
+ * when mapping ELF file instruction index to the local instruction,
+ * one needs to subtract sec_insn_off; and vice versa.
+ */
struct bpf_insn *insns;
- size_t insns_cnt, main_prog_cnt;
- enum bpf_prog_type type;
- bool load;
+ /* actual number of instructions in this BPF program's image; for
+ * entry-point BPF programs this includes the size of main program
+ * itself plus all the used sub-programs, appended at the end
+ */
+ size_t insns_cnt;
struct reloc_desc *reloc_desc;
int nr_reloc;
@@ -246,15 +272,16 @@ struct bpf_program {
void *priv;
bpf_program_clear_priv_t clear_priv;
+ bool load;
+ enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
+ int prog_ifindex;
__u32 attach_btf_id;
__u32 attach_prog_fd;
void *func_info;
__u32 func_info_rec_size;
__u32 func_info_cnt;
- struct bpf_capabilities *caps;
-
void *line_info;
__u32 line_info_rec_size;
__u32 line_info_cnt;
@@ -363,6 +390,12 @@ struct extern_desc {
} kcfg;
struct {
unsigned long long addr;
+
+ /* target btf_id of the corresponding kernel var. */
+ int vmlinux_btf_id;
+
+ /* local btf_id of the ksym extern's type. */
+ __u32 type_id;
} ksym;
};
};
@@ -384,9 +417,10 @@ struct bpf_object {
struct extern_desc *externs;
int nr_extern;
int kconfig_map_idx;
+ int rodata_map_idx;
bool loaded;
- bool has_pseudo_calls;
+ bool has_subcalls;
/*
* Information when doing elf related work. Only valid if fd
@@ -403,6 +437,7 @@ struct bpf_object {
Elf_Data *rodata;
Elf_Data *bss;
Elf_Data *st_ops_data;
+ size_t shstrndx; /* section index for section name strings */
size_t strtabidx;
struct {
GElf_Shdr shdr;
@@ -436,12 +471,20 @@ struct bpf_object {
void *priv;
bpf_object_clear_priv_t clear_priv;
- struct bpf_capabilities caps;
-
char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)
+static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
+static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
+static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
+static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
+static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
+static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
+static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
+static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
+ size_t off, __u32 sym_type, GElf_Sym *sym);
+
void bpf_program__unload(struct bpf_program *prog)
{
int i;
@@ -481,159 +524,160 @@ static void bpf_program__exit(struct bpf_program *prog)
bpf_program__unload(prog);
zfree(&prog->name);
- zfree(&prog->section_name);
+ zfree(&prog->sec_name);
zfree(&prog->pin_name);
zfree(&prog->insns);
zfree(&prog->reloc_desc);
prog->nr_reloc = 0;
prog->insns_cnt = 0;
- prog->idx = -1;
+ prog->sec_idx = -1;
}
static char *__bpf_program__pin_name(struct bpf_program *prog)
{
char *name, *p;
- name = p = strdup(prog->section_name);
+ name = p = strdup(prog->sec_name);
while ((p = strchr(p, '/')))
*p = '_';
return name;
}
+static bool insn_is_subprog_call(const struct bpf_insn *insn)
+{
+ return BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_CALL &&
+ BPF_SRC(insn->code) == BPF_K &&
+ insn->src_reg == BPF_PSEUDO_CALL &&
+ insn->dst_reg == 0 &&
+ insn->off == 0;
+}
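
For reference, this is the shape of instruction the helper matches: a pseudo call whose imm encodes the callee's relative instruction offset. An illustrative encoding (field values beyond what the helper checks are assumptions):

    struct bpf_insn subprog_call = {
        .code    = BPF_JMP | BPF_CALL, /* BPF_SRC is BPF_K (0) */
        .src_reg = BPF_PSEUDO_CALL,
        .dst_reg = 0,
        .off     = 0,
        .imm     = 1, /* e.g. callee begins at the very next instruction */
    };
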
+
static int
-bpf_program__init(void *data, size_t size, char *section_name, int idx,
- struct bpf_program *prog)
+bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
+ const char *name, size_t sec_idx, const char *sec_name,
+ size_t sec_off, void *insn_data, size_t insn_data_sz)
{
- const size_t bpf_insn_sz = sizeof(struct bpf_insn);
+ int i;
- if (size == 0 || size % bpf_insn_sz) {
- pr_warn("corrupted section '%s', size: %zu\n",
- section_name, size);
+ if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
+ pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
+ sec_name, name, sec_off, insn_data_sz);
return -EINVAL;
}
memset(prog, 0, sizeof(*prog));
+ prog->obj = obj;
+
+ prog->sec_idx = sec_idx;
+ prog->sec_insn_off = sec_off / BPF_INSN_SZ;
+ prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
+ /* insns_cnt can later be increased by appending used subprograms */
+ prog->insns_cnt = prog->sec_insn_cnt;
- prog->section_name = strdup(section_name);
- if (!prog->section_name) {
- pr_warn("failed to alloc name for prog under section(%d) %s\n",
- idx, section_name);
+ prog->type = BPF_PROG_TYPE_UNSPEC;
+ prog->load = true;
+
+ prog->instances.fds = NULL;
+ prog->instances.nr = -1;
+
+ prog->sec_name = strdup(sec_name);
+ if (!prog->sec_name)
+ goto errout;
+
+ prog->name = strdup(name);
+ if (!prog->name)
goto errout;
- }
prog->pin_name = __bpf_program__pin_name(prog);
- if (!prog->pin_name) {
- pr_warn("failed to alloc pin name for prog under section(%d) %s\n",
- idx, section_name);
+ if (!prog->pin_name)
goto errout;
- }
- prog->insns = malloc(size);
- if (!prog->insns) {
- pr_warn("failed to alloc insns for prog under section %s\n",
- section_name);
+ prog->insns = malloc(insn_data_sz);
+ if (!prog->insns)
goto errout;
+ memcpy(prog->insns, insn_data, insn_data_sz);
+
+ for (i = 0; i < prog->insns_cnt; i++) {
+ if (insn_is_subprog_call(&prog->insns[i])) {
+ obj->has_subcalls = true;
+ break;
+ }
}
- prog->insns_cnt = size / bpf_insn_sz;
- memcpy(prog->insns, data, size);
- prog->idx = idx;
- prog->instances.fds = NULL;
- prog->instances.nr = -1;
- prog->type = BPF_PROG_TYPE_UNSPEC;
- prog->load = true;
return 0;
errout:
+ pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
bpf_program__exit(prog);
return -ENOMEM;
}
static int
-bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
- char *section_name, int idx)
+bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
+ const char *sec_name, int sec_idx)
{
- struct bpf_program prog, *progs;
+ struct bpf_program *prog, *progs;
+ void *data = sec_data->d_buf;
+ size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
int nr_progs, err;
+ const char *name;
+ GElf_Sym sym;
- err = bpf_program__init(data, size, section_name, idx, &prog);
- if (err)
- return err;
-
- prog.caps = &obj->caps;
progs = obj->programs;
nr_progs = obj->nr_programs;
+ sec_off = 0;
- progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
- if (!progs) {
- /*
- * In this case the original obj->programs
- * is still valid, so don't need special treat for
- * bpf_close_object().
- */
- pr_warn("failed to alloc a new program under section '%s'\n",
- section_name);
- bpf_program__exit(&prog);
- return -ENOMEM;
- }
-
- pr_debug("found program %s\n", prog.section_name);
- obj->programs = progs;
- obj->nr_programs = nr_progs + 1;
- prog.obj = obj;
- progs[nr_progs] = prog;
- return 0;
-}
-
-static int
-bpf_object__init_prog_names(struct bpf_object *obj)
-{
- Elf_Data *symbols = obj->efile.symbols;
- struct bpf_program *prog;
- size_t pi, si;
+ while (sec_off < sec_sz) {
+ if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
+ pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- for (pi = 0; pi < obj->nr_programs; pi++) {
- const char *name = NULL;
+ prog_sz = sym.st_size;
- prog = &obj->programs[pi];
+ name = elf_sym_str(obj, sym.st_name);
+ if (!name) {
+ pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
- si++) {
- GElf_Sym sym;
+ if (sec_off + prog_sz > sec_sz) {
+ pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
+ sec_name, sec_off);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
- if (!gelf_getsym(symbols, si, &sym))
- continue;
- if (sym.st_shndx != prog->idx)
- continue;
- if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
- continue;
+ pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
+ sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
- name = elf_strptr(obj->efile.elf,
- obj->efile.strtabidx,
- sym.st_name);
- if (!name) {
- pr_warn("failed to get sym name string for prog %s\n",
- prog->section_name);
- return -LIBBPF_ERRNO__LIBELF;
- }
+ progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
+ if (!progs) {
+ /*
+ * In this case the original obj->programs
+ * is still valid, so no special treatment is needed for
+ * bpf_close_object().
+ */
+ pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
+ sec_name, name);
+ return -ENOMEM;
}
+ obj->programs = progs;
- if (!name && prog->idx == obj->efile.text_shndx)
- name = ".text";
+ prog = &progs[nr_progs];
- if (!name) {
- pr_warn("failed to find sym for prog %s\n",
- prog->section_name);
- return -EINVAL;
- }
+ err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
+ sec_off, data + sec_off, prog_sz);
+ if (err)
+ return err;
- prog->name = strdup(name);
- if (!prog->name) {
- pr_warn("failed to allocate memory for prog sym %s\n",
- name);
- return -ENOMEM;
- }
+ nr_progs++;
+ obj->nr_programs = nr_progs;
+
+ sec_off += prog_sz;
}
return 0;
@@ -1035,6 +1079,7 @@ static struct bpf_object *bpf_object__new(const char *path,
obj->efile.bss_shndx = -1;
obj->efile.st_ops_shndx = -1;
obj->kconfig_map_idx = -1;
+ obj->rodata_map_idx = -1;
obj->kern_version = get_kernel_version();
obj->loaded = false;
@@ -1066,13 +1111,18 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
obj->efile.obj_buf_sz = 0;
}
+/* if libelf is old and doesn't support mmap(), fall back to read() */
+#ifndef ELF_C_READ_MMAP
+#define ELF_C_READ_MMAP ELF_C_READ
+#endif
+
static int bpf_object__elf_init(struct bpf_object *obj)
{
int err = 0;
GElf_Ehdr *ep;
if (obj_elf_valid(obj)) {
- pr_warn("elf init: internal error\n");
+ pr_warn("elf: init internal error\n");
return -LIBBPF_ERRNO__LIBELF;
}
@@ -1090,31 +1140,44 @@ static int bpf_object__elf_init(struct bpf_object *obj)
err = -errno;
cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
- pr_warn("failed to open %s: %s\n", obj->path, cp);
+ pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
return err;
}
- obj->efile.elf = elf_begin(obj->efile.fd,
- LIBBPF_ELF_C_READ_MMAP, NULL);
+ obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
}
if (!obj->efile.elf) {
- pr_warn("failed to open %s as ELF file\n", obj->path);
+ pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__LIBELF;
goto errout;
}
if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
- pr_warn("failed to get EHDR from %s\n", obj->path);
+ pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
ep = &obj->efile.ehdr;
+ if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
+ pr_warn("elf: failed to get section names section index for %s: %s\n",
+ obj->path, elf_errmsg(-1));
+ err = -LIBBPF_ERRNO__FORMAT;
+ goto errout;
+ }
+
+ /* Elf is corrupted/truncated, avoid calling elf_strptr. */
+ if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
+ pr_warn("elf: failed to get section names strings from %s: %s\n",
+ obj->path, elf_errmsg(-1));
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
/* Old LLVM set e_machine to EM_NONE */
if (ep->e_type != ET_REL ||
(ep->e_machine && ep->e_machine != EM_BPF)) {
- pr_warn("%s is not an eBPF object file\n", obj->path);
+ pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
err = -LIBBPF_ERRNO__FORMAT;
goto errout;
}
@@ -1136,7 +1199,7 @@ static int bpf_object__check_endianness(struct bpf_object *obj)
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
- pr_warn("endianness mismatch.\n");
+ pr_warn("elf: endianness mismatch in %s.\n", obj->path);
return -LIBBPF_ERRNO__ENDIAN;
}
@@ -1171,55 +1234,10 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
return false;
}
-static int bpf_object_search_section_size(const struct bpf_object *obj,
- const char *name, size_t *d_size)
-{
- const GElf_Ehdr *ep = &obj->efile.ehdr;
- Elf *elf = obj->efile.elf;
- Elf_Scn *scn = NULL;
- int idx = 0;
-
- while ((scn = elf_nextscn(elf, scn)) != NULL) {
- const char *sec_name;
- Elf_Data *data;
- GElf_Shdr sh;
-
- idx++;
- if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warn("failed to get section(%d) header from %s\n",
- idx, obj->path);
- return -EIO;
- }
-
- sec_name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
- if (!sec_name) {
- pr_warn("failed to get section(%d) name from %s\n",
- idx, obj->path);
- return -EIO;
- }
-
- if (strcmp(name, sec_name))
- continue;
-
- data = elf_getdata(scn, 0);
- if (!data) {
- pr_warn("failed to get section(%d) data from %s(%s)\n",
- idx, name, obj->path);
- return -EIO;
- }
-
- *d_size = data->d_size;
- return 0;
- }
-
- return -ENOENT;
-}
-
int bpf_object__section_size(const struct bpf_object *obj, const char *name,
__u32 *size)
{
int ret = -ENOENT;
- size_t d_size;
*size = 0;
if (!name) {
@@ -1237,9 +1255,13 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name,
if (obj->efile.st_ops_data)
*size = obj->efile.st_ops_data->d_size;
} else {
- ret = bpf_object_search_section_size(obj, name, &d_size);
- if (!ret)
- *size = d_size;
+ Elf_Scn *scn = elf_sec_by_name(obj, name);
+ Elf_Data *data = elf_sec_data(obj, scn);
+
+ if (data) {
+ ret = 0; /* found it */
+ *size = data->d_size;
+ }
}
return *size ? 0 : ret;
@@ -1264,8 +1286,7 @@ int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
continue;
- sname = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name);
+ sname = elf_sym_str(obj, sym.st_name);
if (!sname) {
pr_warn("failed to get sym name string for var %s\n",
name);
@@ -1290,7 +1311,7 @@ static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
return &obj->maps[obj->nr_maps++];
new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
- new_maps = realloc(obj->maps, new_cap * sizeof(*obj->maps));
+ new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
if (!new_maps) {
pr_warn("alloc maps for object failed\n");
return ERR_PTR(-ENOMEM);
@@ -1417,6 +1438,8 @@ static int bpf_object__init_global_data_maps(struct bpf_object *obj)
obj->efile.rodata->d_size);
if (err)
return err;
+
+ obj->rodata_map_idx = obj->nr_maps - 1;
}
if (obj->efile.bss_shndx >= 0) {
err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
@@ -1742,12 +1765,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
if (!symbols)
return -EINVAL;
- scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
- if (scn)
- data = elf_getdata(scn, NULL);
+
+ scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
+ data = elf_sec_data(obj, scn);
if (!scn || !data) {
- pr_warn("failed to get Elf_Data from map section %d\n",
- obj->efile.maps_shndx);
+ pr_warn("elf: failed to get legacy map definitions for %s\n",
+ obj->path);
return -EINVAL;
}
@@ -1769,12 +1792,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
nr_maps++;
}
/* Assume equally sized map definitions */
- pr_debug("maps in %s: %d maps in %zd bytes\n",
- obj->path, nr_maps, data->d_size);
+ pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
+ nr_maps, data->d_size, obj->path);
if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
- pr_warn("unable to determine map definition size section %s, %d maps in %zd bytes\n",
- obj->path, nr_maps, data->d_size);
+ pr_warn("elf: unable to determine legacy map definition size in %s\n",
+ obj->path);
return -EINVAL;
}
map_def_sz = data->d_size / nr_maps;
@@ -1795,8 +1818,7 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
if (IS_ERR(map))
return PTR_ERR(map);
- map_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name);
+ map_name = elf_sym_str(obj, sym.st_name);
if (!map_name) {
pr_warn("failed to get map #%d name sym string for obj %s\n",
i, obj->path);
@@ -1884,6 +1906,29 @@ resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
return btf_is_func_proto(t) ? t : NULL;
}
+static const char *btf_kind_str(const struct btf_type *t)
+{
+ switch (btf_kind(t)) {
+ case BTF_KIND_UNKN: return "void";
+ case BTF_KIND_INT: return "int";
+ case BTF_KIND_PTR: return "ptr";
+ case BTF_KIND_ARRAY: return "array";
+ case BTF_KIND_STRUCT: return "struct";
+ case BTF_KIND_UNION: return "union";
+ case BTF_KIND_ENUM: return "enum";
+ case BTF_KIND_FWD: return "fwd";
+ case BTF_KIND_TYPEDEF: return "typedef";
+ case BTF_KIND_VOLATILE: return "volatile";
+ case BTF_KIND_CONST: return "const";
+ case BTF_KIND_RESTRICT: return "restrict";
+ case BTF_KIND_FUNC: return "func";
+ case BTF_KIND_FUNC_PROTO: return "func_proto";
+ case BTF_KIND_VAR: return "var";
+ case BTF_KIND_DATASEC: return "datasec";
+ default: return "unknown";
+ }
+}
+
/*
* Fetch integer attribute of BTF map definition. Such attributes are
* represented using a pointer to an array, in which dimensionality of array
@@ -1900,8 +1945,8 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
const struct btf_type *arr_t;
if (!btf_is_ptr(t)) {
- pr_warn("map '%s': attr '%s': expected PTR, got %u.\n",
- map_name, name, btf_kind(t));
+ pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
+ map_name, name, btf_kind_str(t));
return false;
}
@@ -1912,8 +1957,8 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
return false;
}
if (!btf_is_array(arr_t)) {
- pr_warn("map '%s': attr '%s': expected ARRAY, got %u.\n",
- map_name, name, btf_kind(arr_t));
+ pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
+ map_name, name, btf_kind_str(arr_t));
return false;
}
arr_info = btf_array(arr_t);
@@ -1924,7 +1969,7 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf,
static int build_map_pin_path(struct bpf_map *map, const char *path)
{
char buf[PATH_MAX];
- int err, len;
+ int len;
if (!path)
path = "/sys/fs/bpf";
@@ -1935,11 +1980,7 @@ static int build_map_pin_path(struct bpf_map *map, const char *path)
else if (len >= PATH_MAX)
return -ENAMETOOLONG;
- err = bpf_map__set_pin_path(map, buf);
- if (err)
- return err;
-
- return 0;
+ return bpf_map__set_pin_path(map, buf);
}
@@ -2007,8 +2048,8 @@ static int parse_btf_map_def(struct bpf_object *obj,
return -EINVAL;
}
if (!btf_is_ptr(t)) {
- pr_warn("map '%s': key spec is not PTR: %u.\n",
- map->name, btf_kind(t));
+ pr_warn("map '%s': key spec is not PTR: %s.\n",
+ map->name, btf_kind_str(t));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
@@ -2049,8 +2090,8 @@ static int parse_btf_map_def(struct bpf_object *obj,
return -EINVAL;
}
if (!btf_is_ptr(t)) {
- pr_warn("map '%s': value spec is not PTR: %u.\n",
- map->name, btf_kind(t));
+ pr_warn("map '%s': value spec is not PTR: %s.\n",
+ map->name, btf_kind_str(t));
return -EINVAL;
}
sz = btf__resolve_size(obj->btf, t->type);
@@ -2107,14 +2148,14 @@ static int parse_btf_map_def(struct bpf_object *obj,
t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
NULL);
if (!btf_is_ptr(t)) {
- pr_warn("map '%s': map-in-map inner def is of unexpected kind %u.\n",
- map->name, btf_kind(t));
+ pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
+ map->name, btf_kind_str(t));
return -EINVAL;
}
t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
if (!btf_is_struct(t)) {
- pr_warn("map '%s': map-in-map inner def is of unexpected kind %u.\n",
- map->name, btf_kind(t));
+ pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
+ map->name, btf_kind_str(t));
return -EINVAL;
}
@@ -2205,8 +2246,8 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
return -EINVAL;
}
if (!btf_is_var(var)) {
- pr_warn("map '%s': unexpected var kind %u.\n",
- map_name, btf_kind(var));
+ pr_warn("map '%s': unexpected var kind %s.\n",
+ map_name, btf_kind_str(var));
return -EINVAL;
}
if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
@@ -2218,8 +2259,8 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
if (!btf_is_struct(def)) {
- pr_warn("map '%s': unexpected def kind %u.\n",
- map_name, btf_kind(var));
+ pr_warn("map '%s': unexpected def kind %s.\n",
+ map_name, btf_kind_str(var));
return -EINVAL;
}
if (def->size > vi->size) {
@@ -2259,12 +2300,11 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
if (obj->efile.btf_maps_shndx < 0)
return 0;
- scn = elf_getscn(obj->efile.elf, obj->efile.btf_maps_shndx);
- if (scn)
- data = elf_getdata(scn, NULL);
+ scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
+ data = elf_sec_data(obj, scn);
if (!scn || !data) {
- pr_warn("failed to get Elf_Data from map section %d (%s)\n",
- obj->efile.btf_maps_shndx, MAPS_ELF_SEC);
+ pr_warn("elf: failed to get %s map definitions for %s\n",
+ MAPS_ELF_SEC, obj->path);
return -EINVAL;
}
@@ -2322,36 +2362,28 @@ static int bpf_object__init_maps(struct bpf_object *obj,
static bool section_have_execinstr(struct bpf_object *obj, int idx)
{
- Elf_Scn *scn;
GElf_Shdr sh;
- scn = elf_getscn(obj->efile.elf, idx);
- if (!scn)
+ if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
return false;
- if (gelf_getshdr(scn, &sh) != &sh)
- return false;
-
- if (sh.sh_flags & SHF_EXECINSTR)
- return true;
-
- return false;
+ return sh.sh_flags & SHF_EXECINSTR;
}
static bool btf_needs_sanitization(struct bpf_object *obj)
{
- bool has_func_global = obj->caps.btf_func_global;
- bool has_datasec = obj->caps.btf_datasec;
- bool has_func = obj->caps.btf_func;
+ bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
+ bool has_func = kernel_supports(FEAT_BTF_FUNC);
return !has_func || !has_datasec || !has_func_global;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
- bool has_func_global = obj->caps.btf_func_global;
- bool has_datasec = obj->caps.btf_datasec;
- bool has_func = obj->caps.btf_func;
+ bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
+ bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
+ bool has_func = kernel_supports(FEAT_BTF_FUNC);
struct btf_type *t;
int i, j, vlen;
@@ -2496,12 +2528,23 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj)
{
bool need_vmlinux_btf = false;
struct bpf_program *prog;
- int err;
+ int i, err;
/* CO-RE relocations need kernel BTF */
- if (obj->btf_ext && obj->btf_ext->field_reloc_info.len)
+ if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
need_vmlinux_btf = true;
+ /* Support for typed ksyms needs kernel BTF */
+ for (i = 0; i < obj->nr_extern; i++) {
+ const struct extern_desc *ext;
+
+ ext = &obj->externs[i];
+ if (ext->type == EXT_KSYM && ext->ksym.type_id) {
+ need_vmlinux_btf = true;
+ break;
+ }
+ }
+
bpf_object__for_each_program(prog, obj) {
if (!prog->load)
continue;
@@ -2533,6 +2576,15 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (!obj->btf)
return 0;
+ if (!kernel_supports(FEAT_BTF)) {
+ if (kernel_needs_btf(obj)) {
+ err = -EOPNOTSUPP;
+ goto report;
+ }
+ pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
+ return 0;
+ }
+
sanitize = btf_needs_sanitization(obj);
if (sanitize) {
const void *raw_data;
@@ -2558,6 +2610,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
}
btf__free(kern_btf);
}
+report:
if (err) {
btf_mandatory = kernel_needs_btf(obj);
pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
@@ -2569,61 +2622,255 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
return err;
}
+static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
+{
+ const char *name;
+
+ name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
+ if (!name) {
+ pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
+ off, obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return name;
+}
+
+static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
+{
+ const char *name;
+
+ name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
+ if (!name) {
+ pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
+ off, obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return name;
+}
+
+static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
+{
+ Elf_Scn *scn;
+
+ scn = elf_getscn(obj->efile.elf, idx);
+ if (!scn) {
+ pr_warn("elf: failed to get section(%zu) from %s: %s\n",
+ idx, obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+ return scn;
+}
+
+static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
+{
+ Elf_Scn *scn = NULL;
+ Elf *elf = obj->efile.elf;
+ const char *sec_name;
+
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ sec_name = elf_sec_name(obj, scn);
+ if (!sec_name)
+ return NULL;
+
+ if (strcmp(sec_name, name) != 0)
+ continue;
+
+ return scn;
+ }
+ return NULL;
+}
+
+static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
+{
+ if (!scn)
+ return -EINVAL;
+
+ if (gelf_getshdr(scn, hdr) != hdr) {
+ pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
+ elf_ndxscn(scn), obj->path, elf_errmsg(-1));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
+{
+ const char *name;
+ GElf_Shdr sh;
+
+ if (!scn)
+ return NULL;
+
+ if (elf_sec_hdr(obj, scn, &sh))
+ return NULL;
+
+ name = elf_sec_str(obj, sh.sh_name);
+ if (!name) {
+ pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
+ elf_ndxscn(scn), obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return name;
+}
+
+static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
+{
+ Elf_Data *data;
+
+ if (!scn)
+ return NULL;
+
+ data = elf_getdata(scn, 0);
+ if (!data) {
+ pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
+ elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
+ obj->path, elf_errmsg(-1));
+ return NULL;
+ }
+
+ return data;
+}
+
+static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
+ size_t off, __u32 sym_type, GElf_Sym *sym)
+{
+ Elf_Data *symbols = obj->efile.symbols;
+ size_t n = symbols->d_size / sizeof(GElf_Sym);
+ int i;
+
+ for (i = 0; i < n; i++) {
+ if (!gelf_getsym(symbols, i, sym))
+ continue;
+ if (sym->st_shndx != sec_idx || sym->st_value != off)
+ continue;
+ if (GELF_ST_TYPE(sym->st_info) != sym_type)
+ continue;
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+static bool is_sec_name_dwarf(const char *name)
+{
+ /* approximation, but the actual list is too long */
+ return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
+}
+
+static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
+{
+ /* no special handling of .strtab */
+ if (hdr->sh_type == SHT_STRTAB)
+ return true;
+
+ /* ignore .llvm_addrsig section as well */
+ if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
+ return true;
+
+ /* an object with no subprograms has an empty .text section; ignore it */
+ if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
+ strcmp(name, ".text") == 0)
+ return true;
+
+ /* DWARF sections */
+ if (is_sec_name_dwarf(name))
+ return true;
+
+ if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
+ name += sizeof(".rel") - 1;
+ /* DWARF section relocations */
+ if (is_sec_name_dwarf(name))
+ return true;
+
+ /* .BTF and .BTF.ext don't need relocations */
+ if (strcmp(name, BTF_ELF_SEC) == 0 ||
+ strcmp(name, BTF_EXT_ELF_SEC) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static int cmp_progs(const void *_a, const void *_b)
+{
+ const struct bpf_program *a = _a;
+ const struct bpf_program *b = _b;
+
+ if (a->sec_idx != b->sec_idx)
+ return a->sec_idx < b->sec_idx ? -1 : 1;
+
+ /* sec_insn_off can't be the same within the section */
+ return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
+}
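
cmp_progs() is presumably meant as a qsort() comparator keeping obj->programs ordered by section index and intra-section instruction offset, roughly:

    qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
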
+
static int bpf_object__elf_collect(struct bpf_object *obj)
{
Elf *elf = obj->efile.elf;
- GElf_Ehdr *ep = &obj->efile.ehdr;
Elf_Data *btf_ext_data = NULL;
Elf_Data *btf_data = NULL;
- Elf_Scn *scn = NULL;
int idx = 0, err = 0;
+ const char *name;
+ Elf_Data *data;
+ Elf_Scn *scn;
+ GElf_Shdr sh;
- /* Elf is corrupted/truncated, avoid calling elf_strptr. */
- if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
- pr_warn("failed to get e_shstrndx from %s\n", obj->path);
- return -LIBBPF_ERRNO__FORMAT;
+ /* a bunch of ELF parsing functionality depends on processing symbols,
+ * so do the first pass and find the symbol table
+ */
+ scn = NULL;
+ while ((scn = elf_nextscn(elf, scn)) != NULL) {
+ if (elf_sec_hdr(obj, scn, &sh))
+ return -LIBBPF_ERRNO__FORMAT;
+
+ if (sh.sh_type == SHT_SYMTAB) {
+ if (obj->efile.symbols) {
+ pr_warn("elf: multiple symbol tables in %s\n", obj->path);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ data = elf_sec_data(obj, scn);
+ if (!data)
+ return -LIBBPF_ERRNO__FORMAT;
+
+ obj->efile.symbols = data;
+ obj->efile.symbols_shndx = elf_ndxscn(scn);
+ obj->efile.strtabidx = sh.sh_link;
+ }
}
+ scn = NULL;
while ((scn = elf_nextscn(elf, scn)) != NULL) {
- char *name;
- GElf_Shdr sh;
- Elf_Data *data;
-
idx++;
- if (gelf_getshdr(scn, &sh) != &sh) {
- pr_warn("failed to get section(%d) header from %s\n",
- idx, obj->path);
+
+ if (elf_sec_hdr(obj, scn, &sh))
return -LIBBPF_ERRNO__FORMAT;
- }
- name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
- if (!name) {
- pr_warn("failed to get section(%d) name from %s\n",
- idx, obj->path);
+ name = elf_sec_str(obj, sh.sh_name);
+ if (!name)
return -LIBBPF_ERRNO__FORMAT;
- }
- data = elf_getdata(scn, 0);
- if (!data) {
- pr_warn("failed to get section(%d) data from %s(%s)\n",
- idx, name, obj->path);
+ if (ignore_elf_section(&sh, name))
+ continue;
+
+ data = elf_sec_data(obj, scn);
+ if (!data)
return -LIBBPF_ERRNO__FORMAT;
- }
- pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
+
+ pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
idx, name, (unsigned long)data->d_size,
(int)sh.sh_link, (unsigned long)sh.sh_flags,
(int)sh.sh_type);
if (strcmp(name, "license") == 0) {
- err = bpf_object__init_license(obj,
- data->d_buf,
- data->d_size);
+ err = bpf_object__init_license(obj, data->d_buf, data->d_size);
if (err)
return err;
} else if (strcmp(name, "version") == 0) {
- err = bpf_object__init_kversion(obj,
- data->d_buf,
- data->d_size);
+ err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
if (err)
return err;
} else if (strcmp(name, "maps") == 0) {
@@ -2635,31 +2882,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
btf_ext_data = data;
} else if (sh.sh_type == SHT_SYMTAB) {
- if (obj->efile.symbols) {
- pr_warn("bpf: multiple SYMTAB in %s\n",
- obj->path);
- return -LIBBPF_ERRNO__FORMAT;
- }
- obj->efile.symbols = data;
- obj->efile.symbols_shndx = idx;
- obj->efile.strtabidx = sh.sh_link;
+ /* already processed during the first pass above */
} else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
if (sh.sh_flags & SHF_EXECINSTR) {
if (strcmp(name, ".text") == 0)
obj->efile.text_shndx = idx;
- err = bpf_object__add_program(obj, data->d_buf,
- data->d_size,
- name, idx);
- if (err) {
- char errmsg[STRERR_BUFSIZE];
- char *cp;
-
- cp = libbpf_strerror_r(-err, errmsg,
- sizeof(errmsg));
- pr_warn("failed to alloc program %s (%s): %s",
- name, obj->path, cp);
+ err = bpf_object__add_programs(obj, data, name, idx);
+ if (err)
return err;
- }
} else if (strcmp(name, DATA_SEC) == 0) {
obj->efile.data = data;
obj->efile.data_shndx = idx;
@@ -2670,7 +2900,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
obj->efile.st_ops_data = data;
obj->efile.st_ops_shndx = idx;
} else {
- pr_debug("skip section(%d) %s\n", idx, name);
+ pr_info("elf: skipping unrecognized data section(%d) %s\n",
+ idx, name);
}
} else if (sh.sh_type == SHT_REL) {
int nr_sects = obj->efile.nr_reloc_sects;
@@ -2681,36 +2912,40 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
if (!section_have_execinstr(obj, sec) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
strcmp(name, ".rel" MAPS_ELF_SEC)) {
- pr_debug("skip relo %s(%d) for section(%d)\n",
- name, idx, sec);
+ pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
+ idx, name, sec,
+ elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
continue;
}
- sects = reallocarray(sects, nr_sects + 1,
- sizeof(*obj->efile.reloc_sects));
- if (!sects) {
- pr_warn("reloc_sects realloc failed\n");
+ sects = libbpf_reallocarray(sects, nr_sects + 1,
+ sizeof(*obj->efile.reloc_sects));
+ if (!sects)
return -ENOMEM;
- }
obj->efile.reloc_sects = sects;
obj->efile.nr_reloc_sects++;
obj->efile.reloc_sects[nr_sects].shdr = sh;
obj->efile.reloc_sects[nr_sects].data = data;
- } else if (sh.sh_type == SHT_NOBITS &&
- strcmp(name, BSS_SEC) == 0) {
+ } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
obj->efile.bss = data;
obj->efile.bss_shndx = idx;
} else {
- pr_debug("skip section(%d) %s\n", idx, name);
+ pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
+ (size_t)sh.sh_size);
}
}
if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
- pr_warn("Corrupted ELF file: index of strtab invalid\n");
+ pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
return -LIBBPF_ERRNO__FORMAT;
}
+
+ /* sort BPF programs by section index and in-section instruction offset
+ * for faster search */
+ qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
+
return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
@@ -2869,14 +3104,13 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
if (!obj->efile.symbols)
return 0;
- scn = elf_getscn(obj->efile.elf, obj->efile.symbols_shndx);
- if (!scn)
+ scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
+ if (elf_sec_hdr(obj, scn, &sh))
return -LIBBPF_ERRNO__FORMAT;
- if (gelf_getshdr(scn, &sh) != &sh)
- return -LIBBPF_ERRNO__FORMAT;
- n = sh.sh_size / sh.sh_entsize;
+ n = sh.sh_size / sh.sh_entsize;
pr_debug("looking for externs among %d symbols...\n", n);
+
for (i = 0; i < n; i++) {
GElf_Sym sym;
@@ -2884,13 +3118,12 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return -LIBBPF_ERRNO__FORMAT;
if (!sym_is_extern(&sym))
continue;
- ext_name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name);
+ ext_name = elf_sym_str(obj, sym.st_name);
if (!ext_name || !ext_name[0])
continue;
ext = obj->externs;
- ext = reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
+ ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
if (!ext)
return -ENOMEM;
obj->externs = ext;
@@ -2940,16 +3173,10 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return -ENOTSUP;
}
} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
- const struct btf_type *vt;
-
ksym_sec = sec;
ext->type = EXT_KSYM;
-
- vt = skip_mods_and_typedefs(obj->btf, t->type, NULL);
- if (!btf_is_void(vt)) {
- pr_warn("extern (ksym) '%s' is not typeless (void)\n", ext_name);
- return -ENOTSUP;
- }
+ skip_mods_and_typedefs(obj->btf, t->type,
+ &ext->ksym.type_id);
} else {
pr_warn("unrecognized extern section '%s'\n", sec_name);
return -ENOTSUP;
@@ -3037,20 +3264,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
return 0;
}
-static struct bpf_program *
-bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
-{
- struct bpf_program *prog;
- size_t i;
-
- for (i = 0; i < obj->nr_programs; i++) {
- prog = &obj->programs[i];
- if (prog->idx == idx)
- return prog;
- }
- return NULL;
-}
-
struct bpf_program *
bpf_object__find_program_by_title(const struct bpf_object *obj,
const char *title)
@@ -3058,12 +3271,18 @@ bpf_object__find_program_by_title(const struct bpf_object *obj,
struct bpf_program *pos;
bpf_object__for_each_program(pos, obj) {
- if (pos->section_name && !strcmp(pos->section_name, title))
+ if (pos->sec_name && !strcmp(pos->sec_name, title))
return pos;
}
return NULL;
}
+static bool prog_is_subprog(const struct bpf_object *obj,
+ const struct bpf_program *prog)
+{
+ return prog->sec_idx == obj->efile.text_shndx && obj->has_subcalls;
+}
+
struct bpf_program *
bpf_object__find_program_by_name(const struct bpf_object *obj,
const char *name)
@@ -3071,6 +3290,8 @@ bpf_object__find_program_by_name(const struct bpf_object *obj,
struct bpf_program *prog;
bpf_object__for_each_program(prog, obj) {
+ if (prog_is_subprog(obj, prog))
+ continue;
if (!strcmp(prog->name, name))
return prog;
}
@@ -3109,7 +3330,7 @@ bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
static int bpf_program__record_reloc(struct bpf_program *prog,
struct reloc_desc *reloc_desc,
- __u32 insn_idx, const char *name,
+ __u32 insn_idx, const char *sym_name,
const GElf_Sym *sym, const GElf_Rel *rel)
{
struct bpf_insn *insn = &prog->insns[insn_idx];
@@ -3117,34 +3338,38 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
struct bpf_object *obj = prog->obj;
__u32 shdr_idx = sym->st_shndx;
enum libbpf_map_type type;
+ const char *sym_sec_name;
struct bpf_map *map;
+ reloc_desc->processed = false;
+
/* sub-program call relocation */
if (insn->code == (BPF_JMP | BPF_CALL)) {
if (insn->src_reg != BPF_PSEUDO_CALL) {
- pr_warn("incorrect bpf_call opcode\n");
+ pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
return -LIBBPF_ERRNO__RELOC;
}
/* text_shndx can be 0, if no default "main" program exists */
if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
- pr_warn("bad call relo against section %u\n", shdr_idx);
+ sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
+ pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
+ prog->name, sym_name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
- if (sym->st_value % 8) {
- pr_warn("bad call relo offset: %zu\n",
- (size_t)sym->st_value);
+ if (sym->st_value % BPF_INSN_SZ) {
+ pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
+ prog->name, sym_name, (size_t)sym->st_value);
return -LIBBPF_ERRNO__RELOC;
}
reloc_desc->type = RELO_CALL;
reloc_desc->insn_idx = insn_idx;
reloc_desc->sym_off = sym->st_value;
- obj->has_pseudo_calls = true;
return 0;
}
if (insn->code != (BPF_LD | BPF_IMM | BPF_DW)) {
- pr_warn("invalid relo for insns[%d].code 0x%x\n",
- insn_idx, insn->code);
+ pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
+ prog->name, sym_name, insn_idx, insn->code);
return -LIBBPF_ERRNO__RELOC;
}
@@ -3159,12 +3384,12 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
break;
}
if (i >= n) {
- pr_warn("extern relo failed to find extern for sym %d\n",
- sym_idx);
+ pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
+ prog->name, sym_name, sym_idx);
return -LIBBPF_ERRNO__RELOC;
}
- pr_debug("found extern #%d '%s' (sym %d) for insn %u\n",
- i, ext->name, ext->sym_idx, insn_idx);
+ pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
+ prog->name, i, ext->name, ext->sym_idx, insn_idx);
reloc_desc->type = RELO_EXTERN;
reloc_desc->insn_idx = insn_idx;
reloc_desc->sym_off = i; /* sym_off stores extern index */
@@ -3172,18 +3397,19 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
}
if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
- pr_warn("invalid relo for \'%s\' in special section 0x%x; forgot to initialize global var?..\n",
- name, shdr_idx);
+ pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
+ prog->name, sym_name, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
+ sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
/* generic map reference relocation */
if (type == LIBBPF_MAP_UNSPEC) {
if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
- pr_warn("bad map relo against section %u\n",
- shdr_idx);
+ pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
+ prog->name, sym_name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
for (map_idx = 0; map_idx < nr_maps; map_idx++) {
@@ -3192,14 +3418,14 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
map->sec_idx != sym->st_shndx ||
map->sec_offset != sym->st_value)
continue;
- pr_debug("found map %zd (%s, sec %d, off %zu) for insn %u\n",
- map_idx, map->name, map->sec_idx,
+ pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
+ prog->name, map_idx, map->name, map->sec_idx,
map->sec_offset, insn_idx);
break;
}
if (map_idx >= nr_maps) {
- pr_warn("map relo failed to find map for sec %u, off %zu\n",
- shdr_idx, (size_t)sym->st_value);
+ pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
+ prog->name, sym_sec_name, (size_t)sym->st_value);
return -LIBBPF_ERRNO__RELOC;
}
reloc_desc->type = RELO_LD64;
@@ -3211,21 +3437,22 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
/* global data map relocation */
if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
- pr_warn("bad data relo against section %u\n", shdr_idx);
+ pr_warn("prog '%s': bad data relo against section '%s'\n",
+ prog->name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
for (map_idx = 0; map_idx < nr_maps; map_idx++) {
map = &obj->maps[map_idx];
if (map->libbpf_type != type)
continue;
- pr_debug("found data map %zd (%s, sec %d, off %zu) for insn %u\n",
- map_idx, map->name, map->sec_idx, map->sec_offset,
- insn_idx);
+ pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
+ prog->name, map_idx, map->name, map->sec_idx,
+ map->sec_offset, insn_idx);
break;
}
if (map_idx >= nr_maps) {
- pr_warn("data relo failed to find map for sec %u\n",
- shdr_idx);
+ pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
+ prog->name, sym_sec_name);
return -LIBBPF_ERRNO__RELOC;
}
@@ -3236,55 +3463,113 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
return 0;
}
+static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
+{
+ return insn_idx >= prog->sec_insn_off &&
+ insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
+}
+
+static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
+ size_t sec_idx, size_t insn_idx)
+{
+ int l = 0, r = obj->nr_programs - 1, m;
+ struct bpf_program *prog;
+
+ while (l < r) {
+ m = l + (r - l + 1) / 2;
+ prog = &obj->programs[m];
+
+ if (prog->sec_idx < sec_idx ||
+ (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
+ l = m;
+ else
+ r = m - 1;
+ }
+ /* the matching program could be at index l, but it still might be the
+ * wrong one, so double-check the conditions one last time
+ */
+ prog = &obj->programs[l];
+ if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
+ return prog;
+ return NULL;
+}
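A worked trace of the upper-bound binary search, assuming the sorted order
established by cmp_progs (values invented for illustration):

	/* programs[] = { {sec 2, off 0, cnt 4}, {sec 2, off 4, cnt 8},
	 *                {sec 3, off 0, cnt 2} }, lookup sec_idx=2, insn_idx=5:
	 *   l=0, r=2: m=1, (2, 4) <= (2, 5) -> l=1
	 *   l=1, r=2: m=2, (3, 0) >  (2, 5) -> r=1
	 * loop exits with l == r == 1; programs[1] covers insns [4, 12) of
	 * section 2, so it is returned. The final prog_contains_insn() check
	 * is what rejects lookups that land in a gap between programs.
	 */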
+
static int
-bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
- Elf_Data *data, struct bpf_object *obj)
+bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
{
Elf_Data *symbols = obj->efile.symbols;
+ const char *relo_sec_name, *sec_name;
+ size_t sec_idx = shdr->sh_info;
+ struct bpf_program *prog;
+ struct reloc_desc *relos;
int err, i, nrels;
+ const char *sym_name;
+ __u32 insn_idx;
+ GElf_Sym sym;
+ GElf_Rel rel;
- pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
- nrels = shdr->sh_size / shdr->sh_entsize;
+ relo_sec_name = elf_sec_str(obj, shdr->sh_name);
+ sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
+ if (!relo_sec_name || !sec_name)
+ return -EINVAL;
- prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
- if (!prog->reloc_desc) {
- pr_warn("failed to alloc memory in relocation\n");
- return -ENOMEM;
- }
- prog->nr_reloc = nrels;
+ pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
+ relo_sec_name, sec_idx, sec_name);
+ nrels = shdr->sh_size / shdr->sh_entsize;
for (i = 0; i < nrels; i++) {
- const char *name;
- __u32 insn_idx;
- GElf_Sym sym;
- GElf_Rel rel;
-
if (!gelf_getrel(data, i, &rel)) {
- pr_warn("relocation: failed to get %d reloc\n", i);
+ pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
return -LIBBPF_ERRNO__FORMAT;
}
if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
- pr_warn("relocation: symbol %"PRIx64" not found\n",
- GELF_R_SYM(rel.r_info));
+ pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
+ relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
return -LIBBPF_ERRNO__FORMAT;
}
- if (rel.r_offset % sizeof(struct bpf_insn))
+ if (rel.r_offset % BPF_INSN_SZ) {
+ pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
+ relo_sec_name, (size_t)rel.r_offset, i);
return -LIBBPF_ERRNO__FORMAT;
+ }
+
+ insn_idx = rel.r_offset / BPF_INSN_SZ;
+ /* relocations against static functions are recorded as
+ * relocations against the section that contains the function;
+ * in such cases, the symbol is STT_SECTION and sym.st_name
+ * points to the empty string (offset 0), so fetch the section
+ * name instead
+ */
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
+ sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
+ else
+ sym_name = elf_sym_str(obj, sym.st_name);
+ sym_name = sym_name ?: "<?>";
- insn_idx = rel.r_offset / sizeof(struct bpf_insn);
- name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name) ? : "<?>";
+ pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
+ relo_sec_name, i, insn_idx, sym_name);
- pr_debug("relo for shdr %u, symb %zu, value %zu, type %d, bind %d, name %d (\'%s\'), insn %u\n",
- (__u32)sym.st_shndx, (size_t)GELF_R_SYM(rel.r_info),
- (size_t)sym.st_value, GELF_ST_TYPE(sym.st_info),
- GELF_ST_BIND(sym.st_info), sym.st_name, name,
- insn_idx);
+ prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
+ if (!prog) {
+ pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
+ relo_sec_name, i, sec_name, insn_idx);
+ return -LIBBPF_ERRNO__RELOC;
+ }
- err = bpf_program__record_reloc(prog, &prog->reloc_desc[i],
- insn_idx, name, &sym, &rel);
+ relos = libbpf_reallocarray(prog->reloc_desc,
+ prog->nr_reloc + 1, sizeof(*relos));
+ if (!relos)
+ return -ENOMEM;
+ prog->reloc_desc = relos;
+
+ /* adjust insn_idx to local BPF program frame of reference */
+ insn_idx -= prog->sec_insn_off;
+ err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
+ insn_idx, sym_name, &sym, &rel);
if (err)
return err;
+
+ prog->nr_reloc++;
}
return 0;
}
@@ -3433,8 +3718,14 @@ bpf_object__probe_loading(struct bpf_object *obj)
return 0;
}
-static int
-bpf_object__probe_name(struct bpf_object *obj)
+static int probe_fd(int fd)
+{
+ if (fd >= 0)
+ close(fd);
+ return fd >= 0;
+}
+
+static int probe_kern_prog_name(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
@@ -3452,16 +3743,10 @@ bpf_object__probe_name(struct bpf_object *obj)
attr.license = "GPL";
attr.name = "test";
ret = bpf_load_program_xattr(&attr, NULL, 0);
- if (ret >= 0) {
- obj->caps.name = 1;
- close(ret);
- }
-
- return 0;
+ return probe_fd(ret);
}
-static int
-bpf_object__probe_global_data(struct bpf_object *obj)
+static int probe_kern_global_data(void)
{
struct bpf_load_program_attr prg_attr;
struct bpf_create_map_attr map_attr;
@@ -3498,16 +3783,23 @@ bpf_object__probe_global_data(struct bpf_object *obj)
prg_attr.license = "GPL";
ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
- if (ret >= 0) {
- obj->caps.global_data = 1;
- close(ret);
- }
-
close(map);
- return 0;
+ return probe_fd(ret);
+}
+
+static int probe_kern_btf(void)
+{
+ static const char strs[] = "\0int";
+ __u32 types[] = {
+ /* int */
+ BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
+ };
+
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_btf_func(struct bpf_object *obj)
+static int probe_kern_btf_func(void)
{
static const char strs[] = "\0int\0x\0a";
/* void x(int a) {} */
@@ -3520,20 +3812,12 @@ static int bpf_object__probe_btf_func(struct bpf_object *obj)
/* FUNC x */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
};
- int btf_fd;
-
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (btf_fd >= 0) {
- obj->caps.btf_func = 1;
- close(btf_fd);
- return 1;
- }
- return 0;
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
+static int probe_kern_btf_func_global(void)
{
static const char strs[] = "\0int\0x\0a";
/* static void x(int a) {} */
@@ -3546,20 +3830,12 @@ static int bpf_object__probe_btf_func_global(struct bpf_object *obj)
/* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
};
- int btf_fd;
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (btf_fd >= 0) {
- obj->caps.btf_func_global = 1;
- close(btf_fd);
- return 1;
- }
-
- return 0;
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
+static int probe_kern_btf_datasec(void)
{
static const char strs[] = "\0x\0.data";
/* static int a; */
@@ -3573,20 +3849,12 @@ static int bpf_object__probe_btf_datasec(struct bpf_object *obj)
BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
BTF_VAR_SECINFO_ENC(2, 0, 4),
};
- int btf_fd;
- btf_fd = libbpf__load_raw_btf((char *)types, sizeof(types),
- strs, sizeof(strs));
- if (btf_fd >= 0) {
- obj->caps.btf_datasec = 1;
- close(btf_fd);
- return 1;
- }
-
- return 0;
+ return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
+ strs, sizeof(strs)));
}
-static int bpf_object__probe_array_mmap(struct bpf_object *obj)
+static int probe_kern_array_mmap(void)
{
struct bpf_create_map_attr attr = {
.map_type = BPF_MAP_TYPE_ARRAY,
@@ -3595,27 +3863,17 @@ static int bpf_object__probe_array_mmap(struct bpf_object *obj)
.value_size = sizeof(int),
.max_entries = 1,
};
- int fd;
-
- fd = bpf_create_map_xattr(&attr);
- if (fd >= 0) {
- obj->caps.array_mmap = 1;
- close(fd);
- return 1;
- }
- return 0;
+ return probe_fd(bpf_create_map_xattr(&attr));
}
-static int
-bpf_object__probe_exp_attach_type(struct bpf_object *obj)
+static int probe_kern_exp_attach_type(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
- int fd;
memset(&attr, 0, sizeof(attr));
/* use any valid combination of program type and (optional)
@@ -3629,36 +3887,140 @@ bpf_object__probe_exp_attach_type(struct bpf_object *obj)
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
- fd = bpf_load_program_xattr(&attr, NULL, 0);
- if (fd >= 0) {
- obj->caps.exp_attach_type = 1;
- close(fd);
- return 1;
- }
- return 0;
+ return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
}
-static int
-bpf_object__probe_caps(struct bpf_object *obj)
-{
- int (*probe_fn[])(struct bpf_object *obj) = {
- bpf_object__probe_name,
- bpf_object__probe_global_data,
- bpf_object__probe_btf_func,
- bpf_object__probe_btf_func_global,
- bpf_object__probe_btf_datasec,
- bpf_object__probe_array_mmap,
- bpf_object__probe_exp_attach_type,
+static int probe_kern_probe_read_kernel(void)
+{
+ struct bpf_load_program_attr attr;
+ struct bpf_insn insns[] = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
+ BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
+ BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
+ BPF_EXIT_INSN(),
};
- int i, ret;
- for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
- ret = probe_fn[i](obj);
- if (ret < 0)
- pr_debug("Probe #%d failed with %d.\n", i, ret);
+ memset(&attr, 0, sizeof(attr));
+ attr.prog_type = BPF_PROG_TYPE_KPROBE;
+ attr.insns = insns;
+ attr.insns_cnt = ARRAY_SIZE(insns);
+ attr.license = "GPL";
+
+ return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
+}
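This probe feeds libbpf's helper-call sanitization elsewhere; a minimal
sketch of that consumer (simplified, the real rewrite is not in this hunk):

	if (!kernel_supports(FEAT_PROBE_READ_KERN) &&
	    insn->imm == BPF_FUNC_probe_read_kernel)
		insn->imm = BPF_FUNC_probe_read; /* legacy fallback */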
+
+static int probe_prog_bind_map(void)
+{
+ struct bpf_load_program_attr prg_attr;
+ struct bpf_create_map_attr map_attr;
+ char *cp, errmsg[STRERR_BUFSIZE];
+ struct bpf_insn insns[] = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ };
+ int ret, map, prog;
+
+ memset(&map_attr, 0, sizeof(map_attr));
+ map_attr.map_type = BPF_MAP_TYPE_ARRAY;
+ map_attr.key_size = sizeof(int);
+ map_attr.value_size = 32;
+ map_attr.max_entries = 1;
+
+ map = bpf_create_map_xattr(&map_attr);
+ if (map < 0) {
+ ret = -errno;
+ cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
+ pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
+ __func__, cp, -ret);
+ return ret;
}
- return 0;
+ memset(&prg_attr, 0, sizeof(prg_attr));
+ prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+ prg_attr.insns = insns;
+ prg_attr.insns_cnt = ARRAY_SIZE(insns);
+ prg_attr.license = "GPL";
+
+ prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
+ if (prog < 0) {
+ close(map);
+ return 0;
+ }
+
+ ret = bpf_prog_bind_map(prog, map, NULL);
+
+ close(map);
+ close(prog);
+
+ return ret >= 0;
+}
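BPF_PROG_BIND_MAP associates a map with a program so the map stays alive for
the program's lifetime even when no instruction references it (e.g., metadata
maps). The call shape, with invented fds:

	err = bpf_prog_bind_map(prog_fd, map_fd, NULL /* opts */);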
+
+enum kern_feature_result {
+ FEAT_UNKNOWN = 0,
+ FEAT_SUPPORTED = 1,
+ FEAT_MISSING = 2,
+};
+
+typedef int (*feature_probe_fn)(void);
+
+static struct kern_feature_desc {
+ const char *desc;
+ feature_probe_fn probe;
+ enum kern_feature_result res;
+} feature_probes[__FEAT_CNT] = {
+ [FEAT_PROG_NAME] = {
+ "BPF program name", probe_kern_prog_name,
+ },
+ [FEAT_GLOBAL_DATA] = {
+ "global variables", probe_kern_global_data,
+ },
+ [FEAT_BTF] = {
+ "minimal BTF", probe_kern_btf,
+ },
+ [FEAT_BTF_FUNC] = {
+ "BTF functions", probe_kern_btf_func,
+ },
+ [FEAT_BTF_GLOBAL_FUNC] = {
+ "BTF global function", probe_kern_btf_func_global,
+ },
+ [FEAT_BTF_DATASEC] = {
+ "BTF data section and variable", probe_kern_btf_datasec,
+ },
+ [FEAT_ARRAY_MMAP] = {
+ "ARRAY map mmap()", probe_kern_array_mmap,
+ },
+ [FEAT_EXP_ATTACH_TYPE] = {
+ "BPF_PROG_LOAD expected_attach_type attribute",
+ probe_kern_exp_attach_type,
+ },
+ [FEAT_PROBE_READ_KERN] = {
+ "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
+ },
+ [FEAT_PROG_BIND_MAP] = {
+ "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
+ }
+};
+
+static bool kernel_supports(enum kern_feature_id feat_id)
+{
+ struct kern_feature_desc *feat = &feature_probes[feat_id];
+ int ret;
+
+ if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
+ ret = feat->probe();
+ if (ret > 0) {
+ WRITE_ONCE(feat->res, FEAT_SUPPORTED);
+ } else if (ret == 0) {
+ WRITE_ONCE(feat->res, FEAT_MISSING);
+ } else {
+ pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
+ WRITE_ONCE(feat->res, FEAT_MISSING);
+ }
+ }
+
+ return READ_ONCE(feat->res) == FEAT_SUPPORTED;
}
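Each probe runs at most once per process; later calls are served from the
cached res value. A usage sketch (simplified, illustrative only):

	if (!kernel_supports(FEAT_BTF)) {
		/* old kernel: drop BTF rather than failing the load */
		btf__free(obj->btf);
		obj->btf = NULL;
	}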
static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
@@ -3760,7 +4122,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
memset(&create_attr, 0, sizeof(create_attr));
- if (obj->caps.name)
+ if (kernel_supports(FEAT_PROG_NAME))
create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
@@ -3841,6 +4203,36 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
return 0;
}
+static int init_map_slots(struct bpf_map *map)
+{
+ const struct bpf_map *targ_map;
+ unsigned int i;
+ int fd, err;
+
+ for (i = 0; i < map->init_slots_sz; i++) {
+ if (!map->init_slots[i])
+ continue;
+
+ targ_map = map->init_slots[i];
+ fd = bpf_map__fd(targ_map);
+ err = bpf_map_update_elem(map->fd, &i, &fd, 0);
+ if (err) {
+ err = -errno;
+ pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
+ map->name, i, targ_map->name,
+ fd, err);
+ return err;
+ }
+ pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
+ map->name, i, targ_map->name, fd);
+ }
+
+ zfree(&map->init_slots);
+ map->init_slots_sz = 0;
+
+ return 0;
+}
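The slots initialized here come from declarative map-in-map definitions on
the BPF side; a sketch with made-up map names, where struct inner_map is a
map definition declared elsewhere (the __array()/.values initializer is the
libbpf convention for pre-filling slots):

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
		__uint(max_entries, 2);
		__array(values, struct inner_map);
	} outer_map SEC(".maps") = {
		.values = { [0] = &inner_map_a, [1] = &inner_map_b },
	};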
+
static int
bpf_object__create_maps(struct bpf_object *obj)
{
@@ -3864,47 +4256,29 @@ bpf_object__create_maps(struct bpf_object *obj)
if (map->fd >= 0) {
pr_debug("map '%s': skipping creation (preset fd=%d)\n",
map->name, map->fd);
- continue;
- }
-
- err = bpf_object__create_map(obj, map);
- if (err)
- goto err_out;
-
- pr_debug("map '%s': created successfully, fd=%d\n", map->name,
- map->fd);
-
- if (bpf_map__is_internal(map)) {
- err = bpf_object__populate_internal_map(obj, map);
- if (err < 0) {
- zclose(map->fd);
+ } else {
+ err = bpf_object__create_map(obj, map);
+ if (err)
goto err_out;
- }
- }
- if (map->init_slots_sz) {
- for (j = 0; j < map->init_slots_sz; j++) {
- const struct bpf_map *targ_map;
- int fd;
+ pr_debug("map '%s': created successfully, fd=%d\n",
+ map->name, map->fd);
- if (!map->init_slots[j])
- continue;
+ if (bpf_map__is_internal(map)) {
+ err = bpf_object__populate_internal_map(obj, map);
+ if (err < 0) {
+ zclose(map->fd);
+ goto err_out;
+ }
+ }
- targ_map = map->init_slots[j];
- fd = bpf_map__fd(targ_map);
- err = bpf_map_update_elem(map->fd, &j, &fd, 0);
- if (err) {
- err = -errno;
- pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
- map->name, j, targ_map->name,
- fd, err);
+ if (map->init_slots_sz) {
+ err = init_map_slots(map);
+ if (err < 0) {
+ zclose(map->fd);
goto err_out;
}
- pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
- map->name, j, targ_map->name, fd);
}
- zfree(&map->init_slots);
- map->init_slots_sz = 0;
}
if (map->pin_path && !map->pinned) {
@@ -3929,75 +4303,6 @@ err_out:
return err;
}
-static int
-check_btf_ext_reloc_err(struct bpf_program *prog, int err,
- void *btf_prog_info, const char *info_name)
-{
- if (err != -ENOENT) {
- pr_warn("Error in loading %s for sec %s.\n",
- info_name, prog->section_name);
- return err;
- }
-
- /* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
-
- if (btf_prog_info) {
- /*
- * Some info has already been found but has problem
- * in the last btf_ext reloc. Must have to error out.
- */
- pr_warn("Error in relocating %s for sec %s.\n",
- info_name, prog->section_name);
- return err;
- }
-
- /* Have problem loading the very first info. Ignore the rest. */
- pr_warn("Cannot find %s for main program sec %s. Ignore all %s.\n",
- info_name, prog->section_name, info_name);
- return 0;
-}
-
-static int
-bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
- const char *section_name, __u32 insn_offset)
-{
- int err;
-
- if (!insn_offset || prog->func_info) {
- /*
- * !insn_offset => main program
- *
- * For sub prog, the main program's func_info has to
- * be loaded first (i.e. prog->func_info != NULL)
- */
- err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
- section_name, insn_offset,
- &prog->func_info,
- &prog->func_info_cnt);
- if (err)
- return check_btf_ext_reloc_err(prog, err,
- prog->func_info,
- "bpf_func_info");
-
- prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
- }
-
- if (!insn_offset || prog->line_info) {
- err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
- section_name, insn_offset,
- &prog->line_info,
- &prog->line_info_cnt);
- if (err)
- return check_btf_ext_reloc_err(prog, err,
- prog->line_info,
- "bpf_line_info");
-
- prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
- }
-
- return 0;
-}
-
#define BPF_CORE_SPEC_MAX_LEN 64
/* represents BPF CO-RE field or array element accessor */
@@ -4011,6 +4316,10 @@ struct bpf_core_spec {
const struct btf *btf;
/* high-level spec: named fields and array indices only */
struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
+ /* original unresolved (no skip_mods_or_typedefs) root type ID */
+ __u32 root_type_id;
+ /* CO-RE relocation kind */
+ enum bpf_core_relo_kind relo_kind;
/* high-level spec length */
int len;
/* raw, low-level spec: 1-to-1 with accessor spec string */
@@ -4041,8 +4350,66 @@ static bool is_flex_arr(const struct btf *btf,
return acc->idx == btf_vlen(t) - 1;
}
+static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_FIELD_BYTE_OFFSET: return "byte_off";
+ case BPF_FIELD_BYTE_SIZE: return "byte_sz";
+ case BPF_FIELD_EXISTS: return "field_exists";
+ case BPF_FIELD_SIGNED: return "signed";
+ case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
+ case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
+ case BPF_TYPE_ID_LOCAL: return "local_type_id";
+ case BPF_TYPE_ID_TARGET: return "target_type_id";
+ case BPF_TYPE_EXISTS: return "type_exists";
+ case BPF_TYPE_SIZE: return "type_size";
+ case BPF_ENUMVAL_EXISTS: return "enumval_exists";
+ case BPF_ENUMVAL_VALUE: return "enumval_value";
+ default: return "unknown";
+ }
+}
+
+static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_FIELD_BYTE_OFFSET:
+ case BPF_FIELD_BYTE_SIZE:
+ case BPF_FIELD_EXISTS:
+ case BPF_FIELD_SIGNED:
+ case BPF_FIELD_LSHIFT_U64:
+ case BPF_FIELD_RSHIFT_U64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_TYPE_ID_LOCAL:
+ case BPF_TYPE_ID_TARGET:
+ case BPF_TYPE_EXISTS:
+ case BPF_TYPE_SIZE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
+{
+ switch (kind) {
+ case BPF_ENUMVAL_EXISTS:
+ case BPF_ENUMVAL_VALUE:
+ return true;
+ default:
+ return false;
+ }
+}
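For cross-reference, these kinds map onto the bpf_core_read.h convenience
macros (assuming the helper set shipped alongside this change):

	/* field-based:   bpf_core_read(), bpf_core_field_exists(),
	 *                bpf_core_field_size()
	 * type-based:    bpf_core_type_exists(), bpf_core_type_size(),
	 *                bpf_core_type_id_local(), bpf_core_type_id_kernel()
	 * enumval-based: bpf_core_enum_value_exists(), bpf_core_enum_value()
	 */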
+
/*
- * Turn bpf_field_reloc into a low- and high-level spec representation,
+ * Turn bpf_core_relo into a low- and high-level spec representation,
* validating correctness along the way, as well as calculating resulting
* field bit offset, specified by accessor string. Low-level spec captures
* every single level of nestedness, including traversing anonymous
@@ -4071,10 +4438,17 @@ static bool is_flex_arr(const struct btf *btf,
* - field 'a' access (corresponds to '2' in low-level spec);
* - array element #3 access (corresponds to '3' in low-level spec).
*
+ * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
+ * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
+ * spec and raw_spec are kept empty.
+ *
+ * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the
+ * access string to specify the index of the enumerator value to relocate.
*/
-static int bpf_core_spec_parse(const struct btf *btf,
+static int bpf_core_parse_spec(const struct btf *btf,
__u32 type_id,
const char *spec_str,
+ enum bpf_core_relo_kind relo_kind,
struct bpf_core_spec *spec)
{
int access_idx, parsed_len, i;
@@ -4089,6 +4463,15 @@ static int bpf_core_spec_parse(const struct btf *btf,
memset(spec, 0, sizeof(*spec));
spec->btf = btf;
+ spec->root_type_id = type_id;
+ spec->relo_kind = relo_kind;
+
+ /* type-based relocations don't have a field access string */
+ if (core_relo_is_type_based(relo_kind)) {
+ if (strcmp(spec_str, "0"))
+ return -EINVAL;
+ return 0;
+ }
/* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
while (*spec_str) {
@@ -4105,16 +4488,28 @@ static int bpf_core_spec_parse(const struct btf *btf,
if (spec->raw_len == 0)
return -EINVAL;
- /* first spec value is always reloc type array index */
t = skip_mods_and_typedefs(btf, type_id, &id);
if (!t)
return -EINVAL;
access_idx = spec->raw_spec[0];
- spec->spec[0].type_id = id;
- spec->spec[0].idx = access_idx;
+ acc = &spec->spec[0];
+ acc->type_id = id;
+ acc->idx = access_idx;
spec->len++;
+ if (core_relo_is_enumval_based(relo_kind)) {
+ if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
+ return -EINVAL;
+
+ /* record enumerator name in the first accessor */
+ acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
+ return 0;
+ }
+
+ if (!core_relo_is_field_based(relo_kind))
+ return -EINVAL;
+
sz = btf__resolve_size(btf, id);
if (sz < 0)
return sz;
@@ -4172,8 +4567,8 @@ static int bpf_core_spec_parse(const struct btf *btf,
return sz;
spec->bit_offset += access_idx * sz * 8;
} else {
- pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %d\n",
- type_id, spec_str, i, id, btf_kind(t));
+ pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
+ type_id, spec_str, i, id, btf_kind_str(t));
return -EINVAL;
}
}
@@ -4223,16 +4618,16 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
{
size_t local_essent_len, targ_essent_len;
const char *local_name, *targ_name;
- const struct btf_type *t;
+ const struct btf_type *t, *local_t;
struct ids_vec *cand_ids;
__u32 *new_ids;
int i, err, n;
- t = btf__type_by_id(local_btf, local_type_id);
- if (!t)
+ local_t = btf__type_by_id(local_btf, local_type_id);
+ if (!local_t)
return ERR_PTR(-EINVAL);
- local_name = btf__name_by_offset(local_btf, t->name_off);
+ local_name = btf__name_by_offset(local_btf, local_t->name_off);
if (str_is_empty(local_name))
return ERR_PTR(-EINVAL);
local_essent_len = bpf_core_essential_name_len(local_name);
@@ -4244,12 +4639,11 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
n = btf__get_nr_types(targ_btf);
for (i = 1; i <= n; i++) {
t = btf__type_by_id(targ_btf, i);
- targ_name = btf__name_by_offset(targ_btf, t->name_off);
- if (str_is_empty(targ_name))
+ if (btf_kind(t) != btf_kind(local_t))
continue;
- t = skip_mods_and_typedefs(targ_btf, i, NULL);
- if (!btf_is_composite(t) && !btf_is_array(t))
+ targ_name = btf__name_by_offset(targ_btf, t->name_off);
+ if (str_is_empty(targ_name))
continue;
targ_essent_len = bpf_core_essential_name_len(targ_name);
@@ -4257,11 +4651,12 @@ static struct ids_vec *bpf_core_find_cands(const struct btf *local_btf,
continue;
if (strncmp(local_name, targ_name, local_essent_len) == 0) {
- pr_debug("[%d] %s: found candidate [%d] %s\n",
- local_type_id, local_name, i, targ_name);
- new_ids = reallocarray(cand_ids->data,
- cand_ids->len + 1,
- sizeof(*cand_ids->data));
+ pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s\n",
+ local_type_id, btf_kind_str(local_t),
+ local_name, i, btf_kind_str(t), targ_name);
+ new_ids = libbpf_reallocarray(cand_ids->data,
+ cand_ids->len + 1,
+ sizeof(*cand_ids->data));
if (!new_ids) {
err = -ENOMEM;
goto err_out;
@@ -4276,8 +4671,9 @@ err_out:
return ERR_PTR(err);
}
-/* Check two types for compatibility, skipping const/volatile/restrict and
- * typedefs, to ensure we are relocating compatible entities:
+/* Check two types for compatibility for the purpose of field access
+ * relocation. const/volatile/restrict and typedefs are skipped to ensure we
+ * are relocating semantically compatible entities:
* - any two STRUCTs/UNIONs are compatible and can be mixed;
* - any two FWDs are compatible, if their names match (modulo flavor suffix);
* - any two PTRs are always compatible;
@@ -4432,6 +4828,100 @@ static int bpf_core_match_member(const struct btf *local_btf,
return 0;
}
+/* Check local and target types for compatibility. This check is used for
+ * type-based CO-RE relocations and follow slightly different rules than
+ * field-based relocations. This function assumes that root types were already
+ * checked for name match. Beyond that initial root-level name check, names
+ * are completely ignored. Compatibility rules are as follows:
+ * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
+ * kind should match for local and target types (i.e., STRUCT is not
+ * compatible with UNION);
+ * - for ENUMs, the size is ignored;
+ * - for INT, size and signedness are ignored;
+ * - for ARRAY, dimensionality is ignored, element types are checked for
+ * compatibility recursively;
+ * - CONST/VOLATILE/RESTRICT modifiers are ignored;
+ * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
+ * - FUNC_PROTOs are compatible if they have compatible signature: same
+ * number of input args and compatible return and argument types.
+ * These rules are not set in stone and probably will be adjusted as we get
+ * more experience with using BPF CO-RE relocations.
+ */
+static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
+ const struct btf *targ_btf, __u32 targ_id)
+{
+ const struct btf_type *local_type, *targ_type;
+ int depth = 32; /* max recursion depth */
+
+ /* caller made sure that names match (ignoring flavor suffix) */
+ local_type = btf__type_by_id(local_btf, local_id);
+ targ_type = btf__type_by_id(targ_btf, targ_id);
+ if (btf_kind(local_type) != btf_kind(targ_type))
+ return 0;
+
+recur:
+ depth--;
+ if (depth < 0)
+ return -EINVAL;
+
+ local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
+ targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
+ if (!local_type || !targ_type)
+ return -EINVAL;
+
+ if (btf_kind(local_type) != btf_kind(targ_type))
+ return 0;
+
+ switch (btf_kind(local_type)) {
+ case BTF_KIND_UNKN:
+ case BTF_KIND_STRUCT:
+ case BTF_KIND_UNION:
+ case BTF_KIND_ENUM:
+ case BTF_KIND_FWD:
+ return 1;
+ case BTF_KIND_INT:
+ /* just reject deprecated bitfield-like integers; all other
+ * integers are by default compatible with each other
+ */
+ return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
+ case BTF_KIND_PTR:
+ local_id = local_type->type;
+ targ_id = targ_type->type;
+ goto recur;
+ case BTF_KIND_ARRAY:
+ local_id = btf_array(local_type)->type;
+ targ_id = btf_array(targ_type)->type;
+ goto recur;
+ case BTF_KIND_FUNC_PROTO: {
+ struct btf_param *local_p = btf_params(local_type);
+ struct btf_param *targ_p = btf_params(targ_type);
+ __u16 local_vlen = btf_vlen(local_type);
+ __u16 targ_vlen = btf_vlen(targ_type);
+ int i, err;
+
+ if (local_vlen != targ_vlen)
+ return 0;
+
+ for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
+ skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
+ skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
+ err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
+ if (err <= 0)
+ return err;
+ }
+
+ /* tail recurse for return type check */
+ skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
+ skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
+ goto recur;
+ }
+ default:
+ pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
+ btf_kind_str(local_type), local_id, targ_id);
+ return 0;
+ }
+}
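A few consequences of these rules, with hypothetical types for illustration:

	/* struct s { int a; }  vs  struct s { long a, b; }  -> compatible
	 * struct s             vs  union s                  -> incompatible
	 * u32                  vs  u64                      -> compatible
	 * int                  vs  int *                    -> incompatible
	 * int (*)(int)         vs  long (*)(short)          -> compatible
	 * int (*)(int)         vs  int (*)(int, int)        -> incompatible
	 */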
+
/*
* Try to match local spec to a target type and, if successful, produce full
* target spec (high-level, low-level + bit offset).
@@ -4447,10 +4937,51 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
memset(targ_spec, 0, sizeof(*targ_spec));
targ_spec->btf = targ_btf;
+ targ_spec->root_type_id = targ_id;
+ targ_spec->relo_kind = local_spec->relo_kind;
+
+ if (core_relo_is_type_based(local_spec->relo_kind)) {
+ return bpf_core_types_are_compat(local_spec->btf,
+ local_spec->root_type_id,
+ targ_btf, targ_id);
+ }
local_acc = &local_spec->spec[0];
targ_acc = &targ_spec->spec[0];
+ if (core_relo_is_enumval_based(local_spec->relo_kind)) {
+ size_t local_essent_len, targ_essent_len;
+ const struct btf_enum *e;
+ const char *targ_name;
+
+ /* has to resolve to an enum */
+ targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
+ if (!btf_is_enum(targ_type))
+ return 0;
+
+ local_essent_len = bpf_core_essential_name_len(local_acc->name);
+
+ for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
+ targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
+ targ_essent_len = bpf_core_essential_name_len(targ_name);
+ if (targ_essent_len != local_essent_len)
+ continue;
+ if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
+ targ_acc->type_id = targ_id;
+ targ_acc->idx = i;
+ targ_acc->name = targ_name;
+ targ_spec->len++;
+ targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
+ targ_spec->raw_len++;
+ return 1;
+ }
+ }
+ return 0;
+ }
+
+ if (!core_relo_is_field_based(local_spec->relo_kind))
+ return -EINVAL;
+
for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
&targ_id);
@@ -4507,22 +5038,42 @@ static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
}
static int bpf_core_calc_field_relo(const struct bpf_program *prog,
- const struct bpf_field_reloc *relo,
+ const struct bpf_core_relo *relo,
const struct bpf_core_spec *spec,
- __u32 *val, bool *validate)
+ __u32 *val, __u32 *field_sz, __u32 *type_id,
+ bool *validate)
{
- const struct bpf_core_accessor *acc = &spec->spec[spec->len - 1];
- const struct btf_type *t = btf__type_by_id(spec->btf, acc->type_id);
- __u32 byte_off, byte_sz, bit_off, bit_sz;
+ const struct bpf_core_accessor *acc;
+ const struct btf_type *t;
+ __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
const struct btf_member *m;
const struct btf_type *mt;
bool bitfield;
__s64 sz;
+ *field_sz = 0;
+
+ if (relo->kind == BPF_FIELD_EXISTS) {
+ *val = spec ? 1 : 0;
+ return 0;
+ }
+
+ if (!spec)
+ return -EUCLEAN; /* request instruction poisoning */
+
+ acc = &spec->spec[spec->len - 1];
+ t = btf__type_by_id(spec->btf, acc->type_id);
+
/* a[n] accessor needs special handling */
if (!acc->name) {
if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
*val = spec->bit_offset / 8;
+ /* remember field size for load/store mem size */
+ sz = btf__resolve_size(spec->btf, acc->type_id);
+ if (sz < 0)
+ return -EINVAL;
+ *field_sz = sz;
+ *type_id = acc->type_id;
} else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
sz = btf__resolve_size(spec->btf, acc->type_id);
if (sz < 0)
@@ -4530,8 +5081,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
*val = sz;
} else {
pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
+ prog->name, relo->kind, relo->insn_off / 8);
return -EINVAL;
}
if (validate)
@@ -4540,7 +5090,7 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
}
m = btf_members(t) + acc->idx;
- mt = skip_mods_and_typedefs(spec->btf, m->type, NULL);
+ mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
bit_off = spec->bit_offset;
bit_sz = btf_member_bitfield_size(t, acc->idx);
@@ -4553,15 +5103,14 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
if (byte_sz >= 8) {
/* bitfield can't be read with 64-bit read */
pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
+ prog->name, relo->kind, relo->insn_off / 8);
return -E2BIG;
}
byte_sz *= 2;
byte_off = bit_off / 8 / byte_sz * byte_sz;
}
} else {
- sz = btf__resolve_size(spec->btf, m->type);
+ sz = btf__resolve_size(spec->btf, field_type_id);
if (sz < 0)
return -EINVAL;
byte_sz = sz;
@@ -4579,6 +5128,10 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
switch (relo->kind) {
case BPF_FIELD_BYTE_OFFSET:
*val = byte_off;
+ if (!bitfield) {
+ *field_sz = byte_sz;
+ *type_id = field_type_id;
+ }
break;
case BPF_FIELD_BYTE_SIZE:
*val = byte_sz;
@@ -4604,117 +5157,384 @@ static int bpf_core_calc_field_relo(const struct bpf_program *prog,
break;
case BPF_FIELD_EXISTS:
default:
- pr_warn("prog '%s': unknown relo %d at insn #%d\n",
- bpf_program__title(prog, false),
- relo->kind, relo->insn_off / 8);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
return 0;
}
+static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
+ const struct bpf_core_spec *spec,
+ __u32 *val)
+{
+ __s64 sz;
+
+ /* type-based relos return zero when target type is not found */
+ if (!spec) {
+ *val = 0;
+ return 0;
+ }
+
+ switch (relo->kind) {
+ case BPF_TYPE_ID_TARGET:
+ *val = spec->root_type_id;
+ break;
+ case BPF_TYPE_EXISTS:
+ *val = 1;
+ break;
+ case BPF_TYPE_SIZE:
+ sz = btf__resolve_size(spec->btf, spec->root_type_id);
+ if (sz < 0)
+ return -EINVAL;
+ *val = sz;
+ break;
+ case BPF_TYPE_ID_LOCAL:
+ /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
+ const struct bpf_core_spec *spec,
+ __u32 *val)
+{
+ const struct btf_type *t;
+ const struct btf_enum *e;
+
+ switch (relo->kind) {
+ case BPF_ENUMVAL_EXISTS:
+ *val = spec ? 1 : 0;
+ break;
+ case BPF_ENUMVAL_VALUE:
+ if (!spec)
+ return -EUCLEAN; /* request instruction poisoning */
+ t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
+ e = btf_enum(t) + spec->spec[0].idx;
+ *val = e->val;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+struct bpf_core_relo_res {
+ /* expected value in the instruction, unless validate == false */
+ __u32 orig_val;
+ /* new value that needs to be patched up to */
+ __u32 new_val;
+ /* relocation unsuccessful, poison instruction, but don't fail load */
+ bool poison;
+ /* some relocations can't be validated against orig_val */
+ bool validate;
+ /* for field byte offset relocations of the forms:
+ * *(T *)(rX + <off>) = rY
+ * rX = *(T *)(rY + <off>),
+ * we remember original and resolved field size to adjust direct
+ * memory loads of pointers and integers; this is necessary for 32-bit
+ * host kernel architectures, but also makes it possible to automatically
+ * relocate fields that were resized from, e.g., u32 to u64, etc.
+ */
+ bool fail_memsz_adjust;
+ __u32 orig_sz;
+ __u32 orig_type_id;
+ __u32 new_sz;
+ __u32 new_type_id;
+};
+
+/* Calculate original and target relocation values, given local and target
+ * specs and relocation kind. These values are calculated for each candidate.
+ * If there are multiple candidates, resulting values should all be consistent
+ * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
+ * If an instruction has to be poisoned, res->poison will be set to true.
+ */
+static int bpf_core_calc_relo(const struct bpf_program *prog,
+ const struct bpf_core_relo *relo,
+ int relo_idx,
+ const struct bpf_core_spec *local_spec,
+ const struct bpf_core_spec *targ_spec,
+ struct bpf_core_relo_res *res)
+{
+ int err = -EOPNOTSUPP;
+
+ res->orig_val = 0;
+ res->new_val = 0;
+ res->poison = false;
+ res->validate = true;
+ res->fail_memsz_adjust = false;
+ res->orig_sz = res->new_sz = 0;
+ res->orig_type_id = res->new_type_id = 0;
+
+ if (core_relo_is_field_based(relo->kind)) {
+ err = bpf_core_calc_field_relo(prog, relo, local_spec,
+ &res->orig_val, &res->orig_sz,
+ &res->orig_type_id, &res->validate);
+ err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
+ &res->new_val, &res->new_sz,
+ &res->new_type_id, NULL);
+ if (err)
+ goto done;
+ /* Validate if it's safe to adjust load/store memory size.
+ * Adjustments are performed only if original and new memory
+ * sizes differ.
+ */
+ res->fail_memsz_adjust = false;
+ if (res->orig_sz != res->new_sz) {
+ const struct btf_type *orig_t, *new_t;
+
+ orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
+ new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
+
+ /* There are two use cases in which it's safe to
+ * adjust load/store's mem size:
+ * - reading a 32-bit kernel pointer, while on the
+ * BPF side pointers are always 64-bit; in this case
+ * it's safe to "downsize" the instruction size, as the
+ * pointer is treated as an unsigned integer with
+ * zero-extended upper 32 bits;
+ * - reading unsigned integers, again because
+ * zero-extension preserves the value correctly.
+ *
+ * In all other cases it's incorrect to attempt to
+ * load/store the field, because the read value would be
+ * wrong, so we poison the relocated instruction.
+ */
+ if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
+ goto done;
+ if (btf_is_int(orig_t) && btf_is_int(new_t) &&
+ btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
+ btf_int_encoding(new_t) != BTF_INT_SIGNED)
+ goto done;
+
+ /* mark as invalid mem size adjustment, but this will
+ * only be checked for LDX/STX/ST insns
+ */
+ res->fail_memsz_adjust = true;
+ }
+ } else if (core_relo_is_type_based(relo->kind)) {
+ err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
+ err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
+ } else if (core_relo_is_enumval_based(relo->kind)) {
+ err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
+ err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
+ }
+
+done:
+ if (err == -EUCLEAN) {
+ /* EUCLEAN is used to signal instruction poisoning request */
+ res->poison = true;
+ err = 0;
+ } else if (err == -EOPNOTSUPP) {
+ /* EOPNOTSUPP means unknown/unsupported relocation */
+ pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
+ prog->name, relo_idx, core_relo_kind_str(relo->kind),
+ relo->kind, relo->insn_off / 8);
+ }
+
+ return err;
+}
+
+/*
+ * Turn an instruction for which CO-RE relocation failed into an invalid one
+ * with a distinct signature.
+ */
+static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
+ int insn_idx, struct bpf_insn *insn)
+{
+ pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
+ prog->name, relo_idx, insn_idx);
+ insn->code = BPF_JMP | BPF_CALL;
+ insn->dst_reg = 0;
+ insn->src_reg = 0;
+ insn->off = 0;
+ /* if this instruction is reachable (not dead code),
+ * verifier will complain with the following message:
+ * invalid func unknown#195896080
+ */
+ insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
+}
+
+static bool is_ldimm64(struct bpf_insn *insn)
+{
+ return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
+static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
+{
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW: return 8;
+ case BPF_W: return 4;
+ case BPF_H: return 2;
+ case BPF_B: return 1;
+ default: return -1;
+ }
+}
+
+static int insn_bytes_to_bpf_size(__u32 sz)
+{
+ switch (sz) {
+ case 8: return BPF_DW;
+ case 4: return BPF_W;
+ case 2: return BPF_H;
+ case 1: return BPF_B;
+ default: return -1;
+ }
+}
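An example of the rewrite these helpers enable (offsets and sizes invented):
if a field read as u32 locally was resized to u64 in the running kernel, then

	rX = *(u32 *)(rY + 12);		/* BPF_LDX | BPF_MEM | BPF_W  */

is patched by bpf_core_patch_insn() below into

	rX = *(u64 *)(rY + <new off>);	/* BPF_LDX | BPF_MEM | BPF_DW */

which is safe for unsigned integers and pointers thanks to zero-extension,
and is refused (the instruction is poisoned) otherwise.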
+
/*
* Patch relocatable BPF instruction.
*
* Patched value is determined by relocation kind and target specification.
- * For field existence relocation target spec will be NULL if field is not
- * found.
+ * For existence relocations target spec will be NULL if field/type is not found.
* Expected insn->imm value is determined using relocation kind and local
* spec, and is checked before patching instruction. If actual insn->imm value
* is wrong, bail out with error.
*
- * Currently three kinds of BPF instructions are supported:
+ * Currently supported classes of BPF instruction are:
* 1. rX = <imm> (assignment with immediate operand);
* 2. rX += <imm> (arithmetic operations with immediate operand);
+ * 3. rX = <imm64> (load with 64-bit immediate value);
+ * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
+ * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
+ * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
*/
-static int bpf_core_reloc_insn(struct bpf_program *prog,
- const struct bpf_field_reloc *relo,
+static int bpf_core_patch_insn(struct bpf_program *prog,
+ const struct bpf_core_relo *relo,
int relo_idx,
- const struct bpf_core_spec *local_spec,
- const struct bpf_core_spec *targ_spec)
+ const struct bpf_core_relo_res *res)
{
__u32 orig_val, new_val;
struct bpf_insn *insn;
- bool validate = true;
- int insn_idx, err;
+ int insn_idx;
__u8 class;
- if (relo->insn_off % sizeof(struct bpf_insn))
+ if (relo->insn_off % BPF_INSN_SZ)
return -EINVAL;
- insn_idx = relo->insn_off / sizeof(struct bpf_insn);
+ insn_idx = relo->insn_off / BPF_INSN_SZ;
+ /* adjust insn_idx from section frame of reference to the local
+ * program's frame of reference; (sub-)program code is not yet
+ * relocated, so it's enough to just subtract in-section offset
+ */
+ insn_idx = insn_idx - prog->sec_insn_off;
insn = &prog->insns[insn_idx];
class = BPF_CLASS(insn->code);
- if (relo->kind == BPF_FIELD_EXISTS) {
- orig_val = 1; /* can't generate EXISTS relo w/o local field */
- new_val = targ_spec ? 1 : 0;
- } else if (!targ_spec) {
- pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
- bpf_program__title(prog, false), relo_idx, insn_idx);
- insn->code = BPF_JMP | BPF_CALL;
- insn->dst_reg = 0;
- insn->src_reg = 0;
- insn->off = 0;
- /* if this instruction is reachable (not a dead code),
- * verifier will complain with the following message:
- * invalid func unknown#195896080
+ if (res->poison) {
+poison:
+ /* poison the second part of ldimm64 to avoid a confusing
+ * verifier error about "unknown opcode 00"
*/
- insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
+ if (is_ldimm64(insn))
+ bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
+ bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
return 0;
- } else {
- err = bpf_core_calc_field_relo(prog, relo, local_spec,
- &orig_val, &validate);
- if (err)
- return err;
- err = bpf_core_calc_field_relo(prog, relo, targ_spec,
- &new_val, NULL);
- if (err)
- return err;
}
+ orig_val = res->orig_val;
+ new_val = res->new_val;
+
switch (class) {
case BPF_ALU:
case BPF_ALU64:
if (BPF_SRC(insn->code) != BPF_K)
return -EINVAL;
- if (validate && insn->imm != orig_val) {
+ if (res->validate && insn->imm != orig_val) {
pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
- bpf_program__title(prog, false), relo_idx,
+ prog->name, relo_idx,
insn_idx, insn->imm, orig_val, new_val);
return -EINVAL;
}
orig_val = insn->imm;
insn->imm = new_val;
pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
- bpf_program__title(prog, false), relo_idx, insn_idx,
+ prog->name, relo_idx, insn_idx,
orig_val, new_val);
break;
case BPF_LDX:
case BPF_ST:
case BPF_STX:
- if (validate && insn->off != orig_val) {
- pr_warn("prog '%s': relo #%d: unexpected insn #%d (LD/LDX/ST/STX) value: got %u, exp %u -> %u\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, insn->off, orig_val, new_val);
+ if (res->validate && insn->off != orig_val) {
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
+ prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
return -EINVAL;
}
if (new_val > SHRT_MAX) {
pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, new_val);
+ prog->name, relo_idx, insn_idx, new_val);
return -ERANGE;
}
+ if (res->fail_memsz_adjust) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
+ "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
+ prog->name, relo_idx, insn_idx);
+ goto poison;
+ }
+
orig_val = insn->off;
insn->off = new_val;
pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
- bpf_program__title(prog, false), relo_idx, insn_idx,
- orig_val, new_val);
+ prog->name, relo_idx, insn_idx, orig_val, new_val);
+
+ if (res->new_sz != res->orig_sz) {
+ int insn_bytes_sz, insn_bpf_sz;
+
+ insn_bytes_sz = insn_bpf_size_to_bytes(insn);
+ if (insn_bytes_sz != res->orig_sz) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
+ prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
+ return -EINVAL;
+ }
+
+ insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
+ if (insn_bpf_sz < 0) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
+ prog->name, relo_idx, insn_idx, res->new_sz);
+ return -EINVAL;
+ }
+
+ insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
+ pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
+ prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
+ }
+ break;
+ case BPF_LD: {
+ __u64 imm;
+
+ if (!is_ldimm64(insn) ||
+ insn[0].src_reg != 0 || insn[0].off != 0 ||
+ insn_idx + 1 >= prog->insns_cnt ||
+ insn[1].code != 0 || insn[1].dst_reg != 0 ||
+ insn[1].src_reg != 0 || insn[1].off != 0) {
+ pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
+ prog->name, relo_idx, insn_idx);
+ return -EINVAL;
+ }
+
+ imm = insn[0].imm + ((__u64)insn[1].imm << 32);
+ if (res->validate && imm != orig_val) {
+ pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
+ prog->name, relo_idx,
+ insn_idx, (unsigned long long)imm,
+ orig_val, new_val);
+ return -EINVAL;
+ }
+
+ insn[0].imm = new_val;
+ insn[1].imm = 0; /* currently only 32-bit values are supported */
+ pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
+ prog->name, relo_idx, insn_idx,
+ (unsigned long long)imm, new_val);
break;
+ }
default:
- pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:%x, src:%x, dst:%x, off:%x, imm:%x\n",
- bpf_program__title(prog, false), relo_idx,
- insn_idx, insn->code, insn->src_reg, insn->dst_reg,
- insn->off, insn->imm);
+ pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
+ prog->name, relo_idx, insn_idx, insn->code,
+ insn->src_reg, insn->dst_reg, insn->off, insn->imm);
return -EINVAL;
}
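/* insn_bpf_size_to_bytes() and insn_bytes_to_bpf_size(), called in the
 * LDX/ST/STX case above, are not shown in this hunk. They presumably just
 * translate between the BPF_SIZE() encoding of an instruction and a byte
 * count; a minimal sketch under that assumption:
 */
static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
{
	switch (BPF_SIZE(insn->code)) {
	case BPF_DW: return 8;
	case BPF_W:  return 4;
	case BPF_H:  return 2;
	case BPF_B:  return 1;
	default:     return -1;
	}
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}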
@@ -4728,29 +5548,48 @@ static int bpf_core_reloc_insn(struct bpf_program *prog,
static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
{
const struct btf_type *t;
+ const struct btf_enum *e;
const char *s;
__u32 type_id;
int i;
- type_id = spec->spec[0].type_id;
+ type_id = spec->root_type_id;
t = btf__type_by_id(spec->btf, type_id);
s = btf__name_by_offset(spec->btf, t->name_off);
- libbpf_print(level, "[%u] %s + ", type_id, s);
- for (i = 0; i < spec->raw_len; i++)
- libbpf_print(level, "%d%s", spec->raw_spec[i],
- i == spec->raw_len - 1 ? " => " : ":");
+ libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
- libbpf_print(level, "%u.%u @ &x",
- spec->bit_offset / 8, spec->bit_offset % 8);
+ if (core_relo_is_type_based(spec->relo_kind))
+ return;
- for (i = 0; i < spec->len; i++) {
- if (spec->spec[i].name)
- libbpf_print(level, ".%s", spec->spec[i].name);
- else
- libbpf_print(level, "[%u]", spec->spec[i].idx);
+ if (core_relo_is_enumval_based(spec->relo_kind)) {
+ t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
+ e = btf_enum(t) + spec->raw_spec[0];
+ s = btf__name_by_offset(spec->btf, e->name_off);
+
+ libbpf_print(level, "::%s = %u", s, e->val);
+ return;
}
+ if (core_relo_is_field_based(spec->relo_kind)) {
+ for (i = 0; i < spec->len; i++) {
+ if (spec->spec[i].name)
+ libbpf_print(level, ".%s", spec->spec[i].name);
+ else if (i > 0 || spec->spec[i].idx > 0)
+ libbpf_print(level, "[%u]", spec->spec[i].idx);
+ }
+
+ libbpf_print(level, " (");
+ for (i = 0; i < spec->raw_len; i++)
+ libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
+
+ if (spec->bit_offset % 8)
+ libbpf_print(level, " @ offset %u.%u)",
+ spec->bit_offset / 8, spec->bit_offset % 8);
+ else
+ libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
+ return;
+ }
}
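/* For a field-based relocation, the printing above produces a line of the
 * form (values here are illustrative, not from a real run):
 *
 *	[78] struct task_struct.comm[2] (0:93:2 @ offset 2218)
 *
 * i.e. root type ID and kind, the named accessor path, the raw spec, and
 * the resolved byte (or byte.bit) offset.
 */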
static size_t bpf_core_hash_fn(const void *key, void *ctx)
@@ -4814,22 +5653,22 @@ static void *u32_as_hash_key(__u32 x)
* CPU-wise compared to prebuilding a map from all local type names to
* a list of candidate type names. It's also sped up by caching resolved
* list of matching candidates per each local "root" type ID, that has at
- * least one bpf_field_reloc associated with it. This list is shared
+ * least one bpf_core_relo associated with it. This list is shared
* between multiple relocations for the same type ID and is updated as some
* of the candidates are pruned due to structural incompatibility.
*/
-static int bpf_core_reloc_field(struct bpf_program *prog,
- const struct bpf_field_reloc *relo,
- int relo_idx,
- const struct btf *local_btf,
- const struct btf *targ_btf,
- struct hashmap *cand_cache)
+static int bpf_core_apply_relo(struct bpf_program *prog,
+ const struct bpf_core_relo *relo,
+ int relo_idx,
+ const struct btf *local_btf,
+ const struct btf *targ_btf,
+ struct hashmap *cand_cache)
{
- const char *prog_name = bpf_program__title(prog, false);
- struct bpf_core_spec local_spec, cand_spec, targ_spec;
+ struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
const void *type_key = u32_as_hash_key(relo->type_id);
- const struct btf_type *local_type, *cand_type;
- const char *local_name, *cand_name;
+ struct bpf_core_relo_res cand_res, targ_res;
+ const struct btf_type *local_type;
+ const char *local_name;
struct ids_vec *cand_ids;
__u32 local_id, cand_id;
const char *spec_str;
@@ -4841,32 +5680,49 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
return -EINVAL;
local_name = btf__name_by_offset(local_btf, local_type->name_off);
- if (str_is_empty(local_name))
+ if (!local_name)
return -EINVAL;
spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
if (str_is_empty(spec_str))
return -EINVAL;
- err = bpf_core_spec_parse(local_btf, local_id, spec_str, &local_spec);
+ err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
if (err) {
- pr_warn("prog '%s': relo #%d: parsing [%d] %s + %s failed: %d\n",
- prog_name, relo_idx, local_id, local_name, spec_str,
- err);
+ pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
+ prog->name, relo_idx, local_id, btf_kind_str(local_type),
+ str_is_empty(local_name) ? "<anon>" : local_name,
+ spec_str, err);
return -EINVAL;
}
- pr_debug("prog '%s': relo #%d: kind %d, spec is ", prog_name, relo_idx,
- relo->kind);
+ pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
+ relo_idx, core_relo_kind_str(relo->kind), relo->kind);
bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
libbpf_print(LIBBPF_DEBUG, "\n");
+ /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
+ if (relo->kind == BPF_TYPE_ID_LOCAL) {
+ targ_res.validate = true;
+ targ_res.poison = false;
+ targ_res.orig_val = local_spec.root_type_id;
+ targ_res.new_val = local_spec.root_type_id;
+ goto patch_insn;
+ }
+
+ /* libbpf doesn't support candidate search for anonymous types */
+ if (str_is_empty(local_name)) {
+ pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
+ prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
+ return -EOPNOTSUPP;
+ }
+
if (!hashmap__find(cand_cache, type_key, (void **)&cand_ids)) {
cand_ids = bpf_core_find_cands(local_btf, local_id, targ_btf);
if (IS_ERR(cand_ids)) {
- pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s: %ld",
- prog_name, relo_idx, local_id, local_name,
- PTR_ERR(cand_ids));
+ pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld",
+ prog->name, relo_idx, local_id, btf_kind_str(local_type),
+ local_name, PTR_ERR(cand_ids));
return PTR_ERR(cand_ids);
}
err = hashmap__set(cand_cache, type_key, cand_ids, NULL, NULL);
@@ -4878,36 +5734,51 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
for (i = 0, j = 0; i < cand_ids->len; i++) {
cand_id = cand_ids->data[i];
- cand_type = btf__type_by_id(targ_btf, cand_id);
- cand_name = btf__name_by_offset(targ_btf, cand_type->name_off);
-
- err = bpf_core_spec_match(&local_spec, targ_btf,
- cand_id, &cand_spec);
- pr_debug("prog '%s': relo #%d: matching candidate #%d %s against spec ",
- prog_name, relo_idx, i, cand_name);
- bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
- libbpf_print(LIBBPF_DEBUG, ": %d\n", err);
+ err = bpf_core_spec_match(&local_spec, targ_btf, cand_id, &cand_spec);
if (err < 0) {
- pr_warn("prog '%s': relo #%d: matching error: %d\n",
- prog_name, relo_idx, err);
+ pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
+ prog->name, relo_idx, i);
+ bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
+ libbpf_print(LIBBPF_WARN, ": %d\n", err);
return err;
}
+
+ pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
+ relo_idx, err == 0 ? "non-matching" : "matching", i);
+ bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
+ libbpf_print(LIBBPF_DEBUG, "\n");
+
if (err == 0)
continue;
+ err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
+ if (err)
+ return err;
+
if (j == 0) {
+ targ_res = cand_res;
targ_spec = cand_spec;
} else if (cand_spec.bit_offset != targ_spec.bit_offset) {
- /* if there are many candidates, they should all
- * resolve to the same bit offset
+ /* if there are many field relo candidates, they
+ * should all resolve to the same bit offset
*/
- pr_warn("prog '%s': relo #%d: offset ambiguity: %u != %u\n",
- prog_name, relo_idx, cand_spec.bit_offset,
+ pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
+ prog->name, relo_idx, cand_spec.bit_offset,
targ_spec.bit_offset);
return -EINVAL;
+ } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
+ /* all candidates should result in the same relocation
+ * decision and value, otherwise it's dangerous to
+ * proceed due to ambiguity
+ */
+ pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
+ prog->name, relo_idx,
+ cand_res.poison ? "failure" : "success", cand_res.new_val,
+ targ_res.poison ? "failure" : "success", targ_res.new_val);
+ return -EINVAL;
}
- cand_ids->data[j++] = cand_spec.spec[0].type_id;
+ cand_ids->data[j++] = cand_spec.root_type_id;
}
/*
@@ -4926,22 +5797,28 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
* as well as expected case, depending on whether instruction w/
* relocation is guarded in some way that makes it unreachable (dead
* code) if relocation can't be resolved. This is handled in
- * bpf_core_reloc_insn() uniformly by replacing that instruction with
+ * bpf_core_patch_insn() uniformly by replacing that instruction with
* BPF helper call insn (using invalid helper ID). If that instruction
* is indeed unreachable, then it will be ignored and eliminated by
* verifier. If it was an error, then verifier will complain and point
* to a specific instruction number in its log.
*/
- if (j == 0)
- pr_debug("prog '%s': relo #%d: no matching targets found for [%d] %s + %s\n",
- prog_name, relo_idx, local_id, local_name, spec_str);
+ if (j == 0) {
+ pr_debug("prog '%s': relo #%d: no matching targets found\n",
+ prog->name, relo_idx);
+
+ /* calculate single target relo result explicitly */
+ err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
+ if (err)
+ return err;
+ }
- /* bpf_core_reloc_insn should know how to handle missing targ_spec */
- err = bpf_core_reloc_insn(prog, relo, relo_idx, &local_spec,
- j ? &targ_spec : NULL);
+patch_insn:
+ /* bpf_core_patch_insn() should know how to handle missing targ_spec */
+ err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
- prog_name, relo_idx, relo->insn_off, err);
+ prog->name, relo_idx, relo->insn_off, err);
return -EINVAL;
}
@@ -4949,20 +5826,23 @@ static int bpf_core_reloc_field(struct bpf_program *prog,
}
static int
-bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
+bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
{
const struct btf_ext_info_sec *sec;
- const struct bpf_field_reloc *rec;
+ const struct bpf_core_relo *rec;
const struct btf_ext_info *seg;
struct hashmap_entry *entry;
struct hashmap *cand_cache = NULL;
struct bpf_program *prog;
struct btf *targ_btf;
const char *sec_name;
- int i, err = 0;
+ int i, err = 0, insn_idx, sec_idx;
+
+ if (obj->btf_ext->core_relo_info.len == 0)
+ return 0;
if (targ_btf_path)
- targ_btf = btf__parse_elf(targ_btf_path, NULL);
+ targ_btf = btf__parse(targ_btf_path, NULL);
else
targ_btf = obj->btf_vmlinux;
if (IS_ERR_OR_NULL(targ_btf)) {
@@ -4976,36 +5856,54 @@ bpf_core_reloc_fields(struct bpf_object *obj, const char *targ_btf_path)
goto out;
}
- seg = &obj->btf_ext->field_reloc_info;
+ seg = &obj->btf_ext->core_relo_info;
for_each_btf_ext_sec(seg, sec) {
sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
if (str_is_empty(sec_name)) {
err = -EINVAL;
goto out;
}
+ /* bpf_object's ELF is gone by now so it's not easy to find
+ * section index by section name, but we can find *any*
+ * bpf_program within desired section name and use it's
+ * prog->sec_idx to do a proper search by section index and
+ * instruction offset
+ */
prog = NULL;
for (i = 0; i < obj->nr_programs; i++) {
- if (!strcmp(obj->programs[i].section_name, sec_name)) {
- prog = &obj->programs[i];
+ prog = &obj->programs[i];
+ if (strcmp(prog->sec_name, sec_name) == 0)
break;
- }
}
if (!prog) {
- pr_warn("failed to find program '%s' for CO-RE offset relocation\n",
- sec_name);
- err = -EINVAL;
- goto out;
+ pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
+ return -ENOENT;
}
+ sec_idx = prog->sec_idx;
- pr_debug("prog '%s': performing %d CO-RE offset relocs\n",
+ pr_debug("sec '%s': found %d CO-RE relocations\n",
sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
- err = bpf_core_reloc_field(prog, rec, i, obj->btf,
- targ_btf, cand_cache);
+ insn_idx = rec->insn_off / BPF_INSN_SZ;
+ prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
+ if (!prog) {
+ pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
+ sec_name, insn_idx, i);
+ err = -EINVAL;
+ goto out;
+ }
+ /* no need to apply CO-RE relocation if the program is
+ * not going to be loaded
+ */
+ if (!prog->load)
+ continue;
+
+ err = bpf_core_apply_relo(prog, rec, i, obj->btf,
+ targ_btf, cand_cache);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
- sec_name, i, err);
+ prog->name, i, err);
goto out;
}
}
@@ -5024,125 +5922,432 @@ out:
return err;
}
+/* Relocate data references within program code:
+ * - map references;
+ * - global variable references;
+ * - extern references.
+ */
static int
-bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
+bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
{
- int err = 0;
+ int i;
- if (obj->btf_ext->field_reloc_info.len)
- err = bpf_core_reloc_fields(obj, targ_btf_path);
+ for (i = 0; i < prog->nr_reloc; i++) {
+ struct reloc_desc *relo = &prog->reloc_desc[i];
+ struct bpf_insn *insn = &prog->insns[relo->insn_idx];
+ struct extern_desc *ext;
- return err;
+ switch (relo->type) {
+ case RELO_LD64:
+ insn[0].src_reg = BPF_PSEUDO_MAP_FD;
+ insn[0].imm = obj->maps[relo->map_idx].fd;
+ relo->processed = true;
+ break;
+ case RELO_DATA:
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[1].imm = insn[0].imm + relo->sym_off;
+ insn[0].imm = obj->maps[relo->map_idx].fd;
+ relo->processed = true;
+ break;
+ case RELO_EXTERN:
+ ext = &obj->externs[relo->sym_off];
+ if (ext->type == EXT_KCFG) {
+ insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
+ insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
+ insn[1].imm = ext->kcfg.data_off;
+ } else /* EXT_KSYM */ {
+ if (ext->ksym.type_id) { /* typed ksyms */
+ insn[0].src_reg = BPF_PSEUDO_BTF_ID;
+ insn[0].imm = ext->ksym.vmlinux_btf_id;
+ } else { /* typeless ksyms */
+ insn[0].imm = (__u32)ext->ksym.addr;
+ insn[1].imm = ext->ksym.addr >> 32;
+ }
+ }
+ relo->processed = true;
+ break;
+ case RELO_CALL:
+ /* will be handled in a follow-up pass */
+ break;
+ default:
+ pr_warn("prog '%s': relo #%d: bad relo type %d\n",
+ prog->name, i, relo->type);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
}
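/* For RELO_DATA the patched ldimm64 instruction pair ends up encoding both
 * the map and the offset within its value; a sketch of the result (field
 * values are illustrative):
 *
 *	insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
 *	insn[0].imm = map_fd;        // obj->maps[relo->map_idx].fd
 *	insn[1].imm = value_offset;  // original imm + symbol offset
 *
 * which the kernel loader resolves into a direct pointer into the map's
 * value area.
 */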
-static int
-bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
- struct reloc_desc *relo)
+static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
+ const struct bpf_program *prog,
+ const struct btf_ext_info *ext_info,
+ void **prog_info, __u32 *prog_rec_cnt,
+ __u32 *prog_rec_sz)
{
- struct bpf_insn *insn, *new_insn;
- struct bpf_program *text;
- size_t new_cnt;
- int err;
+ void *copy_start = NULL, *copy_end = NULL;
+ void *rec, *rec_end, *new_prog_info;
+ const struct btf_ext_info_sec *sec;
+ size_t old_sz, new_sz;
+ const char *sec_name;
+ int i, off_adj;
- if (prog->idx != obj->efile.text_shndx && prog->main_prog_cnt == 0) {
- text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
- if (!text) {
- pr_warn("no .text section found yet relo into text exist\n");
- return -LIBBPF_ERRNO__RELOC;
+ for_each_btf_ext_sec(ext_info, sec) {
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (!sec_name)
+ return -EINVAL;
+ if (strcmp(sec_name, prog->sec_name) != 0)
+ continue;
+
+ for_each_btf_ext_rec(ext_info, sec, i, rec) {
+ __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
+
+ if (insn_off < prog->sec_insn_off)
+ continue;
+ if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
+ break;
+
+ if (!copy_start)
+ copy_start = rec;
+ copy_end = rec + ext_info->rec_size;
}
- new_cnt = prog->insns_cnt + text->insns_cnt;
- new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
- if (!new_insn) {
- pr_warn("oom in prog realloc\n");
+
+ if (!copy_start)
+ return -ENOENT;
+
+ /* append func/line info of a given (sub-)program to the main
+ * program func/line info
+ */
+ old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
+ new_sz = old_sz + (copy_end - copy_start);
+ new_prog_info = realloc(*prog_info, new_sz);
+ if (!new_prog_info)
return -ENOMEM;
- }
- prog->insns = new_insn;
+ *prog_info = new_prog_info;
+ *prog_rec_cnt = new_sz / ext_info->rec_size;
+ memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
+
+ /* Kernel instruction offsets are in units of 8-byte
+ * instructions, while .BTF.ext instruction offsets generated
+ * by Clang are in units of bytes. So convert Clang offsets
+ * into kernel offsets and adjust offset according to program
+ * relocated position.
+ */
+ off_adj = prog->sub_insn_off - prog->sec_insn_off;
+ rec = new_prog_info + old_sz;
+ rec_end = new_prog_info + new_sz;
+ for (; rec < rec_end; rec += ext_info->rec_size) {
+ __u32 *insn_off = rec;
- if (obj->btf_ext) {
- err = bpf_program_reloc_btf_ext(prog, obj,
- text->section_name,
- prog->insns_cnt);
- if (err)
- return err;
+ *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
}
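		/* Worked example (hypothetical numbers): a record with Clang
		 * byte offset 24 in a subprog with sec_insn_off 3 and
		 * sub_insn_off 100 becomes 24 / 8 + (100 - 3) = 100, i.e.
		 * insn #3 of the subprog lands at insn #100 of the relocated
		 * main program image.
		 */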
-
- memcpy(new_insn + prog->insns_cnt, text->insns,
- text->insns_cnt * sizeof(*insn));
- prog->main_prog_cnt = prog->insns_cnt;
- prog->insns_cnt = new_cnt;
- pr_debug("added %zd insn from %s to prog %s\n",
- text->insns_cnt, text->section_name,
- prog->section_name);
+ *prog_rec_sz = ext_info->rec_size;
+ return 0;
}
- insn = &prog->insns[relo->insn_idx];
- insn->imm += relo->sym_off / 8 + prog->main_prog_cnt - relo->insn_idx;
- return 0;
+ return -ENOENT;
}
static int
-bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
+reloc_prog_func_and_line_info(const struct bpf_object *obj,
+ struct bpf_program *main_prog,
+ const struct bpf_program *prog)
{
- int i, err;
+ int err;
- if (!prog)
+ /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
+ * support func/line info
+ */
+ if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
return 0;
- if (obj->btf_ext) {
- err = bpf_program_reloc_btf_ext(prog, obj,
- prog->section_name, 0);
- if (err)
+ /* only attempt func info relocation if main program's func_info
+ * relocation was successful
+ */
+ if (main_prog != prog && !main_prog->func_info)
+ goto line_info;
+
+ err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
+ &main_prog->func_info,
+ &main_prog->func_info_cnt,
+ &main_prog->func_info_rec_size);
+ if (err) {
+ if (err != -ENOENT) {
+ pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
+ prog->name, err);
+ return err;
+ }
+ if (main_prog->func_info) {
+ /*
+ * Some info has already been found, but the last
+ * btf_ext reloc had a problem, so we must error out.
+ */
+ pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
return err;
+ }
+ /* Failed to load the very first info. Ignore the rest. */
+ pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
+ prog->name);
}
- if (!prog->reloc_desc)
+line_info:
+ /* don't relocate line info if main program's relocation failed */
+ if (main_prog != prog && !main_prog->line_info)
return 0;
- for (i = 0; i < prog->nr_reloc; i++) {
- struct reloc_desc *relo = &prog->reloc_desc[i];
- struct bpf_insn *insn = &prog->insns[relo->insn_idx];
- struct extern_desc *ext;
+ err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
+ &main_prog->line_info,
+ &main_prog->line_info_cnt,
+ &main_prog->line_info_rec_size);
+ if (err) {
+ if (err != -ENOENT) {
+ pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
+ prog->name, err);
+ return err;
+ }
+ if (main_prog->line_info) {
+ /*
+ * Some info has already been found, but the last
+ * btf_ext reloc had a problem, so we must error out.
+ */
+ pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
+ return err;
+ }
+ /* Failed to load the very first info. Ignore the rest. */
+ pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
+ prog->name);
+ }
+ return 0;
+}
+
+static int cmp_relo_by_insn_idx(const void *key, const void *elem)
+{
+ size_t insn_idx = *(const size_t *)key;
+ const struct reloc_desc *relo = elem;
+
+ if (insn_idx == relo->insn_idx)
+ return 0;
+ return insn_idx < relo->insn_idx ? -1 : 1;
+}
+
+static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
+{
+ return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
+ sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
+}
+
+static int
+bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
+ struct bpf_program *prog)
+{
+ size_t sub_insn_idx, insn_idx, new_cnt;
+ struct bpf_program *subprog;
+ struct bpf_insn *insns, *insn;
+ struct reloc_desc *relo;
+ int err;
- if (relo->insn_idx + 1 >= (int)prog->insns_cnt) {
- pr_warn("relocation out of range: '%s'\n",
- prog->section_name);
+ err = reloc_prog_func_and_line_info(obj, main_prog, prog);
+ if (err)
+ return err;
+
+ for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
+ insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
+ if (!insn_is_subprog_call(insn))
+ continue;
+
+ relo = find_prog_insn_relo(prog, insn_idx);
+ if (relo && relo->type != RELO_CALL) {
+ pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
+ prog->name, insn_idx, relo->type);
return -LIBBPF_ERRNO__RELOC;
}
+ if (relo) {
+ /* sub-program instruction index is a combination of
+ * an offset of a symbol pointed to by relocation and
+ * call instruction's imm field; for global functions,
+ * call always has imm = -1, but for static functions
+ * relocation is against STT_SECTION and insn->imm
+ * points to a start of a static function
+ */
+ sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
+ } else {
+ /* if subprogram call is to a static function within
+ * the same ELF section, there won't be any relocation
+ * emitted, but it also means there is no additional
+ * offset necessary, insns->imm is relative to
+ * instruction's original position within the section
+ */
+ sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
+ }
- switch (relo->type) {
- case RELO_LD64:
- insn[0].src_reg = BPF_PSEUDO_MAP_FD;
- insn[0].imm = obj->maps[relo->map_idx].fd;
- break;
- case RELO_DATA:
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
- insn[1].imm = insn[0].imm + relo->sym_off;
- insn[0].imm = obj->maps[relo->map_idx].fd;
- break;
- case RELO_EXTERN:
- ext = &obj->externs[relo->sym_off];
- if (ext->type == EXT_KCFG) {
- insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
- insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
- insn[1].imm = ext->kcfg.data_off;
- } else /* EXT_KSYM */ {
- insn[0].imm = (__u32)ext->ksym.addr;
- insn[1].imm = ext->ksym.addr >> 32;
+ /* we enforce that sub-programs should be in .text section */
+ subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
+ if (!subprog) {
+ pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
+ prog->name);
+ return -LIBBPF_ERRNO__RELOC;
+ }
+
+ /* if it's the first call instruction calling into this
+ * subprogram (meaning this subprog hasn't been processed
+ * yet) within the context of current main program:
+ * - append it at the end of main program's instructions block;
+ * - process it recursively, while current program is put on hold;
+ * - if that subprogram calls some other not-yet-processed
+ * subprogram, the same thing happens recursively until
+ * there are no more unprocessed subprograms left to append
+ * and relocate.
+ */
+ if (subprog->sub_insn_off == 0) {
+ subprog->sub_insn_off = main_prog->insns_cnt;
+
+ new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
+ insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
+ if (!insns) {
+ pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
+ return -ENOMEM;
}
- break;
- case RELO_CALL:
- err = bpf_program__reloc_text(prog, obj, relo);
+ main_prog->insns = insns;
+ main_prog->insns_cnt = new_cnt;
+
+ memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
+ subprog->insns_cnt * sizeof(*insns));
+
+ pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
+ main_prog->name, subprog->insns_cnt, subprog->name);
+
+ err = bpf_object__reloc_code(obj, main_prog, subprog);
if (err)
return err;
- break;
- default:
- pr_warn("relo #%d: bad relo type %d\n", i, relo->type);
- return -EINVAL;
}
+
+ /* main_prog->insns memory could have been re-allocated, so
+ * calculate pointer again
+ */
+ insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
+ /* calculate correct instruction position within current main
+ * prog; each main prog can have a different set of
+ * subprograms appended (potentially in different order as
+ * well), so position of any subprog can be different for
+ * different main programs */
+ insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
+
+ if (relo)
+ relo->processed = true;
+
+ pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
+ prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
}
- zfree(&prog->reloc_desc);
- prog->nr_reloc = 0;
+ return 0;
+}
+
+/*
+ * Relocate sub-program calls.
+ *
+ * Algorithm operates as follows. Each entry-point BPF program (referred to as
+ * main prog) is processed separately. Each subprog (a non-entry function
+ * that can be called from either entry progs or other subprogs) gets its
+ * sub_insn_off reset to zero. This serves as an indicator that this subprogram
+ * hasn't yet been appended and relocated within the current main prog. Once it's
+ * relocated, sub_insn_off will point at the position within current main prog
+ * where given subprog was appended. This will further be used to relocate all
+ * the call instructions jumping into this subprog.
+ *
+ * We start with main program and process all call instructions. If the call
+ * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
+ * is zero), subprog instructions are appended at the end of main program's
+ * instruction array. Then main program is "put on hold" while we recursively
+ * process newly appended subprogram. If that subprogram calls into another
+ * subprogram that hasn't been appended, new subprogram is appended again to
+ * the *main* prog's instructions (subprog's instructions are always left
+ * untouched, as they need to be in unmodified state for subsequent main progs
+ * and subprog instructions are always sent only as part of a main prog) and
+ * the process continues recursively. Once all the subprogs called from a main
+ * prog or any of its subprogs are appended (and relocated), all their
+ * positions within finalized instructions array are known, so it's easy to
+ * rewrite call instructions with correct relative offsets, corresponding to
+ * desired target subprog.
+ *
+ * It's important to realize that some subprogs might not be called from a given
+ * main prog or any of its called/used subprogs. Those will keep their
+ * subprog->sub_insn_off as zero at all times and won't be appended to current
+ * main prog and won't be relocated within the context of current main prog.
+ * They might still be used from other main progs later.
+ *
+ * Visually this process can be shown as below. Suppose we have two main
+ * programs mainA and mainB and BPF object contains three subprogs: subA,
+ * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
+ * subC both call subB:
+ *
+ * +--------+ +-------+
+ * | v v |
+ * +--+---+ +--+-+-+ +---+--+
+ * | subA | | subB | | subC |
+ * +--+---+ +------+ +---+--+
+ * ^ ^
+ * | |
+ * +---+-------+ +------+----+
+ * | mainA | | mainB |
+ * +-----------+ +-----------+
+ *
+ * We'll start relocating mainA, will find subA, append it and start
+ * processing subA recursively:
+ *
+ * +-----------+------+
+ * | mainA | subA |
+ * +-----------+------+
+ *
+ * At this point we notice that subB is used from subA, so we append it and
+ * relocate (there are no further subcalls from subB):
+ *
+ * +-----------+------+------+
+ * | mainA | subA | subB |
+ * +-----------+------+------+
+ *
+ * At this point, we relocate subA calls, then go one level up and finish with
+ * relocating mainA calls. mainA is done.
+ *
+ * For mainB, the process is similar but results in a different order. We start with
+ * mainB and skip subA and subB, as mainB never calls them (at least
+ * directly), but we see subC is needed, so we append and start processing it:
+ *
+ * +-----------+------+
+ * | mainB | subC |
+ * +-----------+------+
+ *
+ * Now we see subC needs subB, so we go back to it, append and relocate it:
+ *
+ * +-----------+------+------+
+ * | mainB | subC | subB |
+ * +-----------+------+------+
+ *
+ * At this point we unwind recursion, relocate calls in subC, then in mainB.
+ */
+static int
+bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
+{
+ struct bpf_program *subprog;
+ int i, j, err;
+
+ /* mark all subprogs as not relocated (yet) within the context of
+ * current main program
+ */
+ for (i = 0; i < obj->nr_programs; i++) {
+ subprog = &obj->programs[i];
+ if (!prog_is_subprog(obj, subprog))
+ continue;
+
+ subprog->sub_insn_off = 0;
+ for (j = 0; j < subprog->nr_reloc; j++)
+ if (subprog->reloc_desc[j].type == RELO_CALL)
+ subprog->reloc_desc[j].processed = false;
+ }
+
+ err = bpf_object__reloc_code(obj, prog, prog);
+ if (err)
+ return err;
+
return 0;
}
@@ -5161,35 +6366,45 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- /* ensure .text is relocated first, as it's going to be copied as-is
- * later for sub-program calls
+ /* relocate data references first for all programs and sub-programs,
+ * as they don't change relative to code locations, so subsequent
+ * subprogram processing won't need to re-calculate any of them
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (prog->idx != obj->efile.text_shndx)
- continue;
-
- err = bpf_program__relocate(prog, obj);
+ err = bpf_object__relocate_data(obj, prog);
if (err) {
- pr_warn("failed to relocate '%s'\n", prog->section_name);
+ pr_warn("prog '%s': failed to relocate data references: %d\n",
+ prog->name, err);
return err;
}
- break;
}
- /* now relocate everything but .text, which by now is relocated
- * properly, so we can copy raw sub-program instructions as is safely
+ /* now relocate subprogram calls and append used subprograms to main
+ * programs; each copy of subprogram code needs to be relocated
+ * differently for each main program, because its code location might
+ * have changed
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (prog->idx == obj->efile.text_shndx)
+ /* sub-program's sub-calls are relocated within the context of
+ * its main program only
+ */
+ if (prog_is_subprog(obj, prog))
continue;
- err = bpf_program__relocate(prog, obj);
+ err = bpf_object__relocate_calls(obj, prog);
if (err) {
- pr_warn("failed to relocate '%s'\n", prog->section_name);
+ pr_warn("prog '%s': failed to relocate calls: %d\n",
+ prog->name, err);
return err;
}
}
+ /* free up relocation descriptors */
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ zfree(&prog->reloc_desc);
+ prog->nr_reloc = 0;
+ }
return 0;
}
@@ -5230,8 +6445,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
i, (size_t)GELF_R_SYM(rel.r_info));
return -LIBBPF_ERRNO__FORMAT;
}
- name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name) ? : "<?>";
+ name = elf_sym_str(obj, sym.st_name) ?: "<?>";
if (sym.st_shndx != obj->efile.btf_maps_shndx) {
pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
i, name);
@@ -5293,7 +6507,7 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
moff /= bpf_ptr_sz;
if (moff >= map->init_slots_sz) {
new_sz = moff + 1;
- tmp = realloc(map->init_slots, new_sz * host_ptr_sz);
+ tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
if (!tmp)
return -ENOMEM;
map->init_slots = tmp;
@@ -5310,41 +6524,98 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
return 0;
}
-static int bpf_object__collect_reloc(struct bpf_object *obj)
+static int cmp_relocs(const void *_a, const void *_b)
{
- int i, err;
+ const struct reloc_desc *a = _a;
+ const struct reloc_desc *b = _b;
- if (!obj_elf_valid(obj)) {
- pr_warn("Internal error: elf object is closed\n");
- return -LIBBPF_ERRNO__INTERNAL;
- }
+ if (a->insn_idx != b->insn_idx)
+ return a->insn_idx < b->insn_idx ? -1 : 1;
+
+ /* no two relocations should have the same insn_idx, but ... */
+ if (a->type != b->type)
+ return a->type < b->type ? -1 : 1;
+
+ return 0;
+}
+
+static int bpf_object__collect_relos(struct bpf_object *obj)
+{
+ int i, err;
for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
Elf_Data *data = obj->efile.reloc_sects[i].data;
int idx = shdr->sh_info;
- struct bpf_program *prog;
if (shdr->sh_type != SHT_REL) {
pr_warn("internal error at %d\n", __LINE__);
return -LIBBPF_ERRNO__INTERNAL;
}
- if (idx == obj->efile.st_ops_shndx) {
+ if (idx == obj->efile.st_ops_shndx)
err = bpf_object__collect_st_ops_relos(obj, shdr, data);
- } else if (idx == obj->efile.btf_maps_shndx) {
+ else if (idx == obj->efile.btf_maps_shndx)
err = bpf_object__collect_map_relos(obj, shdr, data);
- } else {
- prog = bpf_object__find_prog_by_idx(obj, idx);
- if (!prog) {
- pr_warn("relocation failed: no prog in section(%d)\n", idx);
- return -LIBBPF_ERRNO__RELOC;
- }
- err = bpf_program__collect_reloc(prog, shdr, data, obj);
- }
+ else
+ err = bpf_object__collect_prog_relos(obj, shdr, data);
if (err)
return err;
}
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ struct bpf_program *p = &obj->programs[i];
+
+ if (!p->nr_reloc)
+ continue;
+
+ qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
+ }
+ return 0;
+}
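/* Sorting by insn_idx here is what makes the bsearch() in
 * find_prog_insn_relo() valid later on; for example, after collection:
 *
 *	struct reloc_desc *r = find_prog_insn_relo(prog, 42);
 *
 * returns the relocation for insn #42, or NULL if that insn has none.
 */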
+
+static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
+{
+ if (BPF_CLASS(insn->code) == BPF_JMP &&
+ BPF_OP(insn->code) == BPF_CALL &&
+ BPF_SRC(insn->code) == BPF_K &&
+ insn->src_reg == 0 &&
+ insn->dst_reg == 0) {
+ *func_id = insn->imm;
+ return true;
+ }
+ return false;
+}
+
+static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
+{
+ struct bpf_insn *insn = prog->insns;
+ enum bpf_func_id func_id;
+ int i;
+
+ for (i = 0; i < prog->insns_cnt; i++, insn++) {
+ if (!insn_is_helper_call(insn, &func_id))
+ continue;
+
+ /* on kernels that don't yet support
+ * bpf_probe_read_{kernel,user}[_str] helpers, fall back
+ * to bpf_probe_read() which works well for old kernels
+ */
+ switch (func_id) {
+ case BPF_FUNC_probe_read_kernel:
+ case BPF_FUNC_probe_read_user:
+ if (!kernel_supports(FEAT_PROBE_READ_KERN))
+ insn->imm = BPF_FUNC_probe_read;
+ break;
+ case BPF_FUNC_probe_read_kernel_str:
+ case BPF_FUNC_probe_read_user_str:
+ if (!kernel_supports(FEAT_PROBE_READ_KERN))
+ insn->imm = BPF_FUNC_probe_read_str;
+ break;
+ default:
+ break;
+ }
+ }
return 0;
}
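/* The helper calls matched above have a fixed raw shape; a sketch of the
 * instruction that, e.g., a bpf_probe_read_kernel() call compiles to:
 *
 *	struct bpf_insn call = {
 *		.code = BPF_JMP | BPF_CALL,
 *		.dst_reg = 0,
 *		.src_reg = 0,
 *		.off = 0,
 *		.imm = BPF_FUNC_probe_read_kernel, // rewritten to
 *						   // BPF_FUNC_probe_read above
 *	};
 */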
@@ -5364,12 +6635,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
load_attr.prog_type = prog->type;
/* old kernels might not support specifying expected_attach_type */
- if (!prog->caps->exp_attach_type && prog->sec_def &&
+ if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
prog->sec_def->is_exp_attach_type_optional)
load_attr.expected_attach_type = 0;
else
load_attr.expected_attach_type = prog->expected_attach_type;
- if (prog->caps->name)
+ if (kernel_supports(FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insns_cnt = insns_cnt;
@@ -5387,7 +6658,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
/* specify func_info/line_info only if kernel supports them */
btf_fd = bpf_object__btf_fd(prog->obj);
- if (btf_fd >= 0 && prog->obj->caps.btf_func) {
+ if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
@@ -5413,6 +6684,20 @@ retry_load:
if (ret >= 0) {
if (log_buf && load_attr.log_level)
pr_debug("verifier log:\n%s", log_buf);
+
+ if (prog->obj->rodata_map_idx >= 0 &&
+ kernel_supports(FEAT_PROG_BIND_MAP)) {
+ struct bpf_map *rodata_map =
+ &prog->obj->maps[prog->obj->rodata_map_idx];
+
+ if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
+ cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+ pr_warn("prog '%s': failed to bind .rodata map: %s\n",
+ prog->name, cp);
+ /* Don't fail hard if can't bind rodata. */
+ }
+ }
+
*pfd = ret;
ret = 0;
goto out;
@@ -5425,7 +6710,7 @@ retry_load:
free(log_buf);
goto retry_load;
}
- ret = -errno;
+ ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warn("load bpf program failed: %s\n", cp);
pr_perm_msg(ret);
@@ -5465,8 +6750,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
int err = 0, fd, i, btf_id;
if (prog->obj->loaded) {
- pr_warn("prog '%s'('%s'): can't load after object was loaded\n",
- prog->name, prog->section_name);
+ pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
return -EINVAL;
}
@@ -5482,7 +6766,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
if (prog->instances.nr < 0 || !prog->instances.fds) {
if (prog->preprocessor) {
pr_warn("Internal error: can't load program '%s'\n",
- prog->section_name);
+ prog->name);
return -LIBBPF_ERRNO__INTERNAL;
}
@@ -5497,8 +6781,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
if (!prog->preprocessor) {
if (prog->instances.nr != 1) {
- pr_warn("Program '%s' is inconsistent: nr(%d) != 1\n",
- prog->section_name, prog->instances.nr);
+ pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
+ prog->name, prog->instances.nr);
}
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_ver, &fd);
@@ -5516,13 +6800,13 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
prog->insns_cnt, &result);
if (err) {
pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
- i, prog->section_name);
+ i, prog->name);
goto out;
}
if (!result.new_insn_ptr || !result.new_insn_cnt) {
pr_debug("Skip loading the %dth instance of program '%s'\n",
- i, prog->section_name);
+ i, prog->name);
prog->instances.fds[i] = -1;
if (result.pfd)
*result.pfd = -1;
@@ -5533,7 +6817,7 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
result.new_insn_cnt, license, kern_ver, &fd);
if (err) {
pr_warn("Loading the %dth instance of program '%s' failed\n",
- i, prog->section_name);
+ i, prog->name);
goto out;
}
@@ -5543,18 +6827,12 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
}
out:
if (err)
- pr_warn("failed to load program '%s'\n", prog->section_name);
+ pr_warn("failed to load program '%s'\n", prog->name);
zfree(&prog->insns);
prog->insns_cnt = 0;
return err;
}
-static bool bpf_program__is_function_storage(const struct bpf_program *prog,
- const struct bpf_object *obj)
-{
- return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
-}
-
static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
@@ -5564,11 +6842,17 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
- if (bpf_program__is_function_storage(prog, obj))
+ err = bpf_object__sanitize_prog(obj, prog);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < obj->nr_programs; i++) {
+ prog = &obj->programs[i];
+ if (prog_is_subprog(obj, prog))
continue;
if (!prog->load) {
- pr_debug("prog '%s'('%s'): skipped loading\n",
- prog->name, prog->section_name);
+ pr_debug("prog '%s': skipped loading\n", prog->name);
continue;
}
prog->log_level |= log_level;
@@ -5629,18 +6913,19 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
err = err ? : bpf_object__collect_externs(obj);
err = err ? : bpf_object__finalize_btf(obj);
err = err ? : bpf_object__init_maps(obj, opts);
- err = err ? : bpf_object__init_prog_names(obj);
- err = err ? : bpf_object__collect_reloc(obj);
+ err = err ? : bpf_object__collect_relos(obj);
if (err)
goto out;
bpf_object__elf_finish(obj);
bpf_object__for_each_program(prog, obj) {
- prog->sec_def = find_sec_def(prog->section_name);
+ prog->sec_def = find_sec_def(prog->sec_name);
if (!prog->sec_def)
/* couldn't guess, but user might manually specify */
continue;
+ if (prog->sec_def->is_sleepable)
+ prog->prog_flags |= BPF_F_SLEEPABLE;
bpf_program__set_type(prog, prog->sec_def->prog_type);
bpf_program__set_expected_attach_type(prog,
prog->sec_def->expected_attach_type);
@@ -5750,11 +7035,11 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
bpf_object__for_each_map(m, obj) {
if (!bpf_map__is_internal(m))
continue;
- if (!obj->caps.global_data) {
+ if (!kernel_supports(FEAT_GLOBAL_DATA)) {
pr_warn("kernel doesn't support global data\n");
return -ENOTSUP;
}
- if (!obj->caps.array_mmap)
+ if (!kernel_supports(FEAT_ARRAY_MMAP))
m->def.map_flags ^= BPF_F_MMAPABLE;
}
@@ -5809,10 +7094,72 @@ out:
return err;
}
+static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
+{
+ struct extern_desc *ext;
+ int i, id;
+
+ for (i = 0; i < obj->nr_extern; i++) {
+ const struct btf_type *targ_var, *targ_type;
+ __u32 targ_type_id, local_type_id;
+ const char *targ_var_name;
+ int ret;
+
+ ext = &obj->externs[i];
+ if (ext->type != EXT_KSYM || !ext->ksym.type_id)
+ continue;
+
+ id = btf__find_by_name_kind(obj->btf_vmlinux, ext->name,
+ BTF_KIND_VAR);
+ if (id <= 0) {
+ pr_warn("extern (ksym) '%s': failed to find BTF ID in vmlinux BTF.\n",
+ ext->name);
+ return -ESRCH;
+ }
+
+ /* find local type_id */
+ local_type_id = ext->ksym.type_id;
+
+ /* find target type_id */
+ targ_var = btf__type_by_id(obj->btf_vmlinux, id);
+ targ_var_name = btf__name_by_offset(obj->btf_vmlinux,
+ targ_var->name_off);
+ targ_type = skip_mods_and_typedefs(obj->btf_vmlinux,
+ targ_var->type,
+ &targ_type_id);
+
+ ret = bpf_core_types_are_compat(obj->btf, local_type_id,
+ obj->btf_vmlinux, targ_type_id);
+ if (ret <= 0) {
+ const struct btf_type *local_type;
+ const char *targ_name, *local_name;
+
+ local_type = btf__type_by_id(obj->btf, local_type_id);
+ local_name = btf__name_by_offset(obj->btf,
+ local_type->name_off);
+ targ_name = btf__name_by_offset(obj->btf_vmlinux,
+ targ_type->name_off);
+
+ pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
+ ext->name, local_type_id,
+ btf_kind_str(local_type), local_name, targ_type_id,
+ btf_kind_str(targ_type), targ_name);
+ return -EINVAL;
+ }
+
+ ext->is_set = true;
+ ext->ksym.vmlinux_btf_id = id;
+ pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
+ ext->name, id, btf_kind_str(targ_var), targ_var_name);
+ }
+ return 0;
+}
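/* On the BPF program side, the typed ksym externs resolved above would be
 * declared with the __ksym attribute, e.g. (assuming vmlinux.h provides the
 * type):
 *
 *	extern const struct rq runqueues __ksym;
 *
 * The check above then verifies that the program's BTF description of
 * 'runqueues' is compatible with the vmlinux BTF one before recording the
 * vmlinux BTF ID for the loader.
 */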
+
static int bpf_object__resolve_externs(struct bpf_object *obj,
const char *extra_kconfig)
{
bool need_config = false, need_kallsyms = false;
+ bool need_vmlinux_btf = false;
struct extern_desc *ext;
void *kcfg_data = NULL;
int err, i;
@@ -5843,7 +7190,10 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
strncmp(ext->name, "CONFIG_", 7) == 0) {
need_config = true;
} else if (ext->type == EXT_KSYM) {
- need_kallsyms = true;
+ if (ext->ksym.type_id)
+ need_vmlinux_btf = true;
+ else
+ need_kallsyms = true;
} else {
pr_warn("unrecognized extern '%s'\n", ext->name);
return -EINVAL;
@@ -5872,6 +7222,11 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
if (err)
return -EINVAL;
}
+ if (need_vmlinux_btf) {
+ err = bpf_object__resolve_ksyms_btf_id(obj);
+ if (err)
+ return -EINVAL;
+ }
for (i = 0; i < obj->nr_extern; i++) {
ext = &obj->externs[i];
@@ -5904,11 +7259,10 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
}
err = bpf_object__probe_loading(obj);
- err = err ? : bpf_object__probe_caps(obj);
+ err = err ? : bpf_object__load_vmlinux_btf(obj);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
err = err ? : bpf_object__sanitize_and_load_btf(obj);
err = err ? : bpf_object__sanitize_maps(obj);
- err = err ? : bpf_object__load_vmlinux_btf(obj);
err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
err = err ? : bpf_object__create_maps(obj);
err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
@@ -6016,7 +7370,7 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
if (instance < 0 || instance >= prog->instances.nr) {
pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
+ instance, prog->name, prog->instances.nr);
return -EINVAL;
}
@@ -6047,7 +7401,7 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
if (instance < 0 || instance >= prog->instances.nr) {
pr_warn("invalid prog instance %d of prog %s (max %d)\n",
- instance, prog->section_name, prog->instances.nr);
+ instance, prog->name, prog->instances.nr);
return -EINVAL;
}
@@ -6077,8 +7431,7 @@ int bpf_program__pin(struct bpf_program *prog, const char *path)
}
if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n",
- prog->section_name);
+ pr_warn("no instances of prog %s to pin\n", prog->name);
return -EINVAL;
}
@@ -6140,8 +7493,7 @@ int bpf_program__unpin(struct bpf_program *prog, const char *path)
}
if (prog->instances.nr <= 0) {
- pr_warn("no instances of prog %s to pin\n",
- prog->section_name);
+ pr_warn("no instances of prog %s to pin\n", prog->name);
return -EINVAL;
}
@@ -6633,7 +7985,7 @@ bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
do {
prog = __bpf_program__iter(prog, obj, true);
- } while (prog && bpf_program__is_function_storage(prog, obj));
+ } while (prog && prog_is_subprog(obj, prog));
return prog;
}
@@ -6645,7 +7997,7 @@ bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
do {
prog = __bpf_program__iter(prog, obj, false);
- } while (prog && bpf_program__is_function_storage(prog, obj));
+ } while (prog && prog_is_subprog(obj, prog));
return prog;
}
@@ -6676,11 +8028,16 @@ const char *bpf_program__name(const struct bpf_program *prog)
return prog->name;
}
+const char *bpf_program__section_name(const struct bpf_program *prog)
+{
+ return prog->sec_name;
+}
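/* Callers migrating off bpf_program__title() can use the new accessor
 * directly, e.g.:
 *
 *	const char *sec = bpf_program__section_name(prog);
 *
 * which returns the object-owned section name and never allocates.
 */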
+
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
const char *title;
- title = prog->section_name;
+ title = prog->sec_name;
if (needs_copy) {
title = strdup(title);
if (!title) {
@@ -6713,7 +8070,7 @@ int bpf_program__fd(const struct bpf_program *prog)
size_t bpf_program__size(const struct bpf_program *prog)
{
- return prog->insns_cnt * sizeof(struct bpf_insn);
+ return prog->insns_cnt * BPF_INSN_SZ;
}
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
@@ -6753,14 +8110,14 @@ int bpf_program__nth_fd(const struct bpf_program *prog, int n)
if (n >= prog->instances.nr || n < 0) {
pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
- n, prog->section_name, prog->instances.nr);
+ n, prog->name, prog->instances.nr);
return -EINVAL;
}
fd = prog->instances.fds[n];
if (fd < 0) {
pr_warn("%dth instance of program '%s' is invalid\n",
- n, prog->section_name);
+ n, prog->name);
return -ENOENT;
}
@@ -6910,6 +8267,21 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_FEXIT,
.is_attach_btf = true,
.attach_fn = attach_trace),
+ SEC_DEF("fentry.s/", TRACING,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .is_attach_btf = true,
+ .is_sleepable = true,
+ .attach_fn = attach_trace),
+ SEC_DEF("fmod_ret.s/", TRACING,
+ .expected_attach_type = BPF_MODIFY_RETURN,
+ .is_attach_btf = true,
+ .is_sleepable = true,
+ .attach_fn = attach_trace),
+ SEC_DEF("fexit.s/", TRACING,
+ .expected_attach_type = BPF_TRACE_FEXIT,
+ .is_attach_btf = true,
+ .is_sleepable = true,
+ .attach_fn = attach_trace),
SEC_DEF("freplace/", EXT,
.is_attach_btf = true,
.attach_fn = attach_trace),
@@ -6917,6 +8289,11 @@ static const struct bpf_sec_def section_defs[] = {
.is_attach_btf = true,
.expected_attach_type = BPF_LSM_MAC,
.attach_fn = attach_lsm),
+ SEC_DEF("lsm.s/", LSM,
+ .is_attach_btf = true,
+ .is_sleepable = true,
+ .expected_attach_type = BPF_LSM_MAC,
+ .attach_fn = attach_lsm),
SEC_DEF("iter/", TRACING,
.expected_attach_type = BPF_TRACE_ITER,
.is_attach_btf = true,
@@ -7100,7 +8477,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
const struct btf *btf;
struct bpf_map *map;
Elf_Data *symbols;
- unsigned int moff;
+ unsigned int moff, insn_idx;
const char *name;
__u32 member_idx;
GElf_Sym sym;
@@ -7122,8 +8499,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
return -LIBBPF_ERRNO__FORMAT;
}
- name = elf_strptr(obj->efile.elf, obj->efile.strtabidx,
- sym.st_name) ? : "<?>";
+ name = elf_sym_str(obj, sym.st_name) ?: "<?>";
map = find_struct_ops_map_by_offset(obj, rel.r_offset);
if (!map) {
pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
@@ -7146,6 +8522,12 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
map->name, (size_t)rel.r_offset, shdr_idx);
return -LIBBPF_ERRNO__RELOC;
}
+ if (sym.st_value % BPF_INSN_SZ) {
+ pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
+ map->name, (unsigned long long)sym.st_value);
+ return -LIBBPF_ERRNO__FORMAT;
+ }
+ insn_idx = sym.st_value / BPF_INSN_SZ;
member = find_member_by_offset(st_ops->type, moff * 8);
if (!member) {
@@ -7162,7 +8544,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
return -EINVAL;
}
- prog = bpf_object__find_prog_by_idx(obj, shdr_idx);
+ prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
if (!prog) {
pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
map->name, shdr_idx, name);
@@ -7172,7 +8554,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
if (prog->type == BPF_PROG_TYPE_UNSPEC) {
const struct bpf_sec_def *sec_def;
- sec_def = find_sec_def(prog->section_name);
+ sec_def = find_sec_def(prog->sec_name);
if (sec_def &&
sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
/* for pr_warn */
@@ -7195,7 +8577,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
invalid_prog:
pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
- map->name, prog->name, prog->section_name, prog->type,
+ map->name, prog->name, prog->sec_name, prog->type,
prog->attach_btf_id, prog->expected_attach_type, name);
return -EINVAL;
}
@@ -7299,7 +8681,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog)
{
enum bpf_attach_type attach_type = prog->expected_attach_type;
__u32 attach_prog_fd = prog->attach_prog_fd;
- const char *name = prog->section_name;
+ const char *name = prog->sec_name;
int i, err;
if (!name)
@@ -7640,7 +9022,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
prog->prog_ifindex = attr->ifindex;
prog->log_level = attr->log_level;
- prog->prog_flags = attr->prog_flags;
+ prog->prog_flags |= attr->prog_flags;
if (!first_prog)
first_prog = prog;
}
@@ -7826,14 +9208,14 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
int prog_fd, err;
if (pfd < 0) {
- pr_warn("program '%s': invalid perf event FD %d\n",
- bpf_program__title(prog, false), pfd);
+ pr_warn("prog '%s': invalid perf event FD %d\n",
+ prog->name, pfd);
return ERR_PTR(-EINVAL);
}
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach BPF program w/o FD (did you load it?)\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
+ prog->name);
return ERR_PTR(-EINVAL);
}
@@ -7846,20 +9228,18 @@ struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
err = -errno;
free(link);
- pr_warn("program '%s': failed to attach to pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
+ prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
if (err == -EPROTO)
- pr_warn("program '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
- bpf_program__title(prog, false), pfd);
+ pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
+ prog->name, pfd);
return ERR_PTR(err);
}
if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
err = -errno;
free(link);
- pr_warn("program '%s': failed to enable pfd %d: %s\n",
- bpf_program__title(prog, false), pfd,
- libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to enable pfd %d: %s\n",
+ prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return ERR_PTR(err);
}
return link;
@@ -7981,9 +9361,8 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
0 /* offset */, -1 /* pid */);
if (pfd < 0) {
- pr_warn("program '%s': failed to create %s '%s' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
+ pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
@@ -7991,9 +9370,8 @@ struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to %s '%s': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "kretprobe" : "kprobe", func_name,
+ pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
+ prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
}
@@ -8006,7 +9384,7 @@ static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
const char *func_name;
bool retprobe;
- func_name = bpf_program__title(prog, false) + sec->len;
+ func_name = prog->sec_name + sec->len;
retprobe = strcmp(sec->sec, "kretprobe/") == 0;
return bpf_program__attach_kprobe(prog, retprobe, func_name);
@@ -8024,9 +9402,8 @@ struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
pfd = perf_event_open_probe(true /* uprobe */, retprobe,
binary_path, func_offset, pid);
if (pfd < 0) {
- pr_warn("program '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
+ pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
+ prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
@@ -8035,9 +9412,8 @@ struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to %s '%s:0x%zx': %s\n",
- bpf_program__title(prog, false),
- retprobe ? "uretprobe" : "uprobe",
+ pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
+ prog->name, retprobe ? "uretprobe" : "uprobe",
binary_path, func_offset,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
@@ -8105,9 +9481,8 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
pfd = perf_event_open_tracepoint(tp_category, tp_name);
if (pfd < 0) {
- pr_warn("program '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
+ pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
+ prog->name, tp_category, tp_name,
libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
@@ -8115,9 +9490,8 @@ struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
if (IS_ERR(link)) {
close(pfd);
err = PTR_ERR(link);
- pr_warn("program '%s': failed to attach to tracepoint '%s/%s': %s\n",
- bpf_program__title(prog, false),
- tp_category, tp_name,
+ pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
+ prog->name, tp_category, tp_name,
libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
return link;
}
@@ -8130,7 +9504,7 @@ static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
char *sec_name, *tp_cat, *tp_name;
struct bpf_link *link;
- sec_name = strdup(bpf_program__title(prog, false));
+ sec_name = strdup(prog->sec_name);
if (!sec_name)
return ERR_PTR(-ENOMEM);
@@ -8159,8 +9533,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8173,9 +9546,8 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to raw tracepoint '%s': %s\n",
- bpf_program__title(prog, false), tp_name,
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
+ prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link->fd = pfd;
@@ -8185,7 +9557,7 @@ struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
struct bpf_program *prog)
{
- const char *tp_name = bpf_program__title(prog, false) + sec->len;
+ const char *tp_name = prog->sec_name + sec->len;
return bpf_program__attach_raw_tracepoint(prog, tp_name);
}
@@ -8199,8 +9571,7 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8213,9 +9584,8 @@ static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
if (pfd < 0) {
pfd = -errno;
free(link);
- pr_warn("program '%s': failed to attach: %s\n",
- bpf_program__title(prog, false),
- libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach: %s\n",
+ prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
return ERR_PTR(pfd);
}
link->fd = pfd;
@@ -8251,9 +9621,11 @@ static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
}
static struct bpf_link *
-bpf_program__attach_fd(struct bpf_program *prog, int target_fd,
+bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
const char *target_name)
{
+ DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
+ .target_btf_id = btf_id);
enum bpf_attach_type attach_type;
char errmsg[STRERR_BUFSIZE];
struct bpf_link *link;
@@ -8261,8 +9633,7 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8272,12 +9643,12 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd,
link->detach = &bpf_link__detach_fd;
attach_type = bpf_program__get_expected_attach_type(prog);
- link_fd = bpf_link_create(prog_fd, target_fd, attach_type, NULL);
+ link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
if (link_fd < 0) {
link_fd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to %s: %s\n",
- bpf_program__title(prog, false), target_name,
+ pr_warn("prog '%s': failed to attach to %s: %s\n",
+ prog->name, target_name,
libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
return ERR_PTR(link_fd);
}
@@ -8288,19 +9659,51 @@ bpf_program__attach_fd(struct bpf_program *prog, int target_fd,
struct bpf_link *
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
{
- return bpf_program__attach_fd(prog, cgroup_fd, "cgroup");
+ return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
}
struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
{
- return bpf_program__attach_fd(prog, netns_fd, "netns");
+ return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
}
struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
{
/* target_fd/target_ifindex use the same field in LINK_CREATE */
- return bpf_program__attach_fd(prog, ifindex, "xdp");
+ return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
+}
+
+struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
+ int target_fd,
+ const char *attach_func_name)
+{
+ int btf_id;
+
+ if (!!target_fd != !!attach_func_name) {
+ pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
+ prog->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (prog->type != BPF_PROG_TYPE_EXT) {
+		pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
+ prog->name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (target_fd) {
+ btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
+ if (btf_id < 0)
+ return ERR_PTR(btf_id);
+
+ return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
+ } else {
+ /* no target, so use raw_tracepoint_open for compatibility
+ * with old kernels
+ */
+ return bpf_program__attach_trace(prog);
+ }
}
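
A minimal usage sketch of the new freplace attach API. The object file, program name, replaced function, and target_prog_fd below are illustrative assumptions, not part of this patch; error handling is abbreviated:

/* Sketch: attach an SEC("freplace/...") program to a function of an
 * already-loaded target program. "freplace.bpf.o", "new_resolve" and
 * "resolve" are hypothetical names; target_prog_fd is assumed to be
 * the FD of the loaded target program.
 */
struct bpf_object *obj = bpf_object__open_file("freplace.bpf.o", NULL);
struct bpf_program *prog;
struct bpf_link *link;

if (bpf_object__load(obj))
	return -1;
prog = bpf_object__find_program_by_name(obj, "new_resolve");
link = bpf_program__attach_freplace(prog, target_prog_fd, "resolve");
if (libbpf_get_error(link))
	return -1; /* e.g. "resolve" not found in the target's BTF */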
struct bpf_link *
@@ -8321,8 +9724,7 @@ bpf_program__attach_iter(struct bpf_program *prog,
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {
- pr_warn("program '%s': can't attach before loaded\n",
- bpf_program__title(prog, false));
+ pr_warn("prog '%s': can't attach before loaded\n", prog->name);
return ERR_PTR(-EINVAL);
}
@@ -8336,9 +9738,8 @@ bpf_program__attach_iter(struct bpf_program *prog,
if (link_fd < 0) {
link_fd = -errno;
free(link);
- pr_warn("program '%s': failed to attach to iterator: %s\n",
- bpf_program__title(prog, false),
- libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
+ pr_warn("prog '%s': failed to attach to iterator: %s\n",
+ prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
return ERR_PTR(link_fd);
}
link->fd = link_fd;
@@ -8349,7 +9750,7 @@ struct bpf_link *bpf_program__attach(struct bpf_program *prog)
{
const struct bpf_sec_def *sec_def;
- sec_def = find_sec_def(bpf_program__title(prog, false));
+ sec_def = find_sec_def(prog->sec_name);
if (!sec_def || !sec_def->attach_fn)
return ERR_PTR(-ESRCH);
@@ -8594,7 +9995,7 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params p = {};
struct perf_event_attr attr = { 0, };
- attr.config = PERF_COUNT_SW_BPF_OUTPUT,
+ attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
attr.sample_type = PERF_SAMPLE_RAW;
attr.sample_period = 1;
@@ -8832,6 +10233,11 @@ static int perf_buffer__process_records(struct perf_buffer *pb,
return 0;
}
+int perf_buffer__epoll_fd(const struct perf_buffer *pb)
+{
+ return pb->epoll_fd;
+}
+
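
The new accessor lets callers plug the perf_buffer into their own event loop instead of blocking in perf_buffer__poll(); a minimal sketch, assuming pb was created with perf_buffer__new():

/* Sketch: register pb's epoll FD with a caller-owned epoll instance,
 * then drain pending data with a zero timeout once it signals ready.
 */
struct epoll_event ev = { .events = EPOLLIN };
int loop_fd = epoll_create1(EPOLL_CLOEXEC);

epoll_ctl(loop_fd, EPOLL_CTL_ADD, perf_buffer__epoll_fd(pb), &ev);
/* ... in the event loop, when this FD becomes readable: */
perf_buffer__poll(pb, 0 /* events are already pending, don't block */);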
int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
int i, cnt, err;
@@ -8849,6 +10255,55 @@ int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
return cnt < 0 ? -errno : cnt;
}
+/* Return the number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
+ * manager.
+ */
+size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
+{
+ return pb->cpu_cnt;
+}
+
+/*
+ * Return the perf_event FD of the ring buffer in slot *buf_idx* of the
+ * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
+ * the select()/poll()/epoll() Linux syscalls.
+ */
+int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
+{
+ struct perf_cpu_buf *cpu_buf;
+
+ if (buf_idx >= pb->cpu_cnt)
+ return -EINVAL;
+
+ cpu_buf = pb->cpu_bufs[buf_idx];
+ if (!cpu_buf)
+ return -ENOENT;
+
+ return cpu_buf->fd;
+}
+
+/*
+ * Consume data from the perf ring buffer corresponding to slot *buf_idx* in
+ * the PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
+ * consume, do nothing and return success.
+ * Returns:
+ * - 0 on success;
+ * - <0 on failure.
+ */
+int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
+{
+ struct perf_cpu_buf *cpu_buf;
+
+ if (buf_idx >= pb->cpu_cnt)
+ return -EINVAL;
+
+ cpu_buf = pb->cpu_bufs[buf_idx];
+ if (!cpu_buf)
+ return -ENOENT;
+
+ return perf_buffer__process_records(pb, cpu_buf);
+}
+
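
Combined with perf_buffer__buffer_cnt() and perf_buffer__buffer_fd() above, this enables fully manual per-buffer processing; a sketch:

/* Sketch: check each per-CPU ring individually and consume only those
 * that actually have data pending.
 */
size_t i, n = perf_buffer__buffer_cnt(pb);

for (i = 0; i < n; i++) {
	struct pollfd pfd = { .fd = perf_buffer__buffer_fd(pb, i), .events = POLLIN };

	if (pfd.fd < 0)
		continue; /* slot not set up, e.g. CPU was offline */
	if (poll(&pfd, 1, 0) > 0 && (pfd.revents & POLLIN))
		perf_buffer__consume_buffer(pb, i);
}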
int perf_buffer__consume(struct perf_buffer *pb)
{
int i, err;
@@ -8861,7 +10316,7 @@ int perf_buffer__consume(struct perf_buffer *pb)
err = perf_buffer__process_records(pb, cpu_buf);
if (err) {
- pr_warn("error while processing records: %d\n", err);
+ pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
return err;
}
}
@@ -9129,9 +10584,8 @@ int bpf_program__set_attach_target(struct bpf_program *prog,
btf_id = libbpf_find_prog_btf_id(attach_func_name,
attach_prog_fd);
else
- btf_id = __find_vmlinux_btf_id(prog->obj->btf_vmlinux,
- attach_func_name,
- prog->expected_attach_type);
+ btf_id = libbpf_find_vmlinux_btf_id(attach_func_name,
+ prog->expected_attach_type);
if (btf_id < 0)
return btf_id;
@@ -9365,12 +10819,11 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
const struct bpf_sec_def *sec_def;
- const char *sec_name = bpf_program__title(prog, false);
if (!prog->load)
continue;
- sec_def = find_sec_def(sec_name);
+ sec_def = find_sec_def(prog->sec_name);
if (!sec_def || !sec_def->attach_fn)
continue;
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 5ecb4069a9f0..6909ee81113a 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -198,8 +198,9 @@ LIBBPF_API void bpf_program__set_ifindex(struct bpf_program *prog,
__u32 ifindex);
LIBBPF_API const char *bpf_program__name(const struct bpf_program *prog);
-LIBBPF_API const char *bpf_program__title(const struct bpf_program *prog,
- bool needs_copy);
+LIBBPF_API const char *bpf_program__section_name(const struct bpf_program *prog);
+LIBBPF_API LIBBPF_DEPRECATED("BPF program title is confusing term; please use bpf_program__section_name() instead")
+const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy);
LIBBPF_API bool bpf_program__autoload(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_autoload(struct bpf_program *prog, bool autoload);
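
Migrating callers is mechanical:

/* Before (now triggers a deprecation warning at compile time): */
sec = bpf_program__title(prog, false);
/* After: */
sec = bpf_program__section_name(prog);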
@@ -260,6 +261,9 @@ LIBBPF_API struct bpf_link *
bpf_program__attach_netns(struct bpf_program *prog, int netns_fd);
LIBBPF_API struct bpf_link *
bpf_program__attach_xdp(struct bpf_program *prog, int ifindex);
+LIBBPF_API struct bpf_link *
+bpf_program__attach_freplace(struct bpf_program *prog,
+ int target_fd, const char *attach_func_name);
struct bpf_map;
@@ -588,8 +592,12 @@ perf_buffer__new_raw(int map_fd, size_t page_cnt,
const struct perf_buffer_raw_opts *opts);
LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
LIBBPF_API int perf_buffer__consume(struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx);
+LIBBPF_API size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb);
+LIBBPF_API int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx);
typedef enum bpf_perf_event_ret
(*bpf_perf_event_print_t)(struct perf_event_header *hdr,
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index e35bd6cdbdbf..4ebfadf45b47 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -299,3 +299,41 @@ LIBBPF_0.1.0 {
btf__set_fd;
btf__set_pointer_size;
} LIBBPF_0.0.9;
+
+LIBBPF_0.2.0 {
+ global:
+ bpf_prog_bind_map;
+ bpf_prog_test_run_opts;
+ bpf_program__attach_freplace;
+ bpf_program__section_name;
+ btf__add_array;
+ btf__add_const;
+ btf__add_enum;
+ btf__add_enum_value;
+ btf__add_datasec;
+ btf__add_datasec_var_info;
+ btf__add_field;
+ btf__add_func;
+ btf__add_func_param;
+ btf__add_func_proto;
+ btf__add_fwd;
+ btf__add_int;
+ btf__add_ptr;
+ btf__add_restrict;
+ btf__add_str;
+ btf__add_struct;
+ btf__add_typedef;
+ btf__add_union;
+ btf__add_var;
+ btf__add_volatile;
+ btf__endianness;
+ btf__find_str;
+ btf__new_empty;
+ btf__set_endianness;
+ btf__str_by_offset;
+ perf_buffer__buffer_cnt;
+ perf_buffer__buffer_fd;
+ perf_buffer__epoll_fd;
+ perf_buffer__consume_buffer;
+ xsk_socket__create_shared;
+} LIBBPF_0.1.0;
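
The btf__add_*() entries above form the new BTF writing API; a minimal sketch building an 'int' type and a one-member struct from scratch (error handling omitted):

/* Sketch: programmatic BTF construction with the 0.2.0 writer APIs. */
struct btf *btf = btf__new_empty();
int int_id, st_id;

int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);      /* type [1] */
st_id = btf__add_struct(btf, "sample", 4 /* byte size */); /* type [2] */
btf__add_field(btf, "a", int_id, 0 /* bit offset */, 0 /* regular member */);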
diff --git a/tools/lib/bpf/libbpf_common.h b/tools/lib/bpf/libbpf_common.h
index a23ae1ac27eb..947d8bd8a7bb 100644
--- a/tools/lib/bpf/libbpf_common.h
+++ b/tools/lib/bpf/libbpf_common.h
@@ -15,6 +15,8 @@
#define LIBBPF_API __attribute__((visibility("default")))
#endif
+#define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg)))
+
/* Helper macro to declare and initialize libbpf options struct
*
* This dance with uninitialized declaration, followed by memset to zero,
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 50d70e90d5f1..d99bc847bf84 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -9,6 +9,15 @@
#ifndef __LIBBPF_LIBBPF_INTERNAL_H
#define __LIBBPF_LIBBPF_INTERNAL_H
+#include <stdlib.h>
+#include <limits.h>
+
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+/* prevent accidental re-addition of reallocarray() */
+#pragma GCC poison reallocarray
+
#include "libbpf.h"
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@@ -23,6 +32,12 @@
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
@@ -63,6 +78,37 @@ do { \
#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+/*
+ * Re-implement glibc's reallocarray() for libbpf internal-only use.
+ * reallocarray(), unfortunately, is not available in all versions of glibc,
+ * so it requires extra feature detection and use of the reallocarray() stub
+ * from <tools/libc_compat.h> guarded by COMPAT_NEED_REALLOCARRAY. All this
+ * unnecessarily complicates the build of libbpf and is just a maintenance
+ * burden. Instead, it's trivial to implement a libbpf-specific internal
+ * version and use it throughout libbpf.
+ */
+static inline void *libbpf_reallocarray(void *ptr, size_t nmemb, size_t size)
+{
+ size_t total;
+
+#if __has_builtin(__builtin_mul_overflow)
+ if (unlikely(__builtin_mul_overflow(nmemb, size, &total)))
+ return NULL;
+#else
+ if (size == 0 || nmemb > ULONG_MAX / size)
+ return NULL;
+ total = nmemb * size;
+#endif
+ return realloc(ptr, total);
+}
+
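
Callers use it like realloc() with a separate count and element size; a multiplication overflow yields NULL instead of a silently truncated allocation:

/* Sketch: overflow-safe growth of an array by one element. */
void *tmp;

tmp = libbpf_reallocarray(arr, cnt + 1, sizeof(*arr));
if (!tmp)
	return -ENOMEM; /* OOM, or (cnt + 1) * sizeof(*arr) overflowed */
arr = tmp;
cnt++;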
+void *btf_add_mem(void **data, size_t *cap_cnt, size_t elem_sz,
+ size_t cur_cnt, size_t max_cnt, size_t add_cnt);
+int btf_ensure_mem(void **data, size_t *cap_cnt, size_t elem_sz, size_t need_cnt);
+
static inline bool libbpf_validate_opts(const char *opts,
size_t opts_sz, size_t user_sz,
const char *type_name)
@@ -94,6 +140,11 @@ static inline bool libbpf_validate_opts(const char *opts,
((opts) && opts->sz >= offsetofend(typeof(*(opts)), field))
#define OPTS_GET(opts, field, fallback_value) \
(OPTS_HAS(opts, field) ? (opts)->field : fallback_value)
+#define OPTS_SET(opts, field, value) \
+ do { \
+ if (OPTS_HAS(opts, field)) \
+ (opts)->field = value; \
+ } while (0)
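
OPTS_SET is the write-side counterpart of OPTS_GET: the store is skipped when the caller's opts struct (sized by opts->sz) is too old to contain the field. A sketch with a hypothetical opts type:

/* Sketch: safely report an optional output value back to the caller.
 * "struct foo_opts" and its "out_fd" field are illustrative only.
 */
static int foo_do_thing(struct foo_opts *opts)
{
	int fd = 42; /* some computed result */

	OPTS_SET(opts, out_fd, fd); /* no-op for callers built against an older struct */
	return 0;
}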
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz);
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
@@ -105,18 +156,6 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name,
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
-struct nlattr;
-typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
-int libbpf_netlink_open(unsigned int *nl_pid);
-int libbpf_nl_get_link(int sock, unsigned int nl_pid,
- libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
-int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie);
-int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie);
-int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
- libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie);
-
struct btf_ext_info {
/*
* info points to the individual info section (e.g. func_info and
@@ -138,6 +177,44 @@ struct btf_ext_info {
i < (sec)->num_info; \
i++, rec = (void *)rec + (seg)->rec_size)
+/*
+ * The .BTF.ext ELF section layout defined as
+ * struct btf_ext_header
+ * func_info subsection
+ *
+ * The func_info subsection layout:
+ * record size for struct bpf_func_info in the func_info subsection
+ * struct btf_sec_func_info for section #1
+ * a list of bpf_func_info records for section #1
+ * where struct bpf_func_info mimics one in include/uapi/linux/bpf.h
+ * but may not be identical
+ * struct btf_sec_func_info for section #2
+ * a list of bpf_func_info records for section #2
+ * ......
+ *
+ * Note that the bpf_func_info record size in .BTF.ext may not
+ * be the same as the one defined in include/uapi/linux/bpf.h.
+ * The loader should ensure that record_size meets the minimum
+ * requirement and pass the record as-is to the kernel. The
+ * kernel will handle the func_info properly based on its contents.
+ */
+struct btf_ext_header {
+ __u16 magic;
+ __u8 version;
+ __u8 flags;
+ __u32 hdr_len;
+
+ /* All offsets are in bytes relative to the end of this header */
+ __u32 func_info_off;
+ __u32 func_info_len;
+ __u32 line_info_off;
+ __u32 line_info_len;
+
+ /* optional part of .BTF.ext header */
+ __u32 core_relo_off;
+ __u32 core_relo_len;
+};
+
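
All subsection offsets are relative to the end of the header, so locating a subsection is a two-step addition; a sketch (not libbpf's actual parser):

/* Sketch: find the func_info subsection inside raw .BTF.ext data. */
static const void *btf_ext_func_info_data(const void *data, __u32 *len)
{
	const struct btf_ext_header *hdr = data;

	*len = hdr->func_info_len;
	return (const char *)data + hdr->hdr_len + hdr->func_info_off;
}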
struct btf_ext {
union {
struct btf_ext_header *hdr;
@@ -145,7 +222,7 @@ struct btf_ext {
};
struct btf_ext_info func_info;
struct btf_ext_info line_info;
- struct btf_ext_info field_reloc_info;
+ struct btf_ext_info core_relo_info;
__u32 data_size;
};
@@ -170,32 +247,40 @@ struct bpf_line_info_min {
__u32 line_col;
};
-/* bpf_field_info_kind encodes which aspect of captured field has to be
- * adjusted by relocations. Currently supported values are:
- * - BPF_FIELD_BYTE_OFFSET: field offset (in bytes);
- * - BPF_FIELD_EXISTS: field existence (1, if field exists; 0, otherwise);
+/* bpf_core_relo_kind encodes which aspect of captured field/type/enum value
+ * has to be adjusted by relocations.
*/
-enum bpf_field_info_kind {
+enum bpf_core_relo_kind {
BPF_FIELD_BYTE_OFFSET = 0, /* field byte offset */
- BPF_FIELD_BYTE_SIZE = 1,
+ BPF_FIELD_BYTE_SIZE = 1, /* field size in bytes */
BPF_FIELD_EXISTS = 2, /* field existence in target kernel */
- BPF_FIELD_SIGNED = 3,
- BPF_FIELD_LSHIFT_U64 = 4,
- BPF_FIELD_RSHIFT_U64 = 5,
+ BPF_FIELD_SIGNED = 3, /* field signedness (0 - unsigned, 1 - signed) */
+ BPF_FIELD_LSHIFT_U64 = 4, /* bitfield-specific left bitshift */
+ BPF_FIELD_RSHIFT_U64 = 5, /* bitfield-specific right bitshift */
+ BPF_TYPE_ID_LOCAL = 6, /* type ID in local BPF object */
+ BPF_TYPE_ID_TARGET = 7, /* type ID in target kernel */
+ BPF_TYPE_EXISTS = 8, /* type existence in target kernel */
+ BPF_TYPE_SIZE = 9, /* type size in bytes */
+ BPF_ENUMVAL_EXISTS = 10, /* enum value existence in target kernel */
+ BPF_ENUMVAL_VALUE = 11, /* enum value integer value */
};
-/* The minimum bpf_field_reloc checked by the loader
+/* The minimum bpf_core_relo checked by the loader
*
- * Field relocation captures the following data:
+ * CO-RE relocation captures the following data:
* - insn_off - instruction offset (in bytes) within a BPF program that needs
* its insn->imm field to be relocated with actual field info;
* - type_id - BTF type ID of the "root" (containing) entity of a relocatable
- * field;
+ * type or field;
* - access_str_off - offset into corresponding .BTF string section. String
- * itself encodes an accessed field using a sequence of field and array
- * indicies, separated by colon (:). It's conceptually very close to LLVM's
- * getelementptr ([0]) instruction's arguments for identifying offset to
- * a field.
+ *   interpretation depends on the specific relocation kind:
+ *     - for field-based relocations, the string encodes an accessed field
+ *       using a sequence of field and array indices, separated by colon (:).
+ *       It's conceptually very close to LLVM's getelementptr ([0])
+ *       instruction's arguments for identifying the offset to a field.
+ *     - for type-based relocations, the string is expected to be just "0";
+ *     - for enum value-based relocations, the string contains the index of
+ *       the enum value within its enum type;
*
* Example to provide a better feel.
*
@@ -226,11 +311,11 @@ enum bpf_field_info_kind {
*
* [0] https://llvm.org/docs/LangRef.html#getelementptr-instruction
*/
-struct bpf_field_reloc {
+struct bpf_core_relo {
__u32 insn_off;
__u32 type_id;
__u32 access_str_off;
- enum bpf_field_info_kind kind;
+ enum bpf_core_relo_kind kind;
};
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
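
To make the access string encoding concrete, a hypothetical illustration (the struct and indices below are not from this patch):

/* For:
 *     struct sample { int a; struct { int b[4]; } inner; };
 * an access to sample->inner.b[2] is encoded as "0:1:0:2":
 *     0 - initial pointer dereference (array index 0);
 *     1 - field 'inner' (second field of struct sample);
 *     0 - field 'b' (first field of the anonymous struct);
 *     2 - array index 2 within 'b'.
 * For a type-based relocation the string is just "0"; for an enum
 * value-based relocation it is the index of the value in its enum type.
 */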
diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c
index 5a3d3f078408..5482a9b7ae2d 100644
--- a/tools/lib/bpf/libbpf_probes.c
+++ b/tools/lib/bpf/libbpf_probes.c
@@ -17,9 +17,6 @@
#include "libbpf.h"
#include "libbpf_internal.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
static bool grep(const char *buffer, const char *pattern)
{
return !!strstr(buffer, pattern);
@@ -173,7 +170,7 @@ int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
return btf_fd;
}
-static int load_sk_storage_btf(void)
+static int load_local_storage_btf(void)
{
const char strs[] = "\0bpf_spin_lock\0val\0cnt\0l";
/* struct bpf_spin_lock {
@@ -232,12 +229,13 @@ bool bpf_probe_map_type(enum bpf_map_type map_type, __u32 ifindex)
key_size = 0;
break;
case BPF_MAP_TYPE_SK_STORAGE:
+ case BPF_MAP_TYPE_INODE_STORAGE:
btf_key_type_id = 1;
btf_value_type_id = 3;
value_size = 8;
max_entries = 0;
map_flags = BPF_F_NO_PREALLOC;
- btf_fd = load_sk_storage_btf();
+ btf_fd = load_local_storage_btf();
if (btf_fd < 0)
return false;
break;
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index 312f887570b2..4dd73de00b6f 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -15,13 +15,12 @@
#include "libbpf_internal.h"
#include "nlattr.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif
+typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
+
typedef int (*__dump_nlmsg_t)(struct nlmsghdr *nlmsg, libbpf_dump_nlmsg_t,
void *cookie);
@@ -31,7 +30,7 @@ struct xdp_id_md {
struct xdp_link_info info;
};
-int libbpf_netlink_open(__u32 *nl_pid)
+static int libbpf_netlink_open(__u32 *nl_pid)
{
struct sockaddr_nl sa;
socklen_t addrlen;
@@ -283,6 +282,9 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
return 0;
}
+static int libbpf_nl_get_link(int sock, unsigned int nl_pid,
+ libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
+
int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags)
{
@@ -368,121 +370,3 @@ int libbpf_nl_get_link(int sock, unsigned int nl_pid,
return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
dump_link_nlmsg, cookie);
}
-
-static int __dump_class_nlmsg(struct nlmsghdr *nlh,
- libbpf_dump_nlmsg_t dump_class_nlmsg,
- void *cookie)
-{
- struct nlattr *tb[TCA_MAX + 1], *attr;
- struct tcmsg *t = NLMSG_DATA(nlh);
- int len;
-
- len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
- attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
- if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
- return -LIBBPF_ERRNO__NLPARSE;
-
- return dump_class_nlmsg(cookie, t, tb);
-}
-
-int libbpf_nl_get_class(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_class_nlmsg, void *cookie)
-{
- struct {
- struct nlmsghdr nlh;
- struct tcmsg t;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
- .nlh.nlmsg_type = RTM_GETTCLASS,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .t.tcm_family = AF_UNSPEC,
- .t.tcm_ifindex = ifindex,
- };
- int seq = time(NULL);
-
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
- return -errno;
-
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_class_nlmsg,
- dump_class_nlmsg, cookie);
-}
-
-static int __dump_qdisc_nlmsg(struct nlmsghdr *nlh,
- libbpf_dump_nlmsg_t dump_qdisc_nlmsg,
- void *cookie)
-{
- struct nlattr *tb[TCA_MAX + 1], *attr;
- struct tcmsg *t = NLMSG_DATA(nlh);
- int len;
-
- len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
- attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
- if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
- return -LIBBPF_ERRNO__NLPARSE;
-
- return dump_qdisc_nlmsg(cookie, t, tb);
-}
-
-int libbpf_nl_get_qdisc(int sock, unsigned int nl_pid, int ifindex,
- libbpf_dump_nlmsg_t dump_qdisc_nlmsg, void *cookie)
-{
- struct {
- struct nlmsghdr nlh;
- struct tcmsg t;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
- .nlh.nlmsg_type = RTM_GETQDISC,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .t.tcm_family = AF_UNSPEC,
- .t.tcm_ifindex = ifindex,
- };
- int seq = time(NULL);
-
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
- return -errno;
-
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_qdisc_nlmsg,
- dump_qdisc_nlmsg, cookie);
-}
-
-static int __dump_filter_nlmsg(struct nlmsghdr *nlh,
- libbpf_dump_nlmsg_t dump_filter_nlmsg,
- void *cookie)
-{
- struct nlattr *tb[TCA_MAX + 1], *attr;
- struct tcmsg *t = NLMSG_DATA(nlh);
- int len;
-
- len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*t));
- attr = (struct nlattr *) ((void *) t + NLMSG_ALIGN(sizeof(*t)));
- if (libbpf_nla_parse(tb, TCA_MAX, attr, len, NULL) != 0)
- return -LIBBPF_ERRNO__NLPARSE;
-
- return dump_filter_nlmsg(cookie, t, tb);
-}
-
-int libbpf_nl_get_filter(int sock, unsigned int nl_pid, int ifindex, int handle,
- libbpf_dump_nlmsg_t dump_filter_nlmsg, void *cookie)
-{
- struct {
- struct nlmsghdr nlh;
- struct tcmsg t;
- } req = {
- .nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
- .nlh.nlmsg_type = RTM_GETTFILTER,
- .nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
- .t.tcm_family = AF_UNSPEC,
- .t.tcm_ifindex = ifindex,
- .t.tcm_parent = handle,
- };
- int seq = time(NULL);
-
- req.nlh.nlmsg_seq = seq;
- if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
- return -errno;
-
- return bpf_netlink_recv(sock, nl_pid, seq, __dump_filter_nlmsg,
- dump_filter_nlmsg, cookie);
-}
diff --git a/tools/lib/bpf/nlattr.c b/tools/lib/bpf/nlattr.c
index 0ad41dfea8eb..b607fa9852b1 100644
--- a/tools/lib/bpf/nlattr.c
+++ b/tools/lib/bpf/nlattr.c
@@ -7,14 +7,11 @@
*/
#include <errno.h>
-#include "nlattr.h"
-#include "libbpf_internal.h"
-#include <linux/rtnetlink.h>
#include <string.h>
#include <stdio.h>
-
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+#include <linux/rtnetlink.h>
+#include "nlattr.h"
+#include "libbpf_internal.h"
static uint16_t nla_attr_minlen[LIBBPF_NLA_TYPE_MAX+1] = {
[LIBBPF_NLA_U8] = sizeof(uint8_t),
diff --git a/tools/lib/bpf/ringbuf.c b/tools/lib/bpf/ringbuf.c
index 4fc6c6cbb4eb..5c6522c89af1 100644
--- a/tools/lib/bpf/ringbuf.c
+++ b/tools/lib/bpf/ringbuf.c
@@ -16,15 +16,11 @@
#include <asm/barrier.h>
#include <sys/mman.h>
#include <sys/epoll.h>
-#include <tools/libc_compat.h>
#include "libbpf.h"
#include "libbpf_internal.h"
#include "bpf.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
struct ring {
ring_buffer_sample_fn sample_cb;
void *ctx;
@@ -82,12 +78,12 @@ int ring_buffer__add(struct ring_buffer *rb, int map_fd,
return -EINVAL;
}
- tmp = reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
+ tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
if (!tmp)
return -ENOMEM;
rb->rings = tmp;
- tmp = reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
+ tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
if (!tmp)
return -ENOMEM;
rb->events = tmp;
diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c
index f7f4efb70a4c..e3c98c007825 100644
--- a/tools/lib/bpf/xsk.c
+++ b/tools/lib/bpf/xsk.c
@@ -20,6 +20,8 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
@@ -32,9 +34,6 @@
#include "libbpf_internal.h"
#include "xsk.h"
-/* make sure libbpf doesn't use kernel-only integer typedefs */
-#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
-
#ifndef SOL_XDP
#define SOL_XDP 283
#endif
@@ -48,26 +47,35 @@
#endif
struct xsk_umem {
- struct xsk_ring_prod *fill;
- struct xsk_ring_cons *comp;
+ struct xsk_ring_prod *fill_save;
+ struct xsk_ring_cons *comp_save;
char *umem_area;
struct xsk_umem_config config;
int fd;
int refcount;
+ struct list_head ctx_list;
+};
+
+struct xsk_ctx {
+ struct xsk_ring_prod *fill;
+ struct xsk_ring_cons *comp;
+ __u32 queue_id;
+ struct xsk_umem *umem;
+ int refcount;
+ int ifindex;
+ struct list_head list;
+ int prog_fd;
+ int xsks_map_fd;
+ char ifname[IFNAMSIZ];
};
struct xsk_socket {
struct xsk_ring_cons *rx;
struct xsk_ring_prod *tx;
__u64 outstanding_tx;
- struct xsk_umem *umem;
+ struct xsk_ctx *ctx;
struct xsk_socket_config config;
int fd;
- int ifindex;
- int prog_fd;
- int xsks_map_fd;
- __u32 queue_id;
- char ifname[IFNAMSIZ];
};
struct xsk_nl_info {
@@ -203,15 +211,73 @@ static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
return -EINVAL;
}
+static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp)
+{
+ struct xdp_mmap_offsets off;
+ void *map;
+ int err;
+
+ err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
+ &umem->config.fill_size,
+ sizeof(umem->config.fill_size));
+ if (err)
+ return -errno;
+
+ err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
+ &umem->config.comp_size,
+ sizeof(umem->config.comp_size));
+ if (err)
+ return -errno;
+
+ err = xsk_get_mmap_offsets(fd, &off);
+ if (err)
+ return -errno;
+
+ map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ XDP_UMEM_PGOFF_FILL_RING);
+ if (map == MAP_FAILED)
+ return -errno;
+
+ fill->mask = umem->config.fill_size - 1;
+ fill->size = umem->config.fill_size;
+ fill->producer = map + off.fr.producer;
+ fill->consumer = map + off.fr.consumer;
+ fill->flags = map + off.fr.flags;
+ fill->ring = map + off.fr.desc;
+ fill->cached_cons = umem->config.fill_size;
+
+ map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
+ XDP_UMEM_PGOFF_COMPLETION_RING);
+ if (map == MAP_FAILED) {
+ err = -errno;
+ goto out_mmap;
+ }
+
+ comp->mask = umem->config.comp_size - 1;
+ comp->size = umem->config.comp_size;
+ comp->producer = map + off.cr.producer;
+ comp->consumer = map + off.cr.consumer;
+ comp->flags = map + off.cr.flags;
+ comp->ring = map + off.cr.desc;
+
+ return 0;
+
+out_mmap:
+ munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
+ return err;
+}
+
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
__u64 size, struct xsk_ring_prod *fill,
struct xsk_ring_cons *comp,
const struct xsk_umem_config *usr_config)
{
- struct xdp_mmap_offsets off;
struct xdp_umem_reg mr;
struct xsk_umem *umem;
- void *map;
int err;
if (!umem_area || !umem_ptr || !fill || !comp)
@@ -230,6 +296,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
}
umem->umem_area = umem_area;
+ INIT_LIST_HEAD(&umem->ctx_list);
xsk_set_umem_config(&umem->config, usr_config);
memset(&mr, 0, sizeof(mr));
@@ -244,71 +311,16 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
err = -errno;
goto out_socket;
}
- err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
- &umem->config.fill_size,
- sizeof(umem->config.fill_size));
- if (err) {
- err = -errno;
- goto out_socket;
- }
- err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
- &umem->config.comp_size,
- sizeof(umem->config.comp_size));
- if (err) {
- err = -errno;
- goto out_socket;
- }
- err = xsk_get_mmap_offsets(umem->fd, &off);
- if (err) {
- err = -errno;
- goto out_socket;
- }
-
- map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
- XDP_UMEM_PGOFF_FILL_RING);
- if (map == MAP_FAILED) {
- err = -errno;
+ err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
+ if (err)
goto out_socket;
- }
-
- umem->fill = fill;
- fill->mask = umem->config.fill_size - 1;
- fill->size = umem->config.fill_size;
- fill->producer = map + off.fr.producer;
- fill->consumer = map + off.fr.consumer;
- fill->flags = map + off.fr.flags;
- fill->ring = map + off.fr.desc;
- fill->cached_prod = *fill->producer;
- /* cached_cons is "size" bigger than the real consumer pointer
- * See xsk_prod_nb_free
- */
- fill->cached_cons = *fill->consumer + umem->config.fill_size;
-
- map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
- PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
- XDP_UMEM_PGOFF_COMPLETION_RING);
- if (map == MAP_FAILED) {
- err = -errno;
- goto out_mmap;
- }
-
- umem->comp = comp;
- comp->mask = umem->config.comp_size - 1;
- comp->size = umem->config.comp_size;
- comp->producer = map + off.cr.producer;
- comp->consumer = map + off.cr.consumer;
- comp->flags = map + off.cr.flags;
- comp->ring = map + off.cr.desc;
- comp->cached_prod = *comp->producer;
- comp->cached_cons = *comp->consumer;
+ umem->fill_save = fill;
+ umem->comp_save = comp;
*umem_ptr = umem;
return 0;
-out_mmap:
- munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
out_socket:
close(umem->fd);
out_umem_alloc:
@@ -342,6 +354,7 @@ DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
static const int log_buf_size = 16 * 1024;
+ struct xsk_ctx *ctx = xsk->ctx;
char log_buf[log_buf_size];
int err, prog_fd;
@@ -369,7 +382,7 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
/* *(u32 *)(r10 - 4) = r2 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
/* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
/* r3 = XDP_PASS */
BPF_MOV64_IMM(BPF_REG_3, 2),
/* call bpf_redirect_map */
@@ -381,7 +394,7 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
/* r2 += -4 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
/* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
/* call bpf_map_lookup_elem */
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
/* r1 = r0 */
@@ -393,7 +406,7 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
/* r2 = *(u32 *)(r10 - 4) */
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
/* r1 = xskmap[] */
- BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
+ BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
/* r3 = 0 */
BPF_MOV64_IMM(BPF_REG_3, 0),
/* call bpf_redirect_map */
@@ -411,19 +424,21 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
return prog_fd;
}
- err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
+ err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, prog_fd,
+ xsk->config.xdp_flags);
if (err) {
close(prog_fd);
return err;
}
- xsk->prog_fd = prog_fd;
+ ctx->prog_fd = prog_fd;
return 0;
}
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
+ struct xsk_ctx *ctx = xsk->ctx;
struct ifreq ifr = {};
int fd, err, ret;
@@ -432,7 +447,7 @@ static int xsk_get_max_queues(struct xsk_socket *xsk)
return -errno;
ifr.ifr_data = (void *)&channels;
- memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
+ memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
ifr.ifr_name[IFNAMSIZ - 1] = '\0';
err = ioctl(fd, SIOCETHTOOL, &ifr);
if (err && errno != EOPNOTSUPP) {
@@ -460,6 +475,7 @@ out:
static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
+ struct xsk_ctx *ctx = xsk->ctx;
int max_queues;
int fd;
@@ -472,15 +488,17 @@ static int xsk_create_bpf_maps(struct xsk_socket *xsk)
if (fd < 0)
return fd;
- xsk->xsks_map_fd = fd;
+ ctx->xsks_map_fd = fd;
return 0;
}
static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
- bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
- close(xsk->xsks_map_fd);
+ struct xsk_ctx *ctx = xsk->ctx;
+
+ bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
+ close(ctx->xsks_map_fd);
}
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
@@ -488,10 +506,11 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
__u32 map_len = sizeof(struct bpf_map_info);
struct bpf_prog_info prog_info = {};
+ struct xsk_ctx *ctx = xsk->ctx;
struct bpf_map_info map_info;
int fd, err;
- err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
if (err)
return err;
@@ -505,11 +524,11 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
prog_info.nr_map_ids = num_maps;
prog_info.map_ids = (__u64)(unsigned long)map_ids;
- err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
+ err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
if (err)
goto out_map_ids;
- xsk->xsks_map_fd = -1;
+ ctx->xsks_map_fd = -1;
for (i = 0; i < prog_info.nr_map_ids; i++) {
fd = bpf_map_get_fd_by_id(map_ids[i]);
@@ -523,7 +542,7 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
}
if (!strcmp(map_info.name, "xsks_map")) {
- xsk->xsks_map_fd = fd;
+ ctx->xsks_map_fd = fd;
continue;
}
@@ -531,7 +550,7 @@ static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
}
err = 0;
- if (xsk->xsks_map_fd == -1)
+ if (ctx->xsks_map_fd == -1)
err = -ENOENT;
out_map_ids:
@@ -541,16 +560,19 @@ out_map_ids:
static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
- return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
+ struct xsk_ctx *ctx = xsk->ctx;
+
+ return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
&xsk->fd, 0);
}
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
+ struct xsk_ctx *ctx = xsk->ctx;
__u32 prog_id = 0;
int err;
- err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
+ err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id,
xsk->config.xdp_flags);
if (err)
return err;
@@ -566,12 +588,12 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
return err;
}
} else {
- xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
- if (xsk->prog_fd < 0)
+ ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
+ if (ctx->prog_fd < 0)
return -errno;
err = xsk_lookup_bpf_maps(xsk);
if (err) {
- close(xsk->prog_fd);
+ close(ctx->prog_fd);
return err;
}
}
@@ -580,23 +602,108 @@ static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
err = xsk_set_bpf_maps(xsk);
if (err) {
xsk_delete_bpf_maps(xsk);
- close(xsk->prog_fd);
+ close(ctx->prog_fd);
return err;
}
return 0;
}
-int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
- __u32 queue_id, struct xsk_umem *umem,
- struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
- const struct xsk_socket_config *usr_config)
+static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
+ __u32 queue_id)
+{
+ struct xsk_ctx *ctx;
+
+ if (list_empty(&umem->ctx_list))
+ return NULL;
+
+ list_for_each_entry(ctx, &umem->ctx_list, list) {
+ if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
+ ctx->refcount++;
+ return ctx;
+ }
+ }
+
+ return NULL;
+}
+
+static void xsk_put_ctx(struct xsk_ctx *ctx)
+{
+ struct xsk_umem *umem = ctx->umem;
+ struct xdp_mmap_offsets off;
+ int err;
+
+ if (--ctx->refcount == 0) {
+ err = xsk_get_mmap_offsets(umem->fd, &off);
+ if (!err) {
+ munmap(ctx->fill->ring - off.fr.desc,
+ off.fr.desc + umem->config.fill_size *
+ sizeof(__u64));
+ munmap(ctx->comp->ring - off.cr.desc,
+ off.cr.desc + umem->config.comp_size *
+ sizeof(__u64));
+ }
+
+ list_del(&ctx->list);
+ free(ctx);
+ }
+}
+
+static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
+ struct xsk_umem *umem, int ifindex,
+ const char *ifname, __u32 queue_id,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp)
+{
+ struct xsk_ctx *ctx;
+ int err;
+
+ ctx = calloc(1, sizeof(*ctx));
+ if (!ctx)
+ return NULL;
+
+ if (!umem->fill_save) {
+ err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
+ if (err) {
+ free(ctx);
+ return NULL;
+ }
+ } else if (umem->fill_save != fill || umem->comp_save != comp) {
+ /* Copy over rings to new structs. */
+ memcpy(fill, umem->fill_save, sizeof(*fill));
+ memcpy(comp, umem->comp_save, sizeof(*comp));
+ }
+
+ ctx->ifindex = ifindex;
+ ctx->refcount = 1;
+ ctx->umem = umem;
+ ctx->queue_id = queue_id;
+ memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
+ ctx->ifname[IFNAMSIZ - 1] = '\0';
+
+ umem->fill_save = NULL;
+ umem->comp_save = NULL;
+ ctx->fill = fill;
+ ctx->comp = comp;
+ list_add(&ctx->list, &umem->ctx_list);
+ return ctx;
+}
+
+int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ const char *ifname,
+ __u32 queue_id, struct xsk_umem *umem,
+ struct xsk_ring_cons *rx,
+ struct xsk_ring_prod *tx,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_socket_config *usr_config)
{
void *rx_map = NULL, *tx_map = NULL;
struct sockaddr_xdp sxdp = {};
struct xdp_mmap_offsets off;
struct xsk_socket *xsk;
- int err;
+ struct xsk_ctx *ctx;
+ int err, ifindex;
if (!umem || !xsk_ptr || !(rx || tx))
return -EFAULT;
@@ -609,10 +716,10 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
if (err)
goto out_xsk_alloc;
- if (umem->refcount &&
- !(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
- pr_warn("Error: shared umems not supported by libbpf supplied XDP program.\n");
- err = -EBUSY;
+ xsk->outstanding_tx = 0;
+ ifindex = if_nametoindex(ifname);
+ if (!ifindex) {
+ err = -errno;
goto out_xsk_alloc;
}
@@ -626,16 +733,21 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
xsk->fd = umem->fd;
}
- xsk->outstanding_tx = 0;
- xsk->queue_id = queue_id;
- xsk->umem = umem;
- xsk->ifindex = if_nametoindex(ifname);
- if (!xsk->ifindex) {
- err = -errno;
- goto out_socket;
+ ctx = xsk_get_ctx(umem, ifindex, queue_id);
+ if (!ctx) {
+ if (!fill || !comp) {
+ err = -EFAULT;
+ goto out_socket;
+ }
+
+ ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
+ fill, comp);
+ if (!ctx) {
+ err = -ENOMEM;
+ goto out_socket;
+ }
}
- memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
- xsk->ifname[IFNAMSIZ - 1] = '\0';
+ xsk->ctx = ctx;
if (rx) {
err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
@@ -643,7 +755,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
sizeof(xsk->config.rx_size));
if (err) {
err = -errno;
- goto out_socket;
+ goto out_put_ctx;
}
}
if (tx) {
@@ -652,14 +764,14 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
sizeof(xsk->config.tx_size));
if (err) {
err = -errno;
- goto out_socket;
+ goto out_put_ctx;
}
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
if (err) {
err = -errno;
- goto out_socket;
+ goto out_put_ctx;
}
if (rx) {
@@ -669,7 +781,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
xsk->fd, XDP_PGOFF_RX_RING);
if (rx_map == MAP_FAILED) {
err = -errno;
- goto out_socket;
+ goto out_put_ctx;
}
rx->mask = xsk->config.rx_size - 1;
@@ -708,10 +820,10 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
xsk->tx = tx;
sxdp.sxdp_family = PF_XDP;
- sxdp.sxdp_ifindex = xsk->ifindex;
- sxdp.sxdp_queue_id = xsk->queue_id;
+ sxdp.sxdp_ifindex = ctx->ifindex;
+ sxdp.sxdp_queue_id = ctx->queue_id;
if (umem->refcount > 1) {
- sxdp.sxdp_flags = XDP_SHARED_UMEM;
+ sxdp.sxdp_flags |= XDP_SHARED_UMEM;
sxdp.sxdp_shared_umem_fd = umem->fd;
} else {
sxdp.sxdp_flags = xsk->config.bind_flags;
@@ -723,7 +835,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
goto out_mmap_tx;
}
- xsk->prog_fd = -1;
+ ctx->prog_fd = -1;
if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
err = xsk_setup_xdp_prog(xsk);
@@ -742,6 +854,8 @@ out_mmap_rx:
if (rx)
munmap(rx_map, off.rx.desc +
xsk->config.rx_size * sizeof(struct xdp_desc));
+out_put_ctx:
+ xsk_put_ctx(ctx);
out_socket:
if (--umem->refcount)
close(xsk->fd);
@@ -750,25 +864,24 @@ out_xsk_alloc:
return err;
}
-int xsk_umem__delete(struct xsk_umem *umem)
+int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
+ __u32 queue_id, struct xsk_umem *umem,
+ struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
+ const struct xsk_socket_config *usr_config)
{
- struct xdp_mmap_offsets off;
- int err;
+ return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
+ rx, tx, umem->fill_save,
+ umem->comp_save, usr_config);
+}
+int xsk_umem__delete(struct xsk_umem *umem)
+{
if (!umem)
return 0;
if (umem->refcount)
return -EBUSY;
- err = xsk_get_mmap_offsets(umem->fd, &off);
- if (!err) {
- munmap(umem->fill->ring - off.fr.desc,
- off.fr.desc + umem->config.fill_size * sizeof(__u64));
- munmap(umem->comp->ring - off.cr.desc,
- off.cr.desc + umem->config.comp_size * sizeof(__u64));
- }
-
close(umem->fd);
free(umem);
@@ -778,15 +891,16 @@ int xsk_umem__delete(struct xsk_umem *umem)
void xsk_socket__delete(struct xsk_socket *xsk)
{
size_t desc_sz = sizeof(struct xdp_desc);
+ struct xsk_ctx *ctx = xsk->ctx;
struct xdp_mmap_offsets off;
int err;
if (!xsk)
return;
- if (xsk->prog_fd != -1) {
+ if (ctx->prog_fd != -1) {
xsk_delete_bpf_maps(xsk);
- close(xsk->prog_fd);
+ close(ctx->prog_fd);
}
err = xsk_get_mmap_offsets(xsk->fd, &off);
@@ -799,14 +913,15 @@ void xsk_socket__delete(struct xsk_socket *xsk)
munmap(xsk->tx->ring - off.tx.desc,
off.tx.desc + xsk->config.tx_size * desc_sz);
}
-
}
- xsk->umem->refcount--;
+ xsk_put_ctx(ctx);
+
+ ctx->umem->refcount--;
/* Do not close an fd that also has an associated umem connected
* to it.
*/
- if (xsk->fd != xsk->umem->fd)
+ if (xsk->fd != ctx->umem->fd)
close(xsk->fd);
free(xsk);
}
diff --git a/tools/lib/bpf/xsk.h b/tools/lib/bpf/xsk.h
index 584f6820a639..1069c46364ff 100644
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -234,6 +234,15 @@ LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk,
struct xsk_ring_cons *rx,
struct xsk_ring_prod *tx,
const struct xsk_socket_config *config);
+LIBBPF_API int
+xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
+ const char *ifname,
+ __u32 queue_id, struct xsk_umem *umem,
+ struct xsk_ring_cons *rx,
+ struct xsk_ring_prod *tx,
+ struct xsk_ring_prod *fill,
+ struct xsk_ring_cons *comp,
+ const struct xsk_socket_config *config);
/* Returns 0 for success and -EBUSY if the umem is still in use. */
LIBBPF_API int xsk_umem__delete(struct xsk_umem *umem);
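
A usage sketch for the shared-umem flow: the first socket reuses the fill/completion rings registered at umem creation, while each additional socket on another <ifname, queue_id> pair supplies fresh rings. Declarations of the rx/tx rings and error handling are abbreviated:

/* Sketch: two AF_XDP sockets on queues 0 and 1 sharing one umem. */
struct xsk_ring_prod fill1;
struct xsk_ring_cons comp1;
struct xsk_socket *xsk0, *xsk1;
int err;

err = xsk_socket__create(&xsk0, "eth0", 0, umem, &rx0, &tx0, NULL);
if (err)
	return err;

err = xsk_socket__create_shared(&xsk1, "eth0", 1, umem, &rx1, &tx1,
				&fill1, &comp1, NULL);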
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 8137a6046a47..d1b5fb89992f 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -483,10 +483,6 @@ ifndef NO_LIBELF
EXTLIBS += -lelf
$(call detected,CONFIG_LIBELF)
- ifeq ($(feature-libelf-mmap), 1)
- CFLAGS += -DHAVE_LIBELF_MMAP_SUPPORT
- endif
-
ifeq ($(feature-libelf-getphdrnum), 1)
CFLAGS += -DHAVE_ELF_GETPHDRNUM_SUPPORT
endif
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2feb751516ab..0374adcb223c 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -328,12 +328,6 @@ config_bpf_program(struct bpf_program *prog)
probe_conf.no_inlines = false;
probe_conf.force_add = false;
- config_str = bpf_program__title(prog, false);
- if (IS_ERR(config_str)) {
- pr_debug("bpf: unable to get title for program\n");
- return PTR_ERR(config_str);
- }
-
priv = calloc(sizeof(*priv), 1);
if (!priv) {
pr_debug("bpf: failed to alloc priv\n");
@@ -341,6 +335,7 @@ config_bpf_program(struct bpf_program *prog)
}
pev = &priv->pev;
+ config_str = bpf_program__section_name(prog);
pr_debug("bpf: config program '%s'\n", config_str);
err = parse_prog_config(config_str, &main_str, &is_tp, pev);
if (err)
@@ -454,10 +449,7 @@ preproc_gen_prologue(struct bpf_program *prog, int n,
if (err) {
const char *title;
- title = bpf_program__title(prog, false);
- if (!title)
- title = "[unknown]";
-
+ title = bpf_program__section_name(prog);
pr_debug("Failed to generate prologue for program %s\n",
title);
return err;
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index ff4f4c47e148..03e264a27cd3 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -28,7 +28,7 @@ struct option;
* libelf 0.8.x and earlier do not support ELF_C_READ_MMAP;
* for newer versions we can use mmap to reduce memory usage:
*/
-#ifdef HAVE_LIBELF_MMAP_SUPPORT
+#ifdef ELF_C_READ_MMAP
# define PERF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define PERF_ELF_C_READ_MMAP ELF_C_READ
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 9a0946ddb705..3ab1200e172f 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -13,9 +13,7 @@ test_verifier_log
feature
test_sock
test_sock_addr
-test_sock_fields
urandom_read
-test_btf
test_sockmap
test_lirc_mode2_user
get_cgroup_id_user
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index fc946b7ac288..bdbeafec371b 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -33,9 +33,9 @@ LDLIBS += -lcap -lelf -lz -lrt -lpthread
# Order correspond to 'make run_tests' order
TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
test_verifier_log test_dev_cgroup test_tcpbpf_user \
- test_sock test_btf test_sockmap get_cgroup_id_user test_socket_cookie \
+ test_sock test_sockmap get_cgroup_id_user test_socket_cookie \
test_cgroup_storage \
- test_netcnt test_tcpnotify_user test_sock_fields test_sysctl \
+ test_netcnt test_tcpnotify_user test_sysctl \
test_progs-no_alu32 \
test_current_pid_tgid_new_ns
@@ -68,7 +68,8 @@ TEST_PROGS := test_kmod.sh \
test_tc_edt.sh \
test_xdping.sh \
test_bpftool_build.sh \
- test_bpftool.sh
+ test_bpftool.sh \
+		       test_bpftool_metadata.sh
TEST_PROGS_EXTENDED := with_addr.sh \
with_tunnels.sh \
@@ -176,6 +177,11 @@ $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
OUTPUT=$(BUILD_DIR)/bpftool/ \
prefix= DESTDIR=$(SCRATCH_DIR)/ install
+ $(Q)mkdir -p $(BUILD_DIR)/bpftool/Documentation
+ $(Q)RST2MAN_OPTS="--exit-status=1" $(MAKE) $(submake_extras) \
+ -C $(BPFTOOLDIR)/Documentation \
+ OUTPUT=$(BUILD_DIR)/bpftool/Documentation/ \
+ prefix= DESTDIR=$(SCRATCH_DIR)/ install
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
@@ -316,7 +322,7 @@ $(TRUNNER_BPF_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_BPF_PROGS_DIR)/%.c \
$(TRUNNER_BPF_PROGS_DIR)/*.h \
$$(INCLUDE_DIR)/vmlinux.h \
- $$(BPFOBJ) | $(TRUNNER_OUTPUT)
+ $(wildcard $(BPFDIR)/bpf_*.h) | $(TRUNNER_OUTPUT)
$$(call $(TRUNNER_BPF_BUILD_RULE),$$<,$$@, \
$(TRUNNER_BPF_CFLAGS), \
$(TRUNNER_BPF_LDFLAGS))
diff --git a/tools/testing/selftests/bpf/README.rst b/tools/testing/selftests/bpf/README.rst
index e885d351595f..ac9eda830187 100644
--- a/tools/testing/selftests/bpf/README.rst
+++ b/tools/testing/selftests/bpf/README.rst
@@ -7,6 +7,44 @@ General instructions on running selftests can be found in
Additional information about selftest failures is
documented here.
+profiler[23] test failures with clang/llvm <12.0.0
+==================================================
+
+With clang/llvm <12.0.0, the profiler[23] test may fail.
+The symptom looks like
+
+.. code-block:: c
+
+ // r9 is a pointer to map_value
+ // r7 is a scalar
+ 17: bf 96 00 00 00 00 00 00 r6 = r9
+ 18: 0f 76 00 00 00 00 00 00 r6 += r7
+ math between map_value pointer and register with unbounded min value is not allowed
+
+ // the instructions below will not be seen in the verifier log
+ 19: a5 07 01 00 01 01 00 00 if r7 < 257 goto +1
+ 20: bf 96 00 00 00 00 00 00 r6 = r9
+ // r6 is used here
+
+The verifier rejects such code with the above error.
+At insn 18, r7 is indeed unbounded. The later insn 19 checks the bounds and
+insn 20 undoes the map_value addition. It is currently impossible for the
+verifier to understand such speculative pointer arithmetic. Hence
+  https://reviews.llvm.org/D85570
+addresses it on the compiler side; the fix was committed for llvm 12.
+
+The corresponding C code:
+
+.. code-block:: c
+
+ for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
+ filepart_length = bpf_probe_read_str(payload, ...);
+ if (filepart_length <= MAX_PATH) {
+ barrier_var(filepart_length); // workaround
+ payload += filepart_length;
+ }
+ }
+
bpf_iter test failures with clang/llvm 10.0.0
=============================================
@@ -43,3 +81,24 @@ This is due to a llvm BPF backend bug. The fix
https://reviews.llvm.org/D78466
has been pushed to llvm 10.x release branch and will be
available in 10.0.1. The fix is available in llvm 11.0.0 trunk.
+
+BPF CO-RE-based tests and Clang version
+=======================================
+
+A set of selftests uses BPF target-specific built-ins, which might require
+bleeding-edge Clang versions (Clang 12 nightly at this time).
+
+A few sub-tests of the core_reloc test suite (part of the test_progs test
+runner) require the following built-ins, listed with the corresponding
+Clang diffs that introduced them. These sub-tests are skipped if Clang is
+too old to support them, so they shouldn't cause build failures or runtime
+test failures:
+
+ - __builtin_btf_type_id() ([0], [1], [2]);
+ - __builtin_preserve_type_info(), __builtin_preserve_enum_value() ([3], [4]).
+
+ [0] https://reviews.llvm.org/D74572
+ [1] https://reviews.llvm.org/D74668
+ [2] https://reviews.llvm.org/D85174
+ [3] https://reviews.llvm.org/D83878
+ [4] https://reviews.llvm.org/D83242
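+
+As an illustration (a hypothetical sketch, not one of the selftests; the
+probe() function name is made up and vmlinux.h is assumed to provide the
+kernel types), these built-ins are usually reached through the bpf_core_*()
+convenience macros from bpf_core_read.h rather than invoked directly:
+
+.. code-block:: c
+
+  #include "vmlinux.h"
+  #include <bpf/bpf_core_read.h>
+
+  int probe(void *ctx)
+  {
+    /* BTF type ID of struct task_struct in the kernel's BTF;
+     * wraps __builtin_btf_type_id() under the hood
+     */
+    __u32 id = bpf_core_type_id_kernel(struct task_struct);
+
+    /* does the kernel's BTF know this type at all?
+     * wraps __builtin_preserve_type_info()
+     */
+    bool exists = bpf_core_type_exists(struct task_struct);
+
+    return id && exists;
+  }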
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index 944ad4721c83..332ed2f7b402 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -311,12 +311,12 @@ extern const struct bench bench_rename_kretprobe;
extern const struct bench bench_rename_rawtp;
extern const struct bench bench_rename_fentry;
extern const struct bench bench_rename_fexit;
-extern const struct bench bench_rename_fmodret;
extern const struct bench bench_trig_base;
extern const struct bench bench_trig_tp;
extern const struct bench bench_trig_rawtp;
extern const struct bench bench_trig_kprobe;
extern const struct bench bench_trig_fentry;
+extern const struct bench bench_trig_fentry_sleep;
extern const struct bench bench_trig_fmodret;
extern const struct bench bench_rb_libbpf;
extern const struct bench bench_rb_custom;
@@ -332,12 +332,12 @@ static const struct bench *benchs[] = {
&bench_rename_rawtp,
&bench_rename_fentry,
&bench_rename_fexit,
- &bench_rename_fmodret,
&bench_trig_base,
&bench_trig_tp,
&bench_trig_rawtp,
&bench_trig_kprobe,
&bench_trig_fentry,
+ &bench_trig_fentry_sleep,
&bench_trig_fmodret,
&bench_rb_libbpf,
&bench_rb_custom,
@@ -462,4 +462,3 @@ int main(int argc, char **argv)
return 0;
}
-
diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c
index e74cff40f4fe..a967674098ad 100644
--- a/tools/testing/selftests/bpf/benchs/bench_rename.c
+++ b/tools/testing/selftests/bpf/benchs/bench_rename.c
@@ -106,12 +106,6 @@ static void setup_fexit()
attach_bpf(ctx.skel->progs.prog5);
}
-static void setup_fmodret()
-{
- setup_ctx();
- attach_bpf(ctx.skel->progs.prog6);
-}
-
static void *consumer(void *input)
{
return NULL;
@@ -182,14 +176,3 @@ const struct bench bench_rename_fexit = {
.report_progress = hits_drops_report_progress,
.report_final = hits_drops_report_final,
};
-
-const struct bench bench_rename_fmodret = {
- .name = "rename-fmodret",
- .validate = validate,
- .setup = setup_fmodret,
- .producer_thread = producer,
- .consumer_thread = consumer,
- .measure = measure,
- .report_progress = hits_drops_report_progress,
- .report_final = hits_drops_report_final,
-};
diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c
index 49c22832f216..2a0b6c9885a4 100644
--- a/tools/testing/selftests/bpf/benchs/bench_trigger.c
+++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c
@@ -90,6 +90,12 @@ static void trigger_fentry_setup()
attach_bpf(ctx.skel->progs.bench_trigger_fentry);
}
+static void trigger_fentry_sleep_setup()
+{
+ setup_ctx();
+ attach_bpf(ctx.skel->progs.bench_trigger_fentry_sleep);
+}
+
static void trigger_fmodret_setup()
{
setup_ctx();
@@ -155,6 +161,17 @@ const struct bench bench_trig_fentry = {
.report_final = hits_drops_report_final,
};
+const struct bench bench_trig_fentry_sleep = {
+ .name = "trig-fentry-sleep",
+ .validate = trigger_validate,
+ .setup = trigger_fentry_sleep_setup,
+ .producer_thread = trigger_producer,
+ .consumer_thread = trigger_consumer,
+ .measure = trigger_measure,
+ .report_progress = hits_drops_report_progress,
+ .report_final = hits_drops_report_final,
+};
+
const struct bench bench_trig_fmodret = {
.name = "trig-fmodret",
.validate = trigger_validate,
diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index 5bf2fe9b1efa..2915664c335d 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -16,6 +16,7 @@ BPF_PROG(name, args)
struct sock_common {
unsigned char skc_state;
+ __u16 skc_num;
} __attribute__((preserve_access_index));
enum sk_pacing {
@@ -45,6 +46,10 @@ struct inet_connection_sock {
__u64 icsk_ca_priv[104 / sizeof(__u64)];
} __attribute__((preserve_access_index));
+struct request_sock {
+ struct sock_common __req_common;
+} __attribute__((preserve_access_index));
+
struct tcp_sock {
struct inet_connection_sock inet_conn;
@@ -115,14 +120,6 @@ enum tcp_ca_event {
CA_EVENT_ECN_IS_CE = 5,
};
-enum tcp_ca_state {
- TCP_CA_Open = 0,
- TCP_CA_Disorder = 1,
- TCP_CA_CWR = 2,
- TCP_CA_Recovery = 3,
- TCP_CA_Loss = 4
-};
-
struct ack_sample {
__u32 pkts_acked;
__s32 rtt_us;
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.h b/tools/testing/selftests/bpf/flow_dissector_load.h
index daeaeb518894..7290401ec172 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.h
+++ b/tools/testing/selftests/bpf/flow_dissector_load.h
@@ -23,7 +23,13 @@ static inline int bpf_flow_load(struct bpf_object **obj,
if (ret)
return ret;
- main_prog = bpf_object__find_program_by_title(*obj, section_name);
+ main_prog = NULL;
+ bpf_object__for_each_program(prog, *obj) {
+ if (strcmp(section_name, bpf_program__section_name(prog)) == 0) {
+ main_prog = prog;
+ break;
+ }
+ }
if (!main_prog)
return -1;
diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c
index f56655690f9b..12ee40284da0 100644
--- a/tools/testing/selftests/bpf/network_helpers.c
+++ b/tools/testing/selftests/bpf/network_helpers.c
@@ -104,6 +104,43 @@ error_close:
return -1;
}
+int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
+ int timeout_ms)
+{
+ struct sockaddr_storage addr;
+ socklen_t addrlen = sizeof(addr);
+ struct sockaddr_in *addr_in;
+ int fd, ret;
+
+ if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) {
+ log_err("Failed to get server addr");
+ return -1;
+ }
+
+ addr_in = (struct sockaddr_in *)&addr;
+ fd = socket(addr_in->sin_family, SOCK_STREAM, 0);
+ if (fd < 0) {
+ log_err("Failed to create client socket");
+ return -1;
+ }
+
+ if (settimeo(fd, timeout_ms))
+ goto error_close;
+
+ ret = sendto(fd, data, data_len, MSG_FASTOPEN, (struct sockaddr *)&addr,
+ addrlen);
+ if (ret != data_len) {
+ log_err("sendto(data, %u) != %d\n", data_len, ret);
+ goto error_close;
+ }
+
+ return fd;
+
+error_close:
+ save_errno_close(fd);
+ return -1;
+}
+
static int connect_fd_to_addr(int fd,
const struct sockaddr_storage *addr,
socklen_t addrlen)
diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h
index c3728f6667e4..7205f8afdba1 100644
--- a/tools/testing/selftests/bpf/network_helpers.h
+++ b/tools/testing/selftests/bpf/network_helpers.h
@@ -37,6 +37,8 @@ int start_server(int family, int type, const char *addr, __u16 port,
int timeout_ms);
int connect_to_fd(int server_fd, int timeout_ms);
int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms);
+int fastopen_connect(int server_fd, const char *data, unsigned int data_len,
+ int timeout_ms);
int make_sockaddr(int family, const char *addr_str, __u16 port,
struct sockaddr_storage *addr, socklen_t *len);
diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c
index c548aded6585..52414058a627 100644
--- a/tools/testing/selftests/bpf/prog_tests/align.c
+++ b/tools/testing/selftests/bpf/prog_tests/align.c
@@ -195,13 +195,13 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {8, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
- {10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {10, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
- {12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {12, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
- {14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
+ {14, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
},
@@ -518,7 +518,7 @@ static struct bpf_align_test tests[] = {
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
+ {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
},
},
@@ -561,18 +561,18 @@ static struct bpf_align_test tests[] = {
/* Adding 14 makes R6 be (4n+2) */
{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
/* Subtracting from packet pointer overflows ubounds */
- {13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
+ {13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
/* Adding it to packet pointer gives nice bounds again */
- {16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ {16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
- {20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
+ {20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
},
},
};
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 7375d9a6d242..448885b95eed 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -7,6 +7,7 @@
#include "bpf_iter_task.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
+#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
@@ -132,20 +133,118 @@ static void test_task_stack(void)
bpf_iter_task_stack__destroy(skel);
}
+static void *do_nothing(void *arg)
+{
+ pthread_exit(arg);
+}
+
static void test_task_file(void)
{
struct bpf_iter_task_file *skel;
+ pthread_t thread_id;
+ void *ret;
skel = bpf_iter_task_file__open_and_load();
if (CHECK(!skel, "bpf_iter_task_file__open_and_load",
"skeleton open_and_load failed\n"))
return;
+ skel->bss->tgid = getpid();
+
+ if (CHECK(pthread_create(&thread_id, NULL, &do_nothing, NULL),
+ "pthread_create", "pthread_create failed\n"))
+ goto done;
+
do_dummy_read(skel->progs.dump_task_file);
+ if (CHECK(pthread_join(thread_id, &ret) || ret != NULL,
+ "pthread_join", "pthread_join failed\n"))
+ goto done;
+
+ CHECK(skel->bss->count != 0, "check_count",
+ "invalid non pthread file visit count %d\n", skel->bss->count);
+
+done:
bpf_iter_task_file__destroy(skel);
}
+#define TASKBUFSZ 32768
+
+static char taskbuf[TASKBUFSZ];
+
+static int do_btf_read(struct bpf_iter_task_btf *skel)
+{
+ struct bpf_program *prog = skel->progs.dump_task_struct;
+ struct bpf_iter_task_btf__bss *bss = skel->bss;
+ int iter_fd = -1, len = 0, bufleft = TASKBUFSZ;
+ struct bpf_link *link;
+ char *buf = taskbuf;
+ int ret = 0;
+
+ link = bpf_program__attach_iter(prog, NULL);
+ if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ return ret;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ goto free_link;
+
+ do {
+ len = read(iter_fd, buf, bufleft);
+ if (len > 0) {
+ buf += len;
+ bufleft -= len;
+ }
+ } while (len > 0);
+
+ if (bss->skip) {
+ printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
+ ret = 1;
+ test__skip();
+ goto free_link;
+ }
+
+ if (CHECK(len < 0, "read", "read failed: %s\n", strerror(errno)))
+ goto free_link;
+
+ CHECK(strstr(taskbuf, "(struct task_struct)") == NULL,
+ "check for btf representation of task_struct in iter data",
+ "struct task_struct not found");
+free_link:
+ if (iter_fd > 0)
+ close(iter_fd);
+ bpf_link__destroy(link);
+ return ret;
+}
+
+static void test_task_btf(void)
+{
+ struct bpf_iter_task_btf__bss *bss;
+ struct bpf_iter_task_btf *skel;
+ int ret;
+
+ skel = bpf_iter_task_btf__open_and_load();
+ if (CHECK(!skel, "bpf_iter_task_btf__open_and_load",
+ "skeleton open_and_load failed\n"))
+ return;
+
+ bss = skel->bss;
+
+ ret = do_btf_read(skel);
+ if (ret)
+ goto cleanup;
+
+ if (CHECK(bss->tasks == 0, "check if iterated over tasks",
+ "no task iteration, did BPF program run?\n"))
+ goto cleanup;
+
+ CHECK(bss->seq_err != 0, "check for unexpected err",
+ "bpf_seq_printf_btf returned %ld", bss->seq_err);
+
+cleanup:
+ bpf_iter_task_btf__destroy(skel);
+}
+
static void test_tcp4(void)
{
struct bpf_iter_tcp4 *skel;
@@ -331,7 +430,7 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
struct bpf_map_info map_info = {};
struct bpf_iter_test_kern4 *skel;
struct bpf_link *link;
- __u32 page_size;
+ __u32 iter_size;
char *buf;
skel = bpf_iter_test_kern4__open();
@@ -353,19 +452,19 @@ static void test_overflow(bool test_e2big_overflow, bool ret1)
"map_creation failed: %s\n", strerror(errno)))
goto free_map1;
- /* bpf_seq_printf kernel buffer is one page, so one map
+ /* bpf_seq_printf kernel buffer is 8 pages, so one map
* bpf_seq_write will mostly fill it, and the other map
* will partially fill and then trigger overflow and need
* bpf_seq_read restart.
*/
- page_size = sysconf(_SC_PAGE_SIZE);
+ iter_size = sysconf(_SC_PAGE_SIZE) << 3;
if (test_e2big_overflow) {
- skel->rodata->print_len = (page_size + 8) / 8;
- expected_read_len = 2 * (page_size + 8);
+ skel->rodata->print_len = (iter_size + 8) / 8;
+ expected_read_len = 2 * (iter_size + 8);
} else if (!ret1) {
- skel->rodata->print_len = (page_size - 8) / 8;
- expected_read_len = 2 * (page_size - 8);
+ skel->rodata->print_len = (iter_size - 8) / 8;
+ expected_read_len = 2 * (iter_size - 8);
} else {
skel->rodata->print_len = 1;
expected_read_len = 2 * 8;
@@ -936,6 +1035,8 @@ void test_bpf_iter(void)
test_task_stack();
if (test__start_subtest("task_file"))
test_task_file();
+ if (test__start_subtest("task_btf"))
+ test_task_btf();
if (test__start_subtest("tcp4"))
test_tcp4();
if (test__start_subtest("tcp6"))
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
index e9f2f12ba06b..e698ee6bb6c2 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
@@ -49,6 +49,7 @@ void test_bpf_verif_scale(void)
{ "test_verif_scale3.o", BPF_PROG_TYPE_SCHED_CLS },
{ "pyperf_global.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+ { "pyperf_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
/* full unroll by llvm */
{ "pyperf50.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
@@ -86,6 +87,9 @@ void test_bpf_verif_scale(void)
{ "strobemeta_nounroll1.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
{ "strobemeta_nounroll2.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+ /* non-inlined subprogs */
+ { "strobemeta_subprogs.o", BPF_PROG_TYPE_RAW_TRACEPOINT },
+
{ "test_sysctl_loop1.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
{ "test_sysctl_loop2.o", BPF_PROG_TYPE_CGROUP_SYSCTL },
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index c75fc6447186..93162484c2ca 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -24,40 +24,17 @@
#include "bpf_rlimit.h"
#include "bpf_util.h"
-#include "test_btf.h"
+#include "../test_btf.h"
+#include "test_progs.h"
#define MAX_INSNS 512
#define MAX_SUBPROGS 16
-static uint32_t pass_cnt;
-static uint32_t error_cnt;
-static uint32_t skip_cnt;
+static int duration = 0;
+static bool always_log;
-#define CHECK(condition, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- fprintf(stderr, "%s:%d:FAIL ", __func__, __LINE__); \
- fprintf(stderr, format); \
- } \
- __ret; \
-})
-
-static int count_result(int err)
-{
- if (err)
- error_cnt++;
- else
- pass_cnt++;
-
- fprintf(stderr, "\n");
- return err;
-}
-
-static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
- const char *format, va_list args)
-{
- return vfprintf(stderr, format, args);
-}
+#undef CHECK
+#define CHECK(condition, format...) _CHECK(condition, "check", duration, format)
#define BTF_END_RAW 0xdeadbeef
#define NAME_TBD 0xdeadb33f
@@ -69,21 +46,6 @@ static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
#define MAX_NR_RAW_U32 1024
#define BTF_LOG_BUF_SIZE 65535
-static struct args {
- unsigned int raw_test_num;
- unsigned int file_test_num;
- unsigned int get_info_test_num;
- unsigned int info_raw_test_num;
- unsigned int dedup_test_num;
- bool raw_test;
- bool file_test;
- bool get_info_test;
- bool pprint_test;
- bool always_log;
- bool info_raw_test;
- bool dedup_test;
-} args;
-
static char btf_log_buf[BTF_LOG_BUF_SIZE];
static struct btf_header hdr_tmpl = {
@@ -3664,7 +3626,7 @@ done:
return raw_btf;
}
-static int do_test_raw(unsigned int test_num)
+static void do_test_raw(unsigned int test_num)
{
struct btf_raw_test *test = &raw_tests[test_num - 1];
struct bpf_create_map_attr create_attr = {};
@@ -3674,15 +3636,16 @@ static int do_test_raw(unsigned int test_num)
void *raw_btf;
int err;
- fprintf(stderr, "BTF raw test[%u] (%s): ", test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
+
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
test->str_sec,
test->str_sec_size,
&raw_btf_size, NULL);
-
if (!raw_btf)
- return -1;
+ return;
hdr = raw_btf;
@@ -3694,7 +3657,7 @@ static int do_test_raw(unsigned int test_num)
*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
free(raw_btf);
err = ((btf_fd == -1) != test->btf_load_err);
@@ -3725,32 +3688,12 @@ static int do_test_raw(unsigned int test_num)
map_fd, test->map_create_err);
done:
- if (!err)
- fprintf(stderr, "OK");
-
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
-
if (btf_fd != -1)
close(btf_fd);
if (map_fd != -1)
close(map_fd);
-
- return err;
-}
-
-static int test_raw(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.raw_test_num)
- return count_result(do_test_raw(args.raw_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(raw_tests); i++)
- err |= count_result(do_test_raw(i));
-
- return err;
}
struct btf_get_info_test {
@@ -3814,11 +3757,6 @@ const struct btf_get_info_test get_info_tests[] = {
},
};
-static inline __u64 ptr_to_u64(const void *ptr)
-{
- return (__u64)(unsigned long)ptr;
-}
-
static int test_big_btf_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
@@ -3851,7 +3789,7 @@ static int test_big_btf_info(unsigned int test_num)
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
err = -1;
goto done;
@@ -3892,7 +3830,7 @@ static int test_big_btf_info(unsigned int test_num)
fprintf(stderr, "OK");
done:
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
@@ -3939,7 +3877,7 @@ static int test_btf_id(unsigned int test_num)
btf_fd[0] = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
if (CHECK(btf_fd[0] == -1, "errno:%d", errno)) {
err = -1;
goto done;
@@ -4024,7 +3962,7 @@ static int test_btf_id(unsigned int test_num)
fprintf(stderr, "OK");
done:
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
@@ -4039,7 +3977,7 @@ done:
return err;
}
-static int do_test_get_info(unsigned int test_num)
+static void do_test_get_info(unsigned int test_num)
{
const struct btf_get_info_test *test = &get_info_tests[test_num - 1];
unsigned int raw_btf_size, user_btf_size, expected_nbytes;
@@ -4048,11 +3986,14 @@ static int do_test_get_info(unsigned int test_num)
int btf_fd = -1, err, ret;
uint32_t info_len;
- fprintf(stderr, "BTF GET_INFO test[%u] (%s): ",
- test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
- if (test->special_test)
- return test->special_test(test_num);
+ if (test->special_test) {
+ err = test->special_test(test_num);
+ if (CHECK(err, "failed: %d\n", err))
+ return;
+ }
raw_btf = btf_raw_create(&hdr_tmpl,
test->raw_types,
@@ -4061,7 +4002,7 @@ static int do_test_get_info(unsigned int test_num)
&raw_btf_size, NULL);
if (!raw_btf)
- return -1;
+ return;
*btf_log_buf = '\0';
@@ -4073,7 +4014,7 @@ static int do_test_get_info(unsigned int test_num)
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
err = -1;
goto done;
@@ -4114,7 +4055,7 @@ static int do_test_get_info(unsigned int test_num)
fprintf(stderr, "OK");
done:
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
free(raw_btf);
@@ -4122,22 +4063,6 @@ done:
if (btf_fd != -1)
close(btf_fd);
-
- return err;
-}
-
-static int test_get_info(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.get_info_test_num)
- return count_result(do_test_get_info(args.get_info_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++)
- err |= count_result(do_test_get_info(i));
-
- return err;
}
struct btf_file_test {
@@ -4151,7 +4076,7 @@ static struct btf_file_test file_tests[] = {
{ .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
};
-static int do_test_file(unsigned int test_num)
+static void do_test_file(unsigned int test_num)
{
const struct btf_file_test *test = &file_tests[test_num - 1];
const char *expected_fnames[] = {"_dummy_tracepoint",
@@ -4169,17 +4094,17 @@ static int do_test_file(unsigned int test_num)
struct bpf_map *map;
int i, err, prog_fd;
- fprintf(stderr, "BTF libbpf test[%u] (%s): ", test_num,
- test->file);
+ if (!test__start_subtest(test->file))
+ return;
btf = btf__parse_elf(test->file, &btf_ext);
if (IS_ERR(btf)) {
if (PTR_ERR(btf) == -ENOENT) {
- fprintf(stderr, "SKIP. No ELF %s found", BTF_ELF_SEC);
- skip_cnt++;
- return 0;
+ printf("%s:SKIP: No ELF %s found", __func__, BTF_ELF_SEC);
+ test__skip();
+ return;
}
- return PTR_ERR(btf);
+ return;
}
btf__free(btf);
@@ -4188,7 +4113,7 @@ static int do_test_file(unsigned int test_num)
obj = bpf_object__open(test->file);
if (CHECK(IS_ERR(obj), "obj: %ld", PTR_ERR(obj)))
- return PTR_ERR(obj);
+ return;
prog = bpf_program__next(NULL, obj);
if (CHECK(!prog, "Cannot find bpf_prog")) {
@@ -4310,21 +4235,6 @@ skip:
done:
free(func_info);
bpf_object__close(obj);
- return err;
-}
-
-static int test_file(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.file_test_num)
- return count_result(do_test_file(args.file_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(file_tests); i++)
- err |= count_result(do_test_file(i));
-
- return err;
}
const char *pprint_enum_str[] = {
@@ -4428,7 +4338,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
- .max_entries = 128 * 1024,
+ .max_entries = 128,
},
{
@@ -4493,7 +4403,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
- .max_entries = 128 * 1024,
+ .max_entries = 128,
},
{
@@ -4564,7 +4474,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv),
.key_type_id = 3, /* unsigned int */
.value_type_id = 16, /* struct pprint_mapv */
- .max_entries = 128 * 1024,
+ .max_entries = 128,
},
#ifdef __SIZEOF_INT128__
@@ -4591,7 +4501,7 @@ static struct btf_raw_test pprint_test_template[] = {
.value_size = sizeof(struct pprint_mapv_int128),
.key_type_id = 1,
.value_type_id = 4,
- .max_entries = 128 * 1024,
+ .max_entries = 128,
.mapv_kind = PPRINT_MAPV_KIND_INT128,
},
#endif
@@ -4790,7 +4700,7 @@ static int check_line(const char *expected_line, int nexpected_line,
}
-static int do_test_pprint(int test_num)
+static void do_test_pprint(int test_num)
{
const struct btf_raw_test *test = &pprint_test_template[test_num];
enum pprint_mapv_kind_t mapv_kind = test->mapv_kind;
@@ -4809,18 +4719,20 @@ static int do_test_pprint(int test_num)
uint8_t *raw_btf;
ssize_t nread;
- fprintf(stderr, "%s(#%d)......", test->descr, test_num);
+ if (!test__start_subtest(test->descr))
+ return;
+
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, NULL);
if (!raw_btf)
- return -1;
+ return;
*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
free(raw_btf);
if (CHECK(btf_fd == -1, "errno:%d", errno)) {
@@ -4971,7 +4883,7 @@ done:
free(mapv);
if (!err)
fprintf(stderr, "OK");
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd != -1)
close(btf_fd);
@@ -4981,14 +4893,11 @@ done:
fclose(pin_file);
unlink(pin_path);
free(line);
-
- return err;
}
-static int test_pprint(void)
+static void test_pprint(void)
{
unsigned int i;
- int err = 0;
/* test various maps with the first test template */
for (i = 0; i < ARRAY_SIZE(pprint_tests_meta); i++) {
@@ -4999,7 +4908,7 @@ static int test_pprint(void)
pprint_test_template[0].lossless_map = pprint_tests_meta[i].lossless_map;
pprint_test_template[0].percpu_map = pprint_tests_meta[i].percpu_map;
- err |= count_result(do_test_pprint(0));
+ do_test_pprint(0);
}
/* test rest test templates with the first map */
@@ -5010,10 +4919,8 @@ static int test_pprint(void)
pprint_test_template[i].ordered_map = pprint_tests_meta[0].ordered_map;
pprint_test_template[i].lossless_map = pprint_tests_meta[0].lossless_map;
pprint_test_template[i].percpu_map = pprint_tests_meta[0].percpu_map;
- err |= count_result(do_test_pprint(i));
+ do_test_pprint(i);
}
-
- return err;
}
#define BPF_LINE_INFO_ENC(insn_off, file_off, line_off, line_num, line_col) \
@@ -6178,7 +6085,7 @@ done:
return err;
}
-static int do_test_info_raw(unsigned int test_num)
+static void do_test_info_raw(unsigned int test_num)
{
const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1];
unsigned int raw_btf_size, linfo_str_off, linfo_size;
@@ -6187,18 +6094,19 @@ static int do_test_info_raw(unsigned int test_num)
const char *ret_next_str;
union bpf_attr attr = {};
- fprintf(stderr, "BTF prog info raw test[%u] (%s): ", test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
+
raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
test->str_sec, test->str_sec_size,
&raw_btf_size, &ret_next_str);
-
if (!raw_btf)
- return -1;
+ return;
*btf_log_buf = '\0';
btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
btf_log_buf, BTF_LOG_BUF_SIZE,
- args.always_log);
+ always_log);
free(raw_btf);
if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {
@@ -6206,7 +6114,7 @@ static int do_test_info_raw(unsigned int test_num)
goto done;
}
- if (*btf_log_buf && args.always_log)
+ if (*btf_log_buf && always_log)
fprintf(stderr, "\n%s", btf_log_buf);
*btf_log_buf = '\0';
@@ -6261,10 +6169,7 @@ static int do_test_info_raw(unsigned int test_num)
goto done;
done:
- if (!err)
- fprintf(stderr, "OK");
-
- if (*btf_log_buf && (err || args.always_log))
+ if (*btf_log_buf && (err || always_log))
fprintf(stderr, "\n%s", btf_log_buf);
if (btf_fd != -1)
@@ -6274,22 +6179,6 @@ done:
if (!IS_ERR(patched_linfo))
free(patched_linfo);
-
- return err;
-}
-
-static int test_info_raw(void)
-{
- unsigned int i;
- int err = 0;
-
- if (args.info_raw_test_num)
- return count_result(do_test_info_raw(args.info_raw_test_num));
-
- for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
- err |= count_result(do_test_info_raw(i));
-
- return err;
}
struct btf_raw_data {
@@ -6754,7 +6643,7 @@ static void dump_btf_strings(const char *strs, __u32 len)
}
}
-static int do_test_dedup(unsigned int test_num)
+static void do_test_dedup(unsigned int test_num)
{
const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
__u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
@@ -6769,13 +6658,15 @@ static int do_test_dedup(unsigned int test_num)
void *raw_btf;
int err = 0, i;
- fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
+ if (!test__start_subtest(test->descr))
+ return;
raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
test->input.str_sec, test->input.str_sec_size,
&raw_btf_size, &ret_test_next_str);
if (!raw_btf)
- return -1;
+ return;
+
test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
@@ -6789,7 +6680,7 @@ static int do_test_dedup(unsigned int test_num)
test->expect.str_sec_size,
&raw_btf_size, &ret_expect_next_str);
if (!raw_btf)
- return -1;
+ return;
expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
free(raw_btf);
if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
@@ -6894,174 +6785,27 @@ static int do_test_dedup(unsigned int test_num)
}
done:
- if (!err)
- fprintf(stderr, "OK");
if (!IS_ERR(test_btf))
btf__free(test_btf);
if (!IS_ERR(expect_btf))
btf__free(expect_btf);
-
- return err;
}
-static int test_dedup(void)
+void test_btf(void)
{
- unsigned int i;
- int err = 0;
+ int i;
- if (args.dedup_test_num)
- return count_result(do_test_dedup(args.dedup_test_num));
+ always_log = env.verbosity > VERBOSE_NONE;
+ for (i = 1; i <= ARRAY_SIZE(raw_tests); i++)
+ do_test_raw(i);
+ for (i = 1; i <= ARRAY_SIZE(get_info_tests); i++)
+ do_test_get_info(i);
+ for (i = 1; i <= ARRAY_SIZE(file_tests); i++)
+ do_test_file(i);
+ for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
+ do_test_info_raw(i);
for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
- err |= count_result(do_test_dedup(i));
-
- return err;
-}
-
-static void usage(const char *cmd)
-{
- fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
- "\t[-g btf_get_info_test_num (1 - %zu)] |\n"
- "\t[-f btf_file_test_num (1 - %zu)] |\n"
- "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
- "\t[-p (pretty print test)] |\n"
- "\t[-d btf_dedup_test_num (1 - %zu)]]\n",
- cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
- ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests),
- ARRAY_SIZE(dedup_tests));
-}
-
-static int parse_args(int argc, char **argv)
-{
- const char *optstr = "hlpk:f:r:g:d:";
- int opt;
-
- while ((opt = getopt(argc, argv, optstr)) != -1) {
- switch (opt) {
- case 'l':
- args.always_log = true;
- break;
- case 'f':
- args.file_test_num = atoi(optarg);
- args.file_test = true;
- break;
- case 'r':
- args.raw_test_num = atoi(optarg);
- args.raw_test = true;
- break;
- case 'g':
- args.get_info_test_num = atoi(optarg);
- args.get_info_test = true;
- break;
- case 'p':
- args.pprint_test = true;
- break;
- case 'k':
- args.info_raw_test_num = atoi(optarg);
- args.info_raw_test = true;
- break;
- case 'd':
- args.dedup_test_num = atoi(optarg);
- args.dedup_test = true;
- break;
- case 'h':
- usage(argv[0]);
- exit(0);
- default:
- usage(argv[0]);
- return -1;
- }
- }
-
- if (args.raw_test_num &&
- (args.raw_test_num < 1 ||
- args.raw_test_num > ARRAY_SIZE(raw_tests))) {
- fprintf(stderr, "BTF raw test number must be [1 - %zu]\n",
- ARRAY_SIZE(raw_tests));
- return -1;
- }
-
- if (args.file_test_num &&
- (args.file_test_num < 1 ||
- args.file_test_num > ARRAY_SIZE(file_tests))) {
- fprintf(stderr, "BTF file test number must be [1 - %zu]\n",
- ARRAY_SIZE(file_tests));
- return -1;
- }
-
- if (args.get_info_test_num &&
- (args.get_info_test_num < 1 ||
- args.get_info_test_num > ARRAY_SIZE(get_info_tests))) {
- fprintf(stderr, "BTF get info test number must be [1 - %zu]\n",
- ARRAY_SIZE(get_info_tests));
- return -1;
- }
-
- if (args.info_raw_test_num &&
- (args.info_raw_test_num < 1 ||
- args.info_raw_test_num > ARRAY_SIZE(info_raw_tests))) {
- fprintf(stderr, "BTF prog info raw test number must be [1 - %zu]\n",
- ARRAY_SIZE(info_raw_tests));
- return -1;
- }
-
- if (args.dedup_test_num &&
- (args.dedup_test_num < 1 ||
- args.dedup_test_num > ARRAY_SIZE(dedup_tests))) {
- fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n",
- ARRAY_SIZE(dedup_tests));
- return -1;
- }
-
- return 0;
-}
-
-static void print_summary(void)
-{
- fprintf(stderr, "PASS:%u SKIP:%u FAIL:%u\n",
- pass_cnt - skip_cnt, skip_cnt, error_cnt);
-}
-
-int main(int argc, char **argv)
-{
- int err = 0;
-
- err = parse_args(argc, argv);
- if (err)
- return err;
-
- if (args.always_log)
- libbpf_set_print(__base_pr);
-
- if (args.raw_test)
- err |= test_raw();
-
- if (args.get_info_test)
- err |= test_get_info();
-
- if (args.file_test)
- err |= test_file();
-
- if (args.pprint_test)
- err |= test_pprint();
-
- if (args.info_raw_test)
- err |= test_info_raw();
-
- if (args.dedup_test)
- err |= test_dedup();
-
- if (args.raw_test || args.get_info_test || args.file_test ||
- args.pprint_test || args.info_raw_test || args.dedup_test)
- goto done;
-
- err |= test_raw();
- err |= test_get_info();
- err |= test_file();
- err |= test_info_raw();
- err |= test_dedup();
-
-done:
- print_summary();
- return err;
+ do_test_dedup(i);
+ test_pprint();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_dump.c b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
index 39fb81d9daeb..c60091ee8a21 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -129,6 +129,109 @@ done:
return err;
}
+static char *dump_buf;
+static size_t dump_buf_sz;
+static FILE *dump_buf_file;
+
+void test_btf_dump_incremental(void)
+{
+ struct btf *btf = NULL;
+ struct btf_dump *d = NULL;
+ struct btf_dump_opts opts;
+ int id, err, i;
+
+ dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
+ if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream"))
+ return;
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "new_empty"))
+ goto err_out;
+ opts.ctx = dump_buf_file;
+ d = btf_dump__new(btf, NULL, &opts, btf_dump_printf);
+ if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new"))
+ goto err_out;
+
+ /* First, generate BTF corresponding to the following C code:
+ *
+ * enum { VAL = 1 };
+ *
+ * struct s { int x; };
+ *
+ */
+ id = btf__add_enum(btf, NULL, 4);
+ ASSERT_EQ(id, 1, "enum_id");
+ err = btf__add_enum_value(btf, "VAL", 1);
+ ASSERT_OK(err, "enum_val_ok");
+
+ id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
+ ASSERT_EQ(id, 2, "int_id");
+
+ id = btf__add_struct(btf, "s", 4);
+ ASSERT_EQ(id, 3, "struct_id");
+ err = btf__add_field(btf, "x", 2, 0, 0);
+ ASSERT_OK(err, "field_ok");
+
+ for (i = 1; i <= btf__get_nr_types(btf); i++) {
+ err = btf_dump__dump_type(d, i);
+ ASSERT_OK(err, "dump_type_ok");
+ }
+
+ fflush(dump_buf_file);
+ dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
+ ASSERT_STREQ(dump_buf,
+"enum {\n"
+" VAL = 1,\n"
+"};\n"
+"\n"
+"struct s {\n"
+" int x;\n"
+"};\n\n", "c_dump1");
+
+ /* Now, after dumping original BTF, append another struct that embeds
+ * anonymous enum. It also has a name conflict with the first struct:
+ *
+ * struct s___2 {
+ * enum { VAL___2 = 1 } x;
+ * struct s s;
+ * };
+ *
+ * This will test that the btf_dump'er maintains internal state properly.
+ * Note the VAL___2 enum value: we've already emitted that enum as a
+ * global anonymous enum, so btf_dump ensures that newly emitted enum
+ * values don't conflict with it.
+ *
+ */
+ fseek(dump_buf_file, 0, SEEK_SET);
+
+ id = btf__add_struct(btf, "s", 4);
+ ASSERT_EQ(id, 4, "struct_id");
+ err = btf__add_field(btf, "x", 1, 0, 0);
+ ASSERT_OK(err, "field_ok");
+ err = btf__add_field(btf, "s", 3, 32, 0);
+ ASSERT_OK(err, "field_ok");
+
+ for (i = 1; i <= btf__get_nr_types(btf); i++) {
+ err = btf_dump__dump_type(d, i);
+ ASSERT_OK(err, "dump_type_ok");
+ }
+
+ fflush(dump_buf_file);
+ dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
+ ASSERT_STREQ(dump_buf,
+"struct s___2 {\n"
+" enum {\n"
+" VAL___2 = 1,\n"
+" } x;\n"
+" struct s s;\n"
+"};\n\n" , "c_dump1");
+
+err_out:
+ fclose(dump_buf_file);
+ free(dump_buf);
+ btf_dump__free(d);
+ btf__free(btf);
+}
+
void test_btf_dump() {
int i;
@@ -140,4 +243,6 @@ void test_btf_dump() {
test_btf_dump_case(i, &btf_dump_test_cases[i]);
}
+ if (test__start_subtest("btf_dump: incremental"))
+ test_btf_dump_incremental();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_endian.c b/tools/testing/selftests/bpf/prog_tests/btf_endian.c
new file mode 100644
index 000000000000..8c52d72c876e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_endian.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define _GNU_SOURCE
+#include <string.h>
+#include <byteswap.h>
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+static int duration = 0;
+
+void test_btf_endian() {
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+ enum btf_endianness endian = BTF_LITTLE_ENDIAN;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+ enum btf_endianness endian = BTF_BIG_ENDIAN;
+#else
+#error "Unrecognized __BYTE_ORDER"
+#endif
+ enum btf_endianness swap_endian = 1 - endian;
+ struct btf *btf = NULL, *swap_btf = NULL;
+ const void *raw_data, *swap_raw_data;
+ const struct btf_type *t;
+ const struct btf_header *hdr;
+ __u32 raw_sz, swap_raw_sz;
+ int var_id;
+
+ /* Load BTF in native endianness */
+ btf = btf__parse_elf("btf_dump_test_case_syntax.o", NULL);
+ if (!ASSERT_OK_PTR(btf, "parse_native_btf"))
+ goto err_out;
+
+ ASSERT_EQ(btf__endianness(btf), endian, "endian");
+ btf__set_endianness(btf, swap_endian);
+ ASSERT_EQ(btf__endianness(btf), swap_endian, "endian");
+
+ /* Get raw BTF data in non-native endianness... */
+ raw_data = btf__get_raw_data(btf, &raw_sz);
+ if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted"))
+ goto err_out;
+
+ /* ...and open it as a new BTF instance */
+ swap_btf = btf__new(raw_data, raw_sz);
+ if (!ASSERT_OK_PTR(swap_btf, "parse_swap_btf"))
+ goto err_out;
+
+ ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian");
+ ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types");
+
+ swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz);
+ if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data"))
+ goto err_out;
+
+ /* both raw data should be identical (with non-native endianness) */
+ ASSERT_OK(memcmp(raw_data, swap_raw_data, raw_sz), "mem_identical");
+
+ /* make sure that at least BTF header data is really swapped */
+ hdr = swap_raw_data;
+ ASSERT_EQ(bswap_16(hdr->magic), BTF_MAGIC, "btf_magic_swapped");
+ ASSERT_EQ(raw_sz, swap_raw_sz, "raw_sizes");
+
+ /* swap it back to native endianness */
+ btf__set_endianness(swap_btf, endian);
+ swap_raw_data = btf__get_raw_data(swap_btf, &swap_raw_sz);
+ if (!ASSERT_OK_PTR(swap_raw_data, "swap_raw_data"))
+ goto err_out;
+
+ /* now header should have native BTF_MAGIC */
+ hdr = swap_raw_data;
+ ASSERT_EQ(hdr->magic, BTF_MAGIC, "btf_magic_native");
+ ASSERT_EQ(raw_sz, swap_raw_sz, "raw_sizes");
+
+ /* now modify original BTF */
+ var_id = btf__add_var(btf, "some_var", BTF_VAR_GLOBAL_ALLOCATED, 1);
+ CHECK(var_id <= 0, "var_id", "failed %d\n", var_id);
+
+ btf__free(swap_btf);
+ swap_btf = NULL;
+
+ btf__set_endianness(btf, swap_endian);
+ raw_data = btf__get_raw_data(btf, &raw_sz);
+ if (!ASSERT_OK_PTR(raw_data, "raw_data_inverted"))
+ goto err_out;
+
+ /* and re-open swapped raw data again */
+ swap_btf = btf__new(raw_data, raw_sz);
+ if (!ASSERT_OK_PTR(swap_btf, "parse_swap_btf"))
+ goto err_out;
+
+ ASSERT_EQ(btf__endianness(swap_btf), swap_endian, "endian");
+ ASSERT_EQ(btf__get_nr_types(swap_btf), btf__get_nr_types(btf), "nr_types");
+
+ /* the type should appear as if it was stored in native endianness */
+ t = btf__type_by_id(swap_btf, var_id);
+ ASSERT_STREQ(btf__str_by_offset(swap_btf, t->name_off), "some_var", "var_name");
+ ASSERT_EQ(btf_var(t)->linkage, BTF_VAR_GLOBAL_ALLOCATED, "var_linkage");
+ ASSERT_EQ(t->type, 1, "var_type");
+
+err_out:
+ btf__free(btf);
+ btf__free(swap_btf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
index 6ccecbd39476..76ebe4c250f1 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_map_in_map.c
@@ -53,12 +53,12 @@ static int kern_sync_rcu(void)
return err;
}
-void test_btf_map_in_map(void)
+static void test_lookup_update(void)
{
- int err, key = 0, val, i;
+ int map1_fd, map2_fd, map3_fd, map4_fd, map5_fd, map1_id, map2_id;
+ int outer_arr_fd, outer_hash_fd, outer_arr_dyn_fd;
struct test_btf_map_in_map *skel;
- int outer_arr_fd, outer_hash_fd;
- int fd, map1_fd, map2_fd, map1_id, map2_id;
+ int err, key = 0, val, i, fd;
skel = test_btf_map_in_map__open_and_load();
if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
@@ -70,32 +70,45 @@ void test_btf_map_in_map(void)
map1_fd = bpf_map__fd(skel->maps.inner_map1);
map2_fd = bpf_map__fd(skel->maps.inner_map2);
+ map3_fd = bpf_map__fd(skel->maps.inner_map3);
+ map4_fd = bpf_map__fd(skel->maps.inner_map4);
+ map5_fd = bpf_map__fd(skel->maps.inner_map5);
+ outer_arr_dyn_fd = bpf_map__fd(skel->maps.outer_arr_dyn);
outer_arr_fd = bpf_map__fd(skel->maps.outer_arr);
outer_hash_fd = bpf_map__fd(skel->maps.outer_hash);
- /* inner1 = input, inner2 = input + 1 */
- map1_fd = bpf_map__fd(skel->maps.inner_map1);
+ /* inner1 = input, inner2 = input + 1, inner3 = input + 2 */
bpf_map_update_elem(outer_arr_fd, &key, &map1_fd, 0);
- map2_fd = bpf_map__fd(skel->maps.inner_map2);
bpf_map_update_elem(outer_hash_fd, &key, &map2_fd, 0);
+ bpf_map_update_elem(outer_arr_dyn_fd, &key, &map3_fd, 0);
skel->bss->input = 1;
usleep(1);
-
bpf_map_lookup_elem(map1_fd, &key, &val);
CHECK(val != 1, "inner1", "got %d != exp %d\n", val, 1);
bpf_map_lookup_elem(map2_fd, &key, &val);
CHECK(val != 2, "inner2", "got %d != exp %d\n", val, 2);
+ bpf_map_lookup_elem(map3_fd, &key, &val);
+ CHECK(val != 3, "inner3", "got %d != exp %d\n", val, 3);
- /* inner1 = input + 1, inner2 = input */
+ /* inner2 = input, inner1 = input + 1, inner4 = input + 2 */
bpf_map_update_elem(outer_arr_fd, &key, &map2_fd, 0);
bpf_map_update_elem(outer_hash_fd, &key, &map1_fd, 0);
+ bpf_map_update_elem(outer_arr_dyn_fd, &key, &map4_fd, 0);
skel->bss->input = 3;
usleep(1);
-
bpf_map_lookup_elem(map1_fd, &key, &val);
CHECK(val != 4, "inner1", "got %d != exp %d\n", val, 4);
bpf_map_lookup_elem(map2_fd, &key, &val);
CHECK(val != 3, "inner2", "got %d != exp %d\n", val, 3);
+ bpf_map_lookup_elem(map4_fd, &key, &val);
+ CHECK(val != 5, "inner4", "got %d != exp %d\n", val, 5);
+
+ /* inner5 = input + 2 */
+ bpf_map_update_elem(outer_arr_dyn_fd, &key, &map5_fd, 0);
+ skel->bss->input = 5;
+ usleep(1);
+ bpf_map_lookup_elem(map5_fd, &key, &val);
+ CHECK(val != 7, "inner5", "got %d != exp %d\n", val, 7);
for (i = 0; i < 5; i++) {
val = i % 2 ? map1_fd : map2_fd;
@@ -106,7 +119,13 @@ void test_btf_map_in_map(void)
}
err = bpf_map_update_elem(outer_arr_fd, &key, &val, 0);
if (CHECK_FAIL(err)) {
- printf("failed to update hash_of_maps on iter #%d\n", i);
+ printf("failed to update array_of_maps on iter #%d\n", i);
+ goto cleanup;
+ }
+ val = i % 2 ? map4_fd : map5_fd;
+ err = bpf_map_update_elem(outer_arr_dyn_fd, &key, &val, 0);
+ if (CHECK_FAIL(err)) {
+ printf("failed to update array_of_maps (dyn) on iter #%d\n", i);
goto cleanup;
}
}
@@ -143,3 +162,36 @@ void test_btf_map_in_map(void)
cleanup:
test_btf_map_in_map__destroy(skel);
}
+
+static void test_diff_size(void)
+{
+ struct test_btf_map_in_map *skel;
+ int err, inner_map_fd, zero = 0;
+
+ skel = test_btf_map_in_map__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open&load skeleton\n"))
+ return;
+
+ inner_map_fd = bpf_map__fd(skel->maps.sockarr_sz2);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_sockarr), &zero,
+ &inner_map_fd, 0);
+ CHECK(err, "outer_sockarr inner map size check",
+ "cannot use a different size inner_map\n");
+
+ inner_map_fd = bpf_map__fd(skel->maps.inner_map_sz2);
+ err = bpf_map_update_elem(bpf_map__fd(skel->maps.outer_arr), &zero,
+ &inner_map_fd, 0);
+ CHECK(!err, "outer_arr inner map size check",
+ "incorrectly updated with a different size inner_map\n");
+
+ test_btf_map_in_map__destroy(skel);
+}
+
+void test_btf_map_in_map(void)
+{
+ if (test__start_subtest("lookup_update"))
+ test_lookup_update();
+
+ if (test__start_subtest("diff_size"))
+ test_diff_size();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
new file mode 100644
index 000000000000..86ccf37e26b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#define _GNU_SOURCE
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <sched.h>
+#include <linux/compiler.h>
+#include <bpf/libbpf.h>
+
+#include "network_helpers.h"
+#include "test_progs.h"
+#include "test_btf_skc_cls_ingress.skel.h"
+
+static struct test_btf_skc_cls_ingress *skel;
+struct sockaddr_in6 srv_sa6;
+static __u32 duration;
+
+#define PROG_PIN_FILE "/sys/fs/bpf/btf_skc_cls_ingress"
+
+static int write_sysctl(const char *sysctl, const char *value)
+{
+ int fd, err, len;
+
+ fd = open(sysctl, O_WRONLY);
+ if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n",
+ sysctl, strerror(errno), errno))
+ return -1;
+
+ len = strlen(value);
+ err = write(fd, value, len);
+ close(fd);
+ if (CHECK(err != len, "write sysctl",
+ "write(%s, %s, %d): err:%d %s (%d)\n",
+ sysctl, value, len, err, strerror(errno), errno))
+ return -1;
+
+ return 0;
+}
+
+static int prepare_netns(void)
+{
+ if (CHECK(unshare(CLONE_NEWNET), "create netns",
+ "unshare(CLONE_NEWNET): %s (%d)",
+ strerror(errno), errno))
+ return -1;
+
+ if (CHECK(system("ip link set dev lo up"),
+ "ip link set dev lo up", "failed\n"))
+ return -1;
+
+ if (CHECK(system("tc qdisc add dev lo clsact"),
+ "tc qdisc add dev lo clsact", "failed\n"))
+ return -1;
+
+ if (CHECK(system("tc filter add dev lo ingress bpf direct-action object-pinned " PROG_PIN_FILE),
+ "install tc cls-prog at ingress", "failed\n"))
+ return -1;
+
+ /* Ensure 20 bytes of options (i.e. a 40-byte TCP header in total) for the
+ * bpf_tcp_gen_syncookie() helper.
+ */
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_window_scaling", "1") ||
+ write_sysctl("/proc/sys/net/ipv4/tcp_timestamps", "1") ||
+ write_sysctl("/proc/sys/net/ipv4/tcp_sack", "1"))
+ return -1;
+
+ return 0;
+}
+
+static void reset_test(void)
+{
+ memset(&skel->bss->srv_sa6, 0, sizeof(skel->bss->srv_sa6));
+ skel->bss->listen_tp_sport = 0;
+ skel->bss->req_sk_sport = 0;
+ skel->bss->recv_cookie = 0;
+ skel->bss->gen_cookie = 0;
+ skel->bss->linum = 0;
+}
+
+static void print_err_line(void)
+{
+ if (skel->bss->linum)
+ printf("bpf prog error at line %u\n", skel->bss->linum);
+}
+
+static void test_conn(void)
+{
+ int listen_fd = -1, cli_fd = -1, err;
+ socklen_t addrlen = sizeof(srv_sa6);
+ int srv_port;
+
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
+ return;
+
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (CHECK_FAIL(listen_fd == -1))
+ return;
+
+ err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
+ if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
+ errno))
+ goto done;
+ memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
+ srv_port = ntohs(srv_sa6.sin6_port);
+
+ cli_fd = connect_to_fd(listen_fd, 0);
+ if (CHECK_FAIL(cli_fd == -1))
+ goto done;
+
+ if (CHECK(skel->bss->listen_tp_sport != srv_port ||
+ skel->bss->req_sk_sport != srv_port,
+ "Unexpected sk src port",
+ "listen_tp_sport:%u req_sk_sport:%u expected:%u\n",
+ skel->bss->listen_tp_sport, skel->bss->req_sk_sport,
+ srv_port))
+ goto done;
+
+ if (CHECK(skel->bss->gen_cookie || skel->bss->recv_cookie,
+ "Unexpected syncookie states",
+ "gen_cookie:%u recv_cookie:%u\n",
+ skel->bss->gen_cookie, skel->bss->recv_cookie))
+ goto done;
+
+ CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
+ skel->bss->linum);
+
+done:
+ if (listen_fd != -1)
+ close(listen_fd);
+ if (cli_fd != -1)
+ close(cli_fd);
+}
+
+static void test_syncookie(void)
+{
+ int listen_fd = -1, cli_fd = -1, err;
+ socklen_t addrlen = sizeof(srv_sa6);
+ int srv_port;
+
+ /* Enforce syncookie mode */
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
+ return;
+
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ if (CHECK_FAIL(listen_fd == -1))
+ return;
+
+ err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
+ if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
+ errno))
+ goto done;
+ memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
+ srv_port = ntohs(srv_sa6.sin6_port);
+
+ cli_fd = connect_to_fd(listen_fd, 0);
+ if (CHECK_FAIL(cli_fd == -1))
+ goto done;
+
+ if (CHECK(skel->bss->listen_tp_sport != srv_port,
+ "Unexpected tp src port",
+ "listen_tp_sport:%u expected:%u\n",
+ skel->bss->listen_tp_sport, srv_port))
+ goto done;
+
+ if (CHECK(skel->bss->req_sk_sport,
+ "Unexpected req_sk src port",
+ "req_sk_sport:%u expected:0\n",
+ skel->bss->req_sk_sport))
+ goto done;
+
+ if (CHECK(!skel->bss->gen_cookie ||
+ skel->bss->gen_cookie != skel->bss->recv_cookie,
+ "Unexpected syncookie states",
+ "gen_cookie:%u recv_cookie:%u\n",
+ skel->bss->gen_cookie, skel->bss->recv_cookie))
+ goto done;
+
+ CHECK(skel->bss->linum, "bpf prog detected error", "at line %u\n",
+ skel->bss->linum);
+
+done:
+ if (listen_fd != -1)
+ close(listen_fd);
+ if (cli_fd != -1)
+ close(cli_fd);
+}
+
+struct test {
+ const char *desc;
+ void (*run)(void);
+};
+
+#define DEF_TEST(name) { #name, test_##name }
+static struct test tests[] = {
+ DEF_TEST(conn),
+ DEF_TEST(syncookie),
+};
+
+void test_btf_skc_cls_ingress(void)
+{
+ int i, err;
+
+ skel = test_btf_skc_cls_ingress__open_and_load();
+ if (CHECK(!skel, "test_btf_skc_cls_ingress__open_and_load", "failed\n"))
+ return;
+
+ err = bpf_program__pin(skel->progs.cls_ingress, PROG_PIN_FILE);
+ if (CHECK(err, "bpf_program__pin",
+ "cannot pin bpf prog to %s. err:%d\n", PROG_PIN_FILE, err)) {
+ test_btf_skc_cls_ingress__destroy(skel);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!test__start_subtest(tests[i].desc))
+ continue;
+
+ if (prepare_netns())
+ break;
+
+ tests[i].run();
+
+ print_err_line();
+ reset_test();
+ }
+
+ bpf_program__unpin(skel->progs.cls_ingress, PROG_PIN_FILE);
+ test_btf_skc_cls_ingress__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/btf_write.c b/tools/testing/selftests/bpf/prog_tests/btf_write.c
new file mode 100644
index 000000000000..314e1e7c36df
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_write.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+static int duration = 0;
+
+void test_btf_write() {
+ const struct btf_var_secinfo *vi;
+ const struct btf_type *t;
+ const struct btf_member *m;
+ const struct btf_enum *v;
+ const struct btf_param *p;
+ struct btf *btf;
+ int id, err, str_off;
+
+ btf = btf__new_empty();
+ if (CHECK(IS_ERR(btf), "new_empty", "failed: %ld\n", PTR_ERR(btf)))
+ return;
+
+ str_off = btf__find_str(btf, "int");
+ ASSERT_EQ(str_off, -ENOENT, "int_str_missing_off");
+
+ str_off = btf__add_str(btf, "int");
+ ASSERT_EQ(str_off, 1, "int_str_off");
+
+ str_off = btf__find_str(btf, "int");
+ ASSERT_EQ(str_off, 1, "int_str_found_off");
+
+ /* BTF_KIND_INT */
+ id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
+ ASSERT_EQ(id, 1, "int_id");
+
+ t = btf__type_by_id(btf, 1);
+ /* should re-use previously added "int" string */
+ ASSERT_EQ(t->name_off, str_off, "int_name_off");
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "int", "int_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_INT, "int_kind");
+ ASSERT_EQ(t->size, 4, "int_sz");
+ ASSERT_EQ(btf_int_encoding(t), BTF_INT_SIGNED, "int_enc");
+ ASSERT_EQ(btf_int_bits(t), 32, "int_bits");
+
+ /* invalid int size */
+ id = btf__add_int(btf, "bad sz int", 7, 0);
+ ASSERT_ERR(id, "int_bad_sz");
+ /* invalid encoding */
+ id = btf__add_int(btf, "bad enc int", 4, 123);
+ ASSERT_ERR(id, "int_bad_enc");
+ /* NULL name */
+ id = btf__add_int(btf, NULL, 4, 0);
+ ASSERT_ERR(id, "int_bad_null_name");
+ /* empty name */
+ id = btf__add_int(btf, "", 4, 0);
+ ASSERT_ERR(id, "int_bad_empty_name");
+
+ /* PTR/CONST/VOLATILE/RESTRICT */
+ id = btf__add_ptr(btf, 1);
+ ASSERT_EQ(id, 2, "ptr_id");
+ t = btf__type_by_id(btf, 2);
+ ASSERT_EQ(btf_kind(t), BTF_KIND_PTR, "ptr_kind");
+ ASSERT_EQ(t->type, 1, "ptr_type");
+
+ id = btf__add_const(btf, 5); /* points forward to restrict */
+ ASSERT_EQ(id, 3, "const_id");
+ t = btf__type_by_id(btf, 3);
+ ASSERT_EQ(btf_kind(t), BTF_KIND_CONST, "const_kind");
+ ASSERT_EQ(t->type, 5, "const_type");
+
+ id = btf__add_volatile(btf, 3);
+ ASSERT_EQ(id, 4, "volatile_id");
+ t = btf__type_by_id(btf, 4);
+ ASSERT_EQ(btf_kind(t), BTF_KIND_VOLATILE, "volatile_kind");
+ ASSERT_EQ(t->type, 3, "volatile_type");
+
+ id = btf__add_restrict(btf, 4);
+ ASSERT_EQ(id, 5, "restrict_id");
+ t = btf__type_by_id(btf, 5);
+ ASSERT_EQ(btf_kind(t), BTF_KIND_RESTRICT, "restrict_kind");
+ ASSERT_EQ(t->type, 4, "restrict_type");
+
+ /* ARRAY */
+ id = btf__add_array(btf, 1, 2, 10); /* int *[10] */
+ ASSERT_EQ(id, 6, "array_id");
+ t = btf__type_by_id(btf, 6);
+ ASSERT_EQ(btf_kind(t), BTF_KIND_ARRAY, "array_kind");
+ ASSERT_EQ(btf_array(t)->index_type, 1, "array_index_type");
+ ASSERT_EQ(btf_array(t)->type, 2, "array_elem_type");
+ ASSERT_EQ(btf_array(t)->nelems, 10, "array_nelems");
+
+ /* STRUCT */
+ err = btf__add_field(btf, "field", 1, 0, 0);
+ ASSERT_ERR(err, "no_struct_field");
+ id = btf__add_struct(btf, "s1", 8);
+ ASSERT_EQ(id, 7, "struct_id");
+ err = btf__add_field(btf, "f1", 1, 0, 0);
+ ASSERT_OK(err, "f1_res");
+ err = btf__add_field(btf, "f2", 1, 32, 16);
+ ASSERT_OK(err, "f2_res");
+
+ t = btf__type_by_id(btf, 7);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "s1", "struct_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_STRUCT, "struct_kind");
+ ASSERT_EQ(btf_vlen(t), 2, "struct_vlen");
+ ASSERT_EQ(btf_kflag(t), true, "struct_kflag");
+ ASSERT_EQ(t->size, 8, "struct_sz");
+ m = btf_members(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f1", "f1_name");
+ ASSERT_EQ(m->type, 1, "f1_type");
+ ASSERT_EQ(btf_member_bit_offset(t, 0), 0, "f1_bit_off");
+ ASSERT_EQ(btf_member_bitfield_size(t, 0), 0, "f1_bit_sz");
+ m = btf_members(t) + 1;
+ ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f2", "f2_name");
+ ASSERT_EQ(m->type, 1, "f2_type");
+ ASSERT_EQ(btf_member_bit_offset(t, 1), 32, "f2_bit_off");
+ ASSERT_EQ(btf_member_bitfield_size(t, 1), 16, "f2_bit_sz");
+
+ /* UNION */
+ id = btf__add_union(btf, "u1", 8);
+ ASSERT_EQ(id, 8, "union_id");
+
+ /* invalid, non-zero offset */
+ err = btf__add_field(btf, "field", 1, 1, 0);
+ ASSERT_ERR(err, "no_struct_field");
+
+ err = btf__add_field(btf, "f1", 1, 0, 16);
+ ASSERT_OK(err, "f1_res");
+
+ t = btf__type_by_id(btf, 8);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "u1", "union_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_UNION, "union_kind");
+ ASSERT_EQ(btf_vlen(t), 1, "union_vlen");
+ ASSERT_EQ(btf_kflag(t), true, "union_kflag");
+ ASSERT_EQ(t->size, 8, "union_sz");
+ m = btf_members(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, m->name_off), "f1", "f1_name");
+ ASSERT_EQ(m->type, 1, "f1_type");
+ ASSERT_EQ(btf_member_bit_offset(t, 0), 0, "f1_bit_off");
+ ASSERT_EQ(btf_member_bitfield_size(t, 0), 16, "f1_bit_sz");
+
+ /* ENUM */
+ id = btf__add_enum(btf, "e1", 4);
+ ASSERT_EQ(id, 9, "enum_id");
+ err = btf__add_enum_value(btf, "v1", 1);
+ ASSERT_OK(err, "v1_res");
+ err = btf__add_enum_value(btf, "v2", 2);
+ ASSERT_OK(err, "v2_res");
+
+ t = btf__type_by_id(btf, 9);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "e1", "enum_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM, "enum_kind");
+ ASSERT_EQ(btf_vlen(t), 2, "enum_vlen");
+ ASSERT_EQ(t->size, 4, "enum_sz");
+ v = btf_enum(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v1", "v1_name");
+ ASSERT_EQ(v->val, 1, "v1_val");
+ v = btf_enum(t) + 1;
+ ASSERT_STREQ(btf__str_by_offset(btf, v->name_off), "v2", "v2_name");
+ ASSERT_EQ(v->val, 2, "v2_val");
+
+ /* FWDs */
+ id = btf__add_fwd(btf, "struct_fwd", BTF_FWD_STRUCT);
+ ASSERT_EQ(id, 10, "struct_fwd_id");
+ t = btf__type_by_id(btf, 10);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "struct_fwd", "fwd_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind");
+ ASSERT_EQ(btf_kflag(t), 0, "fwd_kflag");
+
+ id = btf__add_fwd(btf, "union_fwd", BTF_FWD_UNION);
+ ASSERT_EQ(id, 11, "union_fwd_id");
+ t = btf__type_by_id(btf, 11);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "union_fwd", "fwd_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_FWD, "fwd_kind");
+ ASSERT_EQ(btf_kflag(t), 1, "fwd_kflag");
+
+ id = btf__add_fwd(btf, "enum_fwd", BTF_FWD_ENUM);
+ ASSERT_EQ(id, 12, "enum_fwd_id");
+ t = btf__type_by_id(btf, 12);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "enum_fwd", "fwd_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_ENUM, "enum_fwd_kind");
+	ASSERT_EQ(btf_vlen(t), 0, "enum_fwd_vlen");
+ ASSERT_EQ(t->size, 4, "enum_fwd_sz");
+
+ /* TYPEDEF */
+ id = btf__add_typedef(btf, "typedef1", 1);
+	ASSERT_EQ(id, 13, "typedef_id");
+ t = btf__type_by_id(btf, 13);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "typedef1", "typedef_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_TYPEDEF, "typedef_kind");
+ ASSERT_EQ(t->type, 1, "typedef_type");
+
+ /* FUNC & FUNC_PROTO */
+ id = btf__add_func(btf, "func1", BTF_FUNC_GLOBAL, 15);
+ ASSERT_EQ(id, 14, "func_id");
+ t = btf__type_by_id(btf, 14);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "func1", "func_name");
+ ASSERT_EQ(t->type, 15, "func_type");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_FUNC, "func_kind");
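+	/* for BTF_KIND_FUNC, vlen encodes the linkage (BTF_FUNC_GLOBAL here) */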
+ ASSERT_EQ(btf_vlen(t), BTF_FUNC_GLOBAL, "func_vlen");
+
+ id = btf__add_func_proto(btf, 1);
+ ASSERT_EQ(id, 15, "func_proto_id");
+ err = btf__add_func_param(btf, "p1", 1);
+ ASSERT_OK(err, "p1_res");
+ err = btf__add_func_param(btf, "p2", 2);
+ ASSERT_OK(err, "p2_res");
+
+ t = btf__type_by_id(btf, 15);
+ ASSERT_EQ(btf_kind(t), BTF_KIND_FUNC_PROTO, "func_proto_kind");
+ ASSERT_EQ(btf_vlen(t), 2, "func_proto_vlen");
+ ASSERT_EQ(t->type, 1, "func_proto_ret_type");
+ p = btf_params(t) + 0;
+ ASSERT_STREQ(btf__str_by_offset(btf, p->name_off), "p1", "p1_name");
+ ASSERT_EQ(p->type, 1, "p1_type");
+ p = btf_params(t) + 1;
+ ASSERT_STREQ(btf__str_by_offset(btf, p->name_off), "p2", "p2_name");
+ ASSERT_EQ(p->type, 2, "p2_type");
+
+ /* VAR */
+ id = btf__add_var(btf, "var1", BTF_VAR_GLOBAL_ALLOCATED, 1);
+ ASSERT_EQ(id, 16, "var_id");
+ t = btf__type_by_id(btf, 16);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "var1", "var_name");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_VAR, "var_kind");
+ ASSERT_EQ(t->type, 1, "var_type");
+	ASSERT_EQ(btf_var(t)->linkage, BTF_VAR_GLOBAL_ALLOCATED, "var_linkage");
+
+ /* DATASECT */
+ id = btf__add_datasec(btf, "datasec1", 12);
+ ASSERT_EQ(id, 17, "datasec_id");
+ err = btf__add_datasec_var_info(btf, 1, 4, 8);
+ ASSERT_OK(err, "v1_res");
+
+ t = btf__type_by_id(btf, 17);
+ ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "datasec1", "datasec_name");
+ ASSERT_EQ(t->size, 12, "datasec_sz");
+ ASSERT_EQ(btf_kind(t), BTF_KIND_DATASEC, "datasec_kind");
+ ASSERT_EQ(btf_vlen(t), 1, "datasec_vlen");
+ vi = btf_var_secinfos(t) + 0;
+ ASSERT_EQ(vi->type, 1, "v1_type");
+ ASSERT_EQ(vi->offset, 4, "v1_off");
+ ASSERT_EQ(vi->size, 8, "v1_sz");
+
+ btf__free(btf);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
index f259085cca6a..9781d85cb223 100644
--- a/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/cls_redirect.c
@@ -12,10 +12,13 @@
#include "progs/test_cls_redirect.h"
#include "test_cls_redirect.skel.h"
+#include "test_cls_redirect_subprogs.skel.h"
#define ENCAP_IP INADDR_LOOPBACK
#define ENCAP_PORT (1234)
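+/* 'duration' is referenced implicitly by the CHECK() macro */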
+static int duration = 0;
+
struct addr_port {
in_port_t port;
union {
@@ -361,30 +364,18 @@ static void close_fds(int *fds, int n)
close(fds[i]);
}
-void test_cls_redirect(void)
+static void test_cls_redirect_common(struct bpf_program *prog)
{
- struct test_cls_redirect *skel = NULL;
struct bpf_prog_test_run_attr tattr = {};
int families[] = { AF_INET, AF_INET6 };
struct sockaddr_storage ss;
struct sockaddr *addr;
socklen_t slen;
int i, j, err;
-
int servers[__NR_KIND][ARRAY_SIZE(families)] = {};
int conns[__NR_KIND][ARRAY_SIZE(families)] = {};
struct tuple tuples[__NR_KIND][ARRAY_SIZE(families)];
- skel = test_cls_redirect__open();
- if (CHECK_FAIL(!skel))
- return;
-
- skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
- skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
-
- if (CHECK_FAIL(test_cls_redirect__load(skel)))
- goto cleanup;
-
addr = (struct sockaddr *)&ss;
for (i = 0; i < ARRAY_SIZE(families); i++) {
slen = prepare_addr(&ss, families[i]);
@@ -402,7 +393,7 @@ void test_cls_redirect(void)
goto cleanup;
}
- tattr.prog_fd = bpf_program__fd(skel->progs.cls_redirect);
+ tattr.prog_fd = bpf_program__fd(prog);
for (i = 0; i < ARRAY_SIZE(tests); i++) {
struct test_cfg *test = &tests[i];
@@ -450,7 +441,58 @@ void test_cls_redirect(void)
}
cleanup:
- test_cls_redirect__destroy(skel);
close_fds((int *)servers, sizeof(servers) / sizeof(servers[0][0]));
close_fds((int *)conns, sizeof(conns) / sizeof(conns[0][0]));
}
+
+static void test_cls_redirect_inlined(void)
+{
+ struct test_cls_redirect *skel;
+ int err;
+
+ skel = test_cls_redirect__open();
+ if (CHECK(!skel, "skel_open", "failed\n"))
+ return;
+
+ skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
+ skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
+
+ err = test_cls_redirect__load(skel);
+ if (CHECK(err, "skel_load", "failed: %d\n", err))
+ goto cleanup;
+
+ test_cls_redirect_common(skel->progs.cls_redirect);
+
+cleanup:
+ test_cls_redirect__destroy(skel);
+}
+
+static void test_cls_redirect_subprogs(void)
+{
+ struct test_cls_redirect_subprogs *skel;
+ int err;
+
+ skel = test_cls_redirect_subprogs__open();
+ if (CHECK(!skel, "skel_open", "failed\n"))
+ return;
+
+ skel->rodata->ENCAPSULATION_IP = htonl(ENCAP_IP);
+ skel->rodata->ENCAPSULATION_PORT = htons(ENCAP_PORT);
+
+ err = test_cls_redirect_subprogs__load(skel);
+ if (CHECK(err, "skel_load", "failed: %d\n", err))
+ goto cleanup;
+
+ test_cls_redirect_common(skel->progs.cls_redirect);
+
+cleanup:
+ test_cls_redirect_subprogs__destroy(skel);
+}
+
+void test_cls_redirect(void)
+{
+ if (test__start_subtest("cls_redirect_inlined"))
+ test_cls_redirect_inlined();
+ if (test__start_subtest("cls_redirect_subprogs"))
+ test_cls_redirect_subprogs();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_autosize.c b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
new file mode 100644
index 000000000000..981c251453d9
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/core_autosize.c
@@ -0,0 +1,225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+/* real layout and sizes according to test's (32-bit) BTF
+ * needs to be defined before skeleton is included */
+struct test_struct___real {
+	unsigned int ptr; /* can't use `void *`, it is always 8 bytes in the BPF target */
+ unsigned int val2;
+ unsigned long long val1;
+ unsigned short val3;
+ unsigned char val4;
+ unsigned char _pad;
+};
+
+#include "test_core_autosize.skel.h"
+
+static int duration = 0;
+
+static struct {
+ unsigned long long ptr_samesized;
+ unsigned long long val1_samesized;
+ unsigned long long val2_samesized;
+ unsigned long long val3_samesized;
+ unsigned long long val4_samesized;
+ struct test_struct___real output_samesized;
+
+ unsigned long long ptr_downsized;
+ unsigned long long val1_downsized;
+ unsigned long long val2_downsized;
+ unsigned long long val3_downsized;
+ unsigned long long val4_downsized;
+ struct test_struct___real output_downsized;
+
+ unsigned long long ptr_probed;
+ unsigned long long val1_probed;
+ unsigned long long val2_probed;
+ unsigned long long val3_probed;
+ unsigned long long val4_probed;
+
+ unsigned long long ptr_signed;
+ unsigned long long val1_signed;
+ unsigned long long val2_signed;
+ unsigned long long val3_signed;
+ unsigned long long val4_signed;
+ struct test_struct___real output_signed;
+} out;
+
+void test_core_autosize(void)
+{
+ char btf_file[] = "/tmp/core_autosize.btf.XXXXXX";
+ int err, fd = -1, zero = 0;
+ int char_id, short_id, int_id, long_long_id, void_ptr_id, id;
+	struct test_core_autosize *skel = NULL;
+ struct bpf_object_load_attr load_attr = {};
+ struct bpf_program *prog;
+ struct bpf_map *bss_map;
+ struct btf *btf = NULL;
+ size_t written;
+ const void *raw_data;
+ __u32 raw_sz;
+ FILE *f = NULL;
+
+ btf = btf__new_empty();
+ if (!ASSERT_OK_PTR(btf, "empty_btf"))
+ return;
+ /* Emit the following struct with 32-bit pointer size:
+ *
+ * struct test_struct {
+ * void *ptr;
+ * unsigned long val2;
+ * unsigned long long val1;
+ * unsigned short val3;
+ * unsigned char val4;
+ * char: 8;
+ * };
+ *
+ * This struct is going to be used as the "kernel BTF" for this test.
+	 * It's equivalent memory-layout-wise to test_struct___real above.
+ */
+
+ /* force 32-bit pointer size */
+ btf__set_pointer_size(btf, 4);
+
+ char_id = btf__add_int(btf, "unsigned char", 1, 0);
+ ASSERT_EQ(char_id, 1, "char_id");
+ short_id = btf__add_int(btf, "unsigned short", 2, 0);
+ ASSERT_EQ(short_id, 2, "short_id");
+ /* "long unsigned int" of 4 byte size tells BTF that sizeof(void *) == 4 */
+ int_id = btf__add_int(btf, "long unsigned int", 4, 0);
+ ASSERT_EQ(int_id, 3, "int_id");
+ long_long_id = btf__add_int(btf, "unsigned long long", 8, 0);
+ ASSERT_EQ(long_long_id, 4, "long_long_id");
+ void_ptr_id = btf__add_ptr(btf, 0);
+ ASSERT_EQ(void_ptr_id, 5, "void_ptr_id");
+
+ id = btf__add_struct(btf, "test_struct", 20 /* bytes */);
+ ASSERT_EQ(id, 6, "struct_id");
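+	/* chain with ?: so the first btf__add_field() error is preserved */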
+ err = btf__add_field(btf, "ptr", void_ptr_id, 0, 0);
+ err = err ?: btf__add_field(btf, "val2", int_id, 32, 0);
+ err = err ?: btf__add_field(btf, "val1", long_long_id, 64, 0);
+ err = err ?: btf__add_field(btf, "val3", short_id, 128, 0);
+ err = err ?: btf__add_field(btf, "val4", char_id, 144, 0);
+ ASSERT_OK(err, "struct_fields");
+
+ fd = mkstemp(btf_file);
+ if (CHECK(fd < 0, "btf_tmp", "failed to create file: %d\n", fd))
+ goto cleanup;
+ f = fdopen(fd, "w");
+ if (!ASSERT_OK_PTR(f, "btf_fdopen"))
+ goto cleanup;
+
+ raw_data = btf__get_raw_data(btf, &raw_sz);
+ if (!ASSERT_OK_PTR(raw_data, "raw_data"))
+ goto cleanup;
+ written = fwrite(raw_data, 1, raw_sz, f);
+ if (CHECK(written != raw_sz, "btf_write", "written: %zu, errno: %d\n", written, errno))
+ goto cleanup;
+ fflush(f);
+ fclose(f);
+ f = NULL;
+ close(fd);
+ fd = -1;
+
+ /* open and load BPF program with custom BTF as the kernel BTF */
+ skel = test_core_autosize__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ /* disable handle_signed() for now */
+ prog = bpf_object__find_program_by_name(skel->obj, "handle_signed");
+ if (!ASSERT_OK_PTR(prog, "prog_find"))
+ goto cleanup;
+ bpf_program__set_autoload(prog, false);
+
+ load_attr.obj = skel->obj;
+ load_attr.target_btf_path = btf_file;
+ err = bpf_object__load_xattr(&load_attr);
+ if (!ASSERT_OK(err, "prog_load"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, "handle_samesize");
+ if (!ASSERT_OK_PTR(prog, "prog_find"))
+ goto cleanup;
+ skel->links.handle_samesize = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(skel->links.handle_samesize, "prog_attach"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, "handle_downsize");
+ if (!ASSERT_OK_PTR(prog, "prog_find"))
+ goto cleanup;
+ skel->links.handle_downsize = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(skel->links.handle_downsize, "prog_attach"))
+ goto cleanup;
+
+ prog = bpf_object__find_program_by_name(skel->obj, "handle_probed");
+ if (!ASSERT_OK_PTR(prog, "prog_find"))
+ goto cleanup;
+ skel->links.handle_probed = bpf_program__attach(prog);
+ if (!ASSERT_OK_PTR(skel->links.handle_probed, "prog_attach"))
+ goto cleanup;
+
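+	/* any syscall (usleep here) triggers the attached programs */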
+ usleep(1);
+
+ bss_map = bpf_object__find_map_by_name(skel->obj, "test_cor.bss");
+ if (!ASSERT_OK_PTR(bss_map, "bss_map_find"))
+ goto cleanup;
+
+ err = bpf_map_lookup_elem(bpf_map__fd(bss_map), &zero, (void *)&out);
+ if (!ASSERT_OK(err, "bss_lookup"))
+ goto cleanup;
+
+ ASSERT_EQ(out.ptr_samesized, 0x01020304, "ptr_samesized");
+ ASSERT_EQ(out.val1_samesized, 0x1020304050607080, "val1_samesized");
+ ASSERT_EQ(out.val2_samesized, 0x0a0b0c0d, "val2_samesized");
+ ASSERT_EQ(out.val3_samesized, 0xfeed, "val3_samesized");
+ ASSERT_EQ(out.val4_samesized, 0xb9, "val4_samesized");
+ ASSERT_EQ(out.output_samesized.ptr, 0x01020304, "ptr_samesized");
+ ASSERT_EQ(out.output_samesized.val1, 0x1020304050607080, "val1_samesized");
+ ASSERT_EQ(out.output_samesized.val2, 0x0a0b0c0d, "val2_samesized");
+ ASSERT_EQ(out.output_samesized.val3, 0xfeed, "val3_samesized");
+ ASSERT_EQ(out.output_samesized.val4, 0xb9, "val4_samesized");
+
+ ASSERT_EQ(out.ptr_downsized, 0x01020304, "ptr_downsized");
+ ASSERT_EQ(out.val1_downsized, 0x1020304050607080, "val1_downsized");
+ ASSERT_EQ(out.val2_downsized, 0x0a0b0c0d, "val2_downsized");
+ ASSERT_EQ(out.val3_downsized, 0xfeed, "val3_downsized");
+ ASSERT_EQ(out.val4_downsized, 0xb9, "val4_downsized");
+ ASSERT_EQ(out.output_downsized.ptr, 0x01020304, "ptr_downsized");
+ ASSERT_EQ(out.output_downsized.val1, 0x1020304050607080, "val1_downsized");
+ ASSERT_EQ(out.output_downsized.val2, 0x0a0b0c0d, "val2_downsized");
+ ASSERT_EQ(out.output_downsized.val3, 0xfeed, "val3_downsized");
+ ASSERT_EQ(out.output_downsized.val4, 0xb9, "val4_downsized");
+
+ ASSERT_EQ(out.ptr_probed, 0x01020304, "ptr_probed");
+ ASSERT_EQ(out.val1_probed, 0x1020304050607080, "val1_probed");
+ ASSERT_EQ(out.val2_probed, 0x0a0b0c0d, "val2_probed");
+ ASSERT_EQ(out.val3_probed, 0xfeed, "val3_probed");
+ ASSERT_EQ(out.val4_probed, 0xb9, "val4_probed");
+
+ test_core_autosize__destroy(skel);
+ skel = NULL;
+
+ /* now re-load with handle_signed() enabled, it should fail loading */
+ skel = test_core_autosize__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ load_attr.obj = skel->obj;
+ load_attr.target_btf_path = btf_file;
+ err = bpf_object__load_xattr(&load_attr);
+ if (!ASSERT_ERR(err, "bad_prog_load"))
+ goto cleanup;
+
+cleanup:
+ if (f)
+ fclose(f);
+ if (fd >= 0)
+ close(fd);
+ remove(btf_file);
+ btf__free(btf);
+ test_core_autosize__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/core_reloc.c b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
index a54eafc5e4b3..30e40ff4b0d8 100644
--- a/tools/testing/selftests/bpf/prog_tests/core_reloc.c
+++ b/tools/testing/selftests/bpf/prog_tests/core_reloc.c
@@ -3,6 +3,9 @@
#include "progs/core_reloc_types.h"
#include <sys/mman.h>
#include <sys/syscall.h>
+#include <bpf/btf.h>
+
+static int duration = 0;
#define STRUCT_TO_CHAR_PTR(struct_name) (const char *)&(struct struct_name)
@@ -177,14 +180,13 @@
.fails = true, \
}
-#define EXISTENCE_CASE_COMMON(name) \
+#define FIELD_EXISTS_CASE_COMMON(name) \
.case_name = #name, \
.bpf_obj_file = "test_core_reloc_existence.o", \
- .btf_src_file = "btf__core_reloc_" #name ".o", \
- .relaxed_core_relocs = true
+ .btf_src_file = "btf__core_reloc_" #name ".o" \
-#define EXISTENCE_ERR_CASE(name) { \
- EXISTENCE_CASE_COMMON(name), \
+#define FIELD_EXISTS_ERR_CASE(name) { \
+ FIELD_EXISTS_CASE_COMMON(name), \
.fails = true, \
}
@@ -253,6 +255,61 @@
.fails = true, \
}
+#define TYPE_BASED_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_type_based.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o" \
+
+#define TYPE_BASED_CASE(name, ...) { \
+ TYPE_BASED_CASE_COMMON(name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_type_based_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_type_based_output), \
+}
+
+#define TYPE_BASED_ERR_CASE(name) { \
+ TYPE_BASED_CASE_COMMON(name), \
+ .fails = true, \
+}
+
+#define TYPE_ID_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_type_id.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o" \
+
+#define TYPE_ID_CASE(name, setup_fn) { \
+ TYPE_ID_CASE_COMMON(name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_type_id_output) {}, \
+ .output_len = sizeof(struct core_reloc_type_id_output), \
+ .setup = setup_fn, \
+}
+
+#define TYPE_ID_ERR_CASE(name) { \
+ TYPE_ID_CASE_COMMON(name), \
+ .fails = true, \
+}
+
+#define ENUMVAL_CASE_COMMON(name) \
+ .case_name = #name, \
+ .bpf_obj_file = "test_core_reloc_enumval.o", \
+ .btf_src_file = "btf__core_reloc_" #name ".o" \
+
+#define ENUMVAL_CASE(name, ...) { \
+ ENUMVAL_CASE_COMMON(name), \
+ .output = STRUCT_TO_CHAR_PTR(core_reloc_enumval_output) \
+ __VA_ARGS__, \
+ .output_len = sizeof(struct core_reloc_enumval_output), \
+}
+
+#define ENUMVAL_ERR_CASE(name) { \
+ ENUMVAL_CASE_COMMON(name), \
+ .fails = true, \
+}
+
+struct core_reloc_test_case;
+
+typedef int (*setup_test_fn)(struct core_reloc_test_case *test);
+
struct core_reloc_test_case {
const char *case_name;
const char *bpf_obj_file;
@@ -264,8 +321,136 @@ struct core_reloc_test_case {
bool fails;
bool relaxed_core_relocs;
bool direct_raw_tp;
+ setup_test_fn setup;
};
+static int find_btf_type(const struct btf *btf, const char *name, __u32 kind)
+{
+ int id;
+
+ id = btf__find_by_name_kind(btf, name, kind);
+ if (CHECK(id <= 0, "find_type_id", "failed to find '%s', kind %d: %d\n", name, kind, id))
+ return -1;
+
+ return id;
+}
+
+static int setup_type_id_case_local(struct core_reloc_test_case *test)
+{
+ struct core_reloc_type_id_output *exp = (void *)test->output;
+ struct btf *local_btf = btf__parse(test->bpf_obj_file, NULL);
+ struct btf *targ_btf = btf__parse(test->btf_src_file, NULL);
+ const struct btf_type *t;
+ const char *name;
+ int i;
+
+ if (CHECK(IS_ERR(local_btf), "local_btf", "failed: %ld\n", PTR_ERR(local_btf)) ||
+ CHECK(IS_ERR(targ_btf), "targ_btf", "failed: %ld\n", PTR_ERR(targ_btf))) {
+ btf__free(local_btf);
+ btf__free(targ_btf);
+ return -EINVAL;
+ }
+
+ exp->local_anon_struct = -1;
+ exp->local_anon_union = -1;
+ exp->local_anon_enum = -1;
+ exp->local_anon_func_proto_ptr = -1;
+ exp->local_anon_void_ptr = -1;
+ exp->local_anon_arr = -1;
+
+	for (i = 1; i <= btf__get_nr_types(local_btf); i++) {
+ t = btf__type_by_id(local_btf, i);
+ /* we are interested only in anonymous types */
+ if (t->name_off)
+ continue;
+
+ if (btf_is_struct(t) && btf_vlen(t) &&
+ (name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) &&
+ strcmp(name, "marker_field") == 0) {
+ exp->local_anon_struct = i;
+ } else if (btf_is_union(t) && btf_vlen(t) &&
+ (name = btf__name_by_offset(local_btf, btf_members(t)[0].name_off)) &&
+ strcmp(name, "marker_field") == 0) {
+ exp->local_anon_union = i;
+ } else if (btf_is_enum(t) && btf_vlen(t) &&
+ (name = btf__name_by_offset(local_btf, btf_enum(t)[0].name_off)) &&
+ strcmp(name, "MARKER_ENUM_VAL") == 0) {
+ exp->local_anon_enum = i;
+ } else if (btf_is_ptr(t) && (t = btf__type_by_id(local_btf, t->type))) {
+ if (btf_is_func_proto(t) && (t = btf__type_by_id(local_btf, t->type)) &&
+ btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) &&
+ strcmp(name, "_Bool") == 0) {
+ /* ptr -> func_proto -> _Bool */
+ exp->local_anon_func_proto_ptr = i;
+ } else if (btf_is_void(t)) {
+ /* ptr -> void */
+ exp->local_anon_void_ptr = i;
+ }
+ } else if (btf_is_array(t) && (t = btf__type_by_id(local_btf, btf_array(t)->type)) &&
+ btf_is_int(t) && (name = btf__name_by_offset(local_btf, t->name_off)) &&
+ strcmp(name, "_Bool") == 0) {
+ /* _Bool[] */
+ exp->local_anon_arr = i;
+ }
+ }
+
+ exp->local_struct = find_btf_type(local_btf, "a_struct", BTF_KIND_STRUCT);
+ exp->local_union = find_btf_type(local_btf, "a_union", BTF_KIND_UNION);
+ exp->local_enum = find_btf_type(local_btf, "an_enum", BTF_KIND_ENUM);
+ exp->local_int = find_btf_type(local_btf, "int", BTF_KIND_INT);
+ exp->local_struct_typedef = find_btf_type(local_btf, "named_struct_typedef", BTF_KIND_TYPEDEF);
+ exp->local_func_proto_typedef = find_btf_type(local_btf, "func_proto_typedef", BTF_KIND_TYPEDEF);
+ exp->local_arr_typedef = find_btf_type(local_btf, "arr_typedef", BTF_KIND_TYPEDEF);
+
+ btf__free(local_btf);
+ btf__free(targ_btf);
+ return 0;
+}
+
+static int setup_type_id_case_success(struct core_reloc_test_case *test)
+{
+ struct core_reloc_type_id_output *exp = (void *)test->output;
+	struct btf *targ_btf;
+ int err;
+
+ err = setup_type_id_case_local(test);
+ if (err)
+ return err;
+
+ targ_btf = btf__parse(test->btf_src_file, NULL);
+
+ exp->targ_struct = find_btf_type(targ_btf, "a_struct", BTF_KIND_STRUCT);
+ exp->targ_union = find_btf_type(targ_btf, "a_union", BTF_KIND_UNION);
+ exp->targ_enum = find_btf_type(targ_btf, "an_enum", BTF_KIND_ENUM);
+ exp->targ_int = find_btf_type(targ_btf, "int", BTF_KIND_INT);
+ exp->targ_struct_typedef = find_btf_type(targ_btf, "named_struct_typedef", BTF_KIND_TYPEDEF);
+ exp->targ_func_proto_typedef = find_btf_type(targ_btf, "func_proto_typedef", BTF_KIND_TYPEDEF);
+ exp->targ_arr_typedef = find_btf_type(targ_btf, "arr_typedef", BTF_KIND_TYPEDEF);
+
+ btf__free(targ_btf);
+ return 0;
+}
+
+static int setup_type_id_case_failure(struct core_reloc_test_case *test)
+{
+ struct core_reloc_type_id_output *exp = (void *)test->output;
+ int err;
+
+ err = setup_type_id_case_local(test);
+ if (err)
+ return err;
+
+ exp->targ_struct = 0;
+ exp->targ_union = 0;
+ exp->targ_enum = 0;
+ exp->targ_int = 0;
+ exp->targ_struct_typedef = 0;
+ exp->targ_func_proto_typedef = 0;
+ exp->targ_arr_typedef = 0;
+
+ return 0;
+}
+
static struct core_reloc_test_case test_cases[] = {
/* validate we can find kernel image and use its BTF for relocs */
{
@@ -364,7 +549,7 @@ static struct core_reloc_test_case test_cases[] = {
/* validate field existence checks */
{
- EXISTENCE_CASE_COMMON(existence),
+ FIELD_EXISTS_CASE_COMMON(existence),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence) {
.a = 1,
.b = 2,
@@ -388,7 +573,7 @@ static struct core_reloc_test_case test_cases[] = {
.output_len = sizeof(struct core_reloc_existence_output),
},
{
- EXISTENCE_CASE_COMMON(existence___minimal),
+ FIELD_EXISTS_CASE_COMMON(existence___minimal),
.input = STRUCT_TO_CHAR_PTR(core_reloc_existence___minimal) {
.a = 42,
},
@@ -408,12 +593,12 @@ static struct core_reloc_test_case test_cases[] = {
.output_len = sizeof(struct core_reloc_existence_output),
},
- EXISTENCE_ERR_CASE(existence__err_int_sz),
- EXISTENCE_ERR_CASE(existence__err_int_type),
- EXISTENCE_ERR_CASE(existence__err_int_kind),
- EXISTENCE_ERR_CASE(existence__err_arr_kind),
- EXISTENCE_ERR_CASE(existence__err_arr_value_type),
- EXISTENCE_ERR_CASE(existence__err_struct_type),
+ FIELD_EXISTS_ERR_CASE(existence__err_int_sz),
+ FIELD_EXISTS_ERR_CASE(existence__err_int_type),
+ FIELD_EXISTS_ERR_CASE(existence__err_int_kind),
+ FIELD_EXISTS_ERR_CASE(existence__err_arr_kind),
+ FIELD_EXISTS_ERR_CASE(existence__err_arr_value_type),
+ FIELD_EXISTS_ERR_CASE(existence__err_struct_type),
/* bitfield relocation checks */
BITFIELDS_CASE(bitfields, {
@@ -452,11 +637,117 @@ static struct core_reloc_test_case test_cases[] = {
/* size relocation checks */
SIZE_CASE(size),
SIZE_CASE(size___diff_sz),
+ SIZE_ERR_CASE(size___err_ambiguous),
+
+ /* validate type existence and size relocations */
+ TYPE_BASED_CASE(type_based, {
+ .struct_exists = 1,
+ .union_exists = 1,
+ .enum_exists = 1,
+ .typedef_named_struct_exists = 1,
+ .typedef_anon_struct_exists = 1,
+ .typedef_struct_ptr_exists = 1,
+ .typedef_int_exists = 1,
+ .typedef_enum_exists = 1,
+ .typedef_void_ptr_exists = 1,
+ .typedef_func_proto_exists = 1,
+ .typedef_arr_exists = 1,
+ .struct_sz = sizeof(struct a_struct),
+ .union_sz = sizeof(union a_union),
+ .enum_sz = sizeof(enum an_enum),
+ .typedef_named_struct_sz = sizeof(named_struct_typedef),
+ .typedef_anon_struct_sz = sizeof(anon_struct_typedef),
+ .typedef_struct_ptr_sz = sizeof(struct_ptr_typedef),
+ .typedef_int_sz = sizeof(int_typedef),
+ .typedef_enum_sz = sizeof(enum_typedef),
+ .typedef_void_ptr_sz = sizeof(void_ptr_typedef),
+ .typedef_func_proto_sz = sizeof(func_proto_typedef),
+ .typedef_arr_sz = sizeof(arr_typedef),
+ }),
+ TYPE_BASED_CASE(type_based___all_missing, {
+ /* all zeros */
+ }),
+ TYPE_BASED_CASE(type_based___diff_sz, {
+ .struct_exists = 1,
+ .union_exists = 1,
+ .enum_exists = 1,
+ .typedef_named_struct_exists = 1,
+ .typedef_anon_struct_exists = 1,
+ .typedef_struct_ptr_exists = 1,
+ .typedef_int_exists = 1,
+ .typedef_enum_exists = 1,
+ .typedef_void_ptr_exists = 1,
+ .typedef_func_proto_exists = 1,
+ .typedef_arr_exists = 1,
+ .struct_sz = sizeof(struct a_struct___diff_sz),
+ .union_sz = sizeof(union a_union___diff_sz),
+ .enum_sz = sizeof(enum an_enum___diff_sz),
+ .typedef_named_struct_sz = sizeof(named_struct_typedef___diff_sz),
+ .typedef_anon_struct_sz = sizeof(anon_struct_typedef___diff_sz),
+ .typedef_struct_ptr_sz = sizeof(struct_ptr_typedef___diff_sz),
+ .typedef_int_sz = sizeof(int_typedef___diff_sz),
+ .typedef_enum_sz = sizeof(enum_typedef___diff_sz),
+ .typedef_void_ptr_sz = sizeof(void_ptr_typedef___diff_sz),
+ .typedef_func_proto_sz = sizeof(func_proto_typedef___diff_sz),
+ .typedef_arr_sz = sizeof(arr_typedef___diff_sz),
+ }),
+ TYPE_BASED_CASE(type_based___incompat, {
+ .enum_exists = 1,
+ .enum_sz = sizeof(enum an_enum),
+ }),
+ TYPE_BASED_CASE(type_based___fn_wrong_args, {
+ .struct_exists = 1,
+ .struct_sz = sizeof(struct a_struct),
+ }),
+
+ /* BTF_TYPE_ID_LOCAL/BTF_TYPE_ID_TARGET tests */
+ TYPE_ID_CASE(type_id, setup_type_id_case_success),
+ TYPE_ID_CASE(type_id___missing_targets, setup_type_id_case_failure),
+
+ /* Enumerator value existence and value relocations */
+ ENUMVAL_CASE(enumval, {
+ .named_val1_exists = true,
+ .named_val2_exists = true,
+ .named_val3_exists = true,
+ .anon_val1_exists = true,
+ .anon_val2_exists = true,
+ .anon_val3_exists = true,
+ .named_val1 = 1,
+ .named_val2 = 2,
+ .anon_val1 = 0x10,
+ .anon_val2 = 0x20,
+ }),
+ ENUMVAL_CASE(enumval___diff, {
+ .named_val1_exists = true,
+ .named_val2_exists = true,
+ .named_val3_exists = true,
+ .anon_val1_exists = true,
+ .anon_val2_exists = true,
+ .anon_val3_exists = true,
+ .named_val1 = 101,
+ .named_val2 = 202,
+ .anon_val1 = 0x11,
+ .anon_val2 = 0x22,
+ }),
+ ENUMVAL_CASE(enumval___val3_missing, {
+ .named_val1_exists = true,
+ .named_val2_exists = true,
+ .named_val3_exists = false,
+ .anon_val1_exists = true,
+ .anon_val2_exists = true,
+ .anon_val3_exists = false,
+ .named_val1 = 111,
+ .named_val2 = 222,
+ .anon_val1 = 0x111,
+ .anon_val2 = 0x222,
+ }),
+ ENUMVAL_ERR_CASE(enumval___err_missing),
};
struct data {
char in[256];
char out[256];
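+	/* set by the BPF program to request that the test be skipped */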
+ bool skip;
uint64_t my_pid_tgid;
};
@@ -472,7 +763,7 @@ void test_core_reloc(void)
struct bpf_object_load_attr load_attr = {};
struct core_reloc_test_case *test_case;
const char *tp_name, *probe_name;
- int err, duration = 0, i, equal;
+ int err, i, equal;
struct bpf_link *link = NULL;
struct bpf_map *data_map;
struct bpf_program *prog;
@@ -488,11 +779,13 @@ void test_core_reloc(void)
if (!test__start_subtest(test_case->case_name))
continue;
- DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .relaxed_core_relocs = test_case->relaxed_core_relocs,
- );
+ if (test_case->setup) {
+ err = test_case->setup(test_case);
+ if (CHECK(err, "test_setup", "test #%d setup failed: %d\n", i, err))
+ continue;
+ }
- obj = bpf_object__open_file(test_case->bpf_obj_file, &opts);
+ obj = bpf_object__open_file(test_case->bpf_obj_file, NULL);
if (CHECK(IS_ERR(obj), "obj_open", "failed to open '%s': %ld\n",
test_case->bpf_obj_file, PTR_ERR(obj)))
continue;
@@ -515,15 +808,10 @@ void test_core_reloc(void)
load_attr.log_level = 0;
load_attr.target_btf_path = test_case->btf_src_file;
err = bpf_object__load_xattr(&load_attr);
- if (test_case->fails) {
- CHECK(!err, "obj_load_fail",
- "should fail to load prog '%s'\n", probe_name);
+ if (err) {
+ if (!test_case->fails)
+ CHECK(false, "obj_load", "failed to load prog '%s': %d\n", probe_name, err);
goto cleanup;
- } else {
- if (CHECK(err, "obj_load",
- "failed to load prog '%s': %d\n",
- probe_name, err))
- goto cleanup;
}
data_map = bpf_object__find_map_by_name(obj, "test_cor.bss");
@@ -551,6 +839,16 @@ void test_core_reloc(void)
/* trigger test run */
usleep(1);
+ if (data->skip) {
+ test__skip();
+ goto cleanup;
+ }
+
+ if (test_case->fails) {
+ CHECK(false, "obj_load_fail", "should fail to load prog '%s'\n", probe_name);
+ goto cleanup;
+ }
+
equal = memcmp(data->out, test_case->output,
test_case->output_len) == 0;
if (CHECK(!equal, "check_result",
diff --git a/tools/testing/selftests/bpf/prog_tests/d_path.c b/tools/testing/selftests/bpf/prog_tests/d_path.c
new file mode 100644
index 000000000000..0a577a248d34
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/d_path.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <sys/stat.h>
+#include <linux/sched.h>
+#include <sys/syscall.h>
+
+#define MAX_PATH_LEN 128
+#define MAX_FILES 7
+
+#include "test_d_path.skel.h"
+
+static int duration;
+
+static struct {
+ __u32 cnt;
+ char paths[MAX_FILES][MAX_PATH_LEN];
+} src;
+
+static int set_pathname(int fd, pid_t pid)
+{
+ char buf[MAX_PATH_LEN];
+
+ snprintf(buf, MAX_PATH_LEN, "/proc/%d/fd/%d", pid, fd);
+ return readlink(buf, src.paths[src.cnt++], MAX_PATH_LEN);
+}
+
+static int trigger_fstat_events(pid_t pid)
+{
+ int sockfd = -1, procfd = -1, devfd = -1;
+ int localfd = -1, indicatorfd = -1;
+ int pipefd[2] = { -1, -1 };
+ struct stat fileStat;
+ int ret = -1;
+
+	/* unmountable pseudo-filesystem (pipefs) */
+ if (CHECK(pipe(pipefd) < 0, "trigger", "pipe failed\n"))
+ return ret;
+	/* unmountable pseudo-filesystem (sockfs) */
+ sockfd = socket(AF_INET, SOCK_STREAM, 0);
+ if (CHECK(sockfd < 0, "trigger", "socket failed\n"))
+ goto out_close;
+ /* mountable pseudo-filesystems */
+ procfd = open("/proc/self/comm", O_RDONLY);
+ if (CHECK(procfd < 0, "trigger", "open /proc/self/comm failed\n"))
+ goto out_close;
+ devfd = open("/dev/urandom", O_RDONLY);
+ if (CHECK(devfd < 0, "trigger", "open /dev/urandom failed\n"))
+ goto out_close;
+ localfd = open("/tmp/d_path_loadgen.txt", O_CREAT | O_RDONLY, 0644);
+ if (CHECK(localfd < 0, "trigger", "open /tmp/d_path_loadgen.txt failed\n"))
+ goto out_close;
+ /* bpf_d_path will return path with (deleted) */
+ remove("/tmp/d_path_loadgen.txt");
+ indicatorfd = open("/tmp/", O_PATH);
+ if (CHECK(indicatorfd < 0, "trigger", "open /tmp/ failed\n"))
+ goto out_close;
+
+ ret = set_pathname(pipefd[0], pid);
+ if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[0]\n"))
+ goto out_close;
+ ret = set_pathname(pipefd[1], pid);
+ if (CHECK(ret < 0, "trigger", "set_pathname failed for pipe[1]\n"))
+ goto out_close;
+ ret = set_pathname(sockfd, pid);
+ if (CHECK(ret < 0, "trigger", "set_pathname failed for socket\n"))
+ goto out_close;
+ ret = set_pathname(procfd, pid);
+ if (CHECK(ret < 0, "trigger", "set_pathname failed for proc\n"))
+ goto out_close;
+ ret = set_pathname(devfd, pid);
+ if (CHECK(ret < 0, "trigger", "set_pathname failed for dev\n"))
+ goto out_close;
+ ret = set_pathname(localfd, pid);
+ if (CHECK(ret < 0, "trigger", "set_pathname failed for file\n"))
+ goto out_close;
+ ret = set_pathname(indicatorfd, pid);
+ if (CHECK(ret < 0, "trigger", "set_pathname failed for dir\n"))
+ goto out_close;
+
+ /* triggers vfs_getattr */
+ fstat(pipefd[0], &fileStat);
+ fstat(pipefd[1], &fileStat);
+ fstat(sockfd, &fileStat);
+ fstat(procfd, &fileStat);
+ fstat(devfd, &fileStat);
+ fstat(localfd, &fileStat);
+ fstat(indicatorfd, &fileStat);
+
+out_close:
+ /* triggers filp_close */
+ close(pipefd[0]);
+ close(pipefd[1]);
+ close(sockfd);
+ close(procfd);
+ close(devfd);
+ close(localfd);
+ close(indicatorfd);
+ return ret;
+}
+
+void test_d_path(void)
+{
+ struct test_d_path__bss *bss;
+ struct test_d_path *skel;
+ int err;
+
+ skel = test_d_path__open_and_load();
+ if (CHECK(!skel, "setup", "d_path skeleton failed\n"))
+ goto cleanup;
+
+ err = test_d_path__attach(skel);
+ if (CHECK(err, "setup", "attach failed: %d\n", err))
+ goto cleanup;
+
+ bss = skel->bss;
+ bss->my_pid = getpid();
+
+ err = trigger_fstat_events(bss->my_pid);
+ if (err < 0)
+ goto cleanup;
+
+ if (CHECK(!bss->called_stat,
+ "stat",
+ "trampoline for security_inode_getattr was not called\n"))
+ goto cleanup;
+
+ if (CHECK(!bss->called_close,
+ "close",
+ "trampoline for filp_close was not called\n"))
+ goto cleanup;
+
+ for (int i = 0; i < MAX_FILES; i++) {
+ CHECK(strncmp(src.paths[i], bss->paths_stat[i], MAX_PATH_LEN),
+ "check",
+ "failed to get stat path[%d]: %s vs %s\n",
+ i, src.paths[i], bss->paths_stat[i]);
+ CHECK(strncmp(src.paths[i], bss->paths_close[i], MAX_PATH_LEN),
+ "check",
+ "failed to get close path[%d]: %s vs %s\n",
+ i, src.paths[i], bss->paths_close[i]);
+ /* The d_path helper returns size plus NUL char, hence + 1 */
+ CHECK(bss->rets_stat[i] != strlen(bss->paths_stat[i]) + 1,
+ "check",
+ "failed to match stat return [%d]: %d vs %zd [%s]\n",
+ i, bss->rets_stat[i], strlen(bss->paths_stat[i]) + 1,
+ bss->paths_stat[i]);
+		CHECK(bss->rets_close[i] != strlen(bss->paths_close[i]) + 1,
+		      "check",
+		      "failed to match close return [%d]: %d vs %zd [%s]\n",
+		      i, bss->rets_close[i], strlen(bss->paths_close[i]) + 1,
+		      bss->paths_close[i]);
+ }
+
+cleanup:
+ test_d_path__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
index 197d0d217b56..5c0448910426 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
@@ -2,36 +2,79 @@
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
+#include <bpf/btf.h>
+
+typedef int (*test_cb)(struct bpf_object *obj);
+
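+/* check that the first prog_cnt results in obj's internal (.bss) map
+ * are all 1; optionally reset them afterwards
+ */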
+static int check_data_map(struct bpf_object *obj, int prog_cnt, bool reset)
+{
+ struct bpf_map *data_map = NULL, *map;
+ __u64 *result = NULL;
+ const int zero = 0;
+ __u32 duration = 0;
+ int ret = -1, i;
+
+ result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
+	if (CHECK(!result, "alloc_memory", "failed to alloc memory\n"))
+ return -ENOMEM;
+
+ bpf_object__for_each_map(map, obj)
+ if (bpf_map__is_internal(map)) {
+ data_map = map;
+ break;
+ }
+ if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ goto out;
+
+ ret = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, result);
+ if (CHECK(ret, "get_result",
+ "failed to get output data: %d\n", ret))
+ goto out;
+
+ for (i = 0; i < prog_cnt; i++) {
+ if (CHECK(result[i] != 1, "result",
+ "fexit_bpf2bpf result[%d] failed err %llu\n",
+ i, result[i]))
+ goto out;
+ result[i] = 0;
+ }
+ if (reset) {
+ ret = bpf_map_update_elem(bpf_map__fd(data_map), &zero, result, 0);
+ if (CHECK(ret, "reset_result", "failed to reset result\n"))
+ goto out;
+ }
+
+ ret = 0;
+out:
+ free(result);
+ return ret;
+}
static void test_fexit_bpf2bpf_common(const char *obj_file,
const char *target_obj_file,
int prog_cnt,
const char **prog_name,
- bool run_prog)
+ bool run_prog,
+ test_cb cb)
{
- struct bpf_object *obj = NULL, *pkt_obj;
- int err, pkt_fd, i;
- struct bpf_link **link = NULL;
+ struct bpf_object *obj = NULL, *tgt_obj;
struct bpf_program **prog = NULL;
+ struct bpf_link **link = NULL;
__u32 duration = 0, retval;
- struct bpf_map *data_map;
- const int zero = 0;
- __u64 *result = NULL;
+ int err, tgt_fd, i;
err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
- &pkt_obj, &pkt_fd);
+ &tgt_obj, &tgt_fd);
if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
target_obj_file, err, errno))
return;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
- .attach_prog_fd = pkt_fd,
+ .attach_prog_fd = tgt_fd,
);
link = calloc(sizeof(struct bpf_link *), prog_cnt);
prog = calloc(sizeof(struct bpf_program *), prog_cnt);
- result = malloc((prog_cnt + 32 /* spare */) * sizeof(__u64));
- if (CHECK(!link || !prog || !result, "alloc_memory",
- "failed to alloc memory"))
+	if (CHECK(!link || !prog, "alloc_memory", "failed to alloc memory\n"))
goto close_prog;
obj = bpf_object__open_file(obj_file, &opts);
@@ -53,39 +96,33 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
goto close_prog;
}
- if (!run_prog)
- goto close_prog;
+ if (cb) {
+ err = cb(obj);
+ if (err)
+ goto close_prog;
+ }
- data_map = bpf_object__find_map_by_name(obj, "fexit_bp.bss");
- if (CHECK(!data_map, "find_data_map", "data map not found\n"))
+ if (!run_prog)
goto close_prog;
- err = bpf_prog_test_run(pkt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6),
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",
"err %d errno %d retval %d duration %d\n",
err, errno, retval, duration);
- err = bpf_map_lookup_elem(bpf_map__fd(data_map), &zero, result);
- if (CHECK(err, "get_result",
- "failed to get output data: %d\n", err))
+ if (check_data_map(obj, prog_cnt, false))
goto close_prog;
- for (i = 0; i < prog_cnt; i++)
- if (CHECK(result[i] != 1, "result", "fexit_bpf2bpf failed err %llu\n",
- result[i]))
- goto close_prog;
-
close_prog:
for (i = 0; i < prog_cnt; i++)
if (!IS_ERR_OR_NULL(link[i]))
bpf_link__destroy(link[i]);
if (!IS_ERR_OR_NULL(obj))
bpf_object__close(obj);
- bpf_object__close(pkt_obj);
+ bpf_object__close(tgt_obj);
free(link);
free(prog);
- free(result);
}
static void test_target_no_callees(void)
@@ -96,7 +133,7 @@ static void test_target_no_callees(void)
test_fexit_bpf2bpf_common("./fexit_bpf2bpf_simple.o",
"./test_pkt_md_access.o",
ARRAY_SIZE(prog_name),
- prog_name, true);
+ prog_name, true, NULL);
}
static void test_target_yes_callees(void)
@@ -110,7 +147,7 @@ static void test_target_yes_callees(void)
test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
"./test_pkt_access.o",
ARRAY_SIZE(prog_name),
- prog_name, true);
+ prog_name, true, NULL);
}
static void test_func_replace(void)
@@ -123,11 +160,12 @@ static void test_func_replace(void)
"freplace/get_skb_len",
"freplace/get_skb_ifindex",
"freplace/get_constant",
+ "freplace/test_pkt_write_access_subprog",
};
test_fexit_bpf2bpf_common("./fexit_bpf2bpf.o",
"./test_pkt_access.o",
ARRAY_SIZE(prog_name),
- prog_name, true);
+ prog_name, true, NULL);
}
static void test_func_replace_verify(void)
@@ -138,13 +176,198 @@ static void test_func_replace_verify(void)
test_fexit_bpf2bpf_common("./freplace_connect4.o",
"./connect4_prog.o",
ARRAY_SIZE(prog_name),
- prog_name, false);
+ prog_name, false, NULL);
+}
+
+static int test_second_attach(struct bpf_object *obj)
+{
+ const char *prog_name = "freplace/get_constant";
+ const char *tgt_name = prog_name + 9; /* cut off freplace/ */
+ const char *tgt_obj_file = "./test_pkt_access.o";
+ struct bpf_program *prog = NULL;
+ struct bpf_object *tgt_obj;
+ __u32 duration = 0, retval;
+ struct bpf_link *link;
+ int err = 0, tgt_fd;
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name))
+ return -ENOENT;
+
+ err = bpf_prog_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
+ &tgt_obj, &tgt_fd);
+ if (CHECK(err, "second_prog_load", "file %s err %d errno %d\n",
+ tgt_obj_file, err, errno))
+ return err;
+
+ link = bpf_program__attach_freplace(prog, tgt_fd, tgt_name);
+ if (CHECK(IS_ERR(link), "second_link", "failed to attach second link prog_fd %d tgt_fd %d\n", bpf_program__fd(prog), tgt_fd))
+ goto out;
+
+ err = bpf_prog_test_run(tgt_fd, 1, &pkt_v6, sizeof(pkt_v6),
+ NULL, NULL, &retval, &duration);
+ if (CHECK(err || retval, "ipv6",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, retval, duration))
+ goto out;
+
+ err = check_data_map(obj, 1, true);
+ if (err)
+ goto out;
+
+out:
+ bpf_link__destroy(link);
+ bpf_object__close(tgt_obj);
+ return err;
+}
+
+static void test_func_replace_multi(void)
+{
+ const char *prog_name[] = {
+ "freplace/get_constant",
+ };
+ test_fexit_bpf2bpf_common("./freplace_get_constant.o",
+ "./test_pkt_access.o",
+ ARRAY_SIZE(prog_name),
+ prog_name, true, test_second_attach);
+}
+
+static void test_fmod_ret_freplace(void)
+{
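+	/* attach an freplace prog to a target prog, then verify that an
+	 * fmod_ret prog cannot be attached on top of the freplace prog
+	 */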
+ struct bpf_object *freplace_obj = NULL, *pkt_obj, *fmod_obj = NULL;
+ const char *freplace_name = "./freplace_get_constant.o";
+ const char *fmod_ret_name = "./fmod_ret_freplace.o";
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts);
+ const char *tgt_name = "./test_pkt_access.o";
+ struct bpf_link *freplace_link = NULL;
+ struct bpf_program *prog;
+ __u32 duration = 0;
+ int err, pkt_fd;
+
+ err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
+ &pkt_obj, &pkt_fd);
+ /* the target prog should load fine */
+ if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
+ tgt_name, err, errno))
+ return;
+ opts.attach_prog_fd = pkt_fd;
+
+ freplace_obj = bpf_object__open_file(freplace_name, &opts);
+ if (CHECK(IS_ERR_OR_NULL(freplace_obj), "freplace_obj_open",
+ "failed to open %s: %ld\n", freplace_name,
+ PTR_ERR(freplace_obj)))
+ goto out;
+
+ err = bpf_object__load(freplace_obj);
+ if (CHECK(err, "freplace_obj_load", "err %d\n", err))
+ goto out;
+
+ prog = bpf_program__next(NULL, freplace_obj);
+ freplace_link = bpf_program__attach_trace(prog);
+ if (CHECK(IS_ERR(freplace_link), "freplace_attach_trace", "failed to link\n"))
+ goto out;
+
+ opts.attach_prog_fd = bpf_program__fd(prog);
+ fmod_obj = bpf_object__open_file(fmod_ret_name, &opts);
+ if (CHECK(IS_ERR_OR_NULL(fmod_obj), "fmod_obj_open",
+ "failed to open %s: %ld\n", fmod_ret_name,
+ PTR_ERR(fmod_obj)))
+ goto out;
+
+ err = bpf_object__load(fmod_obj);
+ if (CHECK(!err, "fmod_obj_load", "loading fmod_ret should fail\n"))
+ goto out;
+
+out:
+ bpf_link__destroy(freplace_link);
+ bpf_object__close(freplace_obj);
+ bpf_object__close(fmod_obj);
+ bpf_object__close(pkt_obj);
+}
+
+static void test_func_sockmap_update(void)
+{
+ const char *prog_name[] = {
+ "freplace/cls_redirect",
+ };
+ test_fexit_bpf2bpf_common("./freplace_cls_redirect.o",
+ "./test_cls_redirect.o",
+ ARRAY_SIZE(prog_name),
+ prog_name, false, NULL);
+}
+
+static void test_obj_load_failure_common(const char *obj_file,
+ const char *target_obj_file)
+
+{
+ /*
+ * standalone test that asserts failure to load freplace prog
+ * because of invalid return code.
+ */
+ struct bpf_object *obj = NULL, *pkt_obj;
+ int err, pkt_fd;
+ __u32 duration = 0;
+
+ err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
+ &pkt_obj, &pkt_fd);
+ /* the target prog should load fine */
+ if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
+ target_obj_file, err, errno))
+ return;
+ DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
+ .attach_prog_fd = pkt_fd,
+ );
+
+ obj = bpf_object__open_file(obj_file, &opts);
+ if (CHECK(IS_ERR_OR_NULL(obj), "obj_open",
+ "failed to open %s: %ld\n", obj_file,
+ PTR_ERR(obj)))
+ goto close_prog;
+
+ /* It should fail to load the program */
+ err = bpf_object__load(obj);
+ if (CHECK(!err, "bpf_obj_load should fail", "err %d\n", err))
+ goto close_prog;
+
+close_prog:
+ if (!IS_ERR_OR_NULL(obj))
+ bpf_object__close(obj);
+ bpf_object__close(pkt_obj);
+}
+
+static void test_func_replace_return_code(void)
+{
+ /* test invalid return code in the replaced program */
+ test_obj_load_failure_common("./freplace_connect_v4_prog.o",
+ "./connect4_prog.o");
+}
+
+static void test_func_map_prog_compatibility(void)
+{
+ /* test with spin lock map value in the replaced program */
+ test_obj_load_failure_common("./freplace_attach_probe.o",
+ "./test_attach_probe.o");
}
void test_fexit_bpf2bpf(void)
{
- test_target_no_callees();
- test_target_yes_callees();
- test_func_replace();
- test_func_replace_verify();
+ if (test__start_subtest("target_no_callees"))
+ test_target_no_callees();
+ if (test__start_subtest("target_yes_callees"))
+ test_target_yes_callees();
+ if (test__start_subtest("func_replace"))
+ test_func_replace();
+ if (test__start_subtest("func_replace_verify"))
+ test_func_replace_verify();
+ if (test__start_subtest("func_sockmap_update"))
+ test_func_sockmap_update();
+ if (test__start_subtest("func_replace_return_code"))
+ test_func_replace_return_code();
+ if (test__start_subtest("func_map_prog_compatibility"))
+ test_func_map_prog_compatibility();
+ if (test__start_subtest("func_replace_multi"))
+ test_func_replace_multi();
+ if (test__start_subtest("fmod_ret_freplace"))
+ test_fmod_ret_freplace();
}
diff --git a/tools/testing/selftests/bpf/prog_tests/global_data_init.c b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
index 3bdaa5a40744..ee46b11f1f9a 100644
--- a/tools/testing/selftests/bpf/prog_tests/global_data_init.c
+++ b/tools/testing/selftests/bpf/prog_tests/global_data_init.c
@@ -12,7 +12,8 @@ void test_global_data_init(void)
size_t sz;
obj = bpf_object__open_file(file, NULL);
- if (CHECK_FAIL(!obj))
+ err = libbpf_get_error(obj);
+ if (CHECK_FAIL(err))
return;
map = bpf_object__find_map_by_name(obj, "test_glo.rodata");
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms.c b/tools/testing/selftests/bpf/prog_tests/ksyms.c
index e3d6777226a8..b295969b263b 100644
--- a/tools/testing/selftests/bpf/prog_tests/ksyms.c
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms.c
@@ -7,39 +7,28 @@
static int duration;
-static __u64 kallsyms_find(const char *sym)
-{
- char type, name[500];
- __u64 addr, res = 0;
- FILE *f;
-
- f = fopen("/proc/kallsyms", "r");
- if (CHECK(!f, "kallsyms_fopen", "failed to open: %d\n", errno))
- return 0;
-
- while (fscanf(f, "%llx %c %499s%*[^\n]\n", &addr, &type, name) > 0) {
- if (strcmp(name, sym) == 0) {
- res = addr;
- goto out;
- }
- }
-
- CHECK(false, "not_found", "symbol %s not found\n", sym);
-out:
- fclose(f);
- return res;
-}
-
void test_ksyms(void)
{
- __u64 link_fops_addr = kallsyms_find("bpf_link_fops");
const char *btf_path = "/sys/kernel/btf/vmlinux";
struct test_ksyms *skel;
struct test_ksyms__data *data;
+ __u64 link_fops_addr, per_cpu_start_addr;
struct stat st;
__u64 btf_size;
int err;
+ err = kallsyms_find("bpf_link_fops", &link_fops_addr);
+ if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
+ return;
+ if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_link_fops' not found\n"))
+ return;
+
+ err = kallsyms_find("__per_cpu_start", &per_cpu_start_addr);
+ if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
+ return;
+	if (CHECK(err == -ENOENT, "ksym_find", "symbol '__per_cpu_start' not found\n"))
+ return;
+
if (CHECK(stat(btf_path, &st), "stat_btf", "err %d\n", errno))
return;
btf_size = st.st_size;
@@ -63,8 +52,9 @@ void test_ksyms(void)
"got %llu, exp %llu\n", data->out__bpf_link_fops1, (__u64)0);
CHECK(data->out__btf_size != btf_size, "btf_size",
"got %llu, exp %llu\n", data->out__btf_size, btf_size);
- CHECK(data->out__per_cpu_start != 0, "__per_cpu_start",
- "got %llu, exp %llu\n", data->out__per_cpu_start, (__u64)0);
+ CHECK(data->out__per_cpu_start != per_cpu_start_addr, "__per_cpu_start",
+ "got %llu, exp %llu\n", data->out__per_cpu_start,
+ per_cpu_start_addr);
cleanup:
test_ksyms__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
new file mode 100644
index 000000000000..28e26bd3e0ca
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/ksyms_btf.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Google */
+
+#include <test_progs.h>
+#include <bpf/libbpf.h>
+#include <bpf/btf.h>
+#include "test_ksyms_btf.skel.h"
+
+static int duration;
+
+void test_ksyms_btf(void)
+{
+ __u64 runqueues_addr, bpf_prog_active_addr;
+ __u32 this_rq_cpu;
+ int this_bpf_prog_active;
+ struct test_ksyms_btf *skel = NULL;
+ struct test_ksyms_btf__data *data;
+ struct btf *btf;
+ int percpu_datasec;
+ int err;
+
+ err = kallsyms_find("runqueues", &runqueues_addr);
+ if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
+ return;
+ if (CHECK(err == -ENOENT, "ksym_find", "symbol 'runqueues' not found\n"))
+ return;
+
+ err = kallsyms_find("bpf_prog_active", &bpf_prog_active_addr);
+ if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
+ return;
+ if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_prog_active' not found\n"))
+ return;
+
+ btf = libbpf_find_kernel_btf();
+ if (CHECK(IS_ERR(btf), "btf_exists", "failed to load kernel BTF: %ld\n",
+ PTR_ERR(btf)))
+ return;
+
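+	/* typed per-CPU ksyms need the ".data..percpu" DATASEC in kernel BTF */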
+ percpu_datasec = btf__find_by_name_kind(btf, ".data..percpu",
+ BTF_KIND_DATASEC);
+ if (percpu_datasec < 0) {
+ printf("%s:SKIP:no PERCPU DATASEC in kernel btf\n",
+ __func__);
+ test__skip();
+ goto cleanup;
+ }
+
+ skel = test_ksyms_btf__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
+ goto cleanup;
+
+ err = test_ksyms_btf__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ /* trigger tracepoint */
+ usleep(1);
+
+ data = skel->data;
+ CHECK(data->out__runqueues_addr != runqueues_addr, "runqueues_addr",
+ "got %llu, exp %llu\n",
+ (unsigned long long)data->out__runqueues_addr,
+ (unsigned long long)runqueues_addr);
+ CHECK(data->out__bpf_prog_active_addr != bpf_prog_active_addr, "bpf_prog_active_addr",
+ "got %llu, exp %llu\n",
+ (unsigned long long)data->out__bpf_prog_active_addr,
+ (unsigned long long)bpf_prog_active_addr);
+
+ CHECK(data->out__rq_cpu == -1, "rq_cpu",
+ "got %u, exp != -1\n", data->out__rq_cpu);
+ CHECK(data->out__bpf_prog_active < 0, "bpf_prog_active",
+ "got %d, exp >= 0\n", data->out__bpf_prog_active);
+ CHECK(data->out__cpu_0_rq_cpu != 0, "cpu_rq(0)->cpu",
+ "got %u, exp 0\n", data->out__cpu_0_rq_cpu);
+
+ this_rq_cpu = data->out__this_rq_cpu;
+ CHECK(this_rq_cpu != data->out__rq_cpu, "this_rq_cpu",
+ "got %u, exp %u\n", this_rq_cpu, data->out__rq_cpu);
+
+ this_bpf_prog_active = data->out__this_bpf_prog_active;
+ CHECK(this_bpf_prog_active != data->out__bpf_prog_active, "this_bpf_prog_active",
+ "got %d, exp %d\n", this_bpf_prog_active,
+ data->out__bpf_prog_active);
+
+cleanup:
+ btf__free(btf);
+ test_ksyms_btf__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
index c2d373e294bb..8073105548ff 100644
--- a/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
+++ b/tools/testing/selftests/bpf/prog_tests/l4lb_all.c
@@ -80,9 +80,8 @@ out:
void test_l4lb_all(void)
{
- const char *file1 = "./test_l4lb.o";
- const char *file2 = "./test_l4lb_noinline.o";
-
- test_l4lb(file1);
- test_l4lb(file2);
+ if (test__start_subtest("l4lb_inline"))
+ test_l4lb("test_l4lb.o");
+ if (test__start_subtest("l4lb_noinline"))
+ test_l4lb("test_l4lb_noinline.o");
}
diff --git a/tools/testing/selftests/bpf/prog_tests/metadata.c b/tools/testing/selftests/bpf/prog_tests/metadata.c
new file mode 100644
index 000000000000..2c53eade88e3
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/metadata.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include <network_helpers.h>
+
+#include "metadata_unused.skel.h"
+#include "metadata_used.skel.h"
+
+static int duration;
+
+static int prog_holds_map(int prog_fd, int map_fd)
+{
+ struct bpf_prog_info prog_info = {};
+ struct bpf_prog_info map_info = {};
+ __u32 prog_info_len;
+ __u32 map_info_len;
+ __u32 *map_ids;
+ int nr_maps;
+ int ret;
+ int i;
+
+ map_info_len = sizeof(map_info);
+ ret = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len);
+ if (ret)
+ return -errno;
+
+ prog_info_len = sizeof(prog_info);
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret)
+ return -errno;
+
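+	/* the first query filled in nr_map_ids; query again with a buffer
+	 * to fetch the actual map ids
+	 */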
+ map_ids = calloc(prog_info.nr_map_ids, sizeof(__u32));
+ if (!map_ids)
+ return -ENOMEM;
+
+ nr_maps = prog_info.nr_map_ids;
+ memset(&prog_info, 0, sizeof(prog_info));
+ prog_info.nr_map_ids = nr_maps;
+ prog_info.map_ids = ptr_to_u64(map_ids);
+ prog_info_len = sizeof(prog_info);
+
+ ret = bpf_obj_get_info_by_fd(prog_fd, &prog_info, &prog_info_len);
+ if (ret) {
+ ret = -errno;
+ goto free_map_ids;
+ }
+
+ ret = -ENOENT;
+ for (i = 0; i < prog_info.nr_map_ids; i++) {
+ if (map_ids[i] == map_info.id) {
+ ret = 0;
+ break;
+ }
+ }
+
+free_map_ids:
+ free(map_ids);
+ return ret;
+}
+
+static void test_metadata_unused(void)
+{
+ struct metadata_unused *obj;
+ int err;
+
+ obj = metadata_unused__open_and_load();
+ if (CHECK(!obj, "skel-load", "errno %d", errno))
+ return;
+
+ err = prog_holds_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata));
+ if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
+ return;
+
+ /* Assert that we can access the metadata in skel and the values are
+ * what we expect.
+ */
+ if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "foo",
+ sizeof(obj->rodata->bpf_metadata_a)),
+ "bpf_metadata_a", "expected \"foo\", value differ"))
+ goto close_bpf_object;
+ if (CHECK(obj->rodata->bpf_metadata_b != 1, "bpf_metadata_b",
+ "expected 1, got %d", obj->rodata->bpf_metadata_b))
+ goto close_bpf_object;
+
+ /* Assert that binding metadata map to prog again succeeds. */
+ err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata), NULL);
+ CHECK(err, "rebind_map", "errno %d, expected 0", errno);
+
+close_bpf_object:
+ metadata_unused__destroy(obj);
+}
+
+static void test_metadata_used(void)
+{
+ struct metadata_used *obj;
+ int err;
+
+ obj = metadata_used__open_and_load();
+ if (CHECK(!obj, "skel-load", "errno %d", errno))
+ return;
+
+ err = prog_holds_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata));
+ if (CHECK(err, "prog-holds-rodata", "errno: %d", err))
+ return;
+
+ /* Assert that we can access the metadata in skel and the values are
+ * what we expect.
+ */
+ if (CHECK(strncmp(obj->rodata->bpf_metadata_a, "bar",
+ sizeof(obj->rodata->bpf_metadata_a)),
+ "metadata_a", "expected \"bar\", value differ"))
+ goto close_bpf_object;
+ if (CHECK(obj->rodata->bpf_metadata_b != 2, "metadata_b",
+ "expected 2, got %d", obj->rodata->bpf_metadata_b))
+ goto close_bpf_object;
+
+ /* Assert that binding metadata map to prog again succeeds. */
+ err = bpf_prog_bind_map(bpf_program__fd(obj->progs.prog),
+ bpf_map__fd(obj->maps.rodata), NULL);
+ CHECK(err, "rebind_map", "errno %d, expected 0", errno);
+
+close_bpf_object:
+ metadata_used__destroy(obj);
+}
+
+void test_metadata(void)
+{
+ if (test__start_subtest("unused"))
+ test_metadata_unused();
+
+ if (test__start_subtest("used"))
+ test_metadata_used();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c b/tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c
new file mode 100644
index 000000000000..673d38395253
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/pe_preserve_elems.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+#include <linux/bpf.h>
+#include "test_pe_preserve_elems.skel.h"
+
+static int duration;
+
+static void test_one_map(struct bpf_map *map, struct bpf_program *prog,
+ bool has_share_pe)
+{
+ int err, key = 0, pfd = -1, mfd = bpf_map__fd(map);
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts);
+ struct perf_event_attr attr = {
+ .size = sizeof(struct perf_event_attr),
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_CPU_CLOCK,
+ };
+
+ pfd = syscall(__NR_perf_event_open, &attr, 0 /* pid */,
+		      -1 /* any cpu */, -1 /* group id */, 0 /* flags */);
+ if (CHECK(pfd < 0, "perf_event_open", "failed\n"))
+ return;
+
+ err = bpf_map_update_elem(mfd, &key, &pfd, BPF_ANY);
+ close(pfd);
+ if (CHECK(err < 0, "bpf_map_update_elem", "failed\n"))
+ return;
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
+ if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n"))
+ return;
+ if (CHECK(opts.retval != 0, "bpf_perf_event_read_value",
+ "failed with %d\n", opts.retval))
+ return;
+
+ /* closing mfd, prog still holds a reference on map */
+ close(mfd);
+
+ err = bpf_prog_test_run_opts(bpf_program__fd(prog), &opts);
+ if (CHECK(err < 0, "bpf_prog_test_run_opts", "failed\n"))
+ return;
+
+ if (has_share_pe) {
+ CHECK(opts.retval != 0, "bpf_perf_event_read_value",
+ "failed with %d\n", opts.retval);
+ } else {
+ CHECK(opts.retval != -ENOENT, "bpf_perf_event_read_value",
+ "should have failed with %d, but got %d\n", -ENOENT,
+ opts.retval);
+ }
+}
+
+void test_pe_preserve_elems(void)
+{
+ struct test_pe_preserve_elems *skel;
+
+ skel = test_pe_preserve_elems__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ test_one_map(skel->maps.array_1, skel->progs.read_array_1, false);
+ test_one_map(skel->maps.array_2, skel->progs.read_array_2, true);
+
+ test_pe_preserve_elems__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
index c33ec180b3f2..ca9f0895ec84 100644
--- a/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
+++ b/tools/testing/selftests/bpf/prog_tests/perf_buffer.c
@@ -7,6 +7,8 @@
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"
+static int duration;
+
/* AddressSanitizer sometimes crashes on the data dereference below because
 * this is mmap()'ed memory. Disable instrumentation with the
 * no_sanitize_address attribute
@@ -24,13 +26,31 @@ static void on_sample(void *ctx, int cpu, void *data, __u32 size)
CPU_SET(cpu, cpu_seen);
}
+int trigger_on_cpu(int cpu)
+{
+ cpu_set_t cpu_set;
+ int err;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(cpu, &cpu_set);
+
+ err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
+ if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err))
+ return err;
+
+ usleep(1);
+
+ return 0;
+}
+
void test_perf_buffer(void)
{
- int err, on_len, nr_on_cpus = 0, nr_cpus, i, duration = 0;
+ int err, on_len, nr_on_cpus = 0, nr_cpus, i;
struct perf_buffer_opts pb_opts = {};
struct test_perf_buffer *skel;
- cpu_set_t cpu_set, cpu_seen;
+ cpu_set_t cpu_seen;
struct perf_buffer *pb;
+ int last_fd = -1, fd;
bool *online;
nr_cpus = libbpf_num_possible_cpus();
@@ -63,6 +83,9 @@ void test_perf_buffer(void)
if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
goto out_close;
+ CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
+ "bad fd: %d\n", perf_buffer__epoll_fd(pb));
+
/* trigger kprobe on every CPU */
CPU_ZERO(&cpu_seen);
for (i = 0; i < nr_cpus; i++) {
@@ -71,16 +94,8 @@ void test_perf_buffer(void)
continue;
}
- CPU_ZERO(&cpu_set);
- CPU_SET(i, &cpu_set);
-
- err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
- &cpu_set);
- if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n",
- i, err))
+ if (trigger_on_cpu(i))
goto out_close;
-
- usleep(1);
}
/* read perf buffer */
@@ -92,6 +107,34 @@ void test_perf_buffer(void)
"expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
goto out_free_pb;
+ if (CHECK(perf_buffer__buffer_cnt(pb) != nr_cpus, "buf_cnt",
+ "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_cpus))
+ goto out_free_pb;
+
+ for (i = 0; i < nr_cpus; i++) {
+ if (i >= on_len || !online[i])
+ continue;
+
+ fd = perf_buffer__buffer_fd(pb, i);
+ CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
+ last_fd = fd;
+
+ err = perf_buffer__consume_buffer(pb, i);
+ if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
+ goto out_free_pb;
+
+ CPU_CLR(i, &cpu_seen);
+ if (trigger_on_cpu(i))
+ goto out_free_pb;
+
+ err = perf_buffer__consume_buffer(pb, i);
+ if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err))
+ goto out_free_pb;
+
+ if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
+ goto out_free_pb;
+ }
+
out_free_pb:
perf_buffer__free(pb);
out_close:
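
The hunk above exercises the per-buffer accessors added to libbpf in this series. A minimal usage sketch, assuming an already-created struct perf_buffer *pb:

/* Drain only CPU 0's ring buffer instead of polling all of them. */
static int drain_cpu0(struct perf_buffer *pb)
{
	int fd;

	if (perf_buffer__buffer_cnt(pb) < 1)
		return -1;

	/* raw perf event fd for buffer 0, usable with epoll etc. */
	fd = perf_buffer__buffer_fd(pb, 0);
	if (fd < 0)
		return fd;

	/* invoke the sample callback for records pending on CPU 0 only */
	return perf_buffer__consume_buffer(pb, 0);
}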
diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c
index 041952524c55..fcf54b3a1dd0 100644
--- a/tools/testing/selftests/bpf/prog_tests/pinning.c
+++ b/tools/testing/selftests/bpf/prog_tests/pinning.c
@@ -37,7 +37,7 @@ void test_pinning(void)
struct stat statbuf = {};
struct bpf_object *obj;
struct bpf_map *map;
- int err;
+ int err, map_fd;
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
.pin_root_path = custpath,
);
@@ -213,6 +213,53 @@ void test_pinning(void)
if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
goto out;
+ /* remove the custom pin path to re-test it with reuse fd below */
+ err = unlink(custpinpath);
+ if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno))
+ goto out;
+
+ err = rmdir(custpath);
+ if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno))
+ goto out;
+
+ bpf_object__close(obj);
+
+ /* test pinning at custom path with reuse fd */
+ obj = bpf_object__open_file(file, NULL);
+ err = libbpf_get_error(obj);
+ if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) {
+ obj = NULL;
+ goto out;
+ }
+
+ map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
+ sizeof(__u64), 1, 0);
+ if (CHECK(map_fd < 0, "create pinmap manually", "fd %d\n", map_fd))
+ goto out;
+
+ map = bpf_object__find_map_by_name(obj, "pinmap");
+ if (CHECK(!map, "find map", "NULL map"))
+ goto close_map_fd;
+
+ err = bpf_map__reuse_fd(map, map_fd);
+ if (CHECK(err, "reuse pinmap fd", "err %d errno %d\n", err, errno))
+ goto close_map_fd;
+
+ err = bpf_map__set_pin_path(map, custpinpath);
+ if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno))
+ goto close_map_fd;
+
+ err = bpf_object__load(obj);
+ if (CHECK(err, "custom load", "err %d errno %d\n", err, errno))
+ goto close_map_fd;
+
+ /* check that pinmap was pinned at the custom path */
+ err = stat(custpinpath, &statbuf);
+ if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno))
+ goto close_map_fd;
+
+close_map_fd:
+ close(map_fd);
out:
unlink(pinpath);
unlink(nopinpath);
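
The new subtest follows the reuse-fd flow below. As a standalone sketch (object name and pin path are illustrative, error checks trimmed):

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Pin an externally-created map fd at a custom path. */
static int pin_reused_fd(void)
{
	struct bpf_object *obj = bpf_object__open_file("test_pinning.o", NULL);
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "pinmap");
	int fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
				sizeof(__u64), 1, 0);

	bpf_map__reuse_fd(map, fd);	/* adopt the pre-created fd */
	bpf_map__set_pin_path(map, "/sys/fs/bpf/custom/pinmap");
	return bpf_object__load(obj);	/* pins the reused map on load */
}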
diff --git a/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
new file mode 100644
index 000000000000..c5fb191874ac
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/raw_tp_test_run.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019 Facebook */
+#include <test_progs.h>
+#include <linux/bpf.h>
+#include "bpf/libbpf_internal.h"
+#include "test_raw_tp_test_run.skel.h"
+
+static int duration;
+
+void test_raw_tp_test_run(void)
+{
+ struct bpf_prog_test_run_attr test_attr = {};
+ int comm_fd = -1, err, nr_online, i, prog_fd;
+ __u64 args[2] = {0x1234ULL, 0x5678ULL};
+ int expected_retval = 0x1234 + 0x5678;
+ struct test_raw_tp_test_run *skel;
+ char buf[] = "new_name";
+ bool *online = NULL;
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = args,
+ .ctx_size_in = sizeof(args),
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ );
+
+ err = parse_cpu_mask_file("/sys/devices/system/cpu/online", &online,
+ &nr_online);
+ if (CHECK(err, "parse_cpu_mask_file", "err %d\n", err))
+ return;
+
+ skel = test_raw_tp_test_run__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ goto cleanup;
+
+ err = test_raw_tp_test_run__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ comm_fd = open("/proc/self/comm", O_WRONLY|O_TRUNC);
+ if (CHECK(comm_fd < 0, "open /proc/self/comm", "err %d\n", errno))
+ goto cleanup;
+
+ err = write(comm_fd, buf, sizeof(buf));
+ CHECK(err < 0, "task rename", "err %d", errno);
+
+ CHECK(skel->bss->count == 0, "check_count", "didn't increase\n");
+ CHECK(skel->data->on_cpu != 0xffffffff, "check_on_cpu", "got wrong value\n");
+
+ prog_fd = bpf_program__fd(skel->progs.rename);
+ test_attr.prog_fd = prog_fd;
+ test_attr.ctx_in = args;
+ test_attr.ctx_size_in = sizeof(__u64);
+
+ err = bpf_prog_test_run_xattr(&test_attr);
+ CHECK(err == 0, "test_run", "should fail for too small ctx\n");
+
+ test_attr.ctx_size_in = sizeof(args);
+ err = bpf_prog_test_run_xattr(&test_attr);
+ CHECK(err < 0, "test_run", "err %d\n", errno);
+ CHECK(test_attr.retval != expected_retval, "check_retval",
+ "expect 0x%x, got 0x%x\n", expected_retval, test_attr.retval);
+
+ for (i = 0; i < nr_online; i++) {
+ if (!online[i])
+ continue;
+
+ opts.cpu = i;
+ opts.retval = 0;
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ CHECK(err < 0, "test_run_opts", "err %d\n", errno);
+ CHECK(skel->data->on_cpu != i, "check_on_cpu",
+ "expect %d got %d\n", i, skel->data->on_cpu);
+ CHECK(opts.retval != expected_retval,
+ "check_retval", "expect 0x%x, got 0x%x\n",
+ expected_retval, opts.retval);
+ }
+
+ /* invalid cpu ID should fail with ENXIO */
+ opts.cpu = 0xffffffff;
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ CHECK(err != -1 || errno != ENXIO,
+ "test_run_opts_fail",
+ "should failed with ENXIO\n");
+
+ /* non-zero cpu w/o BPF_F_TEST_RUN_ON_CPU should fail with EINVAL */
+ opts.cpu = 1;
+ opts.flags = 0;
+ err = bpf_prog_test_run_opts(prog_fd, &opts);
+ CHECK(err != -1 || errno != EINVAL,
+ "test_run_opts_fail",
+ "should failed with EINVAL\n");
+
+cleanup:
+ close(comm_fd);
+ test_raw_tp_test_run__destroy(skel);
+ free(online);
+}
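
The flag under test, BPF_F_TEST_RUN_ON_CPU, pins the test run to opts.cpu. A minimal sketch, assuming a loaded raw_tp program fd and <bpf/bpf.h>/<linux/bpf.h>:

/* Run a raw_tp program on CPU 2 with a caller-supplied context. */
static int run_on_cpu2(int prog_fd)
{
	__u64 args[2] = {0x1234ULL, 0x5678ULL};
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
		.flags = BPF_F_TEST_RUN_ON_CPU,	/* honor opts.cpu */
		.cpu = 2,
	);

	return bpf_prog_test_run_opts(prog_fd, &opts);
}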
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index fc0d7f4f02cf..ac1ee10cffd8 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -27,7 +27,7 @@ void test_reference_tracking(void)
const char *title;
/* Ignore .text sections */
- title = bpf_program__title(prog, false);
+ title = bpf_program__section_name(prog);
if (strstr(title, ".text") != NULL)
continue;
diff --git a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
index 3b127cab4864..6ace5e9efec1 100644
--- a/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
+++ b/tools/testing/selftests/bpf/prog_tests/resolve_btfids.c
@@ -28,6 +28,12 @@ struct symbol test_symbols[] = {
{ "func", BTF_KIND_FUNC, -1 },
};
+/* Align the .BTF_ids section to 4 bytes */
+asm (
+".pushsection " BTF_IDS_SECTION " ,\"a\"; \n"
+".balign 4, 0; \n"
+".popsection; \n");
+
BTF_ID_LIST(test_list_local)
BTF_ID_UNUSED
BTF_ID(typedef, S)
@@ -47,6 +53,15 @@ BTF_ID(struct, S)
BTF_ID(union, U)
BTF_ID(func, func)
+BTF_SET_START(test_set)
+BTF_ID(typedef, S)
+BTF_ID(typedef, T)
+BTF_ID(typedef, U)
+BTF_ID(struct, S)
+BTF_ID(union, U)
+BTF_ID(func, func)
+BTF_SET_END(test_set)
+
static int
__resolve_symbol(struct btf *btf, int type_id)
{
@@ -116,12 +131,40 @@ int test_resolve_btfids(void)
*/
for (j = 0; j < ARRAY_SIZE(test_lists); j++) {
test_list = test_lists[j];
- for (i = 0; i < ARRAY_SIZE(test_symbols) && !ret; i++) {
+ for (i = 0; i < ARRAY_SIZE(test_symbols); i++) {
ret = CHECK(test_list[i] != test_symbols[i].id,
"id_check",
"wrong ID for %s (%d != %d)\n",
test_symbols[i].name,
test_list[i], test_symbols[i].id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Check BTF_SET_START(test_set) IDs */
+ for (i = 0; i < test_set.cnt; i++) {
+ bool found = false;
+
+ for (j = 0; j < ARRAY_SIZE(test_symbols); j++) {
+ if (test_symbols[j].id != test_set.ids[i])
+ continue;
+ found = true;
+ break;
+ }
+
+ ret = CHECK(!found, "id_check",
+ "ID %d not found in test_symbols\n",
+ test_set.ids[i]);
+ if (ret)
+ break;
+
+ if (i > 0) {
+ ret = CHECK(test_set.ids[i - 1] > test_set.ids[i],
+ "sort_check",
+ "test_set is not sorted\n");
+ if (ret)
+ break;
}
}
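
For reference, the BTF_SET_*() macros verified here declare a sorted set of BTF IDs that the resolve_btfids build step fills in. A minimal sketch, assuming <linux/btf_ids.h> from tools/include (set name and members are illustrative):

#include <linux/btf_ids.h>

/* Placeholder zeros in .BTF_ids are rewritten with real type IDs and
 * the set is sorted by ID, which is what sort_check above verifies.
 */
BTF_SET_START(allowed_structs)
BTF_ID(struct, seq_file)
BTF_ID(struct, sk_buff)
BTF_SET_END(allowed_structs)

/* Membership can then be tested with:
 *	btf_id_set_contains(&allowed_structs, type_id);
 */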
diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
index 47fa04adc147..3a469099f30d 100644
--- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c
+++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c
@@ -49,7 +49,7 @@ configure_stack(void)
sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf",
"direct-action object-file ./test_sk_assign.o",
"section classifier/sk_assign_test",
- (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "");
+ (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose");
if (CHECK(system(tc_cmd), "BPF load failed;",
"run with -vv for more info\n"))
return false;
@@ -265,9 +265,10 @@ void test_sk_assign(void)
TEST("ipv6 udp port redir", AF_INET6, SOCK_DGRAM, false),
TEST("ipv6 udp addr redir", AF_INET6, SOCK_DGRAM, true),
};
- int server = -1;
+ __s64 server = -1;
int server_map;
int self_net;
+ int i;
self_net = open(NS_SELF, O_RDONLY);
if (CHECK_FAIL(self_net < 0)) {
@@ -286,7 +287,7 @@ void test_sk_assign(void)
goto cleanup;
}
- for (int i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
+ for (i = 0; i < ARRAY_SIZE(tests) && !READ_ONCE(stop); i++) {
struct test_sk_cfg *test = &tests[i];
const struct sockaddr *addr;
const int zero = 0;
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
new file mode 100644
index 000000000000..686b40f11a45
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf_btf.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <linux/btf.h>
+#include "netif_receive_skb.skel.h"
+
+/* Demonstrate that bpf_snprintf_btf succeeds and that various data types
+ * are formatted correctly.
+ */
+void test_snprintf_btf(void)
+{
+ struct netif_receive_skb *skel;
+ struct netif_receive_skb__bss *bss;
+ int err, duration = 0;
+
+ skel = netif_receive_skb__open();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ err = netif_receive_skb__load(skel);
+ if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
+ goto cleanup;
+
+ bss = skel->bss;
+
+ err = netif_receive_skb__attach(skel);
+ if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+ goto cleanup;
+
+ /* generate receive event */
+ err = system("ping -c 1 127.0.0.1 > /dev/null");
+ if (CHECK(err, "system", "ping failed: %d\n", err))
+ goto cleanup;
+
+ if (bss->skip) {
+ printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
+ test__skip();
+ goto cleanup;
+ }
+
+ /*
+ * Make sure netif_receive_skb program was triggered
+ * and it set expected return values from bpf_trace_printk()s
+ * and all tests ran.
+ */
+ if (CHECK(bss->ret <= 0,
+ "bpf_snprintf_btf: got return value",
+ "ret <= 0 %ld test %d\n", bss->ret, bss->ran_subtests))
+ goto cleanup;
+
+ if (CHECK(bss->ran_subtests == 0, "check if subtests ran",
+ "no subtests ran, did BPF program run?"))
+ goto cleanup;
+
+ if (CHECK(bss->num_subtests != bss->ran_subtests,
+ "check all subtests ran",
+ "only ran %d of %d tests\n", bss->num_subtests,
+ bss->ran_subtests))
+ goto cleanup;
+
+cleanup:
+ netif_receive_skb__destroy(skel);
+}
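
A hedged sketch of the BPF side: rendering a kernel struct through its BTF description with bpf_snprintf_btf(). Illustrative, not the netif_receive_skb.c source; it assumes vmlinux.h and libbpf's bpf_core_type_id_kernel() wrapper around __builtin_btf_type_id:

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>

char str[256];

SEC("tp_btf/netif_receive_skb")
int BPF_PROG(dump_skb, struct sk_buff *skb)
{
	struct btf_ptr p = {
		.ptr = skb,
		.type_id = bpf_core_type_id_kernel(struct sk_buff),
	};

	/* BTF_F_COMPACT drops newlines/indentation from the output */
	return bpf_snprintf_btf(str, sizeof(str), &p, sizeof(p), BTF_F_COMPACT);
}

char _license[] SEC("license") = "GPL";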
diff --git a/tools/testing/selftests/bpf/prog_tests/sock_fields.c b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
new file mode 100644
index 000000000000..af87118e748e
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/sock_fields.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Facebook */
+
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+#include <linux/compiler.h>
+
+#include "network_helpers.h"
+#include "cgroup_helpers.h"
+#include "test_progs.h"
+#include "bpf_rlimit.h"
+#include "test_sock_fields.skel.h"
+
+enum bpf_linum_array_idx {
+ EGRESS_LINUM_IDX,
+ INGRESS_LINUM_IDX,
+ __NR_BPF_LINUM_ARRAY_IDX,
+};
+
+struct bpf_spinlock_cnt {
+ struct bpf_spin_lock lock;
+ __u32 cnt;
+};
+
+#define PARENT_CGROUP "/test-bpf-sock-fields"
+#define CHILD_CGROUP "/test-bpf-sock-fields/child"
+#define DATA "Hello BPF!"
+#define DATA_LEN sizeof(DATA)
+
+static struct sockaddr_in6 srv_sa6, cli_sa6;
+static int sk_pkt_out_cnt10_fd;
+static struct test_sock_fields *skel;
+static int sk_pkt_out_cnt_fd;
+static __u64 parent_cg_id;
+static __u64 child_cg_id;
+static int linum_map_fd;
+static __u32 duration;
+
+static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
+static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
+
+static void print_sk(const struct bpf_sock *sk, const char *prefix)
+{
+ char src_ip4[24], dst_ip4[24];
+ char src_ip6[64], dst_ip6[64];
+
+ inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
+ inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
+ inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
+ inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
+
+ printf("%s: state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u "
+ "src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u "
+ "dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n",
+ prefix,
+ sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
+ sk->mark, sk->priority,
+ sk->src_ip4, src_ip4,
+ sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
+ src_ip6, sk->src_port,
+ sk->dst_ip4, dst_ip4,
+ sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
+ dst_ip6, ntohs(sk->dst_port));
+}
+
+static void print_tp(const struct bpf_tcp_sock *tp, const char *prefix)
+{
+ printf("%s: snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u "
+ "snd_nxt:%u snd:una:%u mss_cache:%u ecn_flags:%u "
+ "rate_delivered:%u rate_interval_us:%u packets_out:%u "
+ "retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u "
+ "segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u "
+ "bytes_received:%llu bytes_acked:%llu\n",
+ prefix,
+ tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
+ tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
+ tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
+ tp->packets_out, tp->retrans_out, tp->total_retrans,
+ tp->segs_in, tp->data_segs_in, tp->segs_out,
+ tp->data_segs_out, tp->lost_out, tp->sacked_out,
+ tp->bytes_received, tp->bytes_acked);
+}
+
+static void check_result(void)
+{
+ struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
+ struct bpf_sock srv_sk, cli_sk, listen_sk;
+ __u32 ingress_linum, egress_linum;
+ int err;
+
+ err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
+ &egress_linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d\n", err, errno);
+
+ err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
+ &ingress_linum);
+ CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+ "err:%d errno:%d\n", err, errno);
+
+ memcpy(&srv_sk, &skel->bss->srv_sk, sizeof(srv_sk));
+ memcpy(&srv_tp, &skel->bss->srv_tp, sizeof(srv_tp));
+ memcpy(&cli_sk, &skel->bss->cli_sk, sizeof(cli_sk));
+ memcpy(&cli_tp, &skel->bss->cli_tp, sizeof(cli_tp));
+ memcpy(&listen_sk, &skel->bss->listen_sk, sizeof(listen_sk));
+ memcpy(&listen_tp, &skel->bss->listen_tp, sizeof(listen_tp));
+
+ print_sk(&listen_sk, "listen_sk");
+ print_sk(&srv_sk, "srv_sk");
+ print_sk(&cli_sk, "cli_sk");
+ print_tp(&listen_tp, "listen_tp");
+ print_tp(&srv_tp, "srv_tp");
+ print_tp(&cli_tp, "cli_tp");
+
+ CHECK(listen_sk.state != 10 ||
+ listen_sk.family != AF_INET6 ||
+ listen_sk.protocol != IPPROTO_TCP ||
+ memcmp(listen_sk.src_ip6, &in6addr_loopback,
+ sizeof(listen_sk.src_ip6)) ||
+ listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
+ listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
+ listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ listen_sk.dst_port,
+ "listen_sk",
+ "Unexpected. Check listen_sk output. ingress_linum:%u\n",
+ ingress_linum);
+
+ CHECK(srv_sk.state == 10 ||
+ !srv_sk.state ||
+ srv_sk.family != AF_INET6 ||
+ srv_sk.protocol != IPPROTO_TCP ||
+ memcmp(srv_sk.src_ip6, &in6addr_loopback,
+ sizeof(srv_sk.src_ip6)) ||
+ memcmp(srv_sk.dst_ip6, &in6addr_loopback,
+ sizeof(srv_sk.dst_ip6)) ||
+ srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+ srv_sk.dst_port != cli_sa6.sin6_port,
+ "srv_sk", "Unexpected. Check srv_sk output. egress_linum:%u\n",
+ egress_linum);
+
+ CHECK(!skel->bss->lsndtime, "srv_tp", "Unexpected lsndtime:0\n");
+
+ CHECK(cli_sk.state == 10 ||
+ !cli_sk.state ||
+ cli_sk.family != AF_INET6 ||
+ cli_sk.protocol != IPPROTO_TCP ||
+ memcmp(cli_sk.src_ip6, &in6addr_loopback,
+ sizeof(cli_sk.src_ip6)) ||
+ memcmp(cli_sk.dst_ip6, &in6addr_loopback,
+ sizeof(cli_sk.dst_ip6)) ||
+ cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
+ cli_sk.dst_port != srv_sa6.sin6_port,
+ "cli_sk", "Unexpected. Check cli_sk output. egress_linum:%u\n",
+ egress_linum);
+
+ CHECK(listen_tp.data_segs_out ||
+ listen_tp.data_segs_in ||
+ listen_tp.total_retrans ||
+ listen_tp.bytes_acked,
+ "listen_tp",
+ "Unexpected. Check listen_tp output. ingress_linum:%u\n",
+ ingress_linum);
+
+ CHECK(srv_tp.data_segs_out != 2 ||
+ srv_tp.data_segs_in ||
+ srv_tp.snd_cwnd != 10 ||
+ srv_tp.total_retrans ||
+ srv_tp.bytes_acked < 2 * DATA_LEN,
+ "srv_tp", "Unexpected. Check srv_tp output. egress_linum:%u\n",
+ egress_linum);
+
+ CHECK(cli_tp.data_segs_out ||
+ cli_tp.data_segs_in != 2 ||
+ cli_tp.snd_cwnd != 10 ||
+ cli_tp.total_retrans ||
+ cli_tp.bytes_received < 2 * DATA_LEN,
+ "cli_tp", "Unexpected. Check cli_tp output. egress_linum:%u\n",
+ egress_linum);
+
+ CHECK(skel->bss->parent_cg_id != parent_cg_id,
+ "parent_cg_id", "%zu != %zu\n",
+ (size_t)skel->bss->parent_cg_id, (size_t)parent_cg_id);
+
+ CHECK(skel->bss->child_cg_id != child_cg_id,
+ "child_cg_id", "%zu != %zu\n",
+ (size_t)skel->bss->child_cg_id, (size_t)child_cg_id);
+}
+
+static void check_sk_pkt_out_cnt(int accept_fd, int cli_fd)
+{
+ struct bpf_spinlock_cnt pkt_out_cnt = {}, pkt_out_cnt10 = {};
+ int err;
+
+ pkt_out_cnt.cnt = ~0;
+ pkt_out_cnt10.cnt = ~0;
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &accept_fd, &pkt_out_cnt);
+ if (!err)
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &accept_fd,
+ &pkt_out_cnt10);
+
+ /* The bpf prog only counts for a fullsock, and the passive
+ * connection does not become a fullsock until the 3WHS has
+ * finished, so the bpf prog only counted two outgoing data
+ * packets.
+ */
+ CHECK(err || pkt_out_cnt.cnt < 0xeB9F + 2 ||
+ pkt_out_cnt10.cnt < 0xeB9F + 20,
+ "bpf_map_lookup_elem(sk_pkt_out_cnt, &accept_fd)",
+ "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u\n",
+ err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
+
+ pkt_out_cnt.cnt = ~0;
+ pkt_out_cnt10.cnt = ~0;
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &cli_fd, &pkt_out_cnt);
+ if (!err)
+ err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &cli_fd,
+ &pkt_out_cnt10);
+ /* The active connection is a fullsock from the beginning:
+ * 1 SYN and 1 ACK during the 3WHS,
+ * 2 ACKs on the data packets.
+ *
+ * The bpf_prog initialized the counter to 0xeB9F.
+ */
+ CHECK(err || pkt_out_cnt.cnt < 0xeB9F + 4 ||
+ pkt_out_cnt10.cnt < 0xeB9F + 40,
+ "bpf_map_lookup_elem(sk_pkt_out_cnt, &cli_fd)",
+ "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u\n",
+ err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
+}
+
+static int init_sk_storage(int sk_fd, __u32 pkt_out_cnt)
+{
+ struct bpf_spinlock_cnt scnt = {};
+ int err;
+
+ scnt.cnt = pkt_out_cnt;
+ err = bpf_map_update_elem(sk_pkt_out_cnt_fd, &sk_fd, &scnt,
+ BPF_NOEXIST);
+ if (CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt_fd)",
+ "err:%d errno:%d\n", err, errno))
+ return err;
+
+ err = bpf_map_update_elem(sk_pkt_out_cnt10_fd, &sk_fd, &scnt,
+ BPF_NOEXIST);
+ if (CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt10_fd)",
+ "err:%d errno:%d\n", err, errno))
+ return err;
+
+ return 0;
+}
+
+static void test(void)
+{
+ int listen_fd = -1, cli_fd = -1, accept_fd = -1, err, i;
+ socklen_t addrlen = sizeof(struct sockaddr_in6);
+ char buf[DATA_LEN];
+
+ /* Prepare listen_fd */
+ listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
+ /* start_server() has logged the error details */
+ if (CHECK_FAIL(listen_fd == -1))
+ goto done;
+
+ err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
+ if (CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d\n", err,
+ errno))
+ goto done;
+ memcpy(&skel->bss->srv_sa6, &srv_sa6, sizeof(srv_sa6));
+
+ cli_fd = connect_to_fd(listen_fd, 0);
+ if (CHECK_FAIL(cli_fd == -1))
+ goto done;
+
+ err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
+ if (CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d\n",
+ err, errno))
+ goto done;
+
+ accept_fd = accept(listen_fd, NULL, NULL);
+ if (CHECK(accept_fd == -1, "accept(listen_fd)",
+ "accept_fd:%d errno:%d\n",
+ accept_fd, errno))
+ goto done;
+
+ if (init_sk_storage(accept_fd, 0xeB9F))
+ goto done;
+
+ for (i = 0; i < 2; i++) {
+ /* Send some data from accept_fd to cli_fd.
+ * MSG_EOR to stop kernel from coalescing two pkts.
+ */
+ err = send(accept_fd, DATA, DATA_LEN, MSG_EOR);
+ if (CHECK(err != DATA_LEN, "send(accept_fd)",
+ "err:%d errno:%d\n", err, errno))
+ goto done;
+
+ err = recv(cli_fd, buf, DATA_LEN, 0);
+ if (CHECK(err != DATA_LEN, "recv(cli_fd)", "err:%d errno:%d\n",
+ err, errno))
+ goto done;
+ }
+
+ shutdown(cli_fd, SHUT_WR);
+ err = recv(accept_fd, buf, 1, 0);
+ if (CHECK(err, "recv(accept_fd) for fin", "err:%d errno:%d\n",
+ err, errno))
+ goto done;
+ shutdown(accept_fd, SHUT_WR);
+ err = recv(cli_fd, buf, 1, 0);
+ if (CHECK(err, "recv(cli_fd) for fin", "err:%d errno:%d\n",
+ err, errno))
+ goto done;
+ check_sk_pkt_out_cnt(accept_fd, cli_fd);
+ check_result();
+
+done:
+ if (accept_fd != -1)
+ close(accept_fd);
+ if (cli_fd != -1)
+ close(cli_fd);
+ if (listen_fd != -1)
+ close(listen_fd);
+}
+
+void test_sock_fields(void)
+{
+ struct bpf_link *egress_link = NULL, *ingress_link = NULL;
+ int parent_cg_fd = -1, child_cg_fd = -1;
+
+ /* Create a cgroup, get fd, and join it */
+ parent_cg_fd = test__join_cgroup(PARENT_CGROUP);
+ if (CHECK_FAIL(parent_cg_fd < 0))
+ return;
+ parent_cg_id = get_cgroup_id(PARENT_CGROUP);
+ if (CHECK_FAIL(!parent_cg_id))
+ goto done;
+
+ child_cg_fd = test__join_cgroup(CHILD_CGROUP);
+ if (CHECK_FAIL(child_cg_fd < 0))
+ goto done;
+ child_cg_id = get_cgroup_id(CHILD_CGROUP);
+ if (CHECK_FAIL(!child_cg_id))
+ goto done;
+
+ skel = test_sock_fields__open_and_load();
+ if (CHECK(!skel, "test_sock_fields__open_and_load", "failed\n"))
+ goto done;
+
+ egress_link = bpf_program__attach_cgroup(skel->progs.egress_read_sock_fields,
+ child_cg_fd);
+ if (CHECK(IS_ERR(egress_link), "attach_cgroup(egress)", "err:%ld\n",
+ PTR_ERR(egress_link)))
+ goto done;
+
+ ingress_link = bpf_program__attach_cgroup(skel->progs.ingress_read_sock_fields,
+ child_cg_fd);
+ if (CHECK(IS_ERR(ingress_link), "attach_cgroup(ingress)", "err:%ld\n",
+ PTR_ERR(ingress_link)))
+ goto done;
+
+ linum_map_fd = bpf_map__fd(skel->maps.linum_map);
+ sk_pkt_out_cnt_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt);
+ sk_pkt_out_cnt10_fd = bpf_map__fd(skel->maps.sk_pkt_out_cnt10);
+
+ test();
+
+done:
+ bpf_link__destroy(egress_link);
+ bpf_link__destroy(ingress_link);
+ test_sock_fields__destroy(skel);
+ if (child_cg_fd != -1)
+ close(child_cg_fd);
+ if (parent_cg_fd != -1)
+ close(parent_cg_fd);
+}
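
For orientation, a sketch of the egress program side of this test: from a cgroup_skb hook, skb->sk is promoted to a fullsock and then to a bpf_tcp_sock before its fields are read. Illustrative only; the real program also records line numbers and socket storage:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb)
{
	struct bpf_tcp_sock *tp;
	struct bpf_sock *sk;

	sk = skb->sk;
	if (!sk)
		return 1;	/* allow: not a socket we can inspect */

	sk = bpf_sk_fullsock(sk);
	if (!sk)
		return 1;	/* request/timewait socks have no tcp_sock */

	tp = bpf_tcp_sock(sk);
	if (!tp)
		return 1;

	/* e.g. snapshot tp->snd_cwnd, sk->src_port, ... here */
	return 1;
}

char _license[] SEC("license") = "GPL";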
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 96e7b7f84c65..85f73261fab0 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -4,6 +4,9 @@
#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
+#include "test_sockmap_update.skel.h"
+#include "test_sockmap_invalid_update.skel.h"
+#include "bpf_iter_sockmap.skel.h"
#define TCP_REPAIR 19 /* TCP sock is under repair right now */
@@ -45,6 +48,37 @@ error:
return -1;
}
+static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
+{
+ __u32 i, max_entries = bpf_map__max_entries(src);
+ int err, duration = 0, src_fd, dst_fd;
+
+ src_fd = bpf_map__fd(src);
+ dst_fd = bpf_map__fd(dst);
+
+ for (i = 0; i < max_entries; i++) {
+ __u64 src_cookie, dst_cookie;
+
+ err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
+ if (err && errno == ENOENT) {
+ err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
+ CHECK(!err, "map_lookup_elem(dst)", "element %u not deleted\n", i);
+ CHECK(err && errno != ENOENT, "map_lookup_elem(dst)", "%s\n",
+ strerror(errno));
+ continue;
+ }
+ if (CHECK(err, "lookup_elem(src)", "%s\n", strerror(errno)))
+ continue;
+
+ err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
+ if (CHECK(err, "lookup_elem(dst)", "%s\n", strerror(errno)))
+ continue;
+
+ CHECK(dst_cookie != src_cookie, "cookie mismatch",
+ "%llu != %llu (pos %u)\n", dst_cookie, src_cookie, i);
+ }
+}
+
/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
@@ -101,6 +135,151 @@ out:
test_skmsg_load_helpers__destroy(skel);
}
+static void test_sockmap_update(enum bpf_map_type map_type)
+{
+ struct bpf_prog_test_run_attr tattr;
+ int err, prog, src, duration = 0;
+ struct test_sockmap_update *skel;
+ struct bpf_map *dst_map;
+ const __u32 zero = 0;
+ char dummy[14] = {0};
+ __s64 sk;
+
+ sk = connected_socket_v4();
+ if (CHECK(sk == -1, "connected_socket_v4", "cannot connect\n"))
+ return;
+
+ skel = test_sockmap_update__open_and_load();
+ if (CHECK(!skel, "open_and_load", "cannot load skeleton\n"))
+ goto close_sk;
+
+ prog = bpf_program__fd(skel->progs.copy_sock_map);
+ src = bpf_map__fd(skel->maps.src);
+ if (map_type == BPF_MAP_TYPE_SOCKMAP)
+ dst_map = skel->maps.dst_sock_map;
+ else
+ dst_map = skel->maps.dst_sock_hash;
+
+ err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
+ if (CHECK(err, "update_elem(src)", "errno=%u\n", errno))
+ goto out;
+
+ tattr = (struct bpf_prog_test_run_attr){
+ .prog_fd = prog,
+ .repeat = 1,
+ .data_in = dummy,
+ .data_size_in = sizeof(dummy),
+ };
+
+ err = bpf_prog_test_run_xattr(&tattr);
+ if (CHECK_ATTR(err || !tattr.retval, "bpf_prog_test_run",
+ "errno=%u retval=%u\n", errno, tattr.retval))
+ goto out;
+
+ compare_cookies(skel->maps.src, dst_map);
+
+out:
+ test_sockmap_update__destroy(skel);
+close_sk:
+ close(sk);
+}
+
+static void test_sockmap_invalid_update(void)
+{
+ struct test_sockmap_invalid_update *skel;
+ int duration = 0;
+
+ skel = test_sockmap_invalid_update__open_and_load();
+ if (CHECK(skel, "open_and_load", "verifier accepted map_update\n"))
+ test_sockmap_invalid_update__destroy(skel);
+}
+
+static void test_sockmap_copy(enum bpf_map_type map_type)
+{
+ DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
+ int err, len, src_fd, iter_fd, duration = 0;
+ union bpf_iter_link_info linfo = {};
+ __u32 i, num_sockets, num_elems;
+ struct bpf_iter_sockmap *skel;
+ __s64 *sock_fd = NULL;
+ struct bpf_link *link;
+ struct bpf_map *src;
+ char buf[64];
+
+ skel = bpf_iter_sockmap__open_and_load();
+ if (CHECK(!skel, "bpf_iter_sockmap__open_and_load", "skeleton open_and_load failed\n"))
+ return;
+
+ if (map_type == BPF_MAP_TYPE_SOCKMAP) {
+ src = skel->maps.sockmap;
+ num_elems = bpf_map__max_entries(src);
+ num_sockets = num_elems - 1;
+ } else {
+ src = skel->maps.sockhash;
+ num_elems = bpf_map__max_entries(src) - 1;
+ num_sockets = num_elems;
+ }
+
+ sock_fd = calloc(num_sockets, sizeof(*sock_fd));
+ if (CHECK(!sock_fd, "calloc(sock_fd)", "failed to allocate\n"))
+ goto out;
+
+ for (i = 0; i < num_sockets; i++)
+ sock_fd[i] = -1;
+
+ src_fd = bpf_map__fd(src);
+
+ for (i = 0; i < num_sockets; i++) {
+ sock_fd[i] = connected_socket_v4();
+ if (CHECK(sock_fd[i] == -1, "connected_socket_v4", "cannot connect\n"))
+ goto out;
+
+ err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
+ if (CHECK(err, "map_update", "failed: %s\n", strerror(errno)))
+ goto out;
+ }
+
+ linfo.map.map_fd = src_fd;
+ opts.link_info = &linfo;
+ opts.link_info_len = sizeof(linfo);
+ link = bpf_program__attach_iter(skel->progs.copy, &opts);
+ if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
+ goto out;
+
+ iter_fd = bpf_iter_create(bpf_link__fd(link));
+ if (CHECK(iter_fd < 0, "create_iter", "create_iter failed\n"))
+ goto free_link;
+
+ /* drain the iterator; the iter prog copies elements as it runs */
+ while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
+ ;
+ if (CHECK(len < 0, "read", "failed: %s\n", strerror(errno)))
+ goto close_iter;
+
+ /* test results */
+ if (CHECK(skel->bss->elems != num_elems, "elems", "got %u expected %u\n",
+ skel->bss->elems, num_elems))
+ goto close_iter;
+
+ if (CHECK(skel->bss->socks != num_sockets, "socks", "got %u expected %u\n",
+ skel->bss->socks, num_sockets))
+ goto close_iter;
+
+ compare_cookies(src, skel->maps.dst);
+
+close_iter:
+ close(iter_fd);
+free_link:
+ bpf_link__destroy(link);
+out:
+ for (i = 0; sock_fd && i < num_sockets; i++)
+ if (sock_fd[i] >= 0)
+ close(sock_fd[i]);
+ if (sock_fd)
+ free(sock_fd);
+ bpf_iter_sockmap__destroy(skel);
+}
+
void test_sockmap_basic(void)
{
if (test__start_subtest("sockmap create_update_free"))
@@ -111,4 +290,14 @@ void test_sockmap_basic(void)
test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
if (test__start_subtest("sockhash sk_msg load helpers"))
test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
+ if (test__start_subtest("sockmap update"))
+ test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
+ if (test__start_subtest("sockhash update"))
+ test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
+ if (test__start_subtest("sockmap update in unsafe context"))
+ test_sockmap_invalid_update();
+ if (test__start_subtest("sockmap copy"))
+ test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
+ if (test__start_subtest("sockhash copy"))
+ test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
}
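
The copy subtests are driven by a sockmap iterator program. A hedged sketch of that iterator, close to but not line-for-line bpf_iter_sockmap.c (dst, elems, socks mirror the names checked above):

// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} dst SEC(".maps");

__u32 elems, socks;

SEC("iter/sockmap")
int copy(struct bpf_iter__sockmap *ctx)
{
	struct sock *sk = ctx->sk;
	__u32 tmp, *key = ctx->key;
	int ret;

	if (!key)
		return 0;	/* iteration is done */

	elems++;
	tmp = *key;	/* the verifier wants a stack copy of the key */

	if (sk) {
		socks++;
		return bpf_map_update_elem(&dst, &tmp, sk, 0) != 0;
	}

	/* empty slot in the source: make sure dst is empty there too */
	ret = bpf_map_delete_elem(&dst, &tmp);
	return ret && ret != -ENOENT;
}

char _license[] SEC("license") = "GPL";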
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
index 5f54c6aec7f0..b25c9c45c148 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_sk.c
@@ -45,9 +45,9 @@ static int getsetsockopt(void)
goto err;
}
- if (*(int *)big_buf != 0x08) {
+ if (*big_buf != 0x08) {
log_err("Unexpected getsockopt(IP_TOS) optval 0x%x != 0x08",
- *(int *)big_buf);
+ (int)*big_buf);
goto err;
}
diff --git a/tools/testing/selftests/bpf/prog_tests/subprogs.c b/tools/testing/selftests/bpf/prog_tests/subprogs.c
new file mode 100644
index 000000000000..a00abf58c037
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/subprogs.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <test_progs.h>
+#include <time.h>
+#include "test_subprogs.skel.h"
+
+static int duration;
+
+void test_subprogs(void)
+{
+ struct test_subprogs *skel;
+ int err;
+
+ skel = test_subprogs__open_and_load();
+ if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+ return;
+
+ err = test_subprogs__attach(skel);
+ if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err))
+ goto cleanup;
+
+ usleep(1);
+
+ CHECK(skel->bss->res1 != 12, "res1", "got %d, exp %d\n", skel->bss->res1, 12);
+ CHECK(skel->bss->res2 != 17, "res2", "got %d, exp %d\n", skel->bss->res2, 17);
+ CHECK(skel->bss->res3 != 19, "res3", "got %d, exp %d\n", skel->bss->res3, 19);
+ CHECK(skel->bss->res4 != 36, "res4", "got %d, exp %d\n", skel->bss->res4, 36);
+
+cleanup:
+ test_subprogs__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/tailcalls.c b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
index bb8fe646dd9f..ee27d68d2a1c 100644
--- a/tools/testing/selftests/bpf/prog_tests/tailcalls.c
+++ b/tools/testing/selftests/bpf/prog_tests/tailcalls.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
+#include <network_helpers.h>
/* test_tailcall_1 checks basic functionality by patching multiple locations
* in a single program for a single tail call slot with nop->jmp, jmp->nop
@@ -472,6 +473,329 @@ out:
bpf_object__close(obj);
}
+/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly
+ * in combination with BPF subprograms.
+ */
+static void test_tailcall_bpf2bpf_1(void)
+{
+ int err, map_fd, prog_fd, main_fd, i;
+ struct bpf_map *prog_array;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ /* nop -> jmp */
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ 0, &retval, &duration);
+ CHECK(err || retval != 1, "tailcall",
+ "err %d errno %d retval %d\n", err, errno, retval);
+
+ /* jmp -> nop, call subprog that will do tailcall */
+ i = 1;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ 0, &retval, &duration);
+ CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ /* make sure that subprog can access ctx and entry prog that
+ * called this subprog can properly return
+ */
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ 0, &retval, &duration);
+ CHECK(err || retval != sizeof(pkt_v4) * 2,
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_bpf2bpf_2 checks that the count value of the tail call
+ * limit enforcement matches expectations when the tail call is preceded
+ * by a bpf2bpf call.
+ */
+static void test_tailcall_bpf2bpf_2(void)
+{
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char buff[128] = {};
+
+ err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier/0");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ i = 0;
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
+ err, errno, val);
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up
+ * to 256 bytes) can be used within bpf subprograms that contain the
+ * tail calls.
+ */
+static void test_tailcall_bpf2bpf_3(void)
+{
+ int err, map_fd, prog_fd, main_fd, i;
+ struct bpf_map *prog_array;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 3,
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 1;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4),
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ i = 0;
+ err = bpf_map_delete_elem(map_fd, &i);
+ if (CHECK_FAIL(err))
+ goto out;
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 2,
+ "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+out:
+ bpf_object__close(obj);
+}
+
+/* test_tailcall_bpf2bpf_4 checks that the tailcall counter is correctly
+ * preserved across tail calls combined with bpf2bpf calls. To verify the
+ * tailcall counter behavior, the bpf program goes through the following flow:
+ *
+ * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
+ * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
+ * subprog2 [here bump global counter] --------^
+ *
+ * We go through the first two tail calls and start counting from subprog2,
+ * where the loop begins. At the end of the test, make sure that the global
+ * counter equals 31: the tailcall counter includes the first two tail calls,
+ * whereas the global counter is only incremented in the loop shown above.
+ */
+static void test_tailcall_bpf2bpf_4(void)
+{
+ int err, map_fd, prog_fd, main_fd, data_fd, i, val;
+ struct bpf_map *prog_array, *data_map;
+ struct bpf_program *prog;
+ struct bpf_object *obj;
+ __u32 retval, duration;
+ char prog_name[32];
+
+ err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
+ &obj, &prog_fd);
+ if (CHECK_FAIL(err))
+ return;
+
+ prog = bpf_object__find_program_by_title(obj, "classifier");
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ main_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(main_fd < 0))
+ goto out;
+
+ prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
+ if (CHECK_FAIL(!prog_array))
+ goto out;
+
+ map_fd = bpf_map__fd(prog_array);
+ if (CHECK_FAIL(map_fd < 0))
+ goto out;
+
+ for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
+ snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);
+
+ prog = bpf_object__find_program_by_title(obj, prog_name);
+ if (CHECK_FAIL(!prog))
+ goto out;
+
+ prog_fd = bpf_program__fd(prog);
+ if (CHECK_FAIL(prog_fd < 0))
+ goto out;
+
+ err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
+ if (CHECK_FAIL(err))
+ goto out;
+ }
+
+ err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
+ &duration, &retval, NULL);
+ CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall", "err %d errno %d retval %d\n",
+ err, errno, retval);
+
+ data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
+ if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
+ goto out;
+
+ data_fd = bpf_map__fd(data_map);
+ if (CHECK_FAIL(data_fd < 0))
+ goto out;
+
+ i = 0;
+ err = bpf_map_lookup_elem(data_fd, &i, &val);
+ CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
+ err, errno, val);
+
+out:
+ bpf_object__close(obj);
+}
+
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -484,4 +808,12 @@ void test_tailcalls(void)
test_tailcall_4();
if (test__start_subtest("tailcall_5"))
test_tailcall_5();
+ if (test__start_subtest("tailcall_bpf2bpf_1"))
+ test_tailcall_bpf2bpf_1();
+ if (test__start_subtest("tailcall_bpf2bpf_2"))
+ test_tailcall_bpf2bpf_2();
+ if (test__start_subtest("tailcall_bpf2bpf_3"))
+ test_tailcall_bpf2bpf_3();
+ if (test__start_subtest("tailcall_bpf2bpf_4"))
+ test_tailcall_bpf2bpf_4();
}
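
The pattern these four tests exercise looks like the sketch below: the entry program delegates to a static subprogram, and the subprogram issues the tail call. Illustrative, not the tailcall_bpf2bpf*.c sources:

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int subprog_tail(struct __sk_buff *skb)
{
	/* Tail calls from subprograms count toward the same per-chain
	 * limit (MAX_TAIL_CALL_CNT) as calls from the entry program.
	 */
	bpf_tail_call(skb, &jmp_table, 0);
	return 1;	/* reached only if slot 0 is empty */
}

SEC("classifier")
int entry(struct __sk_buff *skb)
{
	return subprog_tail(skb);
}

char __license[] SEC("license") = "GPL";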
diff --git a/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
new file mode 100644
index 000000000000..c85174cdcb77
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/tcp_hdr_options.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#define _GNU_SOURCE
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+#include <linux/compiler.h>
+
+#include "test_progs.h"
+#include "cgroup_helpers.h"
+#include "network_helpers.h"
+#include "test_tcp_hdr_options.h"
+#include "test_tcp_hdr_options.skel.h"
+#include "test_misc_tcp_hdr_options.skel.h"
+
+#define LO_ADDR6 "::1"
+#define CG_NAME "/tcpbpf-hdr-opt-test"
+
+struct bpf_test_option exp_passive_estab_in;
+struct bpf_test_option exp_active_estab_in;
+struct bpf_test_option exp_passive_fin_in;
+struct bpf_test_option exp_active_fin_in;
+struct hdr_stg exp_passive_hdr_stg;
+struct hdr_stg exp_active_hdr_stg = { .active = true, };
+
+static struct test_misc_tcp_hdr_options *misc_skel;
+static struct test_tcp_hdr_options *skel;
+static int lport_linum_map_fd;
+static int hdr_stg_map_fd;
+static __u32 duration;
+static int cg_fd;
+
+struct sk_fds {
+ int srv_fd;
+ int passive_fd;
+ int active_fd;
+ int passive_lport;
+ int active_lport;
+};
+
+static int create_netns(void)
+{
+ if (CHECK(unshare(CLONE_NEWNET), "create netns",
+ "unshare(CLONE_NEWNET): %s (%d)",
+ strerror(errno), errno))
+ return -1;
+
+ if (CHECK(system("ip link set dev lo up"), "run ip cmd",
+ "failed to bring lo link up\n"))
+ return -1;
+
+ return 0;
+}
+
+static int write_sysctl(const char *sysctl, const char *value)
+{
+ int fd, err, len;
+
+ fd = open(sysctl, O_WRONLY);
+ if (CHECK(fd == -1, "open sysctl", "open(%s): %s (%d)\n",
+ sysctl, strerror(errno), errno))
+ return -1;
+
+ len = strlen(value);
+ err = write(fd, value, len);
+ close(fd);
+ if (CHECK(err != len, "write sysctl",
+ "write(%s, %s): err:%d %s (%d)\n",
+ sysctl, value, err, strerror(errno), errno))
+ return -1;
+
+ return 0;
+}
+
+static void print_hdr_stg(const struct hdr_stg *hdr_stg, const char *prefix)
+{
+ fprintf(stderr, "%s{active:%u, resend_syn:%u, syncookie:%u, fastopen:%u}\n",
+ prefix ? : "", hdr_stg->active, hdr_stg->resend_syn,
+ hdr_stg->syncookie, hdr_stg->fastopen);
+}
+
+static void print_option(const struct bpf_test_option *opt, const char *prefix)
+{
+ fprintf(stderr, "%s{flags:0x%x, max_delack_ms:%u, rand:0x%x}\n",
+ prefix ? : "", opt->flags, opt->max_delack_ms, opt->rand);
+}
+
+static void sk_fds_close(struct sk_fds *sk_fds)
+{
+ close(sk_fds->srv_fd);
+ close(sk_fds->passive_fd);
+ close(sk_fds->active_fd);
+}
+
+static int sk_fds_shutdown(struct sk_fds *sk_fds)
+{
+ int ret, abyte;
+
+ shutdown(sk_fds->active_fd, SHUT_WR);
+ ret = read(sk_fds->passive_fd, &abyte, sizeof(abyte));
+ if (CHECK(ret != 0, "read-after-shutdown(passive_fd):",
+ "ret:%d %s (%d)\n",
+ ret, strerror(errno), errno))
+ return -1;
+
+ shutdown(sk_fds->passive_fd, SHUT_WR);
+ ret = read(sk_fds->active_fd, &abyte, sizeof(abyte));
+ if (CHECK(ret != 0, "read-after-shutdown(active_fd):",
+ "ret:%d %s (%d)\n",
+ ret, strerror(errno), errno))
+ return -1;
+
+ return 0;
+}
+
+static int sk_fds_connect(struct sk_fds *sk_fds, bool fast_open)
+{
+ const char fast[] = "FAST!!!";
+ struct sockaddr_in6 addr6;
+ socklen_t len;
+
+ sk_fds->srv_fd = start_server(AF_INET6, SOCK_STREAM, LO_ADDR6, 0, 0);
+ if (CHECK(sk_fds->srv_fd == -1, "start_server", "%s (%d)\n",
+ strerror(errno), errno))
+ goto error;
+
+ if (fast_open)
+ sk_fds->active_fd = fastopen_connect(sk_fds->srv_fd, fast,
+ sizeof(fast), 0);
+ else
+ sk_fds->active_fd = connect_to_fd(sk_fds->srv_fd, 0);
+
+ if (CHECK_FAIL(sk_fds->active_fd == -1)) {
+ close(sk_fds->srv_fd);
+ goto error;
+ }
+
+ len = sizeof(addr6);
+ if (CHECK(getsockname(sk_fds->srv_fd, (struct sockaddr *)&addr6,
+ &len), "getsockname(srv_fd)", "%s (%d)\n",
+ strerror(errno), errno))
+ goto error_close;
+ sk_fds->passive_lport = ntohs(addr6.sin6_port);
+
+ len = sizeof(addr6);
+ if (CHECK(getsockname(sk_fds->active_fd, (struct sockaddr *)&addr6,
+ &len), "getsockname(active_fd)", "%s (%d)\n",
+ strerror(errno), errno))
+ goto error_close;
+ sk_fds->active_lport = ntohs(addr6.sin6_port);
+
+ sk_fds->passive_fd = accept(sk_fds->srv_fd, NULL, 0);
+ if (CHECK(sk_fds->passive_fd == -1, "accept(srv_fd)", "%s (%d)\n",
+ strerror(errno), errno))
+ goto error_close;
+
+ if (fast_open) {
+ char bytes_in[sizeof(fast)];
+ int ret;
+
+ ret = read(sk_fds->passive_fd, bytes_in, sizeof(bytes_in));
+ if (CHECK(ret != sizeof(fast), "read fastopen syn data",
+ "expected=%lu actual=%d\n", sizeof(fast), ret)) {
+ close(sk_fds->passive_fd);
+ goto error_close;
+ }
+ }
+
+ return 0;
+
+error_close:
+ close(sk_fds->active_fd);
+ close(sk_fds->srv_fd);
+
+error:
+ memset(sk_fds, -1, sizeof(*sk_fds));
+ return -1;
+}
+
+static int check_hdr_opt(const struct bpf_test_option *exp,
+ const struct bpf_test_option *act,
+ const char *hdr_desc)
+{
+ if (CHECK(memcmp(exp, act, sizeof(*exp)),
+ "expected-vs-actual", "unexpected %s\n", hdr_desc)) {
+ print_option(exp, "expected: ");
+ print_option(act, " actual: ");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int check_hdr_stg(const struct hdr_stg *exp, int fd,
+ const char *stg_desc)
+{
+ struct hdr_stg act;
+
+ if (CHECK(bpf_map_lookup_elem(hdr_stg_map_fd, &fd, &act),
+ "map_lookup(hdr_stg_map_fd)", "%s %s (%d)\n",
+ stg_desc, strerror(errno), errno))
+ return -1;
+
+ if (CHECK(memcmp(exp, &act, sizeof(*exp)),
+ "expected-vs-actual", "unexpected %s\n", stg_desc)) {
+ print_hdr_stg(exp, "expected: ");
+ print_hdr_stg(&act, " actual: ");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int check_error_linum(const struct sk_fds *sk_fds)
+{
+ unsigned int nr_errors = 0;
+ struct linum_err linum_err;
+ int lport;
+
+ lport = sk_fds->passive_lport;
+ if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
+ fprintf(stderr,
+ "bpf prog error out at lport:passive(%d), linum:%u err:%d\n",
+ lport, linum_err.linum, linum_err.err);
+ nr_errors++;
+ }
+
+ lport = sk_fds->active_lport;
+ if (!bpf_map_lookup_elem(lport_linum_map_fd, &lport, &linum_err)) {
+ fprintf(stderr,
+ "bpf prog error out at lport:active(%d), linum:%u err:%d\n",
+ lport, linum_err.linum, linum_err.err);
+ nr_errors++;
+ }
+
+ return nr_errors;
+}
+
+static void check_hdr_and_close_fds(struct sk_fds *sk_fds)
+{
+ const __u32 expected_inherit_cb_flags =
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG |
+ BPF_SOCK_OPS_STATE_CB_FLAG;
+
+ if (sk_fds_shutdown(sk_fds))
+ goto check_linum;
+
+ if (CHECK(expected_inherit_cb_flags != skel->bss->inherit_cb_flags,
+ "Unexpected inherit_cb_flags", "0x%x != 0x%x\n",
+ skel->bss->inherit_cb_flags, expected_inherit_cb_flags))
+ goto check_linum;
+
+ if (check_hdr_stg(&exp_passive_hdr_stg, sk_fds->passive_fd,
+ "passive_hdr_stg"))
+ goto check_linum;
+
+ if (check_hdr_stg(&exp_active_hdr_stg, sk_fds->active_fd,
+ "active_hdr_stg"))
+ goto check_linum;
+
+ if (check_hdr_opt(&exp_passive_estab_in, &skel->bss->passive_estab_in,
+ "passive_estab_in"))
+ goto check_linum;
+
+ if (check_hdr_opt(&exp_active_estab_in, &skel->bss->active_estab_in,
+ "active_estab_in"))
+ goto check_linum;
+
+ if (check_hdr_opt(&exp_passive_fin_in, &skel->bss->passive_fin_in,
+ "passive_fin_in"))
+ goto check_linum;
+
+ check_hdr_opt(&exp_active_fin_in, &skel->bss->active_fin_in,
+ "active_fin_in");
+
+check_linum:
+ CHECK_FAIL(check_error_linum(sk_fds));
+ sk_fds_close(sk_fds);
+}
+
+static void prepare_out(void)
+{
+ skel->bss->active_syn_out = exp_passive_estab_in;
+ skel->bss->passive_synack_out = exp_active_estab_in;
+
+ skel->bss->active_fin_out = exp_passive_fin_in;
+ skel->bss->passive_fin_out = exp_active_fin_in;
+}
+
+static void reset_test(void)
+{
+ size_t optsize = sizeof(struct bpf_test_option);
+ int lport, err;
+
+ memset(&skel->bss->passive_synack_out, 0, optsize);
+ memset(&skel->bss->passive_fin_out, 0, optsize);
+
+ memset(&skel->bss->passive_estab_in, 0, optsize);
+ memset(&skel->bss->passive_fin_in, 0, optsize);
+
+ memset(&skel->bss->active_syn_out, 0, optsize);
+ memset(&skel->bss->active_fin_out, 0, optsize);
+
+ memset(&skel->bss->active_estab_in, 0, optsize);
+ memset(&skel->bss->active_fin_in, 0, optsize);
+
+ skel->bss->inherit_cb_flags = 0;
+
+ skel->data->test_kind = TCPOPT_EXP;
+ skel->data->test_magic = 0xeB9F;
+
+ memset(&exp_passive_estab_in, 0, optsize);
+ memset(&exp_active_estab_in, 0, optsize);
+ memset(&exp_passive_fin_in, 0, optsize);
+ memset(&exp_active_fin_in, 0, optsize);
+
+ memset(&exp_passive_hdr_stg, 0, sizeof(exp_passive_hdr_stg));
+ memset(&exp_active_hdr_stg, 0, sizeof(exp_active_hdr_stg));
+ exp_active_hdr_stg.active = true;
+
+ err = bpf_map_get_next_key(lport_linum_map_fd, NULL, &lport);
+ while (!err) {
+ bpf_map_delete_elem(lport_linum_map_fd, &lport);
+ err = bpf_map_get_next_key(lport_linum_map_fd, &lport, &lport);
+ }
+}
+
+static void fastopen_estab(void)
+{
+ struct bpf_link *link;
+ struct sk_fds sk_fds;
+
+ hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
+ lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
+
+ exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
+ exp_passive_estab_in.rand = 0xfa;
+ exp_passive_estab_in.max_delack_ms = 11;
+
+ exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
+ exp_active_estab_in.rand = 0xce;
+ exp_active_estab_in.max_delack_ms = 22;
+
+ exp_passive_hdr_stg.fastopen = true;
+
+ prepare_out();
+
+ /* Allow fastopen without fastopen cookie */
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_fastopen", "1543"))
+ return;
+
+ link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
+ if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
+ PTR_ERR(link)))
+ return;
+
+ if (sk_fds_connect(&sk_fds, true)) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ check_hdr_and_close_fds(&sk_fds);
+ bpf_link__destroy(link);
+}
+
+static void syncookie_estab(void)
+{
+ struct bpf_link *link;
+ struct sk_fds sk_fds;
+
+ hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
+ lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
+
+ exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
+ exp_passive_estab_in.rand = 0xfa;
+ exp_passive_estab_in.max_delack_ms = 11;
+
+ exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS |
+ OPTION_F_RESEND;
+ exp_active_estab_in.rand = 0xce;
+ exp_active_estab_in.max_delack_ms = 22;
+
+ exp_passive_hdr_stg.syncookie = true;
+ exp_active_hdr_stg.resend_syn = true;
+
+ prepare_out();
+
+ /* Clear the RESEND to ensure the bpf prog can learn
+ * want_cookie and set the RESEND by itself.
+ */
+ skel->bss->passive_synack_out.flags &= ~OPTION_F_RESEND;
+
+ /* Enforce syncookie mode */
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "2"))
+ return;
+
+ link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
+ if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
+ PTR_ERR(link)))
+ return;
+
+ if (sk_fds_connect(&sk_fds, false)) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ check_hdr_and_close_fds(&sk_fds);
+ bpf_link__destroy(link);
+}
+
+static void fin(void)
+{
+ struct bpf_link *link;
+ struct sk_fds sk_fds;
+
+ hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
+ lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
+
+ exp_passive_fin_in.flags = OPTION_F_RAND;
+ exp_passive_fin_in.rand = 0xfa;
+
+ exp_active_fin_in.flags = OPTION_F_RAND;
+ exp_active_fin_in.rand = 0xce;
+
+ prepare_out();
+
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
+ return;
+
+ link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
+ if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
+ PTR_ERR(link)))
+ return;
+
+ if (sk_fds_connect(&sk_fds, false)) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ check_hdr_and_close_fds(&sk_fds);
+ bpf_link__destroy(link);
+}
+
+static void __simple_estab(bool exprm)
+{
+ struct bpf_link *link;
+ struct sk_fds sk_fds;
+
+ hdr_stg_map_fd = bpf_map__fd(skel->maps.hdr_stg_map);
+ lport_linum_map_fd = bpf_map__fd(skel->maps.lport_linum_map);
+
+ exp_passive_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
+ exp_passive_estab_in.rand = 0xfa;
+ exp_passive_estab_in.max_delack_ms = 11;
+
+ exp_active_estab_in.flags = OPTION_F_RAND | OPTION_F_MAX_DELACK_MS;
+ exp_active_estab_in.rand = 0xce;
+ exp_active_estab_in.max_delack_ms = 22;
+
+ prepare_out();
+
+ if (!exprm) {
+ skel->data->test_kind = 0xB9;
+ skel->data->test_magic = 0;
+ }
+
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
+ return;
+
+ link = bpf_program__attach_cgroup(skel->progs.estab, cg_fd);
+ if (CHECK(IS_ERR(link), "attach_cgroup(estab)", "err: %ld\n",
+ PTR_ERR(link)))
+ return;
+
+ if (sk_fds_connect(&sk_fds, false)) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ check_hdr_and_close_fds(&sk_fds);
+ bpf_link__destroy(link);
+}
+
+static void no_exprm_estab(void)
+{
+ __simple_estab(false);
+}
+
+static void simple_estab(void)
+{
+ __simple_estab(true);
+}
+
+static void misc(void)
+{
+ const char send_msg[] = "MISC!!!";
+ char recv_msg[sizeof(send_msg)];
+ const unsigned int nr_data = 2;
+ struct bpf_link *link;
+ struct sk_fds sk_fds;
+ int i, ret;
+
+ lport_linum_map_fd = bpf_map__fd(misc_skel->maps.lport_linum_map);
+
+ if (write_sysctl("/proc/sys/net/ipv4/tcp_syncookies", "1"))
+ return;
+
+ link = bpf_program__attach_cgroup(misc_skel->progs.misc_estab, cg_fd);
+ if (CHECK(IS_ERR(link), "attach_cgroup(misc_estab)", "err: %ld\n",
+ PTR_ERR(link)))
+ return;
+
+ if (sk_fds_connect(&sk_fds, false)) {
+ bpf_link__destroy(link);
+ return;
+ }
+
+ for (i = 0; i < nr_data; i++) {
+ /* MSG_EOR to ensure the skbs will not be coalesced */
+ ret = send(sk_fds.active_fd, send_msg, sizeof(send_msg),
+ MSG_EOR);
+ if (CHECK(ret != sizeof(send_msg), "send(msg)", "ret:%d\n",
+ ret))
+ goto check_linum;
+
+ ret = read(sk_fds.passive_fd, recv_msg, sizeof(recv_msg));
+ if (CHECK(ret != sizeof(send_msg), "read(msg)", "ret:%d\n",
+ ret))
+ goto check_linum;
+ }
+
+ if (sk_fds_shutdown(&sk_fds))
+ goto check_linum;
+
+ CHECK(misc_skel->bss->nr_syn != 1, "unexpected nr_syn",
+ "expected (1) != actual (%u)\n",
+ misc_skel->bss->nr_syn);
+
+ CHECK(misc_skel->bss->nr_data != nr_data, "unexpected nr_data",
+ "expected (%u) != actual (%u)\n",
+ nr_data, misc_skel->bss->nr_data);
+
+ /* The last ACK may have been delayed, so it is either 1 or 2. */
+ CHECK(misc_skel->bss->nr_pure_ack != 1 &&
+ misc_skel->bss->nr_pure_ack != 2,
+ "unexpected nr_pure_ack",
+ "expected (1 or 2) != actual (%u)\n",
+ misc_skel->bss->nr_pure_ack);
+
+ CHECK(misc_skel->bss->nr_fin != 1, "unexpected nr_fin",
+ "expected (1) != actual (%u)\n",
+ misc_skel->bss->nr_fin);
+
+check_linum:
+ CHECK_FAIL(check_error_linum(&sk_fds));
+ sk_fds_close(&sk_fds);
+ bpf_link__destroy(link);
+}
+
+struct test {
+ const char *desc;
+ void (*run)(void);
+};
+
+#define DEF_TEST(name) { #name, name }
+static struct test tests[] = {
+ DEF_TEST(simple_estab),
+ DEF_TEST(no_exprm_estab),
+ DEF_TEST(syncookie_estab),
+ DEF_TEST(fastopen_estab),
+ DEF_TEST(fin),
+ DEF_TEST(misc),
+};
+
+void test_tcp_hdr_options(void)
+{
+ int i;
+
+ skel = test_tcp_hdr_options__open_and_load();
+ if (CHECK(!skel, "open and load skel", "failed\n"))
+ return;
+
+ misc_skel = test_misc_tcp_hdr_options__open_and_load();
+ if (CHECK(!misc_skel, "open and load misc test skel", "failed\n"))
+ goto skel_destroy;
+
+ cg_fd = test__join_cgroup(CG_NAME);
+ if (CHECK_FAIL(cg_fd < 0))
+ goto skel_destroy;
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (!test__start_subtest(tests[i].desc))
+ continue;
+
+ if (create_netns())
+ break;
+
+ tests[i].run();
+
+ reset_test();
+ }
+
+ close(cg_fd);
+skel_destroy:
+ test_misc_tcp_hdr_options__destroy(misc_skel);
+ test_tcp_hdr_options__destroy(skel);
+}
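
Every subtest above repeats the same cgroup attach and teardown sequence. As a standalone reference, here is a minimal sketch of that libbpf pattern; run_one is a hypothetical helper and error handling is reduced to the essentials:

    #include <bpf/libbpf.h>

    /* Sketch: cgroup attachment via bpf_link. The attachment lives
     * exactly as long as the link object, so destroying the link on
     * every exit path (as the tests do) is what detaches the program.
     */
    static int run_one(struct bpf_program *prog, int cg_fd)
    {
            struct bpf_link *link;

            link = bpf_program__attach_cgroup(prog, cg_fd);
            if (libbpf_get_error(link))
                    return -1;

            /* ... drive TCP traffic through sockets in the cgroup ... */

            bpf_link__destroy(link);
            return 0;
    }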
diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpffs.c b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
new file mode 100644
index 000000000000..172c999e523c
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_bpffs.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define _GNU_SOURCE
+#include <sched.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <test_progs.h>
+
+#define TDIR "/sys/kernel/debug"
+
+static int read_iter(char *file)
+{
+ /* 1024-byte chunks should be large enough that "iter" eventually appears unsplit in one read */
+ char buf[1024];
+ int fd, len;
+
+ fd = open(file, 0);
+ if (fd < 0)
+ return -1;
+ while ((len = read(fd, buf, sizeof(buf) - 1)) > 0) {
+ buf[len] = '\0'; /* NUL-terminate for strstr() */
+ if (strstr(buf, "iter")) {
+ close(fd);
+ return 0;
+ }
+ }
+ close(fd);
+ return -1;
+}
+
+static int fn(void)
+{
+ int err, duration = 0;
+
+ err = unshare(CLONE_NEWNS);
+ if (CHECK(err, "unshare", "failed: %d\n", errno))
+ goto out;
+
+ err = mount("", "/", "", MS_REC | MS_PRIVATE, NULL);
+ if (CHECK(err, "mount /", "failed: %d\n", errno))
+ goto out;
+
+ err = umount(TDIR);
+ if (CHECK(err, "umount " TDIR, "failed: %d\n", errno))
+ goto out;
+
+ err = mount("none", TDIR, "tmpfs", 0, NULL);
+ if (CHECK(err, "mount", "mount root failed: %d\n", errno))
+ goto out;
+
+ err = mkdir(TDIR "/fs1", 0777);
+ if (CHECK(err, "mkdir "TDIR"/fs1", "failed: %d\n", errno))
+ goto out;
+ err = mkdir(TDIR "/fs2", 0777);
+ if (CHECK(err, "mkdir "TDIR"/fs2", "failed: %d\n", errno))
+ goto out;
+
+ err = mount("bpf", TDIR "/fs1", "bpf", 0, NULL);
+ if (CHECK(err, "mount bpffs "TDIR"/fs1", "failed: %d\n", errno))
+ goto out;
+ err = mount("bpf", TDIR "/fs2", "bpf", 0, NULL);
+ if (CHECK(err, "mount bpffs " TDIR "/fs2", "failed: %d\n", errno))
+ goto out;
+
+ err = read_iter(TDIR "/fs1/maps.debug");
+ if (CHECK(err, "reading " TDIR "/fs1/maps.debug", "failed\n"))
+ goto out;
+ err = read_iter(TDIR "/fs2/progs.debug");
+ if (CHECK(err, "reading " TDIR "/fs2/progs.debug", "failed\n"))
+ goto out;
+out:
+ umount(TDIR "/fs1");
+ umount(TDIR "/fs2");
+ rmdir(TDIR "/fs1");
+ rmdir(TDIR "/fs2");
+ umount(TDIR);
+ exit(err);
+}
+
+void test_test_bpffs(void)
+{
+ int err, duration = 0, status = 0;
+ pid_t pid;
+
+ pid = fork();
+ if (CHECK(pid == -1, "fork", "fork failed %d\n", errno))
+ return;
+ if (pid == 0)
+ fn();
+ err = waitpid(pid, &status, 0);
+ if (CHECK(err == -1 && errno != ECHILD, "waitpid", "failed %d\n", errno))
+ return;
+ if (CHECK(WEXITSTATUS(status), "bpffs test", "failed %d\n", WEXITSTATUS(status)))
+ return;
+}
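
The test above only verifies that multiple independent bpffs instances can be mounted and that their auto-populated maps.debug/progs.debug iterator files are readable. The usual reason to mount bpffs is pinning; a minimal sketch, with the mount point and object name purely illustrative:

    #include <errno.h>
    #include <bpf/bpf.h>

    /* Sketch: pin a map into bpffs so it outlives this process, then
     * re-open it by path (possibly from another process).
     */
    static int pin_and_reopen(int map_fd)
    {
            if (bpf_obj_pin(map_fd, "/sys/fs/bpf/my_map"))
                    return -errno;

            return bpf_obj_get("/sys/fs/bpf/my_map"); /* new fd, same map */
    }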
diff --git a/tools/testing/selftests/bpf/prog_tests/test_local_storage.c b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
new file mode 100644
index 000000000000..91cd6f357246
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_local_storage.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2020 Google LLC.
+ */
+
+#include <test_progs.h>
+#include <linux/limits.h>
+
+#include "local_storage.skel.h"
+#include "network_helpers.h"
+
+static int create_and_unlink_file(void)
+{
+ char fname[PATH_MAX] = "/tmp/fileXXXXXX";
+ int fd;
+
+ fd = mkstemp(fname);
+ if (fd < 0)
+ return fd;
+
+ close(fd);
+ unlink(fname);
+ return 0;
+}
+
+void test_test_local_storage(void)
+{
+ struct local_storage *skel = NULL;
+ int err, duration = 0, serv_sk = -1;
+
+ skel = local_storage__open_and_load();
+ if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
+ goto close_prog;
+
+ err = local_storage__attach(skel);
+ if (CHECK(err, "attach", "lsm attach failed: %d\n", err))
+ goto close_prog;
+
+ skel->bss->monitored_pid = getpid();
+
+ err = create_and_unlink_file();
+ if (CHECK(err < 0, "create_and_unlink_file", "err %d errno %d\n", err, errno))
+ goto close_prog;
+
+ CHECK(skel->data->inode_storage_result != 0, "inode_storage_result",
+ "inode_local_storage not set\n");
+
+ serv_sk = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0);
+ if (CHECK(serv_sk < 0, "start_server", "failed to start server\n"))
+ goto close_prog;
+
+ CHECK(skel->data->sk_storage_result != 0, "sk_storage_result",
+ "sk_local_storage not set\n");
+
+ close(serv_sk);
+
+close_prog:
+ local_storage__destroy(skel);
+}
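
The local_storage__* functions used here are not hand-written; they come from a skeleton header generated by `bpftool gen skeleton`. Every skeleton follows the same lifecycle, sketched below for a hypothetical object foo.bpf.o:

    #include <unistd.h>
    #include "foo.skel.h"   /* generated: bpftool gen skeleton foo.bpf.o */

    static int run(void)
    {
            struct foo *skel;
            int err;

            skel = foo__open_and_load();    /* open + load into the kernel */
            if (!skel)
                    return -1;

            skel->bss->monitored_pid = getpid();    /* globals are mmap()ed */

            err = foo__attach(skel);        /* attach progs per their SEC() */
            if (!err) {
                    /* ... trigger hooks, read skel->bss / skel->data ... */
            }

            foo__destroy(skel);             /* detach, unload, free */
            return err;
    }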
diff --git a/tools/testing/selftests/bpf/prog_tests/test_lsm.c b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
index b17eb2045c1d..6ab29226c99b 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_lsm.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_lsm.c
@@ -10,6 +10,7 @@
#include <unistd.h>
#include <malloc.h>
#include <stdlib.h>
+#include <sys/syscall.h>
#include "lsm.skel.h"
@@ -55,6 +56,7 @@ void test_test_lsm(void)
{
struct lsm *skel = NULL;
int err, duration = 0;
+ int buf = 1234;
skel = lsm__open_and_load();
if (CHECK(!skel, "skel_load", "lsm skeleton failed\n"))
@@ -81,6 +83,13 @@ void test_test_lsm(void)
CHECK(skel->bss->mprotect_count != 1, "mprotect_count",
"mprotect_count = %d\n", skel->bss->mprotect_count);
+ syscall(__NR_setdomainname, &buf, -2L);
+ syscall(__NR_setdomainname, 0, -3L);
+ syscall(__NR_setdomainname, ~0L, -4L);
+
+ CHECK(skel->bss->copy_test != 3, "copy_test",
+ "copy_test = %d\n", skel->bss->copy_test);
+
close_prog:
lsm__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/prog_tests/test_overhead.c b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
index 2702df2b2343..9966685866fd 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_overhead.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_overhead.c
@@ -61,10 +61,9 @@ void test_test_overhead(void)
const char *raw_tp_name = "raw_tp/task_rename";
const char *fentry_name = "fentry/__set_task_comm";
const char *fexit_name = "fexit/__set_task_comm";
- const char *fmodret_name = "fmod_ret/__set_task_comm";
const char *kprobe_func = "__set_task_comm";
struct bpf_program *kprobe_prog, *kretprobe_prog, *raw_tp_prog;
- struct bpf_program *fentry_prog, *fexit_prog, *fmodret_prog;
+ struct bpf_program *fentry_prog, *fexit_prog;
struct bpf_object *obj;
struct bpf_link *link;
int err, duration = 0;
@@ -97,11 +96,6 @@ void test_test_overhead(void)
if (CHECK(!fexit_prog, "find_probe",
"prog '%s' not found\n", fexit_name))
goto cleanup;
- fmodret_prog = bpf_object__find_program_by_title(obj, fmodret_name);
- if (CHECK(!fmodret_prog, "find_probe",
- "prog '%s' not found\n", fmodret_name))
- goto cleanup;
-
err = bpf_object__load(obj);
if (CHECK(err, "obj_load", "err %d\n", err))
goto cleanup;
@@ -148,12 +142,6 @@ void test_test_overhead(void)
test_run("fexit");
bpf_link__destroy(link);
- /* attach fmod_ret */
- link = bpf_program__attach_trace(fmodret_prog);
- if (CHECK(IS_ERR(link), "attach fmod_ret", "err %ld\n", PTR_ERR(link)))
- goto cleanup;
- test_run("fmod_ret");
- bpf_link__destroy(link);
cleanup:
prctl(PR_SET_NAME, comm, 0L, 0L, 0L);
bpf_object__close(obj);
diff --git a/tools/testing/selftests/bpf/prog_tests/test_profiler.c b/tools/testing/selftests/bpf/prog_tests/test_profiler.c
new file mode 100644
index 000000000000..4ca275101ee0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/test_profiler.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <test_progs.h>
+#include "progs/profiler.h"
+#include "profiler1.skel.h"
+#include "profiler2.skel.h"
+#include "profiler3.skel.h"
+
+static int sanity_run(struct bpf_program *prog)
+{
+ struct bpf_prog_test_run_attr test_attr = {};
+ __u64 args[] = {1, 2, 3};
+ __u32 duration = 0;
+ int err, prog_fd;
+
+ prog_fd = bpf_program__fd(prog);
+ test_attr.prog_fd = prog_fd;
+ test_attr.ctx_in = args;
+ test_attr.ctx_size_in = sizeof(args);
+ err = bpf_prog_test_run_xattr(&test_attr);
+ if (CHECK(err || test_attr.retval, "test_run",
+ "err %d errno %d retval %d duration %d\n",
+ err, errno, test_attr.retval, duration))
+ return -1;
+ return 0;
+}
+
+void test_test_profiler(void)
+{
+ struct profiler1 *profiler1_skel = NULL;
+ struct profiler2 *profiler2_skel = NULL;
+ struct profiler3 *profiler3_skel = NULL;
+ __u32 duration = 0;
+ int err;
+
+ profiler1_skel = profiler1__open_and_load();
+ if (CHECK(!profiler1_skel, "profiler1_skel_load", "profiler1 skeleton failed\n"))
+ goto cleanup;
+
+ err = profiler1__attach(profiler1_skel);
+ if (CHECK(err, "profiler1_attach", "profiler1 attach failed: %d\n", err))
+ goto cleanup;
+
+ if (sanity_run(profiler1_skel->progs.raw_tracepoint__sched_process_exec))
+ goto cleanup;
+
+ profiler2_skel = profiler2__open_and_load();
+ if (CHECK(!profiler2_skel, "profiler2_skel_load", "profiler2 skeleton failed\n"))
+ goto cleanup;
+
+ err = profiler2__attach(profiler2_skel);
+ if (CHECK(err, "profiler2_attach", "profiler2 attach failed: %d\n", err))
+ goto cleanup;
+
+ if (sanity_run(profiler2_skel->progs.raw_tracepoint__sched_process_exec))
+ goto cleanup;
+
+ profiler3_skel = profiler3__open_and_load();
+ if (CHECK(!profiler3_skel, "profiler3_skel_load", "profiler3 skeleton failed\n"))
+ goto cleanup;
+
+ err = profiler3__attach(profiler3_skel);
+ if (CHECK(err, "profiler3_attach", "profiler3 attach failed: %d\n", err))
+ goto cleanup;
+
+ if (sanity_run(profiler3_skel->progs.raw_tracepoint__sched_process_exec))
+ goto cleanup;
+cleanup:
+ profiler1__destroy(profiler1_skel);
+ profiler2__destroy(profiler2_skel);
+ profiler3__destroy(profiler3_skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/trace_ext.c b/tools/testing/selftests/bpf/prog_tests/trace_ext.c
new file mode 100644
index 000000000000..924441d4362d
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/trace_ext.c
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <test_progs.h>
+#include <network_helpers.h>
+#include <sys/stat.h>
+#include <linux/sched.h>
+#include <sys/syscall.h>
+
+#include "test_pkt_md_access.skel.h"
+#include "test_trace_ext.skel.h"
+#include "test_trace_ext_tracing.skel.h"
+
+static __u32 duration;
+
+void test_trace_ext(void)
+{
+ struct test_pkt_md_access *skel_pkt = NULL;
+ struct test_trace_ext_tracing *skel_trace = NULL;
+ struct test_trace_ext_tracing__bss *bss_trace;
+ struct test_trace_ext *skel_ext = NULL;
+ struct test_trace_ext__bss *bss_ext;
+ int err, pkt_fd, ext_fd;
+ struct bpf_program *prog;
+ char buf[100];
+ __u32 retval;
+ __u64 len;
+
+ /* open/load/attach test_pkt_md_access */
+ skel_pkt = test_pkt_md_access__open_and_load();
+ if (CHECK(!skel_pkt, "setup", "classifier/test_pkt_md_access open failed\n"))
+ goto cleanup;
+
+ err = test_pkt_md_access__attach(skel_pkt);
+ if (CHECK(err, "setup", "classifier/test_pkt_md_access attach failed: %d\n", err))
+ goto cleanup;
+
+ prog = skel_pkt->progs.test_pkt_md_access;
+ pkt_fd = bpf_program__fd(prog);
+
+ /* open extension */
+ skel_ext = test_trace_ext__open();
+ if (CHECK(!skel_ext, "setup", "freplace/test_pkt_md_access open failed\n"))
+ goto cleanup;
+
+ /* set extension's attach target - test_pkt_md_access */
+ prog = skel_ext->progs.test_pkt_md_access_new;
+ bpf_program__set_attach_target(prog, pkt_fd, "test_pkt_md_access");
+
+ /* load/attach extension */
+ err = test_trace_ext__load(skel_ext);
+ if (CHECK(err, "setup", "freplace/test_pkt_md_access load failed\n")) {
+ libbpf_strerror(err, buf, sizeof(buf));
+ fprintf(stderr, "%s\n", buf);
+ goto cleanup;
+ }
+
+ err = test_trace_ext__attach(skel_ext);
+ if (CHECK(err, "setup", "freplace/test_pkt_md_access attach failed: %d\n", err))
+ goto cleanup;
+
+ prog = skel_ext->progs.test_pkt_md_access_new;
+ ext_fd = bpf_program__fd(prog);
+
+ /* open tracing */
+ skel_trace = test_trace_ext_tracing__open();
+ if (CHECK(!skel_trace, "setup", "tracing/test_pkt_md_access_new open failed\n"))
+ goto cleanup;
+
+ /* set tracing's attach target - fentry */
+ prog = skel_trace->progs.fentry;
+ bpf_program__set_attach_target(prog, ext_fd, "test_pkt_md_access_new");
+
+ /* set tracing's attach target - fexit */
+ prog = skel_trace->progs.fexit;
+ bpf_program__set_attach_target(prog, ext_fd, "test_pkt_md_access_new");
+
+ /* load/attach tracing */
+ err = test_trace_ext_tracing__load(skel_trace);
+ if (CHECK(err, "setup", "tracing/test_pkt_md_access_new load failed\n")) {
+ libbpf_strerror(err, buf, sizeof(buf));
+ fprintf(stderr, "%s\n", buf);
+ goto cleanup;
+ }
+
+ err = test_trace_ext_tracing__attach(skel_trace);
+ if (CHECK(err, "setup", "tracing/test_pkt_md_access_new attach failed: %d\n", err))
+ goto cleanup;
+
+ /* trigger the test */
+ err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
+ NULL, NULL, &retval, &duration);
+ CHECK(err || retval, "run", "err %d errno %d retval %d\n", err, errno, retval);
+
+ bss_ext = skel_ext->bss;
+ bss_trace = skel_trace->bss;
+
+ len = bss_ext->ext_called;
+
+ CHECK(bss_ext->ext_called == 0,
+ "check", "failed to trigger freplace/test_pkt_md_access\n");
+ CHECK(bss_trace->fentry_called != len,
+ "check", "failed to trigger fentry/test_pkt_md_access_new\n");
+ CHECK(bss_trace->fexit_called != len,
+ "check", "failed to trigger fexit/test_pkt_md_access_new\n");
+
+cleanup:
+ test_trace_ext_tracing__destroy(skel_trace);
+ test_trace_ext__destroy(skel_ext);
+ test_pkt_md_access__destroy(skel_pkt);
+}
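
Here the extension program is pointed at its target with bpf_program__set_attach_target() before load. libbpf from this same development cycle also gained bpf_program__attach_freplace(), which defers the choice to attach time, so one loaded extension can be attached to different targets. A hedged sketch, reusing the function name from the test above:

    /* Sketch: attach an already-loaded freplace program, choosing the
     * target program fd and function name at attach time.
     */
    static struct bpf_link *attach_ext(struct bpf_program *ext, int target_fd)
    {
            return bpf_program__attach_freplace(ext, target_fd,
                                                "test_pkt_md_access");
    }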
diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
index f284f72158ef..0281095de266 100644
--- a/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>
+#include "test_xdp_noinline.skel.h"
void test_xdp_noinline(void)
{
- const char *file = "./test_xdp_noinline.o";
unsigned int nr_cpus = bpf_num_possible_cpus();
+ struct test_xdp_noinline *skel;
struct vip key = {.protocol = 6};
struct vip_meta {
__u32 flags;
@@ -24,59 +25,43 @@ void test_xdp_noinline(void)
__u8 flags;
} real_def = {.dst = MAGIC_VAL};
__u32 ch_key = 11, real_num = 3;
- __u32 duration, retval, size;
- int err, i, prog_fd, map_fd;
+ __u32 duration = 0, retval, size;
+ int err, i;
__u64 bytes = 0, pkts = 0;
- struct bpf_object *obj;
char buf[128];
u32 *magic = (u32 *)buf;
- err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
- if (CHECK_FAIL(err))
+ skel = test_xdp_noinline__open_and_load();
+ if (CHECK(!skel, "skel_open_and_load", "failed\n"))
return;
- map_fd = bpf_find_map(__func__, obj, "vip_map");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &key, &value, 0);
+ bpf_map_update_elem(bpf_map__fd(skel->maps.vip_map), &key, &value, 0);
+ bpf_map_update_elem(bpf_map__fd(skel->maps.ch_rings), &ch_key, &real_num, 0);
+ bpf_map_update_elem(bpf_map__fd(skel->maps.reals), &real_num, &real_def, 0);
- map_fd = bpf_find_map(__func__, obj, "ch_rings");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &ch_key, &real_num, 0);
-
- map_fd = bpf_find_map(__func__, obj, "reals");
- if (map_fd < 0)
- goto out;
- bpf_map_update_elem(map_fd, &real_num, &real_def, 0);
-
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4),
+ err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v4),
+ NUM_ITER, &pkt_v4, sizeof(pkt_v4),
buf, &size, &retval, &duration);
CHECK(err || retval != 1 || size != 54 ||
*magic != MAGIC_VAL, "ipv4",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
- err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6),
+ err = bpf_prog_test_run(bpf_program__fd(skel->progs.balancer_ingress_v6),
+ NUM_ITER, &pkt_v6, sizeof(pkt_v6),
buf, &size, &retval, &duration);
CHECK(err || retval != 1 || size != 74 ||
*magic != MAGIC_VAL, "ipv6",
"err %d errno %d retval %d size %d magic %x\n",
err, errno, retval, size, *magic);
- map_fd = bpf_find_map(__func__, obj, "stats");
- if (map_fd < 0)
- goto out;
- bpf_map_lookup_elem(map_fd, &stats_key, stats);
+ bpf_map_lookup_elem(bpf_map__fd(skel->maps.stats), &stats_key, stats);
for (i = 0; i < nr_cpus; i++) {
bytes += stats[i].bytes;
pkts += stats[i].pkts;
}
- if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 ||
- pkts != NUM_ITER * 2)) {
- printf("test_xdp_noinline:FAIL:stats %lld %lld\n",
- bytes, pkts);
- }
-out:
- bpf_object__close(obj);
+ CHECK(bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2,
+ "stats", "bytes %lld pkts %lld\n",
+ (unsigned long long)bytes, (unsigned long long)pkts);
+ test_xdp_noinline__destroy(skel);
}
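
The summation loop at the end exists because the stats map is per-CPU: a single userspace lookup returns one value slot for every possible CPU. A standalone sketch of that convention, with the struct layout matching the test's anonymous stats struct:

    #include <errno.h>
    #include <linux/types.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    struct stats { __u64 bytes, pkts; };

    /* Sketch: read one key of a BPF_MAP_TYPE_PERCPU_ARRAY and sum the
     * per-CPU slots into a single total.
     */
    static int sum_stats(int map_fd, __u32 key, struct stats *total)
    {
            int i, nr_cpus = bpf_num_possible_cpus();

            if (nr_cpus < 0)
                    return nr_cpus;

            struct stats vals[nr_cpus];     /* one slot per possible CPU */

            if (bpf_map_lookup_elem(map_fd, &key, vals))
                    return -errno;

            total->bytes = total->pkts = 0;
            for (i = 0; i < nr_cpus; i++) {
                    total->bytes += vals[i].bytes;
                    total->pkts += vals[i].pkts;
            }
            return 0;
    }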
diff --git a/tools/testing/selftests/bpf/progs/bpf_cubic.c b/tools/testing/selftests/bpf/progs/bpf_cubic.c
index ef574087f1e1..6939bfd8690f 100644
--- a/tools/testing/selftests/bpf/progs/bpf_cubic.c
+++ b/tools/testing/selftests/bpf/progs/bpf_cubic.c
@@ -15,6 +15,8 @@
*/
#include <linux/bpf.h>
+#include <linux/stddef.h>
+#include <linux/tcp.h>
#include "bpf_tcp_helpers.h"
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
index 3fb4260570b1..4dc1a967776a 100644
--- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c
+++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c
@@ -9,6 +9,8 @@
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index de6de9221518..5a65f6b51377 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -118,18 +118,18 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
switch (proto) {
case bpf_htons(ETH_P_IP):
- bpf_tail_call(skb, &jmp_table, IP);
+ bpf_tail_call_static(skb, &jmp_table, IP);
break;
case bpf_htons(ETH_P_IPV6):
- bpf_tail_call(skb, &jmp_table, IPV6);
+ bpf_tail_call_static(skb, &jmp_table, IPV6);
break;
case bpf_htons(ETH_P_MPLS_MC):
case bpf_htons(ETH_P_MPLS_UC):
- bpf_tail_call(skb, &jmp_table, MPLS);
+ bpf_tail_call_static(skb, &jmp_table, MPLS);
break;
case bpf_htons(ETH_P_8021Q):
case bpf_htons(ETH_P_8021AD):
- bpf_tail_call(skb, &jmp_table, VLAN);
+ bpf_tail_call_static(skb, &jmp_table, VLAN);
break;
default:
/* Protocol not supported */
@@ -246,10 +246,10 @@ static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr)
switch (nexthdr) {
case IPPROTO_HOPOPTS:
case IPPROTO_DSTOPTS:
- bpf_tail_call(skb, &jmp_table, IPV6OP);
+ bpf_tail_call_static(skb, &jmp_table, IPV6OP);
break;
case IPPROTO_FRAGMENT:
- bpf_tail_call(skb, &jmp_table, IPV6FR);
+ bpf_tail_call_static(skb, &jmp_table, IPV6FR);
break;
default:
return parse_ip_proto(skb, nexthdr);
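
bpf_tail_call_static() is a bpf_helpers.h wrapper with the same semantics as bpf_tail_call(), except that the slot must be a compile-time constant; that guarantee lets the JIT emit a direct jump instead of a retpolined indirect call. A minimal sketch of the pattern, with section name and slot layout purely illustrative:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
            __uint(max_entries, 1);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, sizeof(__u32));
    } jmp_table SEC(".maps");

    enum { SLOT_IP = 0 };   /* constant slot, required by the _static form */

    SEC("classifier")
    int entry(struct __sk_buff *skb)
    {
            bpf_tail_call_static(skb, &jmp_table, SLOT_IP);
            return 0;       /* reached only if the tail call fails */
    }

    char _license[] SEC("license") = "GPL";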
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter.h b/tools/testing/selftests/bpf/progs/bpf_iter.h
index c196280df90d..6a1255465fd6 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter.h
+++ b/tools/testing/selftests/bpf/progs/bpf_iter.h
@@ -13,6 +13,12 @@
#define udp6_sock udp6_sock___not_used
#define bpf_iter__bpf_map_elem bpf_iter__bpf_map_elem___not_used
#define bpf_iter__bpf_sk_storage_map bpf_iter__bpf_sk_storage_map___not_used
+#define bpf_iter__sockmap bpf_iter__sockmap___not_used
+#define btf_ptr btf_ptr___not_used
+#define BTF_F_COMPACT BTF_F_COMPACT___not_used
+#define BTF_F_NONAME BTF_F_NONAME___not_used
+#define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
+#define BTF_F_ZERO BTF_F_ZERO___not_used
#include "vmlinux.h"
#undef bpf_iter_meta
#undef bpf_iter__bpf_map
@@ -26,6 +32,12 @@
#undef udp6_sock
#undef bpf_iter__bpf_map_elem
#undef bpf_iter__bpf_sk_storage_map
+#undef bpf_iter__sockmap
+#undef btf_ptr
+#undef BTF_F_COMPACT
+#undef BTF_F_NONAME
+#undef BTF_F_PTR_RAW
+#undef BTF_F_ZERO
struct bpf_iter_meta {
struct seq_file *seq;
@@ -96,3 +108,23 @@ struct bpf_iter__bpf_sk_storage_map {
struct sock *sk;
void *value;
};
+
+struct bpf_iter__sockmap {
+ struct bpf_iter_meta *meta;
+ struct bpf_map *map;
+ void *key;
+ struct sock *sk;
+};
+
+struct btf_ptr {
+ void *ptr;
+ __u32 type_id;
+ __u32 flags;
+};
+
+enum {
+ BTF_F_COMPACT = (1ULL << 0),
+ BTF_F_NONAME = (1ULL << 1),
+ BTF_F_PTR_RAW = (1ULL << 2),
+ BTF_F_ZERO = (1ULL << 3),
+};
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
new file mode 100644
index 000000000000..f3af0e30cead
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_sockmap.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Cloudflare */
+#include "bpf_iter.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 64);
+ __type(key, __u32);
+ __type(value, __u64);
+} sockmap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 64);
+ __type(key, __u32);
+ __type(value, __u64);
+} sockhash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 64);
+ __type(key, __u32);
+ __type(value, __u64);
+} dst SEC(".maps");
+
+__u32 elems = 0;
+__u32 socks = 0;
+
+SEC("iter/sockmap")
+int copy(struct bpf_iter__sockmap *ctx)
+{
+ struct sock *sk = ctx->sk;
+ __u32 tmp, *key = ctx->key;
+ int ret;
+
+ if (!key)
+ return 0;
+
+ elems++;
+
+ /* We need a temporary buffer on the stack, since the verifier doesn't
+ * let us use the pointer from the context as an argument to the helper.
+ */
+ tmp = *key;
+
+ if (sk) {
+ socks++;
+ return bpf_map_update_elem(&dst, &tmp, sk, 0) != 0;
+ }
+
+ ret = bpf_map_delete_elem(&dst, &tmp);
+ return ret && ret != -ENOENT;
+}
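
An iter/sockmap program such as this produces nothing on its own; userspace must instantiate the iterator and drive it by reading. A hedged sketch of the generic consumption loop (map-bound iterators like this one normally also need attach opts carrying the target map fd, elided here for brevity):

    #include <unistd.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    /* Sketch: create and drain one instance of a BPF iterator. */
    static int drain_iter(struct bpf_program *prog)
    {
            struct bpf_link *link;
            char buf[256];
            int iter_fd, len;

            link = bpf_program__attach_iter(prog, NULL /* iter opts */);
            if (libbpf_get_error(link))
                    return -1;

            iter_fd = bpf_iter_create(bpf_link__fd(link));
            if (iter_fd < 0) {
                    bpf_link__destroy(link);
                    return iter_fd;
            }

            while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
                    ;       /* each read() resumes the iteration */

            close(iter_fd);
            bpf_link__destroy(link);
            return len;     /* 0 on a clean end of iteration */
    }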
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
new file mode 100644
index 000000000000..a1ddc36f13ec
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_btf.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Oracle and/or its affiliates. */
+#include "bpf_iter.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#include <errno.h>
+
+char _license[] SEC("license") = "GPL";
+
+long tasks = 0;
+long seq_err = 0;
+bool skip = false;
+
+SEC("iter/task")
+int dump_task_struct(struct bpf_iter__task *ctx)
+{
+ struct seq_file *seq = ctx->meta->seq;
+ struct task_struct *task = ctx->task;
+ static struct btf_ptr ptr = { };
+ long ret;
+
+#if __has_builtin(__builtin_btf_type_id)
+ ptr.type_id = bpf_core_type_id_kernel(struct task_struct);
+ ptr.ptr = task;
+
+ if (ctx->meta->seq_num == 0)
+ BPF_SEQ_PRINTF(seq, "Raw BTF task\n");
+
+ ret = bpf_seq_printf_btf(seq, &ptr, sizeof(ptr), 0);
+ switch (ret) {
+ case 0:
+ tasks++;
+ break;
+ case -ERANGE:
+ /* NULL task or task->fs, don't count it as an error. */
+ break;
+ case -E2BIG:
+ return 1;
+ default:
+ seq_err = ret;
+ break;
+ }
+#else
+ skip = true;
+#endif
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
index 8b787baa2654..b2f7c7c5f952 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_task_file.c
@@ -6,6 +6,9 @@
char _license[] SEC("license") = "GPL";
+int count = 0;
+int tgid = 0;
+
SEC("iter/task_file")
int dump_task_file(struct bpf_iter__task_file *ctx)
{
@@ -17,8 +20,13 @@ int dump_task_file(struct bpf_iter__task_file *ctx)
if (task == (void *)0 || file == (void *)0)
return 0;
- if (ctx->meta->seq_num == 0)
+ if (ctx->meta->seq_num == 0) {
+ count = 0;
BPF_SEQ_PRINTF(seq, " tgid gid fd file\n");
+ }
+
+ if (tgid == task->tgid && task->tgid != task->pid)
+ count++;
BPF_SEQ_PRINTF(seq, "%8d %8d %8d %lx\n", task->tgid, task->pid, fd,
(long)file->f_op);
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval.c
new file mode 100644
index 000000000000..48e62f3f074f
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enumval x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___diff.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___diff.c
new file mode 100644
index 000000000000..53e5e5a76888
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___diff.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enumval___diff x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___err_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___err_missing.c
new file mode 100644
index 000000000000..d024fb2ac06e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___err_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enumval___err_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___val3_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___val3_missing.c
new file mode 100644
index 000000000000..9de6595d250c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_enumval___val3_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_enumval___val3_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_size___err_ambiguous.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___err_ambiguous.c
new file mode 100644
index 000000000000..f3e9904df9c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_size___err_ambiguous.c
@@ -0,0 +1,4 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_size___err_ambiguous1 x,
+ struct core_reloc_size___err_ambiguous2 y) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based.c
new file mode 100644
index 000000000000..fc3f69e58c71
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___all_missing.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___all_missing.c
new file mode 100644
index 000000000000..51511648b4ec
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___all_missing.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based___all_missing x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff_sz.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff_sz.c
new file mode 100644
index 000000000000..67db3dceb279
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___diff_sz.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based___diff_sz x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___fn_wrong_args.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___fn_wrong_args.c
new file mode 100644
index 000000000000..b357fc65431d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___fn_wrong_args.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based___fn_wrong_args x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___incompat.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___incompat.c
new file mode 100644
index 000000000000..8ddf20d33d9e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_based___incompat.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_based___incompat x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_id.c
new file mode 100644
index 000000000000..abbe5bddcefd
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_id.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_id x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf__core_reloc_type_id___missing_targets.c b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_id___missing_targets.c
new file mode 100644
index 000000000000..24e7caf4f013
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf__core_reloc_type_id___missing_targets.c
@@ -0,0 +1,3 @@
+#include "core_reloc_types.h"
+
+void f(struct core_reloc_type_id___missing_targets x) {}
diff --git a/tools/testing/selftests/bpf/progs/btf_ptr.h b/tools/testing/selftests/bpf/progs/btf_ptr.h
new file mode 100644
index 000000000000..c3c9797c67db
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/btf_ptr.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020, Oracle and/or its affiliates. */
+/* "undefine" structs in vmlinux.h, because we "override" them below */
+#define btf_ptr btf_ptr___not_used
+#define BTF_F_COMPACT BTF_F_COMPACT___not_used
+#define BTF_F_NONAME BTF_F_NONAME___not_used
+#define BTF_F_PTR_RAW BTF_F_PTR_RAW___not_used
+#define BTF_F_ZERO BTF_F_ZERO___not_used
+#include "vmlinux.h"
+#undef btf_ptr
+#undef BTF_F_COMPACT
+#undef BTF_F_NONAME
+#undef BTF_F_PTR_RAW
+#undef BTF_F_ZERO
+
+struct btf_ptr {
+ void *ptr;
+ __u32 type_id;
+ __u32 flags;
+};
+
+enum {
+ BTF_F_COMPACT = (1ULL << 0),
+ BTF_F_NONAME = (1ULL << 1),
+ BTF_F_PTR_RAW = (1ULL << 2),
+ BTF_F_ZERO = (1ULL << 3),
+};
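
These mirrored btf_ptr/BTF_F_* definitions exist so programs can call the new bpf_snprintf_btf() and bpf_seq_printf_btf() helpers, which render a kernel object as a type-annotated string from its BTF type id. A minimal sketch of the calling convention, assuming this header plus the usual CO-RE includes; buffer size and tracepoint are illustrative:

    #include "btf_ptr.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>
    #include <bpf/bpf_core_read.h>

    SEC("tp_btf/netif_receive_skb")
    int BPF_PROG(show_skb, struct sk_buff *skb)
    {
            static char out[2048];  /* far too big for the BPF stack */
            struct btf_ptr p = {
                    .ptr = skb,
                    .type_id = bpf_core_type_id_kernel(struct sk_buff),
            };

            /* writes e.g. "(struct sk_buff){.len = ...,...}" into out */
            bpf_snprintf_btf(out, sizeof(out), &p, sizeof(p), BTF_F_COMPACT);
            return 0;
    }

    char _license[] SEC("license") = "GPL";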
diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c
index b1b2773c0b9d..a943d394fd3a 100644
--- a/tools/testing/selftests/bpf/progs/connect4_prog.c
+++ b/tools/testing/selftests/bpf/progs/connect4_prog.c
@@ -23,6 +23,10 @@
#define TCP_CA_NAME_MAX 16
#endif
+#ifndef TCP_NOTSENT_LOWAT
+#define TCP_NOTSENT_LOWAT 25
+#endif
+
#ifndef IFNAMSIZ
#define IFNAMSIZ 16
#endif
@@ -128,6 +132,18 @@ static __inline int set_keepalive(struct bpf_sock_addr *ctx)
return 0;
}
+static __inline int set_notsent_lowat(struct bpf_sock_addr *ctx)
+{
+ int lowat = 65535;
+
+ if (ctx->type == SOCK_STREAM) {
+ if (bpf_setsockopt(ctx, SOL_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat)))
+ return 1;
+ }
+
+ return 0;
+}
+
SEC("cgroup/connect4")
int connect_v4_prog(struct bpf_sock_addr *ctx)
{
@@ -148,6 +164,9 @@ int connect_v4_prog(struct bpf_sock_addr *ctx)
if (set_keepalive(ctx))
return 0;
+ if (set_notsent_lowat(ctx))
+ return 0;
+
if (ctx->type != SOCK_STREAM && ctx->type != SOCK_DGRAM)
return 0;
else if (ctx->type == SOCK_STREAM)
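
TCP_NOTSENT_LOWAT caps how much not-yet-sent data may sit in the socket write queue before poll() stops reporting the socket writable; the bpf_setsockopt() call above applies it from inside the connect hook. For comparison, the plain userspace form of the same option:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static int set_lowat(int fd)
    {
            int lowat = 65535;      /* same value the BPF program uses */

            return setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
                              &lowat, sizeof(lowat));
    }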
diff --git a/tools/testing/selftests/bpf/progs/core_reloc_types.h b/tools/testing/selftests/bpf/progs/core_reloc_types.h
index 69139ed66216..e6e616cb7bc9 100644
--- a/tools/testing/selftests/bpf/progs/core_reloc_types.h
+++ b/tools/testing/selftests/bpf/progs/core_reloc_types.h
@@ -652,7 +652,7 @@ struct core_reloc_misc_extensible {
};
/*
- * EXISTENCE
+ * FIELD EXISTENCE
*/
struct core_reloc_existence_output {
int a_exists;
@@ -809,3 +809,353 @@ struct core_reloc_size___diff_sz {
void *ptr_field;
enum { OTHER_VALUE = 0xFFFFFFFFFFFFFFFF } enum_field;
};
+
+/* Error case of two candidates with a field (int_field) at the same
+ * offset but with differing final relocation values: size 4 vs size 1.
+ */
+struct core_reloc_size___err_ambiguous1 {
+ /* int at offset 0 */
+ int int_field;
+
+ struct { int x; } struct_field;
+ union { int x; } union_field;
+ int arr_field[4];
+ void *ptr_field;
+ enum { VALUE___1 = 123 } enum_field;
+};
+
+struct core_reloc_size___err_ambiguous2 {
+ /* char at offset 0 */
+ char int_field;
+
+ struct { int x; } struct_field;
+ union { int x; } union_field;
+ int arr_field[4];
+ void *ptr_field;
+ enum { VALUE___2 = 123 } enum_field;
+};
+
+/*
+ * TYPE EXISTENCE & SIZE
+ */
+struct core_reloc_type_based_output {
+ bool struct_exists;
+ bool union_exists;
+ bool enum_exists;
+ bool typedef_named_struct_exists;
+ bool typedef_anon_struct_exists;
+ bool typedef_struct_ptr_exists;
+ bool typedef_int_exists;
+ bool typedef_enum_exists;
+ bool typedef_void_ptr_exists;
+ bool typedef_func_proto_exists;
+ bool typedef_arr_exists;
+
+ int struct_sz;
+ int union_sz;
+ int enum_sz;
+ int typedef_named_struct_sz;
+ int typedef_anon_struct_sz;
+ int typedef_struct_ptr_sz;
+ int typedef_int_sz;
+ int typedef_enum_sz;
+ int typedef_void_ptr_sz;
+ int typedef_func_proto_sz;
+ int typedef_arr_sz;
+};
+
+struct a_struct {
+ int x;
+};
+
+union a_union {
+ int y;
+ int z;
+};
+
+typedef struct a_struct named_struct_typedef;
+
+typedef struct { int x, y, z; } anon_struct_typedef;
+
+typedef struct {
+ int a, b, c;
+} *struct_ptr_typedef;
+
+enum an_enum {
+ AN_ENUM_VAL1 = 1,
+ AN_ENUM_VAL2 = 2,
+ AN_ENUM_VAL3 = 3,
+};
+
+typedef int int_typedef;
+
+typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
+
+typedef void *void_ptr_typedef;
+
+typedef int (*func_proto_typedef)(long);
+
+typedef char arr_typedef[20];
+
+struct core_reloc_type_based {
+ struct a_struct f1;
+ union a_union f2;
+ enum an_enum f3;
+ named_struct_typedef f4;
+ anon_struct_typedef f5;
+ struct_ptr_typedef f6;
+ int_typedef f7;
+ enum_typedef f8;
+ void_ptr_typedef f9;
+ func_proto_typedef f10;
+ arr_typedef f11;
+};
+
+/* no types in target */
+struct core_reloc_type_based___all_missing {
+};
+
+/* different type sizes, extra modifiers, anon vs named enums, etc */
+struct a_struct___diff_sz {
+ long x;
+ int y;
+ char z;
+};
+
+union a_union___diff_sz {
+ char yy;
+ char zz;
+};
+
+typedef struct a_struct___diff_sz named_struct_typedef___diff_sz;
+
+typedef struct { long xx, yy, zzz; } anon_struct_typedef___diff_sz;
+
+typedef struct {
+ char aa[1], bb[2], cc[3];
+} *struct_ptr_typedef___diff_sz;
+
+enum an_enum___diff_sz {
+ AN_ENUM_VAL1___diff_sz = 0x123412341234,
+ AN_ENUM_VAL2___diff_sz = 2,
+};
+
+typedef unsigned long int_typedef___diff_sz;
+
+typedef enum an_enum___diff_sz enum_typedef___diff_sz;
+
+typedef const void * const void_ptr_typedef___diff_sz;
+
+typedef int_typedef___diff_sz (*func_proto_typedef___diff_sz)(char);
+
+typedef int arr_typedef___diff_sz[2];
+
+struct core_reloc_type_based___diff_sz {
+ struct a_struct___diff_sz f1;
+ union a_union___diff_sz f2;
+ enum an_enum___diff_sz f3;
+ named_struct_typedef___diff_sz f4;
+ anon_struct_typedef___diff_sz f5;
+ struct_ptr_typedef___diff_sz f6;
+ int_typedef___diff_sz f7;
+ enum_typedef___diff_sz f8;
+ void_ptr_typedef___diff_sz f9;
+ func_proto_typedef___diff_sz f10;
+ arr_typedef___diff_sz f11;
+};
+
+/* incompatibilities between target and local types */
+union a_struct___incompat { /* union instead of struct */
+ int x;
+};
+
+struct a_union___incompat { /* struct instead of union */
+ int y;
+ int z;
+};
+
+/* typedef to union, not to struct */
+typedef union a_struct___incompat named_struct_typedef___incompat;
+
+/* typedef to void pointer, instead of struct */
+typedef void *anon_struct_typedef___incompat;
+
+/* extra pointer indirection */
+typedef struct {
+ int a, b, c;
+} **struct_ptr_typedef___incompat;
+
+/* typedef of a struct with int, instead of int */
+typedef struct { int x; } int_typedef___incompat;
+
+/* typedef to func_proto, instead of enum */
+typedef int (*enum_typedef___incompat)(void);
+
+/* pointer to char instead of void */
+typedef char *void_ptr_typedef___incompat;
+
+/* void return type instead of int */
+typedef void (*func_proto_typedef___incompat)(long);
+
+/* multi-dimensional array instead of a single-dimensional */
+typedef int arr_typedef___incompat[20][2];
+
+struct core_reloc_type_based___incompat {
+ union a_struct___incompat f1;
+ struct a_union___incompat f2;
+ /* the only valid one is enum, to check that something still succeeds */
+ enum an_enum f3;
+ named_struct_typedef___incompat f4;
+ anon_struct_typedef___incompat f5;
+ struct_ptr_typedef___incompat f6;
+ int_typedef___incompat f7;
+ enum_typedef___incompat f8;
+ void_ptr_typedef___incompat f9;
+ func_proto_typedef___incompat f10;
+ arr_typedef___incompat f11;
+};
+
+/* func_proto with incompatible signature */
+typedef void (*func_proto_typedef___fn_wrong_ret1)(long);
+typedef int * (*func_proto_typedef___fn_wrong_ret2)(long);
+typedef struct { int x; } int_struct_typedef;
+typedef int_struct_typedef (*func_proto_typedef___fn_wrong_ret3)(long);
+typedef int (*func_proto_typedef___fn_wrong_arg)(void *);
+typedef int (*func_proto_typedef___fn_wrong_arg_cnt1)(long, long);
+typedef int (*func_proto_typedef___fn_wrong_arg_cnt2)(void);
+
+struct core_reloc_type_based___fn_wrong_args {
+ /* one valid type to make sure relos still work */
+ struct a_struct f1;
+ func_proto_typedef___fn_wrong_ret1 f2;
+ func_proto_typedef___fn_wrong_ret2 f3;
+ func_proto_typedef___fn_wrong_ret3 f4;
+ func_proto_typedef___fn_wrong_arg f5;
+ func_proto_typedef___fn_wrong_arg_cnt1 f6;
+ func_proto_typedef___fn_wrong_arg_cnt2 f7;
+};
+
+/*
+ * TYPE ID MAPPING (LOCAL AND TARGET)
+ */
+struct core_reloc_type_id_output {
+ int local_anon_struct;
+ int local_anon_union;
+ int local_anon_enum;
+ int local_anon_func_proto_ptr;
+ int local_anon_void_ptr;
+ int local_anon_arr;
+
+ int local_struct;
+ int local_union;
+ int local_enum;
+ int local_int;
+ int local_struct_typedef;
+ int local_func_proto_typedef;
+ int local_arr_typedef;
+
+ int targ_struct;
+ int targ_union;
+ int targ_enum;
+ int targ_int;
+ int targ_struct_typedef;
+ int targ_func_proto_typedef;
+ int targ_arr_typedef;
+};
+
+struct core_reloc_type_id {
+ struct a_struct f1;
+ union a_union f2;
+ enum an_enum f3;
+ named_struct_typedef f4;
+ func_proto_typedef f5;
+ arr_typedef f6;
+};
+
+struct core_reloc_type_id___missing_targets {
+ /* nothing */
+};
+
+/*
+ * ENUMERATOR VALUE EXISTENCE AND VALUE RELOCATION
+ */
+struct core_reloc_enumval_output {
+ bool named_val1_exists;
+ bool named_val2_exists;
+ bool named_val3_exists;
+ bool anon_val1_exists;
+ bool anon_val2_exists;
+ bool anon_val3_exists;
+
+ int named_val1;
+ int named_val2;
+ int anon_val1;
+ int anon_val2;
+};
+
+enum named_enum {
+ NAMED_ENUM_VAL1 = 1,
+ NAMED_ENUM_VAL2 = 2,
+ NAMED_ENUM_VAL3 = 3,
+};
+
+typedef enum {
+ ANON_ENUM_VAL1 = 0x10,
+ ANON_ENUM_VAL2 = 0x20,
+ ANON_ENUM_VAL3 = 0x30,
+} anon_enum;
+
+struct core_reloc_enumval {
+ enum named_enum f1;
+ anon_enum f2;
+};
+
+/* differing enumerator values */
+enum named_enum___diff {
+ NAMED_ENUM_VAL1___diff = 101,
+ NAMED_ENUM_VAL2___diff = 202,
+ NAMED_ENUM_VAL3___diff = 303,
+};
+
+typedef enum {
+ ANON_ENUM_VAL1___diff = 0x11,
+ ANON_ENUM_VAL2___diff = 0x22,
+ ANON_ENUM_VAL3___diff = 0x33,
+} anon_enum___diff;
+
+struct core_reloc_enumval___diff {
+ enum named_enum___diff f1;
+ anon_enum___diff f2;
+};
+
+/* missing (optional) third enum value */
+enum named_enum___val3_missing {
+ NAMED_ENUM_VAL1___val3_missing = 111,
+ NAMED_ENUM_VAL2___val3_missing = 222,
+};
+
+typedef enum {
+ ANON_ENUM_VAL1___val3_missing = 0x111,
+ ANON_ENUM_VAL2___val3_missing = 0x222,
+} anon_enum___val3_missing;
+
+struct core_reloc_enumval___val3_missing {
+ enum named_enum___val3_missing f1;
+ anon_enum___val3_missing f2;
+};
+
+/* missing (mandatory) second enum value, should fail */
+enum named_enum___err_missing {
+ NAMED_ENUM_VAL1___err_missing = 1,
+ NAMED_ENUM_VAL3___err_missing = 3,
+};
+
+typedef enum {
+ ANON_ENUM_VAL1___err_missing = 0x111,
+ ANON_ENUM_VAL3___err_missing = 0x222,
+} anon_enum___err_missing;
+
+struct core_reloc_enumval___err_missing {
+ enum named_enum___err_missing f1;
+ anon_enum___err_missing f2;
+};
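
The ___suffixed variants above act as stand-in "kernel" definitions that the test harness swaps into the target BTF; BPF programs probe them through the CO-RE macros in bpf_core_read.h. A sketch of the relocation kinds these types exercise, writing into the output structs defined above:

    #include "core_reloc_types.h"
    #include <bpf/bpf_core_read.h>

    /* Sketch: type- and enum-value-based CO-RE relocations; all four
     * are resolved against the target BTF at program load time.
     */
    static void probe(struct core_reloc_type_based_output *out,
                      struct core_reloc_enumval_output *eout)
    {
            out->struct_exists = bpf_core_type_exists(struct a_struct);
            out->struct_sz = bpf_core_type_size(struct a_struct);

            eout->named_val1_exists =
                    bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL1);
            eout->named_val1 =
                    bpf_core_enum_value(enum named_enum, NAMED_ENUM_VAL1);
    }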
diff --git a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
index 98e1efe14549..49a84a3a2306 100644
--- a/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
+++ b/tools/testing/selftests/bpf/progs/fexit_bpf2bpf.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/stddef.h>
+#include <linux/if_ether.h>
#include <linux/ipv6.h>
#include <linux/bpf.h>
+#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include <bpf/bpf_tracing.h>
@@ -151,4 +153,28 @@ int new_get_constant(long val)
test_get_constant = 1;
return test_get_constant; /* original get_constant() returns val - 122 */
}
+
+__u64 test_pkt_write_access_subprog = 0;
+SEC("freplace/test_pkt_write_access_subprog")
+int new_test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
+{
+ void *data = (void *)(long)skb->data;
+ void *data_end = (void *)(long)skb->data_end;
+ struct tcphdr *tcp;
+
+ if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
+ return -1;
+
+ tcp = data + off;
+ if (tcp + 1 > data_end)
+ return -1;
+
+ /* make modifications to the packet data */
+ tcp->check++;
+ tcp->syn = 0;
+
+ test_pkt_write_access_subprog = 1;
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/fmod_ret_freplace.c b/tools/testing/selftests/bpf/progs/fmod_ret_freplace.c
new file mode 100644
index 000000000000..c8943ccee6c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/fmod_ret_freplace.c
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+volatile __u64 test_fmod_ret = 0;
+SEC("fmod_ret/security_new_get_constant")
+int BPF_PROG(fmod_ret_test, long val, int ret)
+{
+ test_fmod_ret = 1;
+ return 120;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_attach_probe.c b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
new file mode 100644
index 000000000000..bb2a77c5b62b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_attach_probe.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/ptrace.h>
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define VAR_NUM 2
+
+struct hmap_elem {
+ struct bpf_spin_lock lock;
+ int var[VAR_NUM];
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, struct hmap_elem);
+} hash_map SEC(".maps");
+
+SEC("freplace/handle_kprobe")
+int new_handle_kprobe(struct pt_regs *ctx)
+{
+ struct hmap_elem zero = {}, *val;
+ int key = 0;
+
+ val = bpf_map_lookup_elem(&hash_map, &key);
+ if (!val)
+ return 1;
+ /* spin_lock in hash map */
+ bpf_spin_lock(&val->lock);
+ val->var[0] = 99;
+ bpf_spin_unlock(&val->lock);
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c b/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
new file mode 100644
index 000000000000..68a5a9db928a
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_cls_redirect.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/pkt_cls.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_helpers.h>
+
+struct bpf_map_def SEC("maps") sock_map = {
+ .type = BPF_MAP_TYPE_SOCKMAP,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .max_entries = 2,
+};
+
+SEC("freplace/cls_redirect")
+int freplace_cls_redirect_test(struct __sk_buff *skb)
+{
+ int ret = 0;
+ const int zero = 0;
+ struct bpf_sock *sk;
+
+ sk = bpf_map_lookup_elem(&sock_map, &zero);
+ if (!sk)
+ return TC_ACT_SHOT;
+
+ ret = bpf_map_update_elem(&sock_map, &zero, sk, 0);
+ bpf_sk_release(sk);
+
+ return ret == 0 ? TC_ACT_OK : TC_ACT_SHOT;
+}
+
+char _license[] SEC("license") = "GPL";
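
This program declares its map with the legacy struct bpf_map_def SEC("maps") form; the other new programs in this series use BTF-defined maps, which also carry key/value type information into the kernel. The equivalent BTF-defined declaration, for reference (needs <bpf/bpf_helpers.h>):

    struct {
            __uint(type, BPF_MAP_TYPE_SOCKMAP);
            __uint(max_entries, 2);
            __type(key, int);
            __type(value, int);
    } sock_map SEC(".maps");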
diff --git a/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
new file mode 100644
index 000000000000..544e5ac90461
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_connect_v4_prog.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/stddef.h>
+#include <linux/ipv6.h>
+#include <linux/bpf.h>
+#include <linux/in.h>
+#include <sys/socket.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+SEC("freplace/connect_v4_prog")
+int new_connect_v4_prog(struct bpf_sock_addr *ctx)
+{
+ // return value that is in an invalid range
+ return 255;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/freplace_get_constant.c b/tools/testing/selftests/bpf/progs/freplace_get_constant.c
new file mode 100644
index 000000000000..705e4b64dfc2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/freplace_get_constant.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+volatile __u64 test_get_constant = 0;
+SEC("freplace/get_constant")
+int security_new_get_constant(long val)
+{
+ if (val != 123)
+ return 0;
+ test_get_constant = 1;
+ return test_get_constant; /* original get_constant() returns val - 122 */
+}
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/local_storage.c b/tools/testing/selftests/bpf/progs/local_storage.c
new file mode 100644
index 000000000000..0758ba229ae0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/local_storage.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 Google LLC.
+ */
+
+#include <errno.h>
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+#define DUMMY_STORAGE_VALUE 0xdeadbeef
+
+int monitored_pid = 0;
+int inode_storage_result = -1;
+int sk_storage_result = -1;
+
+struct dummy_storage {
+ __u32 value;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_INODE_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct dummy_storage);
+} inode_storage_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
+ __type(key, int);
+ __type(value, struct dummy_storage);
+} sk_storage_map SEC(".maps");
+
+/* TODO Use vmlinux.h once BTF pruning for embedded types is fixed.
+ */
+struct sock {} __attribute__((preserve_access_index));
+struct sockaddr {} __attribute__((preserve_access_index));
+struct socket {
+ struct sock *sk;
+} __attribute__((preserve_access_index));
+
+struct inode {} __attribute__((preserve_access_index));
+struct dentry {
+ struct inode *d_inode;
+} __attribute__((preserve_access_index));
+struct file {
+ struct inode *f_inode;
+} __attribute__((preserve_access_index));
+
+
+SEC("lsm/inode_unlink")
+int BPF_PROG(unlink_hook, struct inode *dir, struct dentry *victim)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct dummy_storage *storage;
+
+ if (pid != monitored_pid)
+ return 0;
+
+ storage = bpf_inode_storage_get(&inode_storage_map, victim->d_inode, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
+ if (storage->value == DUMMY_STORAGE_VALUE)
+ inode_storage_result = -1;
+
+ inode_storage_result =
+ bpf_inode_storage_delete(&inode_storage_map, victim->d_inode);
+
+ return 0;
+}
+
+SEC("lsm/socket_bind")
+int BPF_PROG(socket_bind, struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct dummy_storage *storage;
+
+ if (pid != monitored_pid)
+ return 0;
+
+ storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
+ if (storage->value == DUMMY_STORAGE_VALUE)
+ sk_storage_result = -1;
+
+ sk_storage_result = bpf_sk_storage_delete(&sk_storage_map, sock->sk);
+ return 0;
+}
+
+SEC("lsm/socket_post_create")
+int BPF_PROG(socket_post_create, struct socket *sock, int family, int type,
+ int protocol, int kern)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct dummy_storage *storage;
+
+ if (pid != monitored_pid)
+ return 0;
+
+ storage = bpf_sk_storage_get(&sk_storage_map, sock->sk, 0,
+ BPF_SK_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
+ storage->value = DUMMY_STORAGE_VALUE;
+
+ return 0;
+}
+
+SEC("lsm/file_open")
+int BPF_PROG(file_open, struct file *file)
+{
+ __u32 pid = bpf_get_current_pid_tgid() >> 32;
+ struct dummy_storage *storage;
+
+ if (pid != monitored_pid)
+ return 0;
+
+ if (!file->f_inode)
+ return 0;
+
+ storage = bpf_inode_storage_get(&inode_storage_map, file->f_inode, 0,
+ BPF_LOCAL_STORAGE_GET_F_CREATE);
+ if (!storage)
+ return 0;
+
+ storage->value = DUMMY_STORAGE_VALUE;
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/lsm.c b/tools/testing/selftests/bpf/progs/lsm.c
index b4598d4bc4f7..ff4d343b94b5 100644
--- a/tools/testing/selftests/bpf/progs/lsm.c
+++ b/tools/testing/selftests/bpf/progs/lsm.c
@@ -9,6 +9,27 @@
#include <bpf/bpf_tracing.h>
#include <errno.h>
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} array SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} hash SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} lru_hash SEC(".maps");
+
char _license[] SEC("license") = "GPL";
int monitored_pid = 0;
@@ -36,13 +57,54 @@ int BPF_PROG(test_int_hook, struct vm_area_struct *vma,
return ret;
}
-SEC("lsm/bprm_committed_creds")
+SEC("lsm.s/bprm_committed_creds")
int BPF_PROG(test_void_hook, struct linux_binprm *bprm)
{
__u32 pid = bpf_get_current_pid_tgid() >> 32;
+ char args[64];
+ __u32 key = 0;
+ __u64 *value;
if (monitored_pid == pid)
bprm_count++;
+ bpf_copy_from_user(args, sizeof(args), (void *)bprm->vma->vm_mm->arg_start);
+ bpf_copy_from_user(args, sizeof(args), (void *)bprm->mm->arg_start);
+
+ value = bpf_map_lookup_elem(&array, &key);
+ if (value)
+ *value = 0;
+ value = bpf_map_lookup_elem(&hash, &key);
+ if (value)
+ *value = 0;
+ value = bpf_map_lookup_elem(&lru_hash, &key);
+ if (value)
+ *value = 0;
+
+ return 0;
+}
+SEC("lsm/task_free") /* lsm/ is ok, lsm.s/ fails */
+int BPF_PROG(test_task_free, struct task_struct *task)
+{
+ return 0;
+}
+
+int copy_test = 0;
+
+SEC("fentry.s/__x64_sys_setdomainname")
+int BPF_PROG(test_sys_setdomainname, struct pt_regs *regs)
+{
+ void *ptr = (void *)PT_REGS_PARM1(regs);
+ int len = PT_REGS_PARM2(regs);
+ int buf = 0;
+ long ret;
+
+ ret = bpf_copy_from_user(&buf, sizeof(buf), ptr);
+ if (len == -2 && ret == 0 && buf == 1234)
+ copy_test++;
+ if (len == -3 && ret == -EFAULT)
+ copy_test++;
+ if (len == -4 && ret == -EFAULT)
+ copy_test++;
return 0;
}
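
The ".s" suffix in lsm.s/ and fentry.s/ makes libbpf load those programs with BPF_F_SLEEPABLE, and only sleepable programs may call bpf_copy_from_user(), since reading user memory can fault and sleep. A minimal sketch of the contract the test exercises, assuming a vmlinux.h build environment:

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_tracing.h>

    SEC("lsm.s/bprm_committed_creds")
    int BPF_PROG(read_args, struct linux_binprm *bprm)
    {
            char args[64] = {};

            if (bpf_copy_from_user(args, sizeof(args),
                                   (void *)bprm->mm->arg_start))
                    return 0;       /* -EFAULT: user range not accessible */

            return 0;       /* void LSM hooks must return 0 */
    }

    char _license[] SEC("license") = "GPL";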
diff --git a/tools/testing/selftests/bpf/progs/map_ptr_kern.c b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
index 473665cac67e..c325405751e2 100644
--- a/tools/testing/selftests/bpf/progs/map_ptr_kern.c
+++ b/tools/testing/selftests/bpf/progs/map_ptr_kern.c
@@ -82,6 +82,14 @@ static inline int check_default(struct bpf_map *indirect,
return 1;
}
+static __noinline int
+check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
+{
+ VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
+ MAX_ENTRIES));
+ return 1;
+}
+
typedef struct {
int counter;
} atomic_t;
@@ -107,7 +115,7 @@ static inline int check_hash(void)
struct bpf_map *map = (struct bpf_map *)&m_hash;
int i;
- VERIFY(check_default(&hash->map, map));
+ VERIFY(check_default_noinline(&hash->map, map));
VERIFY(hash->n_buckets == MAX_ENTRIES);
VERIFY(hash->elem_size == 64);
@@ -589,7 +597,7 @@ static inline int check_stack(void)
return 1;
}
-struct bpf_sk_storage_map {
+struct bpf_local_storage_map {
struct bpf_map map;
} __attribute__((preserve_access_index));
@@ -602,8 +610,8 @@ struct {
static inline int check_sk_storage(void)
{
- struct bpf_sk_storage_map *sk_storage =
- (struct bpf_sk_storage_map *)&m_sk_storage;
+ struct bpf_local_storage_map *sk_storage =
+ (struct bpf_local_storage_map *)&m_sk_storage;
struct bpf_map *map = (struct bpf_map *)&m_sk_storage;
VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));
diff --git a/tools/testing/selftests/bpf/progs/metadata_unused.c b/tools/testing/selftests/bpf/progs/metadata_unused.c
new file mode 100644
index 000000000000..672a0d19f8d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/metadata_unused.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+volatile const char bpf_metadata_a[] SEC(".rodata") = "foo";
+volatile const int bpf_metadata_b SEC(".rodata") = 1;
+
+SEC("cgroup_skb/egress")
+int prog(struct xdp_md *ctx)
+{
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/metadata_used.c b/tools/testing/selftests/bpf/progs/metadata_used.c
new file mode 100644
index 000000000000..b7198e65383d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/metadata_used.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+volatile const char bpf_metadata_a[] SEC(".rodata") = "bar";
+volatile const int bpf_metadata_b SEC(".rodata") = 2;
+
+SEC("cgroup_skb/egress")
+int prog(struct xdp_md *ctx)
+{
+ return bpf_metadata_b ? 1 : 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
new file mode 100644
index 000000000000..6b670039ea67
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Oracle and/or its affiliates. */
+
+#include "btf_ptr.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#include <errno.h>
+
+long ret = 0;
+int num_subtests = 0;
+int ran_subtests = 0;
+bool skip = false;
+
+#define STRSIZE 2048
+#define EXPECTED_STRSIZE 256
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#endif
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, char[STRSIZE]);
+} strdata SEC(".maps");
+
+static int __strncmp(const void *m1, const void *m2, size_t len)
+{
+ const unsigned char *s1 = m1;
+ const unsigned char *s2 = m2;
+ int i, delta = 0;
+
+ for (i = 0; i < len; i++) {
+ delta = s1[i] - s2[i];
+ if (delta || s1[i] == 0 || s2[i] == 0)
+ break;
+ }
+ return delta;
+}
+
+#if __has_builtin(__builtin_btf_type_id)
+#define TEST_BTF(_str, _type, _flags, _expected, ...) \
+ do { \
+ static const char _expectedval[EXPECTED_STRSIZE] = \
+ _expected; \
+ static const char _ptrtype[64] = #_type; \
+ __u64 _hflags = _flags | BTF_F_COMPACT; \
+ static _type _ptrdata = __VA_ARGS__; \
+ static struct btf_ptr _ptr = { }; \
+ int _cmp; \
+ \
+ ++num_subtests; \
+ if (ret < 0) \
+ break; \
+ ++ran_subtests; \
+ _ptr.ptr = &_ptrdata; \
+ _ptr.type_id = bpf_core_type_id_kernel(_type); \
+ if (_ptr.type_id <= 0) { \
+ ret = -EINVAL; \
+ break; \
+ } \
+ ret = bpf_snprintf_btf(_str, STRSIZE, \
+ &_ptr, sizeof(_ptr), _hflags); \
+ if (ret) \
+ break; \
+ _cmp = __strncmp(_str, _expectedval, EXPECTED_STRSIZE); \
+ if (_cmp != 0) { \
+ bpf_printk("(%d) got %s", _cmp, _str); \
+ bpf_printk("(%d) expected %s", _cmp, \
+ _expectedval); \
+ ret = -EBADMSG; \
+ break; \
+ } \
+ } while (0)
+#endif
+
+/* Use where expected data string matches its stringified declaration */
+#define TEST_BTF_C(_str, _type, _flags, ...) \
+ TEST_BTF(_str, _type, _flags, "(" #_type ")" #__VA_ARGS__, \
+ __VA_ARGS__)
+
+/* TRACE_EVENT(netif_receive_skb,
+ * TP_PROTO(struct sk_buff *skb),
+ */
+SEC("tp_btf/netif_receive_skb")
+int BPF_PROG(trace_netif_receive_skb, struct sk_buff *skb)
+{
+ static __u64 flags[] = { 0, BTF_F_COMPACT, BTF_F_ZERO, BTF_F_PTR_RAW,
+ BTF_F_NONAME, BTF_F_COMPACT | BTF_F_ZERO |
+ BTF_F_PTR_RAW | BTF_F_NONAME };
+ static struct btf_ptr p = { };
+ __u32 key = 0;
+ int i, __ret;
+ char *str;
+
+#if __has_builtin(__builtin_btf_type_id)
+ str = bpf_map_lookup_elem(&strdata, &key);
+ if (!str)
+ return 0;
+
+ /* Ensure we can write skb string representation */
+ p.type_id = bpf_core_type_id_kernel(struct sk_buff);
+ p.ptr = skb;
+ for (i = 0; i < ARRAY_SIZE(flags); i++) {
+ ++num_subtests;
+		ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), flags[i]);
+ if (ret < 0)
+ bpf_printk("returned %d when writing skb", ret);
+ ++ran_subtests;
+ }
+
+ /* Check invalid ptr value */
+ p.ptr = 0;
+ __ret = bpf_snprintf_btf(str, STRSIZE, &p, sizeof(p), 0);
+ if (__ret >= 0) {
+ bpf_printk("printing NULL should generate error, got (%d)",
+ __ret);
+ ret = -ERANGE;
+ }
+
+ /* Verify type display for various types. */
+
+ /* simple int */
+ TEST_BTF_C(str, int, 0, 1234);
+ TEST_BTF(str, int, BTF_F_NONAME, "1234", 1234);
+ /* zero value should be printed at toplevel */
+ TEST_BTF(str, int, 0, "(int)0", 0);
+ TEST_BTF(str, int, BTF_F_NONAME, "0", 0);
+ TEST_BTF(str, int, BTF_F_ZERO, "(int)0", 0);
+ TEST_BTF(str, int, BTF_F_NONAME | BTF_F_ZERO, "0", 0);
+ TEST_BTF_C(str, int, 0, -4567);
+ TEST_BTF(str, int, BTF_F_NONAME, "-4567", -4567);
+
+ /* simple char */
+ TEST_BTF_C(str, char, 0, 100);
+ TEST_BTF(str, char, BTF_F_NONAME, "100", 100);
+ /* zero value should be printed at toplevel */
+ TEST_BTF(str, char, 0, "(char)0", 0);
+ TEST_BTF(str, char, BTF_F_NONAME, "0", 0);
+ TEST_BTF(str, char, BTF_F_ZERO, "(char)0", 0);
+ TEST_BTF(str, char, BTF_F_NONAME | BTF_F_ZERO, "0", 0);
+
+ /* simple typedef */
+ TEST_BTF_C(str, uint64_t, 0, 100);
+ TEST_BTF(str, u64, BTF_F_NONAME, "1", 1);
+ /* zero value should be printed at toplevel */
+ TEST_BTF(str, u64, 0, "(u64)0", 0);
+ TEST_BTF(str, u64, BTF_F_NONAME, "0", 0);
+ TEST_BTF(str, u64, BTF_F_ZERO, "(u64)0", 0);
+ TEST_BTF(str, u64, BTF_F_NONAME|BTF_F_ZERO, "0", 0);
+
+ /* typedef struct */
+ TEST_BTF_C(str, atomic_t, 0, {.counter = (int)1,});
+ TEST_BTF(str, atomic_t, BTF_F_NONAME, "{1,}", {.counter = 1,});
+ /* typedef with 0 value should be printed at toplevel */
+ TEST_BTF(str, atomic_t, 0, "(atomic_t){}", {.counter = 0,});
+ TEST_BTF(str, atomic_t, BTF_F_NONAME, "{}", {.counter = 0,});
+ TEST_BTF(str, atomic_t, BTF_F_ZERO, "(atomic_t){.counter = (int)0,}",
+ {.counter = 0,});
+ TEST_BTF(str, atomic_t, BTF_F_NONAME|BTF_F_ZERO,
+ "{0,}", {.counter = 0,});
+
+ /* enum where enum value does (and does not) exist */
+ TEST_BTF_C(str, enum bpf_cmd, 0, BPF_MAP_CREATE);
+ TEST_BTF(str, enum bpf_cmd, 0, "(enum bpf_cmd)BPF_MAP_CREATE", 0);
+ TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME, "BPF_MAP_CREATE",
+ BPF_MAP_CREATE);
+ TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME|BTF_F_ZERO,
+ "BPF_MAP_CREATE", 0);
+
+ TEST_BTF(str, enum bpf_cmd, BTF_F_ZERO, "(enum bpf_cmd)BPF_MAP_CREATE",
+ BPF_MAP_CREATE);
+ TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME|BTF_F_ZERO,
+ "BPF_MAP_CREATE", BPF_MAP_CREATE);
+ TEST_BTF_C(str, enum bpf_cmd, 0, 2000);
+ TEST_BTF(str, enum bpf_cmd, BTF_F_NONAME, "2000", 2000);
+
+ /* simple struct */
+ TEST_BTF_C(str, struct btf_enum, 0,
+ {.name_off = (__u32)3,.val = (__s32)-1,});
+ TEST_BTF(str, struct btf_enum, BTF_F_NONAME, "{3,-1,}",
+ { .name_off = 3, .val = -1,});
+ TEST_BTF(str, struct btf_enum, BTF_F_NONAME, "{-1,}",
+ { .name_off = 0, .val = -1,});
+ TEST_BTF(str, struct btf_enum, BTF_F_NONAME|BTF_F_ZERO, "{0,-1,}",
+ { .name_off = 0, .val = -1,});
+ /* empty struct should be printed */
+ TEST_BTF(str, struct btf_enum, 0, "(struct btf_enum){}",
+ { .name_off = 0, .val = 0,});
+ TEST_BTF(str, struct btf_enum, BTF_F_NONAME, "{}",
+ { .name_off = 0, .val = 0,});
+ TEST_BTF(str, struct btf_enum, BTF_F_ZERO,
+ "(struct btf_enum){.name_off = (__u32)0,.val = (__s32)0,}",
+ { .name_off = 0, .val = 0,});
+
+ /* struct with pointers */
+ TEST_BTF(str, struct list_head, BTF_F_PTR_RAW,
+ "(struct list_head){.next = (struct list_head *)0x0000000000000001,}",
+ { .next = (struct list_head *)1 });
+ /* NULL pointer should not be displayed */
+ TEST_BTF(str, struct list_head, BTF_F_PTR_RAW,
+ "(struct list_head){}",
+ { .next = (struct list_head *)0 });
+
+ /* struct with char array */
+ TEST_BTF(str, struct bpf_prog_info, 0,
+ "(struct bpf_prog_info){.name = (char[])['f','o','o',],}",
+ { .name = "foo",});
+ TEST_BTF(str, struct bpf_prog_info, BTF_F_NONAME,
+ "{['f','o','o',],}",
+ {.name = "foo",});
+ /* leading null char means do not display string */
+ TEST_BTF(str, struct bpf_prog_info, 0,
+ "(struct bpf_prog_info){}",
+ {.name = {'\0', 'f', 'o', 'o'}});
+ /* handle non-printable characters */
+ TEST_BTF(str, struct bpf_prog_info, 0,
+ "(struct bpf_prog_info){.name = (char[])[1,2,3,],}",
+ { .name = {1, 2, 3, 0}});
+
+ /* struct with non-char array */
+ TEST_BTF(str, struct __sk_buff, 0,
+ "(struct __sk_buff){.cb = (__u32[])[1,2,3,4,5,],}",
+ { .cb = {1, 2, 3, 4, 5,},});
+ TEST_BTF(str, struct __sk_buff, BTF_F_NONAME,
+ "{[1,2,3,4,5,],}",
+ { .cb = { 1, 2, 3, 4, 5},});
+	/* For non-char arrays, show non-zero values only */
+ TEST_BTF(str, struct __sk_buff, 0,
+ "(struct __sk_buff){.cb = (__u32[])[1,],}",
+ { .cb = { 0, 0, 1, 0, 0},});
+
+ /* struct with bitfields */
+ TEST_BTF_C(str, struct bpf_insn, 0,
+ {.code = (__u8)1,.dst_reg = (__u8)0x2,.src_reg = (__u8)0x3,.off = (__s16)4,.imm = (__s32)5,});
+ TEST_BTF(str, struct bpf_insn, BTF_F_NONAME, "{1,0x2,0x3,4,5,}",
+ {.code = 1, .dst_reg = 0x2, .src_reg = 0x3, .off = 4,
+ .imm = 5,});
+#else
+ skip = true;
+#endif
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
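
Stripped of the TEST_BTF machinery, the helper under test takes a (pointer, BTF type id) pair and renders the object as a string. A minimal sketch, assuming the same headers as the test above:

SEC("tp_btf/netif_receive_skb")
int BPF_PROG(dump_skb, struct sk_buff *skb)
{
	static char buf[2048];
	struct btf_ptr p = {
		.ptr = skb,
		.type_id = bpf_core_type_id_kernel(struct sk_buff),
	};

	/* Writes e.g. "(struct sk_buff){.len = (unsigned int)168,...}" */
	bpf_snprintf_btf(buf, sizeof(buf), &p, sizeof(p), BTF_F_COMPACT);
	return 0;
}
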
diff --git a/tools/testing/selftests/bpf/progs/profiler.h b/tools/testing/selftests/bpf/progs/profiler.h
new file mode 100644
index 000000000000..3bac4fdd4bdf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/profiler.h
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#pragma once
+
+#define TASK_COMM_LEN 16
+#define MAX_ANCESTORS 4
+#define MAX_PATH 256
+#define KILL_TARGET_LEN 64
+#define CTL_MAXNAME 10
+#define MAX_ARGS_LEN 4096
+#define MAX_FILENAME_LEN 512
+#define MAX_ENVIRON_LEN 8192
+#define MAX_PATH_DEPTH 32
+#define MAX_FILEPATH_LENGTH (MAX_PATH_DEPTH * MAX_PATH)
+#define MAX_CGROUPS_PATH_DEPTH 8
+
+#define MAX_METADATA_PAYLOAD_LEN TASK_COMM_LEN
+
+#define MAX_CGROUP_PAYLOAD_LEN \
+ (MAX_PATH * 2 + (MAX_PATH * MAX_CGROUPS_PATH_DEPTH))
+
+#define MAX_CAP_PAYLOAD_LEN (MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
+
+#define MAX_SYSCTL_PAYLOAD_LEN \
+ (MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN + CTL_MAXNAME + MAX_PATH)
+
+#define MAX_KILL_PAYLOAD_LEN \
+ (MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN + TASK_COMM_LEN + \
+ KILL_TARGET_LEN)
+
+#define MAX_EXEC_PAYLOAD_LEN \
+ (MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN + MAX_FILENAME_LEN + \
+ MAX_ARGS_LEN + MAX_ENVIRON_LEN)
+
+#define MAX_FILEMOD_PAYLOAD_LEN \
+ (MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN + MAX_FILEPATH_LENGTH + \
+ MAX_FILEPATH_LENGTH)
+
+enum data_type {
+ INVALID_EVENT,
+ EXEC_EVENT,
+ FORK_EVENT,
+ KILL_EVENT,
+ SYSCTL_EVENT,
+ FILEMOD_EVENT,
+ MAX_DATA_TYPE_EVENT
+};
+
+enum filemod_type {
+ FMOD_OPEN,
+ FMOD_LINK,
+ FMOD_SYMLINK,
+};
+
+struct ancestors_data_t {
+ pid_t ancestor_pids[MAX_ANCESTORS];
+ uint32_t ancestor_exec_ids[MAX_ANCESTORS];
+ uint64_t ancestor_start_times[MAX_ANCESTORS];
+ uint32_t num_ancestors;
+};
+
+struct var_metadata_t {
+ enum data_type type;
+ pid_t pid;
+ uint32_t exec_id;
+ uid_t uid;
+ gid_t gid;
+ uint64_t start_time;
+ uint32_t cpu_id;
+ uint64_t bpf_stats_num_perf_events;
+ uint64_t bpf_stats_start_ktime_ns;
+ uint8_t comm_length;
+};
+
+struct cgroup_data_t {
+ ino_t cgroup_root_inode;
+ ino_t cgroup_proc_inode;
+ uint64_t cgroup_root_mtime;
+ uint64_t cgroup_proc_mtime;
+ uint16_t cgroup_root_length;
+ uint16_t cgroup_proc_length;
+ uint16_t cgroup_full_length;
+ int cgroup_full_path_root_pos;
+};
+
+struct var_sysctl_data_t {
+ struct var_metadata_t meta;
+ struct cgroup_data_t cgroup_data;
+ struct ancestors_data_t ancestors_info;
+ uint8_t sysctl_val_length;
+ uint16_t sysctl_path_length;
+ char payload[MAX_SYSCTL_PAYLOAD_LEN];
+};
+
+struct var_kill_data_t {
+ struct var_metadata_t meta;
+ struct cgroup_data_t cgroup_data;
+ struct ancestors_data_t ancestors_info;
+ pid_t kill_target_pid;
+ int kill_sig;
+ uint32_t kill_count;
+ uint64_t last_kill_time;
+ uint8_t kill_target_name_length;
+ uint8_t kill_target_cgroup_proc_length;
+ char payload[MAX_KILL_PAYLOAD_LEN];
+ size_t payload_length;
+};
+
+struct var_exec_data_t {
+ struct var_metadata_t meta;
+ struct cgroup_data_t cgroup_data;
+ pid_t parent_pid;
+ uint32_t parent_exec_id;
+ uid_t parent_uid;
+ uint64_t parent_start_time;
+ uint16_t bin_path_length;
+ uint16_t cmdline_length;
+ uint16_t environment_length;
+ char payload[MAX_EXEC_PAYLOAD_LEN];
+};
+
+struct var_fork_data_t {
+ struct var_metadata_t meta;
+ pid_t parent_pid;
+ uint32_t parent_exec_id;
+ uint64_t parent_start_time;
+ char payload[MAX_METADATA_PAYLOAD_LEN];
+};
+
+struct var_filemod_data_t {
+ struct var_metadata_t meta;
+ struct cgroup_data_t cgroup_data;
+ enum filemod_type fmod_type;
+ unsigned int dst_flags;
+ uint32_t src_device_id;
+ uint32_t dst_device_id;
+ ino_t src_inode;
+ ino_t dst_inode;
+ uint16_t src_filepath_length;
+ uint16_t dst_filepath_length;
+ char payload[MAX_FILEMOD_PAYLOAD_LEN];
+};
+
+struct profiler_config_struct {
+ bool fetch_cgroups_from_bpf;
+ ino_t cgroup_fs_inode;
+ ino_t cgroup_login_session_inode;
+ uint64_t kill_signals_mask;
+ ino_t inode_filter;
+ uint32_t stale_info_secs;
+ bool use_variable_buffers;
+ bool read_environ_from_exec;
+ bool enable_cgroup_v1_resolver;
+};
+
+struct bpf_func_stats_data {
+ uint64_t time_elapsed_ns;
+ uint64_t num_executions;
+ uint64_t num_perf_events;
+};
+
+struct bpf_func_stats_ctx {
+ uint64_t start_time_ns;
+ struct bpf_func_stats_data* bpf_func_stats_data_val;
+};
+
+enum bpf_function_id {
+ profiler_bpf_proc_sys_write,
+ profiler_bpf_sched_process_exec,
+ profiler_bpf_sched_process_exit,
+ profiler_bpf_sys_enter_kill,
+ profiler_bpf_do_filp_open_ret,
+ profiler_bpf_sched_process_fork,
+ profiler_bpf_vfs_link,
+ profiler_bpf_vfs_symlink,
+ profiler_bpf_max_function_id
+};
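
Each event struct above ends in a flat payload buffer sized for the worst case; producers append variable-length strings back to back and record each piece's length in the fixed header, so consumers can walk the buffer in order. A sketch of the append step, assuming the surrounding profiler definitions (the helper name is illustrative):

static void *append_str(void *payload, const char *src, uint16_t *len_out)
{
	size_t n = bpf_probe_read_str(payload, MAX_PATH, src);

	if (n <= MAX_PATH) {
		*len_out = n;	/* lets the consumer locate the next piece */
		payload += n;	/* next string lands right behind this one */
	}
	return payload;
}
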
diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h
new file mode 100644
index 000000000000..00578311a423
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/profiler.inc.h
@@ -0,0 +1,969 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#include <vmlinux.h>
+#include <bpf/bpf_core_read.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#include "profiler.h"
+
+#ifndef NULL
+#define NULL 0
+#endif
+
+#define O_WRONLY 00000001
+#define O_RDWR 00000002
+#define O_DIRECTORY 00200000
+#define __O_TMPFILE 020000000
+#define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
+#define MAX_ERRNO 4095
+#define S_IFMT 00170000
+#define S_IFSOCK 0140000
+#define S_IFLNK 0120000
+#define S_IFREG 0100000
+#define S_IFBLK 0060000
+#define S_IFDIR 0040000
+#define S_IFCHR 0020000
+#define S_IFIFO 0010000
+#define S_ISUID 0004000
+#define S_ISGID 0002000
+#define S_ISVTX 0001000
+#define S_ISLNK(m) (((m)&S_IFMT) == S_IFLNK)
+#define S_ISDIR(m) (((m)&S_IFMT) == S_IFDIR)
+#define S_ISCHR(m) (((m)&S_IFMT) == S_IFCHR)
+#define S_ISBLK(m) (((m)&S_IFMT) == S_IFBLK)
+#define S_ISFIFO(m) (((m)&S_IFMT) == S_IFIFO)
+#define S_ISSOCK(m) (((m)&S_IFMT) == S_IFSOCK)
+#define IS_ERR_VALUE(x) ((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)
+
+#define KILL_DATA_ARRAY_SIZE 8
+
+struct var_kill_data_arr_t {
+ struct var_kill_data_t array[KILL_DATA_ARRAY_SIZE];
+};
+
+union any_profiler_data_t {
+ struct var_exec_data_t var_exec;
+ struct var_kill_data_t var_kill;
+ struct var_sysctl_data_t var_sysctl;
+ struct var_filemod_data_t var_filemod;
+ struct var_fork_data_t var_fork;
+ struct var_kill_data_arr_t var_kill_data_arr;
+};
+
+volatile struct profiler_config_struct bpf_config = {};
+
+#define FETCH_CGROUPS_FROM_BPF (bpf_config.fetch_cgroups_from_bpf)
+#define CGROUP_FS_INODE (bpf_config.cgroup_fs_inode)
+#define CGROUP_LOGIN_SESSION_INODE \
+ (bpf_config.cgroup_login_session_inode)
+#define KILL_SIGNALS (bpf_config.kill_signals_mask)
+#define STALE_INFO (bpf_config.stale_info_secs)
+#define INODE_FILTER (bpf_config.inode_filter)
+#define READ_ENVIRON_FROM_EXEC (bpf_config.read_environ_from_exec)
+#define ENABLE_CGROUP_V1_RESOLVER (bpf_config.enable_cgroup_v1_resolver)
+
+struct kernfs_iattrs___52 {
+ struct iattr ia_iattr;
+};
+
+struct kernfs_node___52 {
+ union /* kernfs_node_id */ {
+ struct {
+ u32 ino;
+ u32 generation;
+ };
+ u64 id;
+ } id;
+};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, u32);
+ __type(value, union any_profiler_data_t);
+} data_heap SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} events SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, KILL_DATA_ARRAY_SIZE);
+ __type(key, u32);
+ __type(value, struct var_kill_data_arr_t);
+} var_tpid_to_data SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, profiler_bpf_max_function_id);
+ __type(key, u32);
+ __type(value, struct bpf_func_stats_data);
+} bpf_func_stats SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, bool);
+ __uint(max_entries, 16);
+} allowed_devices SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u64);
+ __type(value, bool);
+ __uint(max_entries, 1024);
+} allowed_file_inodes SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u64);
+ __type(value, bool);
+ __uint(max_entries, 1024);
+} allowed_directory_inodes SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, u32);
+ __type(value, bool);
+ __uint(max_entries, 16);
+} disallowed_exec_inodes SEC(".maps");
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof(arr[0]))
+#endif
+
+static INLINE bool IS_ERR(const void* ptr)
+{
+ return IS_ERR_VALUE((unsigned long)ptr);
+}
+
+static INLINE u32 get_userspace_pid()
+{
+ return bpf_get_current_pid_tgid() >> 32;
+}
+
+static INLINE bool is_init_process(u32 tgid)
+{
+ return tgid == 1 || tgid == 0;
+}
+
+static INLINE unsigned long
+probe_read_lim(void* dst, void* src, unsigned long len, unsigned long max)
+{
+ len = len < max ? len : max;
+ if (len > 1) {
+ if (bpf_probe_read(dst, len, src))
+ return 0;
+ } else if (len == 1) {
+ if (bpf_probe_read(dst, 1, src))
+ return 0;
+ }
+ return len;
+}
+
+static INLINE int get_var_spid_index(struct var_kill_data_arr_t* arr_struct,
+ int spid)
+{
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
+ if (arr_struct->array[i].meta.pid == spid)
+ return i;
+ return -1;
+}
+
+static INLINE void populate_ancestors(struct task_struct* task,
+ struct ancestors_data_t* ancestors_data)
+{
+ struct task_struct* parent = task;
+ u32 num_ancestors, ppid;
+
+ ancestors_data->num_ancestors = 0;
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (num_ancestors = 0; num_ancestors < MAX_ANCESTORS; num_ancestors++) {
+ parent = BPF_CORE_READ(parent, real_parent);
+ if (parent == NULL)
+ break;
+ ppid = BPF_CORE_READ(parent, tgid);
+ if (is_init_process(ppid))
+ break;
+ ancestors_data->ancestor_pids[num_ancestors] = ppid;
+ ancestors_data->ancestor_exec_ids[num_ancestors] =
+ BPF_CORE_READ(parent, self_exec_id);
+ ancestors_data->ancestor_start_times[num_ancestors] =
+ BPF_CORE_READ(parent, start_time);
+ ancestors_data->num_ancestors = num_ancestors;
+ }
+}
+
+static INLINE void* read_full_cgroup_path(struct kernfs_node* cgroup_node,
+ struct kernfs_node* cgroup_root_node,
+ void* payload,
+ int* root_pos)
+{
+ void* payload_start = payload;
+ size_t filepart_length;
+
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (int i = 0; i < MAX_CGROUPS_PATH_DEPTH; i++) {
+ filepart_length =
+ bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(cgroup_node, name));
+ if (!cgroup_node)
+ return payload;
+ if (cgroup_node == cgroup_root_node)
+ *root_pos = payload - payload_start;
+ if (filepart_length <= MAX_PATH) {
+ barrier_var(filepart_length);
+ payload += filepart_length;
+ }
+ cgroup_node = BPF_CORE_READ(cgroup_node, parent);
+ }
+ return payload;
+}
+
+static ino_t get_inode_from_kernfs(struct kernfs_node* node)
+{
+ struct kernfs_node___52* node52 = (void*)node;
+
+ if (bpf_core_field_exists(node52->id.ino)) {
+ barrier_var(node52);
+ return BPF_CORE_READ(node52, id.ino);
+ } else {
+ barrier_var(node);
+ return (u64)BPF_CORE_READ(node, id);
+ }
+}
+
+int pids_cgrp_id = 1;
+
+static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
+ struct task_struct* task,
+ void* payload)
+{
+ struct kernfs_node* root_kernfs =
+ BPF_CORE_READ(task, nsproxy, cgroup_ns, root_cset, dfl_cgrp, kn);
+ struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
+
+ if (ENABLE_CGROUP_V1_RESOLVER) {
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (int i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+ struct cgroup_subsys_state* subsys =
+ BPF_CORE_READ(task, cgroups, subsys[i]);
+ if (subsys != NULL) {
+ int subsys_id = BPF_CORE_READ(subsys, ss, id);
+ if (subsys_id == pids_cgrp_id) {
+ proc_kernfs = BPF_CORE_READ(subsys, cgroup, kn);
+ root_kernfs = BPF_CORE_READ(subsys, ss, root, kf_root, kn);
+ break;
+ }
+ }
+ }
+ }
+
+ cgroup_data->cgroup_root_inode = get_inode_from_kernfs(root_kernfs);
+ cgroup_data->cgroup_proc_inode = get_inode_from_kernfs(proc_kernfs);
+
+ if (bpf_core_field_exists(root_kernfs->iattr->ia_mtime)) {
+ cgroup_data->cgroup_root_mtime =
+ BPF_CORE_READ(root_kernfs, iattr, ia_mtime.tv_nsec);
+ cgroup_data->cgroup_proc_mtime =
+ BPF_CORE_READ(proc_kernfs, iattr, ia_mtime.tv_nsec);
+ } else {
+ struct kernfs_iattrs___52* root_iattr =
+ (struct kernfs_iattrs___52*)BPF_CORE_READ(root_kernfs, iattr);
+ cgroup_data->cgroup_root_mtime =
+ BPF_CORE_READ(root_iattr, ia_iattr.ia_mtime.tv_nsec);
+
+ struct kernfs_iattrs___52* proc_iattr =
+ (struct kernfs_iattrs___52*)BPF_CORE_READ(proc_kernfs, iattr);
+ cgroup_data->cgroup_proc_mtime =
+ BPF_CORE_READ(proc_iattr, ia_iattr.ia_mtime.tv_nsec);
+ }
+
+ cgroup_data->cgroup_root_length = 0;
+ cgroup_data->cgroup_proc_length = 0;
+ cgroup_data->cgroup_full_length = 0;
+
+ size_t cgroup_root_length =
+ bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(root_kernfs, name));
+ barrier_var(cgroup_root_length);
+ if (cgroup_root_length <= MAX_PATH) {
+ barrier_var(cgroup_root_length);
+ cgroup_data->cgroup_root_length = cgroup_root_length;
+ payload += cgroup_root_length;
+ }
+
+ size_t cgroup_proc_length =
+ bpf_probe_read_str(payload, MAX_PATH, BPF_CORE_READ(proc_kernfs, name));
+ barrier_var(cgroup_proc_length);
+ if (cgroup_proc_length <= MAX_PATH) {
+ barrier_var(cgroup_proc_length);
+ cgroup_data->cgroup_proc_length = cgroup_proc_length;
+ payload += cgroup_proc_length;
+ }
+
+ if (FETCH_CGROUPS_FROM_BPF) {
+ cgroup_data->cgroup_full_path_root_pos = -1;
+ void* payload_end_pos = read_full_cgroup_path(proc_kernfs, root_kernfs, payload,
+ &cgroup_data->cgroup_full_path_root_pos);
+ cgroup_data->cgroup_full_length = payload_end_pos - payload;
+ payload = payload_end_pos;
+ }
+
+ return (void*)payload;
+}
+
+static INLINE void* populate_var_metadata(struct var_metadata_t* metadata,
+ struct task_struct* task,
+ u32 pid, void* payload)
+{
+ u64 uid_gid = bpf_get_current_uid_gid();
+
+ metadata->uid = (u32)uid_gid;
+ metadata->gid = uid_gid >> 32;
+ metadata->pid = pid;
+ metadata->exec_id = BPF_CORE_READ(task, self_exec_id);
+ metadata->start_time = BPF_CORE_READ(task, start_time);
+ metadata->comm_length = 0;
+
+ size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
+ barrier_var(comm_length);
+ if (comm_length <= TASK_COMM_LEN) {
+ barrier_var(comm_length);
+ metadata->comm_length = comm_length;
+ payload += comm_length;
+ }
+
+ return (void*)payload;
+}
+
+static INLINE struct var_kill_data_t*
+get_var_kill_data(struct pt_regs* ctx, int spid, int tpid, int sig)
+{
+ int zero = 0;
+ struct var_kill_data_t* kill_data = bpf_map_lookup_elem(&data_heap, &zero);
+
+ if (kill_data == NULL)
+ return NULL;
+ struct task_struct* task = (struct task_struct*)bpf_get_current_task();
+
+ void* payload = populate_var_metadata(&kill_data->meta, task, spid, kill_data->payload);
+ payload = populate_cgroup_info(&kill_data->cgroup_data, task, payload);
+ size_t payload_length = payload - (void*)kill_data->payload;
+ kill_data->payload_length = payload_length;
+ populate_ancestors(task, &kill_data->ancestors_info);
+ kill_data->meta.type = KILL_EVENT;
+ kill_data->kill_target_pid = tpid;
+ kill_data->kill_sig = sig;
+ kill_data->kill_count = 1;
+ kill_data->last_kill_time = bpf_ktime_get_ns();
+ return kill_data;
+}
+
+static INLINE int trace_var_sys_kill(void* ctx, int tpid, int sig)
+{
+ if ((KILL_SIGNALS & (1ULL << sig)) == 0)
+ return 0;
+
+ u32 spid = get_userspace_pid();
+ struct var_kill_data_arr_t* arr_struct = bpf_map_lookup_elem(&var_tpid_to_data, &tpid);
+
+ if (arr_struct == NULL) {
+ struct var_kill_data_t* kill_data = get_var_kill_data(ctx, spid, tpid, sig);
+ int zero = 0;
+
+ if (kill_data == NULL)
+ return 0;
+ arr_struct = bpf_map_lookup_elem(&data_heap, &zero);
+ if (arr_struct == NULL)
+ return 0;
+ bpf_probe_read(&arr_struct->array[0], sizeof(arr_struct->array[0]), kill_data);
+ } else {
+ int index = get_var_spid_index(arr_struct, spid);
+
+ if (index == -1) {
+ struct var_kill_data_t* kill_data =
+ get_var_kill_data(ctx, spid, tpid, sig);
+ if (kill_data == NULL)
+ return 0;
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++)
+ if (arr_struct->array[i].meta.pid == 0) {
+ bpf_probe_read(&arr_struct->array[i],
+ sizeof(arr_struct->array[i]), kill_data);
+ bpf_map_update_elem(&var_tpid_to_data, &tpid,
+ arr_struct, 0);
+
+ return 0;
+ }
+ return 0;
+ }
+
+ struct var_kill_data_t* kill_data = &arr_struct->array[index];
+
+ u64 delta_sec =
+ (bpf_ktime_get_ns() - kill_data->last_kill_time) / 1000000000;
+
+ if (delta_sec < STALE_INFO) {
+ kill_data->kill_count++;
+ kill_data->last_kill_time = bpf_ktime_get_ns();
+ bpf_probe_read(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
+ } else {
+ struct var_kill_data_t* kill_data =
+ get_var_kill_data(ctx, spid, tpid, sig);
+ if (kill_data == NULL)
+ return 0;
+ bpf_probe_read(&arr_struct->array[index],
+ sizeof(arr_struct->array[index]),
+ kill_data);
+ }
+ }
+ bpf_map_update_elem(&var_tpid_to_data, &tpid, arr_struct, 0);
+ return 0;
+}
+
+static INLINE void bpf_stats_enter(struct bpf_func_stats_ctx* bpf_stat_ctx,
+ enum bpf_function_id func_id)
+{
+ int func_id_key = func_id;
+
+ bpf_stat_ctx->start_time_ns = bpf_ktime_get_ns();
+ bpf_stat_ctx->bpf_func_stats_data_val =
+ bpf_map_lookup_elem(&bpf_func_stats, &func_id_key);
+ if (bpf_stat_ctx->bpf_func_stats_data_val)
+ bpf_stat_ctx->bpf_func_stats_data_val->num_executions++;
+}
+
+static INLINE void bpf_stats_exit(struct bpf_func_stats_ctx* bpf_stat_ctx)
+{
+ if (bpf_stat_ctx->bpf_func_stats_data_val)
+ bpf_stat_ctx->bpf_func_stats_data_val->time_elapsed_ns +=
+ bpf_ktime_get_ns() - bpf_stat_ctx->start_time_ns;
+}
+
+static INLINE void
+bpf_stats_pre_submit_var_perf_event(struct bpf_func_stats_ctx* bpf_stat_ctx,
+ struct var_metadata_t* meta)
+{
+ if (bpf_stat_ctx->bpf_func_stats_data_val) {
+ bpf_stat_ctx->bpf_func_stats_data_val->num_perf_events++;
+ meta->bpf_stats_num_perf_events =
+ bpf_stat_ctx->bpf_func_stats_data_val->num_perf_events;
+ }
+ meta->bpf_stats_start_ktime_ns = bpf_stat_ctx->start_time_ns;
+ meta->cpu_id = bpf_get_smp_processor_id();
+}
+
+static INLINE size_t
+read_absolute_file_path_from_dentry(struct dentry* filp_dentry, void* payload)
+{
+ size_t length = 0;
+ size_t filepart_length;
+ struct dentry* parent_dentry;
+
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (int i = 0; i < MAX_PATH_DEPTH; i++) {
+ filepart_length = bpf_probe_read_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp_dentry, d_name.name));
+ barrier_var(filepart_length);
+ if (filepart_length > MAX_PATH)
+ break;
+ barrier_var(filepart_length);
+ payload += filepart_length;
+ length += filepart_length;
+
+ parent_dentry = BPF_CORE_READ(filp_dentry, d_parent);
+ if (filp_dentry == parent_dentry)
+ break;
+ filp_dentry = parent_dentry;
+ }
+
+ return length;
+}
+
+static INLINE bool
+is_ancestor_in_allowed_inodes(struct dentry* filp_dentry)
+{
+ struct dentry* parent_dentry;
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (int i = 0; i < MAX_PATH_DEPTH; i++) {
+ u64 dir_ino = BPF_CORE_READ(filp_dentry, d_inode, i_ino);
+ bool* allowed_dir = bpf_map_lookup_elem(&allowed_directory_inodes, &dir_ino);
+
+ if (allowed_dir != NULL)
+ return true;
+ parent_dentry = BPF_CORE_READ(filp_dentry, d_parent);
+ if (filp_dentry == parent_dentry)
+ break;
+ filp_dentry = parent_dentry;
+ }
+ return false;
+}
+
+static INLINE bool is_dentry_allowed_for_filemod(struct dentry* file_dentry,
+ u32* device_id,
+ u64* file_ino)
+{
+ u32 dev_id = BPF_CORE_READ(file_dentry, d_sb, s_dev);
+ *device_id = dev_id;
+ bool* allowed_device = bpf_map_lookup_elem(&allowed_devices, &dev_id);
+
+ if (allowed_device == NULL)
+ return false;
+
+ u64 ino = BPF_CORE_READ(file_dentry, d_inode, i_ino);
+ *file_ino = ino;
+ bool* allowed_file = bpf_map_lookup_elem(&allowed_file_inodes, &ino);
+
+ if (allowed_file == NULL)
+ if (!is_ancestor_in_allowed_inodes(BPF_CORE_READ(file_dentry, d_parent)))
+ return false;
+ return true;
+}
+
+SEC("kprobe/proc_sys_write")
+ssize_t BPF_KPROBE(kprobe__proc_sys_write,
+ struct file* filp, const char* buf,
+ size_t count, loff_t* ppos)
+{
+ struct bpf_func_stats_ctx stats_ctx;
+ bpf_stats_enter(&stats_ctx, profiler_bpf_proc_sys_write);
+
+ u32 pid = get_userspace_pid();
+ int zero = 0;
+ struct var_sysctl_data_t* sysctl_data =
+ bpf_map_lookup_elem(&data_heap, &zero);
+ if (!sysctl_data)
+ goto out;
+
+ struct task_struct* task = (struct task_struct*)bpf_get_current_task();
+ sysctl_data->meta.type = SYSCTL_EVENT;
+ void* payload = populate_var_metadata(&sysctl_data->meta, task, pid, sysctl_data->payload);
+ payload = populate_cgroup_info(&sysctl_data->cgroup_data, task, payload);
+
+ populate_ancestors(task, &sysctl_data->ancestors_info);
+
+ sysctl_data->sysctl_val_length = 0;
+ sysctl_data->sysctl_path_length = 0;
+
+ size_t sysctl_val_length = bpf_probe_read_str(payload, CTL_MAXNAME, buf);
+ barrier_var(sysctl_val_length);
+ if (sysctl_val_length <= CTL_MAXNAME) {
+ barrier_var(sysctl_val_length);
+ sysctl_data->sysctl_val_length = sysctl_val_length;
+ payload += sysctl_val_length;
+ }
+
+ size_t sysctl_path_length = bpf_probe_read_str(payload, MAX_PATH,
+ BPF_CORE_READ(filp, f_path.dentry, d_name.name));
+ barrier_var(sysctl_path_length);
+ if (sysctl_path_length <= MAX_PATH) {
+ barrier_var(sysctl_path_length);
+ sysctl_data->sysctl_path_length = sysctl_path_length;
+ payload += sysctl_path_length;
+ }
+
+ bpf_stats_pre_submit_var_perf_event(&stats_ctx, &sysctl_data->meta);
+ unsigned long data_len = payload - (void*)sysctl_data;
+ data_len = data_len > sizeof(struct var_sysctl_data_t)
+ ? sizeof(struct var_sysctl_data_t)
+ : data_len;
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, sysctl_data, data_len);
+out:
+ bpf_stats_exit(&stats_ctx);
+ return 0;
+}
+
+SEC("tracepoint/syscalls/sys_enter_kill")
+int tracepoint__syscalls__sys_enter_kill(struct trace_event_raw_sys_enter* ctx)
+{
+ struct bpf_func_stats_ctx stats_ctx;
+
+ bpf_stats_enter(&stats_ctx, profiler_bpf_sys_enter_kill);
+ int pid = ctx->args[0];
+ int sig = ctx->args[1];
+ int ret = trace_var_sys_kill(ctx, pid, sig);
+ bpf_stats_exit(&stats_ctx);
+ return ret;
+}
+
+SEC("raw_tracepoint/sched_process_exit")
+int raw_tracepoint__sched_process_exit(void* ctx)
+{
+ int zero = 0;
+ struct bpf_func_stats_ctx stats_ctx;
+ bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_exit);
+
+ u32 tpid = get_userspace_pid();
+
+ struct var_kill_data_arr_t* arr_struct = bpf_map_lookup_elem(&var_tpid_to_data, &tpid);
+ struct var_kill_data_t* kill_data = bpf_map_lookup_elem(&data_heap, &zero);
+
+ if (arr_struct == NULL || kill_data == NULL)
+ goto out;
+
+ struct task_struct* task = (struct task_struct*)bpf_get_current_task();
+ struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
+
+#ifdef UNROLL
+#pragma unroll
+#endif
+ for (int i = 0; i < ARRAY_SIZE(arr_struct->array); i++) {
+ struct var_kill_data_t* past_kill_data = &arr_struct->array[i];
+
+ if (past_kill_data != NULL && past_kill_data->kill_target_pid == tpid) {
+ bpf_probe_read(kill_data, sizeof(*past_kill_data), past_kill_data);
+ void* payload = kill_data->payload;
+ size_t offset = kill_data->payload_length;
+ if (offset >= MAX_METADATA_PAYLOAD_LEN + MAX_CGROUP_PAYLOAD_LEN)
+ return 0;
+ payload += offset;
+
+ kill_data->kill_target_name_length = 0;
+ kill_data->kill_target_cgroup_proc_length = 0;
+
+ size_t comm_length = bpf_core_read_str(payload, TASK_COMM_LEN, &task->comm);
+ barrier_var(comm_length);
+ if (comm_length <= TASK_COMM_LEN) {
+ barrier_var(comm_length);
+ kill_data->kill_target_name_length = comm_length;
+ payload += comm_length;
+ }
+
+ size_t cgroup_proc_length = bpf_probe_read_str(payload, KILL_TARGET_LEN,
+ BPF_CORE_READ(proc_kernfs, name));
+ barrier_var(cgroup_proc_length);
+ if (cgroup_proc_length <= KILL_TARGET_LEN) {
+ barrier_var(cgroup_proc_length);
+ kill_data->kill_target_cgroup_proc_length = cgroup_proc_length;
+ payload += cgroup_proc_length;
+ }
+
+ bpf_stats_pre_submit_var_perf_event(&stats_ctx, &kill_data->meta);
+ unsigned long data_len = (void*)payload - (void*)kill_data;
+ data_len = data_len > sizeof(struct var_kill_data_t)
+ ? sizeof(struct var_kill_data_t)
+ : data_len;
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, kill_data, data_len);
+ }
+ }
+ bpf_map_delete_elem(&var_tpid_to_data, &tpid);
+out:
+ bpf_stats_exit(&stats_ctx);
+ return 0;
+}
+
+SEC("raw_tracepoint/sched_process_exec")
+int raw_tracepoint__sched_process_exec(struct bpf_raw_tracepoint_args* ctx)
+{
+ struct bpf_func_stats_ctx stats_ctx;
+ bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_exec);
+
+ struct linux_binprm* bprm = (struct linux_binprm*)ctx->args[2];
+ u64 inode = BPF_CORE_READ(bprm, file, f_inode, i_ino);
+
+ bool* should_filter_binprm = bpf_map_lookup_elem(&disallowed_exec_inodes, &inode);
+ if (should_filter_binprm != NULL)
+ goto out;
+
+ int zero = 0;
+ struct var_exec_data_t* proc_exec_data = bpf_map_lookup_elem(&data_heap, &zero);
+ if (!proc_exec_data)
+ goto out;
+
+ if (INODE_FILTER && inode != INODE_FILTER)
+ return 0;
+
+ u32 pid = get_userspace_pid();
+ struct task_struct* task = (struct task_struct*)bpf_get_current_task();
+
+ proc_exec_data->meta.type = EXEC_EVENT;
+ proc_exec_data->bin_path_length = 0;
+ proc_exec_data->cmdline_length = 0;
+ proc_exec_data->environment_length = 0;
+ void* payload = populate_var_metadata(&proc_exec_data->meta, task, pid,
+ proc_exec_data->payload);
+ payload = populate_cgroup_info(&proc_exec_data->cgroup_data, task, payload);
+
+ struct task_struct* parent_task = BPF_CORE_READ(task, real_parent);
+ proc_exec_data->parent_pid = BPF_CORE_READ(parent_task, tgid);
+ proc_exec_data->parent_uid = BPF_CORE_READ(parent_task, real_cred, uid.val);
+ proc_exec_data->parent_exec_id = BPF_CORE_READ(parent_task, self_exec_id);
+ proc_exec_data->parent_start_time = BPF_CORE_READ(parent_task, start_time);
+
+ const char* filename = BPF_CORE_READ(bprm, filename);
+ size_t bin_path_length = bpf_probe_read_str(payload, MAX_FILENAME_LEN, filename);
+ barrier_var(bin_path_length);
+ if (bin_path_length <= MAX_FILENAME_LEN) {
+ barrier_var(bin_path_length);
+ proc_exec_data->bin_path_length = bin_path_length;
+ payload += bin_path_length;
+ }
+
+ void* arg_start = (void*)BPF_CORE_READ(task, mm, arg_start);
+ void* arg_end = (void*)BPF_CORE_READ(task, mm, arg_end);
+ unsigned int cmdline_length = probe_read_lim(payload, arg_start,
+ arg_end - arg_start, MAX_ARGS_LEN);
+
+ if (cmdline_length <= MAX_ARGS_LEN) {
+ barrier_var(cmdline_length);
+ proc_exec_data->cmdline_length = cmdline_length;
+ payload += cmdline_length;
+ }
+
+ if (READ_ENVIRON_FROM_EXEC) {
+ void* env_start = (void*)BPF_CORE_READ(task, mm, env_start);
+ void* env_end = (void*)BPF_CORE_READ(task, mm, env_end);
+ unsigned long env_len = probe_read_lim(payload, env_start,
+ env_end - env_start, MAX_ENVIRON_LEN);
+		if (env_len <= MAX_ENVIRON_LEN) {
+ proc_exec_data->environment_length = env_len;
+ payload += env_len;
+ }
+ }
+
+ bpf_stats_pre_submit_var_perf_event(&stats_ctx, &proc_exec_data->meta);
+ unsigned long data_len = payload - (void*)proc_exec_data;
+ data_len = data_len > sizeof(struct var_exec_data_t)
+ ? sizeof(struct var_exec_data_t)
+ : data_len;
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, proc_exec_data, data_len);
+out:
+ bpf_stats_exit(&stats_ctx);
+ return 0;
+}
+
+SEC("kretprobe/do_filp_open")
+int kprobe_ret__do_filp_open(struct pt_regs* ctx)
+{
+ struct bpf_func_stats_ctx stats_ctx;
+ bpf_stats_enter(&stats_ctx, profiler_bpf_do_filp_open_ret);
+
+ struct file* filp = (struct file*)PT_REGS_RC_CORE(ctx);
+
+ if (filp == NULL || IS_ERR(filp))
+ goto out;
+ unsigned int flags = BPF_CORE_READ(filp, f_flags);
+ if ((flags & (O_RDWR | O_WRONLY)) == 0)
+ goto out;
+ if ((flags & O_TMPFILE) > 0)
+ goto out;
+ struct inode* file_inode = BPF_CORE_READ(filp, f_inode);
+ umode_t mode = BPF_CORE_READ(file_inode, i_mode);
+ if (S_ISDIR(mode) || S_ISCHR(mode) || S_ISBLK(mode) || S_ISFIFO(mode) ||
+ S_ISSOCK(mode))
+ goto out;
+
+ struct dentry* filp_dentry = BPF_CORE_READ(filp, f_path.dentry);
+ u32 device_id = 0;
+ u64 file_ino = 0;
+ if (!is_dentry_allowed_for_filemod(filp_dentry, &device_id, &file_ino))
+ goto out;
+
+ int zero = 0;
+ struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
+ if (!filemod_data)
+ goto out;
+
+ u32 pid = get_userspace_pid();
+ struct task_struct* task = (struct task_struct*)bpf_get_current_task();
+
+ filemod_data->meta.type = FILEMOD_EVENT;
+ filemod_data->fmod_type = FMOD_OPEN;
+ filemod_data->dst_flags = flags;
+ filemod_data->src_inode = 0;
+ filemod_data->dst_inode = file_ino;
+ filemod_data->src_device_id = 0;
+ filemod_data->dst_device_id = device_id;
+ filemod_data->src_filepath_length = 0;
+ filemod_data->dst_filepath_length = 0;
+
+ void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
+ filemod_data->payload);
+ payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
+
+ size_t len = read_absolute_file_path_from_dentry(filp_dentry, payload);
+ barrier_var(len);
+ if (len <= MAX_FILEPATH_LENGTH) {
+ barrier_var(len);
+ payload += len;
+ filemod_data->dst_filepath_length = len;
+ }
+ bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
+ unsigned long data_len = payload - (void*)filemod_data;
+ data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
+out:
+ bpf_stats_exit(&stats_ctx);
+ return 0;
+}
+
+SEC("kprobe/vfs_link")
+int BPF_KPROBE(kprobe__vfs_link,
+ struct dentry* old_dentry, struct inode* dir,
+ struct dentry* new_dentry, struct inode** delegated_inode)
+{
+ struct bpf_func_stats_ctx stats_ctx;
+ bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_link);
+
+ u32 src_device_id = 0;
+ u64 src_file_ino = 0;
+ u32 dst_device_id = 0;
+ u64 dst_file_ino = 0;
+ if (!is_dentry_allowed_for_filemod(old_dentry, &src_device_id, &src_file_ino) &&
+ !is_dentry_allowed_for_filemod(new_dentry, &dst_device_id, &dst_file_ino))
+ goto out;
+
+ int zero = 0;
+ struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
+ if (!filemod_data)
+ goto out;
+
+ u32 pid = get_userspace_pid();
+ struct task_struct* task = (struct task_struct*)bpf_get_current_task();
+
+ filemod_data->meta.type = FILEMOD_EVENT;
+ filemod_data->fmod_type = FMOD_LINK;
+ filemod_data->dst_flags = 0;
+ filemod_data->src_inode = src_file_ino;
+ filemod_data->dst_inode = dst_file_ino;
+ filemod_data->src_device_id = src_device_id;
+ filemod_data->dst_device_id = dst_device_id;
+ filemod_data->src_filepath_length = 0;
+ filemod_data->dst_filepath_length = 0;
+
+ void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
+ filemod_data->payload);
+ payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
+
+ size_t len = read_absolute_file_path_from_dentry(old_dentry, payload);
+ barrier_var(len);
+ if (len <= MAX_FILEPATH_LENGTH) {
+ barrier_var(len);
+ payload += len;
+ filemod_data->src_filepath_length = len;
+ }
+
+ len = read_absolute_file_path_from_dentry(new_dentry, payload);
+ barrier_var(len);
+ if (len <= MAX_FILEPATH_LENGTH) {
+ barrier_var(len);
+ payload += len;
+ filemod_data->dst_filepath_length = len;
+ }
+
+ bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
+ unsigned long data_len = payload - (void*)filemod_data;
+ data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
+out:
+ bpf_stats_exit(&stats_ctx);
+ return 0;
+}
+
+SEC("kprobe/vfs_symlink")
+int BPF_KPROBE(kprobe__vfs_symlink, struct inode* dir, struct dentry* dentry,
+ const char* oldname)
+{
+ struct bpf_func_stats_ctx stats_ctx;
+ bpf_stats_enter(&stats_ctx, profiler_bpf_vfs_symlink);
+
+ u32 dst_device_id = 0;
+ u64 dst_file_ino = 0;
+ if (!is_dentry_allowed_for_filemod(dentry, &dst_device_id, &dst_file_ino))
+ goto out;
+
+ int zero = 0;
+ struct var_filemod_data_t* filemod_data = bpf_map_lookup_elem(&data_heap, &zero);
+ if (!filemod_data)
+ goto out;
+
+ u32 pid = get_userspace_pid();
+ struct task_struct* task = (struct task_struct*)bpf_get_current_task();
+
+ filemod_data->meta.type = FILEMOD_EVENT;
+ filemod_data->fmod_type = FMOD_SYMLINK;
+ filemod_data->dst_flags = 0;
+ filemod_data->src_inode = 0;
+ filemod_data->dst_inode = dst_file_ino;
+ filemod_data->src_device_id = 0;
+ filemod_data->dst_device_id = dst_device_id;
+ filemod_data->src_filepath_length = 0;
+ filemod_data->dst_filepath_length = 0;
+
+ void* payload = populate_var_metadata(&filemod_data->meta, task, pid,
+ filemod_data->payload);
+ payload = populate_cgroup_info(&filemod_data->cgroup_data, task, payload);
+
+ size_t len = bpf_probe_read_str(payload, MAX_FILEPATH_LENGTH, oldname);
+ barrier_var(len);
+ if (len <= MAX_FILEPATH_LENGTH) {
+ barrier_var(len);
+ payload += len;
+ filemod_data->src_filepath_length = len;
+ }
+ len = read_absolute_file_path_from_dentry(dentry, payload);
+ barrier_var(len);
+ if (len <= MAX_FILEPATH_LENGTH) {
+ barrier_var(len);
+ payload += len;
+ filemod_data->dst_filepath_length = len;
+ }
+ bpf_stats_pre_submit_var_perf_event(&stats_ctx, &filemod_data->meta);
+ unsigned long data_len = payload - (void*)filemod_data;
+ data_len = data_len > sizeof(*filemod_data) ? sizeof(*filemod_data) : data_len;
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, filemod_data, data_len);
+out:
+ bpf_stats_exit(&stats_ctx);
+ return 0;
+}
+
+SEC("raw_tracepoint/sched_process_fork")
+int raw_tracepoint__sched_process_fork(struct bpf_raw_tracepoint_args* ctx)
+{
+ struct bpf_func_stats_ctx stats_ctx;
+ bpf_stats_enter(&stats_ctx, profiler_bpf_sched_process_fork);
+
+ int zero = 0;
+ struct var_fork_data_t* fork_data = bpf_map_lookup_elem(&data_heap, &zero);
+ if (!fork_data)
+ goto out;
+
+ struct task_struct* parent = (struct task_struct*)ctx->args[0];
+ struct task_struct* child = (struct task_struct*)ctx->args[1];
+ fork_data->meta.type = FORK_EVENT;
+
+ void* payload = populate_var_metadata(&fork_data->meta, child,
+ BPF_CORE_READ(child, pid), fork_data->payload);
+ fork_data->parent_pid = BPF_CORE_READ(parent, pid);
+ fork_data->parent_exec_id = BPF_CORE_READ(parent, self_exec_id);
+ fork_data->parent_start_time = BPF_CORE_READ(parent, start_time);
+ bpf_stats_pre_submit_var_perf_event(&stats_ctx, &fork_data->meta);
+
+ unsigned long data_len = payload - (void*)fork_data;
+ data_len = data_len > sizeof(*fork_data) ? sizeof(*fork_data) : data_len;
+ bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, fork_data, data_len);
+out:
+ bpf_stats_exit(&stats_ctx);
+ return 0;
+}
+char _license[] SEC("license") = "GPL";
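
The data_heap map above is the standard workaround for the 512-byte BPF stack limit: a single-slot per-CPU array serves as scratch memory for event structs far larger than any stack frame could hold. The idiom in isolation (names illustrative, types assumed from vmlinux.h as in the file above):

struct big_event {
	char payload[8192];	/* far beyond the 512-byte stack limit */
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, u32);
	__type(value, struct big_event);
} scratch_heap SEC(".maps");

static struct big_event *get_scratch(void)
{
	u32 zero = 0;

	/* Per-CPU slot doubles as lock-free scratch space */
	return bpf_map_lookup_elem(&scratch_heap, &zero);
}
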
diff --git a/tools/testing/selftests/bpf/progs/profiler1.c b/tools/testing/selftests/bpf/progs/profiler1.c
new file mode 100644
index 000000000000..4df9088bfc00
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/profiler1.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
+#define UNROLL
+#define INLINE __always_inline
+#include "profiler.inc.h"
diff --git a/tools/testing/selftests/bpf/progs/profiler2.c b/tools/testing/selftests/bpf/progs/profiler2.c
new file mode 100644
index 000000000000..0f32a3cbf556
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/profiler2.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define barrier_var(var) /**/
+/* UNROLL deliberately left undefined */
+#define INLINE /**/
+#include "profiler.inc.h"
diff --git a/tools/testing/selftests/bpf/progs/profiler3.c b/tools/testing/selftests/bpf/progs/profiler3.c
new file mode 100644
index 000000000000..6249fc31ccb0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/profiler3.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define barrier_var(var) /**/
+#define UNROLL
+#define INLINE __noinline
+#include "profiler.inc.h"
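
profiler1/2/3 build the same profiler.inc.h three ways: fully inlined with unrolled loops and barrier_var, with none of those aids (relying on the verifier's bounded-loop support), and with __noinline subprograms. barrier_var() is an empty asm that launders a value through a register so clang cannot fold or re-derive the bounds check the verifier depends on. The pattern, sketched as it would appear inside a program body:

#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))

/* Without the barriers, clang may transform the comparison into a
 * form whose value range the verifier can no longer prove.
 */
size_t len = bpf_probe_read_str(dst, MAX_PATH, src);

barrier_var(len);
if (len <= MAX_PATH) {
	barrier_var(len);
	dst += len;
}
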
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index cc615b82b56e..2fb7adafb6b6 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -67,7 +67,12 @@ typedef struct {
void* co_name; // PyCodeObject.co_name
} FrameData;
-static __always_inline void *get_thread_state(void *tls_base, PidData *pidData)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void *get_thread_state(void *tls_base, PidData *pidData)
{
void* thread_state;
int key;
@@ -155,7 +160,9 @@ struct {
} stackmap SEC(".maps");
#ifdef GLOBAL_FUNC
-__attribute__((noinline))
+__noinline
+#elif defined(SUBPROGS)
+static __noinline
#else
static __always_inline
#endif
diff --git a/tools/testing/selftests/bpf/progs/pyperf_subprogs.c b/tools/testing/selftests/bpf/progs/pyperf_subprogs.c
new file mode 100644
index 000000000000..60e27a7f0cca
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/pyperf_subprogs.c
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+#define STACK_MAX_LEN 50
+#define SUBPROGS
+#include "pyperf.h"
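
The SUBPROGS/GLOBAL_FUNC knobs above select between three flavors of the same helper, which the verifier treats quite differently. Roughly (names illustrative):

/* Inlined: no call emitted, body copied into each use site. */
static __always_inline int f_inline(int x) { return x + 1; }

/* Static subprogram: a real BPF-to-BPF call, verified in the
 * context of each caller.
 */
static __noinline int f_subprog(int x) { return x + 1; }

/* Global function: verified once, independently, against its own
 * prototype rather than per-caller argument state.
 */
__noinline int f_global(int x) { return x + 1; }
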
diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h
index ad61b722a9de..7de534f38c3f 100644
--- a/tools/testing/selftests/bpf/progs/strobemeta.h
+++ b/tools/testing/selftests/bpf/progs/strobemeta.h
@@ -266,8 +266,12 @@ struct tls_index {
uint64_t offset;
};
-static __always_inline void *calc_location(struct strobe_value_loc *loc,
- void *tls_base)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void *calc_location(struct strobe_value_loc *loc, void *tls_base)
{
/*
* tls_mode value is:
@@ -327,10 +331,15 @@ static __always_inline void *calc_location(struct strobe_value_loc *loc,
: NULL;
}
-static __always_inline void read_int_var(struct strobemeta_cfg *cfg,
- size_t idx, void *tls_base,
- struct strobe_value_generic *value,
- struct strobemeta_payload *data)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void read_int_var(struct strobemeta_cfg *cfg,
+ size_t idx, void *tls_base,
+ struct strobe_value_generic *value,
+ struct strobemeta_payload *data)
{
void *location = calc_location(&cfg->int_locs[idx], tls_base);
if (!location)
@@ -440,8 +449,13 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg,
* read_strobe_meta returns NULL, if no metadata was read; otherwise returns
* pointer to *right after* payload ends
*/
-static __always_inline void *read_strobe_meta(struct task_struct *task,
- struct strobemeta_payload *data)
+#ifdef SUBPROGS
+__noinline
+#else
+__always_inline
+#endif
+static void *read_strobe_meta(struct task_struct *task,
+ struct strobemeta_payload *data)
{
pid_t pid = bpf_get_current_pid_tgid() >> 32;
struct strobe_value_generic value = {0};
diff --git a/tools/testing/selftests/bpf/progs/strobemeta_subprogs.c b/tools/testing/selftests/bpf/progs/strobemeta_subprogs.c
new file mode 100644
index 000000000000..b6c01f8fc559
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/strobemeta_subprogs.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+// Copyright (c) 2019 Facebook
+
+#define STROBE_MAX_INTS 2
+#define STROBE_MAX_STRS 25
+#define STROBE_MAX_MAPS 13
+#define STROBE_MAX_MAP_ENTRIES 20
+#define NO_UNROLL
+#define SUBPROGS
+#include "strobemeta.h"
diff --git a/tools/testing/selftests/bpf/progs/tailcall1.c b/tools/testing/selftests/bpf/progs/tailcall1.c
index 1f407e65ae52..7115bcefbe8a 100644
--- a/tools/testing/selftests/bpf/progs/tailcall1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall1.c
@@ -26,20 +26,20 @@ int entry(struct __sk_buff *skb)
/* Multiple locations to make sure we patch
* all of them.
*/
- bpf_tail_call(skb, &jmp_table, 0);
- bpf_tail_call(skb, &jmp_table, 0);
- bpf_tail_call(skb, &jmp_table, 0);
- bpf_tail_call(skb, &jmp_table, 0);
-
- bpf_tail_call(skb, &jmp_table, 1);
- bpf_tail_call(skb, &jmp_table, 1);
- bpf_tail_call(skb, &jmp_table, 1);
- bpf_tail_call(skb, &jmp_table, 1);
-
- bpf_tail_call(skb, &jmp_table, 2);
- bpf_tail_call(skb, &jmp_table, 2);
- bpf_tail_call(skb, &jmp_table, 2);
- bpf_tail_call(skb, &jmp_table, 2);
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ bpf_tail_call_static(skb, &jmp_table, 0);
+
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ bpf_tail_call_static(skb, &jmp_table, 1);
+
+ bpf_tail_call_static(skb, &jmp_table, 2);
+ bpf_tail_call_static(skb, &jmp_table, 2);
+ bpf_tail_call_static(skb, &jmp_table, 2);
+ bpf_tail_call_static(skb, &jmp_table, 2);
return 3;
}
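
bpf_tail_call_static() is the libbpf wrapper introduced alongside this series: it insists the slot index be a compile-time constant and pins the helper arguments itself, so the kernel sees an immediate index it can patch into a direct jump rather than a retpoline-unfriendly indirect one. Roughly, from bpf_helpers.h (paraphrased; treat the details as a sketch):

static __always_inline void
bpf_tail_call_static(void *ctx, const void *map, const __u32 slot)
{
	if (!__builtin_constant_p(slot))
		__bpf_unreachable();
	/* Place args in r1-r3 with the slot as an immediate, then
	 * invoke the tail_call helper (BPF_FUNC_tail_call == 12).
	 */
	asm volatile("r1 = %[ctx]\n\t"
		     "r2 = %[map]\n\t"
		     "r3 = %[slot]\n\t"
		     "call 12"
		     :: [ctx]"r"(ctx), [map]"r"(map), [slot]"i"(slot)
		     : "r0", "r1", "r2", "r3", "r4", "r5");
}
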
diff --git a/tools/testing/selftests/bpf/progs/tailcall2.c b/tools/testing/selftests/bpf/progs/tailcall2.c
index a093e739cf0e..0431e4fe7efd 100644
--- a/tools/testing/selftests/bpf/progs/tailcall2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall2.c
@@ -13,14 +13,14 @@ struct {
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
{
- bpf_tail_call(skb, &jmp_table, 1);
+ bpf_tail_call_static(skb, &jmp_table, 1);
return 0;
}
SEC("classifier/1")
int bpf_func_1(struct __sk_buff *skb)
{
- bpf_tail_call(skb, &jmp_table, 2);
+ bpf_tail_call_static(skb, &jmp_table, 2);
return 1;
}
@@ -33,25 +33,25 @@ int bpf_func_2(struct __sk_buff *skb)
SEC("classifier/3")
int bpf_func_3(struct __sk_buff *skb)
{
- bpf_tail_call(skb, &jmp_table, 4);
+ bpf_tail_call_static(skb, &jmp_table, 4);
return 3;
}
SEC("classifier/4")
int bpf_func_4(struct __sk_buff *skb)
{
- bpf_tail_call(skb, &jmp_table, 3);
+ bpf_tail_call_static(skb, &jmp_table, 3);
return 4;
}
SEC("classifier")
int entry(struct __sk_buff *skb)
{
- bpf_tail_call(skb, &jmp_table, 0);
+ bpf_tail_call_static(skb, &jmp_table, 0);
/* Check multi-prog update. */
- bpf_tail_call(skb, &jmp_table, 2);
+ bpf_tail_call_static(skb, &jmp_table, 2);
/* Check tail call limit. */
- bpf_tail_call(skb, &jmp_table, 3);
+ bpf_tail_call_static(skb, &jmp_table, 3);
return 3;
}
diff --git a/tools/testing/selftests/bpf/progs/tailcall3.c b/tools/testing/selftests/bpf/progs/tailcall3.c
index cabda877cf0a..739dc2a51e74 100644
--- a/tools/testing/selftests/bpf/progs/tailcall3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall3.c
@@ -16,14 +16,14 @@ SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)
{
count++;
- bpf_tail_call(skb, &jmp_table, 0);
+ bpf_tail_call_static(skb, &jmp_table, 0);
return 1;
}
SEC("classifier")
int entry(struct __sk_buff *skb)
{
- bpf_tail_call(skb, &jmp_table, 0);
+ bpf_tail_call_static(skb, &jmp_table, 0);
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
new file mode 100644
index 000000000000..0103f3dd9f02
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf1.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+#define TAIL_FUNC(x) \
+ SEC("classifier/" #x) \
+ int bpf_func_##x(struct __sk_buff *skb) \
+ { \
+ return x; \
+ }
+TAIL_FUNC(0)
+TAIL_FUNC(1)
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+
+ return skb->len * 2;
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 1);
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
new file mode 100644
index 000000000000..7b1c04183824
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf2.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ if (load_byte(skb, 0))
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ else
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return 1;
+}
+
+static volatile int count;
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ count++;
+ return subprog_tail(skb);
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+
+ return 0;
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
new file mode 100644
index 000000000000..0d5482bea6c9
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf3.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include "bpf_legacy.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 2);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+__noinline
+int subprog_tail2(struct __sk_buff *skb)
+{
+ volatile char arr[64] = {};
+
+ if (load_word(skb, 0) || load_half(skb, 0))
+ bpf_tail_call_static(skb, &jmp_table, 10);
+ else
+ bpf_tail_call_static(skb, &jmp_table, 1);
+
+ return skb->len;
+}
+
+static __noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ volatile char arr[64] = {};
+
+ bpf_tail_call_static(skb, &jmp_table, 0);
+
+ return skb->len * 2;
+}
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ volatile char arr[128] = {};
+
+ return subprog_tail2(skb);
+}
+
+SEC("classifier/1")
+int bpf_func_1(struct __sk_buff *skb)
+{
+ volatile char arr[128] = {};
+
+ return skb->len * 3;
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ volatile char arr[128] = {};
+
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
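
The volatile stack arrays above are the point of this test: when tail calls are combined with BPF-to-BPF calls, this series caps each function's stack frame at 256 bytes (half of MAX_BPF_STACK) so the JIT can bound the combined depth. A frame that oversteps the cap, sketched (reusing the jmp_table above; this should be rejected at load time under the combined limit):

static __noinline int too_big(struct __sk_buff *skb)
{
	volatile char arr[300] = {};	/* > 256-byte per-frame cap */

	bpf_tail_call_static(skb, &jmp_table, 0);
	return arr[0];
}
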
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
new file mode 100644
index 000000000000..9a1b166b7fbe
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf4.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} jmp_table SEC(".maps");
+
+static volatile int count;
+
+__noinline
+int subprog_tail_2(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 2);
+ return skb->len * 3;
+}
+
+__noinline
+int subprog_tail_1(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 1);
+ return skb->len * 2;
+}
+
+__noinline
+int subprog_tail(struct __sk_buff *skb)
+{
+ bpf_tail_call_static(skb, &jmp_table, 0);
+ return skb->len;
+}
+
+SEC("classifier/1")
+int bpf_func_1(struct __sk_buff *skb)
+{
+ return subprog_tail_2(skb);
+}
+
+SEC("classifier/2")
+int bpf_func_2(struct __sk_buff *skb)
+{
+ count++;
+ return subprog_tail_2(skb);
+}
+
+SEC("classifier/0")
+int bpf_func_0(struct __sk_buff *skb)
+{
+ return subprog_tail_1(skb);
+}
+
+SEC("classifier")
+int entry(struct __sk_buff *skb)
+{
+ return subprog_tail(skb);
+}
+
+char __license[] SEC("license") = "GPL";
+int _version SEC("version") = 1;
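
Here bpf_func_2 tail-calls back into itself (via subprog_tail_2 into slot 2), so the chain runs until the kernel's tail-call budget (MAX_TAIL_CALL_CNT, 32) is spent; the global count records how many rounds survived. A hypothetical userspace check, assuming a file descriptor for the program's .bss map and that count is its first variable:

#include <bpf/bpf.h>

static int read_count(int bss_map_fd)
{
	__u32 key = 0;
	int val = 0;

	if (bpf_map_lookup_elem(bss_map_fd, &key, &val))
		return -1;
	return val;	/* rounds completed before the tail-call cap hit */
}
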
diff --git a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
index e5093796be97..c1e0c8c7c55f 100644
--- a/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
+++ b/tools/testing/selftests/bpf/progs/test_btf_map_in_map.c
@@ -11,6 +11,13 @@ struct inner_map {
} inner_map1 SEC(".maps"),
inner_map2 SEC(".maps");
+struct inner_map_sz2 {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, int);
+} inner_map_sz2 SEC(".maps");
+
struct outer_arr {
__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
__uint(max_entries, 3);
@@ -34,6 +41,43 @@ struct outer_arr {
.values = { (void *)&inner_map1, 0, (void *)&inner_map2 },
};
+struct inner_map_sz3 {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(map_flags, BPF_F_INNER_MAP);
+ __uint(max_entries, 3);
+ __type(key, int);
+ __type(value, int);
+} inner_map3 SEC(".maps"),
+ inner_map4 SEC(".maps");
+
+struct inner_map_sz4 {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(map_flags, BPF_F_INNER_MAP);
+ __uint(max_entries, 5);
+ __type(key, int);
+ __type(value, int);
+} inner_map5 SEC(".maps");
+
+struct outer_arr_dyn {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 3);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __array(values, struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(map_flags, BPF_F_INNER_MAP);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+ });
+} outer_arr_dyn SEC(".maps") = {
+ .values = {
+ [0] = (void *)&inner_map3,
+ [1] = (void *)&inner_map4,
+ [2] = (void *)&inner_map5,
+ },
+};
+
struct outer_hash {
__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
__uint(max_entries, 5);
@@ -50,6 +94,30 @@ struct outer_hash {
},
};
+struct sockarr_sz1 {
+ __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ __uint(max_entries, 1);
+ __type(key, int);
+ __type(value, int);
+} sockarr_sz1 SEC(".maps");
+
+struct sockarr_sz2 {
+ __uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, int);
+} sockarr_sz2 SEC(".maps");
+
+struct outer_sockarr_sz1 {
+ __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __array(values, struct sockarr_sz1);
+} outer_sockarr SEC(".maps") = {
+ .values = { (void *)&sockarr_sz1 },
+};
+
int input = 0;
SEC("raw_tp/sys_enter")
@@ -70,6 +138,12 @@ int handle__sys_enter(void *ctx)
val = input + 1;
bpf_map_update_elem(inner_map, &key, &val, 0);
+ inner_map = bpf_map_lookup_elem(&outer_arr_dyn, &key);
+ if (!inner_map)
+ return 1;
+ val = input + 2;
+ bpf_map_update_elem(inner_map, &key, &val, 0);
+
return 0;
}
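
The outer_arr_dyn addition exercises BPF_F_INNER_MAP: without the flag, every inner map of an array-of-maps has to match the template's max_entries, since lookups get inlined against that constant; flagged maps opt out of the specialization, which is how inner_map3/inner_map4 (3 entries) and inner_map5 (5 entries) coexist behind a template declared with max_entries 1. The same flag lets user space swap in inner maps of any size at runtime; a hedged sketch using the current bpf_map_create() API (map names are from the source above, everything else is illustrative):

#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* replace outer_arr_dyn[2] with a freshly created 16-entry inner array */
int swap_inner(struct bpf_object *obj)
{
	LIBBPF_OPTS(bpf_map_create_opts, opts, .map_flags = BPF_F_INNER_MAP);
	int outer_fd, inner_fd, key = 2, err;

	inner_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "dyn_inner",
				  sizeof(int), sizeof(int), 16, &opts);
	if (inner_fd < 0)
		return inner_fd;

	outer_fd = bpf_map__fd(bpf_object__find_map_by_name(obj, "outer_arr_dyn"));
	err = bpf_map_update_elem(outer_fd, &key, &inner_fd, BPF_ANY);
	close(inner_fd);	/* the outer map now holds its own reference */
	return err;
}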
diff --git a/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
new file mode 100644
index 000000000000..9a6b85dd52d2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_btf_skc_cls_ingress.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <string.h>
+#include <errno.h>
+#include <netinet/in.h>
+#include <linux/stddef.h>
+#include <linux/bpf.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/if_ether.h>
+#include <linux/pkt_cls.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include "bpf_tcp_helpers.h"
+
+struct sockaddr_in6 srv_sa6 = {};
+__u16 listen_tp_sport = 0;
+__u16 req_sk_sport = 0;
+__u32 recv_cookie = 0;
+__u32 gen_cookie = 0;
+__u32 linum = 0;
+
+#define LOG() ({ if (!linum) linum = __LINE__; })
+
+static void test_syncookie_helper(struct ipv6hdr *ip6h, struct tcphdr *th,
+ struct tcp_sock *tp,
+ struct __sk_buff *skb)
+{
+ if (th->syn) {
+ __s64 mss_cookie;
+ void *data_end;
+
+ data_end = (void *)(long)(skb->data_end);
+
+ if (th->doff * 4 != 40) {
+ LOG();
+ return;
+ }
+
+ if ((void *)th + 40 > data_end) {
+ LOG();
+ return;
+ }
+
+ mss_cookie = bpf_tcp_gen_syncookie(tp, ip6h, sizeof(*ip6h),
+ th, 40);
+ if (mss_cookie < 0) {
+ if (mss_cookie != -ENOENT)
+ LOG();
+ } else {
+ gen_cookie = (__u32)mss_cookie;
+ }
+ } else if (gen_cookie) {
+ /* It was in cookie mode */
+ int ret = bpf_tcp_check_syncookie(tp, ip6h, sizeof(*ip6h),
+ th, sizeof(*th));
+
+ if (ret < 0) {
+ if (ret != -ENOENT)
+ LOG();
+ } else {
+ recv_cookie = bpf_ntohl(th->ack_seq) - 1;
+ }
+ }
+}
+
+static int handle_ip6_tcp(struct ipv6hdr *ip6h, struct __sk_buff *skb)
+{
+ struct bpf_sock_tuple *tuple;
+ struct bpf_sock *bpf_skc;
+ unsigned int tuple_len;
+ struct tcphdr *th;
+ void *data_end;
+
+ data_end = (void *)(long)(skb->data_end);
+
+ th = (struct tcphdr *)(ip6h + 1);
+ if (th + 1 > data_end)
+ return TC_ACT_OK;
+
+ /* Is it the testing traffic? */
+ if (th->dest != srv_sa6.sin6_port)
+ return TC_ACT_OK;
+
+ tuple_len = sizeof(tuple->ipv6);
+ tuple = (struct bpf_sock_tuple *)&ip6h->saddr;
+ if ((void *)tuple + tuple_len > data_end) {
+ LOG();
+ return TC_ACT_OK;
+ }
+
+ bpf_skc = bpf_skc_lookup_tcp(skb, tuple, tuple_len,
+ BPF_F_CURRENT_NETNS, 0);
+ if (!bpf_skc) {
+ LOG();
+ return TC_ACT_OK;
+ }
+
+ if (bpf_skc->state == BPF_TCP_NEW_SYN_RECV) {
+ struct request_sock *req_sk;
+
+ req_sk = (struct request_sock *)bpf_skc_to_tcp_request_sock(bpf_skc);
+ if (!req_sk) {
+ LOG();
+ goto release;
+ }
+
+ if (bpf_sk_assign(skb, req_sk, 0)) {
+ LOG();
+ goto release;
+ }
+
+ req_sk_sport = req_sk->__req_common.skc_num;
+
+ bpf_sk_release(req_sk);
+ return TC_ACT_OK;
+ } else if (bpf_skc->state == BPF_TCP_LISTEN) {
+ struct tcp_sock *tp;
+
+ tp = bpf_skc_to_tcp_sock(bpf_skc);
+ if (!tp) {
+ LOG();
+ goto release;
+ }
+
+ if (bpf_sk_assign(skb, tp, 0)) {
+ LOG();
+ goto release;
+ }
+
+ listen_tp_sport = tp->inet_conn.icsk_inet.sk.__sk_common.skc_num;
+
+ test_syncookie_helper(ip6h, th, tp, skb);
+ bpf_sk_release(tp);
+ return TC_ACT_OK;
+ }
+
+ if (bpf_sk_assign(skb, bpf_skc, 0))
+ LOG();
+
+release:
+ bpf_sk_release(bpf_skc);
+ return TC_ACT_OK;
+}
+
+SEC("classifier/ingress")
+int cls_ingress(struct __sk_buff *skb)
+{
+ struct ipv6hdr *ip6h;
+ struct ethhdr *eth;
+ void *data_end;
+
+ data_end = (void *)(long)(skb->data_end);
+
+ eth = (struct ethhdr *)(long)(skb->data);
+ if (eth + 1 > data_end)
+ return TC_ACT_OK;
+
+ if (eth->h_proto != bpf_htons(ETH_P_IPV6))
+ return TC_ACT_OK;
+
+ ip6h = (struct ipv6hdr *)(eth + 1);
+ if (ip6h + 1 > data_end)
+ return TC_ACT_OK;
+
+ if (ip6h->nexthdr == IPPROTO_TCP)
+ return handle_ip6_tcp(ip6h, skb);
+
+ return TC_ACT_OK;
+}
+
+char _license[] SEC("license") = "GPL";
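
To do anything, cls_ingress has to sit on clsact ingress of the test interface, and user space must fill in srv_sa6 first, since the program filters on srv_sa6.sin6_port. A hedged attach sketch with libbpf's TC APIs, which postdate this test (the harness may well shell out to tc instead); loopback as the interface is an assumption:

#include <net/if.h>
#include <bpf/libbpf.h>

int attach_ingress(int prog_fd)
{
	int ifindex = if_nametoindex("lo");	/* assumed test interface */
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook,
		.ifindex = ifindex,
		.attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .prog_fd = prog_fd);
	int err;

	err = bpf_tc_hook_create(&hook);	/* creates the clsact qdisc */
	if (err && err != -EEXIST)
		return err;
	return bpf_tc_attach(&hook, &opts);
}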
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect.c b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
index f0b72e86bee5..c9f8464996ea 100644
--- a/tools/testing/selftests/bpf/progs/test_cls_redirect.c
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect.c
@@ -22,6 +22,12 @@
#include "test_cls_redirect.h"
+#ifdef SUBPROGS
+#define INLINING __noinline
+#else
+#define INLINING __always_inline
+#endif
+
#define offsetofend(TYPE, MEMBER) \
(offsetof(TYPE, MEMBER) + sizeof((((TYPE *)0)->MEMBER)))
@@ -125,7 +131,7 @@ typedef struct buf {
uint8_t *const tail;
} buf_t;
-static size_t buf_off(const buf_t *buf)
+static __always_inline size_t buf_off(const buf_t *buf)
{
/* Clang seems to optimize constructs like
* a - b + c
@@ -145,7 +151,7 @@ static size_t buf_off(const buf_t *buf)
return off;
}
-static bool buf_copy(buf_t *buf, void *dst, size_t len)
+static __always_inline bool buf_copy(buf_t *buf, void *dst, size_t len)
{
if (bpf_skb_load_bytes(buf->skb, buf_off(buf), dst, len)) {
return false;
@@ -155,7 +161,7 @@ static bool buf_copy(buf_t *buf, void *dst, size_t len)
return true;
}
-static bool buf_skip(buf_t *buf, const size_t len)
+static __always_inline bool buf_skip(buf_t *buf, const size_t len)
{
/* Check whether off + len is valid in the non-linear part. */
if (buf_off(buf) + len > buf->skb->len) {
@@ -173,7 +179,7 @@ static bool buf_skip(buf_t *buf, const size_t len)
* If scratch is not NULL, the function will attempt to load non-linear
* data via bpf_skb_load_bytes. On success, scratch is returned.
*/
-static void *buf_assign(buf_t *buf, const size_t len, void *scratch)
+static __always_inline void *buf_assign(buf_t *buf, const size_t len, void *scratch)
{
if (buf->head + len > buf->tail) {
if (scratch == NULL) {
@@ -188,7 +194,7 @@ static void *buf_assign(buf_t *buf, const size_t len, void *scratch)
return ptr;
}
-static bool pkt_skip_ipv4_options(buf_t *buf, const struct iphdr *ipv4)
+static INLINING bool pkt_skip_ipv4_options(buf_t *buf, const struct iphdr *ipv4)
{
if (ipv4->ihl <= 5) {
return true;
@@ -197,13 +203,13 @@ static bool pkt_skip_ipv4_options(buf_t *buf, const struct iphdr *ipv4)
return buf_skip(buf, (ipv4->ihl - 5) * 4);
}
-static bool ipv4_is_fragment(const struct iphdr *ip)
+static INLINING bool ipv4_is_fragment(const struct iphdr *ip)
{
uint16_t frag_off = ip->frag_off & bpf_htons(IP_OFFSET_MASK);
return (ip->frag_off & bpf_htons(IP_MF)) != 0 || frag_off > 0;
}
-static struct iphdr *pkt_parse_ipv4(buf_t *pkt, struct iphdr *scratch)
+static __always_inline struct iphdr *pkt_parse_ipv4(buf_t *pkt, struct iphdr *scratch)
{
struct iphdr *ipv4 = buf_assign(pkt, sizeof(*ipv4), scratch);
if (ipv4 == NULL) {
@@ -222,7 +228,7 @@ static struct iphdr *pkt_parse_ipv4(buf_t *pkt, struct iphdr *scratch)
}
/* Parse the L4 ports from a packet, assuming a layout like TCP or UDP. */
-static bool pkt_parse_icmp_l4_ports(buf_t *pkt, flow_ports_t *ports)
+static INLINING bool pkt_parse_icmp_l4_ports(buf_t *pkt, flow_ports_t *ports)
{
if (!buf_copy(pkt, ports, sizeof(*ports))) {
return false;
@@ -237,7 +243,7 @@ static bool pkt_parse_icmp_l4_ports(buf_t *pkt, flow_ports_t *ports)
return true;
}
-static uint16_t pkt_checksum_fold(uint32_t csum)
+static INLINING uint16_t pkt_checksum_fold(uint32_t csum)
{
/* The highest reasonable value for an IPv4 header
* checksum requires two folds, so we just do that always.
@@ -247,7 +253,7 @@ static uint16_t pkt_checksum_fold(uint32_t csum)
return (uint16_t)~csum;
}
-static void pkt_ipv4_checksum(struct iphdr *iph)
+static INLINING void pkt_ipv4_checksum(struct iphdr *iph)
{
iph->check = 0;
@@ -268,10 +274,11 @@ static void pkt_ipv4_checksum(struct iphdr *iph)
iph->check = pkt_checksum_fold(acc);
}
-static bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
- const struct ipv6hdr *ipv6,
- uint8_t *upper_proto,
- bool *is_fragment)
+static INLINING
+bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
+ const struct ipv6hdr *ipv6,
+ uint8_t *upper_proto,
+ bool *is_fragment)
{
/* We understand five extension headers.
* https://tools.ietf.org/html/rfc8200#section-4.1 states that all
@@ -336,7 +343,7 @@ static bool pkt_skip_ipv6_extension_headers(buf_t *pkt,
* scratch is allocated on the stack. However, this usage should be safe since
 * it's the caller's stack after all.
*/
-static inline __attribute__((__always_inline__)) struct ipv6hdr *
+static __always_inline struct ipv6hdr *
pkt_parse_ipv6(buf_t *pkt, struct ipv6hdr *scratch, uint8_t *proto,
bool *is_fragment)
{
@@ -354,20 +361,20 @@ pkt_parse_ipv6(buf_t *pkt, struct ipv6hdr *scratch, uint8_t *proto,
/* Global metrics, per CPU
*/
-struct bpf_map_def metrics_map SEC("maps") = {
- .type = BPF_MAP_TYPE_PERCPU_ARRAY,
- .key_size = sizeof(unsigned int),
- .value_size = sizeof(metrics_t),
- .max_entries = 1,
-};
-
-static metrics_t *get_global_metrics(void)
+struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(max_entries, 1);
+ __type(key, unsigned int);
+ __type(value, metrics_t);
+} metrics_map SEC(".maps");
+
+static INLINING metrics_t *get_global_metrics(void)
{
uint64_t key = 0;
return bpf_map_lookup_elem(&metrics_map, &key);
}
-static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
+static INLINING ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
{
const int payload_off =
sizeof(*encap) +
@@ -388,8 +395,8 @@ static ret_t accept_locally(struct __sk_buff *skb, encap_headers_t *encap)
return bpf_redirect(skb->ifindex, BPF_F_INGRESS);
}
-static ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
- struct in_addr *next_hop, metrics_t *metrics)
+static INLINING ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
+ struct in_addr *next_hop, metrics_t *metrics)
{
metrics->forwarded_packets_total_gre++;
@@ -509,8 +516,8 @@ static ret_t forward_with_gre(struct __sk_buff *skb, encap_headers_t *encap,
return bpf_redirect(skb->ifindex, 0);
}
-static ret_t forward_to_next_hop(struct __sk_buff *skb, encap_headers_t *encap,
- struct in_addr *next_hop, metrics_t *metrics)
+static INLINING ret_t forward_to_next_hop(struct __sk_buff *skb, encap_headers_t *encap,
+ struct in_addr *next_hop, metrics_t *metrics)
{
/* swap L2 addresses */
/* This assumes that packets are received from a router.
@@ -546,7 +553,7 @@ static ret_t forward_to_next_hop(struct __sk_buff *skb, encap_headers_t *encap,
return bpf_redirect(skb->ifindex, 0);
}
-static ret_t skip_next_hops(buf_t *pkt, int n)
+static INLINING ret_t skip_next_hops(buf_t *pkt, int n)
{
switch (n) {
case 1:
@@ -566,8 +573,8 @@ static ret_t skip_next_hops(buf_t *pkt, int n)
* pkt is positioned just after the variable length GLB header
* iff the call is successful.
*/
-static ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
- struct in_addr *next_hop)
+static INLINING ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
+ struct in_addr *next_hop)
{
if (encap->unigue.next_hop > encap->unigue.hop_count) {
return TC_ACT_SHOT;
@@ -601,8 +608,8 @@ static ret_t get_next_hop(buf_t *pkt, encap_headers_t *encap,
* return value, and calling code works while still being "generic" to
* IPv4 and IPv6.
*/
-static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
- uint64_t iphlen, uint16_t sport, uint16_t dport)
+static INLINING uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
+ uint64_t iphlen, uint16_t sport, uint16_t dport)
{
switch (iphlen) {
case sizeof(struct iphdr): {
@@ -630,9 +637,9 @@ static uint64_t fill_tuple(struct bpf_sock_tuple *tuple, void *iph,
}
}
-static verdict_t classify_tcp(struct __sk_buff *skb,
- struct bpf_sock_tuple *tuple, uint64_t tuplen,
- void *iph, struct tcphdr *tcp)
+static INLINING verdict_t classify_tcp(struct __sk_buff *skb,
+ struct bpf_sock_tuple *tuple, uint64_t tuplen,
+ void *iph, struct tcphdr *tcp)
{
struct bpf_sock *sk =
bpf_skc_lookup_tcp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
@@ -663,8 +670,8 @@ static verdict_t classify_tcp(struct __sk_buff *skb,
return UNKNOWN;
}
-static verdict_t classify_udp(struct __sk_buff *skb,
- struct bpf_sock_tuple *tuple, uint64_t tuplen)
+static INLINING verdict_t classify_udp(struct __sk_buff *skb,
+ struct bpf_sock_tuple *tuple, uint64_t tuplen)
{
struct bpf_sock *sk =
bpf_sk_lookup_udp(skb, tuple, tuplen, BPF_F_CURRENT_NETNS, 0);
@@ -681,9 +688,9 @@ static verdict_t classify_udp(struct __sk_buff *skb,
return UNKNOWN;
}
-static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto,
- struct bpf_sock_tuple *tuple, uint64_t tuplen,
- metrics_t *metrics)
+static INLINING verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto,
+ struct bpf_sock_tuple *tuple, uint64_t tuplen,
+ metrics_t *metrics)
{
switch (proto) {
case IPPROTO_TCP:
@@ -698,7 +705,7 @@ static verdict_t classify_icmp(struct __sk_buff *skb, uint8_t proto,
}
}
-static verdict_t process_icmpv4(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_icmpv4(buf_t *pkt, metrics_t *metrics)
{
struct icmphdr icmp;
if (!buf_copy(pkt, &icmp, sizeof(icmp))) {
@@ -745,7 +752,7 @@ static verdict_t process_icmpv4(buf_t *pkt, metrics_t *metrics)
sizeof(tuple.ipv4), metrics);
}
-static verdict_t process_icmpv6(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_icmpv6(buf_t *pkt, metrics_t *metrics)
{
struct icmp6hdr icmp6;
if (!buf_copy(pkt, &icmp6, sizeof(icmp6))) {
@@ -797,8 +804,8 @@ static verdict_t process_icmpv6(buf_t *pkt, metrics_t *metrics)
metrics);
}
-static verdict_t process_tcp(buf_t *pkt, void *iph, uint64_t iphlen,
- metrics_t *metrics)
+static INLINING verdict_t process_tcp(buf_t *pkt, void *iph, uint64_t iphlen,
+ metrics_t *metrics)
{
metrics->l4_protocol_packets_total_tcp++;
@@ -819,8 +826,8 @@ static verdict_t process_tcp(buf_t *pkt, void *iph, uint64_t iphlen,
return classify_tcp(pkt->skb, &tuple, tuplen, iph, tcp);
}
-static verdict_t process_udp(buf_t *pkt, void *iph, uint64_t iphlen,
- metrics_t *metrics)
+static INLINING verdict_t process_udp(buf_t *pkt, void *iph, uint64_t iphlen,
+ metrics_t *metrics)
{
metrics->l4_protocol_packets_total_udp++;
@@ -837,7 +844,7 @@ static verdict_t process_udp(buf_t *pkt, void *iph, uint64_t iphlen,
return classify_udp(pkt->skb, &tuple, tuplen);
}
-static verdict_t process_ipv4(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_ipv4(buf_t *pkt, metrics_t *metrics)
{
metrics->l3_protocol_packets_total_ipv4++;
@@ -874,7 +881,7 @@ static verdict_t process_ipv4(buf_t *pkt, metrics_t *metrics)
}
}
-static verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
+static INLINING verdict_t process_ipv6(buf_t *pkt, metrics_t *metrics)
{
metrics->l3_protocol_packets_total_ipv6++;
diff --git a/tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c b/tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c
new file mode 100644
index 000000000000..eed26b70e3a2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_cls_redirect_subprogs.c
@@ -0,0 +1,2 @@
+#define SUBPROGS
+#include "test_cls_redirect.c"
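
The two-line wrapper exists purely to compile test_cls_redirect.c a second time with SUBPROGS set, flipping the INLINING macro added above from __always_inline to __noinline so each helper becomes a real BPF-to-BPF call for the verifier to validate. In miniature:

/* Build sketch: hypothetical direct invocations; the selftests Makefile
 * compiles every progs/*.c through its usual clang BPF rules.
 *
 *   clang -O2 -target bpf -c test_cls_redirect.c          -o test_cls_redirect.o
 *   clang -O2 -target bpf -c test_cls_redirect_subprogs.c -o test_cls_redirect_subprogs.o
 *
 * Only the second compile sees SUBPROGS, so there INLINING expands to
 * __noinline and the verifier is exercised on real subprogram calls
 * instead of one flattened program.
 */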
diff --git a/tools/testing/selftests/bpf/progs/test_core_autosize.c b/tools/testing/selftests/bpf/progs/test_core_autosize.c
new file mode 100644
index 000000000000..44f5aa2e8956
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_autosize.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* fields of exactly the same size */
+struct test_struct___samesize {
+ void *ptr;
+ unsigned long long val1;
+ unsigned int val2;
+ unsigned short val3;
+ unsigned char val4;
+} __attribute__((preserve_access_index));
+
+/* unsigned fields that have to be downsized by libbpf */
+struct test_struct___downsize {
+ void *ptr;
+ unsigned long val1;
+ unsigned long val2;
+ unsigned long val3;
+ unsigned long val4;
+ /* total sz: 40 */
+} __attribute__((preserve_access_index));
+
+/* fields with signed integers of wrong size, should be rejected */
+struct test_struct___signed {
+ void *ptr;
+ long val1;
+ long val2;
+ long val3;
+ long val4;
+} __attribute__((preserve_access_index));
+
+/* real layout and sizes according to test's (32-bit) BTF */
+struct test_struct___real {
+	unsigned int ptr; /* can't use `void *`, it is always 8 bytes in the BPF target */
+ unsigned int val2;
+ unsigned long long val1;
+ unsigned short val3;
+ unsigned char val4;
+ unsigned char _pad;
+ /* total sz: 20 */
+};
+
+struct test_struct___real input = {
+ .ptr = 0x01020304,
+ .val1 = 0x1020304050607080,
+ .val2 = 0x0a0b0c0d,
+ .val3 = 0xfeed,
+ .val4 = 0xb9,
+ ._pad = 0xff, /* make sure no accidental zeros are present */
+};
+
+unsigned long long ptr_samesized = 0;
+unsigned long long val1_samesized = 0;
+unsigned long long val2_samesized = 0;
+unsigned long long val3_samesized = 0;
+unsigned long long val4_samesized = 0;
+struct test_struct___real output_samesized = {};
+
+unsigned long long ptr_downsized = 0;
+unsigned long long val1_downsized = 0;
+unsigned long long val2_downsized = 0;
+unsigned long long val3_downsized = 0;
+unsigned long long val4_downsized = 0;
+struct test_struct___real output_downsized = {};
+
+unsigned long long ptr_probed = 0;
+unsigned long long val1_probed = 0;
+unsigned long long val2_probed = 0;
+unsigned long long val3_probed = 0;
+unsigned long long val4_probed = 0;
+
+unsigned long long ptr_signed = 0;
+unsigned long long val1_signed = 0;
+unsigned long long val2_signed = 0;
+unsigned long long val3_signed = 0;
+unsigned long long val4_signed = 0;
+struct test_struct___real output_signed = {};
+
+SEC("raw_tp/sys_exit")
+int handle_samesize(void *ctx)
+{
+ struct test_struct___samesize *in = (void *)&input;
+ struct test_struct___samesize *out = (void *)&output_samesized;
+
+ ptr_samesized = (unsigned long long)in->ptr;
+ val1_samesized = in->val1;
+ val2_samesized = in->val2;
+ val3_samesized = in->val3;
+ val4_samesized = in->val4;
+
+ out->ptr = in->ptr;
+ out->val1 = in->val1;
+ out->val2 = in->val2;
+ out->val3 = in->val3;
+ out->val4 = in->val4;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int handle_downsize(void *ctx)
+{
+ struct test_struct___downsize *in = (void *)&input;
+ struct test_struct___downsize *out = (void *)&output_downsized;
+
+ ptr_downsized = (unsigned long long)in->ptr;
+ val1_downsized = in->val1;
+ val2_downsized = in->val2;
+ val3_downsized = in->val3;
+ val4_downsized = in->val4;
+
+ out->ptr = in->ptr;
+ out->val1 = in->val1;
+ out->val2 = in->val2;
+ out->val3 = in->val3;
+ out->val4 = in->val4;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_probed(void *ctx)
+{
+ struct test_struct___downsize *in = (void *)&input;
+ __u64 tmp;
+
+ tmp = 0;
+ bpf_core_read(&tmp, bpf_core_field_size(in->ptr), &in->ptr);
+ ptr_probed = tmp;
+
+ tmp = 0;
+ bpf_core_read(&tmp, bpf_core_field_size(in->val1), &in->val1);
+ val1_probed = tmp;
+
+ tmp = 0;
+ bpf_core_read(&tmp, bpf_core_field_size(in->val2), &in->val2);
+ val2_probed = tmp;
+
+ tmp = 0;
+ bpf_core_read(&tmp, bpf_core_field_size(in->val3), &in->val3);
+ val3_probed = tmp;
+
+ tmp = 0;
+ bpf_core_read(&tmp, bpf_core_field_size(in->val4), &in->val4);
+ val4_probed = tmp;
+
+ return 0;
+}
+
+SEC("raw_tp/sys_enter")
+int handle_signed(void *ctx)
+{
+ struct test_struct___signed *in = (void *)&input;
+ struct test_struct___signed *out = (void *)&output_signed;
+
+ val2_signed = in->val2;
+ val3_signed = in->val3;
+ val4_signed = in->val4;
+
+	out->val2 = in->val2;
+	out->val3 = in->val3;
+	out->val4 = in->val4;
+
+ return 0;
+}
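
All three local flavors above match struct test_struct___real by name (libbpf strips the ___suffix when matching), after which every field access is relocated to the target's offset and size: same-size fields pass through, the 8-byte unsigned long loads in the ___downsize case are shrunk to the target's 4 bytes, and the ___signed case must fail to load, since the rewritten load has no way to add the sign extension a narrowed signed field would need. For reference, the offsets the relocations resolve to, as a host-side sketch (the offsets also hold under common 64-bit ABIs; only the tail padding differs from the 20-byte 32-bit total):

#include <stddef.h>

/* host-side mirror of struct test_struct___real above */
struct autosize_layout_sketch {
	unsigned int ptr;        /* offset  0, 4 bytes */
	unsigned int val2;       /* offset  4, 4 bytes */
	unsigned long long val1; /* offset  8, 8 bytes */
	unsigned short val3;     /* offset 16, 2 bytes */
	unsigned char val4;      /* offset 18, 1 byte  */
	unsigned char _pad;      /* offset 19, 1 byte  */
};

_Static_assert(offsetof(struct autosize_layout_sketch, val2) == 4, "val2");
_Static_assert(offsetof(struct autosize_layout_sketch, val1) == 8, "val1");
_Static_assert(offsetof(struct autosize_layout_sketch, val3) == 16, "val3");
_Static_assert(offsetof(struct autosize_layout_sketch, val4) == 18, "val4");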
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_enumval.c b/tools/testing/selftests/bpf/progs/test_core_reloc_enumval.c
new file mode 100644
index 000000000000..e7ef3dada2bf
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_enumval.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+ bool skip;
+} data = {};
+
+enum named_enum {
+ NAMED_ENUM_VAL1 = 1,
+ NAMED_ENUM_VAL2 = 2,
+ NAMED_ENUM_VAL3 = 3,
+};
+
+typedef enum {
+ ANON_ENUM_VAL1 = 0x10,
+ ANON_ENUM_VAL2 = 0x20,
+ ANON_ENUM_VAL3 = 0x30,
+} anon_enum;
+
+struct core_reloc_enumval_output {
+ bool named_val1_exists;
+ bool named_val2_exists;
+ bool named_val3_exists;
+ bool anon_val1_exists;
+ bool anon_val2_exists;
+ bool anon_val3_exists;
+
+ int named_val1;
+ int named_val2;
+ int anon_val1;
+ int anon_val2;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_enumval(void *ctx)
+{
+#if __has_builtin(__builtin_preserve_enum_value)
+ struct core_reloc_enumval_output *out = (void *)&data.out;
+ enum named_enum named = 0;
+ anon_enum anon = 0;
+
+ out->named_val1_exists = bpf_core_enum_value_exists(named, NAMED_ENUM_VAL1);
+ out->named_val2_exists = bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL2);
+ out->named_val3_exists = bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL3);
+
+ out->anon_val1_exists = bpf_core_enum_value_exists(anon, ANON_ENUM_VAL1);
+ out->anon_val2_exists = bpf_core_enum_value_exists(anon_enum, ANON_ENUM_VAL2);
+ out->anon_val3_exists = bpf_core_enum_value_exists(anon_enum, ANON_ENUM_VAL3);
+
+ out->named_val1 = bpf_core_enum_value(named, NAMED_ENUM_VAL1);
+ out->named_val2 = bpf_core_enum_value(named, NAMED_ENUM_VAL2);
+ /* NAMED_ENUM_VAL3 value is optional */
+
+ out->anon_val1 = bpf_core_enum_value(anon, ANON_ENUM_VAL1);
+ out->anon_val2 = bpf_core_enum_value(anon, ANON_ENUM_VAL2);
+ /* ANON_ENUM_VAL3 value is optional */
+#else
+ data.skip = true;
+#endif
+
+ return 0;
+}
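
The _exists/value split is the practical point: a program can read an enumerator's value from the target BTF while tolerating its absence, which is why the test deliberately leaves VAL3 unread. A hedged sketch reusing enum named_enum from above, with the same __builtin_preserve_enum_value requirement as the file's own guard; the pattern works because the exists-check resolves to a constant at load time, so the verifier prunes the branch holding the failed relocation:

SEC("raw_tracepoint/sys_exit")
int read_val3_if_present(void *ctx)
{
	int v = -1;

	if (bpf_core_enum_value_exists(enum named_enum, NAMED_ENUM_VAL3))
		v = bpf_core_enum_value(enum named_enum, NAMED_ENUM_VAL3);
	/* v stays -1 on targets whose named_enum lacks VAL3 */
	return v;
}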
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
index aba928fd60d3..145028b52ad8 100644
--- a/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_kernel.c
@@ -3,6 +3,7 @@
#include <linux/bpf.h>
#include <stdint.h>
+#include <stdbool.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>
@@ -11,6 +12,7 @@ char _license[] SEC("license") = "GPL";
struct {
char in[256];
char out[256];
+ bool skip;
uint64_t my_pid_tgid;
} data = {};
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
new file mode 100644
index 000000000000..fb60f8195c53
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_based.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+ bool skip;
+} data = {};
+
+struct a_struct {
+ int x;
+};
+
+union a_union {
+ int y;
+ int z;
+};
+
+typedef struct a_struct named_struct_typedef;
+
+typedef struct { int x, y, z; } anon_struct_typedef;
+
+typedef struct {
+ int a, b, c;
+} *struct_ptr_typedef;
+
+enum an_enum {
+ AN_ENUM_VAL1 = 1,
+ AN_ENUM_VAL2 = 2,
+ AN_ENUM_VAL3 = 3,
+};
+
+typedef int int_typedef;
+
+typedef enum { TYPEDEF_ENUM_VAL1, TYPEDEF_ENUM_VAL2 } enum_typedef;
+
+typedef void *void_ptr_typedef;
+
+typedef int (*func_proto_typedef)(long);
+
+typedef char arr_typedef[20];
+
+struct core_reloc_type_based_output {
+ bool struct_exists;
+ bool union_exists;
+ bool enum_exists;
+ bool typedef_named_struct_exists;
+ bool typedef_anon_struct_exists;
+ bool typedef_struct_ptr_exists;
+ bool typedef_int_exists;
+ bool typedef_enum_exists;
+ bool typedef_void_ptr_exists;
+ bool typedef_func_proto_exists;
+ bool typedef_arr_exists;
+
+ int struct_sz;
+ int union_sz;
+ int enum_sz;
+ int typedef_named_struct_sz;
+ int typedef_anon_struct_sz;
+ int typedef_struct_ptr_sz;
+ int typedef_int_sz;
+ int typedef_enum_sz;
+ int typedef_void_ptr_sz;
+ int typedef_func_proto_sz;
+ int typedef_arr_sz;
+};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_type_based(void *ctx)
+{
+#if __has_builtin(__builtin_preserve_type_info)
+ struct core_reloc_type_based_output *out = (void *)&data.out;
+
+ out->struct_exists = bpf_core_type_exists(struct a_struct);
+ out->union_exists = bpf_core_type_exists(union a_union);
+ out->enum_exists = bpf_core_type_exists(enum an_enum);
+ out->typedef_named_struct_exists = bpf_core_type_exists(named_struct_typedef);
+ out->typedef_anon_struct_exists = bpf_core_type_exists(anon_struct_typedef);
+ out->typedef_struct_ptr_exists = bpf_core_type_exists(struct_ptr_typedef);
+ out->typedef_int_exists = bpf_core_type_exists(int_typedef);
+ out->typedef_enum_exists = bpf_core_type_exists(enum_typedef);
+ out->typedef_void_ptr_exists = bpf_core_type_exists(void_ptr_typedef);
+ out->typedef_func_proto_exists = bpf_core_type_exists(func_proto_typedef);
+ out->typedef_arr_exists = bpf_core_type_exists(arr_typedef);
+
+ out->struct_sz = bpf_core_type_size(struct a_struct);
+ out->union_sz = bpf_core_type_size(union a_union);
+ out->enum_sz = bpf_core_type_size(enum an_enum);
+ out->typedef_named_struct_sz = bpf_core_type_size(named_struct_typedef);
+ out->typedef_anon_struct_sz = bpf_core_type_size(anon_struct_typedef);
+ out->typedef_struct_ptr_sz = bpf_core_type_size(struct_ptr_typedef);
+ out->typedef_int_sz = bpf_core_type_size(int_typedef);
+ out->typedef_enum_sz = bpf_core_type_size(enum_typedef);
+ out->typedef_void_ptr_sz = bpf_core_type_size(void_ptr_typedef);
+ out->typedef_func_proto_sz = bpf_core_type_size(func_proto_typedef);
+ out->typedef_arr_sz = bpf_core_type_size(arr_typedef);
+#else
+ data.skip = true;
+#endif
+ return 0;
+}
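
The usual application of these size/existence relocations is feature detection: branch on whether the target defines a type and size buffers from the target's BTF instead of the compile-time sizeof. A hedged sketch reusing struct a_struct from above; as with enum values, a false bpf_core_type_exists() becomes a load-time constant 0, so the dead branch, failed relocation included, never reaches the verifier:

SEC("raw_tracepoint/sys_exit")
int size_from_target(void *ctx)
{
	long sz = 0;

	if (bpf_core_type_exists(struct a_struct))
		sz = bpf_core_type_size(struct a_struct); /* target's sizeof */
	return sz;	/* 0 when the target BTF has no 'struct a_struct' */
}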
diff --git a/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
new file mode 100644
index 000000000000..22aba3f6e344
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_core_reloc_type_id.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/bpf.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+ char in[256];
+ char out[256];
+ bool skip;
+} data = {};
+
+/* some types are shared with test_core_reloc_type_based.c */
+struct a_struct {
+ int x;
+};
+
+union a_union {
+ int y;
+ int z;
+};
+
+enum an_enum {
+ AN_ENUM_VAL1 = 1,
+ AN_ENUM_VAL2 = 2,
+ AN_ENUM_VAL3 = 3,
+};
+
+typedef struct a_struct named_struct_typedef;
+
+typedef int (*func_proto_typedef)(long);
+
+typedef char arr_typedef[20];
+
+struct core_reloc_type_id_output {
+ int local_anon_struct;
+ int local_anon_union;
+ int local_anon_enum;
+ int local_anon_func_proto_ptr;
+ int local_anon_void_ptr;
+ int local_anon_arr;
+
+ int local_struct;
+ int local_union;
+ int local_enum;
+ int local_int;
+ int local_struct_typedef;
+ int local_func_proto_typedef;
+ int local_arr_typedef;
+
+ int targ_struct;
+ int targ_union;
+ int targ_enum;
+ int targ_int;
+ int targ_struct_typedef;
+ int targ_func_proto_typedef;
+ int targ_arr_typedef;
+};
+
+/* preserve types even if Clang doesn't support the built-in */
+struct a_struct t1 = {};
+union a_union t2 = {};
+enum an_enum t3 = 0;
+named_struct_typedef t4 = {};
+func_proto_typedef t5 = 0;
+arr_typedef t6 = {};
+
+SEC("raw_tracepoint/sys_enter")
+int test_core_type_id(void *ctx)
+{
+	/* We use __builtin_btf_type_id() in this test, but up until the time
+ * __builtin_preserve_type_info() was added it contained a bug that
+ * would make this test fail. The bug was fixed ([0]) with addition of
+ * __builtin_preserve_type_info(), though, so that's what we are using
+	 * to detect whether this test has to be executed, however strange
+	 * that might look.
+ *
+ * [0] https://reviews.llvm.org/D85174
+ */
+#if __has_builtin(__builtin_preserve_type_info)
+ struct core_reloc_type_id_output *out = (void *)&data.out;
+
+ out->local_anon_struct = bpf_core_type_id_local(struct { int marker_field; });
+ out->local_anon_union = bpf_core_type_id_local(union { int marker_field; });
+ out->local_anon_enum = bpf_core_type_id_local(enum { MARKER_ENUM_VAL = 123 });
+ out->local_anon_func_proto_ptr = bpf_core_type_id_local(_Bool(*)(int));
+ out->local_anon_void_ptr = bpf_core_type_id_local(void *);
+ out->local_anon_arr = bpf_core_type_id_local(_Bool[47]);
+
+ out->local_struct = bpf_core_type_id_local(struct a_struct);
+ out->local_union = bpf_core_type_id_local(union a_union);
+ out->local_enum = bpf_core_type_id_local(enum an_enum);
+ out->local_int = bpf_core_type_id_local(int);
+ out->local_struct_typedef = bpf_core_type_id_local(named_struct_typedef);
+ out->local_func_proto_typedef = bpf_core_type_id_local(func_proto_typedef);
+ out->local_arr_typedef = bpf_core_type_id_local(arr_typedef);
+
+ out->targ_struct = bpf_core_type_id_kernel(struct a_struct);
+ out->targ_union = bpf_core_type_id_kernel(union a_union);
+ out->targ_enum = bpf_core_type_id_kernel(enum an_enum);
+ out->targ_int = bpf_core_type_id_kernel(int);
+ out->targ_struct_typedef = bpf_core_type_id_kernel(named_struct_typedef);
+ out->targ_func_proto_typedef = bpf_core_type_id_kernel(func_proto_typedef);
+ out->targ_arr_typedef = bpf_core_type_id_kernel(arr_typedef);
+#else
+ data.skip = true;
+#endif
+
+ return 0;
+}
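
bpf_core_type_id_kernel() resolves against the running kernel's BTF much like user space can with libbpf's btf APIs; a hedged host-side sketch of that lookup (it assumes /sys/kernel/btf/vmlinux is exposed and has nothing to do with this test's custom target BTF files):

#include <stdio.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct btf *btf = btf__load_vmlinux_btf();
	__s32 id;

	if (libbpf_get_error(btf))
		return 1;

	id = btf__find_by_name_kind(btf, "task_struct", BTF_KIND_STRUCT);
	if (id > 0)
		printf("task_struct: BTF id %d, size %lld\n", id,
		       (long long)btf__resolve_size(btf, id));
	btf__free(btf);
	return 0;
}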
diff --git a/tools/testing/selftests/bpf/progs/test_d_path.c b/tools/testing/selftests/bpf/progs/test_d_path.c
new file mode 100644
index 000000000000..84e1f883f97b
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_d_path.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#define MAX_PATH_LEN 128
+#define MAX_FILES 7
+
+pid_t my_pid = 0;
+__u32 cnt_stat = 0;
+__u32 cnt_close = 0;
+char paths_stat[MAX_FILES][MAX_PATH_LEN] = {};
+char paths_close[MAX_FILES][MAX_PATH_LEN] = {};
+int rets_stat[MAX_FILES] = {};
+int rets_close[MAX_FILES] = {};
+
+int called_stat = 0;
+int called_close = 0;
+
+SEC("fentry/security_inode_getattr")
+int BPF_PROG(prog_stat, struct path *path, struct kstat *stat,
+ __u32 request_mask, unsigned int query_flags)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ __u32 cnt = cnt_stat;
+ int ret;
+
+ called_stat = 1;
+
+ if (pid != my_pid)
+ return 0;
+
+ if (cnt >= MAX_FILES)
+ return 0;
+ ret = bpf_d_path(path, paths_stat[cnt], MAX_PATH_LEN);
+
+ rets_stat[cnt] = ret;
+ cnt_stat++;
+ return 0;
+}
+
+SEC("fentry/filp_close")
+int BPF_PROG(prog_close, struct file *file, void *id)
+{
+ pid_t pid = bpf_get_current_pid_tgid() >> 32;
+ __u32 cnt = cnt_close;
+ int ret;
+
+ called_close = 1;
+
+ if (pid != my_pid)
+ return 0;
+
+ if (cnt >= MAX_FILES)
+ return 0;
+ ret = bpf_d_path(&file->f_path,
+ paths_close[cnt], MAX_PATH_LEN);
+
+ rets_close[cnt] = ret;
+ cnt_close++;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
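
Driving these programs takes three steps: set my_pid so the hooks ignore unrelated processes, attach, then perform a stat() and a close() so security_inode_getattr and filp_close actually fire. A hedged sketch assuming a bpftool-generated skeleton (test_d_path.skel.h and its generated calls), not the actual prog_tests/d_path.c:

#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include "test_d_path.skel.h"	/* bpftool-generated skeleton (assumed) */

int main(void)
{
	struct test_d_path *skel = test_d_path__open_and_load();
	struct stat st;
	int fd;

	if (!skel)
		return 1;
	skel->bss->my_pid = getpid();	/* filter to this process only */
	if (test_d_path__attach(skel))
		goto out;

	/* trigger the two fentry hooks: security_inode_getattr via stat(),
	 * filp_close via close() */
	stat("/tmp", &st);
	fd = open("/tmp", O_RDONLY);
	if (fd >= 0)
		close(fd);

	/* skel->bss->paths_stat / paths_close now hold d_path() results */
out:
	test_d_path__destroy(skel);
	return 0;
}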
diff --git a/tools/testing/selftests/bpf/progs/test_ksyms_btf.c b/tools/testing/selftests/bpf/progs/test_ksyms_btf.c
new file mode 100644
index 000000000000..bb8ea9270f29
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_ksyms_btf.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Google */
+
+#include "vmlinux.h"
+
+#include <bpf/bpf_helpers.h>
+
+__u64 out__runqueues_addr = -1;
+__u64 out__bpf_prog_active_addr = -1;
+
+__u32 out__rq_cpu = -1; /* percpu struct fields */
+int out__bpf_prog_active = -1; /* percpu int */
+
+__u32 out__this_rq_cpu = -1;
+int out__this_bpf_prog_active = -1;
+
+__u32 out__cpu_0_rq_cpu = -1; /* cpu_rq(0)->cpu */
+
+extern const struct rq runqueues __ksym; /* struct type global var. */
+extern const int bpf_prog_active __ksym; /* int type global var. */
+
+SEC("raw_tp/sys_enter")
+int handler(const void *ctx)
+{
+ struct rq *rq;
+ int *active;
+ __u32 cpu;
+
+ out__runqueues_addr = (__u64)&runqueues;
+ out__bpf_prog_active_addr = (__u64)&bpf_prog_active;
+
+ cpu = bpf_get_smp_processor_id();
+
+ /* test bpf_per_cpu_ptr() */
+ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, cpu);
+ if (rq)
+ out__rq_cpu = rq->cpu;
+ active = (int *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
+ if (active)
+ out__bpf_prog_active = *active;
+
+ rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, 0);
+	if (rq) /* should always be valid, but we can't skip the check. */
+ out__cpu_0_rq_cpu = rq->cpu;
+
+ /* test bpf_this_cpu_ptr */
+ rq = (struct rq *)bpf_this_cpu_ptr(&runqueues);
+ out__this_rq_cpu = rq->cpu;
+ active = (int *)bpf_this_cpu_ptr(&bpf_prog_active);
+ out__this_bpf_prog_active = *active;
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
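
The two *_addr outputs can be cross-checked from user space against /proc/kallsyms, the same source of truth the __ksym addresses come from. A hedged helper sketch (the in-tree harness plausibly does the equivalent through its trace_helpers):

#include <stdio.h>
#include <string.h>

/* return the kallsyms address of an exact symbol name, 0 if not found */
static unsigned long long ksym_addr(const char *name)
{
	unsigned long long addr = 0, a;
	char line[256], sym[128], type;
	FILE *f = fopen("/proc/kallsyms", "r");

	if (!f)
		return 0;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "%llx %c %127s", &a, &type, sym) != 3)
			continue;
		if (!strcmp(sym, name)) {
			addr = a;
			break;
		}
	}
	fclose(f);
	return addr;
}

/* compare ksym_addr("runqueues") with out__runqueues_addr, and
 * ksym_addr("bpf_prog_active") with out__bpf_prog_active_addr */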
diff --git a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
index 28351936a438..b9e2753f4f91 100644
--- a/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_l4lb_noinline.c
@@ -17,9 +17,7 @@
#include "test_iptunnel_common.h"
#include <bpf/bpf_endian.h>
-int _version SEC("version") = 1;
-
-static __u32 rol32(__u32 word, unsigned int shift)
+static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
@@ -52,7 +50,7 @@ static __u32 rol32(__u32 word, unsigned int shift)
typedef unsigned int u32;
-static u32 jhash(const void *key, u32 length, u32 initval)
+static __noinline u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
const unsigned char *k = key;
@@ -88,7 +86,7 @@ static u32 jhash(const void *key, u32 length, u32 initval)
return c;
}
-static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
+static __noinline u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
b += initval;
@@ -97,7 +95,7 @@ static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
return c;
}
-static u32 jhash_2words(u32 a, u32 b, u32 initval)
+static __noinline u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
}
@@ -200,8 +198,7 @@ struct {
__type(value, struct ctl_value);
} ctl_array SEC(".maps");
-static __u32 get_packet_hash(struct packet_description *pckt,
- bool ipv6)
+static __noinline __u32 get_packet_hash(struct packet_description *pckt, bool ipv6)
{
if (ipv6)
return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS),
@@ -210,10 +207,10 @@ static __u32 get_packet_hash(struct packet_description *pckt,
return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE);
}
-static bool get_packet_dst(struct real_definition **real,
- struct packet_description *pckt,
- struct vip_meta *vip_info,
- bool is_ipv6)
+static __noinline bool get_packet_dst(struct real_definition **real,
+ struct packet_description *pckt,
+ struct vip_meta *vip_info,
+ bool is_ipv6)
{
__u32 hash = get_packet_hash(pckt, is_ipv6);
__u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE;
@@ -233,8 +230,8 @@ static bool get_packet_dst(struct real_definition **real,
return true;
}
-static int parse_icmpv6(void *data, void *data_end, __u64 off,
- struct packet_description *pckt)
+static __noinline int parse_icmpv6(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
{
struct icmp6hdr *icmp_hdr;
struct ipv6hdr *ip6h;
@@ -255,8 +252,8 @@ static int parse_icmpv6(void *data, void *data_end, __u64 off,
return TC_ACT_UNSPEC;
}
-static int parse_icmp(void *data, void *data_end, __u64 off,
- struct packet_description *pckt)
+static __noinline int parse_icmp(void *data, void *data_end, __u64 off,
+ struct packet_description *pckt)
{
struct icmphdr *icmp_hdr;
struct iphdr *iph;
@@ -280,8 +277,8 @@ static int parse_icmp(void *data, void *data_end, __u64 off,
return TC_ACT_UNSPEC;
}
-static bool parse_udp(void *data, __u64 off, void *data_end,
- struct packet_description *pckt)
+static __noinline bool parse_udp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
{
struct udphdr *udp;
udp = data + off;
@@ -299,8 +296,8 @@ static bool parse_udp(void *data, __u64 off, void *data_end,
return true;
}
-static bool parse_tcp(void *data, __u64 off, void *data_end,
- struct packet_description *pckt)
+static __noinline bool parse_tcp(void *data, __u64 off, void *data_end,
+ struct packet_description *pckt)
{
struct tcphdr *tcp;
@@ -321,8 +318,8 @@ static bool parse_tcp(void *data, __u64 off, void *data_end,
return true;
}
-static int process_packet(void *data, __u64 off, void *data_end,
- bool is_ipv6, struct __sk_buff *skb)
+static __noinline int process_packet(void *data, __u64 off, void *data_end,
+ bool is_ipv6, struct __sk_buff *skb)
{
void *pkt_start = (void *)(long)skb->data;
struct packet_description pckt = {};
diff --git a/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
new file mode 100644
index 000000000000..6077a025092c
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_misc_tcp_hdr_options.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <stddef.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/socket.h>
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#define BPF_PROG_TEST_TCP_HDR_OPTIONS
+#include "test_tcp_hdr_options.h"
+
+__u16 last_addr16_n = __bpf_htons(1);
+__u16 active_lport_n = 0;
+__u16 active_lport_h = 0;
+__u16 passive_lport_n = 0;
+__u16 passive_lport_h = 0;
+
+/* options received at passive side */
+unsigned int nr_pure_ack = 0;
+unsigned int nr_data = 0;
+unsigned int nr_syn = 0;
+unsigned int nr_fin = 0;
+
+/* Check the header received from the active side */
+static int __check_active_hdr_in(struct bpf_sock_ops *skops, bool check_syn)
+{
+ union {
+ struct tcphdr th;
+ struct ipv6hdr ip6;
+ struct tcp_exprm_opt exprm_opt;
+ struct tcp_opt reg_opt;
+ __u8 data[100]; /* IPv6 (40) + Max TCP hdr (60) */
+ } hdr = {};
+ __u64 load_flags = check_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;
+ struct tcphdr *pth;
+ int ret;
+
+ hdr.reg_opt.kind = 0xB9;
+
+ /* The option is 4 bytes long instead of 2 bytes */
+ ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, 2, load_flags);
+ if (ret != -ENOSPC)
+ RET_CG_ERR(ret);
+
+ /* Test searching magic with regular kind */
+ hdr.reg_opt.len = 4;
+ ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
+ load_flags);
+ if (ret != -EINVAL)
+ RET_CG_ERR(ret);
+
+ hdr.reg_opt.len = 0;
+ ret = bpf_load_hdr_opt(skops, &hdr.reg_opt, sizeof(hdr.reg_opt),
+ load_flags);
+ if (ret != 4 || hdr.reg_opt.len != 4 || hdr.reg_opt.kind != 0xB9 ||
+ hdr.reg_opt.data[0] != 0xfa || hdr.reg_opt.data[1] != 0xce)
+ RET_CG_ERR(ret);
+
+ /* Test searching experimental option with invalid kind length */
+ hdr.exprm_opt.kind = TCPOPT_EXP;
+ hdr.exprm_opt.len = 5;
+ hdr.exprm_opt.magic = 0;
+ ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
+ load_flags);
+ if (ret != -EINVAL)
+ RET_CG_ERR(ret);
+
+ /* Test searching experimental option with 0 magic value */
+ hdr.exprm_opt.len = 4;
+ ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
+ load_flags);
+ if (ret != -ENOMSG)
+ RET_CG_ERR(ret);
+
+ hdr.exprm_opt.magic = __bpf_htons(0xeB9F);
+ ret = bpf_load_hdr_opt(skops, &hdr.exprm_opt, sizeof(hdr.exprm_opt),
+ load_flags);
+ if (ret != 4 || hdr.exprm_opt.len != 4 ||
+ hdr.exprm_opt.kind != TCPOPT_EXP ||
+ hdr.exprm_opt.magic != __bpf_htons(0xeB9F))
+ RET_CG_ERR(ret);
+
+ if (!check_syn)
+ return CG_OK;
+
+ /* Test loading from skops->syn_skb if sk_state == TCP_NEW_SYN_RECV
+ *
+ * Test loading from tp->saved_syn for other sk_state.
+ */
+ ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr.ip6,
+ sizeof(hdr.ip6));
+ if (ret != -ENOSPC)
+ RET_CG_ERR(ret);
+
+ if (hdr.ip6.saddr.s6_addr16[7] != last_addr16_n ||
+ hdr.ip6.daddr.s6_addr16[7] != last_addr16_n)
+ RET_CG_ERR(0);
+
+ ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN_IP, &hdr, sizeof(hdr));
+ if (ret < 0)
+ RET_CG_ERR(ret);
+
+ pth = (struct tcphdr *)(&hdr.ip6 + 1);
+ if (pth->dest != passive_lport_n || pth->source != active_lport_n)
+ RET_CG_ERR(0);
+
+ ret = bpf_getsockopt(skops, SOL_TCP, TCP_BPF_SYN, &hdr, sizeof(hdr));
+ if (ret < 0)
+ RET_CG_ERR(ret);
+
+ if (hdr.th.dest != passive_lport_n || hdr.th.source != active_lport_n)
+ RET_CG_ERR(0);
+
+ return CG_OK;
+}
+
+static int check_active_syn_in(struct bpf_sock_ops *skops)
+{
+ return __check_active_hdr_in(skops, true);
+}
+
+static int check_active_hdr_in(struct bpf_sock_ops *skops)
+{
+ struct tcphdr *th;
+
+ if (__check_active_hdr_in(skops, false) == CG_ERR)
+ return CG_ERR;
+
+ th = skops->skb_data;
+ if (th + 1 > skops->skb_data_end)
+ RET_CG_ERR(0);
+
+ if (tcp_hdrlen(th) < skops->skb_len)
+ nr_data++;
+
+ if (th->fin)
+ nr_fin++;
+
+ if (th->ack && !th->fin && tcp_hdrlen(th) == skops->skb_len)
+ nr_pure_ack++;
+
+ return CG_OK;
+}
+
+static int active_opt_len(struct bpf_sock_ops *skops)
+{
+ int err;
+
+ /* Reserve more than enough to allow the -EEXIST test in
+ * the write_active_opt().
+ */
+ err = bpf_reserve_hdr_opt(skops, 12, 0);
+ if (err)
+ RET_CG_ERR(err);
+
+ return CG_OK;
+}
+
+static int write_active_opt(struct bpf_sock_ops *skops)
+{
+ struct tcp_exprm_opt exprm_opt = {};
+ struct tcp_opt win_scale_opt = {};
+ struct tcp_opt reg_opt = {};
+ struct tcphdr *th;
+ int err, ret;
+
+ exprm_opt.kind = TCPOPT_EXP;
+ exprm_opt.len = 4;
+ exprm_opt.magic = __bpf_htons(0xeB9F);
+
+ reg_opt.kind = 0xB9;
+ reg_opt.len = 4;
+ reg_opt.data[0] = 0xfa;
+ reg_opt.data[1] = 0xce;
+
+ win_scale_opt.kind = TCPOPT_WINDOW;
+
+ err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
+ if (err)
+ RET_CG_ERR(err);
+
+ /* Store the same exprm option */
+ err = bpf_store_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
+ if (err != -EEXIST)
+ RET_CG_ERR(err);
+
+ err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
+ if (err)
+ RET_CG_ERR(err);
+ err = bpf_store_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
+ if (err != -EEXIST)
+ RET_CG_ERR(err);
+
+ /* Check the option has been written and can be searched */
+ ret = bpf_load_hdr_opt(skops, &exprm_opt, sizeof(exprm_opt), 0);
+ if (ret != 4 || exprm_opt.len != 4 || exprm_opt.kind != TCPOPT_EXP ||
+ exprm_opt.magic != __bpf_htons(0xeB9F))
+ RET_CG_ERR(ret);
+
+ reg_opt.len = 0;
+ ret = bpf_load_hdr_opt(skops, &reg_opt, sizeof(reg_opt), 0);
+ if (ret != 4 || reg_opt.len != 4 || reg_opt.kind != 0xB9 ||
+ reg_opt.data[0] != 0xfa || reg_opt.data[1] != 0xce)
+ RET_CG_ERR(ret);
+
+ th = skops->skb_data;
+ if (th + 1 > skops->skb_data_end)
+ RET_CG_ERR(0);
+
+ if (th->syn) {
+ active_lport_h = skops->local_port;
+ active_lport_n = th->source;
+
+ /* Search the win scale option written by kernel
+ * in the SYN packet.
+ */
+ ret = bpf_load_hdr_opt(skops, &win_scale_opt,
+ sizeof(win_scale_opt), 0);
+ if (ret != 3 || win_scale_opt.len != 3 ||
+ win_scale_opt.kind != TCPOPT_WINDOW)
+ RET_CG_ERR(ret);
+
+ /* Write the win scale option that kernel
+ * has already written.
+ */
+ err = bpf_store_hdr_opt(skops, &win_scale_opt,
+ sizeof(win_scale_opt), 0);
+ if (err != -EEXIST)
+ RET_CG_ERR(err);
+ }
+
+ return CG_OK;
+}
+
+static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
+{
+ __u8 tcp_flags = skops_tcp_flags(skops);
+
+ if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
+ /* Check the SYN from bpf_sock_ops_kern->syn_skb */
+ return check_active_syn_in(skops);
+
+ /* Passive side should have cleared the write hdr cb by now */
+ if (skops->local_port == passive_lport_h)
+ RET_CG_ERR(0);
+
+ return active_opt_len(skops);
+}
+
+static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
+{
+ if (skops->local_port == passive_lport_h)
+ RET_CG_ERR(0);
+
+ return write_active_opt(skops);
+}
+
+static int handle_parse_hdr(struct bpf_sock_ops *skops)
+{
+ /* Passive side is not writing any non-standard/unknown
+ * option, so the active side should never be called.
+ */
+ if (skops->local_port == active_lport_h)
+ RET_CG_ERR(0);
+
+ return check_active_hdr_in(skops);
+}
+
+static int handle_passive_estab(struct bpf_sock_ops *skops)
+{
+ int err;
+
+ /* No more write hdr cb */
+ bpf_sock_ops_cb_flags_set(skops,
+ skops->bpf_sock_ops_cb_flags &
+ ~BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG);
+
+ /* Recheck the SYN but check the tp->saved_syn this time */
+ err = check_active_syn_in(skops);
+ if (err == CG_ERR)
+ return err;
+
+ nr_syn++;
+
+ /* The ack has header option written by the active side also */
+ return check_active_hdr_in(skops);
+}
+
+SEC("sockops/misc_estab")
+int misc_estab(struct bpf_sock_ops *skops)
+{
+ int true_val = 1;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ passive_lport_h = skops->local_port;
+ passive_lport_n = __bpf_htons(passive_lport_h);
+ bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
+ &true_val, sizeof(true_val));
+ set_hdr_cb_flags(skops, 0);
+ break;
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ set_hdr_cb_flags(skops, 0);
+ break;
+ case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
+ return handle_parse_hdr(skops);
+ case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
+ return handle_hdr_opt_len(skops);
+ case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
+ return handle_write_hdr_opt(skops);
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ return handle_passive_estab(skops);
+ }
+
+ return CG_OK;
+}
+
+char _license[] SEC("license") = "GPL";
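
For reference, the exact bytes the two custom options occupy on the wire, worked out from the values above (kind 0xB9 is just an unassigned number the test picked; 254 is TCPOPT_EXP, the shared experimental kind):

    regular option:       kind=0xB9, len=4, data={0xfa, 0xce}  ->  b9 04 fa ce
    experimental option:  kind=254,  len=4, magic=0xeB9F       ->  fe 04 eb 9f

Both are 4 bytes total, kind and length octets included, which is why the bpf_load_hdr_opt() calls above work in 4-byte units and why reserving 12 bytes in active_opt_len() leaves deliberate slack beyond the 8 bytes actually written.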
diff --git a/tools/testing/selftests/bpf/progs/test_overhead.c b/tools/testing/selftests/bpf/progs/test_overhead.c
index 42403d088abc..abb7344b531f 100644
--- a/tools/testing/selftests/bpf/progs/test_overhead.c
+++ b/tools/testing/selftests/bpf/progs/test_overhead.c
@@ -39,10 +39,4 @@ int BPF_PROG(prog5, struct task_struct *tsk, const char *buf, bool exec)
return 0;
}
-SEC("fmod_ret/__set_task_comm")
-int BPF_PROG(prog6, struct task_struct *tsk, const char *buf, bool exec)
-{
- return !tsk;
-}
-
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
new file mode 100644
index 000000000000..fb22de7c365d
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_pe_preserve_elems.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+} array_1 SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(max_entries, 1);
+ __uint(key_size, sizeof(int));
+ __uint(value_size, sizeof(int));
+ __uint(map_flags, BPF_F_PRESERVE_ELEMS);
+} array_2 SEC(".maps");
+
+SEC("raw_tp/sched_switch")
+int BPF_PROG(read_array_1)
+{
+ struct bpf_perf_event_value val;
+
+ return bpf_perf_event_read_value(&array_1, 0, &val, sizeof(val));
+}
+
+SEC("raw_tp/task_rename")
+int BPF_PROG(read_array_2)
+{
+ struct bpf_perf_event_value val;
+
+ return bpf_perf_event_read_value(&array_2, 0, &val, sizeof(val));
+}
+
+char LICENSE[] SEC("license") = "GPL";
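
array_1 and array_2 differ only in BPF_F_PRESERVE_ELEMS, the flag this test exists for: a plain perf-event array drops its elements once the user-space map FD goes away, while a flagged one keeps them for as long as the map itself lives (here, pinned by the two loaded programs). A hedged sketch of filling slot 0 of both maps with the same software event (map names are from the source; perf_event_open() via syscall is the standard route):

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int open_sw_clock(void)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.size = sizeof(attr),
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};

	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}

/* fill slot 0 of both arrays with the same software event */
static int fill(struct bpf_object *obj)
{
	int key = 0, pe = open_sw_clock();
	int fd1 = bpf_map__fd(bpf_object__find_map_by_name(obj, "array_1"));
	int fd2 = bpf_map__fd(bpf_object__find_map_by_name(obj, "array_2"));

	if (pe < 0 || bpf_map_update_elem(fd1, &key, &pe, 0) ||
	    bpf_map_update_elem(fd2, &key, &pe, 0))
		return -1;
	/* Once the user-space map FDs are released, array_1 is cleared and
	 * read_array_1's bpf_perf_event_read_value() starts failing, while
	 * array_2 keeps its element for the still-loaded programs. */
	return 0;
}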
diff --git a/tools/testing/selftests/bpf/progs/test_pkt_access.c b/tools/testing/selftests/bpf/progs/test_pkt_access.c
index e72eba4a93d2..852051064507 100644
--- a/tools/testing/selftests/bpf/progs/test_pkt_access.c
+++ b/tools/testing/selftests/bpf/progs/test_pkt_access.c
@@ -79,6 +79,24 @@ int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
return skb->ifindex * val * var;
}
+__attribute__ ((noinline))
+int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
+{
+ void *data = (void *)(long)skb->data;
+ void *data_end = (void *)(long)skb->data_end;
+ struct tcphdr *tcp = NULL;
+
+ if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
+ return -1;
+
+ tcp = data + off;
+ if (tcp + 1 > data_end)
+ return -1;
+	/* modify the packet data */
+ tcp->check++;
+ return 0;
+}
+
SEC("classifier/test_pkt_access")
int test_pkt_access(struct __sk_buff *skb)
{
@@ -117,6 +135,8 @@ int test_pkt_access(struct __sk_buff *skb)
if (test_pkt_access_subprog3(3, skb) != skb->len * 3 * skb->ifindex)
return TC_ACT_SHOT;
if (tcp) {
+ if (test_pkt_write_access_subprog(skb, (void *)tcp - data))
+ return TC_ACT_SHOT;
if (((void *)(tcp) + 20) > data_end || proto != 6)
return TC_ACT_SHOT;
barrier(); /* to force ordering of checks */
diff --git a/tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c b/tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
new file mode 100644
index 000000000000..4c63cc87b9d0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_raw_tp_test_run.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+__u32 count = 0;
+__u32 on_cpu = 0xffffffff;
+
+SEC("raw_tp/task_rename")
+int BPF_PROG(rename, struct task_struct *task, char *comm)
+{
+
+ count++;
+ if ((__u64) task == 0x1234ULL && (__u64) comm == 0x5678ULL) {
+ on_cpu = bpf_get_smp_processor_id();
+ return (long)task + (long)comm;
+ }
+
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
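
The 0x1234/0x5678 check only ever passes when the "tracepoint" arguments are injected, i.e. when the program is driven through BPF_PROG_TEST_RUN with a crafted context, and on_cpu then records where it ran. A hedged driver sketch using bpf_test_run_opts' ctx_in plus the BPF_F_TEST_RUN_ON_CPU flag added alongside this test:

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int run_on_cpu(int prog_fd, int cpu)
{
	__u64 args[2] = { 0x1234ULL, 0x5678ULL }; /* task, comm per the check */
	DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
		.ctx_in = args,
		.ctx_size_in = sizeof(args),
		.flags = BPF_F_TEST_RUN_ON_CPU,
		.cpu = cpu,
	);

	/* on success the program saw task==0x1234/comm==0x5678 and stored
	 * bpf_get_smp_processor_id() into on_cpu */
	return bpf_prog_test_run_opts(prog_fd, &opts);
}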
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup.c b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
index bbf8296f4d66..1032b292af5b 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup.c
@@ -19,6 +19,17 @@
#define IP6(aaaa, bbbb, cccc, dddd) \
{ bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
+/* Macros for least-significant byte and word accesses. */
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define LSE_INDEX(index, size) (index)
+#else
+#define LSE_INDEX(index, size) ((size) - (index) - 1)
+#endif
+#define LSB(value, index) \
+ (((__u8 *)&(value))[LSE_INDEX((index), sizeof(value))])
+#define LSW(value, index) \
+ (((__u16 *)&(value))[LSE_INDEX((index), sizeof(value) / 2)])
+
#define MAX_SOCKS 32
struct {
@@ -369,171 +380,146 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
int err, family;
- __u16 *half;
- __u8 *byte;
bool v4;
v4 = (ctx->family == AF_INET);
/* Narrow loads from family field */
- byte = (__u8 *)&ctx->family;
- half = (__u16 *)&ctx->family;
- if (byte[0] != (v4 ? AF_INET : AF_INET6) ||
- byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
+ if (LSB(ctx->family, 0) != (v4 ? AF_INET : AF_INET6) ||
+ LSB(ctx->family, 1) != 0 || LSB(ctx->family, 2) != 0 || LSB(ctx->family, 3) != 0)
return SK_DROP;
- if (half[0] != (v4 ? AF_INET : AF_INET6))
+ if (LSW(ctx->family, 0) != (v4 ? AF_INET : AF_INET6))
return SK_DROP;
- byte = (__u8 *)&ctx->protocol;
- if (byte[0] != IPPROTO_TCP ||
- byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
+ /* Narrow loads from protocol field */
+ if (LSB(ctx->protocol, 0) != IPPROTO_TCP ||
+ LSB(ctx->protocol, 1) != 0 || LSB(ctx->protocol, 2) != 0 || LSB(ctx->protocol, 3) != 0)
return SK_DROP;
- half = (__u16 *)&ctx->protocol;
- if (half[0] != IPPROTO_TCP)
+ if (LSW(ctx->protocol, 0) != IPPROTO_TCP)
return SK_DROP;
/* Narrow loads from remote_port field. Expect non-0 value. */
- byte = (__u8 *)&ctx->remote_port;
- if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0)
+ if (LSB(ctx->remote_port, 0) == 0 && LSB(ctx->remote_port, 1) == 0 &&
+ LSB(ctx->remote_port, 2) == 0 && LSB(ctx->remote_port, 3) == 0)
return SK_DROP;
- half = (__u16 *)&ctx->remote_port;
- if (half[0] == 0)
+ if (LSW(ctx->remote_port, 0) == 0)
return SK_DROP;
/* Narrow loads from local_port field. Expect DST_PORT. */
- byte = (__u8 *)&ctx->local_port;
- if (byte[0] != ((DST_PORT >> 0) & 0xff) ||
- byte[1] != ((DST_PORT >> 8) & 0xff) ||
- byte[2] != 0 || byte[3] != 0)
+ if (LSB(ctx->local_port, 0) != ((DST_PORT >> 0) & 0xff) ||
+ LSB(ctx->local_port, 1) != ((DST_PORT >> 8) & 0xff) ||
+ LSB(ctx->local_port, 2) != 0 || LSB(ctx->local_port, 3) != 0)
return SK_DROP;
- half = (__u16 *)&ctx->local_port;
- if (half[0] != DST_PORT)
+ if (LSW(ctx->local_port, 0) != DST_PORT)
return SK_DROP;
/* Narrow loads from IPv4 fields */
if (v4) {
/* Expect non-0.0.0.0 in remote_ip4 */
- byte = (__u8 *)&ctx->remote_ip4;
- if (byte[0] == 0 && byte[1] == 0 &&
- byte[2] == 0 && byte[3] == 0)
+ if (LSB(ctx->remote_ip4, 0) == 0 && LSB(ctx->remote_ip4, 1) == 0 &&
+ LSB(ctx->remote_ip4, 2) == 0 && LSB(ctx->remote_ip4, 3) == 0)
return SK_DROP;
- half = (__u16 *)&ctx->remote_ip4;
- if (half[0] == 0 && half[1] == 0)
+ if (LSW(ctx->remote_ip4, 0) == 0 && LSW(ctx->remote_ip4, 1) == 0)
return SK_DROP;
/* Expect DST_IP4 in local_ip4 */
- byte = (__u8 *)&ctx->local_ip4;
- if (byte[0] != ((DST_IP4 >> 0) & 0xff) ||
- byte[1] != ((DST_IP4 >> 8) & 0xff) ||
- byte[2] != ((DST_IP4 >> 16) & 0xff) ||
- byte[3] != ((DST_IP4 >> 24) & 0xff))
+ if (LSB(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xff) ||
+ LSB(ctx->local_ip4, 1) != ((DST_IP4 >> 8) & 0xff) ||
+ LSB(ctx->local_ip4, 2) != ((DST_IP4 >> 16) & 0xff) ||
+ LSB(ctx->local_ip4, 3) != ((DST_IP4 >> 24) & 0xff))
return SK_DROP;
- half = (__u16 *)&ctx->local_ip4;
- if (half[0] != ((DST_IP4 >> 0) & 0xffff) ||
- half[1] != ((DST_IP4 >> 16) & 0xffff))
+ if (LSW(ctx->local_ip4, 0) != ((DST_IP4 >> 0) & 0xffff) ||
+ LSW(ctx->local_ip4, 1) != ((DST_IP4 >> 16) & 0xffff))
return SK_DROP;
} else {
/* Expect 0.0.0.0 IPs when family != AF_INET */
- byte = (__u8 *)&ctx->remote_ip4;
- if (byte[0] != 0 || byte[1] != 0 &&
- byte[2] != 0 || byte[3] != 0)
+ if (LSB(ctx->remote_ip4, 0) != 0 || LSB(ctx->remote_ip4, 1) != 0 ||
+ LSB(ctx->remote_ip4, 2) != 0 || LSB(ctx->remote_ip4, 3) != 0)
return SK_DROP;
- half = (__u16 *)&ctx->remote_ip4;
- if (half[0] != 0 || half[1] != 0)
+ if (LSW(ctx->remote_ip4, 0) != 0 || LSW(ctx->remote_ip4, 1) != 0)
return SK_DROP;
- byte = (__u8 *)&ctx->local_ip4;
- if (byte[0] != 0 || byte[1] != 0 &&
- byte[2] != 0 || byte[3] != 0)
+ if (LSB(ctx->local_ip4, 0) != 0 || LSB(ctx->local_ip4, 1) != 0 ||
+ LSB(ctx->local_ip4, 2) != 0 || LSB(ctx->local_ip4, 3) != 0)
return SK_DROP;
- half = (__u16 *)&ctx->local_ip4;
- if (half[0] != 0 || half[1] != 0)
+ if (LSW(ctx->local_ip4, 0) != 0 || LSW(ctx->local_ip4, 1) != 0)
return SK_DROP;
}
/* Narrow loads from IPv6 fields */
if (!v4) {
- /* Expenct non-:: IP in remote_ip6 */
- byte = (__u8 *)&ctx->remote_ip6;
- if (byte[0] == 0 && byte[1] == 0 &&
- byte[2] == 0 && byte[3] == 0 &&
- byte[4] == 0 && byte[5] == 0 &&
- byte[6] == 0 && byte[7] == 0 &&
- byte[8] == 0 && byte[9] == 0 &&
- byte[10] == 0 && byte[11] == 0 &&
- byte[12] == 0 && byte[13] == 0 &&
- byte[14] == 0 && byte[15] == 0)
+ /* Expect non-:: IP in remote_ip6 */
+ if (LSB(ctx->remote_ip6[0], 0) == 0 && LSB(ctx->remote_ip6[0], 1) == 0 &&
+ LSB(ctx->remote_ip6[0], 2) == 0 && LSB(ctx->remote_ip6[0], 3) == 0 &&
+ LSB(ctx->remote_ip6[1], 0) == 0 && LSB(ctx->remote_ip6[1], 1) == 0 &&
+ LSB(ctx->remote_ip6[1], 2) == 0 && LSB(ctx->remote_ip6[1], 3) == 0 &&
+ LSB(ctx->remote_ip6[2], 0) == 0 && LSB(ctx->remote_ip6[2], 1) == 0 &&
+ LSB(ctx->remote_ip6[2], 2) == 0 && LSB(ctx->remote_ip6[2], 3) == 0 &&
+ LSB(ctx->remote_ip6[3], 0) == 0 && LSB(ctx->remote_ip6[3], 1) == 0 &&
+ LSB(ctx->remote_ip6[3], 2) == 0 && LSB(ctx->remote_ip6[3], 3) == 0)
return SK_DROP;
- half = (__u16 *)&ctx->remote_ip6;
- if (half[0] == 0 && half[1] == 0 &&
- half[2] == 0 && half[3] == 0 &&
- half[4] == 0 && half[5] == 0 &&
- half[6] == 0 && half[7] == 0)
+ if (LSW(ctx->remote_ip6[0], 0) == 0 && LSW(ctx->remote_ip6[0], 1) == 0 &&
+ LSW(ctx->remote_ip6[1], 0) == 0 && LSW(ctx->remote_ip6[1], 1) == 0 &&
+ LSW(ctx->remote_ip6[2], 0) == 0 && LSW(ctx->remote_ip6[2], 1) == 0 &&
+ LSW(ctx->remote_ip6[3], 0) == 0 && LSW(ctx->remote_ip6[3], 1) == 0)
return SK_DROP;
-
/* Expect DST_IP6 in local_ip6 */
- byte = (__u8 *)&ctx->local_ip6;
- if (byte[0] != ((DST_IP6[0] >> 0) & 0xff) ||
- byte[1] != ((DST_IP6[0] >> 8) & 0xff) ||
- byte[2] != ((DST_IP6[0] >> 16) & 0xff) ||
- byte[3] != ((DST_IP6[0] >> 24) & 0xff) ||
- byte[4] != ((DST_IP6[1] >> 0) & 0xff) ||
- byte[5] != ((DST_IP6[1] >> 8) & 0xff) ||
- byte[6] != ((DST_IP6[1] >> 16) & 0xff) ||
- byte[7] != ((DST_IP6[1] >> 24) & 0xff) ||
- byte[8] != ((DST_IP6[2] >> 0) & 0xff) ||
- byte[9] != ((DST_IP6[2] >> 8) & 0xff) ||
- byte[10] != ((DST_IP6[2] >> 16) & 0xff) ||
- byte[11] != ((DST_IP6[2] >> 24) & 0xff) ||
- byte[12] != ((DST_IP6[3] >> 0) & 0xff) ||
- byte[13] != ((DST_IP6[3] >> 8) & 0xff) ||
- byte[14] != ((DST_IP6[3] >> 16) & 0xff) ||
- byte[15] != ((DST_IP6[3] >> 24) & 0xff))
+ if (LSB(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xff) ||
+ LSB(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 8) & 0xff) ||
+ LSB(ctx->local_ip6[0], 2) != ((DST_IP6[0] >> 16) & 0xff) ||
+ LSB(ctx->local_ip6[0], 3) != ((DST_IP6[0] >> 24) & 0xff) ||
+ LSB(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xff) ||
+ LSB(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 8) & 0xff) ||
+ LSB(ctx->local_ip6[1], 2) != ((DST_IP6[1] >> 16) & 0xff) ||
+ LSB(ctx->local_ip6[1], 3) != ((DST_IP6[1] >> 24) & 0xff) ||
+ LSB(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xff) ||
+ LSB(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 8) & 0xff) ||
+ LSB(ctx->local_ip6[2], 2) != ((DST_IP6[2] >> 16) & 0xff) ||
+ LSB(ctx->local_ip6[2], 3) != ((DST_IP6[2] >> 24) & 0xff) ||
+ LSB(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xff) ||
+ LSB(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 8) & 0xff) ||
+ LSB(ctx->local_ip6[3], 2) != ((DST_IP6[3] >> 16) & 0xff) ||
+ LSB(ctx->local_ip6[3], 3) != ((DST_IP6[3] >> 24) & 0xff))
return SK_DROP;
- half = (__u16 *)&ctx->local_ip6;
- if (half[0] != ((DST_IP6[0] >> 0) & 0xffff) ||
- half[1] != ((DST_IP6[0] >> 16) & 0xffff) ||
- half[2] != ((DST_IP6[1] >> 0) & 0xffff) ||
- half[3] != ((DST_IP6[1] >> 16) & 0xffff) ||
- half[4] != ((DST_IP6[2] >> 0) & 0xffff) ||
- half[5] != ((DST_IP6[2] >> 16) & 0xffff) ||
- half[6] != ((DST_IP6[3] >> 0) & 0xffff) ||
- half[7] != ((DST_IP6[3] >> 16) & 0xffff))
+ if (LSW(ctx->local_ip6[0], 0) != ((DST_IP6[0] >> 0) & 0xffff) ||
+ LSW(ctx->local_ip6[0], 1) != ((DST_IP6[0] >> 16) & 0xffff) ||
+ LSW(ctx->local_ip6[1], 0) != ((DST_IP6[1] >> 0) & 0xffff) ||
+ LSW(ctx->local_ip6[1], 1) != ((DST_IP6[1] >> 16) & 0xffff) ||
+ LSW(ctx->local_ip6[2], 0) != ((DST_IP6[2] >> 0) & 0xffff) ||
+ LSW(ctx->local_ip6[2], 1) != ((DST_IP6[2] >> 16) & 0xffff) ||
+ LSW(ctx->local_ip6[3], 0) != ((DST_IP6[3] >> 0) & 0xffff) ||
+ LSW(ctx->local_ip6[3], 1) != ((DST_IP6[3] >> 16) & 0xffff))
return SK_DROP;
} else {
/* Expect :: IPs when family != AF_INET6 */
- byte = (__u8 *)&ctx->remote_ip6;
- if (byte[0] != 0 || byte[1] != 0 ||
- byte[2] != 0 || byte[3] != 0 ||
- byte[4] != 0 || byte[5] != 0 ||
- byte[6] != 0 || byte[7] != 0 ||
- byte[8] != 0 || byte[9] != 0 ||
- byte[10] != 0 || byte[11] != 0 ||
- byte[12] != 0 || byte[13] != 0 ||
- byte[14] != 0 || byte[15] != 0)
+ if (LSB(ctx->remote_ip6[0], 0) != 0 || LSB(ctx->remote_ip6[0], 1) != 0 ||
+ LSB(ctx->remote_ip6[0], 2) != 0 || LSB(ctx->remote_ip6[0], 3) != 0 ||
+ LSB(ctx->remote_ip6[1], 0) != 0 || LSB(ctx->remote_ip6[1], 1) != 0 ||
+ LSB(ctx->remote_ip6[1], 2) != 0 || LSB(ctx->remote_ip6[1], 3) != 0 ||
+ LSB(ctx->remote_ip6[2], 0) != 0 || LSB(ctx->remote_ip6[2], 1) != 0 ||
+ LSB(ctx->remote_ip6[2], 2) != 0 || LSB(ctx->remote_ip6[2], 3) != 0 ||
+ LSB(ctx->remote_ip6[3], 0) != 0 || LSB(ctx->remote_ip6[3], 1) != 0 ||
+ LSB(ctx->remote_ip6[3], 2) != 0 || LSB(ctx->remote_ip6[3], 3) != 0)
return SK_DROP;
- half = (__u16 *)&ctx->remote_ip6;
- if (half[0] != 0 || half[1] != 0 ||
- half[2] != 0 || half[3] != 0 ||
- half[4] != 0 || half[5] != 0 ||
- half[6] != 0 || half[7] != 0)
+ if (LSW(ctx->remote_ip6[0], 0) != 0 || LSW(ctx->remote_ip6[0], 1) != 0 ||
+ LSW(ctx->remote_ip6[1], 0) != 0 || LSW(ctx->remote_ip6[1], 1) != 0 ||
+ LSW(ctx->remote_ip6[2], 0) != 0 || LSW(ctx->remote_ip6[2], 1) != 0 ||
+ LSW(ctx->remote_ip6[3], 0) != 0 || LSW(ctx->remote_ip6[3], 1) != 0)
return SK_DROP;
- byte = (__u8 *)&ctx->local_ip6;
- if (byte[0] != 0 || byte[1] != 0 ||
- byte[2] != 0 || byte[3] != 0 ||
- byte[4] != 0 || byte[5] != 0 ||
- byte[6] != 0 || byte[7] != 0 ||
- byte[8] != 0 || byte[9] != 0 ||
- byte[10] != 0 || byte[11] != 0 ||
- byte[12] != 0 || byte[13] != 0 ||
- byte[14] != 0 || byte[15] != 0)
+ if (LSB(ctx->local_ip6[0], 0) != 0 || LSB(ctx->local_ip6[0], 1) != 0 ||
+ LSB(ctx->local_ip6[0], 2) != 0 || LSB(ctx->local_ip6[0], 3) != 0 ||
+ LSB(ctx->local_ip6[1], 0) != 0 || LSB(ctx->local_ip6[1], 1) != 0 ||
+ LSB(ctx->local_ip6[1], 2) != 0 || LSB(ctx->local_ip6[1], 3) != 0 ||
+ LSB(ctx->local_ip6[2], 0) != 0 || LSB(ctx->local_ip6[2], 1) != 0 ||
+ LSB(ctx->local_ip6[2], 2) != 0 || LSB(ctx->local_ip6[2], 3) != 0 ||
+ LSB(ctx->local_ip6[3], 0) != 0 || LSB(ctx->local_ip6[3], 1) != 0 ||
+ LSB(ctx->local_ip6[3], 2) != 0 || LSB(ctx->local_ip6[3], 3) != 0)
return SK_DROP;
- half = (__u16 *)&ctx->local_ip6;
- if (half[0] != 0 || half[1] != 0 ||
- half[2] != 0 || half[3] != 0 ||
- half[4] != 0 || half[5] != 0 ||
- half[6] != 0 || half[7] != 0)
+		if (LSW(ctx->local_ip6[0], 0) != 0 || LSW(ctx->local_ip6[0], 1) != 0 ||
+		    LSW(ctx->local_ip6[1], 0) != 0 || LSW(ctx->local_ip6[1], 1) != 0 ||
+		    LSW(ctx->local_ip6[2], 0) != 0 || LSW(ctx->local_ip6[2], 1) != 0 ||
+		    LSW(ctx->local_ip6[3], 0) != 0 || LSW(ctx->local_ip6[3], 1) != 0)
return SK_DROP;
}
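
Aside on the helpers introduced above: LSB() and LSW() pick out a single byte or 16-bit halfword of a context field, so the verifier sees a genuine narrow load instead of a pointer cast. Their definitions live elsewhere in test_sk_lookup.c; a minimal sketch of how such selectors can be written (illustrative, not quoted from the patch):

    /* Select byte/halfword `index` of an integer field; a sketch of
     * the narrow-load selectors the test relies on.
     */
    #define LSB(value, index)	(((__u8 *)&(value))[index] & 0xff)
    #define LSW(value, index)	(((__u16 *)&(value))[index] & 0xffff)
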
diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c
index 9bcaa37f476a..81b57b9aaaea 100644
--- a/tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c
@@ -7,19 +7,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-
-enum bpf_addr_array_idx {
- ADDR_SRV_IDX,
- ADDR_CLI_IDX,
- __NR_BPF_ADDR_ARRAY_IDX,
-};
-
-enum bpf_result_array_idx {
- EGRESS_SRV_IDX,
- EGRESS_CLI_IDX,
- INGRESS_LISTEN_IDX,
- __NR_BPF_RESULT_ARRAY_IDX,
-};
+#include "bpf_tcp_helpers.h"
enum bpf_linum_array_idx {
EGRESS_LINUM_IDX,
@@ -29,27 +17,6 @@ enum bpf_linum_array_idx {
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, __NR_BPF_ADDR_ARRAY_IDX);
- __type(key, __u32);
- __type(value, struct sockaddr_in6);
-} addr_map SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, __NR_BPF_RESULT_ARRAY_IDX);
- __type(key, __u32);
- __type(value, struct bpf_sock);
-} sock_result_map SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
- __uint(max_entries, __NR_BPF_RESULT_ARRAY_IDX);
- __type(key, __u32);
- __type(value, struct bpf_tcp_sock);
-} tcp_sock_result_map SEC(".maps");
-
-struct {
- __uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, __NR_BPF_LINUM_ARRAY_IDX);
__type(key, __u32);
__type(value, __u32);
@@ -74,6 +41,17 @@ struct {
__type(value, struct bpf_spinlock_cnt);
} sk_pkt_out_cnt10 SEC(".maps");
+struct bpf_tcp_sock listen_tp = {};
+struct sockaddr_in6 srv_sa6 = {};
+struct bpf_tcp_sock cli_tp = {};
+struct bpf_tcp_sock srv_tp = {};
+struct bpf_sock listen_sk = {};
+struct bpf_sock srv_sk = {};
+struct bpf_sock cli_sk = {};
+__u64 parent_cg_id = 0;
+__u64 child_cg_id = 0;
+__u64 lsndtime = 0;
+
static bool is_loopback6(__u32 *a6)
{
return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);
@@ -130,62 +108,86 @@ static void tpcpy(struct bpf_tcp_sock *dst,
dst->bytes_acked = src->bytes_acked;
}
-#define RETURN { \
+/* Always return CG_OK so that no pkt will be filtered out */
+#define CG_OK 1
+
+#define RET_LOG() ({ \
linum = __LINE__; \
- bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \
- return 1; \
-}
+ bpf_map_update_elem(&linum_map, &linum_idx, &linum, BPF_NOEXIST); \
+ return CG_OK; \
+})
SEC("cgroup_skb/egress")
int egress_read_sock_fields(struct __sk_buff *skb)
{
struct bpf_spinlock_cnt cli_cnt_init = { .lock = 0, .cnt = 0xeB9F };
- __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
struct bpf_spinlock_cnt *pkt_out_cnt, *pkt_out_cnt10;
- struct sockaddr_in6 *srv_sa6, *cli_sa6;
struct bpf_tcp_sock *tp, *tp_ret;
struct bpf_sock *sk, *sk_ret;
__u32 linum, linum_idx;
+ struct tcp_sock *ktp;
linum_idx = EGRESS_LINUM_IDX;
sk = skb->sk;
- if (!sk || sk->state == 10)
- RETURN;
+ if (!sk)
+ RET_LOG();
+	/* Ignore traffic that is not from this test; a TCP_LISTEN (10)
+	 * socket is instead copied at the ingress side.
+	 */
+ if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
+ sk->state == 10)
+ return CG_OK;
+
+ if (sk->src_port == bpf_ntohs(srv_sa6.sin6_port)) {
+ /* Server socket */
+ sk_ret = &srv_sk;
+ tp_ret = &srv_tp;
+ } else if (sk->dst_port == srv_sa6.sin6_port) {
+ /* Client socket */
+ sk_ret = &cli_sk;
+ tp_ret = &cli_tp;
+ } else {
+ /* Not the testing egress traffic */
+ return CG_OK;
+ }
+
+ /* It must be a fullsock for cgroup_skb/egress prog */
sk = bpf_sk_fullsock(sk);
- if (!sk || sk->family != AF_INET6 || sk->protocol != IPPROTO_TCP ||
- !is_loopback6(sk->src_ip6))
- RETURN;
+ if (!sk)
+ RET_LOG();
+
+ /* Not the testing egress traffic */
+ if (sk->protocol != IPPROTO_TCP)
+ return CG_OK;
tp = bpf_tcp_sock(sk);
if (!tp)
- RETURN;
+ RET_LOG();
- srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
- cli_sa6 = bpf_map_lookup_elem(&addr_map, &cli_idx);
- if (!srv_sa6 || !cli_sa6)
- RETURN;
+ skcpy(sk_ret, sk);
+ tpcpy(tp_ret, tp);
- if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
- result_idx = EGRESS_SRV_IDX;
- else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
- result_idx = EGRESS_CLI_IDX;
- else
- RETURN;
+ if (sk_ret == &srv_sk) {
+ ktp = bpf_skc_to_tcp_sock(sk);
- sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
- tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
- if (!sk_ret || !tp_ret)
- RETURN;
+ if (!ktp)
+ RET_LOG();
- skcpy(sk_ret, sk);
- tpcpy(tp_ret, tp);
+ lsndtime = ktp->lsndtime;
+
+ child_cg_id = bpf_sk_cgroup_id(ktp);
+ if (!child_cg_id)
+ RET_LOG();
+
+ parent_cg_id = bpf_sk_ancestor_cgroup_id(ktp, 2);
+ if (!parent_cg_id)
+ RET_LOG();
- if (result_idx == EGRESS_SRV_IDX) {
/* The userspace has created it for srv sk */
- pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk, 0, 0);
- pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, sk,
+ pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, ktp, 0, 0);
+ pkt_out_cnt10 = bpf_sk_storage_get(&sk_pkt_out_cnt10, ktp,
0, 0);
} else {
pkt_out_cnt = bpf_sk_storage_get(&sk_pkt_out_cnt, sk,
@@ -197,7 +199,7 @@ int egress_read_sock_fields(struct __sk_buff *skb)
}
if (!pkt_out_cnt || !pkt_out_cnt10)
- RETURN;
+ RET_LOG();
/* Even both cnt and cnt10 have lock defined in their BTF,
* intentionally one cnt takes lock while one does not
@@ -208,48 +210,44 @@ int egress_read_sock_fields(struct __sk_buff *skb)
pkt_out_cnt10->cnt += 10;
bpf_spin_unlock(&pkt_out_cnt10->lock);
- RETURN;
+ return CG_OK;
}
SEC("cgroup_skb/ingress")
int ingress_read_sock_fields(struct __sk_buff *skb)
{
- __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX;
- struct bpf_tcp_sock *tp, *tp_ret;
- struct bpf_sock *sk, *sk_ret;
- struct sockaddr_in6 *srv_sa6;
+ struct bpf_tcp_sock *tp;
__u32 linum, linum_idx;
+ struct bpf_sock *sk;
linum_idx = INGRESS_LINUM_IDX;
sk = skb->sk;
- if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
- RETURN;
+ if (!sk)
+ RET_LOG();
- srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
- if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
- RETURN;
+ /* Not the testing ingress traffic to the server */
+ if (sk->family != AF_INET6 || !is_loopback6(sk->src_ip6) ||
+ sk->src_port != bpf_ntohs(srv_sa6.sin6_port))
+ return CG_OK;
- if (sk->state != 10 && sk->state != 12)
- RETURN;
+ /* Only interested in TCP_LISTEN */
+ if (sk->state != 10)
+ return CG_OK;
- sk = bpf_get_listener_sock(sk);
+ /* It must be a fullsock for cgroup_skb/ingress prog */
+ sk = bpf_sk_fullsock(sk);
if (!sk)
- RETURN;
+ RET_LOG();
tp = bpf_tcp_sock(sk);
if (!tp)
- RETURN;
-
- sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
- tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
- if (!sk_ret || !tp_ret)
- RETURN;
+ RET_LOG();
- skcpy(sk_ret, sk);
- tpcpy(tp_ret, tp);
+ skcpy(&listen_sk, sk);
+ tpcpy(&listen_tp, tp);
- RETURN;
+ return CG_OK;
}
char _license[] SEC("license") = "GPL";
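
The rewrite above drops the addr/result array maps in favor of plain global variables (srv_sk, cli_tp, and so on), which userspace can read directly through the mmap'ed skeleton; RET_LOG() also records only the first failing line, since BPF_NOEXIST refuses to overwrite an existing map entry. A hedged sketch of the userspace side, assuming the skeleton generated from the renamed test_sock_fields.c:

    #include <stdio.h>
    #include <sys/socket.h>
    #include "test_sock_fields.skel.h"	/* assumed skeleton name */

    static void check_globals(struct test_sock_fields *skel)
    {
    	/* Globals declared in the BPF object land in .bss and are
    	 * mmap'ed by libbpf, so plain reads see the latest values.
    	 */
    	if (skel->bss->srv_sk.family != AF_INET6)
    		fprintf(stderr, "unexpected srv_sk.family\n");
    	if (!skel->bss->lsndtime)
    		fprintf(stderr, "lsndtime was never set\n");
    }
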
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c b/tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c
new file mode 100644
index 000000000000..02a59e220cbc
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_invalid_update.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} map SEC(".maps");
+
+SEC("sockops")
+int bpf_sockmap(struct bpf_sock_ops *skops)
+{
+ __u32 key = 0;
+
+ if (skops->sk)
+ bpf_map_update_elem(&map, &key, skops->sk, 0);
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
index 3dca4c2e2418..1858435de7aa 100644
--- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h
@@ -131,39 +131,55 @@ int bpf_prog2(struct __sk_buff *skb)
}
-SEC("sk_skb3")
-int bpf_prog3(struct __sk_buff *skb)
+static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
{
- const int one = 1;
- int err, *f, ret = SK_PASS;
+ int err = bpf_skb_pull_data(skb, 6 + offset);
void *data_end;
char *c;
- err = bpf_skb_pull_data(skb, 19);
if (err)
- goto tls_out;
+ return;
c = (char *)(long)skb->data;
data_end = (void *)(long)skb->data_end;
- if (c + 18 < data_end)
- memcpy(&c[13], "PASS", 4);
+ if (c + 5 + offset < data_end)
+ memcpy(c + offset, "PASS", 4);
+}
+
+SEC("sk_skb3")
+int bpf_prog3(struct __sk_buff *skb)
+{
+ int err, *f, ret = SK_PASS;
+ const int one = 1;
+
f = bpf_map_lookup_elem(&sock_skb_opts, &one);
if (f && *f) {
__u64 flags = 0;
ret = 0;
flags = *f;
+
+ err = bpf_skb_adjust_room(skb, -13, 0, 0);
+ if (err)
+ return SK_DROP;
+ err = bpf_skb_adjust_room(skb, 4, 0, 0);
+ if (err)
+ return SK_DROP;
+ bpf_write_pass(skb, 0);
#ifdef SOCKMAP
return bpf_sk_redirect_map(skb, &tls_sock_map, ret, flags);
#else
return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
}
-
f = bpf_map_lookup_elem(&sock_skb_opts, &one);
if (f && *f)
ret = SK_DROP;
+ err = bpf_skb_adjust_room(skb, 4, 0, 0);
+ if (err)
+ return SK_DROP;
+ bpf_write_pass(skb, 13);
tls_out:
return ret;
}
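
bpf_write_pass() above shows a pattern worth calling out: bpf_skb_pull_data() may reallocate the packet, so skb->data and skb->data_end must be reloaded and the bounds re-checked before any write. A condensed, illustrative sketch of the same pattern:

    /* Pull, reload pointers, re-check bounds, then write 4 bytes. */
    static inline void write4(struct __sk_buff *skb, int off, const char tag[4])
    {
    	void *data_end;
    	char *c;

    	if (bpf_skb_pull_data(skb, off + 4))	/* may move skb->data */
    		return;
    	c = (char *)(long)skb->data;		/* reload after the pull */
    	data_end = (void *)(long)skb->data_end;
    	if ((void *)(c + off + 4) <= data_end)
    		__builtin_memcpy(c + off, tag, 4);
    }
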
diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_update.c b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
new file mode 100644
index 000000000000..9d0c9f28cab2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_sockmap_update.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Cloudflare
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} src SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKMAP);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} dst_sock_map SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SOCKHASH);
+ __uint(max_entries, 1);
+ __type(key, __u32);
+ __type(value, __u64);
+} dst_sock_hash SEC(".maps");
+
+SEC("classifier/copy_sock_map")
+int copy_sock_map(void *ctx)
+{
+ struct bpf_sock *sk;
+ bool failed = false;
+ __u32 key = 0;
+
+ sk = bpf_map_lookup_elem(&src, &key);
+ if (!sk)
+ return SK_DROP;
+
+ if (bpf_map_update_elem(&dst_sock_map, &key, sk, 0))
+ failed = true;
+
+ if (bpf_map_update_elem(&dst_sock_hash, &key, sk, 0))
+ failed = true;
+
+ bpf_sk_release(sk);
+ return failed ? SK_DROP : SK_PASS;
+}
+
+char _license[] SEC("license") = "GPL";
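
A plausible way to drive copy_sock_map from a test is BPF_PROG_TEST_RUN with a throwaway packet, since the program never touches its context; prog_fd and error handling are assumed here:

    char pkt[64] = {};	/* contents are irrelevant to copy_sock_map */
    struct bpf_prog_test_run_attr tattr = {
    	.prog_fd = prog_fd,		/* fd of the loaded copy_sock_map */
    	.repeat = 1,
    	.data_in = pkt,
    	.data_size_in = sizeof(pkt),
    };
    int err = bpf_prog_test_run_xattr(&tattr);
    /* err == 0 && tattr.retval == SK_PASS means both updates succeeded */
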
diff --git a/tools/testing/selftests/bpf/progs/test_subprogs.c b/tools/testing/selftests/bpf/progs/test_subprogs.c
new file mode 100644
index 000000000000..d3c5673c0218
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_subprogs.c
@@ -0,0 +1,103 @@
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+const char LICENSE[] SEC("license") = "GPL";
+
+__noinline int sub1(int x)
+{
+ return x + 1;
+}
+
+static __noinline int sub5(int v);
+
+__noinline int sub2(int y)
+{
+ return sub5(y + 2);
+}
+
+static __noinline int sub3(int z)
+{
+ return z + 3 + sub1(4);
+}
+
+static __noinline int sub4(int w)
+{
+ return w + sub3(5) + sub1(6);
+}
+
+/* sub5() is an identity function, just to test weirder function layouts and
+ * call patterns
+ */
+static __noinline int sub5(int v)
+{
+ return sub1(v) - 1; /* compensates sub1()'s + 1 */
+}
+
+/* unfortunately the verifier rejects `struct task_struct *t` as an unknown
+ * pointer type, so we need to accept the pointer as an integer and then cast
+ * it inside the function
+ */
+__noinline int get_task_tgid(uintptr_t t)
+{
+ /* this ensures that CO-RE relocs work in multi-subprogs .text */
+ return BPF_CORE_READ((struct task_struct *)(void *)t, tgid);
+}
+
+int res1 = 0;
+int res2 = 0;
+int res3 = 0;
+int res4 = 0;
+
+SEC("raw_tp/sys_enter")
+int prog1(void *ctx)
+{
+ /* perform some CO-RE relocations to ensure they work with multi-prog
+ * sections correctly
+ */
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res1 = sub1(1) + sub3(2); /* (1 + 1) + (2 + 3 + (4 + 1)) = 12 */
+ return 0;
+}
+
+SEC("raw_tp/sys_exit")
+int prog2(void *ctx)
+{
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res2 = sub2(3) + sub3(4); /* (3 + 2) + (4 + 3 + (4 + 1)) = 17 */
+ return 0;
+}
+
+/* prog3 has the same section name as prog1 */
+SEC("raw_tp/sys_enter")
+int prog3(void *ctx)
+{
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res3 = sub3(5) + 6; /* (5 + 3 + (4 + 1)) + 6 = 19 */
+ return 0;
+}
+
+/* prog4 has the same section name as prog2 */
+SEC("raw_tp/sys_exit")
+int prog4(void *ctx)
+{
+ struct task_struct *t = (void *)bpf_get_current_task();
+
+ if (!BPF_CORE_READ(t, pid) || !get_task_tgid((uintptr_t)t))
+ return 1;
+
+ res4 = sub4(7) + sub1(8); /* (7 + (5 + 3 + (4 + 1)) + (6 + 1)) + (8 + 1) = 36 */
+ return 0;
+}
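
The expected values in the comments (12, 17, 19, 36) can be checked from userspace once a syscall has fired both tracepoints; a hedged sketch, assuming the skeleton generated from this file and the ASSERT_EQ() helper added to test_progs.h later in this series:

    #include <unistd.h>
    #include "test_subprogs.skel.h"	/* assumed skeleton name */

    static void check_results(struct test_subprogs *skel)
    {
    	usleep(1);	/* any syscall triggers sys_enter/sys_exit */
    	ASSERT_EQ(skel->bss->res1, 12, "res1");
    	ASSERT_EQ(skel->bss->res2, 17, "res2");
    	ASSERT_EQ(skel->bss->res3, 19, "res3");
    	ASSERT_EQ(skel->bss->res4, 36, "res4");
    }
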
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
index 458b0d69133e..553a282d816a 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c
@@ -18,11 +18,11 @@
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string";
unsigned char i;
- char name[64];
+ char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
index b2e6f9b0894d..2b64bc563a12 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c
@@ -18,11 +18,11 @@
#define MAX_ULONG_STR_LEN 7
#define MAX_VALUE_STR_LEN (TCP_MEM_LOOPS * MAX_ULONG_STR_LEN)
+const char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
static __attribute__((noinline)) int is_tcp_mem(struct bpf_sysctl *ctx)
{
- volatile char tcp_mem_name[] = "net/ipv4/tcp_mem/very_very_very_very_long_pointless_string_to_stress_byte_loop";
unsigned char i;
- char name[64];
+ char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
index 50525235380e..5489823c83fc 100644
--- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
+++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c
@@ -19,11 +19,11 @@
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#endif
+const char tcp_mem_name[] = "net/ipv4/tcp_mem";
static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx)
{
- char tcp_mem_name[] = "net/ipv4/tcp_mem";
unsigned char i;
- char name[64];
+ char name[sizeof(tcp_mem_name)];
int ret;
memset(name, 0, sizeof(name));
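
All three sysctl hunks make the same change: the name string moves from a (volatile) on-stack array to a const global, so it lives in read-only data instead of being rebuilt on every invocation, and the scratch buffer shrinks to exactly the needed size. For context, a sketch of the check these is_tcp_mem() helpers perform; the real tests vary the loop structure to stress the verifier in different ways:

    /* Sketch: compare the sysctl name against tcp_mem_name with a
     * verifier-bounded loop; bpf_sysctl_get_name() fills `name`.
     */
    char name[sizeof(tcp_mem_name)];
    unsigned char i;
    int ret;

    memset(name, 0, sizeof(name));
    ret = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
    if (ret < 0 || ret != sizeof(tcp_mem_name) - 1)
    	return 0;
    for (i = 0; i < sizeof(tcp_mem_name) - 1; i++)
    	if (name[i] != tcp_mem_name[i])
    		return 0;
    return 1;	/* it is the tcp_mem sysctl */
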
diff --git a/tools/testing/selftests/bpf/progs/test_tc_neigh.c b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
new file mode 100644
index 000000000000..fe182616b112
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_neigh.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <linux/bpf.h>
+#include <linux/stddef.h>
+#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#ifndef ctx_ptr
+# define ctx_ptr(field) (void *)(long)(field)
+#endif
+
+#define ip4_src 0xac100164 /* 172.16.1.100 */
+#define ip4_dst 0xac100264 /* 172.16.2.100 */
+
+#define ip6_src { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x01, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
+#define ip6_dst { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
+ 0x00, 0x02, 0xde, 0xad, 0xbe, 0xef, 0xca, 0xfe }
+
+#ifndef v6_equal
+# define v6_equal(a, b) (a.s6_addr32[0] == b.s6_addr32[0] && \
+ a.s6_addr32[1] == b.s6_addr32[1] && \
+ a.s6_addr32[2] == b.s6_addr32[2] && \
+ a.s6_addr32[3] == b.s6_addr32[3])
+#endif
+
+enum {
+ dev_src,
+ dev_dst,
+};
+
+struct bpf_map_def SEC("maps") ifindex_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .max_entries = 2,
+};
+
+static __always_inline bool is_remote_ep_v4(struct __sk_buff *skb,
+ __be32 addr)
+{
+ void *data_end = ctx_ptr(skb->data_end);
+ void *data = ctx_ptr(skb->data);
+ struct iphdr *ip4h;
+
+ if (data + sizeof(struct ethhdr) > data_end)
+ return false;
+
+ ip4h = (struct iphdr *)(data + sizeof(struct ethhdr));
+ if ((void *)(ip4h + 1) > data_end)
+ return false;
+
+ return ip4h->daddr == addr;
+}
+
+static __always_inline bool is_remote_ep_v6(struct __sk_buff *skb,
+ struct in6_addr addr)
+{
+ void *data_end = ctx_ptr(skb->data_end);
+ void *data = ctx_ptr(skb->data);
+ struct ipv6hdr *ip6h;
+
+ if (data + sizeof(struct ethhdr) > data_end)
+ return false;
+
+ ip6h = (struct ipv6hdr *)(data + sizeof(struct ethhdr));
+ if ((void *)(ip6h + 1) > data_end)
+ return false;
+
+ return v6_equal(ip6h->daddr, addr);
+}
+
+static __always_inline int get_dev_ifindex(int which)
+{
+ int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
+
+ return ifindex ? *ifindex : 0;
+}
+
+SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+{
+ void *data_end = ctx_ptr(skb->data_end);
+ void *data = ctx_ptr(skb->data);
+ __u32 *raw = data;
+
+ if (data + sizeof(struct ethhdr) > data_end)
+ return TC_ACT_SHOT;
+
+ return !raw[0] && !raw[1] && !raw[2] ? TC_ACT_SHOT : TC_ACT_OK;
+}
+
+SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+{
+ __u8 zero[ETH_ALEN * 2];
+ bool redirect = false;
+
+ switch (skb->protocol) {
+ case __bpf_constant_htons(ETH_P_IP):
+ redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_src));
+ break;
+ case __bpf_constant_htons(ETH_P_IPV6):
+ redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_src);
+ break;
+ }
+
+ if (!redirect)
+ return TC_ACT_OK;
+
+ __builtin_memset(&zero, 0, sizeof(zero));
+ if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
+ return TC_ACT_SHOT;
+
+ return bpf_redirect_neigh(get_dev_ifindex(dev_src), 0);
+}
+
+SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+{
+ __u8 zero[ETH_ALEN * 2];
+ bool redirect = false;
+
+ switch (skb->protocol) {
+ case __bpf_constant_htons(ETH_P_IP):
+ redirect = is_remote_ep_v4(skb, __bpf_constant_htonl(ip4_dst));
+ break;
+ case __bpf_constant_htons(ETH_P_IPV6):
+ redirect = is_remote_ep_v6(skb, (struct in6_addr)ip6_dst);
+ break;
+ }
+
+ if (!redirect)
+ return TC_ACT_OK;
+
+ __builtin_memset(&zero, 0, sizeof(zero));
+ if (bpf_skb_store_bytes(skb, 0, &zero, sizeof(zero), 0) < 0)
+ return TC_ACT_SHOT;
+
+ return bpf_redirect_neigh(get_dev_ifindex(dev_dst), 0);
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_tc_peer.c b/tools/testing/selftests/bpf/progs/test_tc_peer.c
new file mode 100644
index 000000000000..fc84a7685aa2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tc_peer.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <stdint.h>
+#include <stdbool.h>
+
+#include <linux/bpf.h>
+#include <linux/stddef.h>
+#include <linux/pkt_cls.h>
+
+#include <bpf/bpf_helpers.h>
+
+enum {
+ dev_src,
+ dev_dst,
+};
+
+struct bpf_map_def SEC("maps") ifindex_map = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(int),
+ .max_entries = 2,
+};
+
+static __always_inline int get_dev_ifindex(int which)
+{
+ int *ifindex = bpf_map_lookup_elem(&ifindex_map, &which);
+
+ return ifindex ? *ifindex : 0;
+}
+
+SEC("chk_egress") int tc_chk(struct __sk_buff *skb)
+{
+ return TC_ACT_SHOT;
+}
+
+SEC("dst_ingress") int tc_dst(struct __sk_buff *skb)
+{
+ return bpf_redirect_peer(get_dev_ifindex(dev_src), 0);
+}
+
+SEC("src_ingress") int tc_src(struct __sk_buff *skb)
+{
+ return bpf_redirect_peer(get_dev_ifindex(dev_dst), 0);
+}
+
+char __license[] SEC("license") = "GPL";
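
test_tc_neigh.c and test_tc_peer.c differ mainly in the redirect helper: bpf_redirect_neigh() hands the packet to the stack so neighbor resolution fills in the Ethernet header (which is why the neigh variant zeroes the MAC addresses first), while bpf_redirect_peer() jumps straight to the peer device's ingress in the other netns with no L2 rewrite. A one-line sketch of the shared shape of both helpers:

    /* Both helpers take (ifindex, flags); flags must currently be 0. */
    static __always_inline int redirect_dev(int ifindex, bool via_stack)
    {
    	return via_stack ? bpf_redirect_neigh(ifindex, 0)
    			 : bpf_redirect_peer(ifindex, 0);
    }
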
diff --git a/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
new file mode 100644
index 000000000000..678bd0fad29e
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_tcp_hdr_options.c
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Facebook */
+
+#include <stddef.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <linux/tcp.h>
+#include <linux/socket.h>
+#include <linux/bpf.h>
+#include <linux/types.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#define BPF_PROG_TEST_TCP_HDR_OPTIONS
+#include "test_tcp_hdr_options.h"
+
+#ifndef sizeof_field
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+
+__u8 test_kind = TCPOPT_EXP;
+__u16 test_magic = 0xeB9F;
+__u32 inherit_cb_flags = 0;
+
+struct bpf_test_option passive_synack_out = {};
+struct bpf_test_option passive_fin_out = {};
+
+struct bpf_test_option passive_estab_in = {};
+struct bpf_test_option passive_fin_in = {};
+
+struct bpf_test_option active_syn_out = {};
+struct bpf_test_option active_fin_out = {};
+
+struct bpf_test_option active_estab_in = {};
+struct bpf_test_option active_fin_in = {};
+
+struct {
+ __uint(type, BPF_MAP_TYPE_SK_STORAGE);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+ __type(key, int);
+ __type(value, struct hdr_stg);
+} hdr_stg_map SEC(".maps");
+
+static bool skops_want_cookie(const struct bpf_sock_ops *skops)
+{
+ return skops->args[0] == BPF_WRITE_HDR_TCP_SYNACK_COOKIE;
+}
+
+static bool skops_current_mss(const struct bpf_sock_ops *skops)
+{
+ return skops->args[0] == BPF_WRITE_HDR_TCP_CURRENT_MSS;
+}
+
+static __u8 option_total_len(__u8 flags)
+{
+ __u8 i, len = 1; /* +1 for flags */
+
+ if (!flags)
+ return 0;
+
+ /* RESEND bit does not use a byte */
+ for (i = OPTION_RESEND + 1; i < __NR_OPTION_FLAGS; i++)
+ len += !!TEST_OPTION_FLAGS(flags, i);
+
+ if (test_kind == TCPOPT_EXP)
+ return len + TCP_BPF_EXPOPT_BASE_LEN;
+ else
+ return len + 2; /* +1 kind, +1 kind-len */
+}
+
+static void write_test_option(const struct bpf_test_option *test_opt,
+ __u8 *data)
+{
+ __u8 offset = 0;
+
+ data[offset++] = test_opt->flags;
+ if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_MAX_DELACK_MS))
+ data[offset++] = test_opt->max_delack_ms;
+
+ if (TEST_OPTION_FLAGS(test_opt->flags, OPTION_RAND))
+ data[offset++] = test_opt->rand;
+}
+
+static int store_option(struct bpf_sock_ops *skops,
+ const struct bpf_test_option *test_opt)
+{
+ union {
+ struct tcp_exprm_opt exprm;
+ struct tcp_opt regular;
+ } write_opt;
+ int err;
+
+ if (test_kind == TCPOPT_EXP) {
+ write_opt.exprm.kind = TCPOPT_EXP;
+ write_opt.exprm.len = option_total_len(test_opt->flags);
+ write_opt.exprm.magic = __bpf_htons(test_magic);
+ write_opt.exprm.data32 = 0;
+ write_test_option(test_opt, write_opt.exprm.data);
+ err = bpf_store_hdr_opt(skops, &write_opt.exprm,
+ sizeof(write_opt.exprm), 0);
+ } else {
+ write_opt.regular.kind = test_kind;
+ write_opt.regular.len = option_total_len(test_opt->flags);
+ write_opt.regular.data32 = 0;
+ write_test_option(test_opt, write_opt.regular.data);
+ err = bpf_store_hdr_opt(skops, &write_opt.regular,
+ sizeof(write_opt.regular), 0);
+ }
+
+ if (err)
+ RET_CG_ERR(err);
+
+ return CG_OK;
+}
+
+static int parse_test_option(struct bpf_test_option *opt, const __u8 *start)
+{
+ opt->flags = *start++;
+
+ if (TEST_OPTION_FLAGS(opt->flags, OPTION_MAX_DELACK_MS))
+ opt->max_delack_ms = *start++;
+
+ if (TEST_OPTION_FLAGS(opt->flags, OPTION_RAND))
+ opt->rand = *start++;
+
+ return 0;
+}
+
+static int load_option(struct bpf_sock_ops *skops,
+ struct bpf_test_option *test_opt, bool from_syn)
+{
+ union {
+ struct tcp_exprm_opt exprm;
+ struct tcp_opt regular;
+ } search_opt;
+ int ret, load_flags = from_syn ? BPF_LOAD_HDR_OPT_TCP_SYN : 0;
+
+ if (test_kind == TCPOPT_EXP) {
+ search_opt.exprm.kind = TCPOPT_EXP;
+ search_opt.exprm.len = 4;
+ search_opt.exprm.magic = __bpf_htons(test_magic);
+ search_opt.exprm.data32 = 0;
+ ret = bpf_load_hdr_opt(skops, &search_opt.exprm,
+ sizeof(search_opt.exprm), load_flags);
+ if (ret < 0)
+ return ret;
+ return parse_test_option(test_opt, search_opt.exprm.data);
+ } else {
+ search_opt.regular.kind = test_kind;
+ search_opt.regular.len = 0;
+ search_opt.regular.data32 = 0;
+ ret = bpf_load_hdr_opt(skops, &search_opt.regular,
+ sizeof(search_opt.regular), load_flags);
+ if (ret < 0)
+ return ret;
+ return parse_test_option(test_opt, search_opt.regular.data);
+ }
+}
+
+static int synack_opt_len(struct bpf_sock_ops *skops)
+{
+ struct bpf_test_option test_opt = {};
+ __u8 optlen;
+ int err;
+
+ if (!passive_synack_out.flags)
+ return CG_OK;
+
+ err = load_option(skops, &test_opt, true);
+
+ /* bpf_test_option is not found */
+ if (err == -ENOMSG)
+ return CG_OK;
+
+ if (err)
+ RET_CG_ERR(err);
+
+ optlen = option_total_len(passive_synack_out.flags);
+ if (optlen) {
+ err = bpf_reserve_hdr_opt(skops, optlen, 0);
+ if (err)
+ RET_CG_ERR(err);
+ }
+
+ return CG_OK;
+}
+
+static int write_synack_opt(struct bpf_sock_ops *skops)
+{
+ struct bpf_test_option opt;
+
+ if (!passive_synack_out.flags)
+ /* We should not even be called since no header
+ * space has been reserved.
+ */
+ RET_CG_ERR(0);
+
+ opt = passive_synack_out;
+ if (skops_want_cookie(skops))
+ SET_OPTION_FLAGS(opt.flags, OPTION_RESEND);
+
+ return store_option(skops, &opt);
+}
+
+static int syn_opt_len(struct bpf_sock_ops *skops)
+{
+ __u8 optlen;
+ int err;
+
+ if (!active_syn_out.flags)
+ return CG_OK;
+
+ optlen = option_total_len(active_syn_out.flags);
+ if (optlen) {
+ err = bpf_reserve_hdr_opt(skops, optlen, 0);
+ if (err)
+ RET_CG_ERR(err);
+ }
+
+ return CG_OK;
+}
+
+static int write_syn_opt(struct bpf_sock_ops *skops)
+{
+ if (!active_syn_out.flags)
+ RET_CG_ERR(0);
+
+ return store_option(skops, &active_syn_out);
+}
+
+static int fin_opt_len(struct bpf_sock_ops *skops)
+{
+ struct bpf_test_option *opt;
+ struct hdr_stg *hdr_stg;
+ __u8 optlen;
+ int err;
+
+ if (!skops->sk)
+ RET_CG_ERR(0);
+
+ hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
+ if (!hdr_stg)
+ RET_CG_ERR(0);
+
+ if (hdr_stg->active)
+ opt = &active_fin_out;
+ else
+ opt = &passive_fin_out;
+
+ optlen = option_total_len(opt->flags);
+ if (optlen) {
+ err = bpf_reserve_hdr_opt(skops, optlen, 0);
+ if (err)
+ RET_CG_ERR(err);
+ }
+
+ return CG_OK;
+}
+
+static int write_fin_opt(struct bpf_sock_ops *skops)
+{
+ struct bpf_test_option *opt;
+ struct hdr_stg *hdr_stg;
+
+ if (!skops->sk)
+ RET_CG_ERR(0);
+
+ hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
+ if (!hdr_stg)
+ RET_CG_ERR(0);
+
+ if (hdr_stg->active)
+ opt = &active_fin_out;
+ else
+ opt = &passive_fin_out;
+
+ if (!opt->flags)
+ RET_CG_ERR(0);
+
+ return store_option(skops, opt);
+}
+
+static int resend_in_ack(struct bpf_sock_ops *skops)
+{
+ struct hdr_stg *hdr_stg;
+
+ if (!skops->sk)
+ return -1;
+
+ hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
+ if (!hdr_stg)
+ return -1;
+
+ return !!hdr_stg->resend_syn;
+}
+
+static int nodata_opt_len(struct bpf_sock_ops *skops)
+{
+ int resend;
+
+ resend = resend_in_ack(skops);
+ if (resend < 0)
+ RET_CG_ERR(0);
+
+ if (resend)
+ return syn_opt_len(skops);
+
+ return CG_OK;
+}
+
+static int write_nodata_opt(struct bpf_sock_ops *skops)
+{
+ int resend;
+
+ resend = resend_in_ack(skops);
+ if (resend < 0)
+ RET_CG_ERR(0);
+
+ if (resend)
+ return write_syn_opt(skops);
+
+ return CG_OK;
+}
+
+static int data_opt_len(struct bpf_sock_ops *skops)
+{
+ /* Same as the nodata version. Mostly to show
+	 * an example usage of skops->skb_len.
+ */
+ return nodata_opt_len(skops);
+}
+
+static int write_data_opt(struct bpf_sock_ops *skops)
+{
+ return write_nodata_opt(skops);
+}
+
+static int current_mss_opt_len(struct bpf_sock_ops *skops)
+{
+ /* Reserve maximum that may be needed */
+ int err;
+
+ err = bpf_reserve_hdr_opt(skops, option_total_len(OPTION_MASK), 0);
+ if (err)
+ RET_CG_ERR(err);
+
+ return CG_OK;
+}
+
+static int handle_hdr_opt_len(struct bpf_sock_ops *skops)
+{
+ __u8 tcp_flags = skops_tcp_flags(skops);
+
+ if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
+ return synack_opt_len(skops);
+
+ if (tcp_flags & TCPHDR_SYN)
+ return syn_opt_len(skops);
+
+ if (tcp_flags & TCPHDR_FIN)
+ return fin_opt_len(skops);
+
+ if (skops_current_mss(skops))
+ /* The kernel is calculating the MSS */
+ return current_mss_opt_len(skops);
+
+ if (skops->skb_len)
+ return data_opt_len(skops);
+
+ return nodata_opt_len(skops);
+}
+
+static int handle_write_hdr_opt(struct bpf_sock_ops *skops)
+{
+ __u8 tcp_flags = skops_tcp_flags(skops);
+ struct tcphdr *th;
+
+ if ((tcp_flags & TCPHDR_SYNACK) == TCPHDR_SYNACK)
+ return write_synack_opt(skops);
+
+ if (tcp_flags & TCPHDR_SYN)
+ return write_syn_opt(skops);
+
+ if (tcp_flags & TCPHDR_FIN)
+ return write_fin_opt(skops);
+
+ th = skops->skb_data;
+ if (th + 1 > skops->skb_data_end)
+ RET_CG_ERR(0);
+
+ if (skops->skb_len > tcp_hdrlen(th))
+ return write_data_opt(skops);
+
+ return write_nodata_opt(skops);
+}
+
+static int set_delack_max(struct bpf_sock_ops *skops, __u8 max_delack_ms)
+{
+ __u32 max_delack_us = max_delack_ms * 1000;
+
+ return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_DELACK_MAX,
+ &max_delack_us, sizeof(max_delack_us));
+}
+
+static int set_rto_min(struct bpf_sock_ops *skops, __u8 peer_max_delack_ms)
+{
+ __u32 min_rto_us = peer_max_delack_ms * 1000;
+
+ return bpf_setsockopt(skops, SOL_TCP, TCP_BPF_RTO_MIN, &min_rto_us,
+ sizeof(min_rto_us));
+}
+
+static int handle_active_estab(struct bpf_sock_ops *skops)
+{
+ struct hdr_stg init_stg = {
+ .active = true,
+ };
+ int err;
+
+ err = load_option(skops, &active_estab_in, false);
+ if (err && err != -ENOMSG)
+ RET_CG_ERR(err);
+
+ init_stg.resend_syn = TEST_OPTION_FLAGS(active_estab_in.flags,
+ OPTION_RESEND);
+ if (!skops->sk || !bpf_sk_storage_get(&hdr_stg_map, skops->sk,
+ &init_stg,
+ BPF_SK_STORAGE_GET_F_CREATE))
+ RET_CG_ERR(0);
+
+ if (init_stg.resend_syn)
+ /* Don't clear the write_hdr cb now because
+ * the ACK may get lost and retransmit may
+ * be needed.
+ *
+ * PARSE_ALL_HDR cb flag is set to learn if this
+		 * resend_syn option has been received by the peer.
+ *
+ * The header option will be resent until a valid
+ * packet is received at handle_parse_hdr()
+ * and all hdr cb flags will be cleared in
+ * handle_parse_hdr().
+ */
+ set_parse_all_hdr_cb_flags(skops);
+ else if (!active_fin_out.flags)
+ /* No options will be written from now */
+ clear_hdr_cb_flags(skops);
+
+ if (active_syn_out.max_delack_ms) {
+ err = set_delack_max(skops, active_syn_out.max_delack_ms);
+ if (err)
+ RET_CG_ERR(err);
+ }
+
+ if (active_estab_in.max_delack_ms) {
+ err = set_rto_min(skops, active_estab_in.max_delack_ms);
+ if (err)
+ RET_CG_ERR(err);
+ }
+
+ return CG_OK;
+}
+
+static int handle_passive_estab(struct bpf_sock_ops *skops)
+{
+ struct hdr_stg init_stg = {};
+ struct tcphdr *th;
+ int err;
+
+ inherit_cb_flags = skops->bpf_sock_ops_cb_flags;
+
+ err = load_option(skops, &passive_estab_in, true);
+ if (err == -ENOENT) {
+ /* saved_syn is not found. It was in syncookie mode.
+ * We have asked the active side to resend the options
+ * in ACK, so try to find the bpf_test_option from ACK now.
+ */
+ err = load_option(skops, &passive_estab_in, false);
+ init_stg.syncookie = true;
+ }
+
+ /* ENOMSG: The bpf_test_option is not found which is fine.
+ * Bail out now for all other errors.
+ */
+ if (err && err != -ENOMSG)
+ RET_CG_ERR(err);
+
+ th = skops->skb_data;
+ if (th + 1 > skops->skb_data_end)
+ RET_CG_ERR(0);
+
+ if (th->syn) {
+ /* Fastopen */
+
+ /* Cannot clear cb_flags to stop write_hdr cb.
+ * synack is not sent yet for fast open.
+		 * Even if it was, the synack may need to be retransmitted.
+ *
+ * PARSE_ALL_HDR cb flag is set to learn
+ * if synack has reached the peer.
+ * All cb_flags will be cleared in handle_parse_hdr().
+ */
+ set_parse_all_hdr_cb_flags(skops);
+ init_stg.fastopen = true;
+ } else if (!passive_fin_out.flags) {
+ /* No options will be written from now */
+ clear_hdr_cb_flags(skops);
+ }
+
+ if (!skops->sk ||
+ !bpf_sk_storage_get(&hdr_stg_map, skops->sk, &init_stg,
+ BPF_SK_STORAGE_GET_F_CREATE))
+ RET_CG_ERR(0);
+
+ if (passive_synack_out.max_delack_ms) {
+ err = set_delack_max(skops, passive_synack_out.max_delack_ms);
+ if (err)
+ RET_CG_ERR(err);
+ }
+
+ if (passive_estab_in.max_delack_ms) {
+ err = set_rto_min(skops, passive_estab_in.max_delack_ms);
+ if (err)
+ RET_CG_ERR(err);
+ }
+
+ return CG_OK;
+}
+
+static int handle_parse_hdr(struct bpf_sock_ops *skops)
+{
+ struct hdr_stg *hdr_stg;
+ struct tcphdr *th;
+
+ if (!skops->sk)
+ RET_CG_ERR(0);
+
+ th = skops->skb_data;
+ if (th + 1 > skops->skb_data_end)
+ RET_CG_ERR(0);
+
+ hdr_stg = bpf_sk_storage_get(&hdr_stg_map, skops->sk, NULL, 0);
+ if (!hdr_stg)
+ RET_CG_ERR(0);
+
+ if (hdr_stg->resend_syn || hdr_stg->fastopen)
+ /* The PARSE_ALL_HDR cb flag was turned on
+ * to ensure that the previously written
+ * options have reached the peer.
+	 * Those previously written options include:
+ * - Active side: resend_syn in ACK during syncookie
+ * or
+ * - Passive side: SYNACK during fastopen
+ *
+ * A valid packet has been received here after
+ * the 3WHS, so the PARSE_ALL_HDR cb flag
+ * can be cleared now.
+ */
+ clear_parse_all_hdr_cb_flags(skops);
+
+ if (hdr_stg->resend_syn && !active_fin_out.flags)
+ /* Active side resent the syn option in ACK
+ * because the server was in syncookie mode.
+ * A valid packet has been received, so
+ * clear header cb flags if there is no
+ * more option to send.
+ */
+ clear_hdr_cb_flags(skops);
+
+ if (hdr_stg->fastopen && !passive_fin_out.flags)
+ /* Passive side was in fastopen.
+ * A valid packet has been received, so
+ * the SYNACK has reached the peer.
+ * Clear header cb flags if there is no more
+ * option to send.
+ */
+ clear_hdr_cb_flags(skops);
+
+ if (th->fin) {
+ struct bpf_test_option *fin_opt;
+ int err;
+
+ if (hdr_stg->active)
+ fin_opt = &active_fin_in;
+ else
+ fin_opt = &passive_fin_in;
+
+ err = load_option(skops, fin_opt, false);
+ if (err && err != -ENOMSG)
+ RET_CG_ERR(err);
+ }
+
+ return CG_OK;
+}
+
+SEC("sockops/estab")
+int estab(struct bpf_sock_ops *skops)
+{
+ int true_val = 1;
+
+ switch (skops->op) {
+ case BPF_SOCK_OPS_TCP_LISTEN_CB:
+ bpf_setsockopt(skops, SOL_TCP, TCP_SAVE_SYN,
+ &true_val, sizeof(true_val));
+ set_hdr_cb_flags(skops, BPF_SOCK_OPS_STATE_CB_FLAG);
+ break;
+ case BPF_SOCK_OPS_TCP_CONNECT_CB:
+ set_hdr_cb_flags(skops, 0);
+ break;
+ case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
+ return handle_parse_hdr(skops);
+ case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
+ return handle_hdr_opt_len(skops);
+ case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
+ return handle_write_hdr_opt(skops);
+ case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
+ return handle_passive_estab(skops);
+ case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
+ return handle_active_estab(skops);
+ }
+
+ return CG_OK;
+}
+
+char _license[] SEC("license") = "GPL";
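
store_option() and load_option() above serialize into two layouts declared in test_tcp_hdr_options.h; roughly the following shape, with the experimental kind carrying a 16-bit magic in the style of RFC 6994 (a sketch, not the authoritative definition):

    struct tcp_exprm_opt {
    	__u8 kind;		/* TCPOPT_EXP (254) */
    	__u8 len;
    	__be16 magic;		/* 0xeB9F marks this test's option */
    	union {
    		__u8 data[4];	/* flags [+ max_delack_ms] [+ rand] */
    		__u32 data32;
    	};
    } __attribute__((packed));

    struct tcp_opt {
    	__u8 kind;		/* test_kind when not TCPOPT_EXP */
    	__u8 len;
    	union {
    		__u8 data[4];
    		__u32 data32;
    	};
    } __attribute__((packed));
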
diff --git a/tools/testing/selftests/bpf/progs/test_trace_ext.c b/tools/testing/selftests/bpf/progs/test_trace_ext.c
new file mode 100644
index 000000000000..d19a634d0e78
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_trace_ext.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2019 Facebook
+#include <linux/bpf.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+#include <bpf/bpf_tracing.h>
+
+__u64 ext_called = 0;
+
+SEC("freplace/test_pkt_md_access")
+int test_pkt_md_access_new(struct __sk_buff *skb)
+{
+ ext_called = skb->len;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_trace_ext_tracing.c b/tools/testing/selftests/bpf/progs/test_trace_ext_tracing.c
new file mode 100644
index 000000000000..52f3baf98f20
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_trace_ext_tracing.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+__u64 fentry_called = 0;
+
+SEC("fentry/test_pkt_md_access_new")
+int BPF_PROG(fentry, struct sk_buff *skb)
+{
+ fentry_called = skb->len;
+ return 0;
+}
+
+__u64 fexit_called = 0;
+
+SEC("fexit/test_pkt_md_access_new")
+int BPF_PROG(fexit, struct sk_buff *skb)
+{
+ fexit_called = skb->len;
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
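
test_trace_ext.c replaces test_pkt_md_access via freplace, and this file then attaches fentry/fexit probes to that extension program itself. Both directions need an explicit attach target before load; a hedged userspace sketch, with the fd variables assumed:

    /* Point the freplace prog at the original, then point the tracing
     * progs at the loaded extension; names follow the SEC() strings.
     */
    prog = bpf_object__find_program_by_title(obj, "freplace/test_pkt_md_access");
    bpf_program__set_attach_target(prog, pkt_md_prog_fd, "test_pkt_md_access");

    prog = bpf_object__find_program_by_title(tr_obj, "fentry/test_pkt_md_access_new");
    bpf_program__set_attach_target(prog, freplace_prog_fd, "test_pkt_md_access_new");
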
diff --git a/tools/testing/selftests/bpf/progs/test_vmlinux.c b/tools/testing/selftests/bpf/progs/test_vmlinux.c
index 29fa09d6a6c6..e9dfa0313d1b 100644
--- a/tools/testing/selftests/bpf/progs/test_vmlinux.c
+++ b/tools/testing/selftests/bpf/progs/test_vmlinux.c
@@ -19,12 +19,14 @@ SEC("tp/syscalls/sys_enter_nanosleep")
int handle__tp(struct trace_event_raw_sys_enter *args)
{
struct __kernel_timespec *ts;
+ long tv_nsec;
if (args->id != __NR_nanosleep)
return 0;
ts = (void *)args->args[0];
- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
+ tv_nsec != MY_TV_NSEC)
return 0;
tp_called = true;
@@ -35,12 +37,14 @@ SEC("raw_tp/sys_enter")
int BPF_PROG(handle__raw_tp, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
+ long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE(regs);
- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
+ tv_nsec != MY_TV_NSEC)
return 0;
raw_tp_called = true;
@@ -51,12 +55,14 @@ SEC("tp_btf/sys_enter")
int BPF_PROG(handle__tp_btf, struct pt_regs *regs, long id)
{
struct __kernel_timespec *ts;
+ long tv_nsec;
if (id != __NR_nanosleep)
return 0;
ts = (void *)PT_REGS_PARM1_CORE(regs);
- if (BPF_CORE_READ(ts, tv_nsec) != MY_TV_NSEC)
+ if (bpf_probe_read_user(&tv_nsec, sizeof(ts->tv_nsec), &ts->tv_nsec) ||
+ tv_nsec != MY_TV_NSEC)
return 0;
tp_btf_called = true;
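
The change above is about address spaces: ts comes from a syscall argument and points at user memory, so BPF_CORE_READ(), which boils down to bpf_probe_read_kernel(), was the wrong accessor; bpf_probe_read_user() is the user-memory counterpart. The distinction in two lines (the pointer names here are placeholders):

    long nsec;

    /* kernel pointer -> kernel flavor */
    bpf_probe_read_kernel(&nsec, sizeof(nsec), &kernel_ts->tv_nsec);
    /* user pointer (e.g. a syscall argument) -> user flavor */
    bpf_probe_read_user(&nsec, sizeof(nsec), &user_ts->tv_nsec);
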
diff --git a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
index 8beecec166d9..3a67921f62b5 100644
--- a/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
+++ b/tools/testing/selftests/bpf/progs/test_xdp_noinline.c
@@ -16,7 +16,7 @@
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
-static __u32 rol32(__u32 word, unsigned int shift)
+static __always_inline __u32 rol32(__u32 word, unsigned int shift)
{
return (word << shift) | (word >> ((-shift) & 31));
}
@@ -49,7 +49,7 @@ static __u32 rol32(__u32 word, unsigned int shift)
typedef unsigned int u32;
-static __attribute__ ((noinline))
+static __noinline
u32 jhash(const void *key, u32 length, u32 initval)
{
u32 a, b, c;
@@ -86,7 +86,7 @@ u32 jhash(const void *key, u32 length, u32 initval)
return c;
}
-__attribute__ ((noinline))
+__noinline
u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
{
a += initval;
@@ -96,7 +96,7 @@ u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval)
return c;
}
-__attribute__ ((noinline))
+__noinline
u32 jhash_2words(u32 a, u32 b, u32 initval)
{
return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2));
@@ -213,7 +213,7 @@ struct eth_hdr {
unsigned short eth_proto;
};
-static inline __u64 calc_offset(bool is_ipv6, bool is_icmp)
+static __noinline __u64 calc_offset(bool is_ipv6, bool is_icmp)
{
__u64 off = sizeof(struct eth_hdr);
if (is_ipv6) {
@@ -797,8 +797,8 @@ out:
return XDP_DROP;
}
-__attribute__ ((section("xdp-test"), used))
-int balancer_ingress(struct xdp_md *ctx)
+SEC("xdp-test-v4")
+int balancer_ingress_v4(struct xdp_md *ctx)
{
void *data = (void *)(long)ctx->data;
void *data_end = (void *)(long)ctx->data_end;
@@ -812,11 +812,27 @@ int balancer_ingress(struct xdp_md *ctx)
eth_proto = bpf_ntohs(eth->eth_proto);
if (eth_proto == ETH_P_IP)
return process_packet(data, nh_off, data_end, 0, ctx);
- else if (eth_proto == ETH_P_IPV6)
+ else
+ return XDP_DROP;
+}
+
+SEC("xdp-test-v6")
+int balancer_ingress_v6(struct xdp_md *ctx)
+{
+ void *data = (void *)(long)ctx->data;
+ void *data_end = (void *)(long)ctx->data_end;
+ struct eth_hdr *eth = data;
+ __u32 eth_proto;
+ __u32 nh_off;
+
+ nh_off = sizeof(struct eth_hdr);
+ if (data + nh_off > data_end)
+ return XDP_DROP;
+ eth_proto = bpf_ntohs(eth->eth_proto);
+ if (eth_proto == ETH_P_IPV6)
return process_packet(data, nh_off, data_end, 1, ctx);
else
return XDP_DROP;
}
-char _license[] __attribute__ ((section("license"), used)) = "GPL";
-int _version __attribute__ ((section("version"), used)) = 1;
+char _license[] SEC("license") = "GPL";
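
balancer_ingress was split so that each ELF section holds exactly one program, matching libbpf's section-per-program model; userspace can then pick the v4 or v6 flavor explicitly, along the lines of this hedged snippet:

    struct bpf_program *prog;

    prog = bpf_object__find_program_by_title(obj, use_v6 ? "xdp-test-v6"
    						      : "xdp-test-v4");
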
diff --git a/tools/testing/selftests/bpf/progs/trigger_bench.c b/tools/testing/selftests/bpf/progs/trigger_bench.c
index 8b36b6640e7e..9a4d09590b3d 100644
--- a/tools/testing/selftests/bpf/progs/trigger_bench.c
+++ b/tools/testing/selftests/bpf/progs/trigger_bench.c
@@ -39,6 +39,13 @@ int bench_trigger_fentry(void *ctx)
return 0;
}
+SEC("fentry.s/__x64_sys_getpgid")
+int bench_trigger_fentry_sleep(void *ctx)
+{
+ __sync_add_and_fetch(&hits, 1);
+ return 0;
+}
+
SEC("fmod_ret/__x64_sys_getpgid")
int bench_trigger_fmodret(void *ctx)
{
diff --git a/tools/testing/selftests/bpf/test_bpftool_build.sh b/tools/testing/selftests/bpf/test_bpftool_build.sh
index ac349a5cea7e..2db3c60e1e61 100755
--- a/tools/testing/selftests/bpf/test_bpftool_build.sh
+++ b/tools/testing/selftests/bpf/test_bpftool_build.sh
@@ -85,6 +85,23 @@ make_with_tmpdir() {
echo
}
+make_doc_and_clean() {
+ echo -e "\$PWD: $PWD"
+ echo -e "command: make -s $* doc >/dev/null"
+	RST2MAN_OPTS="--exit-status=1" make $J -s $* doc >/dev/null
+ if [ $? -ne 0 ] ; then
+ ERROR=1
+ printf "FAILURE: Errors or warnings when building documentation\n"
+ fi
+ (
+ if [ $# -ge 1 ] ; then
+ cd ${@: -1}
+ fi
+ make -s doc-clean
+ )
+ echo
+}
+
echo "Trying to build bpftool"
echo -e "... through kbuild\n"
@@ -145,3 +162,7 @@ make_and_clean
make_with_tmpdir OUTPUT
make_with_tmpdir O
+
+echo -e "Checking documentation build\n"
+# From tools/bpf/bpftool
+make_doc_and_clean
diff --git a/tools/testing/selftests/bpf/test_bpftool_metadata.sh b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
new file mode 100755
index 000000000000..1bf81b49457a
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_bpftool_metadata.sh
@@ -0,0 +1,82 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+TESTNAME=bpftool_metadata
+BPF_FS=$(awk '$3 == "bpf" {print $2; exit}' /proc/mounts)
+BPF_DIR=$BPF_FS/test_$TESTNAME
+
+_cleanup()
+{
+ set +e
+ rm -rf $BPF_DIR 2> /dev/null
+}
+
+cleanup_skip()
+{
+ echo "selftests: $TESTNAME [SKIP]"
+ _cleanup
+
+ exit $ksft_skip
+}
+
+cleanup()
+{
+ if [ "$?" = 0 ]; then
+ echo "selftests: $TESTNAME [PASS]"
+ else
+ echo "selftests: $TESTNAME [FAILED]"
+ fi
+ _cleanup
+}
+
+if [ $(id -u) -ne 0 ]; then
+ echo "selftests: $TESTNAME [SKIP] Need root privileges"
+ exit $ksft_skip
+fi
+
+if [ -z "$BPF_FS" ]; then
+ echo "selftests: $TESTNAME [SKIP] Could not run test without bpffs mounted"
+ exit $ksft_skip
+fi
+
+if ! bpftool version > /dev/null 2>&1; then
+ echo "selftests: $TESTNAME [SKIP] Could not run test without bpftool"
+ exit $ksft_skip
+fi
+
+set -e
+
+trap cleanup_skip EXIT
+
+mkdir $BPF_DIR
+
+trap cleanup EXIT
+
+bpftool prog load metadata_unused.o $BPF_DIR/unused
+
+METADATA_PLAIN="$(bpftool prog)"
+echo "$METADATA_PLAIN" | grep 'a = "foo"' > /dev/null
+echo "$METADATA_PLAIN" | grep 'b = 1' > /dev/null
+
+bpftool prog --json | grep '"metadata":{"a":"foo","b":1}' > /dev/null
+
+bpftool map | grep 'metadata.rodata' > /dev/null
+
+rm $BPF_DIR/unused
+
+bpftool prog load metadata_used.o $BPF_DIR/used
+
+METADATA_PLAIN="$(bpftool prog)"
+echo "$METADATA_PLAIN" | grep 'a = "bar"' > /dev/null
+echo "$METADATA_PLAIN" | grep 'b = 2' > /dev/null
+
+bpftool prog --json | grep '"metadata":{"a":"bar","b":2}' > /dev/null
+
+bpftool map | grep 'metadata.rodata' > /dev/null
+
+rm $BPF_DIR/used
+
+exit 0
diff --git a/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c b/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c
index ed253f252cd0..ec53b1ef90d2 100644
--- a/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c
+++ b/tools/testing/selftests/bpf/test_current_pid_tgid_new_ns.c
@@ -156,4 +156,5 @@ cleanup:
bpf_object__close(obj);
}
}
+ return 0;
}
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index dbb820dde138..238f5f61189e 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -130,6 +130,69 @@ extern int test__join_cgroup(const char *path);
#define CHECK_ATTR(condition, tag, format...) \
_CHECK(condition, tag, tattr.duration, format)
+#define ASSERT_EQ(actual, expected, name) ({ \
+ static int duration = 0; \
+ typeof(actual) ___act = (actual); \
+ typeof(expected) ___exp = (expected); \
+ bool ___ok = ___act == ___exp; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: actual %lld != expected %lld\n", \
+ (name), (long long)(___act), (long long)(___exp)); \
+ ___ok; \
+})
+
+#define ASSERT_STREQ(actual, expected, name) ({ \
+ static int duration = 0; \
+ const char *___act = actual; \
+ const char *___exp = expected; \
+ bool ___ok = strcmp(___act, ___exp) == 0; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: actual '%s' != expected '%s'\n", \
+ (name), ___act, ___exp); \
+ ___ok; \
+})
+
+#define ASSERT_OK(res, name) ({ \
+ static int duration = 0; \
+ long long ___res = (res); \
+ bool ___ok = ___res == 0; \
+ CHECK(!___ok, (name), "unexpected error: %lld\n", ___res); \
+ ___ok; \
+})
+
+#define ASSERT_ERR(res, name) ({ \
+ static int duration = 0; \
+ long long ___res = (res); \
+ bool ___ok = ___res < 0; \
+ CHECK(!___ok, (name), "unexpected success: %lld\n", ___res); \
+ ___ok; \
+})
+
+#define ASSERT_NULL(ptr, name) ({ \
+ static int duration = 0; \
+ const void *___res = (ptr); \
+ bool ___ok = !___res; \
+ CHECK(!___ok, (name), "unexpected pointer: %p\n", ___res); \
+ ___ok; \
+})
+
+#define ASSERT_OK_PTR(ptr, name) ({ \
+ static int duration = 0; \
+ const void *___res = (ptr); \
+ bool ___ok = !IS_ERR_OR_NULL(___res); \
+ CHECK(!___ok, (name), \
+ "unexpected error: %ld\n", PTR_ERR(___res)); \
+ ___ok; \
+})
+
+#define ASSERT_ERR_PTR(ptr, name) ({ \
+ static int duration = 0; \
+ const void *___res = (ptr); \
+	bool ___ok = IS_ERR(___res);					\
+ CHECK(!___ok, (name), "unexpected pointer: %p\n", ___res); \
+ ___ok; \
+})
+
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;
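
Typical use of the new ASSERT_*() helpers: each expands to a CHECK() with its own static duration, evaluates to the boolean outcome, and so composes cleanly with early exits. A short hedged example with hypothetical names:

    void *skel = my_test__open_and_load();	/* hypothetical skeleton */

    if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
    	return;
    if (!ASSERT_OK(trigger_test(), "trigger"))	/* hypothetical helper */
    	goto cleanup;
    ASSERT_EQ(result_value, 42, "result_value");
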
diff --git a/tools/testing/selftests/bpf/test_sock_fields.c b/tools/testing/selftests/bpf/test_sock_fields.c
deleted file mode 100644
index 6c9f269c396d..000000000000
--- a/tools/testing/selftests/bpf/test_sock_fields.c
+++ /dev/null
@@ -1,482 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2019 Facebook */
-
-#include <sys/socket.h>
-#include <sys/epoll.h>
-#include <netinet/in.h>
-#include <arpa/inet.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-
-#include <bpf/bpf.h>
-#include <bpf/libbpf.h>
-
-#include "cgroup_helpers.h"
-#include "bpf_rlimit.h"
-
-enum bpf_addr_array_idx {
- ADDR_SRV_IDX,
- ADDR_CLI_IDX,
- __NR_BPF_ADDR_ARRAY_IDX,
-};
-
-enum bpf_result_array_idx {
- EGRESS_SRV_IDX,
- EGRESS_CLI_IDX,
- INGRESS_LISTEN_IDX,
- __NR_BPF_RESULT_ARRAY_IDX,
-};
-
-enum bpf_linum_array_idx {
- EGRESS_LINUM_IDX,
- INGRESS_LINUM_IDX,
- __NR_BPF_LINUM_ARRAY_IDX,
-};
-
-struct bpf_spinlock_cnt {
- struct bpf_spin_lock lock;
- __u32 cnt;
-};
-
-#define CHECK(condition, tag, format...) ({ \
- int __ret = !!(condition); \
- if (__ret) { \
- printf("%s(%d):FAIL:%s ", __func__, __LINE__, tag); \
- printf(format); \
- printf("\n"); \
- exit(-1); \
- } \
-})
-
-#define TEST_CGROUP "/test-bpf-sock-fields"
-#define DATA "Hello BPF!"
-#define DATA_LEN sizeof(DATA)
-
-static struct sockaddr_in6 srv_sa6, cli_sa6;
-static int sk_pkt_out_cnt10_fd;
-static int sk_pkt_out_cnt_fd;
-static int linum_map_fd;
-static int addr_map_fd;
-static int tp_map_fd;
-static int sk_map_fd;
-
-static __u32 addr_srv_idx = ADDR_SRV_IDX;
-static __u32 addr_cli_idx = ADDR_CLI_IDX;
-
-static __u32 egress_srv_idx = EGRESS_SRV_IDX;
-static __u32 egress_cli_idx = EGRESS_CLI_IDX;
-static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX;
-
-static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
-static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
-
-static void init_loopback6(struct sockaddr_in6 *sa6)
-{
- memset(sa6, 0, sizeof(*sa6));
- sa6->sin6_family = AF_INET6;
- sa6->sin6_addr = in6addr_loopback;
-}
-
-static void print_sk(const struct bpf_sock *sk)
-{
- char src_ip4[24], dst_ip4[24];
- char src_ip6[64], dst_ip6[64];
-
- inet_ntop(AF_INET, &sk->src_ip4, src_ip4, sizeof(src_ip4));
- inet_ntop(AF_INET6, &sk->src_ip6, src_ip6, sizeof(src_ip6));
- inet_ntop(AF_INET, &sk->dst_ip4, dst_ip4, sizeof(dst_ip4));
- inet_ntop(AF_INET6, &sk->dst_ip6, dst_ip6, sizeof(dst_ip6));
-
- printf("state:%u bound_dev_if:%u family:%u type:%u protocol:%u mark:%u priority:%u "
- "src_ip4:%x(%s) src_ip6:%x:%x:%x:%x(%s) src_port:%u "
- "dst_ip4:%x(%s) dst_ip6:%x:%x:%x:%x(%s) dst_port:%u\n",
- sk->state, sk->bound_dev_if, sk->family, sk->type, sk->protocol,
- sk->mark, sk->priority,
- sk->src_ip4, src_ip4,
- sk->src_ip6[0], sk->src_ip6[1], sk->src_ip6[2], sk->src_ip6[3],
- src_ip6, sk->src_port,
- sk->dst_ip4, dst_ip4,
- sk->dst_ip6[0], sk->dst_ip6[1], sk->dst_ip6[2], sk->dst_ip6[3],
- dst_ip6, ntohs(sk->dst_port));
-}
-
-static void print_tp(const struct bpf_tcp_sock *tp)
-{
- printf("snd_cwnd:%u srtt_us:%u rtt_min:%u snd_ssthresh:%u rcv_nxt:%u "
- "snd_nxt:%u snd:una:%u mss_cache:%u ecn_flags:%u "
- "rate_delivered:%u rate_interval_us:%u packets_out:%u "
- "retrans_out:%u total_retrans:%u segs_in:%u data_segs_in:%u "
- "segs_out:%u data_segs_out:%u lost_out:%u sacked_out:%u "
- "bytes_received:%llu bytes_acked:%llu\n",
- tp->snd_cwnd, tp->srtt_us, tp->rtt_min, tp->snd_ssthresh,
- tp->rcv_nxt, tp->snd_nxt, tp->snd_una, tp->mss_cache,
- tp->ecn_flags, tp->rate_delivered, tp->rate_interval_us,
- tp->packets_out, tp->retrans_out, tp->total_retrans,
- tp->segs_in, tp->data_segs_in, tp->segs_out,
- tp->data_segs_out, tp->lost_out, tp->sacked_out,
- tp->bytes_received, tp->bytes_acked);
-}
-
-static void check_result(void)
-{
- struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
- struct bpf_sock srv_sk, cli_sk, listen_sk;
- __u32 ingress_linum, egress_linum;
- int err;
-
- err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
- &egress_linum);
- CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
- "err:%d errno:%d", err, errno);
-
- err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
- &ingress_linum);
- CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
- "err:%d errno:%d", err, errno);
-
- err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk);
- CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)",
- "err:%d errno:%d", err, errno);
- err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp);
- CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)",
- "err:%d errno:%d", err, errno);
-
- err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk);
- CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)",
- "err:%d errno:%d", err, errno);
- err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp);
- CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)",
- "err:%d errno:%d", err, errno);
-
- err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk);
- CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)",
- "err:%d errno:%d", err, errno);
- err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp);
- CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)",
- "err:%d errno:%d", err, errno);
-
- printf("listen_sk: ");
- print_sk(&listen_sk);
- printf("\n");
-
- printf("srv_sk: ");
- print_sk(&srv_sk);
- printf("\n");
-
- printf("cli_sk: ");
- print_sk(&cli_sk);
- printf("\n");
-
- printf("listen_tp: ");
- print_tp(&listen_tp);
- printf("\n");
-
- printf("srv_tp: ");
- print_tp(&srv_tp);
- printf("\n");
-
- printf("cli_tp: ");
- print_tp(&cli_tp);
- printf("\n");
-
- CHECK(listen_sk.state != 10 ||
- listen_sk.family != AF_INET6 ||
- listen_sk.protocol != IPPROTO_TCP ||
- memcmp(listen_sk.src_ip6, &in6addr_loopback,
- sizeof(listen_sk.src_ip6)) ||
- listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
- listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
- listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
- listen_sk.dst_port,
- "Unexpected listen_sk",
- "Check listen_sk output. ingress_linum:%u",
- ingress_linum);
-
- CHECK(srv_sk.state == 10 ||
- !srv_sk.state ||
- srv_sk.family != AF_INET6 ||
- srv_sk.protocol != IPPROTO_TCP ||
- memcmp(srv_sk.src_ip6, &in6addr_loopback,
- sizeof(srv_sk.src_ip6)) ||
- memcmp(srv_sk.dst_ip6, &in6addr_loopback,
- sizeof(srv_sk.dst_ip6)) ||
- srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
- srv_sk.dst_port != cli_sa6.sin6_port,
- "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u",
- egress_linum);
-
- CHECK(cli_sk.state == 10 ||
- !cli_sk.state ||
- cli_sk.family != AF_INET6 ||
- cli_sk.protocol != IPPROTO_TCP ||
- memcmp(cli_sk.src_ip6, &in6addr_loopback,
- sizeof(cli_sk.src_ip6)) ||
- memcmp(cli_sk.dst_ip6, &in6addr_loopback,
- sizeof(cli_sk.dst_ip6)) ||
- cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
- cli_sk.dst_port != srv_sa6.sin6_port,
- "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u",
- egress_linum);
-
- CHECK(listen_tp.data_segs_out ||
- listen_tp.data_segs_in ||
- listen_tp.total_retrans ||
- listen_tp.bytes_acked,
- "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
- ingress_linum);
-
- CHECK(srv_tp.data_segs_out != 2 ||
- srv_tp.data_segs_in ||
- srv_tp.snd_cwnd != 10 ||
- srv_tp.total_retrans ||
- srv_tp.bytes_acked != 2 * DATA_LEN,
- "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
- egress_linum);
-
- CHECK(cli_tp.data_segs_out ||
- cli_tp.data_segs_in != 2 ||
- cli_tp.snd_cwnd != 10 ||
- cli_tp.total_retrans ||
- cli_tp.bytes_received != 2 * DATA_LEN,
- "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
- egress_linum);
-}
-
-static void check_sk_pkt_out_cnt(int accept_fd, int cli_fd)
-{
- struct bpf_spinlock_cnt pkt_out_cnt = {}, pkt_out_cnt10 = {};
- int err;
-
- pkt_out_cnt.cnt = ~0;
- pkt_out_cnt10.cnt = ~0;
- err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &accept_fd, &pkt_out_cnt);
- if (!err)
- err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &accept_fd,
- &pkt_out_cnt10);
-
- /* The bpf prog only counts fullsocks, and the passive
- * connection does not become a fullsock until the 3WHS
- * has finished.
- * The bpf prog therefore counted only two outgoing data
- * packets, but init_sk_storage() specially seeds accept_fd's
- * pkt_out_cnt with 2. Hence, 4 here.
- */
- CHECK(err || pkt_out_cnt.cnt != 4 || pkt_out_cnt10.cnt != 40,
- "bpf_map_lookup_elem(sk_pkt_out_cnt, &accept_fd)",
- "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u",
- err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
-
- pkt_out_cnt.cnt = ~0;
- pkt_out_cnt10.cnt = ~0;
- err = bpf_map_lookup_elem(sk_pkt_out_cnt_fd, &cli_fd, &pkt_out_cnt);
- if (!err)
- err = bpf_map_lookup_elem(sk_pkt_out_cnt10_fd, &cli_fd,
- &pkt_out_cnt10);
- /* The active connection is a fullsock from the beginning:
- * 1 SYN and 1 ACK during the 3WHS,
- * 2 ACKs for the two data packets.
- *
- * The bpf_prog initialized it to 0xeB9F.
- */
- CHECK(err || pkt_out_cnt.cnt != 0xeB9F + 4 ||
- pkt_out_cnt10.cnt != 0xeB9F + 40,
- "bpf_map_lookup_elem(sk_pkt_out_cnt, &cli_fd)",
- "err:%d errno:%d pkt_out_cnt:%u pkt_out_cnt10:%u",
- err, errno, pkt_out_cnt.cnt, pkt_out_cnt10.cnt);
-}
-
-static void init_sk_storage(int sk_fd, __u32 pkt_out_cnt)
-{
- struct bpf_spinlock_cnt scnt = {};
- int err;
-
- scnt.cnt = pkt_out_cnt;
- err = bpf_map_update_elem(sk_pkt_out_cnt_fd, &sk_fd, &scnt,
- BPF_NOEXIST);
- CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt_fd)",
- "err:%d errno:%d", err, errno);
-
- scnt.cnt *= 10;
- err = bpf_map_update_elem(sk_pkt_out_cnt10_fd, &sk_fd, &scnt,
- BPF_NOEXIST);
- CHECK(err, "bpf_map_update_elem(sk_pkt_out_cnt10_fd)",
- "err:%d errno:%d", err, errno);
-}
-
-static void test(void)
-{
- int listen_fd, cli_fd, accept_fd, epfd, err;
- struct epoll_event ev;
- socklen_t addrlen;
- int i;
-
- addrlen = sizeof(struct sockaddr_in6);
- ev.events = EPOLLIN;
-
- epfd = epoll_create(1);
- CHECK(epfd == -1, "epoll_create()", "epfd:%d errno:%d", epfd, errno);
-
- /* Prepare listen_fd */
- listen_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
- CHECK(listen_fd == -1, "socket()", "listen_fd:%d errno:%d",
- listen_fd, errno);
-
- init_loopback6(&srv_sa6);
- err = bind(listen_fd, (struct sockaddr *)&srv_sa6, sizeof(srv_sa6));
- CHECK(err, "bind(listen_fd)", "err:%d errno:%d", err, errno);
-
- err = getsockname(listen_fd, (struct sockaddr *)&srv_sa6, &addrlen);
- CHECK(err, "getsockname(listen_fd)", "err:%d errno:%d", err, errno);
-
- err = listen(listen_fd, 1);
- CHECK(err, "listen(listen_fd)", "err:%d errno:%d", err, errno);
-
- /* Prepare cli_fd */
- cli_fd = socket(AF_INET6, SOCK_STREAM | SOCK_NONBLOCK, 0);
- CHECK(cli_fd == -1, "socket()", "cli_fd:%d errno:%d", cli_fd, errno);
-
- init_loopback6(&cli_sa6);
- err = bind(cli_fd, (struct sockaddr *)&cli_sa6, sizeof(cli_sa6));
- CHECK(err, "bind(cli_fd)", "err:%d errno:%d", err, errno);
-
- err = getsockname(cli_fd, (struct sockaddr *)&cli_sa6, &addrlen);
- CHECK(err, "getsockname(cli_fd)", "err:%d errno:%d",
- err, errno);
-
- /* Update addr_map with srv_sa6 and cli_sa6 */
- err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0);
- CHECK(err, "map_update", "err:%d errno:%d", err, errno);
-
- err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0);
- CHECK(err, "map_update", "err:%d errno:%d", err, errno);
-
- /* Connect from cli_sa6 to srv_sa6 */
- err = connect(cli_fd, (struct sockaddr *)&srv_sa6, addrlen);
- printf("srv_sa6.sin6_port:%u cli_sa6.sin6_port:%u\n\n",
- ntohs(srv_sa6.sin6_port), ntohs(cli_sa6.sin6_port));
- CHECK(err && errno != EINPROGRESS,
- "connect(cli_fd)", "err:%d errno:%d", err, errno);
-
- ev.data.fd = listen_fd;
- err = epoll_ctl(epfd, EPOLL_CTL_ADD, listen_fd, &ev);
- CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, listen_fd)", "err:%d errno:%d",
- err, errno);
-
- /* Accept the connection */
- /* Have some timeout in accept(listen_fd). Just in case. */
- err = epoll_wait(epfd, &ev, 1, 1000);
- CHECK(err != 1 || ev.data.fd != listen_fd,
- "epoll_wait(listen_fd)",
- "err:%d errno:%d ev.data.fd:%d listen_fd:%d",
- err, errno, ev.data.fd, listen_fd);
-
- accept_fd = accept(listen_fd, NULL, NULL);
- CHECK(accept_fd == -1, "accept(listen_fd)", "accept_fd:%d errno:%d",
- accept_fd, errno);
- close(listen_fd);
-
- ev.data.fd = cli_fd;
- err = epoll_ctl(epfd, EPOLL_CTL_ADD, cli_fd, &ev);
- CHECK(err, "epoll_ctl(EPOLL_CTL_ADD, cli_fd)", "err:%d errno:%d",
- err, errno);
-
- init_sk_storage(accept_fd, 2);
-
- for (i = 0; i < 2; i++) {
- /* Send some data from accept_fd to cli_fd */
- err = send(accept_fd, DATA, DATA_LEN, 0);
- CHECK(err != DATA_LEN, "send(accept_fd)", "err:%d errno:%d",
- err, errno);
-
- /* Have some timeout in recv(cli_fd). Just in case. */
- err = epoll_wait(epfd, &ev, 1, 1000);
- CHECK(err != 1 || ev.data.fd != cli_fd,
- "epoll_wait(cli_fd)", "err:%d errno:%d ev.data.fd:%d cli_fd:%d",
- err, errno, ev.data.fd, cli_fd);
-
- err = recv(cli_fd, NULL, 0, MSG_TRUNC);
- CHECK(err, "recv(cli_fd)", "err:%d errno:%d", err, errno);
- }
-
- check_sk_pkt_out_cnt(accept_fd, cli_fd);
-
- close(epfd);
- close(accept_fd);
- close(cli_fd);
-
- check_result();
-}
-
-int main(int argc, char **argv)
-{
- struct bpf_prog_load_attr attr = {
- .file = "test_sock_fields_kern.o",
- .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
- .prog_flags = BPF_F_TEST_RND_HI32,
- };
- int cgroup_fd, egress_fd, ingress_fd, err;
- struct bpf_program *ingress_prog;
- struct bpf_object *obj;
- struct bpf_map *map;
-
- /* Create a cgroup, get fd, and join it */
- cgroup_fd = cgroup_setup_and_join(TEST_CGROUP);
- CHECK(cgroup_fd < 0, "cgroup_setup_and_join()",
- "cgroup_fd:%d errno:%d", cgroup_fd, errno);
- atexit(cleanup_cgroup_environment);
-
- err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
- CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
-
- ingress_prog = bpf_object__find_program_by_title(obj,
- "cgroup_skb/ingress");
- CHECK(!ingress_prog,
- "bpf_object__find_program_by_title(cgroup_skb/ingress)",
- "not found");
- ingress_fd = bpf_program__fd(ingress_prog);
-
- err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
- CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)",
- "err:%d errno%d", err, errno);
-
- err = bpf_prog_attach(ingress_fd, cgroup_fd,
- BPF_CGROUP_INET_INGRESS, 0);
- CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_INGRESS)",
- "err:%d errno%d", err, errno);
- close(cgroup_fd);
-
- map = bpf_object__find_map_by_name(obj, "addr_map");
- CHECK(!map, "cannot find addr_map", "(null)");
- addr_map_fd = bpf_map__fd(map);
-
- map = bpf_object__find_map_by_name(obj, "sock_result_map");
- CHECK(!map, "cannot find sock_result_map", "(null)");
- sk_map_fd = bpf_map__fd(map);
-
- map = bpf_object__find_map_by_name(obj, "tcp_sock_result_map");
- CHECK(!map, "cannot find tcp_sock_result_map", "(null)");
- tp_map_fd = bpf_map__fd(map);
-
- map = bpf_object__find_map_by_name(obj, "linum_map");
- CHECK(!map, "cannot find linum_map", "(null)");
- linum_map_fd = bpf_map__fd(map);
-
- map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt");
- CHECK(!map, "cannot find sk_pkt_out_cnt", "(null)");
- sk_pkt_out_cnt_fd = bpf_map__fd(map);
-
- map = bpf_object__find_map_by_name(obj, "sk_pkt_out_cnt10");
- CHECK(!map, "cannot find sk_pkt_out_cnt10", "(null)");
- sk_pkt_out_cnt10_fd = bpf_map__fd(map);
-
- test();
-
- bpf_object__close(obj);
- cleanup_cgroup_environment();
-
- printf("PASS\n");
-
- return 0;
-}
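Note: the standalone test_sock_fields binary is deleted wholesale above; presumably its coverage lives on elsewhere in the series (an assumption, not stated in this hunk). As a hedged sketch only, an equivalent check re-homed under the test_progs harness would be shaped like this (the skeleton name and its generated functions are hypothetical):

#include <test_progs.h>

/* Hypothetical skeleton name; test_progs discovers tests by their
 * test_ prefix.  This is a minimal sketch, not the actual migrated test.
 */
void test_sock_fields(void)
{
	struct test_sock_fields *skel;

	skel = test_sock_fields__open_and_load();
	if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
		return;
	/* ... drive the loopback connection and verify the bpf_sock and
	 * bpf_tcp_sock fields as check_result() did above ...
	 */
	test_sock_fields__destroy(skel);
}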
diff --git a/tools/testing/selftests/bpf/test_socket_cookie.c b/tools/testing/selftests/bpf/test_socket_cookie.c
index 154a8fd2a48d..ca7ca87e91aa 100644
--- a/tools/testing/selftests/bpf/test_socket_cookie.c
+++ b/tools/testing/selftests/bpf/test_socket_cookie.c
@@ -151,7 +151,7 @@ static int run_test(int cgfd)
}
bpf_object__for_each_program(prog, pobj) {
- prog_name = bpf_program__title(prog, /*needs_copy*/ false);
+ prog_name = bpf_program__section_name(prog);
if (libbpf_attach_type_by_name(prog_name, &attach_type))
goto err;
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 9b6fb00dc7a0..0fa1e421c3d7 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -86,6 +86,7 @@ int txmsg_ktls_skb_redir;
int ktls;
int peek_flag;
int skb_use_parser;
+int txmsg_omit_skb_parser;
static const struct option long_options[] = {
{"help", no_argument, NULL, 'h' },
@@ -111,6 +112,7 @@ static const struct option long_options[] = {
{"txmsg_redir_skb", no_argument, &txmsg_redir_skb, 1 },
{"ktls", no_argument, &ktls, 1 },
{"peek", no_argument, &peek_flag, 1 },
+ {"txmsg_omit_skb_parser", no_argument, &txmsg_omit_skb_parser, 1},
{"whitelist", required_argument, NULL, 'n' },
{"blacklist", required_argument, NULL, 'b' },
{0, 0, NULL, 0 }
@@ -175,6 +177,7 @@ static void test_reset(void)
txmsg_apply = txmsg_cork = 0;
txmsg_ingress = txmsg_redir_skb = 0;
txmsg_ktls_skb = txmsg_ktls_skb_drop = txmsg_ktls_skb_redir = 0;
+ txmsg_omit_skb_parser = 0;
skb_use_parser = 0;
}
@@ -518,28 +521,13 @@ static int msg_verify_data(struct msghdr *msg, int size, int chunk_sz)
if (i == 0 && txmsg_ktls_skb) {
if (msg->msg_iov[i].iov_len < 4)
return -EIO;
- if (txmsg_ktls_skb_redir) {
- if (memcmp(&d[13], "PASS", 4) != 0) {
- fprintf(stderr,
- "detected redirect ktls_skb data error with skb ingress update @iov[%i]:%i \"%02x %02x %02x %02x\" != \"PASS\"\n", i, 0, d[13], d[14], d[15], d[16]);
- return -EIO;
- }
- d[13] = 0;
- d[14] = 1;
- d[15] = 2;
- d[16] = 3;
- j = 13;
- } else if (txmsg_ktls_skb) {
- if (memcmp(d, "PASS", 4) != 0) {
- fprintf(stderr,
- "detected ktls_skb data error with skb ingress update @iov[%i]:%i \"%02x %02x %02x %02x\" != \"PASS\"\n", i, 0, d[0], d[1], d[2], d[3]);
- return -EIO;
- }
- d[0] = 0;
- d[1] = 1;
- d[2] = 2;
- d[3] = 3;
+ if (memcmp(d, "PASS", 4) != 0) {
+ fprintf(stderr,
+ "detected skb data error with skb ingress update @iov[%i]:%i \"%02x %02x %02x %02x\" != \"PASS\"\n",
+ i, 0, d[0], d[1], d[2], d[3]);
+ return -EIO;
}
+ j = 4; /* advance index past PASS header */
}
for (; j < msg->msg_iov[i].iov_len && size; j++) {
@@ -927,13 +915,15 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
goto run;
/* Attach programs to sockmap */
- err = bpf_prog_attach(prog_fd[0], map_fd[0],
- BPF_SK_SKB_STREAM_PARSER, 0);
- if (err) {
- fprintf(stderr,
- "ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n",
- prog_fd[0], map_fd[0], err, strerror(errno));
- return err;
+ if (!txmsg_omit_skb_parser) {
+ err = bpf_prog_attach(prog_fd[0], map_fd[0],
+ BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err) {
+ fprintf(stderr,
+ "ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n",
+ prog_fd[0], map_fd[0], err, strerror(errno));
+ return err;
+ }
}
err = bpf_prog_attach(prog_fd[1], map_fd[0],
@@ -946,13 +936,15 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test)
/* Attach programs to TLS sockmap */
if (txmsg_ktls_skb) {
- err = bpf_prog_attach(prog_fd[0], map_fd[8],
- BPF_SK_SKB_STREAM_PARSER, 0);
- if (err) {
- fprintf(stderr,
- "ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n",
- prog_fd[0], map_fd[8], err, strerror(errno));
- return err;
+ if (!txmsg_omit_skb_parser) {
+ err = bpf_prog_attach(prog_fd[0], map_fd[8],
+ BPF_SK_SKB_STREAM_PARSER, 0);
+ if (err) {
+ fprintf(stderr,
+ "ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n",
+ prog_fd[0], map_fd[8], err, strerror(errno));
+ return err;
+ }
}
err = bpf_prog_attach(prog_fd[2], map_fd[8],
@@ -1480,12 +1472,29 @@ static void test_txmsg_skb(int cgrp, struct sockmap_options *opt)
txmsg_ktls_skb_drop = 0;
txmsg_ktls_skb_redir = 1;
test_exec(cgrp, opt);
+ txmsg_ktls_skb_redir = 0;
+
+ /* Tests that omit skb_parser */
+ txmsg_omit_skb_parser = 1;
+ ktls = 0;
+ txmsg_ktls_skb = 0;
+ test_exec(cgrp, opt);
+
+ txmsg_ktls_skb_drop = 1;
+ test_exec(cgrp, opt);
+ txmsg_ktls_skb_drop = 0;
+
+ txmsg_ktls_skb_redir = 1;
+ test_exec(cgrp, opt);
+
+ ktls = 1;
+ test_exec(cgrp, opt);
+ txmsg_omit_skb_parser = 0;
opt->data_test = data;
ktls = k;
}
-
/* Test cork with hung data. This tests poor usage patterns where
* cork can leave data on the ring if user program is buggy and
* doesn't flush them somehow. They do take some time however
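The new --txmsg_omit_skb_parser mode exercises verdict-only sockmap attachments: the stream parser program is optional, so the verdict program can be attached on its own. A hedged sketch of that attachment (fd indices follow the hunks above; error handling trimmed):

/* Verdict-only attachment: no BPF_SK_SKB_STREAM_PARSER program. */
err = bpf_prog_attach(prog_fd[1], map_fd[0], BPF_SK_SKB_STREAM_VERDICT, 0);
if (err)
	fprintf(stderr, "ERROR: verdict attach: %d (%s)\n",
		err, strerror(errno));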
diff --git a/tools/testing/selftests/bpf/test_tc_redirect.sh b/tools/testing/selftests/bpf/test_tc_redirect.sh
new file mode 100755
index 000000000000..6d7482562140
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tc_redirect.sh
@@ -0,0 +1,204 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test sets up 3 netns (src <-> fwd <-> dst). There is no direct veth link
+# between src and dst; the fwd netns has a veth link to each of src and dst. The
+# client lives in src and the server in dst. The test installs a TC BPF program
+# on each host-facing veth in fwd which calls either i) bpf_redirect_neigh() to
+# populate the neighbor address and redirect, or ii) bpf_redirect_peer() to
+# switch namespaces from the ingress side; it also installs a checker prog on
+# the egress side to drop unexpected traffic.
+
+if [[ $EUID -ne 0 ]]; then
+ echo "This script must be run as root"
+ echo "FAIL"
+ exit 1
+fi
+
+# check that needed tools are present
+command -v nc >/dev/null 2>&1 || \
+ { echo >&2 "nc is not available"; exit 1; }
+command -v dd >/dev/null 2>&1 || \
+ { echo >&2 "dd is not available"; exit 1; }
+command -v timeout >/dev/null 2>&1 || \
+ { echo >&2 "timeout is not available"; exit 1; }
+command -v ping >/dev/null 2>&1 || \
+ { echo >&2 "ping is not available"; exit 1; }
+command -v ping6 >/dev/null 2>&1 || \
+ { echo >&2 "ping6 is not available"; exit 1; }
+command -v perl >/dev/null 2>&1 || \
+ { echo >&2 "perl is not available"; exit 1; }
+command -v jq >/dev/null 2>&1 || \
+ { echo >&2 "jq is not available"; exit 1; }
+command -v bpftool >/dev/null 2>&1 || \
+ { echo >&2 "bpftool is not available"; exit 1; }
+
+readonly GREEN='\033[0;92m'
+readonly RED='\033[0;31m'
+readonly NC='\033[0m' # No Color
+
+readonly PING_ARG="-c 3 -w 10 -q"
+
+readonly TIMEOUT=10
+
+readonly NS_SRC="ns-src-$(mktemp -u XXXXXX)"
+readonly NS_FWD="ns-fwd-$(mktemp -u XXXXXX)"
+readonly NS_DST="ns-dst-$(mktemp -u XXXXXX)"
+
+readonly IP4_SRC="172.16.1.100"
+readonly IP4_DST="172.16.2.100"
+
+readonly IP6_SRC="::1:dead:beef:cafe"
+readonly IP6_DST="::2:dead:beef:cafe"
+
+readonly IP4_SLL="169.254.0.1"
+readonly IP4_DLL="169.254.0.2"
+readonly IP4_NET="169.254.0.0"
+
+netns_cleanup()
+{
+ ip netns del ${NS_SRC}
+ ip netns del ${NS_FWD}
+ ip netns del ${NS_DST}
+}
+
+netns_setup()
+{
+ ip netns add "${NS_SRC}"
+ ip netns add "${NS_FWD}"
+ ip netns add "${NS_DST}"
+
+ ip link add veth_src type veth peer name veth_src_fwd
+ ip link add veth_dst type veth peer name veth_dst_fwd
+
+ ip link set veth_src netns ${NS_SRC}
+ ip link set veth_src_fwd netns ${NS_FWD}
+
+ ip link set veth_dst netns ${NS_DST}
+ ip link set veth_dst_fwd netns ${NS_FWD}
+
+ ip -netns ${NS_SRC} addr add ${IP4_SRC}/32 dev veth_src
+ ip -netns ${NS_DST} addr add ${IP4_DST}/32 dev veth_dst
+
+ # The fwd netns automatically gets a v6 LL address / routes, but also
+ # needs a v4 one in order to start ARP probing. The IP4_NET route is
+ # added to the endpoints so that ARP requests are answered.
+
+ ip -netns ${NS_FWD} addr add ${IP4_SLL}/32 dev veth_src_fwd
+ ip -netns ${NS_FWD} addr add ${IP4_DLL}/32 dev veth_dst_fwd
+
+ ip -netns ${NS_SRC} addr add ${IP6_SRC}/128 dev veth_src nodad
+ ip -netns ${NS_DST} addr add ${IP6_DST}/128 dev veth_dst nodad
+
+ ip -netns ${NS_SRC} link set dev veth_src up
+ ip -netns ${NS_FWD} link set dev veth_src_fwd up
+
+ ip -netns ${NS_DST} link set dev veth_dst up
+ ip -netns ${NS_FWD} link set dev veth_dst_fwd up
+
+ ip -netns ${NS_SRC} route add ${IP4_DST}/32 dev veth_src scope global
+ ip -netns ${NS_SRC} route add ${IP4_NET}/16 dev veth_src scope global
+ ip -netns ${NS_FWD} route add ${IP4_SRC}/32 dev veth_src_fwd scope global
+
+ ip -netns ${NS_SRC} route add ${IP6_DST}/128 dev veth_src scope global
+ ip -netns ${NS_FWD} route add ${IP6_SRC}/128 dev veth_src_fwd scope global
+
+ ip -netns ${NS_DST} route add ${IP4_SRC}/32 dev veth_dst scope global
+ ip -netns ${NS_DST} route add ${IP4_NET}/16 dev veth_dst scope global
+ ip -netns ${NS_FWD} route add ${IP4_DST}/32 dev veth_dst_fwd scope global
+
+ ip -netns ${NS_DST} route add ${IP6_SRC}/128 dev veth_dst scope global
+ ip -netns ${NS_FWD} route add ${IP6_DST}/128 dev veth_dst_fwd scope global
+
+ fmac_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/address)
+ fmac_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/address)
+
+ ip -netns ${NS_SRC} neigh add ${IP4_DST} dev veth_src lladdr $fmac_src
+ ip -netns ${NS_DST} neigh add ${IP4_SRC} dev veth_dst lladdr $fmac_dst
+
+ ip -netns ${NS_SRC} neigh add ${IP6_DST} dev veth_src lladdr $fmac_src
+ ip -netns ${NS_DST} neigh add ${IP6_SRC} dev veth_dst lladdr $fmac_dst
+}
+
+netns_test_connectivity()
+{
+ set +e
+
+ ip netns exec ${NS_DST} bash -c "nc -4 -l -p 9004 &"
+ ip netns exec ${NS_DST} bash -c "nc -6 -l -p 9006 &"
+
+ TEST="TCPv4 connectivity test"
+ ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP4_DST}/9004"
+ if [ $? -ne 0 ]; then
+ echo -e "${TEST}: ${RED}FAIL${NC}"
+ exit 1
+ fi
+ echo -e "${TEST}: ${GREEN}PASS${NC}"
+
+ TEST="TCPv6 connectivity test"
+ ip netns exec ${NS_SRC} bash -c "timeout ${TIMEOUT} dd if=/dev/zero bs=1000 count=100 > /dev/tcp/${IP6_DST}/9006"
+ if [ $? -ne 0 ]; then
+ echo -e "${TEST}: ${RED}FAIL${NC}"
+ exit 1
+ fi
+ echo -e "${TEST}: ${GREEN}PASS${NC}"
+
+ TEST="ICMPv4 connectivity test"
+ ip netns exec ${NS_SRC} ping $PING_ARG ${IP4_DST}
+ if [ $? -ne 0 ]; then
+ echo -e "${TEST}: ${RED}FAIL${NC}"
+ exit 1
+ fi
+ echo -e "${TEST}: ${GREEN}PASS${NC}"
+
+ TEST="ICMPv6 connectivity test"
+ ip netns exec ${NS_SRC} ping6 $PING_ARG ${IP6_DST}
+ if [ $? -ne 0 ]; then
+ echo -e "${TEST}: ${RED}FAIL${NC}"
+ exit 1
+ fi
+ echo -e "${TEST}: ${GREEN}PASS${NC}"
+
+ set -e
+}
+
+hex_mem_str()
+{
+ perl -e 'print join(" ", unpack("(H2)8", pack("L", @ARGV)))' $1
+}
+
+netns_setup_bpf()
+{
+ local obj=$1
+
+ ip netns exec ${NS_FWD} tc qdisc add dev veth_src_fwd clsact
+ ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd ingress bpf da obj $obj sec src_ingress
+ ip netns exec ${NS_FWD} tc filter add dev veth_src_fwd egress bpf da obj $obj sec chk_egress
+
+ ip netns exec ${NS_FWD} tc qdisc add dev veth_dst_fwd clsact
+ ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd ingress bpf da obj $obj sec dst_ingress
+ ip netns exec ${NS_FWD} tc filter add dev veth_dst_fwd egress bpf da obj $obj sec chk_egress
+
+ veth_src=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_src_fwd/ifindex)
+ veth_dst=$(ip netns exec ${NS_FWD} cat /sys/class/net/veth_dst_fwd/ifindex)
+
+ progs=$(ip netns exec ${NS_FWD} bpftool net --json | jq -r '.[] | .tc | map(.id) | .[]')
+ for prog in $progs; do
+ map=$(bpftool prog show id $prog --json | jq -r '.map_ids | .? | .[]')
+ if [ ! -z "$map" ]; then
+ bpftool map update id $map key hex $(hex_mem_str 0) value hex $(hex_mem_str $veth_src)
+ bpftool map update id $map key hex $(hex_mem_str 1) value hex $(hex_mem_str $veth_dst)
+ fi
+ done
+}
+
+trap netns_cleanup EXIT
+set -e
+
+netns_setup
+netns_setup_bpf test_tc_neigh.o
+netns_test_connectivity
+netns_cleanup
+netns_setup
+netns_setup_bpf test_tc_peer.o
+netns_test_connectivity
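For reference, a hedged sketch of the kind of tc program the script attaches (the real objects are test_tc_neigh.o and test_tc_peer.o, built elsewhere; the map layout and names here are assumptions inferred from netns_setup_bpf() above):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

/* Populated by the bpftool map updates above: key 0 holds the ifindex of
 * veth_src_fwd, key 1 that of veth_dst_fwd (assumed layout).
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, __u32);
	__type(value, __u32);
} ifindex_map SEC(".maps");

SEC("src_ingress")
int tc_src(struct __sk_buff *skb)
{
	__u32 key = 1;
	__u32 *ifindex = bpf_map_lookup_elem(&ifindex_map, &key);

	if (!ifindex)
		return TC_ACT_SHOT;
	/* hop directly into the netns of veth_dst_fwd's peer device */
	return bpf_redirect_peer(*ifindex, 0);
}

char _license[] SEC("license") = "GPL";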
diff --git a/tools/testing/selftests/bpf/test_tcp_hdr_options.h b/tools/testing/selftests/bpf/test_tcp_hdr_options.h
new file mode 100644
index 000000000000..6118e3ab61fc
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tcp_hdr_options.h
@@ -0,0 +1,152 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020 Facebook */
+
+#ifndef _TEST_TCP_HDR_OPTIONS_H
+#define _TEST_TCP_HDR_OPTIONS_H
+
+struct bpf_test_option {
+ __u8 flags;
+ __u8 max_delack_ms;
+ __u8 rand;
+} __attribute__((packed));
+
+enum {
+ OPTION_RESEND,
+ OPTION_MAX_DELACK_MS,
+ OPTION_RAND,
+ __NR_OPTION_FLAGS,
+};
+
+#define OPTION_F_RESEND (1 << OPTION_RESEND)
+#define OPTION_F_MAX_DELACK_MS (1 << OPTION_MAX_DELACK_MS)
+#define OPTION_F_RAND (1 << OPTION_RAND)
+#define OPTION_MASK ((1 << __NR_OPTION_FLAGS) - 1)
+
+#define TEST_OPTION_FLAGS(flags, option) (1 & ((flags) >> (option)))
+#define SET_OPTION_FLAGS(flags, option) ((flags) |= (1 << (option)))
+
+/* Store in bpf_sk_storage */
+struct hdr_stg {
+ bool active;
+ bool resend_syn; /* active side only */
+ bool syncookie; /* passive side only */
+ bool fastopen; /* passive side only */
+};
+
+struct linum_err {
+ unsigned int linum;
+ int err;
+};
+
+#define TCPHDR_FIN 0x01
+#define TCPHDR_SYN 0x02
+#define TCPHDR_RST 0x04
+#define TCPHDR_PSH 0x08
+#define TCPHDR_ACK 0x10
+#define TCPHDR_URG 0x20
+#define TCPHDR_ECE 0x40
+#define TCPHDR_CWR 0x80
+#define TCPHDR_SYNACK (TCPHDR_SYN | TCPHDR_ACK)
+
+#define TCPOPT_EOL 0
+#define TCPOPT_NOP 1
+#define TCPOPT_WINDOW 3
+#define TCPOPT_EXP 254
+
+#define TCP_BPF_EXPOPT_BASE_LEN 4
+#define MAX_TCP_HDR_LEN 60
+#define MAX_TCP_OPTION_SPACE 40
+
+#ifdef BPF_PROG_TEST_TCP_HDR_OPTIONS
+
+#define CG_OK 1
+#define CG_ERR 0
+
+#ifndef SOL_TCP
+#define SOL_TCP 6
+#endif
+
+struct tcp_exprm_opt {
+ __u8 kind;
+ __u8 len;
+ __u16 magic;
+ union {
+ __u8 data[4];
+ __u32 data32;
+ };
+} __attribute__((packed));
+
+struct tcp_opt {
+ __u8 kind;
+ __u8 len;
+ union {
+ __u8 data[4];
+ __u32 data32;
+ };
+} __attribute__((packed));
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(max_entries, 2);
+ __type(key, int);
+ __type(value, struct linum_err);
+} lport_linum_map SEC(".maps");
+
+static inline unsigned int tcp_hdrlen(const struct tcphdr *th)
+{
+ return th->doff << 2;
+}
+
+static inline __u8 skops_tcp_flags(const struct bpf_sock_ops *skops)
+{
+ return skops->skb_tcp_flags;
+}
+
+static inline void clear_hdr_cb_flags(struct bpf_sock_ops *skops)
+{
+ bpf_sock_ops_cb_flags_set(skops,
+ skops->bpf_sock_ops_cb_flags &
+ ~(BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG));
+}
+
+static inline void set_hdr_cb_flags(struct bpf_sock_ops *skops, __u32 extra)
+{
+ bpf_sock_ops_cb_flags_set(skops,
+ skops->bpf_sock_ops_cb_flags |
+ BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG |
+ BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG |
+ extra);
+}
+static inline void
+clear_parse_all_hdr_cb_flags(struct bpf_sock_ops *skops)
+{
+ bpf_sock_ops_cb_flags_set(skops,
+ skops->bpf_sock_ops_cb_flags &
+ ~BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
+}
+
+static inline void
+set_parse_all_hdr_cb_flags(struct bpf_sock_ops *skops)
+{
+ bpf_sock_ops_cb_flags_set(skops,
+ skops->bpf_sock_ops_cb_flags |
+ BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG);
+}
+
+#define RET_CG_ERR(__err) ({ \
+ struct linum_err __linum_err; \
+ int __lport; \
+ \
+ __linum_err.linum = __LINE__; \
+ __linum_err.err = __err; \
+ __lport = skops->local_port; \
+ bpf_map_update_elem(&lport_linum_map, &__lport, &__linum_err, BPF_NOEXIST); \
+ clear_hdr_cb_flags(skops); \
+ clear_parse_all_hdr_cb_flags(skops); \
+ return CG_ERR; \
+})
+
+#endif /* BPF_PROG_TEST_TCP_HDR_OPTIONS */
+
+#endif /* _TEST_TCP_HDR_OPTIONS_H */
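A hedged usage sketch for the BPF-side helpers above: the op values are the standard bpf_sock_ops callbacks, and RET_CG_ERR() records the failing line into lport_linum_map keyed by local port. The program scaffolding below is an assumption, not taken from this series:

/* Assumes the usual BPF program includes plus:
 *   #define BPF_PROG_TEST_TCP_HDR_OPTIONS
 *   #include "test_tcp_hdr_options.h"
 */
SEC("sockops")
int estab(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_TCP_CONNECT_CB:
		/* opt in to write/parse callbacks for this connection */
		set_hdr_cb_flags(skops, 0);
		break;
	case BPF_SOCK_OPS_PARSE_HDR_OPT_CB:
		if (skops_tcp_flags(skops) & TCPHDR_SYNACK)
			; /* inspect SYN-ACK options here */
		break;
	case BPF_SOCK_OPS_STATE_CB:
		clear_hdr_cb_flags(skops);
		break;
	}
	return CG_OK;
}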
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 78a6bae56ea6..9be395d9dc64 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -114,6 +114,7 @@ struct bpf_test {
bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
};
enum bpf_attach_type expected_attach_type;
+ const char *kfunc;
};
/* Note we want this to be 64 bit aligned so that the end of our array is
@@ -984,8 +985,24 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
attr.log_level = 4;
attr.prog_flags = pflags;
+ if (prog_type == BPF_PROG_TYPE_TRACING && test->kfunc) {
+ attr.attach_btf_id = libbpf_find_vmlinux_btf_id(test->kfunc,
+ attr.expected_attach_type);
+ if (attr.attach_btf_id < 0) {
+ printf("FAIL\nFailed to find BTF ID for '%s'!\n",
+ test->kfunc);
+ (*errors)++;
+ return;
+ }
+ }
+
fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog));
- if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
+
+ /* BPF_PROG_TYPE_TRACING requires more setup, so
+ * bpf_probe_prog_type() won't give a correct answer.
+ */
+ if (fd_prog < 0 && prog_type != BPF_PROG_TYPE_TRACING &&
+ !bpf_probe_prog_type(prog_type, 0)) {
printf("SKIP (unsupported program type %d)\n", prog_type);
skips++;
goto close_fds;
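The new .kfunc field names the kernel function a BPF_PROG_TYPE_TRACING test attaches to, and libbpf_find_vmlinux_btf_id() resolves it against vmlinux BTF. A minimal usage sketch (the symbol is just an example):

int btf_id;

btf_id = libbpf_find_vmlinux_btf_id("dentry_open", BPF_TRACE_FENTRY);
if (btf_id < 0)
	fprintf(stderr, "no vmlinux BTF ID for dentry_open: %d\n", btf_id);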
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 4d0e913bbb22..1bbd1d9830c8 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -90,6 +90,33 @@ long ksym_get_addr(const char *name)
return 0;
}
+/* Open /proc/kallsyms and scan for the symbol on the fly. Without caching
+ * all symbols, this is faster than load_kallsyms() + ksym_get_addr().
+ */
+int kallsyms_find(const char *sym, unsigned long long *addr)
+{
+ char type, name[500];
+ unsigned long long value;
+ int err = 0;
+ FILE *f;
+
+ f = fopen("/proc/kallsyms", "r");
+ if (!f)
+ return -EINVAL;
+
+ while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
+ if (strcmp(name, sym) == 0) {
+ *addr = value;
+ goto out;
+ }
+ }
+ err = -ENOENT;
+
+out:
+ fclose(f);
+ return err;
+}
+
void read_trace_pipe(void)
{
int trace_fd;
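Usage of the new helper is a single call per lookup; a minimal sketch (the symbol name is just an example):

unsigned long long addr;

if (!kallsyms_find("bpf_fentry_test1", &addr))
	printf("bpf_fentry_test1 at 0x%llx\n", addr);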
diff --git a/tools/testing/selftests/bpf/trace_helpers.h b/tools/testing/selftests/bpf/trace_helpers.h
index 25ef597dd03f..f62fdef9e589 100644
--- a/tools/testing/selftests/bpf/trace_helpers.h
+++ b/tools/testing/selftests/bpf/trace_helpers.h
@@ -12,6 +12,10 @@ struct ksym {
int load_kallsyms(void);
struct ksym *ksym_search(long key);
long ksym_get_addr(const char *name);
+
+/* Open /proc/kallsyms and find the address on the fly; faster than load + search. */
+int kallsyms_find(const char *sym, unsigned long long *addr);
+
void read_trace_pipe(void);
#endif
diff --git a/tools/testing/selftests/bpf/verifier/and.c b/tools/testing/selftests/bpf/verifier/and.c
index d781bc86e100..ca8fdb1b3f01 100644
--- a/tools/testing/selftests/bpf/verifier/and.c
+++ b/tools/testing/selftests/bpf/verifier/and.c
@@ -48,3 +48,19 @@
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+{
+ "check known subreg with unknown reg",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+ BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 32),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xFFFF1234),
+ /* Upper bits are unknown, but the AND above masks out the 1, zeroing the lower 32 bits */
+ BPF_JMP32_IMM(BPF_JLT, BPF_REG_0, 1, 1),
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 512),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .retval = 0
+},
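To spell out why this is ACCEPT: in ALU64 the immediate 0xFFFF1234 sign-extends to 0xFFFFFFFFFFFF1234, whose bit 0 is clear, so the low 32 bits of r0 become exactly 0 and precise subreg tracking lets the verifier take only the JMP32 branch, never walking the unsafe ctx load. A worked model of the masking (plain C, not BPF; the value of x is arbitrary):

#include <assert.h>

int main(void)
{
	unsigned long long x = 0xdeadbeef;      /* stands in for prandom */
	unsigned long long r0 = (x << 32) + 1;  /* low 32 bits == 1 */

	r0 &= 0xFFFFFFFFFFFF1234ULL;            /* bit 0 of the mask is 0 */
	assert((unsigned int)r0 == 0);          /* JMP32 "< 1" always taken */
	return 0;
}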
diff --git a/tools/testing/selftests/bpf/verifier/basic.c b/tools/testing/selftests/bpf/verifier/basic.c
index b8d18642653a..de84f0d57082 100644
--- a/tools/testing/selftests/bpf/verifier/basic.c
+++ b/tools/testing/selftests/bpf/verifier/basic.c
@@ -2,7 +2,7 @@
"empty prog",
.insns = {
},
- .errstr = "unknown opcode 00",
+ .errstr = "last insn is not an exit or jmp",
.result = REJECT,
},
{
diff --git a/tools/testing/selftests/bpf/verifier/bounds.c b/tools/testing/selftests/bpf/verifier/bounds.c
index 4d6645f2874c..dac40de3f868 100644
--- a/tools/testing/selftests/bpf/verifier/bounds.c
+++ b/tools/testing/selftests/bpf/verifier/bounds.c
@@ -557,3 +557,149 @@
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
+{
+ "bounds check for reg = 0, reg xor 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 1),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds check for reg32 = 0, reg32 xor 1",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV32_IMM(BPF_REG_1, 0),
+ BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 1),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds check for reg = 2, reg xor 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 2),
+ BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds check for reg = any, reg xor 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+ .errstr_unpriv = "invalid access to map value",
+},
+{
+ "bounds check for reg32 = any, reg32 xor 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3),
+ BPF_JMP32_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = REJECT,
+ .errstr = "invalid access to map value",
+ .errstr_unpriv = "invalid access to map value",
+},
+{
+ "bounds check for reg > 0, reg xor 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_1, 0, 3),
+ BPF_ALU64_IMM(BPF_XOR, BPF_REG_1, 3),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
+{
+ "bounds check for reg32 > 0, reg32 xor 3",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP32_IMM(BPF_JLE, BPF_REG_1, 0, 3),
+ BPF_ALU32_IMM(BPF_XOR, BPF_REG_1, 3),
+ BPF_JMP32_IMM(BPF_JGE, BPF_REG_1, 0, 1),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_8b = { 3 },
+ .result = ACCEPT,
+},
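The pattern behind these tests: XOR of two known constants yields a known constant the verifier can bound (0 ^ 1 == 1, 2 ^ 3 == 1), while XOR with an unknown value stays unknown, which is why the "reg = any" variants must be rejected. A plain-C model of the accepted case:

#include <assert.h>

int main(void)
{
	unsigned long long r1 = 2;

	r1 ^= 3;	/* known constant: bounds collapse to [1, 1] */
	assert(r1 > 0);	/* the JGT guard in "reg = 2, reg xor 3" */
	return 0;
}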
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 94258c6b5235..c4f5d909e58a 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -647,13 +647,14 @@
.result = REJECT,
},
{
- "calls: ld_abs with changing ctx data in callee",
+ "calls: subprog call with ld_abs in main prog",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_LD_ABS(BPF_B, 0),
BPF_LD_ABS(BPF_H, 0),
BPF_LD_ABS(BPF_W, 0),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5),
BPF_MOV64_REG(BPF_REG_6, BPF_REG_7),
BPF_LD_ABS(BPF_B, 0),
@@ -666,8 +667,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
- .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed",
- .result = REJECT,
+ .result = ACCEPT,
},
{
"calls: two calls with bad fallthrough",
diff --git a/tools/testing/selftests/bpf/verifier/d_path.c b/tools/testing/selftests/bpf/verifier/d_path.c
new file mode 100644
index 000000000000..b988396379a7
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/d_path.c
@@ -0,0 +1,37 @@
+{
+ "d_path accept",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
+ BPF_LD_IMM64(BPF_REG_3, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .kfunc = "dentry_open",
+},
+{
+ "d_path reject",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_6, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_6, 0),
+ BPF_LD_IMM64(BPF_REG_3, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_d_path),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "helper call is not allowed in probe",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACING,
+ .expected_attach_type = BPF_TRACE_FENTRY,
+ .kfunc = "d_path",
+},
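bpf_d_path() may only be called from an allowlisted set of attach points, which is exactly what this pair checks: dentry_open is on the list, d_path itself is not. A hedged sketch of a C-side user (vmlinux.h and the buffer size are assumptions; the attach point matches the ACCEPT test):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

SEC("fentry/dentry_open")
int BPF_PROG(trace_open, const struct path *path)
{
	char buf[64];

	/* resolves the path into buf; returns a negative value on failure */
	bpf_d_path((struct path *)path, buf, sizeof(buf));
	return 0;
}

char _license[] SEC("license") = "GPL";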
diff --git a/tools/testing/selftests/bpf/verifier/direct_packet_access.c b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
index 2c5fbe7bcd27..ae72536603fe 100644
--- a/tools/testing/selftests/bpf/verifier/direct_packet_access.c
+++ b/tools/testing/selftests/bpf/verifier/direct_packet_access.c
@@ -529,7 +529,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
+ .errstr = "invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
diff --git a/tools/testing/selftests/bpf/verifier/ld_imm64.c b/tools/testing/selftests/bpf/verifier/ld_imm64.c
index 3856dba733e9..f9297900cea6 100644
--- a/tools/testing/selftests/bpf/verifier/ld_imm64.c
+++ b/tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -51,14 +51,6 @@
.result = REJECT,
},
{
- "test5 ld_imm64",
- .insns = {
- BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
- },
- .errstr = "invalid bpf_ld_imm64 insn",
- .result = REJECT,
-},
-{
"test6 ld_imm64",
.insns = {
BPF_RAW_INSN(BPF_LD | BPF_IMM | BPF_DW, 0, 0, 0, 0),
diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c
index b52209db8250..637f9293bda8 100644
--- a/tools/testing/selftests/bpf/verifier/map_ptr.c
+++ b/tools/testing/selftests/bpf/verifier/map_ptr.c
@@ -60,3 +60,35 @@
.result = ACCEPT,
.retval = 1,
},
+{
+ "bpf_map_ptr: r = 0, map_ptr = map_ptr + r",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 4 },
+ .result = ACCEPT,
+},
+{
+ "bpf_map_ptr: r = 0, r = r + map_ptr",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LD_MAP_FD(BPF_REG_0, 0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_16b = { 4 },
+ .result = ACCEPT,
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index 056e0273bf12..006b5bd99c08 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -854,3 +854,50 @@
.errstr = "Unreleased reference",
.result = REJECT,
},
+{
+ "reference tracking: bpf_sk_release(btf_tcp_sock)",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = ACCEPT,
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "unknown func",
+},
+{
+ "reference tracking: use ptr from bpf_skc_to_tcp_sock() after release",
+ .insns = {
+ BPF_SK_LOOKUP(sk_lookup_tcp),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_skc_to_tcp_sock),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_EMIT_CALL(BPF_FUNC_sk_release),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "invalid mem access",
+ .result_unpriv = REJECT,
+ .errstr_unpriv = "unknown func",
+},
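The pointer returned by bpf_skc_to_tcp_sock() is derived from the referenced socket, so it is only valid until bpf_sk_release() on the original reference; the second test verifies a use-after-release is rejected. A hedged sketch of the valid pattern (tuple setup elided, headers assumed):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

SEC("tc")
int skc_cast(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct tcp_sock *tp;
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return 0;
	tp = bpf_skc_to_tcp_sock(sk);
	if (tp)
		bpf_printk("snd_cwnd %u", tp->snd_cwnd);
	bpf_sk_release(sk);	/* tp must not be touched past this point */
	return 0;
}

char _license[] SEC("license") = "GPL";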
diff --git a/tools/testing/selftests/bpf/verifier/regalloc.c b/tools/testing/selftests/bpf/verifier/regalloc.c
new file mode 100644
index 000000000000..4ad7e05de706
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/regalloc.c
@@ -0,0 +1,269 @@
+{
+ "regalloc basic",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 4),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc negative",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 24, 4),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=48 off=48 size=1",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc src_reg mark",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 5),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc src_reg negative",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 22, 5),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_3, BPF_REG_2, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=48 off=44 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc and spill",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 20, 7),
+ /* r0 has upper bound that should propagate into r2 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2),
+ /* r3 has lower and upper bounds */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc and spill negative",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 48, 7),
+ /* r0 has upper bound that should propagate into r2 */
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -8), /* spill r2 */
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0), /* clear r0 and r2 */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 */
+ BPF_JMP_REG(BPF_JSGE, BPF_REG_0, BPF_REG_3, 2),
+ /* r3 has lower and upper bounds */
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_3),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = REJECT,
+ .errstr = "invalid access to map value, value_size=48 off=48 size=8",
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc three regs",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_4, BPF_REG_2),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 12, 5),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_4),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc after call",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 20, 4),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_9, 0, 3),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_8),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_9),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc in callee",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_7),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 20, 5),
+ BPF_JMP_IMM(BPF_JSLT, BPF_REG_2, 0, 4),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+ "regalloc, spill, JEQ",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* spill r0 */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 0),
+ /* The verifier will walk the rest twice with r0 == 0 and r0 == map_value */
+ BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_2, 20, 0),
+ /* The verifier will walk the rest two more times with r0 == 20 and r0 == unknown */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_10, -8), /* fill r3 with map_value */
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 1), /* skip ldx if map_value == NULL */
+ /* A buggy verifier would think that r3 == 20 here */
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), /* read from map_value */
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map_hash_48b = { 4 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
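These cases target the verifier's bounds propagation across registers that share an id (copies of the same unknown scalar), including copies that round-trip through a stack spill. A plain-C model of "regalloc basic" (value size 48 comes from the fixup map):

#include <assert.h>

/* r2 is a copy of r0; a bound learned on either pins both, so the
 * combined offset plus an 8-byte load stays inside the 48-byte value.
 */
static long off_ok(long r0)
{
	long r2 = r0;		/* same verifier id as r0 */

	if (r0 > 20 || r2 < 0)
		return -1;
	return r0 + r2;		/* bounded to [0, 40]; 40 + 8 <= 48 */
}

int main(void)
{
	assert(off_ok(21) == -1);
	assert(off_ok(20) == 40);
	return 0;
}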
diff --git a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
index 47edf099a17e..508a702f0021 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/devlink_trap_policer.sh
@@ -207,7 +207,7 @@ __rate_test()
RET=0
- devlink trap policer set $DEVLINK_DEV policer $id rate 1000 burst 16
+ devlink trap policer set $DEVLINK_DEV policer $id rate 1000 burst 512
devlink trap group set $DEVLINK_DEV group l3_drops policer $id
# Send packets at highest possible rate and make sure they are dropped
@@ -220,8 +220,8 @@ __rate_test()
rate=$(trap_rate_get)
pct=$((100 * (rate - 1000) / 1000))
- ((-5 <= pct && pct <= 5))
- check_err $? "Expected rate 1000 pps, got $rate pps, which is $pct% off. Required accuracy is +-5%"
+ ((-10 <= pct && pct <= 10))
+ check_err $? "Expected rate 1000 pps, got $rate pps, which is $pct% off. Required accuracy is +-10%"
log_info "Expected rate 1000 pps, measured rate $rate pps"
drop_rate=$(policer_drop_rate_get $id)
@@ -288,35 +288,12 @@ __burst_test()
RET=0
- devlink trap policer set $DEVLINK_DEV policer $id rate 1000 burst 32
+ devlink trap policer set $DEVLINK_DEV policer $id rate 1000 burst 512
devlink trap group set $DEVLINK_DEV group l3_drops policer $id
- # Send a burst of 64 packets and make sure that about 32 are received
- # and the rest are dropped by the policer
- log_info "=== Tx burst size: 64, Policer burst size: 32 pps ==="
-
- t0_rx=$(devlink_trap_rx_packets_get blackhole_route)
- t0_drop=$(devlink_trap_policer_rx_dropped_get $id)
-
- start_traffic $h1 192.0.2.1 198.51.100.100 $rp1_mac -c 64
-
- t1_rx=$(devlink_trap_rx_packets_get blackhole_route)
- t1_drop=$(devlink_trap_policer_rx_dropped_get $id)
-
- rx=$((t1_rx - t0_rx))
- pct=$((100 * (rx - 32) / 32))
- ((-20 <= pct && pct <= 20))
- check_err $? "Expected burst size of 32 packets, got $rx packets, which is $pct% off. Required accuracy is +-20%"
- log_info "Expected burst size of 32 packets, measured burst size of $rx packets"
-
- drop=$((t1_drop - t0_drop))
- (( drop > 0 ))
- check_err $? "Expected non-zero policer drops, got 0"
- log_info "Measured policer drops of $drop packets"
-
# Send a burst of 16 packets and make sure that 16 are received
# and that none are dropped by the policer
- log_info "=== Tx burst size: 16, Policer burst size: 32 pps ==="
+ log_info "=== Tx burst size: 16, Policer burst size: 512 ==="
t0_rx=$(devlink_trap_rx_packets_get blackhole_route)
t0_drop=$(devlink_trap_policer_rx_dropped_get $id)
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
index 6d1790b5de7a..e9f8718af979 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_ets_strict.sh
@@ -147,17 +147,26 @@ switch_create()
# Make sure that ingress quotas are smaller than egress so that there is
# room for both streams of traffic to be admitted to shared buffer.
+ devlink_pool_size_thtype_save 0
devlink_pool_size_thtype_set 0 dynamic 10000000
+ devlink_pool_size_thtype_save 4
devlink_pool_size_thtype_set 4 dynamic 10000000
+ devlink_port_pool_th_save $swp1 0
devlink_port_pool_th_set $swp1 0 6
+ devlink_tc_bind_pool_th_save $swp1 1 ingress
devlink_tc_bind_pool_th_set $swp1 1 ingress 0 6
+ devlink_port_pool_th_save $swp2 0
devlink_port_pool_th_set $swp2 0 6
+ devlink_tc_bind_pool_th_save $swp2 2 ingress
devlink_tc_bind_pool_th_set $swp2 2 ingress 0 6
+ devlink_tc_bind_pool_th_save $swp3 1 egress
devlink_tc_bind_pool_th_set $swp3 1 egress 4 7
+ devlink_tc_bind_pool_th_save $swp3 2 egress
devlink_tc_bind_pool_th_set $swp3 2 egress 4 7
+ devlink_port_pool_th_save $swp3 4
devlink_port_pool_th_set $swp3 4 7
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
new file mode 100755
index 000000000000..27de3d9ed08e
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_headroom.sh
@@ -0,0 +1,379 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+ALL_TESTS="
+ test_defaults
+ test_dcb_ets
+ test_mtu
+ test_pfc
+ test_int_buf
+ test_tc_priomap
+ test_tc_mtu
+ test_tc_sizes
+ test_tc_int_buf
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=0
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+source qos_lib.sh
+
+swp=$NETIF_NO_CABLE
+
+cleanup()
+{
+ pre_cleanup
+}
+
+get_prio_pg()
+{
+ __mlnx_qos -i $swp | sed -n '/^PFC/,/^[^[:space:]]/p' |
+ grep buffer | sed 's/ \+/ /g' | cut -d' ' -f 2-
+}
+
+get_prio_pfc()
+{
+ __mlnx_qos -i $swp | sed -n '/^PFC/,/^[^[:space:]]/p' |
+ grep enabled | sed 's/ \+/ /g' | cut -d' ' -f 2-
+}
+
+get_prio_tc()
+{
+ __mlnx_qos -i $swp | sed -n '/^tc/,$p' |
+ awk '/^tc/ { TC = $2 }
+ /priority:/ { PRIO[$2]=TC }
+ END {
+ for (i in PRIO)
+ printf("%d ", PRIO[i])
+ }'
+}
+
+get_buf_size()
+{
+ local idx=$1; shift
+
+ __mlnx_qos -i $swp | grep Receive | sed 's/.*: //' | cut -d, -f $((idx + 1))
+}
+
+get_tot_size()
+{
+ __mlnx_qos -i $swp | grep Receive | sed 's/.*total_size=//'
+}
+
+check_prio_pg()
+{
+ local expect=$1; shift
+
+ local current=$(get_prio_pg)
+ test "$current" = "$expect"
+ check_err $? "prio2buffer is '$current', expected '$expect'"
+}
+
+check_prio_pfc()
+{
+ local expect=$1; shift
+
+ local current=$(get_prio_pfc)
+ test "$current" = "$expect"
+ check_err $? "prio PFC is '$current', expected '$expect'"
+}
+
+check_prio_tc()
+{
+ local expect=$1; shift
+
+ local current=$(get_prio_tc)
+ test "$current" = "$expect"
+ check_err $? "prio_tc is '$current', expected '$expect'"
+}
+
+__check_buf_size()
+{
+ local idx=$1; shift
+ local expr=$1; shift
+ local what=$1; shift
+
+ local current=$(get_buf_size $idx)
+ ((current $expr))
+ check_err $? "${what}buffer $idx size is '$current', expected '$expr'"
+ echo $current
+}
+
+check_buf_size()
+{
+ __check_buf_size "$@" > /dev/null
+}
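+
+# For example, check_buf_size 0 "> $buf0size" "MTU 3000: " evaluates the
+# shell arithmetic (( current > buf0size )) against the live size of
+# buffer 0, and logs an error prefixed with "MTU 3000: " on mismatch.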
+
+test_defaults()
+{
+ RET=0
+
+ check_prio_pg "0 0 0 0 0 0 0 0 "
+ check_prio_tc "0 0 0 0 0 0 0 0 "
+ check_prio_pfc "0 0 0 0 0 0 0 0 "
+
+ log_test "Default headroom configuration"
+}
+
+test_dcb_ets()
+{
+ RET=0
+
+ __mlnx_qos -i $swp --prio_tc=0,2,4,6,1,3,5,7 > /dev/null
+
+ check_prio_pg "0 2 4 6 1 3 5 7 "
+ check_prio_tc "0 2 4 6 1 3 5 7 "
+ check_prio_pfc "0 0 0 0 0 0 0 0 "
+
+ __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+
+ check_prio_pg "0 0 0 0 0 0 0 0 "
+ check_prio_tc "0 0 0 0 0 0 0 0 "
+
+ __mlnx_qos -i $swp --prio2buffer=1,3,5,7,0,2,4,6 &> /dev/null
+ check_fail $? "prio2buffer accepted in DCB mode"
+
+ log_test "Configuring headroom through ETS"
+}
+
+test_mtu()
+{
+ local what=$1; shift
+ local buf0size_2
+ local buf0size
+
+ RET=0
+ buf0size=$(__check_buf_size 0 "> 0")
+
+ mtu_set $swp 3000
+ buf0size_2=$(__check_buf_size 0 "> $buf0size" "MTU 3000: ")
+ mtu_restore $swp
+
+ mtu_set $swp 6000
+ check_buf_size 0 "> $buf0size_2" "MTU 6000: "
+ mtu_restore $swp
+
+ check_buf_size 0 "== $buf0size"
+
+ log_test "${what}MTU impacts buffer size"
+}
+
+test_tc_mtu()
+{
+ # In TC mode, MTU still impacts the threshold below which a buffer is
+ # not permitted to go.
+
+ tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
+ test_mtu "TC: "
+ tc qdisc delete dev $swp root
+}
+
+test_pfc()
+{
+ RET=0
+
+ __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,1,2,3 > /dev/null
+
+ local buf0size=$(get_buf_size 0)
+ local buf1size=$(get_buf_size 1)
+ local buf2size=$(get_buf_size 2)
+ local buf3size=$(get_buf_size 3)
+ check_buf_size 0 "> 0"
+ check_buf_size 1 "> 0"
+ check_buf_size 2 "> 0"
+ check_buf_size 3 "> 0"
+ check_buf_size 4 "== 0"
+ check_buf_size 5 "== 0"
+ check_buf_size 6 "== 0"
+ check_buf_size 7 "== 0"
+
+ log_test "Buffer size sans PFC"
+
+ RET=0
+
+ __mlnx_qos -i $swp --pfc=0,0,0,0,0,1,1,1 --cable_len=0 > /dev/null
+
+ check_prio_pg "0 0 0 0 0 1 2 3 "
+ check_prio_pfc "0 0 0 0 0 1 1 1 "
+ check_buf_size 0 "== $buf0size"
+ check_buf_size 1 "> $buf1size"
+ check_buf_size 2 "> $buf2size"
+ check_buf_size 3 "> $buf3size"
+
+ local buf1size=$(get_buf_size 1)
+ check_buf_size 2 "== $buf1size"
+ check_buf_size 3 "== $buf1size"
+
+ log_test "PFC: Cable length 0"
+
+ RET=0
+
+ __mlnx_qos -i $swp --pfc=0,0,0,0,0,1,1,1 --cable_len=1000 > /dev/null
+
+ check_buf_size 0 "== $buf0size"
+ check_buf_size 1 "> $buf1size"
+ check_buf_size 2 "> $buf1size"
+ check_buf_size 3 "> $buf1size"
+
+ log_test "PFC: Cable length 1000"
+
+ RET=0
+
+ __mlnx_qos -i $swp --pfc=0,0,0,0,0,0,0,0 --cable_len=0 > /dev/null
+ __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+
+ check_prio_pg "0 0 0 0 0 0 0 0 "
+ check_prio_tc "0 0 0 0 0 0 0 0 "
+ check_buf_size 0 "> 0"
+ check_buf_size 1 "== 0"
+ check_buf_size 2 "== 0"
+ check_buf_size 3 "== 0"
+ check_buf_size 4 "== 0"
+ check_buf_size 5 "== 0"
+ check_buf_size 6 "== 0"
+ check_buf_size 7 "== 0"
+
+ log_test "PFC: Restore defaults"
+}
+
+test_tc_priomap()
+{
+ RET=0
+
+ __mlnx_qos -i $swp --prio_tc=0,1,2,3,4,5,6,7 > /dev/null
+ check_prio_pg "0 1 2 3 4 5 6 7 "
+
+ tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
+ check_prio_pg "0 0 0 0 0 0 0 0 "
+
+ __mlnx_qos -i $swp --prio2buffer=1,3,5,7,0,2,4,6 > /dev/null
+ check_prio_pg "1 3 5 7 0 2 4 6 "
+
+ tc qdisc delete dev $swp root
+ check_prio_pg "0 1 2 3 4 5 6 7 "
+
+ # Clean up.
+ tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
+ __mlnx_qos -i $swp --prio2buffer=0,0,0,0,0,0,0,0 > /dev/null
+ tc qdisc delete dev $swp root
+ __mlnx_qos -i $swp --prio_tc=0,0,0,0,0,0,0,0 > /dev/null
+
+ log_test "TC: priomap"
+}
+
+test_tc_sizes()
+{
+ local cell_size=$(devlink_cell_size_get)
+ local size=$((cell_size * 1000))
+
+ RET=0
+
+ __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 &> /dev/null
+ check_fail $? "buffer_size should fail before qdisc is added"
+
+ tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
+
+ __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ check_err $? "buffer_size should pass after qdisc is added"
+ check_buf_size 0 "== $size" "set size: "
+
+ mtu_set $swp 6000
+ check_buf_size 0 "== $size" "set MTU: "
+ mtu_restore $swp
+
+ __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+
+ # After replacing the qdisc for the same kind, buffer_size still has to
+ # work.
+ tc qdisc replace dev $swp root handle 1: bfifo limit 1M
+
+ __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ check_buf_size 0 "== $size" "post replace, set size: "
+
+ __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+
+ # Likewise after replacing for a different kind.
+ tc qdisc replace dev $swp root handle 2: prio bands 8
+
+ __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ check_buf_size 0 "== $size" "post replace different kind, set size: "
+
+ tc qdisc delete dev $swp root
+
+ __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 &> /dev/null
+ check_fail $? "buffer_size should fail after qdisc is deleted"
+
+ log_test "TC: buffer size"
+}
+
+test_int_buf()
+{
+ local what=$1; shift
+
+ RET=0
+
+ local buf0size=$(get_buf_size 0)
+ local tot_size=$(get_tot_size)
+
+ # Size of internal buffer and buffer 9.
+ local dsize=$((tot_size - buf0size))
+
+ tc qdisc add dev $swp clsact
+ tc filter add dev $swp egress matchall skip_sw action mirred egress mirror dev $swp
+
+ local buf0size_2=$(get_buf_size 0)
+ local tot_size_2=$(get_tot_size)
+ local dsize_2=$((tot_size_2 - buf0size_2))
+
+ # Egress SPAN should have increased the "invisible" buffer allocation.
+ ((dsize_2 > dsize))
+ check_err $? "Invisible buffers account for '$dsize_2', expected '> $dsize'"
+
+ mtu_set $swp 3000
+
+ local buf0size_3=$(get_buf_size 0)
+ local tot_size_3=$(get_tot_size)
+ local dsize_3=$((tot_size_3 - buf0size_3))
+
+ # An MTU change might resize buffer 0, which will show in the total, but
+ # the hidden buffers should stay the same size.
+ ((dsize_3 == dsize_2))
+ check_err $? "MTU change: Invisible buffers account for '$dsize_3', expected '== $dsize_2'"
+
+ mtu_restore $swp
+ tc qdisc del dev $swp clsact
+
+ # After SPAN removal, hidden buffers should be back to the original sizes.
+ local buf0size_4=$(get_buf_size 0)
+ local tot_size_4=$(get_tot_size)
+ local dsize_4=$((tot_size_4 - buf0size_4))
+ ((dsize_4 == dsize))
+ check_err $? "SPAN removed: Invisible buffers account for '$dsize_4', expected '== $dsize'"
+
+ log_test "${what}internal buffer size"
+}
+
+test_tc_int_buf()
+{
+ local cell_size=$(devlink_cell_size_get)
+ local size=$((cell_size * 1000))
+
+ tc qdisc replace dev $swp root handle 1: bfifo limit 1.5M
+ test_int_buf "TC: "
+
+ __mlnx_qos -i $swp --buffer_size=$size,0,0,0,0,0,0,0 > /dev/null
+ test_int_buf "TC+buffsize: "
+
+ __mlnx_qos -i $swp --buffer_size=0,0,0,0,0,0,0,0 > /dev/null
+ tc qdisc delete dev $swp root
+}
+
+trap cleanup EXIT
+
+bail_on_lldpad
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
index faa51012cdac..0bf76f13c030 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_lib.sh
@@ -82,3 +82,17 @@ bail_on_lldpad()
fi
fi
}
+
+__mlnx_qos()
+{
+ local err
+
+ mlnx_qos "$@" 2>/dev/null
+ err=$?
+
+ if ((err)); then
+ echo "Error ($err) in mlnx_qos $@" >/dev/stderr
+ fi
+
+ return $err
+}
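+
+# Usage sketch: __mlnx_qos -i $swp --pfc=0,1,0,0,0,0,0,0 >/dev/null behaves
+# like plain mlnx_qos, except that the tool's own stderr is silenced and a
+# one-line diagnostic naming the failed invocation is emitted on non-zero
+# exit.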
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
index b025daea062d..8f164c80e215 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
@@ -145,12 +145,17 @@ switch_create()
# Make sure that ingress quotas are smaller than egress so that there is
# room for both streams of traffic to be admitted to shared buffer.
+ devlink_port_pool_th_save $swp1 0
devlink_port_pool_th_set $swp1 0 5
+ devlink_tc_bind_pool_th_save $swp1 0 ingress
devlink_tc_bind_pool_th_set $swp1 0 ingress 0 5
+ devlink_port_pool_th_save $swp2 0
devlink_port_pool_th_set $swp2 0 5
+ devlink_tc_bind_pool_th_save $swp2 1 ingress
devlink_tc_bind_pool_th_set $swp2 1 ingress 0 5
+ devlink_port_pool_th_save $swp3 4
devlink_port_pool_th_set $swp3 4 12
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
new file mode 100755
index 000000000000..4d900bc1f76c
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/mlxsw/qos_pfc.sh
@@ -0,0 +1,403 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# This test injects a 10-MB burst of traffic with VLAN tag and 802.1p priority
+# of 1. This stream is consistently prioritized as priority 1, is put to PG
+# buffer 1, and scheduled at TC 1.
+#
+# - the stream first ingresses through $swp1, where it is forwarded to $swp3
+#
+# - then it ingresses through $swp4. Here it is put to a lossless buffer that
+#   is assigned to a small pool ("PFC pool"). The traffic is forwarded to
+#   $swp2, which is shaped, so the PFC pool eventually fills, the headroom
+#   then fills as well, and $swp3 is paused.
+#
+# - since $swp3 now can't send traffic, the traffic ingressing $swp1 is held
+#   in a pool ("overflow pool"). The overflow pool needs to be large enough to
+# contain the whole burst.
+#
+# - eventually the PFC pool sends some traffic out, the headroom therefore
+#   drains into the pool, and $swp3 is unpaused again. This way the traffic is
+# gradually forwarded from the overflow pool, through the PFC pool, out of
+# $swp2, and eventually to $h2.
+#
+# - if PFC works, all lossless flow packets that ingress through $swp1 should
+# also be seen ingressing $h2. If it doesn't, there will be drops due to
+# discrepancy between the speeds of $swp1 and $h2.
+#
+# - it should all play out relatively quickly, so that SLL and HLL will not
+# cause drops.
+#
+# +-----------------------+
+# | H1 |
+# | + $h1.111 |
+# | | 192.0.2.33/28 |
+# | | |
+# | + $h1 |
+# +---|-------------------+ +--------------------+
+# | | |
+# +---|----------------------|--------------------|---------------------------+
+# | + $swp1 $swp3 + + $swp4 |
+# | | iPOOL1 iPOOL0 | | iPOOL2 |
+# | | ePOOL4 ePOOL5 | | ePOOL4 |
+# | | 1Gbps | | 1Gbps |
+# | | PFC:enabled=1 | | PFC:enabled=1 |
+# | +-|----------------------|-+ +-|------------------------+ |
+# | | + $swp1.111 $swp3.111 + | | + $swp4.111 | |
+# | | | | | |
+# | | BR1 | | BR2 | |
+# | | | | | |
+# | | | | + $swp2.111 | |
+# | +--------------------------+ +---------|----------------+ |
+# | | |
+# | iPOOL0: 500KB dynamic | |
+# | iPOOL1: 10MB static | |
+# | iPOOL2: 1MB static + $swp2 |
+# | ePOOL4: 500KB dynamic | iPOOL0 |
+# | ePOOL5: 10MB static | ePOOL6 |
+# | ePOOL6: "infinite" static | 200Mbps shaper |
+# +-------------------------------------------------------|-------------------+
+# |
+# +---|-------------------+
+# | + $h2 H2 |
+# | | |
+# | + $h2.111 |
+# | 192.0.2.34/28 |
+# +-----------------------+
+#
+# iPOOL0+ePOOL4 is a helper pool for control traffic etc.
+# iPOOL1+ePOOL5 are overflow pools.
+# iPOOL2+ePOOL6 are PFC pools.
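+#
+# Rough sizing logic: the burst is 10MB, so the overflow pools (iPOOL1 and
+# ePOOL5) are sized to hold all of it, while the 1MB ingress PFC pool
+# (iPOOL2) is deliberately small, so that it fills up quickly and triggers
+# PAUSE towards $swp3.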
+
+ALL_TESTS="
+ ping_ipv4
+ test_qos_pfc
+"
+
+lib_dir=$(dirname $0)/../../../net/forwarding
+
+NUM_NETIFS=6
+source $lib_dir/lib.sh
+source $lib_dir/devlink_lib.sh
+source qos_lib.sh
+
+_1KB=1000
+_100KB=$((100 * _1KB))
+_500KB=$((500 * _1KB))
+_1MB=$((1000 * _1KB))
+_10MB=$((10 * _1MB))
+
+h1_create()
+{
+ simple_if_init $h1
+ mtu_set $h1 10000
+
+ vlan_create $h1 111 v$h1 192.0.2.33/28
+}
+
+h1_destroy()
+{
+ vlan_destroy $h1 111
+
+ mtu_restore $h1
+ simple_if_fini $h1
+}
+
+h2_create()
+{
+ simple_if_init $h2
+ mtu_set $h2 10000
+
+ vlan_create $h2 111 v$h2 192.0.2.34/28
+}
+
+h2_destroy()
+{
+ vlan_destroy $h2 111
+
+ mtu_restore $h2
+ simple_if_fini $h2
+}
+
+switch_create()
+{
+ # pools
+ # -----
+
+ devlink_pool_size_thtype_save 0
+ devlink_pool_size_thtype_save 4
+ devlink_pool_size_thtype_save 1
+ devlink_pool_size_thtype_save 5
+ devlink_pool_size_thtype_save 2
+ devlink_pool_size_thtype_save 6
+
+ devlink_port_pool_th_save $swp1 1
+ devlink_port_pool_th_save $swp2 6
+ devlink_port_pool_th_save $swp3 5
+ devlink_port_pool_th_save $swp4 2
+
+ devlink_tc_bind_pool_th_save $swp1 1 ingress
+ devlink_tc_bind_pool_th_save $swp2 1 egress
+ devlink_tc_bind_pool_th_save $swp3 1 egress
+ devlink_tc_bind_pool_th_save $swp4 1 ingress
+
+ # Control traffic pools. Just reduce the size. Keep them dynamic so that
+ # we don't need to change all the uninteresting quotas.
+ devlink_pool_size_thtype_set 0 dynamic $_500KB
+ devlink_pool_size_thtype_set 4 dynamic $_500KB
+
+ # Overflow pools.
+ devlink_pool_size_thtype_set 1 static $_10MB
+ devlink_pool_size_thtype_set 5 static $_10MB
+
+ # PFC pools. As per the writ, the size of egress PFC pool should be
+ # infinite, but actually it just needs to be large enough to not matter
+ # in practice, so reuse the 10MB limit.
+ devlink_pool_size_thtype_set 2 static $_1MB
+ devlink_pool_size_thtype_set 6 static $_10MB
+
+ # $swp1
+ # -----
+
+ ip link set dev $swp1 up
+ mtu_set $swp1 10000
+ vlan_create $swp1 111
+ ip link set dev $swp1.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp1 1 $_10MB
+ devlink_tc_bind_pool_th_set $swp1 1 ingress 1 $_10MB
+
+ # Configure qdisc so that we can configure PG and therefore pool
+ # assignment.
+ tc qdisc replace dev $swp1 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ __mlnx_qos -i $swp1 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
+
+ # $swp2
+ # -----
+
+ ip link set dev $swp2 up
+ mtu_set $swp2 10000
+ vlan_create $swp2 111
+ ip link set dev $swp2.111 type vlan egress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp2 6 $_10MB
+ devlink_tc_bind_pool_th_set $swp2 1 egress 6 $_10MB
+
+ # prio 0->TC0 (band 7), 1->TC1 (band 6). TC1 is shaped.
+ tc qdisc replace dev $swp2 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ tc qdisc replace dev $swp2 parent 1:7 handle 17: \
+ tbf rate 200Mbit burst 131072 limit 1M
+
+ # $swp3
+ # -----
+
+ ip link set dev $swp3 up
+ mtu_set $swp3 10000
+ vlan_create $swp3 111
+ ip link set dev $swp3.111 type vlan egress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp3 5 $_10MB
+ devlink_tc_bind_pool_th_set $swp3 1 egress 5 $_10MB
+
+ # prio 0->TC0 (band 7), 1->TC1 (band 6)
+ tc qdisc replace dev $swp3 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+
+ # Need to enable PFC so that PAUSE takes effect. Therefore need to put
+ # the lossless prio into a buffer of its own. Don't bother with buffer
+ # sizes though, there is not going to be any pressure in the "backward"
+ # direction.
+ __mlnx_qos -i $swp3 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
+ __mlnx_qos -i $swp3 --pfc=0,1,0,0,0,0,0,0 >/dev/null
+
+ # $swp4
+ # -----
+
+ ip link set dev $swp4 up
+ mtu_set $swp4 10000
+ vlan_create $swp4 111
+ ip link set dev $swp4.111 type vlan ingress-qos-map 0:0 1:1
+
+ devlink_port_pool_th_set $swp4 2 $_1MB
+ devlink_tc_bind_pool_th_set $swp4 1 ingress 2 $_1MB
+
+ # Configure qdisc so that we can hand-tune headroom.
+ tc qdisc replace dev $swp4 root handle 1: \
+ ets bands 8 strict 8 priomap 7 6
+ __mlnx_qos -i $swp4 --prio2buffer=0,1,0,0,0,0,0,0 >/dev/null
+ __mlnx_qos -i $swp4 --pfc=0,1,0,0,0,0,0,0 >/dev/null
+ # PG0 will get autoconfigured to Xoff. Give PG1 an arbitrary 100K, which
+ # after subtracting 2*MTU leaves about 80K of delay provision.
+ __mlnx_qos -i $swp4 --buffer_size=0,$_100KB,0,0,0,0,0,0 >/dev/null
+
+ # bridges
+ # -------
+
+ ip link add name br1 type bridge vlan_filtering 0
+ ip link set dev $swp1.111 master br1
+ ip link set dev $swp3.111 master br1
+ ip link set dev br1 up
+
+ ip link add name br2 type bridge vlan_filtering 0
+ ip link set dev $swp2.111 master br2
+ ip link set dev $swp4.111 master br2
+ ip link set dev br2 up
+}
+
+switch_destroy()
+{
+ # Do this first so that we can reset the limits to values that are only
+ # valid for the original static / dynamic setting.
+ devlink_pool_size_thtype_restore 6
+ devlink_pool_size_thtype_restore 5
+ devlink_pool_size_thtype_restore 4
+ devlink_pool_size_thtype_restore 2
+ devlink_pool_size_thtype_restore 1
+ devlink_pool_size_thtype_restore 0
+
+ # bridges
+ # -------
+
+ ip link set dev br2 down
+ ip link set dev $swp4.111 nomaster
+ ip link set dev $swp2.111 nomaster
+ ip link del dev br2
+
+ ip link set dev br1 down
+ ip link set dev $swp3.111 nomaster
+ ip link set dev $swp1.111 nomaster
+ ip link del dev br1
+
+ # $swp4
+ # -----
+
+ __mlnx_qos -i $swp4 --buffer_size=0,0,0,0,0,0,0,0 >/dev/null
+ __mlnx_qos -i $swp4 --pfc=0,0,0,0,0,0,0,0 >/dev/null
+ __mlnx_qos -i $swp4 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ tc qdisc del dev $swp4 root
+
+ devlink_tc_bind_pool_th_restore $swp4 1 ingress
+ devlink_port_pool_th_restore $swp4 2
+
+ vlan_destroy $swp4 111
+ mtu_restore $swp4
+ ip link set dev $swp4 down
+
+ # $swp3
+ # -----
+
+ __mlnx_qos -i $swp3 --pfc=0,0,0,0,0,0,0,0 >/dev/null
+ __mlnx_qos -i $swp3 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ tc qdisc del dev $swp3 root
+
+ devlink_tc_bind_pool_th_restore $swp3 1 egress
+ devlink_port_pool_th_restore $swp3 5
+
+ vlan_destroy $swp3 111
+ mtu_restore $swp3
+ ip link set dev $swp3 down
+
+ # $swp2
+ # -----
+
+ tc qdisc del dev $swp2 parent 1:7
+ tc qdisc del dev $swp2 root
+
+ devlink_tc_bind_pool_th_restore $swp2 1 egress
+ devlink_port_pool_th_restore $swp2 6
+
+ vlan_destroy $swp2 111
+ mtu_restore $swp2
+ ip link set dev $swp2 down
+
+ # $swp1
+ # -----
+
+ __mlnx_qos -i $swp1 --prio2buffer=0,0,0,0,0,0,0,0 >/dev/null
+ tc qdisc del dev $swp1 root
+
+ devlink_tc_bind_pool_th_restore $swp1 1 ingress
+ devlink_port_pool_th_restore $swp1 1
+
+ vlan_destroy $swp1 111
+ mtu_restore $swp1
+ ip link set dev $swp1 down
+}
+
+setup_prepare()
+{
+ h1=${NETIFS[p1]}
+ swp1=${NETIFS[p2]}
+
+ swp2=${NETIFS[p3]}
+ h2=${NETIFS[p4]}
+
+ swp3=${NETIFS[p5]}
+ swp4=${NETIFS[p6]}
+
+ h2mac=$(mac_get $h2)
+
+ vrf_prepare
+
+ h1_create
+ h2_create
+ switch_create
+}
+
+cleanup()
+{
+ pre_cleanup
+
+ switch_destroy
+ h2_destroy
+ h1_destroy
+
+ vrf_cleanup
+}
+
+ping_ipv4()
+{
+ ping_test $h1 192.0.2.34
+}
+
+test_qos_pfc()
+{
+ RET=0
+
+ # 10M pool, each packet is 8K of payload + headers, 8050 bytes on the wire
+ local pkts=$((_10MB / 8050))
+ local size=$((pkts * 8050))
+ local in0=$(ethtool_stats_get $swp1 rx_octets_prio_1)
+ local out0=$(ethtool_stats_get $swp2 tx_octets_prio_1)
+
+ $MZ $h1 -p 8000 -Q 1:111 -A 192.0.2.33 -B 192.0.2.34 \
+ -a own -b $h2mac -c $pkts -t udp -q
+ sleep 2
+
+ local in1=$(ethtool_stats_get $swp1 rx_octets_prio_1)
+ local out1=$(ethtool_stats_get $swp2 tx_octets_prio_1)
+
+ local din=$((in1 - in0))
+ local dout=$((out1 - out0))
+
+ local pct_in=$((din * 100 / size))
+
+ ((pct_in > 95 && pct_in < 105))
+ check_err $? "Relative ingress out of expected bounds, $pct_in% should be 100%"
+
+ ((dout == din))
+ check_err $? "$((din - dout)) bytes out of $din ingressed got lost"
+
+ log_test "PFC"
+}
+
+trap cleanup EXIT
+
+bail_on_lldpad
+setup_prepare
+setup_wait
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
index 94c37124a840..af64bc9ea8ab 100755
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_ets.sh
@@ -27,11 +27,17 @@ switch_create()
# amount of traffic that is admitted to the shared buffers. This makes
# sure that there is always enough traffic of all types to select from
# for the DWRR process.
+ devlink_port_pool_th_save $swp1 0
devlink_port_pool_th_set $swp1 0 12
+ devlink_tc_bind_pool_th_save $swp1 0 ingress
devlink_tc_bind_pool_th_set $swp1 0 ingress 0 12
+ devlink_port_pool_th_save $swp2 4
devlink_port_pool_th_set $swp2 4 12
+ devlink_tc_bind_pool_th_save $swp2 7 egress
devlink_tc_bind_pool_th_set $swp2 7 egress 4 5
+ devlink_tc_bind_pool_th_save $swp2 6 egress
devlink_tc_bind_pool_th_set $swp2 6 egress 4 5
+ devlink_tc_bind_pool_th_save $swp2 5 egress
devlink_tc_bind_pool_th_set $swp2 5 egress 4 5
# Note: sch_ets_core.sh uses VLAN ingress-qos-map to assign packet
diff --git a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
index 517297a14ecf..b0cb1aaffdda 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/sch_red_core.sh
@@ -208,6 +208,7 @@ switch_create()
ip link set dev br2_11 up
local size=$(devlink_pool_size_thtype 0 | cut -d' ' -f 1)
+ devlink_port_pool_th_save $swp3 8
devlink_port_pool_th_set $swp3 8 $size
}
diff --git a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
index 4b96561c462f..3e3e06ea5703 100644
--- a/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
+++ b/tools/testing/selftests/drivers/net/mlxsw/tc_police_scale.sh
@@ -24,6 +24,13 @@ tc_police_switch_destroy()
simple_if_fini $swp1
}
+tc_police_addr()
+{
+ local num=$1; shift
+
+ printf "2001:db8:1::%x" $num
+}
+
tc_police_rules_create()
{
local count=$1; shift
@@ -34,8 +41,9 @@ tc_police_rules_create()
for ((i = 0; i < count; ++i)); do
cat >> $TC_POLICE_BATCH_FILE <<-EOF
filter add dev $swp1 ingress \
- prot ip \
- flower skip_sw \
+ prot ipv6 \
+ pref 1000 \
+ flower skip_sw dst_ip $(tc_police_addr $i) \
action police rate 10mbit burst 100k \
conform-exceed drop/ok
EOF
diff --git a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
index de4b32fc4223..40909c254365 100755
--- a/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/devlink.sh
@@ -23,6 +23,27 @@ fw_flash_test()
devlink dev flash $DL_HANDLE file dummy
check_err $? "Failed to flash with status updates on"
+ devlink dev flash $DL_HANDLE file dummy component fw.mgmt
+ check_err $? "Failed to flash with component attribute"
+
+ devlink dev flash $DL_HANDLE file dummy overwrite settings
+ check_fail $? "Flash with overwrite settings should be rejected"
+
+ echo "1"> $DEBUGFS_DIR/fw_update_overwrite_mask
+ check_err $? "Failed to change allowed overwrite mask"
+
+ devlink dev flash $DL_HANDLE file dummy overwrite settings
+ check_err $? "Failed to flash with settings overwrite enabled"
+
+ devlink dev flash $DL_HANDLE file dummy overwrite identifiers
+ check_fail $? "Flash with overwrite settings should be identifiers"
+
+ echo "3"> $DEBUGFS_DIR/fw_update_overwrite_mask
+ check_err $? "Failed to change allowed overwrite mask"
+
+ devlink dev flash $DL_HANDLE file dummy overwrite identifiers overwrite settings
+ check_err $? "Failed to flash with settings and identifiers overwrite enabled"
+
echo "n"> $DEBUGFS_DIR/fw_update_status
check_err $? "Failed to disable status updates"
diff --git a/tools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh b/tools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh
new file mode 100755
index 000000000000..25c896b9e2eb
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/netdevsim/ethtool-pause.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+
+NSIM_ID=$((RANDOM % 1024))
+NSIM_DEV_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_ID
+NSIM_DEV_DFS=/sys/kernel/debug/netdevsim/netdevsim$NSIM_ID/ports/0
+NSIM_NETDEV=
+num_passes=0
+num_errors=0
+
+function cleanup_nsim {
+ if [ -e $NSIM_DEV_SYS ]; then
+ echo $NSIM_ID > /sys/bus/netdevsim/del_device
+ fi
+}
+
+function cleanup {
+ cleanup_nsim
+}
+
+trap cleanup EXIT
+
+function get_netdev_name {
+ local -n old=$1
+
+ new=$(ls /sys/class/net)
+
+ for netdev in $new; do
+ for check in $old; do
+ [ $netdev == $check ] && break
+ done
+
+ if [ $netdev != $check ]; then
+ echo $netdev
+ break
+ fi
+ done
+}
+
+function check {
+ local code=$1
+ local str=$2
+ local exp_str=$3
+
+ if [ $code -ne 0 ]; then
+ ((num_errors++))
+ return
+ fi
+
+ if [ "$str" != "$exp_str" ]; then
+ echo -e "Expected: '$exp_str', got '$str'"
+ ((num_errors++))
+ return
+ fi
+
+ ((num_passes++))
+}
+
+# Bail if ethtool is too old
+if ! ethtool -h | grep include-stat >/dev/null 2>&1; then
+ echo "SKIP: No --include-statistics support in ethtool"
+ exit 4
+fi
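+
+# Per the checks below, the simulated driver reports fixed pause counters
+# (2 tx / 1 rx frames) once the corresponding report_stats_* debugfs knob
+# is enabled; ethtool -I (--include-statistics) must then expose exactly
+# the opted-in counters in its JSON output.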
+
+# Make a netdevsim
+old_netdevs=$(ls /sys/class/net)
+
+modprobe netdevsim
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+
+NSIM_NETDEV=`get_netdev_name old_netdevs`
+
+set -o pipefail
+
+echo n > $NSIM_DEV_DFS/ethtool/pause/report_stats_tx
+echo n > $NSIM_DEV_DFS/ethtool/pause/report_stats_rx
+
+s=$(ethtool --json -a $NSIM_NETDEV | jq '.[].statistics')
+check $? "$s" "null"
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics')
+check $? "$s" "{}"
+
+echo y > $NSIM_DEV_DFS/ethtool/pause/report_stats_tx
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics | length')
+check $? "$s" "1"
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics.tx_pause_frames')
+check $? "$s" "2"
+
+echo y > $NSIM_DEV_DFS/ethtool/pause/report_stats_rx
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics | length')
+check $? "$s" "2"
+
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics.rx_pause_frames')
+check $? "$s" "1"
+s=$(ethtool -I --json -a $NSIM_NETDEV | jq '.[].statistics.tx_pause_frames')
+check $? "$s" "2"
+
+if [ $num_errors -eq 0 ]; then
+ echo "PASSED all $((num_passes)) checks"
+ exit 0
+else
+ echo "FAILED $num_errors/$((num_errors+num_passes)) checks"
+ exit 1
+fi
diff --git a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
index ba1d53b9f815..1b08e042cf94 100644..100755
--- a/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
+++ b/tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
@@ -7,6 +7,7 @@ NSIM_DEV_SYS=/sys/bus/netdevsim/devices/netdevsim$NSIM_ID
NSIM_DEV_DFS=/sys/kernel/debug/netdevsim/netdevsim$NSIM_ID
NSIM_NETDEV=
HAS_ETHTOOL=
+STATIC_ENTRIES=
EXIT_STATUS=0
num_cases=0
num_errors=0
@@ -193,6 +194,21 @@ function check_tables {
sleep 0.02
((retries--))
done
+
+ if [ -n "$HAS_ETHTOOL" -a -n "${STATIC_ENTRIES[0]}" ]; then
+ fail=0
+ for i in "${!STATIC_ENTRIES[@]}"; do
+ pp_expected=`pre_ethtool ${STATIC_ENTRIES[i]}`
+ cnt=$(ethtool --show-tunnels $NSIM_NETDEV | grep -c "$pp_expected")
+ if [ $cnt -ne 1 ]; then
+ err_cnt "ethtool static entry: $pfx - $msg"
+ echo " check_table: ethtool does not contain '$pp_expected'"
+ ethtool --show-tunnels $NSIM_NETDEV
+ fail=1
+ fi
+ done
+ [ $fail == 0 ] && pass_cnt
+ fi
}
function print_table {
@@ -775,6 +791,157 @@ for port in 0 1; do
exp1=( 0 0 0 0 )
done
+cleanup_nsim
+
+# shared port tables
+pfx="table sharing"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+
+echo 0 > $NSIM_DEV_DFS/udp_ports_open_only
+echo 1 > $NSIM_DEV_DFS/udp_ports_sleep
+echo 1 > $NSIM_DEV_DFS/udp_ports_shared
+
+old_netdevs=$(ls /sys/class/net)
+echo 1 > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV=`get_netdev_name old_netdevs`
+old_netdevs=$(ls /sys/class/net)
+echo 2 > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV2=`get_netdev_name old_netdevs`
+
+msg="VxLAN v4 devices"
+exp0=( `mke 4789 1` 0 0 0 )
+exp1=( 0 0 0 0 )
+new_vxlan vxlan0 4789 $NSIM_NETDEV
+new_vxlan vxlan1 4789 $NSIM_NETDEV2
+
+msg="VxLAN v4 devices go down"
+exp0=( 0 0 0 0 )
+ifconfig vxlan1 down
+ifconfig vxlan0 down
+check_tables
+
+for ifc in vxlan0 vxlan1; do
+ ifconfig $ifc up
+done
+
+msg="VxLAN v6 device"
+exp0=( `mke 4789 1` `mke 4790 1` 0 0 )
+new_vxlan vxlanC 4790 $NSIM_NETDEV 6
+
+msg="Geneve device"
+exp1=( `mke 6081 2` 0 0 0 )
+new_geneve gnv0 6081
+
+msg="NIC device goes down"
+ifconfig $NSIM_NETDEV down
+check_tables
+
+msg="NIC device goes up again"
+ifconfig $NSIM_NETDEV up
+check_tables
+
+for i in `seq 2`; do
+ msg="turn feature off - 1, rep $i"
+ ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload off
+ check_tables
+
+ msg="turn feature off - 2, rep $i"
+ exp0=( 0 0 0 0 )
+ exp1=( 0 0 0 0 )
+ ethtool -K $NSIM_NETDEV2 rx-udp_tunnel-port-offload off
+ check_tables
+
+ msg="turn feature on - 1, rep $i"
+ exp0=( `mke 4789 1` `mke 4790 1` 0 0 )
+ exp1=( `mke 6081 2` 0 0 0 )
+ ethtool -K $NSIM_NETDEV rx-udp_tunnel-port-offload on
+ check_tables
+
+ msg="turn feature on - 2, rep $i"
+ ethtool -K $NSIM_NETDEV2 rx-udp_tunnel-port-offload on
+ check_tables
+done
+
+msg="tunnels destroyed 1"
+cleanup_tuns
+exp0=( 0 0 0 0 )
+exp1=( 0 0 0 0 )
+check_tables
+
+overflow_table0 "overflow NIC table"
+
+msg="re-add a port"
+
+echo 2 > $NSIM_DEV_SYS/del_port
+echo 2 > $NSIM_DEV_SYS/new_port
+check_tables
+
+msg="replace VxLAN in overflow table"
+exp0=( `mke 10000 1` `mke 10004 1` `mke 10002 1` `mke 10003 1` )
+del_dev vxlan1
+
+msg="vacate VxLAN in overflow table"
+exp0=( `mke 10000 1` `mke 10004 1` 0 `mke 10003 1` )
+del_dev vxlan2
+
+echo 1 > $NSIM_DEV_DFS/ports/$port/udp_ports_reset
+check_tables
+
+msg="tunnels destroyed 2"
+cleanup_tuns
+exp0=( 0 0 0 0 )
+exp1=( 0 0 0 0 )
+check_tables
+
+echo 1 > $NSIM_DEV_SYS/del_port
+echo 2 > $NSIM_DEV_SYS/del_port
+
+cleanup_nsim
+
+# Static IANA port
+pfx="static IANA vxlan"
+
+echo $NSIM_ID > /sys/bus/netdevsim/new_device
+echo 0 > $NSIM_DEV_SYS/del_port
+
+echo 1 > $NSIM_DEV_DFS/udp_ports_static_iana_vxlan
+STATIC_ENTRIES=( `mke 4789 1` )
+
+port=1
+old_netdevs=$(ls /sys/class/net)
+echo $port > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV=`get_netdev_name old_netdevs`
+
+msg="check empty"
+exp0=( 0 0 0 0 )
+exp1=( 0 0 0 0 )
+check_tables
+
+msg="add on static port"
+new_vxlan vxlan0 4789 $NSIM_NETDEV
+new_vxlan vxlan1 4789 $NSIM_NETDEV
+
+msg="add on different port"
+exp0=( `mke 4790 1` 0 0 0 )
+new_vxlan vxlan2 4790 $NSIM_NETDEV
+
+cleanup_tuns
+
+msg="tunnels destroyed"
+exp0=( 0 0 0 0 )
+exp1=( 0 0 0 0 )
+check_tables
+
+msg="different type"
+new_geneve gnv0 4789
+
+cleanup_tuns
+cleanup_nsim
+
+# END
+
modprobe -r netdevsim
if [ $num_errors -eq 0 ]; then
diff --git a/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
new file mode 100755
index 000000000000..beee0d5646a6
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/ocelot/tc_flower_chains.sh
@@ -0,0 +1,316 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+# Copyright 2020 NXP Semiconductors
+
+WAIT_TIME=1
+NUM_NETIFS=4
+lib_dir=$(dirname $0)/../../../net/forwarding
+source $lib_dir/tc_common.sh
+source $lib_dir/lib.sh
+
+require_command tcpdump
+
+#
+# +---------------------------------------------+
+# | DUT ports Generator ports |
+# | +--------+ +--------+ +--------+ +--------+ |
+# | | | | | | | | | |
+# | | eth0 | | eth1 | | eth2 | | eth3 | |
+# | | | | | | | | | |
+# +-+--------+-+--------+-+--------+-+--------+-+
+# | | | |
+# | | | |
+# | +-----------+ |
+# | |
+# +--------------------------------+
+
+eth0=${NETIFS[p1]}
+eth1=${NETIFS[p2]}
+eth2=${NETIFS[p3]}
+eth3=${NETIFS[p4]}
+
+eth0_mac="de:ad:be:ef:00:00"
+eth1_mac="de:ad:be:ef:00:01"
+eth2_mac="de:ad:be:ef:00:02"
+eth3_mac="de:ad:be:ef:00:03"
+
+# Helpers to map a VCAP IS1 and VCAP IS2 lookup and policy to a chain number
+# used by the kernel driver. The numbers are:
+# VCAP IS1 lookup 0: 10000
+# VCAP IS1 lookup 1: 11000
+# VCAP IS1 lookup 2: 12000
+# VCAP IS2 lookup 0 policy 0: 20000
+# VCAP IS2 lookup 0 policy 1: 20001
+# VCAP IS2 lookup 0 policy 255: 20255
+# VCAP IS2 lookup 1 policy 0: 21000
+# VCAP IS2 lookup 1 policy 1: 21001
+# VCAP IS2 lookup 1 policy 255: 21255
+IS1()
+{
+ local lookup=$1
+
+ echo $((10000 + 1000 * lookup))
+}
+
+IS2()
+{
+ local lookup=$1
+ local pag=$2
+
+ echo $((20000 + 1000 * lookup + pag))
+}
+
+ES0()
+{
+ echo 0
+}
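+
+# For instance, $(IS1 2) evaluates to 12000 and $(IS2 1 255) to 21255,
+# matching the table above.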
+
+# The Ocelot switches have a fixed ingress pipeline composed of:
+#
+# +----------------------------------------------+ +-----------------------------------------+
+# | VCAP IS1 | | VCAP IS2 |
+# | | | |
+# | +----------+ +----------+ +----------+ | | +----------+ +----------+ |
+# | | Lookup 0 | | Lookup 1 | | Lookup 2 | | --+------> PAG 0: | Lookup 0 | -> | Lookup 1 | |
+# | +----------+ -> +----------+ -> +----------+ | | | +----------+ +----------+ |
+# | |key&action| |key&action| |key&action| | | | |key&action| |key&action| |
+# | |key&action| |key&action| |key&action| | | | | .. | | .. | |
+# | | .. | | .. | | .. | | | | +----------+ +----------+ |
+# | +----------+ +----------+ +----------+ | | | |
+# | selects PAG | | | +----------+ +----------+ |
+# +----------------------------------------------+ +------> PAG 1: | Lookup 0 | -> | Lookup 1 | |
+# | | +----------+ +----------+ |
+# | | |key&action| |key&action| |
+# | | | .. | | .. | |
+# | | +----------+ +----------+ |
+# | | ... |
+# | | |
+# | | +----------+ +----------+ |
+# +----> PAG 254: | Lookup 0 | -> | Lookup 1 | |
+# | | +----------+ +----------+ |
+# | | |key&action| |key&action| |
+# | | | .. | | .. | |
+# | | +----------+ +----------+ |
+# | | |
+# | | +----------+ +----------+ |
+# +----> PAG 255: | Lookup 0 | -> | Lookup 1 | |
+# | +----------+ +----------+ |
+# | |key&action| |key&action| |
+# | | .. | | .. | |
+# | +----------+ +----------+ |
+# +-----------------------------------------+
+#
+# Both the VCAP IS1 (Ingress Stage 1) and IS2 (Ingress Stage 2) are indexed
+# (looked up) multiple times: IS1 3 times, and IS2 2 times. Each filter
+# (key and action pair) can be configured to only match during the first, or
+# second, etc, lookup.
+#
+# During one TCAM lookup, the filter processing stops at the first entry that
+# matches, then the pipeline jumps to the next lookup.
+# The driver maps each individual lookup of each individual ingress TCAM to a
+# separate chain number. For correct rule offloading, it is mandatory that each
+# filter installed in one TCAM is terminated by a non-optional GOTO action to
+# the next lookup from the fixed pipeline.
+#
+# A chain can only be used if there is a GOTO action correctly set up from the
+# prior lookup in the processing pipeline. Setting up all chains is not
+# mandatory.
+
+# NOTE: VCAP IS1 currently uses only S1_NORMAL half keys and VCAP IS2
+# dynamically chooses between MAC_ETYPE, ARP, IP4_TCP_UDP, IP4_OTHER, which are
+# all half keys as well.
+
+create_tcam_skeleton()
+{
+ local eth=$1
+
+ tc qdisc add dev $eth clsact
+
+ # VCAP IS1 is the Ingress Classification TCAM and can offload the
+ # following actions:
+ # - skbedit priority
+ # - vlan pop
+ # - vlan modify
+ # - goto (only in lookup 2, the last IS1 lookup)
+ tc filter add dev $eth ingress chain 0 pref 49152 flower \
+ skip_sw action goto chain $(IS1 0)
+ tc filter add dev $eth ingress chain $(IS1 0) pref 49152 \
+ flower skip_sw action goto chain $(IS1 1)
+ tc filter add dev $eth ingress chain $(IS1 1) pref 49152 \
+ flower skip_sw action goto chain $(IS1 2)
+ tc filter add dev $eth ingress chain $(IS1 2) pref 49152 \
+ flower skip_sw action goto chain $(IS2 0 0)
+
+ # VCAP IS2 is the Security Enforcement ingress TCAM and can offload the
+ # following actions:
+ # - trap
+ # - drop
+ # - police
+ # The two VCAP IS2 lookups can be segmented into up to 256 groups of
+ # rules, called Policies. A Policy is selected through the Policy
+ # Association Group (PAG) action of VCAP IS1 (which is the
+ # GOTO offload).
+ tc filter add dev $eth ingress chain $(IS2 0 0) pref 49152 \
+ flower skip_sw action goto chain $(IS2 1 0)
+}
+
+setup_prepare()
+{
+ create_tcam_skeleton $eth0
+
+ ip link add br0 type bridge
+ ip link set $eth0 master br0
+ ip link set $eth1 master br0
+ ip link set br0 up
+
+ ip link add link $eth3 name $eth3.100 type vlan id 100
+ ip link set $eth3.100 up
+
+ ip link add link $eth3 name $eth3.200 type vlan id 200
+ ip link set $eth3.200 up
+
+ tc filter add dev $eth0 ingress chain $(IS1 1) pref 1 \
+ protocol 802.1Q flower skip_sw vlan_id 100 \
+ action vlan pop \
+ action goto chain $(IS1 2)
+
+ tc filter add dev $eth0 egress chain $(ES0) pref 1 \
+ flower skip_sw indev $eth1 \
+ action vlan push protocol 802.1Q id 100
+
+ tc filter add dev $eth0 ingress chain $(IS1 0) pref 2 \
+ protocol ipv4 flower skip_sw src_ip 10.1.1.2 \
+ action skbedit priority 7 \
+ action goto chain $(IS1 1)
+
+ tc filter add dev $eth0 ingress chain $(IS2 0 0) pref 1 \
+ protocol ipv4 flower skip_sw ip_proto udp dst_port 5201 \
+ action police rate 50mbit burst 64k \
+ action goto chain $(IS2 1 0)
+}
+
+cleanup()
+{
+ ip link del $eth3.200
+ ip link del $eth3.100
+ tc qdisc del dev $eth0 clsact
+ ip link del br0
+}
+
+test_vlan_pop()
+{
+ printf "Testing VLAN pop.. "
+
+ tcpdump_start $eth2
+
+ # Work around Mausezahn VLAN builder bug
+ # (https://github.com/netsniff-ng/netsniff-ng/issues/225) by using
+ # an 8021q upper
+ $MZ $eth3.100 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+
+ sleep 1
+
+ tcpdump_stop
+
+ if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, ethertype IPv4"; then
+ echo "OK"
+ else
+ echo "FAIL"
+ fi
+
+ tcpdump_cleanup
+}
+
+test_vlan_push()
+{
+ printf "Testing VLAN push.. "
+
+ tcpdump_start $eth3.100
+
+ $MZ $eth2 -q -c 1 -p 64 -a $eth2_mac -b $eth3_mac -t ip
+
+ sleep 1
+
+ tcpdump_stop
+
+ if tcpdump_show | grep -q "$eth2_mac > $eth3_mac"; then
+ echo "OK"
+ else
+ echo "FAIL"
+ fi
+
+ tcpdump_cleanup
+}
+
+test_vlan_modify()
+{
+ printf "Testing VLAN modification.. "
+
+ ip link set br0 type bridge vlan_filtering 1
+ bridge vlan add dev $eth0 vid 200
+ bridge vlan add dev $eth0 vid 300
+ bridge vlan add dev $eth1 vid 300
+
+ tc filter add dev $eth0 ingress chain $(IS1 2) pref 3 \
+ protocol 802.1Q flower skip_sw vlan_id 200 \
+ action vlan modify id 300 \
+ action goto chain $(IS2 0 0)
+
+ tcpdump_start $eth2
+
+ $MZ $eth3.200 -q -c 1 -p 64 -a $eth3_mac -b $eth2_mac -t ip
+
+ sleep 1
+
+ tcpdump_stop
+
+ if tcpdump_show | grep -q "$eth3_mac > $eth2_mac, .* vlan 300"; then
+ echo "OK"
+ else
+ echo "FAIL"
+ fi
+
+ tcpdump_cleanup
+
+ tc filter del dev $eth0 ingress chain $(IS1 2) pref 3
+
+ bridge vlan del dev $eth0 vid 200
+ bridge vlan del dev $eth0 vid 300
+ bridge vlan del dev $eth1 vid 300
+ ip link set br0 type bridge vlan_filtering 0
+}
+
+test_skbedit_priority()
+{
+ local num_pkts=100
+
+ printf "Testing frame prioritization.. "
+
+ before=$(ethtool_stats_get $eth0 'rx_green_prio_7')
+
+ $MZ $eth3 -q -c $num_pkts -p 64 -a $eth3_mac -b $eth2_mac -t ip -A 10.1.1.2
+
+ after=$(ethtool_stats_get $eth0 'rx_green_prio_7')
+
+ if [ $((after - before)) = $num_pkts ]; then
+ echo "OK"
+ else
+ echo "FAIL"
+ fi
+}
+
+trap cleanup EXIT
+
+ALL_TESTS="
+ test_vlan_pop
+ test_vlan_push
+ test_vlan_modify
+ test_skbedit_priority
+"
+
+setup_prepare
+setup_wait
+
+tests_run
+
+exit $EXIT_STATUS
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 742c499328b2..61ae899cfc17 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+ipsec
msg_zerocopy
socket
psock_fanout
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 9491bbaa0831..ef352477cac6 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -19,6 +19,8 @@ TEST_PROGS += txtimestamp.sh
TEST_PROGS += vrf-xfrm-tests.sh
TEST_PROGS += rxtimestamp.sh
TEST_PROGS += devlink_port_split.py
+TEST_PROGS += drop_monitor_tests.sh
+TEST_PROGS += vrf_route_leaking.sh
TEST_PROGS_EXTENDED := in_netns.sh
TEST_GEN_FILES = socket nettest
TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
@@ -29,6 +31,7 @@ TEST_GEN_FILES += tcp_fastopen_backup_key
TEST_GEN_FILES += fin_ack_lat
TEST_GEN_FILES += reuseaddr_ports_exhausted
TEST_GEN_FILES += hwtstamp_config rxtimestamp timestamping txtimestamp
+TEST_GEN_FILES += ipsec
TEST_GEN_PROGS = reuseport_bpf reuseport_bpf_cpu reuseport_bpf_numa
TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 3b42c06b5985..43649242adc0 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -24,10 +24,12 @@ CONFIG_IP_NF_NAT=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_IPV6=y
CONFIG_NF_TABLES_IPV4=y
-CONFIG_NFT_CHAIN_NAT_IPV6=m
-CONFIG_NFT_CHAIN_NAT_IPV4=m
+CONFIG_NFT_NAT=m
CONFIG_NET_SCH_FQ=m
CONFIG_NET_SCH_ETF=m
CONFIG_NET_SCH_NETEM=y
CONFIG_TEST_BLACKHOLE_DEV=m
CONFIG_KALLSYMS=y
+CONFIG_TRACEPOINTS=y
+CONFIG_NET_DROP_MONITOR=m
+CONFIG_NETDEVSIM=m
diff --git a/tools/testing/selftests/net/drop_monitor_tests.sh b/tools/testing/selftests/net/drop_monitor_tests.sh
new file mode 100755
index 000000000000..b7650e30d18b
--- /dev/null
+++ b/tools/testing/selftests/net/drop_monitor_tests.sh
@@ -0,0 +1,215 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+# This test is for checking drop monitor functionality.
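+#
+# The kernel drop monitor (net_dm) reports dropped packets over generic
+# netlink; dwdump subscribes to these reports and writes them to a pcap
+# file, which is then inspected with tshark (software drops by their IP
+# fields, hardware drops by the net_dm.hw_trap_name dissector field).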
+
+ret=0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+# all tests in this script. Can be overridden with -t option
+TESTS="
+ sw_drops
+ hw_drops
+"
+
+IP="ip -netns ns1"
+TC="tc -netns ns1"
+DEVLINK="devlink -N ns1"
+NS_EXEC="ip netns exec ns1"
+NETDEVSIM_PATH=/sys/bus/netdevsim/
+DEV_ADDR=1337
+DEV=netdevsim${DEV_ADDR}
+DEVLINK_DEV=netdevsim/${DEV}
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ ${rc} -eq ${expected} ]; then
+ printf " TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf " TEST: %-60s [FAIL]\n" "${msg}"
+ fi
+}
+
+setup()
+{
+ modprobe netdevsim &> /dev/null
+
+ set -e
+ ip netns add ns1
+ $IP link add dummy10 up type dummy
+
+ $NS_EXEC echo "$DEV_ADDR 1" > ${NETDEVSIM_PATH}/new_device
+ udevadm settle
+ local netdev=$($NS_EXEC ls ${NETDEVSIM_PATH}/devices/${DEV}/net/)
+ $IP link set dev $netdev up
+
+ set +e
+}
+
+cleanup()
+{
+ $NS_EXEC echo "$DEV_ADDR" > ${NETDEVSIM_PATH}/del_device
+ ip netns del ns1
+}
+
+sw_drops_test()
+{
+ echo
+ echo "Software drops test"
+
+ setup
+
+ local dir=$(mktemp -d)
+
+ $TC qdisc add dev dummy10 clsact
+ $TC filter add dev dummy10 egress pref 1 handle 101 proto ip \
+ flower dst_ip 192.0.2.10 action drop
+
+ $NS_EXEC mausezahn dummy10 -a 00:11:22:33:44:55 -b 00:aa:bb:cc:dd:ee \
+ -A 192.0.2.1 -B 192.0.2.10 -t udp sp=12345,dp=54321 -c 0 -q \
+ -d 100msec &
+ timeout 5 dwdump -o sw -w ${dir}/packets.pcap
+ (( $(tshark -r ${dir}/packets.pcap \
+ -Y 'ip.dst == 192.0.2.10' 2> /dev/null | wc -l) != 0))
+ log_test $? 0 "Capturing active software drops"
+
+ rm ${dir}/packets.pcap
+
+ { kill %% && wait %%; } 2>/dev/null
+ timeout 5 dwdump -o sw -w ${dir}/packets.pcap
+ (( $(tshark -r ${dir}/packets.pcap \
+ -Y 'ip.dst == 192.0.2.10' 2> /dev/null | wc -l) == 0))
+ log_test $? 0 "Capturing inactive software drops"
+
+ rm -r $dir
+
+ cleanup
+}
+
+hw_drops_test()
+{
+ echo
+ echo "Hardware drops test"
+
+ setup
+
+ local dir=$(mktemp -d)
+
+ $DEVLINK trap set $DEVLINK_DEV trap blackhole_route action trap
+ timeout 5 dwdump -o hw -w ${dir}/packets.pcap
+ (( $(tshark -r ${dir}/packets.pcap \
+ -Y 'net_dm.hw_trap_name== blackhole_route' 2> /dev/null \
+ | wc -l) != 0))
+ log_test $? 0 "Capturing active hardware drops"
+
+ rm ${dir}/packets.pcap
+
+ $DEVLINK trap set $DEVLINK_DEV trap blackhole_route action drop
+ timeout 5 dwdump -o hw -w ${dir}/packets.pcap
+ (( $(tshark -r ${dir}/packets.pcap \
+ -Y 'net_dm.hw_trap_name== blackhole_route' 2> /dev/null \
+ | wc -l) == 0))
+ log_test $? 0 "Capturing inactive hardware drops"
+
+ rm -r $dir
+
+ cleanup
+}
+
+################################################################################
+# usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+
+ -t <test> Test(s) to run (default: all)
+ (options: $TESTS)
+EOF
+}
+
+################################################################################
+# main
+
+while getopts ":t:h" opt; do
+ case $opt in
+ t) TESTS=$OPTARG;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+if [ "$(id -u)" -ne 0 ];then
+ echo "SKIP: Need root privileges"
+ exit $ksft_skip;
+fi
+
+if [ ! -x "$(command -v ip)" ]; then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v devlink)" ]; then
+ echo "SKIP: Could not run test without devlink tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v tshark)" ]; then
+ echo "SKIP: Could not run test without tshark tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v dwdump)" ]; then
+ echo "SKIP: Could not run test without dwdump tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v udevadm)" ]; then
+ echo "SKIP: Could not run test without udevadm tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v timeout)" ]; then
+ echo "SKIP: Could not run test without timeout tool"
+ exit $ksft_skip
+fi
+
+if [ ! -x "$(command -v mausezahn)" ]; then
+ echo "SKIP: Could not run test without mausezahn tool"
+ exit $ksft_skip
+fi
+
+tshark -G fields 2> /dev/null | grep -q net_dm
+if [ $? -ne 0 ]; then
+ echo "SKIP: tshark too old, missing net_dm dissector"
+ exit $ksft_skip
+fi
+
+# start clean
+cleanup &> /dev/null
+
+for t in $TESTS
+do
+ case $t in
+ sw_drops|sw) sw_drops_test;;
+ hw_drops|hw) hw_drops_test;;
+
+ help) echo "Test names: $TESTS"; exit 0;;
+ esac
+done
+
+if [ "$TESTS" != "none" ]; then
+ printf "\nTests passed: %3d\n" ${nsuccess}
+ printf "Tests failed: %3d\n" ${nfail}
+fi
+
+exit $ret
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index 22dc2f3d428b..eb693a3b7b4a 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -411,9 +411,16 @@ ipv6_fdb_grp_fcnal()
run_cmd "$IP -6 ro add 2001:db8:101::1/128 nhid 103"
log_test $? 2 "Route add with fdb nexthop group"
+ run_cmd "$IP nexthop del id 61"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 0 "Fdb entry after deleting a single nexthop"
+
run_cmd "$IP nexthop del id 102"
log_test $? 0 "Fdb nexthop delete"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 254 "Fdb entry after deleting a nexthop group"
+
$IP link del dev vx10
}
@@ -484,9 +491,16 @@ ipv4_fdb_grp_fcnal()
run_cmd "$IP ro add 172.16.0.0/22 nhid 103"
log_test $? 2 "Route add with fdb nexthop group"
+ run_cmd "$IP nexthop del id 12"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 0 "Fdb entry after deleting a single nexthop"
+
run_cmd "$IP nexthop del id 102"
log_test $? 0 "Fdb nexthop delete"
+ run_cmd "$BRIDGE fdb get to 02:02:00:00:00:13 dev vx10 self"
+ log_test $? 254 "Fdb entry after deleting a nexthop group"
+
$IP link del dev vx10
}
@@ -739,6 +753,36 @@ ipv6_fcnal_runtime()
run_cmd "$IP nexthop replace id 81 via 172.16.1.1 dev veth1"
log_test $? 2 "Nexthop replace of group entry - v6 route, v4 nexthop"
+ run_cmd "$IP nexthop add id 86 via 2001:db8:92::2 dev veth3"
+ run_cmd "$IP nexthop add id 87 via 172.16.1.1 dev veth1"
+ run_cmd "$IP nexthop add id 88 via 172.16.1.1 dev veth1"
+ run_cmd "$IP nexthop add id 124 group 86/87/88"
+ run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 124"
+ log_test $? 2 "IPv6 route can not have a group with v4 and v6 gateways"
+
+ run_cmd "$IP nexthop del id 88"
+ run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 124"
+ log_test $? 2 "IPv6 route can not have a group with v4 and v6 gateways"
+
+ run_cmd "$IP nexthop del id 87"
+ run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 124"
+ log_test $? 0 "IPv6 route using a group after removing v4 gateways"
+
+ run_cmd "$IP ro delete 2001:db8:101::1/128"
+ run_cmd "$IP nexthop add id 87 via 172.16.1.1 dev veth1"
+ run_cmd "$IP nexthop add id 88 via 172.16.1.1 dev veth1"
+ run_cmd "$IP nexthop replace id 124 group 86/87/88"
+ run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 124"
+ log_test $? 2 "IPv6 route can not have a group with v4 and v6 gateways"
+
+ run_cmd "$IP nexthop replace id 88 via 2001:db8:92::2 dev veth3"
+ run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 124"
+ log_test $? 2 "IPv6 route can not have a group with v4 and v6 gateways"
+
+ run_cmd "$IP nexthop replace id 87 via 2001:db8:92::2 dev veth3"
+ run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 124"
+ log_test $? 0 "IPv6 route using a group after replacing v4 gateways"
+
$IP nexthop flush >/dev/null 2>&1
#
diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh b/tools/testing/selftests/net/forwarding/devlink_lib.sh
index 75fe24bcb9cd..9c12c4fd3afc 100644
--- a/tools/testing/selftests/net/forwarding/devlink_lib.sh
+++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh
@@ -5,7 +5,7 @@
# Defines
if [[ ! -v DEVLINK_DEV ]]; then
- DEVLINK_DEV=$(devlink port show "${NETIFS[p1]}" -j \
+ DEVLINK_DEV=$(devlink port show "${NETIFS[p1]:-$NETIF_NO_CABLE}" -j \
| jq -r '.port | keys[]' | cut -d/ -f-2)
if [ -z "$DEVLINK_DEV" ]; then
echo "SKIP: ${NETIFS[p1]} has no devlink device registered for it"
@@ -117,6 +117,12 @@ devlink_reload()
declare -A DEVLINK_ORIG
+# Changing pool type from static to dynamic causes reinterpretation of threshold
+# values. They therefore need to be saved before pool type is changed, then the
+# pool type can be changed, and then the new values need to be set up. Therefore
+# instead of saving the current state implicitly in the _set call, provide
+# functions for all three primitives: save, set, and restore.
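+#
+# A minimal usage sketch (hypothetical pool number and values):
+#
+#   devlink_pool_size_thtype_save 0
+#   devlink_pool_size_thtype_set 0 static 10000000
+#   ... run the test ...
+#   devlink_pool_size_thtype_restore 0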
+
devlink_port_pool_threshold()
{
local port=$1; shift
@@ -126,14 +132,21 @@ devlink_port_pool_threshold()
| jq '.port_pool."'"$port"'"[].threshold'
}
-devlink_port_pool_th_set()
+devlink_port_pool_th_save()
{
local port=$1; shift
local pool=$1; shift
- local th=$1; shift
local key="port_pool($port,$pool).threshold"
DEVLINK_ORIG[$key]=$(devlink_port_pool_threshold $port $pool)
+}
+
+devlink_port_pool_th_set()
+{
+ local port=$1; shift
+ local pool=$1; shift
+ local th=$1; shift
+
devlink sb port pool set $port pool $pool th $th
}
@@ -142,8 +155,13 @@ devlink_port_pool_th_restore()
local port=$1; shift
local pool=$1; shift
local key="port_pool($port,$pool).threshold"
+ local -a orig=(${DEVLINK_ORIG[$key]})
- devlink sb port pool set $port pool $pool th ${DEVLINK_ORIG[$key]}
+ if [[ -z $orig ]]; then
+ echo "WARNING: Mismatched devlink_port_pool_th_restore"
+ else
+ devlink sb port pool set $port pool $pool th $orig
+ fi
}
devlink_pool_size_thtype()
@@ -154,14 +172,20 @@ devlink_pool_size_thtype()
| jq -r '.pool[][] | (.size, .thtype)'
}
+devlink_pool_size_thtype_save()
+{
+ local pool=$1; shift
+ local key="pool($pool).size_thtype"
+
+ DEVLINK_ORIG[$key]=$(devlink_pool_size_thtype $pool)
+}
+
devlink_pool_size_thtype_set()
{
local pool=$1; shift
local thtype=$1; shift
local size=$1; shift
- local key="pool($pool).size_thtype"
- DEVLINK_ORIG[$key]=$(devlink_pool_size_thtype $pool)
devlink sb pool set "$DEVLINK_DEV" pool $pool size $size thtype $thtype
}
@@ -171,8 +195,12 @@ devlink_pool_size_thtype_restore()
local key="pool($pool).size_thtype"
local -a orig=(${DEVLINK_ORIG[$key]})
- devlink sb pool set "$DEVLINK_DEV" pool $pool \
- size ${orig[0]} thtype ${orig[1]}
+ if [[ -z ${orig[0]} ]]; then
+ echo "WARNING: Mismatched devlink_pool_size_thtype_restore"
+ else
+ devlink sb pool set "$DEVLINK_DEV" pool $pool \
+ size ${orig[0]} thtype ${orig[1]}
+ fi
}
devlink_tc_bind_pool_th()
@@ -185,6 +213,16 @@ devlink_tc_bind_pool_th()
| jq -r '.tc_bind[][] | (.pool, .threshold)'
}
+devlink_tc_bind_pool_th_save()
+{
+ local port=$1; shift
+ local tc=$1; shift
+ local dir=$1; shift
+ local key="tc_bind($port,$dir,$tc).pool_th"
+
+ DEVLINK_ORIG[$key]=$(devlink_tc_bind_pool_th $port $tc $dir)
+}
+
devlink_tc_bind_pool_th_set()
{
local port=$1; shift
@@ -192,9 +230,7 @@ devlink_tc_bind_pool_th_set()
local dir=$1; shift
local pool=$1; shift
local th=$1; shift
- local key="tc_bind($port,$dir,$tc).pool_th"
- DEVLINK_ORIG[$key]=$(devlink_tc_bind_pool_th $port $tc $dir)
devlink sb tc bind set $port tc $tc type $dir pool $pool th $th
}
@@ -206,8 +242,12 @@ devlink_tc_bind_pool_th_restore()
local key="tc_bind($port,$dir,$tc).pool_th"
local -a orig=(${DEVLINK_ORIG[$key]})
- devlink sb tc bind set $port tc $tc type $dir \
- pool ${orig[0]} th ${orig[1]}
+ if [[ -z ${orig[0]} ]]; then
+ echo "WARNING: Mismatched devlink_tc_bind_pool_th_restore"
+ else
+ devlink sb tc bind set $port tc $tc type $dir \
+ pool ${orig[0]} th ${orig[1]}
+ fi
}
devlink_traps_num_get()
@@ -509,3 +549,9 @@ devlink_cpu_port_get()
echo "$DEVLINK_DEV/$cpu_dl_port_num"
}
+
+devlink_cell_size_get()
+{
+ devlink sb pool show "$DEVLINK_DEV" pool 0 -j \
+ | jq '.pool[][].cell_size'
+}
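+
+# Callers such as test_tc_sizes in qos_headroom.sh use this to express
+# buffer sizes as a whole number of cells, e.g. size=$((cell_size * 1000)).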
diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh
index 977fc2b326a2..927f9ba49e08 100644
--- a/tools/testing/selftests/net/forwarding/lib.sh
+++ b/tools/testing/selftests/net/forwarding/lib.sh
@@ -1227,3 +1227,46 @@ stop_traffic()
# Suppress noise from killing mausezahn.
{ kill %% && wait %%; } 2>/dev/null
}
+
+tcpdump_start()
+{
+ local if_name=$1; shift
+ local ns=$1; shift
+
+ capfile=$(mktemp)
+ capout=$(mktemp)
+
+ if [ -z $ns ]; then
+ ns_cmd=""
+ else
+ ns_cmd="ip netns exec ${ns}"
+ fi
+
+ if [ -z $SUDO_USER ] ; then
+ capuser=""
+ else
+ capuser="-Z $SUDO_USER"
+ fi
+
+ $ns_cmd tcpdump -e -n -Q in -i $if_name \
+ -s 65535 -B 32768 $capuser -w $capfile > "$capout" 2>&1 &
+ cappid=$!
+
+ sleep 1
+}
+
+tcpdump_stop()
+{
+ $ns_cmd kill $cappid
+ sleep 1
+}
+
+tcpdump_cleanup()
+{
+ rm $capfile $capout
+}
+
+tcpdump_show()
+{
+ tcpdump -e -n -r $capfile 2>&1
+}
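+
+# Typical flow, as used by the ocelot tc_flower_chains.sh test added above
+# (the pattern is a placeholder):
+#
+#   tcpdump_start $eth2
+#   ... inject traffic ...
+#   tcpdump_stop
+#   tcpdump_show | grep -q "$expected_pattern"
+#   tcpdump_cleanup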
diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh
index c33bfd7ba214..13db1cb50e57 100644
--- a/tools/testing/selftests/net/forwarding/mirror_lib.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh
@@ -31,7 +31,7 @@ mirror_test()
local t0=$(tc_rule_stats_get $dev $pref)
$MZ $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \
- -c 10 -d 100ms -t icmp type=8
+ -c 10 -d 100msec -t icmp type=8
sleep 0.5
local t1=$(tc_rule_stats_get $dev $pref)
local delta=$((t1 - t0))
diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c
new file mode 100644
index 000000000000..17ced7d6ce25
--- /dev/null
+++ b/tools/testing/selftests/net/ipsec.c
@@ -0,0 +1,2195 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ipsec.c - Check xfrm on veth inside a net-ns.
+ * Copyright (c) 2018 Dmitry Safonov
+ */
+
+#define _GNU_SOURCE
+
+#include <arpa/inet.h>
+#include <asm/types.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <linux/limits.h>
+#include <linux/netlink.h>
+#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <linux/veth.h>
+#include <linux/xfrm.h>
+#include <netinet/in.h>
+#include <net/if.h>
+#include <sched.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "../kselftest.h"
+
+#define printk(fmt, ...) \
+ ksft_print_msg("%d[%u] " fmt "\n", getpid(), __LINE__, ##__VA_ARGS__)
+
+#define pr_err(fmt, ...) printk(fmt ": %m", ##__VA_ARGS__)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+
+#define IPV4_STR_SZ 16 /* xxx.xxx.xxx.xxx is longest + \0 */
+#define MAX_PAYLOAD 2048
+#define XFRM_ALGO_KEY_BUF_SIZE 512
+#define MAX_PROCESSES (1 << 14) /* /16 mask divided by /30 subnets */
+#define INADDR_A ((in_addr_t) 0x0a000000) /* 10.0.0.0 */
+#define INADDR_B ((in_addr_t) 0xc0a80000) /* 192.168.0.0 */
+
+/* /30 mask for one veth connection */
+#define PREFIX_LEN 30
+#define child_ip(nr) (4*(nr) + 1)
+#define grchild_ip(nr) (4*(nr) + 2)
+
+#define VETH_FMT "ktst-%d"
+#define VETH_LEN 12
+
+static int nsfd_parent = -1;
+static int nsfd_childa = -1;
+static int nsfd_childb = -1;
+static long page_size;
+
+/*
+ * ksft_cnt is static in kselftest, so isn't shared with children.
+ * We have to send a test result back to parent and count there.
+ * results_fd is a pipe with test feedback from children.
+ */
+static int results_fd[2];
+
+const unsigned int ping_delay_nsec = 50 * 1000 * 1000;
+const unsigned int ping_timeout = 300;
+const unsigned int ping_count = 100;
+const unsigned int ping_success = 80;
+
+static void randomize_buffer(void *buf, size_t buflen)
+{
+ int *p = (int *)buf;
+ size_t words = buflen / sizeof(int);
+ size_t leftover = buflen % sizeof(int);
+
+ if (!buflen)
+ return;
+
+ while (words--)
+ *p++ = rand();
+
+ if (leftover) {
+ int tmp = rand();
+
+ memcpy(buf + buflen - leftover, &tmp, leftover);
+ }
+
+ return;
+}
+
+static int unshare_open(void)
+{
+ const char *netns_path = "/proc/self/ns/net";
+ int fd;
+
+ if (unshare(CLONE_NEWNET) != 0) {
+ pr_err("unshare()");
+ return -1;
+ }
+
+ fd = open(netns_path, O_RDONLY);
+ if (fd <= 0) {
+ pr_err("open(%s)", netns_path);
+ return -1;
+ }
+
+ return fd;
+}
+
+static int switch_ns(int fd)
+{
+ if (setns(fd, CLONE_NEWNET)) {
+ pr_err("setns()");
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Run the test inside a new parent net namespace so there is less to
+ * worry about cleaning up on the error path.
+ */
+static int init_namespaces(void)
+{
+ nsfd_parent = unshare_open();
+ if (nsfd_parent <= 0)
+ return -1;
+
+ nsfd_childa = unshare_open();
+ if (nsfd_childa <= 0)
+ return -1;
+
+ if (switch_ns(nsfd_parent))
+ return -1;
+
+ nsfd_childb = unshare_open();
+ if (nsfd_childb <= 0)
+ return -1;
+
+ if (switch_ns(nsfd_parent))
+ return -1;
+ return 0;
+}
+
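+/*
+ * Open a netlink socket for the given protocol, or reuse *sock if it is
+ * already open; a fresh socket gets a randomized initial sequence number.
+ */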
+static int netlink_sock(int *sock, uint32_t *seq_nr, int proto)
+{
+ if (*sock > 0) {
+ (*seq_nr)++;
+ return 0;
+ }
+
+ *sock = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, proto);
+ if (*sock <= 0) {
+ pr_err("socket(AF_NETLINK)");
+ return -1;
+ }
+
+ randomize_buffer(seq_nr, sizeof(*seq_nr));
+
+ return 0;
+}
+
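+/*
+ * Helpers for packing rtnetlink attributes after the message header.
+ * Nested attributes are built with rtattr_begin()/rtattr_end(): begin
+ * reserves the attribute header, end fixes up rta_len once the nested
+ * payload has been appended.
+ */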
+static inline struct rtattr *rtattr_hdr(struct nlmsghdr *nh)
+{
+ return (struct rtattr *)((char *)(nh) + RTA_ALIGN((nh)->nlmsg_len));
+}
+
+static int rtattr_pack(struct nlmsghdr *nh, size_t req_sz,
+ unsigned short rta_type, const void *payload, size_t size)
+{
+ /* NLMSG_ALIGNTO == RTA_ALIGNTO, nlmsg_len already aligned */
+ struct rtattr *attr = rtattr_hdr(nh);
+ size_t nl_size = RTA_ALIGN(nh->nlmsg_len) + RTA_LENGTH(size);
+
+ if (req_sz < nl_size) {
+ printk("req buf is too small: %zu < %zu", req_sz, nl_size);
+ return -1;
+ }
+ nh->nlmsg_len = nl_size;
+
+ attr->rta_len = RTA_LENGTH(size);
+ attr->rta_type = rta_type;
+ memcpy(RTA_DATA(attr), payload, size);
+
+ return 0;
+}
+
+static struct rtattr *_rtattr_begin(struct nlmsghdr *nh, size_t req_sz,
+ unsigned short rta_type, const void *payload, size_t size)
+{
+ struct rtattr *ret = rtattr_hdr(nh);
+
+ if (rtattr_pack(nh, req_sz, rta_type, payload, size))
+ return NULL;
+
+ return ret;
+}
+
+static inline struct rtattr *rtattr_begin(struct nlmsghdr *nh, size_t req_sz,
+ unsigned short rta_type)
+{
+ return _rtattr_begin(nh, req_sz, rta_type, NULL, 0);
+}
+
+static inline void rtattr_end(struct nlmsghdr *nh, struct rtattr *attr)
+{
+ char *nlmsg_end = (char *)nh + nh->nlmsg_len;
+
+ attr->rta_len = nlmsg_end - (char *)attr;
+}
+
+static int veth_pack_peerb(struct nlmsghdr *nh, size_t req_sz,
+ const char *peer, int ns)
+{
+ struct ifinfomsg pi;
+ struct rtattr *peer_attr;
+
+ memset(&pi, 0, sizeof(pi));
+ pi.ifi_family = AF_UNSPEC;
+ pi.ifi_change = 0xFFFFFFFF;
+
+ peer_attr = _rtattr_begin(nh, req_sz, VETH_INFO_PEER, &pi, sizeof(pi));
+ if (!peer_attr)
+ return -1;
+
+ if (rtattr_pack(nh, req_sz, IFLA_IFNAME, peer, strlen(peer)))
+ return -1;
+
+ if (rtattr_pack(nh, req_sz, IFLA_NET_NS_FD, &ns, sizeof(ns)))
+ return -1;
+
+ rtattr_end(nh, peer_attr);
+
+ return 0;
+}
+
+static int netlink_check_answer(int sock)
+{
+ struct nlmsgerror {
+ struct nlmsghdr hdr;
+ int error;
+ struct nlmsghdr orig_msg;
+ } answer;
+
+ if (recv(sock, &answer, sizeof(answer), 0) < 0) {
+ pr_err("recv()");
+ return -1;
+ } else if (answer.hdr.nlmsg_type != NLMSG_ERROR) {
+ printk("expected NLMSG_ERROR, got %d", (int)answer.hdr.nlmsg_type);
+ return -1;
+ } else if (answer.error) {
+ printk("NLMSG_ERROR: %d: %s",
+ answer.error, strerror(-answer.error));
+ return answer.error;
+ }
+
+ return 0;
+}
+
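+/*
+ * Create a veth pair with a single RTM_NEWLINK request: IFLA_LINKINFO
+ * carries the "veth" kind, and the nested VETH_INFO_PEER attribute names
+ * the second end, so each end can be moved into its own netns via
+ * IFLA_NET_NS_FD at creation time.
+ */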
+static int veth_add(int sock, uint32_t seq, const char *peera, int ns_a,
+ const char *peerb, int ns_b)
+{
+ uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+ const char veth_type[] = "veth";
+ struct rtattr *link_info, *info_data;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_type = RTM_NEWLINK;
+ req.nh.nlmsg_flags = flags;
+ req.nh.nlmsg_seq = seq;
+ req.info.ifi_family = AF_UNSPEC;
+ req.info.ifi_change = 0xFFFFFFFF;
+
+ if (rtattr_pack(&req.nh, sizeof(req), IFLA_IFNAME, peera, strlen(peera)))
+ return -1;
+
+ if (rtattr_pack(&req.nh, sizeof(req), IFLA_NET_NS_FD, &ns_a, sizeof(ns_a)))
+ return -1;
+
+ link_info = rtattr_begin(&req.nh, sizeof(req), IFLA_LINKINFO);
+ if (!link_info)
+ return -1;
+
+ if (rtattr_pack(&req.nh, sizeof(req), IFLA_INFO_KIND, veth_type, sizeof(veth_type)))
+ return -1;
+
+ info_data = rtattr_begin(&req.nh, sizeof(req), IFLA_INFO_DATA);
+ if (!info_data)
+ return -1;
+
+ if (veth_pack_peerb(&req.nh, sizeof(req), peerb, ns_b))
+ return -1;
+
+ rtattr_end(&req.nh, info_data);
+ rtattr_end(&req.nh, link_info);
+
+ if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+ return netlink_check_answer(sock);
+}
+
+static int ip4_addr_set(int sock, uint32_t seq, const char *intf,
+ struct in_addr addr, uint8_t prefix)
+{
+ uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
+ struct {
+ struct nlmsghdr nh;
+ struct ifaddrmsg info;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_type = RTM_NEWADDR;
+ req.nh.nlmsg_flags = flags;
+ req.nh.nlmsg_seq = seq;
+ req.info.ifa_family = AF_INET;
+ req.info.ifa_prefixlen = prefix;
+ req.info.ifa_index = if_nametoindex(intf);
+
+#ifdef DEBUG
+ {
+ char addr_str[IPV4_STR_SZ] = {};
+
+ strncpy(addr_str, inet_ntoa(addr), IPV4_STR_SZ - 1);
+
+ printk("ip addr set %s", addr_str);
+ }
+#endif
+
+ if (rtattr_pack(&req.nh, sizeof(req), IFA_LOCAL, &addr, sizeof(addr)))
+ return -1;
+
+ if (rtattr_pack(&req.nh, sizeof(req), IFA_ADDRESS, &addr, sizeof(addr)))
+ return -1;
+
+ if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+ return netlink_check_answer(sock);
+}
+
+static int link_set_up(int sock, uint32_t seq, const char *intf)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct ifinfomsg info;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_type = RTM_NEWLINK;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = seq;
+ req.info.ifi_family = AF_UNSPEC;
+ req.info.ifi_change = 0xFFFFFFFF;
+ req.info.ifi_index = if_nametoindex(intf);
+ req.info.ifi_flags = IFF_UP;
+ req.info.ifi_change = IFF_UP;
+
+ if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+ return netlink_check_answer(sock);
+}
+
+static int ip4_route_set(int sock, uint32_t seq, const char *intf,
+ struct in_addr src, struct in_addr dst)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct rtmsg rt;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+ unsigned int index = if_nametoindex(intf);
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.rt));
+ req.nh.nlmsg_type = RTM_NEWROUTE;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
+ req.nh.nlmsg_seq = seq;
+ req.rt.rtm_family = AF_INET;
+ req.rt.rtm_dst_len = 32;
+ req.rt.rtm_table = RT_TABLE_MAIN;
+ req.rt.rtm_protocol = RTPROT_BOOT;
+ req.rt.rtm_scope = RT_SCOPE_LINK;
+ req.rt.rtm_type = RTN_UNICAST;
+
+ if (rtattr_pack(&req.nh, sizeof(req), RTA_DST, &dst, sizeof(dst)))
+ return -1;
+
+ if (rtattr_pack(&req.nh, sizeof(req), RTA_PREFSRC, &src, sizeof(src)))
+ return -1;
+
+ if (rtattr_pack(&req.nh, sizeof(req), RTA_OIF, &index, sizeof(index)))
+ return -1;
+
+ if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+
+ return netlink_check_answer(sock);
+}
+
+static int tunnel_set_route(int route_sock, uint32_t *route_seq, char *veth,
+ struct in_addr tunsrc, struct in_addr tundst)
+{
+ if (ip4_addr_set(route_sock, (*route_seq)++, "lo",
+ tunsrc, PREFIX_LEN)) {
+ printk("Failed to set ipv4 addr");
+ return -1;
+ }
+
+ if (ip4_route_set(route_sock, (*route_seq)++, veth, tunsrc, tundst)) {
+ printk("Failed to set ipv4 route");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int init_child(int nsfd, char *veth, unsigned int src, unsigned int dst)
+{
+ struct in_addr intsrc = inet_makeaddr(INADDR_B, src);
+ struct in_addr tunsrc = inet_makeaddr(INADDR_A, src);
+ struct in_addr tundst = inet_makeaddr(INADDR_A, dst);
+ int route_sock = -1, ret = -1;
+ uint32_t route_seq;
+
+ if (switch_ns(nsfd))
+ return -1;
+
+ if (netlink_sock(&route_sock, &route_seq, NETLINK_ROUTE)) {
+ printk("Failed to open netlink route socket in child");
+ return -1;
+ }
+
+ if (ip4_addr_set(route_sock, route_seq++, veth, intsrc, PREFIX_LEN)) {
+ printk("Failed to set ipv4 addr");
+ goto err;
+ }
+
+ if (link_set_up(route_sock, route_seq++, veth)) {
+ printk("Failed to bring up %s", veth);
+ goto err;
+ }
+
+ if (tunnel_set_route(route_sock, &route_seq, veth, tunsrc, tundst)) {
+ printk("Failed to add tunnel route on %s", veth);
+ goto err;
+ }
+ ret = 0;
+
+err:
+ close(route_sock);
+ return ret;
+}
+
+#define ALGO_LEN 64
+enum desc_type {
+ CREATE_TUNNEL = 0,
+ ALLOCATE_SPI,
+ MONITOR_ACQUIRE,
+ EXPIRE_STATE,
+ EXPIRE_POLICY,
+};
+const char *desc_name[] = {
+ "create tunnel",
+ "alloc spi",
+ "monitor acquire",
+ "expire state",
+ "expire policy"
+};
+struct xfrm_desc {
+ enum desc_type type;
+ uint8_t proto;
+ char a_algo[ALGO_LEN];
+ char e_algo[ALGO_LEN];
+ char c_algo[ALGO_LEN];
+ char ae_algo[ALGO_LEN];
+ unsigned int icv_len;
+ /* unsigned key_len; */
+};
+
+enum msg_type {
+ MSG_ACK = 0,
+ MSG_EXIT,
+ MSG_PING,
+ MSG_XFRM_PREPARE,
+ MSG_XFRM_ADD,
+ MSG_XFRM_DEL,
+ MSG_XFRM_CLEANUP,
+};
+
+struct test_desc {
+ enum msg_type type;
+ union {
+ struct {
+ in_addr_t reply_ip;
+ unsigned int port;
+ } ping;
+ struct xfrm_desc xfrm_desc;
+ } body;
+};
+
+struct test_result {
+ struct xfrm_desc desc;
+ unsigned int res;
+};
+
+static void write_test_result(unsigned int res, struct xfrm_desc *d)
+{
+ struct test_result tr = {};
+ ssize_t ret;
+
+ tr.desc = *d;
+ tr.res = res;
+
+ ret = write(results_fd[1], &tr, sizeof(tr));
+ if (ret != sizeof(tr))
+ pr_err("Failed to write the result in pipe %zd", ret);
+}
+
+static void write_msg(int fd, struct test_desc *msg, bool exit_on_fail)
+{
+ ssize_t bytes = write(fd, msg, sizeof(*msg));
+
+ /* Make sure that write/read is atomic to a pipe */
+ BUILD_BUG_ON(sizeof(struct test_desc) > PIPE_BUF);
+
+ if (bytes < 0) {
+ pr_err("write()");
+ if (exit_on_fail)
+ exit(KSFT_FAIL);
+ }
+ if (bytes != sizeof(*msg)) {
+ pr_err("wrote only part of the message: %zd/%zu", bytes, sizeof(*msg));
+ if (exit_on_fail)
+ exit(KSFT_FAIL);
+ }
+}
+
+static void read_msg(int fd, struct test_desc *msg, bool exit_on_fail)
+{
+ ssize_t bytes = read(fd, msg, sizeof(*msg));
+
+ if (bytes < 0) {
+ pr_err("read()");
+ if (exit_on_fail)
+ exit(KSFT_FAIL);
+ }
+ if (bytes != sizeof(*msg)) {
+ pr_err("got incomplete message %zd/%zu", bytes, sizeof(*msg));
+ if (exit_on_fail)
+ exit(KSFT_FAIL);
+ }
+}
+
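+/*
+ * Set up the two UDP sockets used for pinging: sock[0] is bound to an
+ * ephemeral port (reported via server_port) and given a receive timeout,
+ * sock[1] is the unbound sending socket.
+ */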
+static int udp_ping_init(struct in_addr listen_ip, unsigned int u_timeout,
+ unsigned int *server_port, int sock[2])
+{
+ struct sockaddr_in server;
+ struct timeval t = { .tv_sec = 0, .tv_usec = u_timeout };
+ socklen_t s_len = sizeof(server);
+
+ sock[0] = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock[0] < 0) {
+ pr_err("socket()");
+ return -1;
+ }
+
+ server.sin_family = AF_INET;
+ server.sin_port = 0;
+ memcpy(&server.sin_addr.s_addr, &listen_ip, sizeof(struct in_addr));
+
+ if (bind(sock[0], (struct sockaddr *)&server, s_len)) {
+ pr_err("bind()");
+ goto err_close_server;
+ }
+
+ if (getsockname(sock[0], (struct sockaddr *)&server, &s_len)) {
+ pr_err("getsockname()");
+ goto err_close_server;
+ }
+
+ *server_port = ntohs(server.sin_port);
+
+ if (setsockopt(sock[0], SOL_SOCKET, SO_RCVTIMEO, (const char *)&t, sizeof t)) {
+ pr_err("setsockopt()");
+ goto err_close_server;
+ }
+
+ sock[1] = socket(AF_INET, SOCK_DGRAM, 0);
+ if (sock[1] < 0) {
+ pr_err("socket()");
+ goto err_close_server;
+ }
+
+ return 0;
+
+err_close_server:
+ close(sock[0]);
+ return -1;
+}
+
+static int udp_ping_send(int sock[2], in_addr_t dest_ip, unsigned int port,
+ char *buf, size_t buf_len)
+{
+ struct sockaddr_in server;
+ const struct sockaddr *dest_addr = (struct sockaddr *)&server;
+ char sock_buf[buf_len];
+ ssize_t r_bytes, s_bytes;
+
+ server.sin_family = AF_INET;
+ server.sin_port = htons(port);
+ server.sin_addr.s_addr = dest_ip;
+
+ s_bytes = sendto(sock[1], buf, buf_len, 0, dest_addr, sizeof(server));
+ if (s_bytes < 0) {
+ pr_err("sendto()");
+ return -1;
+ } else if (s_bytes != buf_len) {
+ printk("send part of the message: %zd/%zu", s_bytes, sizeof(server));
+ return -1;
+ }
+
+ r_bytes = recv(sock[0], sock_buf, buf_len, 0);
+ if (r_bytes < 0) {
+ if (errno != EAGAIN)
+ pr_err("recv()");
+ return -1;
+ } else if (r_bytes == 0) { /* EOF */
+ printk("EOF on reply to ping");
+ return -1;
+ } else if (r_bytes != buf_len || memcmp(buf, sock_buf, buf_len)) {
+ printk("ping reply packet is corrupted %zd/%zu", r_bytes, buf_len);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int udp_ping_reply(int sock[2], in_addr_t dest_ip, unsigned int port,
+ char *buf, size_t buf_len)
+{
+ struct sockaddr_in server;
+ const struct sockaddr *dest_addr = (struct sockaddr *)&server;
+ char sock_buf[buf_len];
+ ssize_t r_bytes, s_bytes;
+
+ server.sin_family = AF_INET;
+ server.sin_port = htons(port);
+ server.sin_addr.s_addr = dest_ip;
+
+ r_bytes = recv(sock[0], sock_buf, buf_len, 0);
+ if (r_bytes < 0) {
+ if (errno != EAGAIN)
+ pr_err("recv()");
+ return -1;
+ }
+ if (r_bytes == 0) { /* EOF */
+ printk("EOF on reply to ping");
+ return -1;
+ }
+ if (r_bytes != buf_len || memcmp(buf, sock_buf, buf_len)) {
+ printk("ping reply packet is corrupted %zd/%zu", r_bytes, buf_len);
+ return -1;
+ }
+
+ s_bytes = sendto(sock[1], buf, buf_len, 0, dest_addr, sizeof(server));
+ if (s_bytes < 0) {
+ pr_err("sendto()");
+ return -1;
+ } else if (s_bytes != buf_len) {
+ printk("send part of the message: %zd/%zu", s_bytes, sizeof(server));
+ return -1;
+ }
+
+ return 0;
+}
+
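+/*
+ * Exchange ping_count UDP pings with the other side. The local port and
+ * reply address are first sent over the cmd socketpair; the run passes
+ * if at least ping_success of ping_count round trips complete.
+ */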
+typedef int (*ping_f)(int sock[2], in_addr_t dest_ip, unsigned int port,
+ char *buf, size_t buf_len);
+static int do_ping(int cmd_fd, char *buf, size_t buf_len, struct in_addr from,
+ bool init_side, int d_port, in_addr_t to, ping_f func)
+{
+ struct test_desc msg;
+ unsigned int s_port, i, ping_succeeded = 0;
+ int ping_sock[2];
+ char to_str[IPV4_STR_SZ] = {}, from_str[IPV4_STR_SZ] = {};
+
+ if (udp_ping_init(from, ping_timeout, &s_port, ping_sock)) {
+ printk("Failed to init ping");
+ return -1;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_PING;
+ msg.body.ping.port = s_port;
+ memcpy(&msg.body.ping.reply_ip, &from, sizeof(from));
+
+ write_msg(cmd_fd, &msg, 0);
+ if (init_side) {
+ /* The other end sends the IP address to ping */
+ read_msg(cmd_fd, &msg, 0);
+ if (msg.type != MSG_PING)
+ return -1;
+ to = msg.body.ping.reply_ip;
+ d_port = msg.body.ping.port;
+ }
+
+ for (i = 0; i < ping_count ; i++) {
+ struct timespec sleep_time = {
+ .tv_sec = 0,
+ .tv_nsec = ping_delay_nsec,
+ };
+
+ ping_succeeded += !func(ping_sock, to, d_port, buf, buf_len);
+ nanosleep(&sleep_time, NULL);
+ }
+
+ close(ping_sock[0]);
+ close(ping_sock[1]);
+
+ strncpy(to_str, inet_ntoa(*(struct in_addr *)&to), IPV4_STR_SZ - 1);
+ strncpy(from_str, inet_ntoa(from), IPV4_STR_SZ - 1);
+
+ if (ping_succeeded < ping_success) {
+ printk("ping (%s) %s->%s failed %u/%u times",
+ init_side ? "send" : "reply", from_str, to_str,
+ ping_count - ping_succeeded, ping_count);
+ return -1;
+ }
+
+#ifdef DEBUG
+ printk("ping (%s) %s->%s succeeded %u/%u times",
+ init_side ? "send" : "reply", from_str, to_str,
+ ping_succeeded, ping_count);
+#endif
+
+ return 0;
+}
+
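+/*
+ * Map an algorithm name to its key length (in bits, as expected in
+ * alg_key_len) and fill the key material with random bytes.
+ */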
+static int xfrm_fill_key(char *name, char *buf,
+ size_t buf_len, unsigned int *key_len)
+{
+ /* TODO: use set/map instead */
+ if (strncmp(name, "digest_null", ALGO_LEN) == 0)
+ *key_len = 0;
+ else if (strncmp(name, "ecb(cipher_null)", ALGO_LEN) == 0)
+ *key_len = 0;
+ else if (strncmp(name, "cbc(des)", ALGO_LEN) == 0)
+ *key_len = 64;
+ else if (strncmp(name, "hmac(md5)", ALGO_LEN) == 0)
+ *key_len = 128;
+ else if (strncmp(name, "cmac(aes)", ALGO_LEN) == 0)
+ *key_len = 128;
+ else if (strncmp(name, "xcbc(aes)", ALGO_LEN) == 0)
+ *key_len = 128;
+ else if (strncmp(name, "cbc(cast5)", ALGO_LEN) == 0)
+ *key_len = 128;
+ else if (strncmp(name, "cbc(serpent)", ALGO_LEN) == 0)
+ *key_len = 128;
+ else if (strncmp(name, "hmac(sha1)", ALGO_LEN) == 0)
+ *key_len = 160;
+ else if (strncmp(name, "hmac(rmd160)", ALGO_LEN) == 0)
+ *key_len = 160;
+ else if (strncmp(name, "cbc(des3_ede)", ALGO_LEN) == 0)
+ *key_len = 192;
+ else if (strncmp(name, "hmac(sha256)", ALGO_LEN) == 0)
+ *key_len = 256;
+ else if (strncmp(name, "cbc(aes)", ALGO_LEN) == 0)
+ *key_len = 256;
+ else if (strncmp(name, "cbc(camellia)", ALGO_LEN) == 0)
+ *key_len = 256;
+ else if (strncmp(name, "cbc(twofish)", ALGO_LEN) == 0)
+ *key_len = 256;
+ else if (strncmp(name, "rfc3686(ctr(aes))", ALGO_LEN) == 0)
+ *key_len = 288;
+ else if (strncmp(name, "hmac(sha384)", ALGO_LEN) == 0)
+ *key_len = 384;
+ else if (strncmp(name, "cbc(blowfish)", ALGO_LEN) == 0)
+ *key_len = 448;
+ else if (strncmp(name, "hmac(sha512)", ALGO_LEN) == 0)
+ *key_len = 512;
+ else if (strncmp(name, "rfc4106(gcm(aes))-128", ALGO_LEN) == 0)
+ *key_len = 160;
+ else if (strncmp(name, "rfc4543(gcm(aes))-128", ALGO_LEN) == 0)
+ *key_len = 160;
+ else if (strncmp(name, "rfc4309(ccm(aes))-128", ALGO_LEN) == 0)
+ *key_len = 152;
+ else if (strncmp(name, "rfc4106(gcm(aes))-192", ALGO_LEN) == 0)
+ *key_len = 224;
+ else if (strncmp(name, "rfc4543(gcm(aes))-192", ALGO_LEN) == 0)
+ *key_len = 224;
+ else if (strncmp(name, "rfc4309(ccm(aes))-192", ALGO_LEN) == 0)
+ *key_len = 216;
+ else if (strncmp(name, "rfc4106(gcm(aes))-256", ALGO_LEN) == 0)
+ *key_len = 288;
+ else if (strncmp(name, "rfc4543(gcm(aes))-256", ALGO_LEN) == 0)
+ *key_len = 288;
+ else if (strncmp(name, "rfc4309(ccm(aes))-256", ALGO_LEN) == 0)
+ *key_len = 280;
+ else if (strncmp(name, "rfc7539(chacha20,poly1305)-128", ALGO_LEN) == 0)
+ *key_len = 0;
+
+ if (*key_len > buf_len) {
+ printk("Can't pack a key - too big for buffer");
+ return -1;
+ }
+
+ randomize_buffer(buf, *key_len);
+
+ return 0;
+}
+
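+/*
+ * Append the algorithm attribute that matches desc->proto: XFRMA_ALG_AUTH
+ * for AH, XFRMA_ALG_COMP for IPcomp, and for ESP either a single AEAD
+ * attribute or a crypt + auth pair.
+ */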
+static int xfrm_state_pack_algo(struct nlmsghdr *nh, size_t req_sz,
+ struct xfrm_desc *desc)
+{
+ struct {
+ union {
+ struct xfrm_algo alg;
+ struct xfrm_algo_aead aead;
+ struct xfrm_algo_auth auth;
+ } u;
+ char buf[XFRM_ALGO_KEY_BUF_SIZE];
+ } alg = {};
+ size_t alen, elen, clen, aelen;
+ unsigned short type;
+
+ alen = strlen(desc->a_algo);
+ elen = strlen(desc->e_algo);
+ clen = strlen(desc->c_algo);
+ aelen = strlen(desc->ae_algo);
+
+ /* Verify desc */
+ switch (desc->proto) {
+ case IPPROTO_AH:
+ if (!alen || elen || clen || aelen) {
+ printk("BUG: buggy ah desc");
+ return -1;
+ }
+ strncpy(alg.u.alg.alg_name, desc->a_algo, ALGO_LEN - 1);
+ if (xfrm_fill_key(desc->a_algo, alg.u.alg.alg_key,
+ sizeof(alg.buf), &alg.u.alg.alg_key_len))
+ return -1;
+ type = XFRMA_ALG_AUTH;
+ break;
+ case IPPROTO_COMP:
+ if (!clen || elen || alen || aelen) {
+ printk("BUG: buggy comp desc");
+ return -1;
+ }
+ strncpy(alg.u.alg.alg_name, desc->c_algo, ALGO_LEN - 1);
+ if (xfrm_fill_key(desc->c_algo, alg.u.alg.alg_key,
+ sizeof(alg.buf), &alg.u.alg.alg_key_len))
+ return -1;
+ type = XFRMA_ALG_COMP;
+ break;
+ case IPPROTO_ESP:
+ if (!((alen && elen) ^ aelen) || clen) {
+ printk("BUG: buggy esp desc");
+ return -1;
+ }
+ if (aelen) {
+ alg.u.aead.alg_icv_len = desc->icv_len;
+ strncpy(alg.u.aead.alg_name, desc->ae_algo, ALGO_LEN - 1);
+ if (xfrm_fill_key(desc->ae_algo, alg.u.aead.alg_key,
+ sizeof(alg.buf), &alg.u.aead.alg_key_len))
+ return -1;
+ type = XFRMA_ALG_AEAD;
+ } else {
+ strncpy(alg.u.alg.alg_name, desc->e_algo, ALGO_LEN - 1);
+ type = XFRMA_ALG_CRYPT;
+ if (xfrm_fill_key(desc->e_algo, alg.u.alg.alg_key,
+ sizeof(alg.buf), &alg.u.alg.alg_key_len))
+ return -1;
+ if (rtattr_pack(nh, req_sz, type, &alg, sizeof(alg)))
+ return -1;
+
+ strncpy(alg.u.alg.alg_name, desc->a_algo, ALGO_LEN);
+ type = XFRMA_ALG_AUTH;
+ if (xfrm_fill_key(desc->a_algo, alg.u.alg.alg_key,
+ sizeof(alg.buf), &alg.u.alg.alg_key_len))
+ return -1;
+ }
+ break;
+ default:
+ printk("BUG: unknown proto in desc");
+ return -1;
+ }
+
+ if (rtattr_pack(nh, req_sz, type, &alg, sizeof(alg)))
+ return -1;
+
+ return 0;
+}
+
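+/* Derive a unique SPI from the host part of the tunnel source address. */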
+static inline uint32_t gen_spi(struct in_addr src)
+{
+ return htonl(inet_lnaof(src));
+}
+
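+/*
+ * Install a tunnel-mode SA with XFRM_MSG_NEWSA: selector and id describe
+ * the src/dst pair, lifetimes are set to infinite, and the algorithm
+ * attributes are appended by xfrm_state_pack_algo().
+ */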
+static int xfrm_state_add(int xfrm_sock, uint32_t seq, uint32_t spi,
+ struct in_addr src, struct in_addr dst,
+ struct xfrm_desc *desc)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct xfrm_usersa_info info;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_type = XFRM_MSG_NEWSA;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = seq;
+
+ /* Fill selector. */
+ memcpy(&req.info.sel.daddr, &dst, sizeof(dst));
+ memcpy(&req.info.sel.saddr, &src, sizeof(src));
+ req.info.sel.family = AF_INET;
+ req.info.sel.prefixlen_d = PREFIX_LEN;
+ req.info.sel.prefixlen_s = PREFIX_LEN;
+
+ /* Fill id */
+ memcpy(&req.info.id.daddr, &dst, sizeof(dst));
+ /* Note: zero-spi cannot be deleted */
+ req.info.id.spi = spi;
+ req.info.id.proto = desc->proto;
+
+ memcpy(&req.info.saddr, &src, sizeof(src));
+
+ /* Fill lifetime_cfg */
+ req.info.lft.soft_byte_limit = XFRM_INF;
+ req.info.lft.hard_byte_limit = XFRM_INF;
+ req.info.lft.soft_packet_limit = XFRM_INF;
+ req.info.lft.hard_packet_limit = XFRM_INF;
+
+ req.info.family = AF_INET;
+ req.info.mode = XFRM_MODE_TUNNEL;
+
+ if (xfrm_state_pack_algo(&req.nh, sizeof(req), desc))
+ return -1;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+
+ return netlink_check_answer(xfrm_sock);
+}
+
+static bool xfrm_usersa_found(struct xfrm_usersa_info *info, uint32_t spi,
+ struct in_addr src, struct in_addr dst,
+ struct xfrm_desc *desc)
+{
+ if (memcmp(&info->sel.daddr, &dst, sizeof(dst)))
+ return false;
+
+ if (memcmp(&info->sel.saddr, &src, sizeof(src)))
+ return false;
+
+ if (info->sel.family != AF_INET ||
+ info->sel.prefixlen_d != PREFIX_LEN ||
+ info->sel.prefixlen_s != PREFIX_LEN)
+ return false;
+
+ if (info->id.spi != spi || info->id.proto != desc->proto)
+ return false;
+
+ if (memcmp(&info->id.daddr, &dst, sizeof(dst)))
+ return false;
+
+ if (memcmp(&info->saddr, &src, sizeof(src)))
+ return false;
+
+ if (info->lft.soft_byte_limit != XFRM_INF ||
+ info->lft.hard_byte_limit != XFRM_INF ||
+ info->lft.soft_packet_limit != XFRM_INF ||
+ info->lft.hard_packet_limit != XFRM_INF)
+ return false;
+
+ if (info->family != AF_INET || info->mode != XFRM_MODE_TUNNEL)
+ return false;
+
+ /* XXX: check xfrm algo, see xfrm_state_pack_algo(). */
+
+ return true;
+}
+
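+/*
+ * Dump SAs with XFRM_MSG_GETSA and verify the one just added is present.
+ * The dump is filtered by source address so that tunnels created by
+ * other parallel test processes in the same netns are ignored.
+ */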
+static int xfrm_state_check(int xfrm_sock, uint32_t seq, uint32_t spi,
+ struct in_addr src, struct in_addr dst,
+ struct xfrm_desc *desc)
+{
+ struct {
+ struct nlmsghdr nh;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+ struct {
+ struct nlmsghdr nh;
+ union {
+ struct xfrm_usersa_info info;
+ int error;
+ };
+ char attrbuf[MAX_PAYLOAD];
+ } answer;
+ struct xfrm_address_filter filter = {};
+ bool found = false;
+
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(0);
+ req.nh.nlmsg_type = XFRM_MSG_GETSA;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
+ req.nh.nlmsg_seq = seq;
+
+ /*
+ * Add dump filter by source address as there may be other tunnels
+ * in this netns (if tests run in parallel).
+ */
+ filter.family = AF_INET;
+ filter.splen = 0x1f; /* 31-bit prefix match, see addr_match() */
+ memcpy(&filter.saddr, &src, sizeof(src));
+ if (rtattr_pack(&req.nh, sizeof(req), XFRMA_ADDRESS_FILTER,
+ &filter, sizeof(filter)))
+ return -1;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+
+ while (1) {
+ if (recv(xfrm_sock, &answer, sizeof(answer), 0) < 0) {
+ pr_err("recv()");
+ return -1;
+ }
+ if (answer.nh.nlmsg_type == NLMSG_ERROR) {
+ printk("NLMSG_ERROR: %d: %s",
+ answer.error, strerror(-answer.error));
+ return -1;
+ } else if (answer.nh.nlmsg_type == NLMSG_DONE) {
+ if (found)
+ return 0;
+ printk("didn't find allocated xfrm state in dump");
+ return -1;
+ } else if (answer.nh.nlmsg_type == XFRM_MSG_NEWSA) {
+ if (xfrm_usersa_found(&answer.info, spi, src, dst, desc))
+ found = true;
+ }
+ }
+}
+
+static int xfrm_set(int xfrm_sock, uint32_t *seq,
+ struct in_addr src, struct in_addr dst,
+ struct in_addr tunsrc, struct in_addr tundst,
+ struct xfrm_desc *desc)
+{
+ int err;
+
+ err = xfrm_state_add(xfrm_sock, (*seq)++, gen_spi(src), src, dst, desc);
+ if (err) {
+ printk("Failed to add xfrm state");
+ return -1;
+ }
+
+ err = xfrm_state_add(xfrm_sock, (*seq)++, gen_spi(src), dst, src, desc);
+ if (err) {
+ printk("Failed to add xfrm state");
+ return -1;
+ }
+
+ /* Check dumps for XFRM_MSG_GETSA */
+ err = xfrm_state_check(xfrm_sock, (*seq)++, gen_spi(src), src, dst, desc);
+ err |= xfrm_state_check(xfrm_sock, (*seq)++, gen_spi(src), dst, src, desc);
+ if (err) {
+ printk("Failed to check xfrm state");
+ return -1;
+ }
+
+ return 0;
+}
+
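+/*
+ * Install an XFRM policy for one direction. The xfrm_user_tmpl attribute
+ * ties the policy to a tunnel-mode template with the given SPI and proto,
+ * accepting any algorithm (all-ones aalgos/ealgos/calgos).
+ */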
+static int xfrm_policy_add(int xfrm_sock, uint32_t seq, uint32_t spi,
+ struct in_addr src, struct in_addr dst, uint8_t dir,
+ struct in_addr tunsrc, struct in_addr tundst, uint8_t proto)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct xfrm_userpolicy_info info;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+ struct xfrm_user_tmpl tmpl;
+
+ memset(&req, 0, sizeof(req));
+ memset(&tmpl, 0, sizeof(tmpl));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.info));
+ req.nh.nlmsg_type = XFRM_MSG_NEWPOLICY;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = seq;
+
+ /* Fill selector. */
+ memcpy(&req.info.sel.daddr, &dst, sizeof(tundst));
+ memcpy(&req.info.sel.saddr, &src, sizeof(tunsrc));
+ req.info.sel.family = AF_INET;
+ req.info.sel.prefixlen_d = PREFIX_LEN;
+ req.info.sel.prefixlen_s = PREFIX_LEN;
+
+ /* Fill lifetime_cfg */
+ req.info.lft.soft_byte_limit = XFRM_INF;
+ req.info.lft.hard_byte_limit = XFRM_INF;
+ req.info.lft.soft_packet_limit = XFRM_INF;
+ req.info.lft.hard_packet_limit = XFRM_INF;
+
+ req.info.dir = dir;
+
+ /* Fill tmpl */
+ memcpy(&tmpl.id.daddr, &dst, sizeof(dst));
+ /* Note: zero-spi cannot be deleted */
+ tmpl.id.spi = spi;
+ tmpl.id.proto = proto;
+ tmpl.family = AF_INET;
+ memcpy(&tmpl.saddr, &src, sizeof(src));
+ tmpl.mode = XFRM_MODE_TUNNEL;
+ tmpl.aalgos = (~(uint32_t)0);
+ tmpl.ealgos = (~(uint32_t)0);
+ tmpl.calgos = (~(uint32_t)0);
+
+ if (rtattr_pack(&req.nh, sizeof(req), XFRMA_TMPL, &tmpl, sizeof(tmpl)))
+ return -1;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+
+ return netlink_check_answer(xfrm_sock);
+}
+
+static int xfrm_prepare(int xfrm_sock, uint32_t *seq,
+ struct in_addr src, struct in_addr dst,
+ struct in_addr tunsrc, struct in_addr tundst, uint8_t proto)
+{
+ if (xfrm_policy_add(xfrm_sock, (*seq)++, gen_spi(src), src, dst,
+ XFRM_POLICY_OUT, tunsrc, tundst, proto)) {
+ printk("Failed to add xfrm policy");
+ return -1;
+ }
+
+ if (xfrm_policy_add(xfrm_sock, (*seq)++, gen_spi(src), dst, src,
+ XFRM_POLICY_IN, tunsrc, tundst, proto)) {
+ printk("Failed to add xfrm policy");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xfrm_policy_del(int xfrm_sock, uint32_t seq,
+ struct in_addr src, struct in_addr dst, uint8_t dir,
+ struct in_addr tunsrc, struct in_addr tundst)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct xfrm_userpolicy_id id;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.id));
+ req.nh.nlmsg_type = XFRM_MSG_DELPOLICY;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = seq;
+
+ /* Fill id */
+ memcpy(&req.id.sel.daddr, &dst, sizeof(tundst));
+ memcpy(&req.id.sel.saddr, &src, sizeof(tunsrc));
+ req.id.sel.family = AF_INET;
+ req.id.sel.prefixlen_d = PREFIX_LEN;
+ req.id.sel.prefixlen_s = PREFIX_LEN;
+ req.id.dir = dir;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+
+ return netlink_check_answer(xfrm_sock);
+}
+
+static int xfrm_cleanup(int xfrm_sock, uint32_t *seq,
+ struct in_addr src, struct in_addr dst,
+ struct in_addr tunsrc, struct in_addr tundst)
+{
+ if (xfrm_policy_del(xfrm_sock, (*seq)++, src, dst,
+ XFRM_POLICY_OUT, tunsrc, tundst)) {
+ printk("Failed to add xfrm policy");
+ return -1;
+ }
+
+ if (xfrm_policy_del(xfrm_sock, (*seq)++, dst, src,
+ XFRM_POLICY_IN, tunsrc, tundst)) {
+ printk("Failed to add xfrm policy");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xfrm_state_del(int xfrm_sock, uint32_t seq, uint32_t spi,
+ struct in_addr src, struct in_addr dst, uint8_t proto)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct xfrm_usersa_id id;
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+ xfrm_address_t saddr = {};
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.id));
+ req.nh.nlmsg_type = XFRM_MSG_DELSA;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = seq;
+
+ memcpy(&req.id.daddr, &dst, sizeof(dst));
+ req.id.family = AF_INET;
+ req.id.proto = proto;
+ /* Note: zero-spi cannot be deleted */
+ req.id.spi = spi;
+
+ memcpy(&saddr, &src, sizeof(src));
+ if (rtattr_pack(&req.nh, sizeof(req), XFRMA_SRCADDR, &saddr, sizeof(saddr)))
+ return -1;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return -1;
+ }
+
+ return netlink_check_answer(xfrm_sock);
+}
+
+static int xfrm_delete(int xfrm_sock, uint32_t *seq,
+ struct in_addr src, struct in_addr dst,
+ struct in_addr tunsrc, struct in_addr tundst, uint8_t proto)
+{
+ if (xfrm_state_del(xfrm_sock, (*seq)++, gen_spi(src), src, dst, proto)) {
+ printk("Failed to remove xfrm state");
+ return -1;
+ }
+
+ if (xfrm_state_del(xfrm_sock, (*seq)++, gen_spi(src), dst, src, proto)) {
+ printk("Failed to remove xfrm state");
+ return -1;
+ }
+
+ return 0;
+}
+
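+/*
+ * Request an SPI with XFRM_MSG_ALLOCSPI, pinning min == max so the reply
+ * must either contain an SA with exactly that SPI or an error.
+ */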
+static int xfrm_state_allocspi(int xfrm_sock, uint32_t *seq,
+ uint32_t spi, uint8_t proto)
+{
+ struct {
+ struct nlmsghdr nh;
+ struct xfrm_userspi_info spi;
+ } req;
+ struct {
+ struct nlmsghdr nh;
+ union {
+ struct xfrm_usersa_info info;
+ int error;
+ };
+ } answer;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.spi));
+ req.nh.nlmsg_type = XFRM_MSG_ALLOCSPI;
+ req.nh.nlmsg_flags = NLM_F_REQUEST;
+ req.nh.nlmsg_seq = (*seq)++;
+
+ req.spi.info.family = AF_INET;
+ req.spi.min = spi;
+ req.spi.max = spi;
+ req.spi.info.id.proto = proto;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ return KSFT_FAIL;
+ }
+
+ if (recv(xfrm_sock, &answer, sizeof(answer), 0) < 0) {
+ pr_err("recv()");
+ return KSFT_FAIL;
+ } else if (answer.nh.nlmsg_type == XFRM_MSG_NEWSA) {
+ uint32_t new_spi = ntohl(answer.info.id.spi);
+
+ if (new_spi != spi) {
+ printk("allocated spi is different from requested: %#x != %#x",
+ new_spi, spi);
+ return KSFT_FAIL;
+ }
+ return KSFT_PASS;
+ } else if (answer.nh.nlmsg_type != NLMSG_ERROR) {
+ printk("expected NLMSG_ERROR, got %d", (int)answer.nh.nlmsg_type);
+ return KSFT_FAIL;
+ }
+
+ printk("NLMSG_ERROR: %d: %s", answer.error, strerror(-answer.error));
+ return (answer.error) ? KSFT_FAIL : KSFT_PASS;
+}
+
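+/*
+ * Like netlink_sock(), but also bind to the given multicast groups and
+ * sanity-check the bound address.
+ */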
+static int netlink_sock_bind(int *sock, uint32_t *seq, int proto, uint32_t groups)
+{
+ struct sockaddr_nl snl = {};
+ socklen_t addr_len;
+ int ret = -1;
+
+ snl.nl_family = AF_NETLINK;
+ snl.nl_groups = groups;
+
+ if (netlink_sock(sock, seq, proto)) {
+ printk("Failed to open xfrm netlink socket");
+ return -1;
+ }
+
+ if (bind(*sock, (struct sockaddr *)&snl, sizeof(snl)) < 0) {
+ pr_err("bind()");
+ goto out_close;
+ }
+
+ addr_len = sizeof(snl);
+ if (getsockname(*sock, (struct sockaddr *)&snl, &addr_len) < 0) {
+ pr_err("getsockname()");
+ goto out_close;
+ }
+ if (addr_len != sizeof(snl)) {
+ printk("Wrong address length %d", addr_len);
+ goto out_close;
+ }
+ if (snl.nl_family != AF_NETLINK) {
+ printk("Wrong address family %d", snl.nl_family);
+ goto out_close;
+ }
+ return 0;
+
+out_close:
+ close(*sock);
+ return ret;
+}
+
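+/*
+ * Send an XFRM_MSG_ACQUIRE and listen on the XFRMNLGRP_ACQUIRE multicast
+ * group to verify that the broadcast message reaches userspace with the
+ * algorithm fields unchanged.
+ */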
+static int xfrm_monitor_acquire(int xfrm_sock, uint32_t *seq, unsigned int nr)
+{
+ struct {
+ struct nlmsghdr nh;
+ union {
+ struct xfrm_user_acquire acq;
+ int error;
+ };
+ char attrbuf[MAX_PAYLOAD];
+ } req;
+ struct xfrm_user_tmpl xfrm_tmpl = {};
+ int xfrm_listen = -1, ret = KSFT_FAIL;
+ uint32_t seq_listen;
+
+ if (netlink_sock_bind(&xfrm_listen, &seq_listen, NETLINK_XFRM, XFRMNLGRP_ACQUIRE))
+ return KSFT_FAIL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.acq));
+ req.nh.nlmsg_type = XFRM_MSG_ACQUIRE;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = (*seq)++;
+
+ req.acq.policy.sel.family = AF_INET;
+ req.acq.aalgos = 0xfeed;
+ req.acq.ealgos = 0xbaad;
+ req.acq.calgos = 0xbabe;
+
+ xfrm_tmpl.family = AF_INET;
+ xfrm_tmpl.id.proto = IPPROTO_ESP;
+ if (rtattr_pack(&req.nh, sizeof(req), XFRMA_TMPL, &xfrm_tmpl, sizeof(xfrm_tmpl)))
+ goto out_close;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ goto out_close;
+ }
+
+ if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ goto out_close;
+ } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+ printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+ goto out_close;
+ }
+
+ if (req.error) {
+ printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+ ret = req.error;
+ goto out_close;
+ }
+
+ if (recv(xfrm_listen, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ goto out_close;
+ }
+
+ if (req.acq.aalgos != 0xfeed || req.acq.ealgos != 0xbaad
+ || req.acq.calgos != 0xbabe) {
+ printk("xfrm_user_acquire has changed %x %x %x",
+ req.acq.aalgos, req.acq.ealgos, req.acq.calgos);
+ goto out_close;
+ }
+
+ ret = KSFT_PASS;
+out_close:
+ close(xfrm_listen);
+ return ret;
+}
+
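+/*
+ * Add an SA, inject an XFRM_MSG_EXPIRE for it and check that the expire
+ * notification seen on XFRMNLGRP_EXPIRE has the hard flag set.
+ */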
+static int xfrm_expire_state(int xfrm_sock, uint32_t *seq,
+ unsigned int nr, struct xfrm_desc *desc)
+{
+ struct {
+ struct nlmsghdr nh;
+ union {
+ struct xfrm_user_expire expire;
+ int error;
+ };
+ } req;
+ struct in_addr src, dst;
+ int xfrm_listen = -1, ret = KSFT_FAIL;
+ uint32_t seq_listen;
+
+ src = inet_makeaddr(INADDR_B, child_ip(nr));
+ dst = inet_makeaddr(INADDR_B, grchild_ip(nr));
+
+ if (xfrm_state_add(xfrm_sock, (*seq)++, gen_spi(src), src, dst, desc)) {
+ printk("Failed to add xfrm state");
+ return KSFT_FAIL;
+ }
+
+ if (netlink_sock_bind(&xfrm_listen, &seq_listen, NETLINK_XFRM, XFRMNLGRP_EXPIRE))
+ return KSFT_FAIL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.expire));
+ req.nh.nlmsg_type = XFRM_MSG_EXPIRE;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = (*seq)++;
+
+ memcpy(&req.expire.state.id.daddr, &dst, sizeof(dst));
+ req.expire.state.id.spi = gen_spi(src);
+ req.expire.state.id.proto = desc->proto;
+ req.expire.state.family = AF_INET;
+ req.expire.hard = 0xff;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ goto out_close;
+ }
+
+ if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ goto out_close;
+ } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+ printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+ goto out_close;
+ }
+
+ if (req.error) {
+ printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+ ret = req.error;
+ goto out_close;
+ }
+
+ if (recv(xfrm_listen, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ goto out_close;
+ }
+
+ if (req.expire.hard != 0x1) {
+ printk("expire.hard is not set: %x", req.expire.hard);
+ goto out_close;
+ }
+
+ ret = KSFT_PASS;
+out_close:
+ close(xfrm_listen);
+ return ret;
+}
+
+static int xfrm_expire_policy(int xfrm_sock, uint32_t *seq,
+ unsigned int nr, struct xfrm_desc *desc)
+{
+ struct {
+ struct nlmsghdr nh;
+ union {
+ struct xfrm_user_polexpire expire;
+ int error;
+ };
+ } req;
+ struct in_addr src, dst, tunsrc, tundst;
+ int xfrm_listen = -1, ret = KSFT_FAIL;
+ uint32_t seq_listen;
+
+ src = inet_makeaddr(INADDR_B, child_ip(nr));
+ dst = inet_makeaddr(INADDR_B, grchild_ip(nr));
+ tunsrc = inet_makeaddr(INADDR_A, child_ip(nr));
+ tundst = inet_makeaddr(INADDR_A, grchild_ip(nr));
+
+ if (xfrm_policy_add(xfrm_sock, (*seq)++, gen_spi(src), src, dst,
+ XFRM_POLICY_OUT, tunsrc, tundst, desc->proto)) {
+ printk("Failed to add xfrm policy");
+ return KSFT_FAIL;
+ }
+
+ if (netlink_sock_bind(&xfrm_listen, &seq_listen, NETLINK_XFRM, XFRMNLGRP_EXPIRE))
+ return KSFT_FAIL;
+
+ memset(&req, 0, sizeof(req));
+ req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(req.expire));
+ req.nh.nlmsg_type = XFRM_MSG_POLEXPIRE;
+ req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ req.nh.nlmsg_seq = (*seq)++;
+
+ /* Fill selector. */
+ memcpy(&req.expire.pol.sel.daddr, &dst, sizeof(tundst));
+ memcpy(&req.expire.pol.sel.saddr, &src, sizeof(tunsrc));
+ req.expire.pol.sel.family = AF_INET;
+ req.expire.pol.sel.prefixlen_d = PREFIX_LEN;
+ req.expire.pol.sel.prefixlen_s = PREFIX_LEN;
+ req.expire.pol.dir = XFRM_POLICY_OUT;
+ req.expire.hard = 0xff;
+
+ if (send(xfrm_sock, &req, req.nh.nlmsg_len, 0) < 0) {
+ pr_err("send()");
+ goto out_close;
+ }
+
+ if (recv(xfrm_sock, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ goto out_close;
+ } else if (req.nh.nlmsg_type != NLMSG_ERROR) {
+ printk("expected NLMSG_ERROR, got %d", (int)req.nh.nlmsg_type);
+ goto out_close;
+ }
+
+ if (req.error) {
+ printk("NLMSG_ERROR: %d: %s", req.error, strerror(-req.error));
+ ret = req.error;
+ goto out_close;
+ }
+
+ if (recv(xfrm_listen, &req, sizeof(req), 0) < 0) {
+ pr_err("recv()");
+ goto out_close;
+ }
+
+ if (req.expire.hard != 0x1) {
+ printk("expire.hard is not set: %x", req.expire.hard);
+ goto out_close;
+ }
+
+ ret = KSFT_PASS;
+out_close:
+ close(xfrm_listen);
+ return ret;
+}
+
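+/*
+ * One CREATE_TUNNEL iteration: ping without xfrm, then prepare policies,
+ * add states, ping through the tunnel, and finally delete and clean up.
+ * Each step is mirrored on the grandchild via the cmd socketpair.
+ */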
+static int child_serv(int xfrm_sock, uint32_t *seq,
+ unsigned int nr, int cmd_fd, void *buf, struct xfrm_desc *desc)
+{
+ struct in_addr src, dst, tunsrc, tundst;
+ struct test_desc msg;
+ int ret = KSFT_FAIL;
+
+ src = inet_makeaddr(INADDR_B, child_ip(nr));
+ dst = inet_makeaddr(INADDR_B, grchild_ip(nr));
+ tunsrc = inet_makeaddr(INADDR_A, child_ip(nr));
+ tundst = inet_makeaddr(INADDR_A, grchild_ip(nr));
+
+ /* UDP pinging without xfrm */
+ if (do_ping(cmd_fd, buf, page_size, src, true, 0, 0, udp_ping_send)) {
+ printk("ping failed before setting xfrm");
+ return KSFT_FAIL;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_XFRM_PREPARE;
+ memcpy(&msg.body.xfrm_desc, desc, sizeof(*desc));
+ write_msg(cmd_fd, &msg, 1);
+
+ if (xfrm_prepare(xfrm_sock, seq, src, dst, tunsrc, tundst, desc->proto)) {
+ printk("failed to prepare xfrm");
+ goto cleanup;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_XFRM_ADD;
+ memcpy(&msg.body.xfrm_desc, desc, sizeof(*desc));
+ write_msg(cmd_fd, &msg, 1);
+ if (xfrm_set(xfrm_sock, seq, src, dst, tunsrc, tundst, desc)) {
+ printk("failed to set xfrm");
+ goto delete;
+ }
+
+ /* UDP pinging with xfrm tunnel */
+ if (do_ping(cmd_fd, buf, page_size, tunsrc,
+ true, 0, 0, udp_ping_send)) {
+ printk("ping failed for xfrm");
+ goto delete;
+ }
+
+ ret = KSFT_PASS;
+delete:
+ /* xfrm delete */
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_XFRM_DEL;
+ memcpy(&msg.body.xfrm_desc, desc, sizeof(*desc));
+ write_msg(cmd_fd, &msg, 1);
+
+ if (xfrm_delete(xfrm_sock, seq, src, dst, tunsrc, tundst, desc->proto)) {
+ printk("failed ping to remove xfrm");
+ ret = KSFT_FAIL;
+ }
+
+cleanup:
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_XFRM_CLEANUP;
+ memcpy(&msg.body.xfrm_desc, desc, sizeof(*desc));
+ write_msg(cmd_fd, &msg, 1);
+ if (xfrm_cleanup(xfrm_sock, seq, src, dst, tunsrc, tundst)) {
+ printk("failed ping to cleanup xfrm");
+ ret = KSFT_FAIL;
+ }
+ return ret;
+}
+
+static int child_f(unsigned int nr, int test_desc_fd, int cmd_fd, void *buf)
+{
+ struct xfrm_desc desc;
+ struct test_desc msg;
+ int xfrm_sock = -1;
+ uint32_t seq;
+
+ if (switch_ns(nsfd_childa))
+ exit(KSFT_FAIL);
+
+ if (netlink_sock(&xfrm_sock, &seq, NETLINK_XFRM)) {
+ printk("Failed to open xfrm netlink socket");
+ exit(KSFT_FAIL);
+ }
+
+ /* Check that the command socket is ready, just to be sure. */
+ memset(&msg, 0, sizeof(msg));
+ msg.type = MSG_ACK;
+ write_msg(cmd_fd, &msg, 1);
+ read_msg(cmd_fd, &msg, 1);
+ if (msg.type != MSG_ACK) {
+ printk("Ack failed");
+ exit(KSFT_FAIL);
+ }
+
+ for (;;) {
+ ssize_t received = read(test_desc_fd, &desc, sizeof(desc));
+ int ret;
+
+ if (received == 0) /* EOF */
+ break;
+
+ if (received != sizeof(desc)) {
+ pr_err("read() returned %zd", received);
+ exit(KSFT_FAIL);
+ }
+
+ switch (desc.type) {
+ case CREATE_TUNNEL:
+ ret = child_serv(xfrm_sock, &seq, nr,
+ cmd_fd, buf, &desc);
+ break;
+ case ALLOCATE_SPI:
+ ret = xfrm_state_allocspi(xfrm_sock, &seq,
+ -1, desc.proto);
+ break;
+ case MONITOR_ACQUIRE:
+ ret = xfrm_monitor_acquire(xfrm_sock, &seq, nr);
+ break;
+ case EXPIRE_STATE:
+ ret = xfrm_expire_state(xfrm_sock, &seq, nr, &desc);
+ break;
+ case EXPIRE_POLICY:
+ ret = xfrm_expire_policy(xfrm_sock, &seq, nr, &desc);
+ break;
+ default:
+ printk("Unknown desc type %d", desc.type);
+ exit(KSFT_FAIL);
+ }
+ write_test_result(ret, &desc);
+ }
+
+ close(xfrm_sock);
+
+ msg.type = MSG_EXIT;
+ write_msg(cmd_fd, &msg, 1);
+ exit(KSFT_PASS);
+}
+
+static void grand_child_serv(unsigned int nr, int cmd_fd, void *buf,
+ struct test_desc *msg, int xfrm_sock, uint32_t *seq)
+{
+ struct in_addr src, dst, tunsrc, tundst;
+ bool tun_reply;
+ struct xfrm_desc *desc = &msg->body.xfrm_desc;
+
+ src = inet_makeaddr(INADDR_B, grchild_ip(nr));
+ dst = inet_makeaddr(INADDR_B, child_ip(nr));
+ tunsrc = inet_makeaddr(INADDR_A, grchild_ip(nr));
+ tundst = inet_makeaddr(INADDR_A, child_ip(nr));
+
+ switch (msg->type) {
+ case MSG_EXIT:
+ exit(KSFT_PASS);
+ case MSG_ACK:
+ write_msg(cmd_fd, msg, 1);
+ break;
+ case MSG_PING:
+ tun_reply = memcmp(&dst, &msg->body.ping.reply_ip, sizeof(in_addr_t));
+ /* UDP pinging without xfrm */
+ if (do_ping(cmd_fd, buf, page_size, tun_reply ? tunsrc : src,
+ false, msg->body.ping.port,
+ msg->body.ping.reply_ip, udp_ping_reply)) {
+ printk("ping failed before setting xfrm");
+ }
+ break;
+ case MSG_XFRM_PREPARE:
+ if (xfrm_prepare(xfrm_sock, seq, src, dst, tunsrc, tundst,
+ desc->proto)) {
+ xfrm_cleanup(xfrm_sock, seq, src, dst, tunsrc, tundst);
+ printk("failed to prepare xfrm");
+ }
+ break;
+ case MSG_XFRM_ADD:
+ if (xfrm_set(xfrm_sock, seq, src, dst, tunsrc, tundst, desc)) {
+ xfrm_cleanup(xfrm_sock, seq, src, dst, tunsrc, tundst);
+ printk("failed to set xfrm");
+ }
+ break;
+ case MSG_XFRM_DEL:
+ if (xfrm_delete(xfrm_sock, seq, src, dst, tunsrc, tundst,
+ desc->proto)) {
+ xfrm_cleanup(xfrm_sock, seq, src, dst, tunsrc, tundst);
+ printk("failed to remove xfrm");
+ }
+ break;
+ case MSG_XFRM_CLEANUP:
+ if (xfrm_cleanup(xfrm_sock, seq, src, dst, tunsrc, tundst)) {
+ printk("failed to cleanup xfrm");
+ }
+ break;
+ default:
+ printk("got unknown msg type %d", msg->type);
+ }
+}
+
+static int grand_child_f(unsigned int nr, int cmd_fd, void *buf)
+{
+ struct test_desc msg;
+ int xfrm_sock = -1;
+ uint32_t seq;
+
+ if (switch_ns(nsfd_childb))
+ exit(KSFT_FAIL);
+
+ if (netlink_sock(&xfrm_sock, &seq, NETLINK_XFRM)) {
+ printk("Failed to open xfrm netlink socket");
+ exit(KSFT_FAIL);
+ }
+
+ do {
+ read_msg(cmd_fd, &msg, 1);
+ grand_child_serv(nr, cmd_fd, buf, &msg, xfrm_sock, &seq);
+ } while (1);
+
+ close(xfrm_sock);
+ exit(KSFT_FAIL);
+}
+
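+/*
+ * Fork the per-tunnel pair: the child runs in netns A and drives the
+ * test plan, the grandchild runs in netns B and serves its requests.
+ * The parent returns to its own netns and continues as the selftest.
+ */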
+static int start_child(unsigned int nr, char *veth, int test_desc_fd[2])
+{
+ int cmd_sock[2];
+ void *data_map;
+ pid_t child;
+
+ if (init_child(nsfd_childa, veth, child_ip(nr), grchild_ip(nr)))
+ return -1;
+
+ if (init_child(nsfd_childb, veth, grchild_ip(nr), child_ip(nr)))
+ return -1;
+
+ child = fork();
+ if (child < 0) {
+ pr_err("fork()");
+ return -1;
+ } else if (child) {
+ /* in parent - selftest */
+ return switch_ns(nsfd_parent);
+ }
+
+ if (close(test_desc_fd[1])) {
+ pr_err("close()");
+ return -1;
+ }
+
+ /* child */
+ data_map = mmap(0, page_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (data_map == MAP_FAILED) {
+ pr_err("mmap()");
+ return -1;
+ }
+
+ randomize_buffer(data_map, page_size);
+
+ if (socketpair(PF_LOCAL, SOCK_SEQPACKET, 0, cmd_sock)) {
+ pr_err("socketpair()");
+ return -1;
+ }
+
+ child = fork();
+ if (child < 0) {
+ pr_err("fork()");
+ return -1;
+ } else if (child) {
+ if (close(cmd_sock[0])) {
+ pr_err("close()");
+ return -1;
+ }
+ return child_f(nr, test_desc_fd[0], cmd_sock[1], data_map);
+ }
+ if (close(cmd_sock[1])) {
+ pr_err("close()");
+ return -1;
+ }
+ return grand_child_f(nr, cmd_sock[0], data_map);
+}
+
+static void exit_usage(char **argv)
+{
+ printk("Usage: %s [nr_process]", argv[0]);
+ exit(KSFT_FAIL);
+}
+
+static int __write_desc(int test_desc_fd, struct xfrm_desc *desc)
+{
+ ssize_t ret;
+
+ ret = write(test_desc_fd, desc, sizeof(*desc));
+
+ if (ret == sizeof(*desc))
+ return 0;
+
+ pr_err("Writing test's desc failed %ld", ret);
+
+ return -1;
+}
+
+static int write_desc(int proto, int test_desc_fd,
+ char *a, char *e, char *c, char *ae)
+{
+ struct xfrm_desc desc = {};
+
+ desc.type = CREATE_TUNNEL;
+ desc.proto = proto;
+
+ if (a)
+ strncpy(desc.a_algo, a, ALGO_LEN - 1);
+ if (e)
+ strncpy(desc.e_algo, e, ALGO_LEN - 1);
+ if (c)
+ strncpy(desc.c_algo, c, ALGO_LEN - 1);
+ if (ae)
+ strncpy(desc.ae_algo, ae, ALGO_LEN - 1);
+
+ return __write_desc(test_desc_fd, &desc);
+}
+
+int proto_list[] = { IPPROTO_AH, IPPROTO_COMP, IPPROTO_ESP };
+char *ah_list[] = {
+ "digest_null", "hmac(md5)", "hmac(sha1)", "hmac(sha256)",
+ "hmac(sha384)", "hmac(sha512)", "hmac(rmd160)",
+ "xcbc(aes)", "cmac(aes)"
+};
+char *comp_list[] = {
+ "deflate",
+#if 0
+ /* No compression backend implementation */
+ "lzs", "lzjh"
+#endif
+};
+char *e_list[] = {
+ "ecb(cipher_null)", "cbc(des)", "cbc(des3_ede)", "cbc(cast5)",
+ "cbc(blowfish)", "cbc(aes)", "cbc(serpent)", "cbc(camellia)",
+ "cbc(twofish)", "rfc3686(ctr(aes))"
+};
+char *ae_list[] = {
+#if 0
+ /* not implemented */
+ "rfc4106(gcm(aes))", "rfc4309(ccm(aes))", "rfc4543(gcm(aes))",
+ "rfc7539esp(chacha20,poly1305)"
+#endif
+};
+
+const unsigned int proto_plan = ARRAY_SIZE(ah_list) + ARRAY_SIZE(comp_list) \
+ + (ARRAY_SIZE(ah_list) * ARRAY_SIZE(e_list)) \
+ + ARRAY_SIZE(ae_list);
+
+static int write_proto_plan(int fd, int proto)
+{
+ unsigned int i;
+
+ switch (proto) {
+ case IPPROTO_AH:
+ for (i = 0; i < ARRAY_SIZE(ah_list); i++) {
+ if (write_desc(proto, fd, ah_list[i], 0, 0, 0))
+ return -1;
+ }
+ break;
+ case IPPROTO_COMP:
+ for (i = 0; i < ARRAY_SIZE(comp_list); i++) {
+ if (write_desc(proto, fd, 0, 0, comp_list[i], 0))
+ return -1;
+ }
+ break;
+ case IPPROTO_ESP:
+ for (i = 0; i < ARRAY_SIZE(ah_list); i++) {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(e_list); j++) {
+ if (write_desc(proto, fd, ah_list[i],
+ e_list[j], 0, 0))
+ return -1;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(ae_list); i++) {
+ if (write_desc(proto, fd, 0, 0, 0, ae_list[i]))
+ return -1;
+ }
+ break;
+ default:
+ printk("BUG: Specified unknown proto %d", proto);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Some structures in xfrm uapi header differ in size between
+ * 64-bit and 32-bit ABI:
+ *
+ * 32-bit UABI | 64-bit UABI
+ * -------------------------------------|-------------------------------------
+ * sizeof(xfrm_usersa_info) = 220 | sizeof(xfrm_usersa_info) = 224
+ * sizeof(xfrm_userpolicy_info) = 164 | sizeof(xfrm_userpolicy_info) = 168
+ * sizeof(xfrm_userspi_info) = 228 | sizeof(xfrm_userspi_info) = 232
+ * sizeof(xfrm_user_acquire) = 276 | sizeof(xfrm_user_acquire) = 280
+ * sizeof(xfrm_user_expire) = 224 | sizeof(xfrm_user_expire) = 232
+ * sizeof(xfrm_user_polexpire) = 168 | sizeof(xfrm_user_polexpire) = 176
+ *
+ * Check the affected by the UABI difference structures.
+ */
+const unsigned int compat_plan = 4;
+static int write_compat_struct_tests(int test_desc_fd)
+{
+ struct xfrm_desc desc = {};
+
+ desc.type = ALLOCATE_SPI;
+ desc.proto = IPPROTO_AH;
+ strncpy(desc.a_algo, ah_list[0], ALGO_LEN - 1);
+
+ if (__write_desc(test_desc_fd, &desc))
+ return -1;
+
+ desc.type = MONITOR_ACQUIRE;
+ if (__write_desc(test_desc_fd, &desc))
+ return -1;
+
+ desc.type = EXPIRE_STATE;
+ if (__write_desc(test_desc_fd, &desc))
+ return -1;
+
+ desc.type = EXPIRE_POLICY;
+ if (__write_desc(test_desc_fd, &desc))
+ return -1;
+
+ return 0;
+}
+
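+/*
+ * Write the test plan from a forked child: the descriptors then stream
+ * into the O_DIRECT pipe while the readers consume them, so a long plan
+ * does not block the main process on a full pipe.
+ */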
+static int write_test_plan(int test_desc_fd)
+{
+ unsigned int i;
+ pid_t child;
+
+ child = fork();
+ if (child < 0) {
+ pr_err("fork()");
+ return -1;
+ }
+ if (child) {
+ if (close(test_desc_fd))
+ printk("close(): %m");
+ return 0;
+ }
+
+ if (write_compat_struct_tests(test_desc_fd))
+ exit(KSFT_FAIL);
+
+ for (i = 0; i < ARRAY_SIZE(proto_list); i++) {
+ if (write_proto_plan(test_desc_fd, proto_list[i]))
+ exit(KSFT_FAIL);
+ }
+
+ exit(KSFT_PASS);
+}
+
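+/* Reap all children; any abnormal exit or KSFT_FAIL fails the run. */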
+static int children_cleanup(void)
+{
+ int ret = KSFT_PASS;
+
+ while (1) {
+ int status;
+ pid_t p = wait(&status);
+
+ if ((p < 0) && errno == ECHILD)
+ break;
+
+ if (p < 0) {
+ pr_err("wait()");
+ return KSFT_FAIL;
+ }
+
+ if (!WIFEXITED(status)) {
+ ret = KSFT_FAIL;
+ continue;
+ }
+
+ if (WEXITSTATUS(status) == KSFT_FAIL)
+ ret = KSFT_FAIL;
+ }
+
+ return ret;
+}
+
+typedef void (*print_res)(const char *, ...);
+
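+/*
+ * Drain the results pipe and report each test through the kselftest
+ * helpers; any KSFT_FAIL result fails the whole run.
+ */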
+static int check_results(void)
+{
+ struct test_result tr = {};
+ struct xfrm_desc *d = &tr.desc;
+ int ret = KSFT_PASS;
+
+ while (1) {
+ ssize_t received = read(results_fd[0], &tr, sizeof(tr));
+ print_res result;
+
+ if (received == 0) /* EOF */
+ break;
+
+ if (received != sizeof(tr)) {
+ pr_err("read() returned %zd", received);
+ return KSFT_FAIL;
+ }
+
+ switch (tr.res) {
+ case KSFT_PASS:
+ result = ksft_test_result_pass;
+ break;
+ case KSFT_FAIL:
+ default:
+ result = ksft_test_result_fail;
+ ret = KSFT_FAIL;
+ }
+
+ result(" %s: [%u, '%s', '%s', '%s', '%s', %u]\n",
+ desc_name[d->type], (unsigned int)d->proto, d->a_algo,
+ d->e_algo, d->c_algo, d->ae_algo, d->icv_len);
+ }
+
+ return ret;
+}
+
+int main(int argc, char **argv)
+{
+ unsigned int nr_process = 1;
+ int route_sock = -1, ret = KSFT_SKIP;
+ int test_desc_fd[2];
+ uint32_t route_seq;
+ unsigned int i;
+
+ if (argc > 2)
+ exit_usage(argv);
+
+ if (argc > 1) {
+ char *endptr;
+
+ errno = 0;
+ nr_process = strtol(argv[1], &endptr, 10);
+ if ((errno == ERANGE && (nr_process == LONG_MAX || nr_process == LONG_MIN))
+ || (errno != 0 && nr_process == 0)
+ || (endptr == argv[1]) || (*endptr != '\0')) {
+ printk("Failed to parse [nr_process]");
+ exit_usage(argv);
+ }
+
+ if (nr_process > MAX_PROCESSES || !nr_process) {
+ printk("nr_process should be between [1; %u]",
+ MAX_PROCESSES);
+ exit_usage(argv);
+ }
+ }
+
+ srand(time(NULL));
+ page_size = sysconf(_SC_PAGESIZE);
+ if (page_size < 1)
+ ksft_exit_skip("sysconf(): %m\n");
+
+ if (pipe2(test_desc_fd, O_DIRECT) < 0)
+ ksft_exit_skip("pipe(): %m\n");
+
+ if (pipe2(results_fd, O_DIRECT) < 0)
+ ksft_exit_skip("pipe(): %m\n");
+
+ if (init_namespaces())
+ ksft_exit_skip("Failed to create namespaces\n");
+
+ if (netlink_sock(&route_sock, &route_seq, NETLINK_ROUTE))
+ ksft_exit_skip("Failed to open netlink route socket\n");
+
+ for (i = 0; i < nr_process; i++) {
+ char veth[VETH_LEN];
+
+ snprintf(veth, VETH_LEN, VETH_FMT, i);
+
+ if (veth_add(route_sock, route_seq++, veth, nsfd_childa, veth, nsfd_childb)) {
+ close(route_sock);
+ ksft_exit_fail_msg("Failed to create veth device");
+ }
+
+ if (start_child(i, veth, test_desc_fd)) {
+ close(route_sock);
+ ksft_exit_fail_msg("Child %u failed to start", i);
+ }
+ }
+
+ if (close(route_sock) || close(test_desc_fd[0]) || close(results_fd[1]))
+ ksft_exit_fail_msg("close(): %m");
+
+ ksft_set_plan(proto_plan + compat_plan);
+
+ if (write_test_plan(test_desc_fd[1]))
+ ksft_exit_fail_msg("Failed to write test plan to pipe");
+
+ ret = check_results();
+
+ if (children_cleanup() == KSFT_FAIL)
+ exit(KSFT_FAIL);
+
+ exit(ret);
+}
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index aa254aefc2c3..00bb158b4a5d 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -5,7 +5,8 @@ KSFT_KHDR_INSTALL := 1
CFLAGS = -Wall -Wl,--no-as-needed -O2 -g -I$(top_srcdir)/usr/include
-TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh
+TEST_PROGS := mptcp_connect.sh pm_netlink.sh mptcp_join.sh diag.sh \
+ simult_flows.sh
TEST_GEN_FILES = mptcp_connect pm_nl_ctl
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.c b/tools/testing/selftests/net/mptcp/mptcp_connect.c
index 090620c3e10c..77bb62feb872 100644
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.c
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.c
@@ -54,6 +54,7 @@ static int pf = AF_INET;
static int cfg_sndbuf;
static int cfg_rcvbuf;
static bool cfg_join;
+static bool cfg_remove;
static int cfg_wait;
static void die_usage(void)
@@ -65,8 +66,8 @@ static void die_usage(void)
fprintf(stderr, "\t-S num -- set SO_SNDBUF to num\n");
fprintf(stderr, "\t-R num -- set SO_RCVBUF to num\n");
fprintf(stderr, "\t-p num -- use port num\n");
- fprintf(stderr, "\t-m [MPTCP|TCP] -- use tcp or mptcp sockets\n");
- fprintf(stderr, "\t-s [mmap|poll] -- use poll (default) or mmap\n");
+ fprintf(stderr, "\t-s [MPTCP|TCP] -- use mptcp(default) or tcp sockets\n");
+ fprintf(stderr, "\t-m [poll|mmap|sendfile] -- use poll(default)/mmap+write/sendfile\n");
fprintf(stderr, "\t-u -- check mptcp ulp\n");
fprintf(stderr, "\t-w num -- wait num sec before closing the socket\n");
exit(1);
@@ -271,6 +272,9 @@ static size_t do_rnd_write(const int fd, char *buf, const size_t len)
if (cfg_join && first && do_w > 100)
do_w = 100;
+ if (cfg_remove && do_w > 50)
+ do_w = 50;
+
bw = write(fd, buf, do_w);
if (bw < 0)
perror("write");
@@ -281,6 +285,9 @@ static size_t do_rnd_write(const int fd, char *buf, const size_t len)
first = false;
}
+ if (cfg_remove)
+ usleep(200000);
+
return bw;
}
@@ -428,7 +435,7 @@ static int copyfd_io_poll(int infd, int peerfd, int outfd)
}
/* leave some time for late join/announce */
- if (cfg_join)
+ if (cfg_join || cfg_remove)
usleep(cfg_wait);
close(peerfd);
@@ -686,7 +693,7 @@ static void maybe_close(int fd)
{
unsigned int r = rand();
- if (!cfg_join && (r & 1))
+ if (!(cfg_join || cfg_remove) && (r & 1))
close(fd);
}
@@ -822,13 +829,18 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "6jlp:s:hut:m:S:R:w:")) != -1) {
+ while ((c = getopt(argc, argv, "6jrlp:s:hut:m:S:R:w:")) != -1) {
switch (c) {
case 'j':
cfg_join = true;
cfg_mode = CFG_MODE_POLL;
cfg_wait = 400000;
break;
+ case 'r':
+ cfg_remove = true;
+ cfg_mode = CFG_MODE_POLL;
+ cfg_wait = 400000;
+ break;
case 'l':
listen_mode = true;
break;
diff --git a/tools/testing/selftests/net/mptcp/mptcp_connect.sh b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
index 57d75b7f6220..2cfd87d94db8 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_connect.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_connect.sh
@@ -14,9 +14,8 @@ capture=false
timeout=30
ipv6=true
ethtool_random_on=true
-tc_delay="$((RANDOM%400))"
+tc_delay="$((RANDOM%50))"
tc_loss=$((RANDOM%101))
-tc_reorder=""
testmode=""
sndbuf=0
rcvbuf=0
@@ -444,9 +443,9 @@ do_transfer()
duration=$(printf "(duration %05sms)" $duration)
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
echo "$duration [ FAIL ] client exit code $retc, server $rets" 1>&2
- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
cat "$capout"
@@ -628,30 +627,32 @@ for sender in "$ns1" "$ns2" "$ns3" "$ns4";do
do_ping "$ns4" $sender dead:beef:3::1
done
-[ -n "$tc_loss" ] && tc -net "$ns2" qdisc add dev ns2eth3 root netem loss random $tc_loss
+[ -n "$tc_loss" ] && tc -net "$ns2" qdisc add dev ns2eth3 root netem loss random $tc_loss delay ${tc_delay}ms
echo -n "INFO: Using loss of $tc_loss "
test "$tc_delay" -gt 0 && echo -n "delay $tc_delay ms "
+reorder_delay=$(($tc_delay / 4))
+
if [ -z "${tc_reorder}" ]; then
reorder1=$((RANDOM%10))
reorder1=$((100 - reorder1))
reorder2=$((RANDOM%100))
- if [ $tc_delay -gt 0 ] && [ $reorder1 -lt 100 ] && [ $reorder2 -gt 0 ]; then
+ if [ $reorder_delay -gt 0 ] && [ $reorder1 -lt 100 ] && [ $reorder2 -gt 0 ]; then
tc_reorder="reorder ${reorder1}% ${reorder2}%"
- echo -n "$tc_reorder "
+ echo -n "$tc_reorder with delay ${reorder_delay}ms "
fi
elif [ "$tc_reorder" = "0" ];then
tc_reorder=""
-elif [ "$tc_delay" -gt 0 ];then
+elif [ "$reorder_delay" -gt 0 ];then
# reordering requires some delay
tc_reorder="reorder $tc_reorder"
- echo -n "$tc_reorder "
+ echo -n "$tc_reorder with delay ${reorder_delay}ms "
fi
echo "on ns3eth4"
-tc -net "$ns3" qdisc add dev ns3eth4 root netem delay ${tc_delay}ms $tc_reorder
+tc -net "$ns3" qdisc add dev ns3eth4 root netem delay ${reorder_delay}ms $tc_reorder
for sender in $ns1 $ns2 $ns3 $ns4;do
run_tests_lo "$ns1" "$sender" 10.0.1.1 1
diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
index f39c1129ce5f..08f53d86dedc 100755
--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
+++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
@@ -8,6 +8,7 @@ cin=""
cout=""
ksft_skip=4
timeout=30
+mptcp_connect=""
capture=0
TEST_COUNT=0
@@ -132,6 +133,8 @@ do_transfer()
cl_proto="$3"
srv_proto="$4"
connect_addr="$5"
+ rm_nr_ns1="$6"
+ rm_nr_ns2="$7"
port=$((10000+$TEST_COUNT))
TEST_COUNT=$((TEST_COUNT+1))
@@ -156,14 +159,44 @@ do_transfer()
sleep 1
fi
- ip netns exec ${listener_ns} ./mptcp_connect -j -t $timeout -l -p $port -s ${srv_proto} 0.0.0.0 < "$sin" > "$sout" &
+ if [[ $rm_nr_ns1 -eq 0 && $rm_nr_ns2 -eq 0 ]]; then
+ mptcp_connect="./mptcp_connect -j"
+ else
+ mptcp_connect="./mptcp_connect -r"
+ fi
+
+ ip netns exec ${listener_ns} $mptcp_connect -t $timeout -l -p $port -s ${srv_proto} 0.0.0.0 < "$sin" > "$sout" &
spid=$!
sleep 1
- ip netns exec ${connector_ns} ./mptcp_connect -j -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
+ ip netns exec ${connector_ns} $mptcp_connect -t $timeout -p $port -s ${cl_proto} $connect_addr < "$cin" > "$cout" &
cpid=$!
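+	# while the transfer is still running, delete the configured
+	# endpoints one by one from the relevant namespace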
+ if [ $rm_nr_ns1 -gt 0 ]; then
+ counter=1
+ sleep 1
+
+ while [ $counter -le $rm_nr_ns1 ]
+ do
+ ip netns exec ${listener_ns} ./pm_nl_ctl del $counter
+ sleep 1
+ let counter+=1
+ done
+ fi
+
+ if [ $rm_nr_ns2 -gt 0 ]; then
+ counter=1
+ sleep 1
+
+ while [ $counter -le $rm_nr_ns2 ]
+ do
+ ip netns exec ${connector_ns} ./pm_nl_ctl del $counter
+ sleep 1
+ let counter+=1
+ done
+ fi
+
wait $cpid
retc=$?
wait $spid
@@ -176,9 +209,9 @@ do_transfer()
if [ ${rets} -ne 0 ] || [ ${retc} -ne 0 ]; then
echo " client exit code $retc, server $rets" 1>&2
- echo "\nnetns ${listener_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${listener_ns} socket stat for ${port}:" 1>&2
ip netns exec ${listener_ns} ss -nita 1>&2 -o "sport = :$port"
- echo "\nnetns ${connector_ns} socket stat for $port:" 1>&2
+ echo -e "\nnetns ${connector_ns} socket stat for ${port}:" 1>&2
ip netns exec ${connector_ns} ss -nita 1>&2 -o "dport = :$port"
cat "$capout"
@@ -219,7 +252,24 @@ run_tests()
connect_addr="$3"
lret=0
- do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr}
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} 0 0
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ return
+ fi
+}
+
+run_remove_tests()
+{
+ listener_ns="$1"
+ connector_ns="$2"
+ connect_addr="$3"
+ rm_nr_ns1="$4"
+ rm_nr_ns2="$5"
+ lret=0
+
+ do_transfer ${listener_ns} ${connector_ns} MPTCP MPTCP ${connect_addr} ${rm_nr_ns1} ${rm_nr_ns2}
lret=$?
if [ $lret -ne 0 ]; then
ret=$lret
@@ -276,6 +326,80 @@ chk_join_nr()
fi
}
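+# ADD_ADDR announcements are counted on the client side (ns2),
+# their echoes on the server side (ns1)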
+chk_add_nr()
+{
+ local add_nr=$1
+ local echo_nr=$2
+ local count
+ local dump_stats
+
+ printf "%-39s %s" " " "add"
+ count=`ip netns exec $ns2 nstat -as | grep MPTcpExtAddAddr | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$add_nr" ]; then
+ echo "[fail] got $count ADD_ADDR[s] expected $add_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - echo "
+ count=`ip netns exec $ns1 nstat -as | grep MPTcpExtEchoAdd | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$echo_nr" ]; then
+ echo "[fail] got $count ADD_ADDR echo[s] expected $echo_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+
+ if [ "${dump_stats}" = 1 ]; then
+ echo Server ns stats
+ ip netns exec $ns1 nstat -as | grep MPTcp
+ echo Client ns stats
+ ip netns exec $ns2 nstat -as | grep MPTcp
+ fi
+}
+
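+# RM_ADDR is counted on the server side (ns1), subflow removals
+# on the client side (ns2)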
+chk_rm_nr()
+{
+ local rm_addr_nr=$1
+ local rm_subflow_nr=$2
+ local count
+ local dump_stats
+
+ printf "%-39s %s" " " "rm "
+ count=`ip netns exec $ns1 nstat -as | grep MPTcpExtRmAddr | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$rm_addr_nr" ]; then
+ echo "[fail] got $count RM_ADDR[s] expected $rm_addr_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo -n "[ ok ]"
+ fi
+
+ echo -n " - sf "
+ count=`ip netns exec $ns2 nstat -as | grep MPTcpExtRmSubflow | awk '{print $2}'`
+ [ -z "$count" ] && count=0
+ if [ "$count" != "$rm_subflow_nr" ]; then
+ echo "[fail] got $count RM_SUBFLOW[s] expected $rm_subflow_nr"
+ ret=1
+ dump_stats=1
+ else
+ echo "[ ok ]"
+ fi
+
+ if [ "${dump_stats}" = 1 ]; then
+ echo Server ns stats
+ ip netns exec $ns1 nstat -as | grep MPTcp
+ echo Client ns stats
+ ip netns exec $ns2 nstat -as | grep MPTcp
+ fi
+}
+
sin=$(mktemp)
sout=$(mktemp)
cin=$(mktemp)
@@ -332,6 +456,7 @@ reset
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "unused signal address" 0 0 0
+chk_add_nr 1 1
# accept and use add_addr
reset
@@ -340,6 +465,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "signal address" 1 1 1
+chk_add_nr 1 1
# accept and use add_addr with an additional subflow
# note: signal address in server ns and local addresses in client ns must
@@ -352,6 +478,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 2
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "subflow and signal" 2 2 2
+chk_add_nr 1 1
# accept and use add_addr with additional subflows
reset
@@ -362,6 +489,59 @@ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "multiple subflows and signal" 3 3 3
+chk_add_nr 1 1
+
+# single subflow, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl limits 0 1
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 0 1
+chk_join_nr "remove single subflow" 1 1 1
+chk_rm_nr 1 1
+
+# multiple subflows, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl limits 0 2
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.2.2 flags subflow
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 0 2
+chk_join_nr "remove multiple subflows" 2 2 2
+chk_rm_nr 2 2
+
+# single address, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 1
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns2 ./pm_nl_ctl limits 1 1
+run_remove_tests $ns1 $ns2 10.0.1.1 1 0
+chk_join_nr "remove single address" 1 1 1
+chk_add_nr 1 1
+chk_rm_nr 0 0
+
+# subflow and signal, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 2
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns2 ./pm_nl_ctl limits 1 2
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 1 1
+chk_join_nr "remove subflow and signal" 2 2 2
+chk_add_nr 1 1
+chk_rm_nr 1 1
+
+# subflows and signal, remove
+reset
+ip netns exec $ns1 ./pm_nl_ctl limits 0 3
+ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
+ip netns exec $ns2 ./pm_nl_ctl limits 1 3
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
+ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
+run_remove_tests $ns1 $ns2 10.0.1.1 1 2
+chk_join_nr "remove subflows and signal" 3 3 3
+chk_add_nr 1 1
+chk_rm_nr 2 2
# single subflow, syncookies
reset_with_cookies
@@ -396,6 +576,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 1
ip netns exec $ns1 ./pm_nl_ctl add 10.0.2.1 flags signal
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "signal address with syn cookies" 1 1 1
+chk_add_nr 1 1
# test cookie with subflow and signal
reset_with_cookies
@@ -405,6 +586,7 @@ ip netns exec $ns2 ./pm_nl_ctl limits 1 2
ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "subflow and signal w cookies" 2 2 2
+chk_add_nr 1 1
# accept and use add_addr with additional subflows
reset_with_cookies
@@ -415,5 +597,6 @@ ip netns exec $ns2 ./pm_nl_ctl add 10.0.3.2 flags subflow
ip netns exec $ns2 ./pm_nl_ctl add 10.0.4.2 flags subflow
run_tests $ns1 $ns2 10.0.1.1
chk_join_nr "subflows and signal w. cookies" 3 3 3
+chk_add_nr 1 1
exit $ret
diff --git a/tools/testing/selftests/net/mptcp/simult_flows.sh b/tools/testing/selftests/net/mptcp/simult_flows.sh
new file mode 100755
index 000000000000..2f649b431456
--- /dev/null
+++ b/tools/testing/selftests/net/mptcp/simult_flows.sh
@@ -0,0 +1,293 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+
+sec=$(date +%s)
+rndh=$(printf %x $sec)-$(mktemp -u XXXXXX)
+ns1="ns1-$rndh"
+ns2="ns2-$rndh"
+ns3="ns3-$rndh"
+capture=false
+ksft_skip=4
+timeout=30
+test_cnt=1
+ret=0
+bail=0
+
+usage() {
+ echo "Usage: $0 [ -b ] [ -c ] [ -d ]"
+ echo -e "\t-b: bail out after first error, otherwise runs al testcases"
+ echo -e "\t-c: capture packets for each test using tcpdump (default: no capture)"
+ echo -e "\t-d: debug this script"
+}
+
+cleanup()
+{
+ rm -f "$cin" "$cout"
+ rm -f "$sin" "$sout"
+ rm -f "$capout"
+
+ local netns
+ for netns in "$ns1" "$ns2" "$ns3";do
+ ip netns del $netns
+ done
+}
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+# "$ns1" ns2 ns3
+# ns1eth1 ns2eth1 ns2eth3 ns3eth1
+# netem
+# ns1eth2 ns2eth2
+# netem
+
+setup()
+{
+ large=$(mktemp)
+ small=$(mktemp)
+ sout=$(mktemp)
+ cout=$(mktemp)
+ capout=$(mktemp)
+ size=$((2048 * 4096))
+ dd if=/dev/zero of=$small bs=4096 count=20 >/dev/null 2>&1
+ dd if=/dev/zero of=$large bs=4096 count=$((size / 4096)) >/dev/null 2>&1
+
+ trap cleanup EXIT
+
+ for i in "$ns1" "$ns2" "$ns3";do
+ ip netns add $i || exit $ksft_skip
+ ip -net $i link set lo up
+ done
+
+ ip link add ns1eth1 netns "$ns1" type veth peer name ns2eth1 netns "$ns2"
+ ip link add ns1eth2 netns "$ns1" type veth peer name ns2eth2 netns "$ns2"
+ ip link add ns2eth3 netns "$ns2" type veth peer name ns3eth1 netns "$ns3"
+
+ ip -net "$ns1" addr add 10.0.1.1/24 dev ns1eth1
+ ip -net "$ns1" addr add dead:beef:1::1/64 dev ns1eth1 nodad
+ ip -net "$ns1" link set ns1eth1 up mtu 1500
+ ip -net "$ns1" route add default via 10.0.1.2
+ ip -net "$ns1" route add default via dead:beef:1::2
+
+ ip -net "$ns1" addr add 10.0.2.1/24 dev ns1eth2
+ ip -net "$ns1" addr add dead:beef:2::1/64 dev ns1eth2 nodad
+ ip -net "$ns1" link set ns1eth2 up mtu 1500
+ ip -net "$ns1" route add default via 10.0.2.2 metric 101
+ ip -net "$ns1" route add default via dead:beef:2::2 metric 101
+
+ ip netns exec "$ns1" ./pm_nl_ctl limits 1 1
+ ip netns exec "$ns1" ./pm_nl_ctl add 10.0.2.1 dev ns1eth2 flags subflow
+ ip netns exec "$ns1" sysctl -q net.ipv4.conf.all.rp_filter=0
+
+ ip -net "$ns2" addr add 10.0.1.2/24 dev ns2eth1
+ ip -net "$ns2" addr add dead:beef:1::2/64 dev ns2eth1 nodad
+ ip -net "$ns2" link set ns2eth1 up mtu 1500
+
+ ip -net "$ns2" addr add 10.0.2.2/24 dev ns2eth2
+ ip -net "$ns2" addr add dead:beef:2::2/64 dev ns2eth2 nodad
+ ip -net "$ns2" link set ns2eth2 up mtu 1500
+
+ ip -net "$ns2" addr add 10.0.3.2/24 dev ns2eth3
+ ip -net "$ns2" addr add dead:beef:3::2/64 dev ns2eth3 nodad
+ ip -net "$ns2" link set ns2eth3 up mtu 1500
+ ip netns exec "$ns2" sysctl -q net.ipv4.ip_forward=1
+ ip netns exec "$ns2" sysctl -q net.ipv6.conf.all.forwarding=1
+
+ ip -net "$ns3" addr add 10.0.3.3/24 dev ns3eth1
+ ip -net "$ns3" addr add dead:beef:3::3/64 dev ns3eth1 nodad
+ ip -net "$ns3" link set ns3eth1 up mtu 1500
+ ip -net "$ns3" route add default via 10.0.3.2
+ ip -net "$ns3" route add default via dead:beef:3::2
+
+ ip netns exec "$ns3" ./pm_nl_ctl limits 1 1
+}
+
+# $1: ns, $2: port
+wait_local_port_listen()
+{
+ local listener_ns="${1}"
+ local port="${2}"
+
+ local port_hex i
+
+ port_hex="$(printf "%04X" "${port}")"
+ for i in $(seq 10); do
+ ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
+ awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
+ break
+ sleep 0.1
+ done
+}
+
+do_transfer()
+{
+ local cin=$1
+ local sin=$2
+ local max_time=$3
+ local port
+ port=$((10000+$test_cnt))
+ test_cnt=$((test_cnt+1))
+
+ :> "$cout"
+ :> "$sout"
+ :> "$capout"
+
+ local addr_port
+ addr_port=$(printf "%s:%d" ${connect_addr} ${port})
+
+ if $capture; then
+ local capuser
+ if [ -z $SUDO_USER ] ; then
+ capuser=""
+ else
+ capuser="-Z $SUDO_USER"
+ fi
+
+ local capfile="${rndh}-${port}"
+ local capopt="-i any -s 65535 -B 32768 ${capuser}"
+
+ ip netns exec ${ns3} tcpdump ${capopt} -w "${capfile}-listener.pcap" >> "${capout}" 2>&1 &
+ local cappid_listener=$!
+
+ ip netns exec ${ns1} tcpdump ${capopt} -w "${capfile}-connector.pcap" >> "${capout}" 2>&1 &
+ local cappid_connector=$!
+
+ sleep 1
+ fi
+
+ ip netns exec ${ns3} ./mptcp_connect -jt $timeout -l -p $port 0.0.0.0 < "$sin" > "$sout" &
+ local spid=$!
+
+ wait_local_port_listen "${ns3}" "${port}"
+
+ local start
+ start=$(date +%s%3N)
+ ip netns exec ${ns1} ./mptcp_connect -jt $timeout -p $port 10.0.3.3 < "$cin" > "$cout" &
+ local cpid=$!
+
+ wait $cpid
+ local retc=$?
+ wait $spid
+ local rets=$?
+
+ local stop
+ stop=$(date +%s%3N)
+
+ if $capture; then
+ sleep 1
+ kill ${cappid_listener}
+ kill ${cappid_connector}
+ fi
+
+ local duration
+ duration=$((stop-start))
+
+ cmp $sin $cout > /dev/null 2>&1
+ local cmps=$?
+ cmp $cin $sout > /dev/null 2>&1
+ local cmpc=$?
+
+ printf "%16s" "$duration max $max_time "
+ if [ $retc -eq 0 ] && [ $rets -eq 0 ] && \
+ [ $cmpc -eq 0 ] && [ $cmps -eq 0 ] && \
+ [ $duration -lt $max_time ]; then
+ echo "[ OK ]"
+ cat "$capout"
+ return 0
+ fi
+
+ echo " [ fail ]"
+ echo "client exit code $retc, server $rets" 1>&2
+ echo -e "\nnetns ${ns3} socket stat for $port:" 1>&2
+ ip netns exec ${ns3} ss -nita 1>&2 -o "sport = :$port"
+ echo -e "\nnetns ${ns1} socket stat for $port:" 1>&2
+ ip netns exec ${ns1} ss -nita 1>&2 -o "dport = :$port"
+ ls -l $sin $cout
+ ls -l $cin $sout
+
+ cat "$capout"
+ return 1
+}
+
+run_test()
+{
+ local rate1=$1
+ local rate2=$2
+ local delay1=$3
+ local delay2=$4
+ local lret
+ local dev
+ shift 4
+ local msg=$*
+
+ [ $delay1 -gt 0 ] && delay1="delay $delay1" || delay1=""
+ [ $delay2 -gt 0 ] && delay2="delay $delay2" || delay2=""
+
+ for dev in ns1eth1 ns1eth2; do
+ tc -n $ns1 qdisc del dev $dev root >/dev/null 2>&1
+ done
+ for dev in ns2eth1 ns2eth2; do
+ tc -n $ns2 qdisc del dev $dev root >/dev/null 2>&1
+ done
+ tc -n $ns1 qdisc add dev ns1eth1 root netem rate ${rate1}mbit $delay1
+ tc -n $ns1 qdisc add dev ns1eth2 root netem rate ${rate2}mbit $delay2
+ tc -n $ns2 qdisc add dev ns2eth1 root netem rate ${rate1}mbit $delay1
+ tc -n $ns2 qdisc add dev ns2eth2 root netem rate ${rate2}mbit $delay2
+
+	# time is measured in ms
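+	# e.g. for the 8MiB transfer at rate1=rate2=10:
+	# 8388608 * 8 * 1000 / (20 * 1024 * 1024) = 3200ms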
+ local time=$((size * 8 * 1000 / (( $rate1 + $rate2) * 1024 *1024) ))
+
+	# mptcp_connect does some sleeps to allow the mp_join handshake
+	# to complete
+ time=$((time + 1350))
+
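+	# allow 10% of slack over the computed transfer time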
+ printf "%-50s" "$msg"
+ do_transfer $small $large $((time * 11 / 10))
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ [ $bail -eq 0 ] || exit $ret
+ fi
+
+ printf "%-50s" "$msg - reverse direction"
+ do_transfer $large $small $((time * 11 / 10))
+ lret=$?
+ if [ $lret -ne 0 ]; then
+ ret=$lret
+ [ $bail -eq 0 ] || exit $ret
+ fi
+}
+
+while getopts "bcdh" option;do
+ case "$option" in
+ "h")
+ usage $0
+ exit 0
+ ;;
+ "b")
+ bail=1
+ ;;
+ "c")
+ capture=true
+ ;;
+ "d")
+ set -x
+ ;;
+ "?")
+ usage $0
+ exit 1
+ ;;
+ esac
+done
+
+setup
+run_test 10 10 0 0 "balanced bwidth"
+run_test 10 10 1 50 "balanced bwidth with unbalanced delay"
+
+# we still need some additional infrastructure to pass the following test-cases
+# run_test 30 10 0 0 "unbalanced bwidth"
+# run_test 30 10 1 50 "unbalanced bwidth with unbalanced delay"
+# run_test 30 10 50 1 "unbalanced bwidth with opposed, unbalanced delay"
+exit $ret
diff --git a/tools/testing/selftests/net/nettest.c b/tools/testing/selftests/net/nettest.c
index 93208caacbe6..f75c53ce0a2d 100644
--- a/tools/testing/selftests/net/nettest.c
+++ b/tools/testing/selftests/net/nettest.c
@@ -1667,6 +1667,8 @@ int main(int argc, char *argv[])
case 'R':
args.type = SOCK_RAW;
args.port = 0;
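+			/* raw sockets need a protocol; default to IPPROTO_RAW if -P did not set one */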
+ if (!args.protocol)
+ args.protocol = IPPROTO_RAW;
break;
case 'P':
pe = getprotobyname(optarg);
diff --git a/tools/testing/selftests/net/psock_snd.sh b/tools/testing/selftests/net/psock_snd.sh
index 6331d91b86a6..170be65e0816 100755
--- a/tools/testing/selftests/net/psock_snd.sh
+++ b/tools/testing/selftests/net/psock_snd.sh
@@ -45,7 +45,7 @@ echo "raw vnet hdr"
echo "raw csum_off"
./in_netns.sh ./psock_snd -v -c
-echo "raw csum_off with bad offset (fails)"
+echo "raw csum_off with bad offset (expected to fail)"
(! ./in_netns.sh ./psock_snd -v -c -C)
@@ -57,7 +57,7 @@ echo "raw min size"
echo "raw mtu size"
./in_netns.sh ./psock_snd -l "${mss}"
-echo "raw mtu size + 1 (fails)"
+echo "raw mtu size + 1 (expected to fail)"
(! ./in_netns.sh ./psock_snd -l "${mss_exceeds}")
# fails due to ARPHRD_ETHER check in packet_extra_vlan_len_allowed
@@ -65,19 +65,19 @@ echo "raw mtu size + 1 (fails)"
# echo "raw vlan mtu size"
# ./in_netns.sh ./psock_snd -V -l "${mss}"
-echo "raw vlan mtu size + 1 (fails)"
+echo "raw vlan mtu size + 1 (expected to fail)"
(! ./in_netns.sh ./psock_snd -V -l "${mss_exceeds}")
echo "dgram mtu size"
./in_netns.sh ./psock_snd -d -l "${mss}"
-echo "dgram mtu size + 1 (fails)"
+echo "dgram mtu size + 1 (expected to fail)"
(! ./in_netns.sh ./psock_snd -d -l "${mss_exceeds}")
-echo "raw truncate hlen (fails: does not arrive)"
+echo "raw truncate hlen (expected to fail: does not arrive)"
(! ./in_netns.sh ./psock_snd -t "$((${vnet_hlen} + ${eth_hlen}))")
-echo "raw truncate hlen - 1 (fails: EINVAL)"
+echo "raw truncate hlen - 1 (expected to fail: EINVAL)"
(! ./in_netns.sh ./psock_snd -t "$((${vnet_hlen} + ${eth_hlen} - 1))")
@@ -86,13 +86,13 @@ echo "raw truncate hlen - 1 (fails: EINVAL)"
echo "raw gso min size"
./in_netns.sh ./psock_snd -v -c -g -l "${mss_exceeds}"
-echo "raw gso min size - 1 (fails)"
+echo "raw gso min size - 1 (expected to fail)"
(! ./in_netns.sh ./psock_snd -v -c -g -l "${mss}")
echo "raw gso max size"
./in_netns.sh ./psock_snd -v -c -g -l "${max_mss}"
-echo "raw gso max size + 1 (fails)"
+echo "raw gso max size + 1 (expected to fail)"
(! ./in_netns.sh ./psock_snd -v -c -g -l "${max_mss_exceeds}")
echo "OK. All tests passed"
diff --git a/tools/testing/selftests/net/tcp_mmap.c b/tools/testing/selftests/net/tcp_mmap.c
index a61b7b3da549..00f837c9bc6c 100644
--- a/tools/testing/selftests/net/tcp_mmap.c
+++ b/tools/testing/selftests/net/tcp_mmap.c
@@ -123,6 +123,28 @@ void hash_zone(void *zone, unsigned int length)
#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1))
#define ALIGN_PTR_UP(p, ptr_align_to) ((typeof(p))ALIGN_UP((unsigned long)(p), ptr_align_to))
+
+static void *mmap_large_buffer(size_t need, size_t *allocated)
+{
+ void *buffer;
+ size_t sz;
+
+ /* Attempt to use huge pages if possible. */
+ sz = ALIGN_UP(need, map_align);
+ buffer = mmap(NULL, sz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
+
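+	/* (void *)-1 is MAP_FAILED: fall back to regular anonymous pages */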
+ if (buffer == (void *)-1) {
+ sz = need;
+ buffer = mmap(NULL, sz, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (buffer != (void *)-1)
+ fprintf(stderr, "MAP_HUGETLB attempt failed, look at /sys/kernel/mm/hugepages for optimal performance\n");
+ }
+ *allocated = sz;
+ return buffer;
+}
+
void *child_thread(void *arg)
{
unsigned long total_mmap = 0, total = 0;
@@ -135,6 +157,7 @@ void *child_thread(void *arg)
void *addr = NULL;
double throughput;
struct rusage ru;
+ size_t buffer_sz;
int lu, fd;
fd = (int)(unsigned long)arg;
@@ -142,9 +165,9 @@ void *child_thread(void *arg)
gettimeofday(&t0, NULL);
fcntl(fd, F_SETFL, O_NDELAY);
- buffer = malloc(chunk_size);
- if (!buffer) {
- perror("malloc");
+ buffer = mmap_large_buffer(chunk_size, &buffer_sz);
+ if (buffer == (void *)-1) {
+ perror("mmap");
goto error;
}
if (zflg) {
@@ -179,6 +202,10 @@ void *child_thread(void *arg)
total_mmap += zc.length;
if (xflg)
hash_zone(addr, zc.length);
+			/* It is more efficient to unmap the pages right now,
+			 * instead of doing it in the next TCP_ZEROCOPY_RECEIVE.
+			 */
+ madvise(addr, zc.length, MADV_DONTNEED);
total += zc.length;
}
if (zc.recv_skip_hint) {
@@ -230,7 +257,7 @@ end:
ru.ru_nvcsw);
}
error:
- free(buffer);
+ munmap(buffer, buffer_sz);
close(fd);
if (zflg)
munmap(raddr, chunk_size + map_align);
@@ -347,6 +374,7 @@ int main(int argc, char *argv[])
uint64_t total = 0;
char *host = NULL;
int fd, c, on = 1;
+ size_t buffer_sz;
char *buffer;
int sflg = 0;
int mss = 0;
@@ -437,8 +465,8 @@ int main(int argc, char *argv[])
}
do_accept(fdlisten);
}
- buffer = mmap(NULL, chunk_size, PROT_READ | PROT_WRITE,
- MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+ buffer = mmap_large_buffer(chunk_size, &buffer_sz);
if (buffer == (char *)-1) {
perror("mmap");
exit(1);
@@ -484,6 +512,6 @@ int main(int argc, char *argv[])
total += wr;
}
close(fd);
- munmap(buffer, chunk_size);
+ munmap(buffer, buffer_sz);
return 0;
}
diff --git a/tools/testing/selftests/net/vrf_route_leaking.sh b/tools/testing/selftests/net/vrf_route_leaking.sh
new file mode 100755
index 000000000000..23cf924754a5
--- /dev/null
+++ b/tools/testing/selftests/net/vrf_route_leaking.sh
@@ -0,0 +1,626 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2019 David Ahern <dsahern@gmail.com>. All rights reserved.
+# Copyright (c) 2020 Michael Jeanson <mjeanson@efficios.com>. All rights reserved.
+#
+# Requires CONFIG_NET_VRF, CONFIG_VETH, CONFIG_BRIDGE and CONFIG_NET_NS.
+#
+#
+# Symmetric routing topology
+#
+#                  blue                       red
+# +----+            .253 +----+ .253            +----+
+# | h1 |-----------------| r1 |-----------------| h2 |
+# +----+ .1              +----+              .2 +----+
+#        172.16.1/24            172.16.2/24
+#      2001:db8:16:1/64      2001:db8:16:2/64
+#
+#
+# Route from h1 to h2 and back goes through r1, incoming vrf blue has a route
+# to the outgoing vrf red for the n2 network and red has a route back to n1.
+# The red VRF interface has a MTU of 1400.
+#
+# The first test sends a ping with a ttl of 1 from h1 to h2 and parses the
+# output of the command to check that a ttl expired error is received.
+#
+# The second test runs traceroute from h1 to h2 and parses the output to check
+# for a hop on r1.
+#
+# The third test sends a ping with a packet size of 1450 from h1 to h2 and
+# parses the output of the command to check that a fragmentation error is
+# received.
+#
+#
+# Asymmetric routing topology
+#
+# This topology represents a customer setup where the issue with ICMP
+# errors and VRF route leaking was initially reported. The MTU test
+# isn't run here because the red VRF has no return route.
+#
+#                      blue      red
+#                     .253 +----+ .253
+#                     +----| r1 |----+
+#                     |    +----+    |
+# +----+              |              |              +----+
+# | h1 |--------------+              +--------------| h2 |
+# +----+ .1           |              |           .2 +----+
+# 172.16.1/24         |    +----+    |         172.16.2/24
+# 2001:db8:16:1/64    +----| r2 |----+    2001:db8:16:2/64
+#                     .254 +----+ .254
+#
+#
+# Route from h1 to h2 goes through r1, incoming vrf blue has a route to the
+# outgoing vrf red for the n2 network but red doesn't have a route back to n1.
+# Route from h2 to h1 goes through r2.
+#
+# The objective is to check that the incoming vrf routing table is selected
+# to send an ICMP error back to the source when the ttl of a packet reaches 1
+# while it is forwarded between different vrfs.
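+#
+# The crux of the setup is the cross-VRF ("leaked") route, as configured
+# in setup_sym()/setup_asym() below, e.g. on r1:
+#
+#   ip route add vrf blue 172.16.2.0/24 dev red
+#
+# which makes the red VRF device the output interface for traffic
+# arriving in the blue VRF.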
+
+VERBOSE=0
+PAUSE_ON_FAIL=no
+DEFAULT_TTYPE=sym
+
+H1_N1=172.16.1.0/24
+H1_N1_6=2001:db8:16:1::/64
+
+H1_N1_IP=172.16.1.1
+R1_N1_IP=172.16.1.253
+R2_N1_IP=172.16.1.254
+
+H1_N1_IP6=2001:db8:16:1::1
+R1_N1_IP6=2001:db8:16:1::253
+R2_N1_IP6=2001:db8:16:1::254
+
+H2_N2=172.16.2.0/24
+H2_N2_6=2001:db8:16:2::/64
+
+H2_N2_IP=172.16.2.2
+R1_N2_IP=172.16.2.253
+R2_N2_IP=172.16.2.254
+
+H2_N2_IP6=2001:db8:16:2::2
+R1_N2_IP6=2001:db8:16:2::253
+R2_N2_IP6=2001:db8:16:2::254
+
+################################################################################
+# helpers
+
+log_section()
+{
+ echo
+ echo "###########################################################################"
+ echo "$*"
+ echo "###########################################################################"
+ echo
+}
+
+log_test()
+{
+ local rc=$1
+ local expected=$2
+ local msg="$3"
+
+ if [ "${rc}" -eq "${expected}" ]; then
+ printf "TEST: %-60s [ OK ]\n" "${msg}"
+ nsuccess=$((nsuccess+1))
+ else
+ ret=1
+ nfail=$((nfail+1))
+ printf "TEST: %-60s [FAIL]\n" "${msg}"
+ if [ "${PAUSE_ON_FAIL}" = "yes" ]; then
+ echo
+ echo "hit enter to continue, 'q' to quit"
+ read -r a
+ [ "$a" = "q" ] && exit 1
+ fi
+ fi
+}
+
+run_cmd()
+{
+ local cmd="$*"
+ local out
+ local rc
+
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: $cmd"
+ fi
+
+ # shellcheck disable=SC2086
+ out=$(eval $cmd 2>&1)
+ rc=$?
+ if [ "$VERBOSE" = "1" ] && [ -n "$out" ]; then
+ echo "$out"
+ fi
+
+ [ "$VERBOSE" = "1" ] && echo
+
+ return $rc
+}
+
+run_cmd_grep()
+{
+ local grep_pattern="$1"
+ shift
+ local cmd="$*"
+ local out
+ local rc
+
+ if [ "$VERBOSE" = "1" ]; then
+ echo "COMMAND: $cmd"
+ fi
+
+ # shellcheck disable=SC2086
+ out=$(eval $cmd 2>&1)
+ if [ "$VERBOSE" = "1" ] && [ -n "$out" ]; then
+ echo "$out"
+ fi
+
+ echo "$out" | grep -q "$grep_pattern"
+ rc=$?
+
+ [ "$VERBOSE" = "1" ] && echo
+
+ return $rc
+}
+
+################################################################################
+# setup and teardown
+
+cleanup()
+{
+ local ns
+
+ for ns in h1 h2 r1 r2; do
+ ip netns del $ns 2>/dev/null
+ done
+}
+
+setup_vrf()
+{
+ local ns=$1
+
+ ip -netns "${ns}" rule del pref 0
+ ip -netns "${ns}" rule add pref 32765 from all lookup local
+ ip -netns "${ns}" -6 rule del pref 0
+ ip -netns "${ns}" -6 rule add pref 32765 from all lookup local
+}
+
+create_vrf()
+{
+ local ns=$1
+ local vrf=$2
+ local table=$3
+
+ ip -netns "${ns}" link add "${vrf}" type vrf table "${table}"
+ ip -netns "${ns}" link set "${vrf}" up
+ ip -netns "${ns}" route add vrf "${vrf}" unreachable default metric 8192
+ ip -netns "${ns}" -6 route add vrf "${vrf}" unreachable default metric 8192
+
+ ip -netns "${ns}" addr add 127.0.0.1/8 dev "${vrf}"
+ ip -netns "${ns}" -6 addr add ::1 dev "${vrf}" nodad
+}
+
+setup_sym()
+{
+ local ns
+
+ # make sure we are starting with a clean slate
+ cleanup
+
+ #
+ # create nodes as namespaces
+ #
+ for ns in h1 h2 r1; do
+ ip netns add $ns
+ ip -netns $ns link set lo up
+
+ case "${ns}" in
+ h[12]) ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=0
+ ip netns exec $ns sysctl -q -w net.ipv6.conf.all.keep_addr_on_down=1
+ ;;
+ r1) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
+ ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
+ esac
+ done
+
+ #
+ # create interconnects
+ #
+ ip -netns h1 link add eth0 type veth peer name r1h1
+ ip -netns h1 link set r1h1 netns r1 name eth0 up
+
+ ip -netns h2 link add eth0 type veth peer name r1h2
+ ip -netns h2 link set r1h2 netns r1 name eth1 up
+
+ #
+ # h1
+ #
+ ip -netns h1 addr add dev eth0 ${H1_N1_IP}/24
+ ip -netns h1 -6 addr add dev eth0 ${H1_N1_IP6}/64 nodad
+ ip -netns h1 link set eth0 up
+
+ # h1 to h2 via r1
+ ip -netns h1 route add ${H2_N2} via ${R1_N1_IP} dev eth0
+ ip -netns h1 -6 route add ${H2_N2_6} via "${R1_N1_IP6}" dev eth0
+
+ #
+ # h2
+ #
+ ip -netns h2 addr add dev eth0 ${H2_N2_IP}/24
+ ip -netns h2 -6 addr add dev eth0 ${H2_N2_IP6}/64 nodad
+ ip -netns h2 link set eth0 up
+
+ # h2 to h1 via r1
+ ip -netns h2 route add default via ${R1_N2_IP} dev eth0
+ ip -netns h2 -6 route add default via ${R1_N2_IP6} dev eth0
+
+ #
+ # r1
+ #
+ setup_vrf r1
+ create_vrf r1 blue 1101
+ create_vrf r1 red 1102
+ ip -netns r1 link set mtu 1400 dev eth1
+ ip -netns r1 link set eth0 vrf blue up
+ ip -netns r1 link set eth1 vrf red up
+ ip -netns r1 addr add dev eth0 ${R1_N1_IP}/24
+ ip -netns r1 -6 addr add dev eth0 ${R1_N1_IP6}/64 nodad
+ ip -netns r1 addr add dev eth1 ${R1_N2_IP}/24
+ ip -netns r1 -6 addr add dev eth1 ${R1_N2_IP6}/64 nodad
+
+ # Route leak from blue to red
+ ip -netns r1 route add vrf blue ${H2_N2} dev red
+ ip -netns r1 -6 route add vrf blue ${H2_N2_6} dev red
+
+ # Route leak from red to blue
+ ip -netns r1 route add vrf red ${H1_N1} dev blue
+ ip -netns r1 -6 route add vrf red ${H1_N1_6} dev blue
+
+
+ # Wait for ip config to settle
+ sleep 2
+}
+
+setup_asym()
+{
+ local ns
+
+ # make sure we are starting with a clean slate
+ cleanup
+
+ #
+ # create nodes as namespaces
+ #
+ for ns in h1 h2 r1 r2; do
+ ip netns add $ns
+ ip -netns $ns link set lo up
+
+ case "${ns}" in
+ h[12]) ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=0
+ ip netns exec $ns sysctl -q -w net.ipv6.conf.all.keep_addr_on_down=1
+ ;;
+ r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
+ ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
+ esac
+ done
+
+ #
+ # create interconnects
+ #
+ ip -netns h1 link add eth0 type veth peer name r1h1
+ ip -netns h1 link set r1h1 netns r1 name eth0 up
+
+ ip -netns h1 link add eth1 type veth peer name r2h1
+ ip -netns h1 link set r2h1 netns r2 name eth0 up
+
+ ip -netns h2 link add eth0 type veth peer name r1h2
+ ip -netns h2 link set r1h2 netns r1 name eth1 up
+
+ ip -netns h2 link add eth1 type veth peer name r2h2
+ ip -netns h2 link set r2h2 netns r2 name eth1 up
+
+ #
+ # h1
+ #
+ ip -netns h1 link add br0 type bridge
+ ip -netns h1 link set br0 up
+ ip -netns h1 addr add dev br0 ${H1_N1_IP}/24
+ ip -netns h1 -6 addr add dev br0 ${H1_N1_IP6}/64 nodad
+ ip -netns h1 link set eth0 master br0 up
+ ip -netns h1 link set eth1 master br0 up
+
+ # h1 to h2 via r1
+ ip -netns h1 route add ${H2_N2} via ${R1_N1_IP} dev br0
+ ip -netns h1 -6 route add ${H2_N2_6} via "${R1_N1_IP6}" dev br0
+
+ #
+ # h2
+ #
+ ip -netns h2 link add br0 type bridge
+ ip -netns h2 link set br0 up
+ ip -netns h2 addr add dev br0 ${H2_N2_IP}/24
+ ip -netns h2 -6 addr add dev br0 ${H2_N2_IP6}/64 nodad
+ ip -netns h2 link set eth0 master br0 up
+ ip -netns h2 link set eth1 master br0 up
+
+ # h2 to h1 via r2
+ ip -netns h2 route add default via ${R2_N2_IP} dev br0
+ ip -netns h2 -6 route add default via ${R2_N2_IP6} dev br0
+
+ #
+ # r1
+ #
+ setup_vrf r1
+ create_vrf r1 blue 1101
+ create_vrf r1 red 1102
+ ip -netns r1 link set mtu 1400 dev eth1
+ ip -netns r1 link set eth0 vrf blue up
+ ip -netns r1 link set eth1 vrf red up
+ ip -netns r1 addr add dev eth0 ${R1_N1_IP}/24
+ ip -netns r1 -6 addr add dev eth0 ${R1_N1_IP6}/64 nodad
+ ip -netns r1 addr add dev eth1 ${R1_N2_IP}/24
+ ip -netns r1 -6 addr add dev eth1 ${R1_N2_IP6}/64 nodad
+
+ # Route leak from blue to red
+ ip -netns r1 route add vrf blue ${H2_N2} dev red
+ ip -netns r1 -6 route add vrf blue ${H2_N2_6} dev red
+
+ # No route leak from red to blue
+
+ #
+ # r2
+ #
+ ip -netns r2 addr add dev eth0 ${R2_N1_IP}/24
+ ip -netns r2 -6 addr add dev eth0 ${R2_N1_IP6}/64 nodad
+ ip -netns r2 addr add dev eth1 ${R2_N2_IP}/24
+ ip -netns r2 -6 addr add dev eth1 ${R2_N2_IP6}/64 nodad
+
+ # Wait for ip config to settle
+ sleep 2
+}
+
+check_connectivity()
+{
+ ip netns exec h1 ping -c1 -w1 ${H2_N2_IP} >/dev/null 2>&1
+ log_test $? 0 "Basic IPv4 connectivity"
+ return $?
+}
+
+check_connectivity6()
+{
+ ip netns exec h1 "${ping6}" -c1 -w1 ${H2_N2_IP6} >/dev/null 2>&1
+ log_test $? 0 "Basic IPv6 connectivity"
+ return $?
+}
+
+check_traceroute()
+{
+ if [ ! -x "$(command -v traceroute)" ]; then
+ echo "SKIP: Could not run IPV4 test without traceroute"
+ return 1
+ fi
+}
+
+check_traceroute6()
+{
+ if [ ! -x "$(command -v traceroute6)" ]; then
+ echo "SKIP: Could not run IPV6 test without traceroute6"
+ return 1
+ fi
+}
+
+ipv4_traceroute()
+{
+ local ttype="$1"
+
+ [ "x$ttype" = "x" ] && ttype="$DEFAULT_TTYPE"
+
+ log_section "IPv4 ($ttype route): VRF ICMP error route lookup traceroute"
+
+ check_traceroute || return
+
+ setup_"$ttype"
+
+ check_connectivity || return
+
+ run_cmd_grep "${R1_N1_IP}" ip netns exec h1 traceroute ${H2_N2_IP}
+ log_test $? 0 "Traceroute reports a hop on r1"
+}
+
+ipv4_traceroute_asym()
+{
+ ipv4_traceroute asym
+}
+
+ipv6_traceroute()
+{
+ local ttype="$1"
+
+ [ "x$ttype" = "x" ] && ttype="$DEFAULT_TTYPE"
+
+ log_section "IPv6 ($ttype route): VRF ICMP error route lookup traceroute"
+
+ check_traceroute6 || return
+
+ setup_"$ttype"
+
+ check_connectivity6 || return
+
+ run_cmd_grep "${R1_N1_IP6}" ip netns exec h1 traceroute6 ${H2_N2_IP6}
+ log_test $? 0 "Traceroute6 reports a hop on r1"
+}
+
+ipv6_traceroute_asym()
+{
+ ipv6_traceroute asym
+}
+
+ipv4_ping_ttl()
+{
+ local ttype="$1"
+
+ [ "x$ttype" = "x" ] && ttype="$DEFAULT_TTYPE"
+
+ log_section "IPv4 ($ttype route): VRF ICMP ttl error route lookup ping"
+
+ setup_"$ttype"
+
+ check_connectivity || return
+
+ run_cmd_grep "Time to live exceeded" ip netns exec h1 ping -t1 -c1 -W2 ${H2_N2_IP}
+ log_test $? 0 "Ping received ICMP ttl exceeded"
+}
+
+ipv4_ping_ttl_asym()
+{
+ ipv4_ping_ttl asym
+}
+
+ipv4_ping_frag()
+{
+ local ttype="$1"
+
+ [ "x$ttype" = "x" ] && ttype="$DEFAULT_TTYPE"
+
+ log_section "IPv4 ($ttype route): VRF ICMP fragmentation error route lookup ping"
+
+ setup_"$ttype"
+
+ check_connectivity || return
+
+ run_cmd_grep "Frag needed" ip netns exec h1 ping -s 1450 -Mdo -c1 -W2 ${H2_N2_IP}
+ log_test $? 0 "Ping received ICMP Frag needed"
+}
+
+ipv4_ping_frag_asym()
+{
+ ipv4_ping_frag asym
+}
+
+ipv6_ping_ttl()
+{
+ local ttype="$1"
+
+ [ "x$ttype" = "x" ] && ttype="$DEFAULT_TTYPE"
+
+ log_section "IPv6 ($ttype route): VRF ICMP ttl error route lookup ping"
+
+ setup_"$ttype"
+
+ check_connectivity6 || return
+
+ run_cmd_grep "Time exceeded: Hop limit" ip netns exec h1 "${ping6}" -t1 -c1 -W2 ${H2_N2_IP6}
+ log_test $? 0 "Ping received ICMP Hop limit"
+}
+
+ipv6_ping_ttl_asym()
+{
+ ipv6_ping_ttl asym
+}
+
+ipv6_ping_frag()
+{
+ local ttype="$1"
+
+ [ "x$ttype" = "x" ] && ttype="$DEFAULT_TTYPE"
+
+ log_section "IPv6 ($ttype route): VRF ICMP fragmentation error route lookup ping"
+
+ setup_"$ttype"
+
+ check_connectivity6 || return
+
+ run_cmd_grep "Packet too big" ip netns exec h1 "${ping6}" -s 1450 -Mdo -c1 -W2 ${H2_N2_IP6}
+ log_test $? 0 "Ping received ICMP Packet too big"
+}
+
+ipv6_ping_frag_asym()
+{
+ ipv6_ping_frag asym
+}
+
+################################################################################
+# usage
+
+usage()
+{
+ cat <<EOF
+usage: ${0##*/} OPTS
+
+ -4 Run IPv4 tests only
+ -6 Run IPv6 tests only
+ -t TEST Run only TEST
+ -p Pause on fail
+ -v Verbose mode (show commands and output)
+EOF
+}
+
+################################################################################
+# main
+
+# Some systems don't have a ping6 binary anymore
+command -v ping6 > /dev/null 2>&1 && ping6=$(command -v ping6) || ping6=$(command -v ping)
+
+TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_ttl_asym ipv4_traceroute_asym"
+TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_frag ipv6_ping_ttl_asym ipv6_traceroute_asym"
+
+ret=0
+nsuccess=0
+nfail=0
+
+while getopts :46t:pvh o
+do
+ case $o in
+ 4) TESTS=ipv4;;
+ 6) TESTS=ipv6;;
+ t) TESTS=$OPTARG;;
+ p) PAUSE_ON_FAIL=yes;;
+ v) VERBOSE=1;;
+ h) usage; exit 0;;
+ *) usage; exit 1;;
+ esac
+done
+
+#
+# show user test config
+#
+if [ -z "$TESTS" ]; then
+ TESTS="$TESTS_IPV4 $TESTS_IPV6"
+elif [ "$TESTS" = "ipv4" ]; then
+ TESTS="$TESTS_IPV4"
+elif [ "$TESTS" = "ipv6" ]; then
+ TESTS="$TESTS_IPV6"
+fi
+
+for t in $TESTS
+do
+ case $t in
+ ipv4_ping_ttl|ping) ipv4_ping_ttl;;&
+ ipv4_ping_ttl_asym|ping) ipv4_ping_ttl_asym;;&
+ ipv4_traceroute|traceroute) ipv4_traceroute;;&
+ ipv4_traceroute_asym|traceroute) ipv4_traceroute_asym;;&
+ ipv4_ping_frag|ping) ipv4_ping_frag;;&
+
+ ipv6_ping_ttl|ping) ipv6_ping_ttl;;&
+ ipv6_ping_ttl_asym|ping) ipv6_ping_ttl_asym;;&
+ ipv6_traceroute|traceroute) ipv6_traceroute;;&
+ ipv6_traceroute_asym|traceroute) ipv6_traceroute_asym;;&
+ ipv6_ping_frag|ping) ipv6_ping_frag;;&
+
+ # setup namespaces and config, but do not run any tests
+ setup_sym|setup) setup_sym; exit 0;;
+ setup_asym) setup_asym; exit 0;;
+
+ help) echo "Test names: $TESTS"; exit 0;;
+ esac
+done
+
+cleanup
+
+printf "\nTests passed: %3d\n" ${nsuccess}
+printf "Tests failed: %3d\n" ${nfail}
+
+exit $ret
diff --git a/tools/testing/selftests/netfilter/nf-queue.c b/tools/testing/selftests/netfilter/nf-queue.c
index 29c73bce38fa..9e56b9d47037 100644
--- a/tools/testing/selftests/netfilter/nf-queue.c
+++ b/tools/testing/selftests/netfilter/nf-queue.c
@@ -17,9 +17,12 @@
struct options {
bool count_packets;
+ bool gso_enabled;
int verbose;
unsigned int queue_num;
unsigned int timeout;
+ uint32_t verdict;
+ uint32_t delay_ms;
};
static unsigned int queue_stats[5];
@@ -27,7 +30,7 @@ static struct options opts;
static void help(const char *p)
{
- printf("Usage: %s [-c|-v [-vv] ] [-t timeout] [-q queue_num]\n", p);
+ printf("Usage: %s [-c|-v [-vv] ] [-t timeout] [-q queue_num] [-Qdst_queue ] [ -d ms_delay ] [-G]\n", p);
}
static int parse_attr_cb(const struct nlattr *attr, void *data)
@@ -162,7 +165,7 @@ nfq_build_cfg_params(char *buf, uint8_t mode, int range, int queue_num)
}
static struct nlmsghdr *
-nfq_build_verdict(char *buf, int id, int queue_num, int verd)
+nfq_build_verdict(char *buf, int id, int queue_num, uint32_t verd)
{
struct nfqnl_msg_verdict_hdr vh = {
.verdict = htonl(verd),
@@ -189,9 +192,6 @@ static void print_stats(void)
unsigned int last, total;
int i;
- if (!opts.count_packets)
- return;
-
total = 0;
last = queue_stats[0];
@@ -234,7 +234,8 @@ struct mnl_socket *open_queue(void)
nlh = nfq_build_cfg_params(buf, NFQNL_COPY_PACKET, 0xFFFF, queue_num);
- flags = NFQA_CFG_F_GSO | NFQA_CFG_F_UID_GID;
+ flags = opts.gso_enabled ? NFQA_CFG_F_GSO : 0;
+ flags |= NFQA_CFG_F_UID_GID;
mnl_attr_put_u32(nlh, NFQA_CFG_FLAGS, htonl(flags));
mnl_attr_put_u32(nlh, NFQA_CFG_MASK, htonl(flags));
@@ -255,6 +256,17 @@ struct mnl_socket *open_queue(void)
return nl;
}
+static void sleep_ms(uint32_t delay)
+{
+ struct timespec ts = { .tv_sec = delay / 1000 };
+
+ delay %= 1000;
+
+ ts.tv_nsec = delay * 1000llu * 1000llu;
+
+ nanosleep(&ts, NULL);
+}
+
static int mainloop(void)
{
unsigned int buflen = 64 * 1024 + MNL_SOCKET_BUFFER_SIZE;
@@ -278,7 +290,7 @@ static int mainloop(void)
ret = mnl_socket_recvfrom(nl, buf, buflen);
if (ret == -1) {
- if (errno == ENOBUFS)
+ if (errno == ENOBUFS || errno == EINTR)
continue;
if (errno == EAGAIN) {
@@ -298,7 +310,10 @@ static int mainloop(void)
}
id = ret - MNL_CB_OK;
- nlh = nfq_build_verdict(buf, id, opts.queue_num, NF_ACCEPT);
+ if (opts.delay_ms)
+ sleep_ms(opts.delay_ms);
+
+ nlh = nfq_build_verdict(buf, id, opts.queue_num, opts.verdict);
if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
perror("mnl_socket_sendto");
exit(EXIT_FAILURE);
@@ -314,7 +329,7 @@ static void parse_opts(int argc, char **argv)
{
int c;
- while ((c = getopt(argc, argv, "chvt:q:")) != -1) {
+ while ((c = getopt(argc, argv, "chvt:q:Q:d:G")) != -1) {
switch (c) {
case 'c':
opts.count_packets = true;
@@ -328,20 +343,48 @@ static void parse_opts(int argc, char **argv)
if (opts.queue_num > 0xffff)
opts.queue_num = 0;
break;
+ case 'Q':
+ opts.verdict = atoi(optarg);
+ if (opts.verdict > 0xffff) {
+ fprintf(stderr, "Expected destination queue number\n");
+ exit(1);
+ }
+
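+			/* NF_QUEUE verdicts carry the target queue number in the upper 16 bits */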
+ opts.verdict <<= 16;
+ opts.verdict |= NF_QUEUE;
+ break;
+ case 'd':
+ opts.delay_ms = atoi(optarg);
+ if (opts.delay_ms == 0) {
+ fprintf(stderr, "Expected nonzero delay (in milliseconds)\n");
+ exit(1);
+ }
+ break;
case 't':
opts.timeout = atoi(optarg);
break;
+ case 'G':
+ opts.gso_enabled = false;
+ break;
case 'v':
opts.verbose++;
break;
}
}
+
+ if (opts.verdict != NF_ACCEPT && (opts.verdict >> 16 == opts.queue_num)) {
+ fprintf(stderr, "Cannot use same destination and source queue\n");
+ exit(1);
+ }
}
int main(int argc, char *argv[])
{
int ret;
+ opts.verdict = NF_ACCEPT;
+ opts.gso_enabled = true;
+
parse_opts(argc, argv);
ret = mainloop();
diff --git a/tools/testing/selftests/netfilter/nft_meta.sh b/tools/testing/selftests/netfilter/nft_meta.sh
index d250b84dd5bc..087f0e6e71ce 100755
--- a/tools/testing/selftests/netfilter/nft_meta.sh
+++ b/tools/testing/selftests/netfilter/nft_meta.sh
@@ -7,8 +7,7 @@ ksft_skip=4
sfx=$(mktemp -u "XXXXXXXX")
ns0="ns0-$sfx"
-nft --version > /dev/null 2>&1
-if [ $? -ne 0 ];then
+if ! nft --version > /dev/null 2>&1; then
echo "SKIP: Could not run test without nft tool"
exit $ksft_skip
fi
@@ -24,6 +23,8 @@ ip -net "$ns0" addr add 127.0.0.1 dev lo
trap cleanup EXIT
+currentyear=$(date +%G)
+lastyear=$((currentyear-1))
ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table inet filter {
counter iifcount {}
@@ -33,6 +34,9 @@ table inet filter {
counter infproto4count {}
counter il4protocounter {}
counter imarkcounter {}
+ counter icpu0counter {}
+ counter ilastyearcounter {}
+ counter icurrentyearcounter {}
counter oifcount {}
counter oifnamecount {}
@@ -54,6 +58,9 @@ table inet filter {
meta nfproto ipv4 counter name "infproto4count"
meta l4proto icmp counter name "il4protocounter"
meta mark 42 counter name "imarkcounter"
+ meta cpu 0 counter name "icpu0counter"
+ meta time "$lastyear-01-01" - "$lastyear-12-31" counter name ilastyearcounter
+ meta time "$currentyear-01-01" - "$currentyear-12-31" counter name icurrentyearcounter
}
chain output {
@@ -84,11 +91,10 @@ check_one_counter()
local want="packets $2"
local verbose="$3"
- cnt=$(ip netns exec "$ns0" nft list counter inet filter $cname | grep -q "$want")
- if [ $? -ne 0 ];then
+ if ! ip netns exec "$ns0" nft list counter inet filter $cname | grep -q "$want"; then
echo "FAIL: $cname, want \"$want\", got"
ret=1
- ip netns exec "$ns0" nft list counter inet filter $counter
+ ip netns exec "$ns0" nft list counter inet filter $cname
fi
}
@@ -100,8 +106,7 @@ check_lo_counters()
for counter in iifcount iifnamecount iifgroupcount iiftypecount infproto4count \
oifcount oifnamecount oifgroupcount oiftypecount onfproto4count \
- il4protocounter \
- ol4protocounter \
+ il4protocounter icurrentyearcounter ol4protocounter \
; do
check_one_counter "$counter" "$want" "$verbose"
done
@@ -116,9 +121,22 @@ check_one_counter oskuidcounter "1" true
check_one_counter oskgidcounter "1" true
check_one_counter imarkcounter "1" true
check_one_counter omarkcounter "1" true
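+# a packet sent now can never match last year's time range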
+check_one_counter ilastyearcounter "0" true
if [ $ret -eq 0 ];then
echo "OK: nftables meta iif/oif counters at expected values"
+else
+ exit $ret
+fi
+
+# pin the script to the first CPU and check the cpu-based counter
+taskset -p 01 $$ > /dev/null
+ip netns exec "$ns0" nft reset counters > /dev/null
+ip netns exec "$ns0" ping -q -c 1 127.0.0.1 > /dev/null
+check_one_counter icpu0counter "2" true
+
+if [ $ret -eq 0 ];then
+ echo "OK: nftables meta cpu counter at expected values"
fi
exit $ret
diff --git a/tools/testing/selftests/netfilter/nft_queue.sh b/tools/testing/selftests/netfilter/nft_queue.sh
index 6898448b4266..3d202b90b33d 100755
--- a/tools/testing/selftests/netfilter/nft_queue.sh
+++ b/tools/testing/selftests/netfilter/nft_queue.sh
@@ -12,6 +12,7 @@ sfx=$(mktemp -u "XXXXXXXX")
ns1="ns1-$sfx"
ns2="ns2-$sfx"
nsrouter="nsrouter-$sfx"
+timeout=4
cleanup()
{
@@ -20,6 +21,7 @@ cleanup()
ip netns del ${nsrouter}
rm -f "$TMPFILE0"
rm -f "$TMPFILE1"
+ rm -f "$TMPFILE2" "$TMPFILE3"
}
nft --version > /dev/null 2>&1
@@ -42,6 +44,8 @@ fi
TMPFILE0=$(mktemp)
TMPFILE1=$(mktemp)
+TMPFILE2=$(mktemp)
+TMPFILE3=$(mktemp)
trap cleanup EXIT
ip netns add ${ns1}
@@ -83,7 +87,7 @@ load_ruleset() {
local name=$1
local prio=$2
-ip netns exec ${nsrouter} nft -f - <<EOF
+ip netns exec ${nsrouter} nft -f /dev/stdin <<EOF
table inet $name {
chain nfq {
ip protocol icmp queue bypass
@@ -118,7 +122,7 @@ EOF
load_counter_ruleset() {
local prio=$1
-ip netns exec ${nsrouter} nft -f - <<EOF
+ip netns exec ${nsrouter} nft -f /dev/stdin <<EOF
table inet countrules {
chain pre {
type filter hook prerouting priority $prio; policy accept;
@@ -175,7 +179,7 @@ test_ping_router() {
test_queue_blackhole() {
local proto=$1
-ip netns exec ${nsrouter} nft -f - <<EOF
+ip netns exec ${nsrouter} nft -f /dev/stdin <<EOF
table $proto blackh {
chain forward {
type filter hook forward priority 0; policy accept;
@@ -184,10 +188,10 @@ table $proto blackh {
}
EOF
if [ $proto = "ip" ] ;then
- ip netns exec ${ns1} ping -c 1 -q 10.0.2.99 > /dev/null
+ ip netns exec ${ns1} ping -W 2 -c 1 -q 10.0.2.99 > /dev/null
lret=$?
elif [ $proto = "ip6" ]; then
- ip netns exec ${ns1} ping -c 1 -q dead:2::99 > /dev/null
+ ip netns exec ${ns1} ping -W 2 -c 1 -q dead:2::99 > /dev/null
lret=$?
else
lret=111
@@ -214,8 +218,8 @@ test_queue()
local last=""
# spawn nf-queue listeners
- ip netns exec ${nsrouter} ./nf-queue -c -q 0 -t 3 > "$TMPFILE0" &
- ip netns exec ${nsrouter} ./nf-queue -c -q 1 -t 3 > "$TMPFILE1" &
+ ip netns exec ${nsrouter} ./nf-queue -c -q 0 -t $timeout > "$TMPFILE0" &
+ ip netns exec ${nsrouter} ./nf-queue -c -q 1 -t $timeout > "$TMPFILE1" &
sleep 1
test_ping
ret=$?
@@ -250,11 +254,11 @@ test_queue()
test_tcp_forward()
{
- ip netns exec ${nsrouter} ./nf-queue -q 2 -t 10 &
+ ip netns exec ${nsrouter} ./nf-queue -q 2 -t $timeout &
local nfqpid=$!
tmpfile=$(mktemp) || exit 1
- dd conv=sparse status=none if=/dev/zero bs=1M count=100 of=$tmpfile
+ dd conv=sparse status=none if=/dev/zero bs=1M count=200 of=$tmpfile
ip netns exec ${ns2} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null &
local rpid=$!
@@ -270,15 +274,13 @@ test_tcp_forward()
test_tcp_localhost()
{
- tc -net "${nsrouter}" qdisc add dev lo root netem loss random 1%
-
tmpfile=$(mktemp) || exit 1
- dd conv=sparse status=none if=/dev/zero bs=1M count=900 of=$tmpfile
+ dd conv=sparse status=none if=/dev/zero bs=1M count=200 of=$tmpfile
ip netns exec ${nsrouter} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null &
local rpid=$!
- ip netns exec ${nsrouter} ./nf-queue -q 3 -t 30 &
+ ip netns exec ${nsrouter} ./nf-queue -q 3 -t $timeout &
local nfqpid=$!
sleep 1
@@ -287,6 +289,47 @@ test_tcp_localhost()
wait $rpid
[ $? -eq 0 ] && echo "PASS: tcp via loopback"
+ wait 2>/dev/null
+}
+
+test_tcp_localhost_requeue()
+{
+ip netns exec ${nsrouter} nft -f /dev/stdin <<EOF
+flush ruleset
+table inet filter {
+ chain output {
+ type filter hook output priority 0; policy accept;
+ tcp dport 12345 limit rate 1/second burst 1 packets counter queue num 0
+ }
+ chain post {
+ type filter hook postrouting priority 0; policy accept;
+ tcp dport 12345 limit rate 1/second burst 1 packets counter queue num 0
+ }
+}
+EOF
+ tmpfile=$(mktemp) || exit 1
+ dd conv=sparse status=none if=/dev/zero bs=1M count=200 of=$tmpfile
+ ip netns exec ${nsrouter} nc -w 5 -l -p 12345 <"$tmpfile" >/dev/null &
+ local rpid=$!
+
+ ip netns exec ${nsrouter} ./nf-queue -c -q 1 -t $timeout > "$TMPFILE2" &
+
+	# this nf-queue instance reads from queue 0 (filled via the output
+	# and postrouting hooks above) and re-queues every packet to the
+	# program listening on queue 1
+ ip netns exec ${nsrouter} ./nf-queue -G -d 150 -c -q 0 -Q 1 -t $timeout > "$TMPFILE3" &
+
+ sleep 1
+ ip netns exec ${nsrouter} nc -w 5 127.0.0.1 12345 <"$tmpfile" > /dev/null
+ rm -f "$tmpfile"
+
+ wait
+
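+	# with -c both nf-queue instances print packet counts; the counts
+	# must match, or packets were lost between queue 0 and queue 1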
+ if ! diff -u "$TMPFILE2" "$TMPFILE3" ; then
+ echo "FAIL: lost packets during requeue?!" 1>&2
+ return
+ fi
+
+ echo "PASS: tcp via loopback and re-queueing"
}
ip netns exec ${nsrouter} sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
@@ -328,5 +371,6 @@ test_queue 20
test_tcp_forward
test_tcp_localhost
+test_tcp_localhost_requeue
exit $ret